diff options
| author | Tony Nguyen <anthony.l.nguyen@intel.com> | 2021-10-07 15:56:57 -0700 | 
|---|---|---|
| committer | Tony Nguyen <anthony.l.nguyen@intel.com> | 2021-12-14 10:19:13 -0800 | 
| commit | 5e24d5984c805c644de8bf5217a820e22e28318c (patch) | |
| tree | e01bc2279e2be3fc28d0cec6c3cdfa9bdf135b8c /drivers/net/ethernet/intel/ice/ice_sched.c | |
| parent | 5f87ec4861aa1b8458da0dfd730abbd0bdb2f5f9 (diff) | |
ice: Use int for ice_status
To prepare for removal of ice_status, change the variables from
ice_status to int. This eases the transition when values are changed to
return standard int error codes over enum ice_status.
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_sched.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_sched.c | 220 | 
1 file changed, 110 insertions, 110 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index ce3c7bded4cb..91b0c4efa3aa 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -11,7 +11,7 @@   * This function inserts the root node of the scheduling tree topology   * to the SW DB.   */ -static enum ice_status +static int  ice_sched_add_root_node(struct ice_port_info *pi,  			struct ice_aqc_txsched_elem_data *info)  { @@ -96,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)   *   * This function sends a scheduling elements cmd (cmd_opc)   */ -static enum ice_status +static int  ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,  			    u16 elems_req, void *buf, u16 buf_size,  			    u16 *elems_resp, struct ice_sq_cd *cd)  {  	struct ice_aqc_sched_elem_cmd *cmd;  	struct ice_aq_desc desc; -	enum ice_status status; +	int status;  	cmd = &desc.params.sched_elem_cmd;  	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); @@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,   *   * Query scheduling elements (0x0404)   */ -enum ice_status +int  ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,  			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,  			 u16 *elems_ret, struct ice_sq_cd *cd) @@ -145,14 +145,14 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,   *   * This function inserts a scheduler node to the SW DB.   
*/ -enum ice_status +int  ice_sched_add_node(struct ice_port_info *pi, u8 layer,  		   struct ice_aqc_txsched_elem_data *info)  {  	struct ice_aqc_txsched_elem_data elem;  	struct ice_sched_node *parent;  	struct ice_sched_node *node; -	enum ice_status status; +	int status;  	struct ice_hw *hw;  	if (!pi) @@ -209,7 +209,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,   *   * Delete scheduling elements (0x040F)   */ -static enum ice_status +static int  ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,  			  struct ice_aqc_delete_elem *buf, u16 buf_size,  			  u16 *grps_del, struct ice_sq_cd *cd) @@ -228,13 +228,13 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,   *   * This function remove nodes from HW   */ -static enum ice_status +static int  ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,  		       u16 num_nodes, u32 *node_teids)  {  	struct ice_aqc_delete_elem *buf;  	u16 i, num_groups_removed = 0; -	enum ice_status status; +	int status;  	u16 buf_size;  	buf_size = struct_size(buf, teid, num_nodes); @@ -369,14 +369,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)   *   * Get default scheduler topology (0x400)   */ -static enum ice_status +static int  ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,  		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,  		     u8 *num_branches, struct ice_sq_cd *cd)  {  	struct ice_aqc_get_topo *cmd;  	struct ice_aq_desc desc; -	enum ice_status status; +	int status;  	cmd = &desc.params.get_topo;  	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); @@ -399,7 +399,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,   *   * Add scheduling elements (0x0401)   */ -static enum ice_status +static int  ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,  		       struct ice_aqc_add_elem *buf, u16 buf_size,  		       u16 *grps_added, struct ice_sq_cd *cd) @@ -420,7 +420,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, 
u16 grps_req,   *   * Configure scheduling elements (0x0403)   */ -static enum ice_status +static int  ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,  		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,  		       u16 *elems_cfgd, struct ice_sq_cd *cd) @@ -441,7 +441,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,   *   * Move scheduling elements (0x0408)   */ -static enum ice_status +static int  ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,  			struct ice_aqc_move_elem *buf, u16 buf_size,  			u16 *grps_movd, struct ice_sq_cd *cd) @@ -462,7 +462,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,   *   * Suspend scheduling elements (0x0409)   */ -static enum ice_status +static int  ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,  			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)  { @@ -482,7 +482,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,   *   * resume scheduling elements (0x040A)   */ -static enum ice_status +static int  ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,  			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)  { @@ -500,7 +500,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,   *   * Query scheduler resource allocation (0x0412)   */ -static enum ice_status +static int  ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,  		       struct ice_aqc_query_txsched_res_resp *buf,  		       struct ice_sq_cd *cd) @@ -520,12 +520,12 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,   *   * This function suspends or resumes HW nodes   */ -static enum ice_status +static int  ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,  			       bool suspend)  {  	u16 i, buf_size, num_elem_ret = 0; -	enum ice_status status; +	int status;  	__le32 *buf;  	buf_size = sizeof(*buf) * num_nodes; @@ -558,7 +558,7 @@ ice_sched_suspend_resume_elems(struct 
ice_hw *hw, u8 num_nodes, u32 *node_teids,   * @tc: TC number   * @new_numqs: number of queues   */ -static enum ice_status +static int  ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)  {  	struct ice_vsi_ctx *vsi_ctx; @@ -602,7 +602,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)   * @tc: TC number   * @new_numqs: number of queues   */ -static enum ice_status +static int  ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)  {  	struct ice_vsi_ctx *vsi_ctx; @@ -651,14 +651,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)   *   * RL profile function to add, query, or remove profile(s)   */ -static enum ice_status +static int  ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,  		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,  		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)  {  	struct ice_aqc_rl_profile *cmd;  	struct ice_aq_desc desc; -	enum ice_status status; +	int status;  	cmd = &desc.params.rl_profile; @@ -682,7 +682,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,   *   * Add RL profile (0x0410)   */ -static enum ice_status +static int  ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,  		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,  		      u16 *num_profiles_added, struct ice_sq_cd *cd) @@ -702,7 +702,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,   *   * Remove RL profile (0x0415)   */ -static enum ice_status +static int  ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,  			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,  			 u16 *num_profiles_removed, struct ice_sq_cd *cd) @@ -721,13 +721,13 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,   * its associated parameters from HW DB,and locally. The caller needs to   * hold scheduler lock.   
*/ -static enum ice_status +static int  ice_sched_del_rl_profile(struct ice_hw *hw,  			 struct ice_aqc_rl_profile_info *rl_info)  {  	struct ice_aqc_rl_profile_elem *buf;  	u16 num_profiles_removed; -	enum ice_status status; +	int status;  	u16 num_profiles = 1;  	if (rl_info->prof_id_ref != 0) @@ -763,7 +763,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)  		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,  					 &pi->rl_prof_list[ln], list_entry) {  			struct ice_hw *hw = pi->hw; -			enum ice_status status; +			int status;  			rl_prof_elem->prof_id_ref = 0;  			status = ice_sched_del_rl_profile(hw, rl_prof_elem); @@ -875,7 +875,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)   *   * This function add nodes to HW as well as to SW DB for a given layer   */ -static enum ice_status +static int  ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,  		    u16 *num_nodes_added, u32 *first_node_teid) @@ -883,7 +883,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  	struct ice_sched_node *prev, *new_node;  	struct ice_aqc_add_elem *buf;  	u16 i, num_groups_added = 0; -	enum ice_status status = 0; +	int status = 0;  	struct ice_hw *hw = pi->hw;  	size_t buf_size;  	u32 teid; @@ -974,7 +974,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,   *   * Add nodes into specific HW layer.   */ -static enum ice_status +static int  ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,  				struct ice_sched_node *tc_node,  				struct ice_sched_node *parent, u8 layer, @@ -1018,7 +1018,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,   *   * This function add nodes to a given layer.   
*/ -static enum ice_status +static int  ice_sched_add_nodes_to_layer(struct ice_port_info *pi,  			     struct ice_sched_node *tc_node,  			     struct ice_sched_node *parent, u8 layer, @@ -1027,7 +1027,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,  {  	u32 *first_teid_ptr = first_node_teid;  	u16 new_num_nodes = num_nodes; -	enum ice_status status = 0; +	int status = 0;  	*num_nodes_added = 0;  	while (*num_nodes_added < num_nodes) { @@ -1152,7 +1152,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)  	}  	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {  		u32 teid = le32_to_cpu(node->info.node_teid); -		enum ice_status status; +		int status;  		/* remove the default leaf node */  		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); @@ -1198,10 +1198,10 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)   * resources, default topology created by firmware and storing the information   * in SW DB.   */ -enum ice_status ice_sched_init_port(struct ice_port_info *pi) +int ice_sched_init_port(struct ice_port_info *pi)  {  	struct ice_aqc_get_topo_elem *buf; -	enum ice_status status; +	int status;  	struct ice_hw *hw;  	u8 num_branches;  	u16 num_elems; @@ -1300,10 +1300,10 @@ err_init_port:   *   * query FW for allocated scheduler resources and store in HW struct   */ -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +int ice_sched_query_res_alloc(struct ice_hw *hw)  {  	struct ice_aqc_query_txsched_res_resp *buf; -	enum ice_status status = 0; +	int status = 0;  	__le16 max_sibl;  	u16 i; @@ -1614,14 +1614,14 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)   * This function adds the VSI child nodes to tree. It gets called for   * LAN and RDMA separately.   
*/ -static enum ice_status +static int  ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,  			      struct ice_sched_node *tc_node, u16 *num_nodes,  			      u8 owner)  {  	struct ice_sched_node *parent, *node;  	struct ice_hw *hw = pi->hw; -	enum ice_status status; +	int status;  	u32 first_node_teid;  	u16 num_added = 0;  	u8 i, qgl, vsil; @@ -1717,12 +1717,12 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,   * This function adds the VSI supported nodes into Tx tree including the   * VSI, its parent and intermediate nodes in below layers   */ -static enum ice_status +static int  ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,  				struct ice_sched_node *tc_node, u16 *num_nodes)  {  	struct ice_sched_node *parent = tc_node; -	enum ice_status status; +	int status;  	u32 first_node_teid;  	u16 num_added = 0;  	u8 i, vsil; @@ -1766,7 +1766,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,   *   * This function adds a new VSI into scheduler tree   */ -static enum ice_status +static int  ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)  {  	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; @@ -1794,7 +1794,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)   *   * This function updates the VSI child nodes based on the number of queues   */ -static enum ice_status +static int  ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,  				 u8 tc, u16 new_numqs, u8 owner)  { @@ -1802,7 +1802,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,  	struct ice_sched_node *vsi_node;  	struct ice_sched_node *tc_node;  	struct ice_vsi_ctx *vsi_ctx; -	enum ice_status status = 0; +	int status = 0;  	struct ice_hw *hw = pi->hw;  	u16 prev_numqs; @@ -1869,13 +1869,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,   * enabled and VSI is in suspended state then 
resume the VSI back. If TC is   * disabled then suspend the VSI if it is not already.   */ -enum ice_status +int  ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,  		  u8 owner, bool enable)  {  	struct ice_sched_node *vsi_node, *tc_node;  	struct ice_vsi_ctx *vsi_ctx; -	enum ice_status status = 0; +	int status = 0;  	struct ice_hw *hw = pi->hw;  	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); @@ -1993,10 +1993,10 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)   * This function removes the VSI and its LAN or RDMA children nodes from the   * scheduler tree.   */ -static enum ice_status +static int  ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)  { -	enum ice_status status = ICE_ERR_PARAM; +	int status = ICE_ERR_PARAM;  	struct ice_vsi_ctx *vsi_ctx;  	u8 i; @@ -2065,7 +2065,7 @@ exit_sched_rm_vsi_cfg:   * This function clears the VSI and its LAN children nodes from scheduler tree   * for all TCs.   */ -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)  {  	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);  } @@ -2078,7 +2078,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)   * This function clears the VSI and its RDMA children nodes from scheduler tree   * for all TCs.   */ -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)  {  	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);  } @@ -2188,13 +2188,13 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,   *   * This function move the child nodes to a given parent.   
*/ -static enum ice_status +static int  ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,  		     u16 num_items, u32 *list)  {  	struct ice_aqc_move_elem *buf;  	struct ice_sched_node *node; -	enum ice_status status = 0; +	int status = 0;  	u16 i, grps_movd = 0;  	struct ice_hw *hw;  	u16 buf_len; @@ -2251,14 +2251,14 @@ move_err_exit:   * This function moves a VSI to an aggregator node or its subtree.   * Intermediate nodes may be created if required.   */ -static enum ice_status +static int  ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,  			  u8 tc)  {  	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;  	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };  	u32 first_node_teid, vsi_teid; -	enum ice_status status; +	int status;  	u16 num_nodes_added;  	u8 aggl, vsil, i; @@ -2333,14 +2333,14 @@ move_nodes:   * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The   * caller holds the scheduler lock.   */ -static enum ice_status +static int  ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,  			     struct ice_sched_agg_info *agg_info, u8 tc,  			     bool rm_vsi_info)  {  	struct ice_sched_agg_vsi_info *agg_vsi_info;  	struct ice_sched_agg_vsi_info *tmp; -	enum ice_status status = 0; +	int status = 0;  	list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,  				 list_entry) { @@ -2397,7 +2397,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)   * This function removes the aggregator node and intermediate nodes if any   * from the given TC   */ -static enum ice_status +static int  ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)  {  	struct ice_sched_node *tc_node, *agg_node; @@ -2445,11 +2445,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)   * the aggregator configuration completely for requested TC. The caller needs   * to hold the scheduler lock.   
*/ -static enum ice_status +static int  ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,  		  u8 tc, bool rm_vsi_info)  { -	enum ice_status status = 0; +	int status = 0;  	/* If nothing to remove - return success */  	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) @@ -2478,7 +2478,7 @@ exit_rm_agg_cfg_tc:   * Save aggregator TC bitmap. This function needs to be called with scheduler   * lock held.   */ -static enum ice_status +static int  ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,  		       unsigned long *tc_bitmap)  { @@ -2501,12 +2501,12 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,   * This function creates an aggregator node and intermediate nodes if required   * for the given TC   */ -static enum ice_status +static int  ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)  {  	struct ice_sched_node *parent, *agg_node, *tc_node;  	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; -	enum ice_status status = 0; +	int status = 0;  	struct ice_hw *hw = pi->hw;  	u32 first_node_teid;  	u16 num_nodes_added; @@ -2591,12 +2591,12 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)   * resources and remove aggregator ID.   * This function needs to be called with scheduler lock held.   */ -static enum ice_status +static int  ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,  		  enum ice_agg_type agg_type, unsigned long *tc_bitmap)  {  	struct ice_sched_agg_info *agg_info; -	enum ice_status status = 0; +	int status = 0;  	struct ice_hw *hw = pi->hw;  	u8 tc; @@ -2653,12 +2653,12 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,   *   * This function configures aggregator node(s).   
*/ -enum ice_status +int  ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,  	    u8 tc_bitmap)  {  	unsigned long bitmap = tc_bitmap; -	enum ice_status status; +	int status;  	mutex_lock(&pi->sched_lock);  	status = ice_sched_cfg_agg(pi, agg_id, agg_type, @@ -2724,7 +2724,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)   * Save VSI to aggregator TC bitmap. This function needs to call with scheduler   * lock held.   */ -static enum ice_status +static int  ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,  			   unsigned long *tc_bitmap)  { @@ -2754,13 +2754,13 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,   * already associated to the aggregator node then no operation is performed on   * the tree. This function needs to be called with scheduler lock held.   */ -static enum ice_status +static int  ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,  			   u16 vsi_handle, unsigned long *tc_bitmap)  {  	struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;  	struct ice_sched_agg_info *agg_info, *old_agg_info; -	enum ice_status status = 0; +	int status = 0;  	struct ice_hw *hw = pi->hw;  	u8 tc; @@ -2851,12 +2851,12 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)   * returns success or error on config sched element failure. The caller   * needs to hold scheduler lock.   */ -static enum ice_status +static int  ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,  		      struct ice_aqc_txsched_elem_data *info)  {  	struct ice_aqc_txsched_elem_data buf; -	enum ice_status status; +	int status;  	u16 elem_cfgd = 0;  	u16 num_elems = 1; @@ -2893,7 +2893,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,   *   * This function configures node element's BW allocation.   
*/ -static enum ice_status +static int  ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,  			    enum ice_rl_type rl_type, u16 bw_alloc)  { @@ -2925,12 +2925,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,   *   * Move or associate VSI to a new or default aggregator node.   */ -enum ice_status +int  ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,  		    u8 tc_bitmap)  {  	unsigned long bitmap = tc_bitmap; -	enum ice_status status; +	int status;  	mutex_lock(&pi->sched_lock);  	status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, @@ -3098,11 +3098,11 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)   *   * This function converts the BW to profile structure format.   */ -static enum ice_status +static int  ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,  			   struct ice_aqc_rl_profile_elem *profile)  { -	enum ice_status status = ICE_ERR_PARAM; +	int status = ICE_ERR_PARAM;  	s64 bytes_per_sec, ts_rate, mv_tmp;  	bool found = false;  	s32 encode = 0; @@ -3176,7 +3176,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,  	struct ice_aqc_rl_profile_info *rl_prof_elem;  	u16 profiles_added = 0, num_profiles = 1;  	struct ice_aqc_rl_profile_elem *buf; -	enum ice_status status; +	int status;  	struct ice_hw *hw;  	u8 profile_type; @@ -3249,7 +3249,7 @@ exit_add_rl_prof:   *   * This function configures node element's BW limit.   */ -static enum ice_status +static int  ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,  			  enum ice_rl_type rl_type, u16 rl_prof_id)  { @@ -3420,12 +3420,12 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)   * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold   * scheduler lock.   
*/ -static enum ice_status +static int  ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,  			u16 profile_id)  {  	struct ice_aqc_rl_profile_info *rl_prof_elem; -	enum ice_status status = 0; +	int status = 0;  	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)  		return ICE_ERR_PARAM; @@ -3461,12 +3461,12 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,   * type CIR, EIR, or SRL to default. This function needs to be called   * with the scheduler lock held.   */ -static enum ice_status +static int  ice_sched_set_node_bw_dflt(struct ice_port_info *pi,  			   struct ice_sched_node *node,  			   enum ice_rl_type rl_type, u8 layer_num)  { -	enum ice_status status; +	int status;  	struct ice_hw *hw;  	u8 profile_type;  	u16 rl_prof_id; @@ -3518,7 +3518,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,   * them may be set for any given element. This function needs to be called   * with the scheduler lock held.   */ -static enum ice_status +static int  ice_sched_set_eir_srl_excl(struct ice_port_info *pi,  			   struct ice_sched_node *node,  			   u8 layer_num, enum ice_rl_type rl_type, u32 bw) @@ -3562,12 +3562,12 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,   * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile   * ID from local database. The caller needs to hold scheduler lock.   */ -static enum ice_status +static int  ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,  		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)  {  	struct ice_aqc_rl_profile_info *rl_prof_info; -	enum ice_status status = ICE_ERR_PARAM; +	int status = ICE_ERR_PARAM;  	struct ice_hw *hw = pi->hw;  	u16 old_id, rl_prof_id; @@ -3608,12 +3608,12 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,   * It updates node's BW limit parameters like BW RL profile ID of type CIR,   * EIR, or SRL. The caller needs to hold scheduler lock.   
*/ -static enum ice_status +static int  ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,  			  enum ice_rl_type rl_type, u32 bw)  {  	struct ice_sched_node *cfg_node = node; -	enum ice_status status; +	int status;  	struct ice_hw *hw;  	u8 layer_num; @@ -3657,7 +3657,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,   * type CIR, EIR, or SRL to default. This function needs to be called   * with the scheduler lock held.   */ -static enum ice_status +static int  ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,  			       struct ice_sched_node *node,  			       enum ice_rl_type rl_type) @@ -3675,7 +3675,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,   * behalf of the requested node (first argument). This function needs to be   * called with scheduler lock held.   */ -static enum ice_status +static int  ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)  {  	/* SRL profiles are not available on all layers. Check if the @@ -3701,7 +3701,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)   *   * Save BW information of queue type node for post replay use.   */ -static enum ice_status +static int  ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)  {  	switch (rl_type) { @@ -3731,11 +3731,11 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)   *   * This function sets BW limit of queue scheduling node.   */ -static enum ice_status +static int  ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,  		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)  { -	enum ice_status status = ICE_ERR_PARAM; +	int status = ICE_ERR_PARAM;  	struct ice_sched_node *node;  	struct ice_q_ctx *q_ctx; @@ -3794,7 +3794,7 @@ exit_q_bw_lmt:   *   * This function configures BW limit of queue scheduling node.   
*/ -enum ice_status +int  ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,  		 u16 q_handle, enum ice_rl_type rl_type, u32 bw)  { @@ -3812,7 +3812,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,   *   * This function configures BW default limit of queue scheduling node.   */ -enum ice_status +int  ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,  		      u16 q_handle, enum ice_rl_type rl_type)  { @@ -3880,12 +3880,12 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,   * This function sets BW limit of VSI or Aggregator scheduling node   * based on TC information from passed in argument BW.   */ -static enum ice_status +int  ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,  				 enum ice_agg_type agg_type, u8 tc,  				 enum ice_rl_type rl_type, u32 bw)  { -	enum ice_status status = ICE_ERR_PARAM; +	int status = ICE_ERR_PARAM;  	struct ice_sched_node *node;  	if (!pi) @@ -3921,7 +3921,7 @@ exit_set_node_bw_lmt_per_tc:   * This function configures BW limit of VSI scheduling node based on TC   * information.   */ -enum ice_status +int  ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,  			  enum ice_rl_type rl_type, u32 bw)  { @@ -3948,7 +3948,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,   * This function configures default BW limit of VSI scheduling node based on TC   * information.   */ -enum ice_status +int  ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,  			       enum ice_rl_type rl_type)  { @@ -3976,7 +3976,7 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,   * burst size value is used for future rate limit calls. It doesn't change the   * existing or previously created RL profiles.   
*/ -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)  {  	u16 burst_size_to_prog; @@ -4017,13 +4017,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)   * This function configures node element's priority value. It   * needs to be called with scheduler lock held.   */ -static enum ice_status +static int  ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,  			   u8 priority)  {  	struct ice_aqc_txsched_elem_data buf;  	struct ice_aqc_txsched_elem *data; -	enum ice_status status; +	int status;  	buf = node->info;  	data = &buf.data; @@ -4044,12 +4044,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,   * This function restores node's BW from bw_t_info. The caller needs   * to hold the scheduler lock.   */ -static enum ice_status +static int  ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,  			 struct ice_bw_type_info *bw_t_info)  {  	struct ice_port_info *pi = hw->port_info; -	enum ice_status status = ICE_ERR_PARAM; +	int status = ICE_ERR_PARAM;  	u16 bw_alloc;  	if (!node) @@ -4137,7 +4137,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)  		if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,  				  ICE_MAX_TRAFFIC_CLASS)) {  			DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); -			enum ice_status status; +			int status;  			bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);  			ice_sched_get_ena_tc_bitmap(pi, @@ -4191,14 +4191,14 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)   * their node bandwidth information. This function needs to be called with   * scheduler lock held.   
*/ -static enum ice_status +static int  ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)  {  	DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);  	struct ice_sched_agg_vsi_info *agg_vsi_info;  	struct ice_port_info *pi = hw->port_info;  	struct ice_sched_agg_info *agg_info; -	enum ice_status status; +	int status;  	bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);  	if (!ice_is_vsi_valid(hw, vsi_handle)) @@ -4233,10 +4233,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)   * This function replays association of VSI to aggregator type nodes, and   * node bandwidth information.   */ -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)  {  	struct ice_port_info *pi = hw->port_info; -	enum ice_status status; +	int status;  	mutex_lock(&pi->sched_lock);  	status = ice_sched_replay_vsi_agg(hw, vsi_handle); @@ -4252,7 +4252,7 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)   * This function replays queue type node bandwidth. This function needs to be   * called with scheduler lock held.   */ -enum ice_status +int  ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)  {  	struct ice_sched_node *q_node;  | 
