Diffstat (limited to 'sys/dev/ice/ice_sched.c')
-rw-r--r-- | sys/dev/ice/ice_sched.c | 487
1 file changed, 253 insertions, 234 deletions
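Most of this diff is a mechanical conversion of the scheduler code away from the driver-private 'enum ice_status' return type: functions now return a plain 'int', 'ICE_SUCCESS' becomes 0, and explicit 'status != ICE_SUCCESS' / 'status == ICE_SUCCESS' comparisons collapse to 'if (status)' / 'if (!status)'. The existing ICE_ERR_* codes are still returned unchanged, so callers only test for non-zero. A minimal sketch of the before/after pattern follows; ice_sched_example() is a hypothetical helper, not a real driver function, and it assumes the usual ice driver headers and types:

	/* was: static enum ice_status */
	static int
	ice_sched_example(struct ice_port_info *pi)
	{
		int status;			/* was: enum ice_status status; */

		if (!pi)
			return ICE_ERR_PARAM;	/* ICE_ERR_* values are unchanged */

		status = ice_sched_init_port(pi);
		if (status)			/* was: if (status != ICE_SUCCESS) */
			return status;

		return 0;			/* was: return ICE_SUCCESS; */
	}
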
diff --git a/sys/dev/ice/ice_sched.c b/sys/dev/ice/ice_sched.c index cd0d7de62b33..d57733dbfa7e 100644 --- a/sys/dev/ice/ice_sched.c +++ b/sys/dev/ice/ice_sched.c @@ -39,7 +39,7 @@ * This function inserts the root node of the scheduling tree topology * to the SW DB. */ -static enum ice_status +static int ice_sched_add_root_node(struct ice_port_info *pi, struct ice_aqc_txsched_elem_data *info) { @@ -62,9 +62,9 @@ ice_sched_add_root_node(struct ice_port_info *pi, return ICE_ERR_NO_MEMORY; } - ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA); + ice_memcpy(&root->info, info, sizeof(*info), ICE_NONDMA_TO_NONDMA); pi->root = root; - return ICE_SUCCESS; + return 0; } /** @@ -83,6 +83,9 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) { u16 i; + if (!start_node) + return NULL; + /* The TEID is same as that of the start_node */ if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid) return start_node; @@ -123,14 +126,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) * * This function sends a scheduling elements cmd (cmd_opc) */ -static enum ice_status +static int ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, u16 elems_req, void *buf, u16 buf_size, u16 *elems_resp, struct ice_sq_cd *cd) { struct ice_aqc_sched_elem_cmd *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.sched_elem_cmd; ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); @@ -154,7 +157,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, * * Query scheduling elements (0x0404) */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) @@ -173,7 +176,7 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, * * This function inserts a scheduler node to the SW DB. 
*/ -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info, struct ice_sched_node *prealloc_node) @@ -181,8 +184,8 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; struct ice_sched_node *node; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi) return ICE_ERR_PARAM; @@ -226,7 +229,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node->tx_sched_layer = layer; parent->children[parent->num_children++] = node; node->info = elem; - return ICE_SUCCESS; + return 0; } /** @@ -240,7 +243,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, * * Delete scheduling elements (0x040F) */ -static enum ice_status +static int ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_delete_elem *buf, u16 buf_size, u16 *grps_del, struct ice_sq_cd *cd) @@ -259,14 +262,14 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, * * This function remove nodes from HW */ -static enum ice_status +static int ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, u16 num_nodes, u32 *node_teids) { struct ice_aqc_delete_elem *buf; u16 i, num_groups_removed = 0; - enum ice_status status; u16 buf_size; + int status; buf_size = ice_struct_size(buf, teid, num_nodes); buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size); @@ -280,7 +283,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); - if (status != ICE_SUCCESS || num_groups_removed != 1) + if (status || num_groups_removed != 1) ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", hw->adminq.sq_last_status); @@ -400,14 +403,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) * * Get default scheduler topology (0x400) */ -static enum ice_status +static int ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, struct ice_aqc_get_topo_elem *buf, u16 buf_size, u8 *num_branches, struct ice_sq_cd *cd) { struct ice_aqc_get_topo *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); @@ -430,7 +433,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, * * Add scheduling elements (0x0401) */ -static enum ice_status +static int ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_add_elem *buf, u16 buf_size, u16 *grps_added, struct ice_sq_cd *cd) @@ -451,7 +454,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, * * Configure scheduling elements (0x0403) */ -static enum ice_status +static int ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_cfgd, struct ice_sq_cd *cd) @@ -472,7 +475,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, * * Move scheduling elements (0x0408) */ -enum ice_status +int ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_move_elem *buf, u16 buf_size, u16 *grps_movd, struct ice_sq_cd *cd) @@ -493,7 +496,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, * * Suspend scheduling elements (0x0409) */ -static enum ice_status +static int ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -513,7 +516,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 
*buf, * * resume scheduling elements (0x040A) */ -static enum ice_status +static int ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -531,7 +534,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * Query scheduler resource allocation (0x0412) */ -static enum ice_status +static int ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, struct ice_aqc_query_txsched_res_resp *buf, struct ice_sq_cd *cd) @@ -551,13 +554,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, * * This function suspends or resumes HW nodes */ -static enum ice_status +static int ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { u16 i, buf_size, num_elem_ret = 0; - enum ice_status status; __le32 *buf; + int status; buf_size = sizeof(*buf) * num_nodes; buf = (__le32 *)ice_malloc(hw, buf_size); @@ -575,7 +578,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, status = ice_aq_resume_sched_elems(hw, num_nodes, buf, buf_size, &num_elem_ret, NULL); - if (status != ICE_SUCCESS || num_elem_ret != num_nodes) + if (status || num_elem_ret != num_nodes) ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n"); ice_free(hw, buf); @@ -589,7 +592,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -605,7 +608,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) if (!vsi_ctx->lan_q_ctx[tc]) return ICE_ERR_NO_MEMORY; vsi_ctx->num_lan_q_entries[tc] = new_numqs; - return ICE_SUCCESS; + return 0; } /* num queues are increased, update the queue contexts */ if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { @@ -621,7 +624,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx->lan_q_ctx[tc] = q_ctx; vsi_ctx->num_lan_q_entries[tc] = new_numqs; } - return ICE_SUCCESS; + return 0; } /** @@ -631,7 +634,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -647,7 +650,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) if (!vsi_ctx->rdma_q_ctx[tc]) return ICE_ERR_NO_MEMORY; vsi_ctx->num_rdma_q_entries[tc] = new_numqs; - return ICE_SUCCESS; + return 0; } /* num queues are increased, update the queue contexts */ if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) { @@ -663,7 +666,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx->rdma_q_ctx[tc] = q_ctx; vsi_ctx->num_rdma_q_entries[tc] = new_numqs; } - return ICE_SUCCESS; + return 0; } /** @@ -678,14 +681,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * * RL profile function to add, query, or remove profile(s) */ -static enum ice_status +static int ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) { struct ice_aqc_rl_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.rl_profile; @@ -709,7 +712,7 @@ 
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, * * Add RL profile (0x0410) */ -static enum ice_status +static int ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_added, struct ice_sq_cd *cd) @@ -728,7 +731,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, * * Query RL profile (0x0411) */ -enum ice_status +int ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -748,7 +751,7 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, * * Remove RL profile (0x0415) */ -static enum ice_status +static int ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_removed, struct ice_sq_cd *cd) @@ -767,14 +770,14 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, * its associated parameters from HW DB,and locally. The caller needs to * hold scheduler lock. */ -static enum ice_status +static int ice_sched_del_rl_profile(struct ice_hw *hw, struct ice_aqc_rl_profile_info *rl_info) { struct ice_aqc_rl_profile_elem *buf; u16 num_profiles_removed; - enum ice_status status; u16 num_profiles = 1; + int status; if (rl_info->prof_id_ref != 0) return ICE_ERR_IN_USE; @@ -810,7 +813,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp, &hw->rl_prof_list[ln], ice_aqc_rl_profile_info, list_entry) { - enum ice_status status; + int status; rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); @@ -923,7 +926,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw) * * Configure Node Attributes (0x0417) */ -enum ice_status +int ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes, struct ice_aqc_node_attr_elem *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -950,7 +953,7 @@ ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes, * * Configure L2 Node CGD (0x0414) */ -enum ice_status +int ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -979,7 +982,7 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, * * This function add nodes to HW as well as to SW DB for a given layer */ -enum ice_status +int ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, u16 num_nodes, u16 *num_nodes_added, u32 *first_node_teid, @@ -988,8 +991,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *prev, *new_node; struct ice_aqc_add_elem *buf; u16 i, num_groups_added = 0; - enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; + int status = 0; u16 buf_size; u32 teid; @@ -1019,7 +1022,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, &num_groups_added, NULL); - if (status != ICE_SUCCESS || num_groups_added != 1) { + if (status || num_groups_added != 1) { ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", hw->adminq.sq_last_status); ice_free(hw, buf); @@ -1034,7 +1037,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, else status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL); - if (status != ICE_SUCCESS) { + if (status) { ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", status); break; 
@@ -1083,7 +1086,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, * * Add nodes into specific hw layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1095,7 +1098,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, *num_nodes_added = 0; if (!num_nodes) - return ICE_SUCCESS; + return 0; if (!parent || layer < pi->hw->sw_entry_point_layer) return ICE_ERR_PARAM; @@ -1127,7 +1130,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, * * This function add nodes to a given layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1136,7 +1139,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, { u32 *first_teid_ptr = first_node_teid; u16 new_num_nodes = num_nodes; - enum ice_status status = ICE_SUCCESS; + int status = 0; u32 temp; *num_nodes_added = 0; @@ -1147,7 +1150,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, layer, new_num_nodes, first_teid_ptr, &num_added); - if (status == ICE_SUCCESS) + if (!status) *num_nodes_added += num_added; /* added more nodes than requested ? */ if (*num_nodes_added > num_nodes) { @@ -1157,10 +1160,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, break; } /* break if all the nodes are added successfully */ - if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes)) + if (!status && (*num_nodes_added == num_nodes)) break; /* break if the error is not max limit */ - if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT) + if (status && status != ICE_ERR_MAX_LIMIT) break; /* Exceeded the max children */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -1255,7 +1258,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) } if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = LE32_TO_CPU(node->info.node_teid); - enum ice_status status; + int status; /* remove the default leaf node */ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); @@ -1301,13 +1304,13 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) * resources, default topology created by firmware and storing the information * in SW DB. 
*/ -enum ice_status ice_sched_init_port(struct ice_port_info *pi) +int ice_sched_init_port(struct ice_port_info *pi) { struct ice_aqc_get_topo_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 num_branches; u16 num_elems; + int status; u8 i, j; if (!pi) @@ -1430,12 +1433,12 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid) * * query FW for allocated scheduler resources and store in HW struct */ -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +int ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; - enum ice_status status = ICE_SUCCESS; __le16 max_sibl; - u8 i; + int status = 0; + u16 i; if (hw->layer_info) return status; @@ -1721,12 +1724,12 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node) { struct ice_aqc_txsched_elem_data buf; - enum ice_status status; u32 node_teid; + int status; node_teid = LE32_TO_CPU(node->info.node_teid); status = ice_sched_query_elem(hw, node_teid, &buf); - if (status != ICE_SUCCESS) + if (status) return false; if (memcmp(&buf, &node->info, sizeof(buf))) { @@ -1777,7 +1780,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * This function adds the VSI child nodes to tree. It gets called for * LAN and RDMA separately. */ -static enum ice_status +static int ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) @@ -1792,7 +1795,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { - enum ice_status status; + int status; if (!parent) return ICE_ERR_CFG; @@ -1801,7 +1804,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, num_nodes[i], &first_node_teid, &num_added); - if (status != ICE_SUCCESS || num_nodes[i] != num_added) + if (status || num_nodes[i] != num_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next @@ -1820,7 +1823,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, } } - return ICE_SUCCESS; + return 0; } /** @@ -1882,7 +1885,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, * This function adds the VSI supported nodes into Tx tree including the * VSI, its parent and intermediate nodes in below layers */ -static enum ice_status +static int ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { @@ -1896,13 +1899,13 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { - enum ice_status status; + int status; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); - if (status != ICE_SUCCESS || num_nodes[i] != num_added) + if (status || num_nodes[i] != num_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next @@ -1921,7 +1924,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, parent->vsi_handle = vsi_handle; } - return ICE_SUCCESS; + return 0; } /** @@ -1932,7 +1935,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, * * This function adds a new VSI into scheduler tree */ -static enum 
ice_status +static int ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; @@ -1960,7 +1963,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) * * This function updates the VSI child nodes based on the number of queues */ -static enum ice_status +static int ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { @@ -1968,8 +1971,8 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; + int status = 0; u16 prev_numqs; tc_node = ice_sched_get_tc_node(pi, tc); @@ -2019,7 +2022,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, else vsi_ctx->sched.max_rdmaq[tc] = new_numqs; - return ICE_SUCCESS; + return 0; } /** @@ -2035,14 +2038,14 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, * enabled and VSI is in suspended state then resume the VSI back. If TC is * disabled then suspend the VSI if it is not already. */ -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; + int status = 0; ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); @@ -2160,11 +2163,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) * This function removes the VSI and its LAN or RDMA children nodes from the * scheduler tree. */ -static enum ice_status +static int ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { - enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; + int status = ICE_ERR_PARAM; u8 i; ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); @@ -2217,7 +2220,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) else vsi_ctx->sched.max_rdmaq[i] = 0; } - status = ICE_SUCCESS; + status = 0; exit_sched_rm_vsi_cfg: ice_release_lock(&pi->sched_lock); @@ -2232,7 +2235,7 @@ exit_sched_rm_vsi_cfg: * This function clears the VSI and its LAN children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); } @@ -2245,7 +2248,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) * This function clears the VSI and its RDMA children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); } @@ -2285,7 +2288,7 @@ bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node) * This function retrieves the tree topology from the firmware for a given * node TEID to the root node. 
*/ -enum ice_status +int ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -2405,15 +2408,15 @@ ice_sched_update_parent(struct ice_sched_node *new_parent, * * This function move the child nodes to a given parent. */ -enum ice_status +int ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { struct ice_aqc_move_elem *buf; struct ice_sched_node *node; - enum ice_status status = ICE_SUCCESS; u16 i, grps_movd = 0; struct ice_hw *hw; + int status = 0; u16 buf_len; hw = pi->hw; @@ -2468,16 +2471,16 @@ move_err_exit: * This function moves a VSI to an aggregator node or its subtree. * Intermediate nodes may be created if required. */ -static enum ice_status +static int ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, u8 tc) { struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u32 first_node_teid, vsi_teid; - enum ice_status status; u16 num_nodes_added; u8 aggl, vsil, i; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) @@ -2493,7 +2496,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, /* Is this VSI already part of given aggregator? */ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) - return ICE_SUCCESS; + return 0; aggl = ice_sched_get_agg_layer(pi->hw); vsil = ice_sched_get_vsi_layer(pi->hw); @@ -2518,7 +2521,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, num_nodes[i], &first_node_teid, &num_nodes_added); - if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added) + if (status || num_nodes[i] != num_nodes_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next @@ -2550,14 +2553,14 @@ move_nodes: * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The * caller holds the scheduler lock. */ -static enum ice_status +static int ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *tmp; - enum ice_status status = ICE_SUCCESS; + int status = 0; LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list, ice_sched_agg_vsi_info, list_entry) { @@ -2614,7 +2617,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) * This function removes the aggregator node and intermediate nodes if any * from the given TC */ -static enum ice_status +static int ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *tc_node, *agg_node; @@ -2648,7 +2651,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) } ice_free_sched_node(pi, agg_node); - return ICE_SUCCESS; + return 0; } /** @@ -2662,11 +2665,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * the aggregator configuration completely for requested TC. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { - enum ice_status status = ICE_SUCCESS; + int status = 0; /* If nothing to remove - return success */ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) @@ -2695,7 +2698,7 @@ exit_rm_agg_cfg_tc: * Save aggregator TC bitmap. This function needs to be called with scheduler * lock held. 
*/ -static enum ice_status +static int ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, ice_bitmap_t *tc_bitmap) { @@ -2706,7 +2709,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, return ICE_ERR_PARAM; ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); - return ICE_SUCCESS; + return 0; } /** @@ -2718,15 +2721,15 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, * This function creates an aggregator node and intermediate nodes if required * for the given TC */ -static enum ice_status +static int ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *parent, *agg_node, *tc_node; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; - enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; u32 first_node_teid; u16 num_nodes_added; + int status = 0; u8 i, aggl; tc_node = ice_sched_get_tc_node(pi, tc); @@ -2772,7 +2775,7 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) num_nodes[i], &first_node_teid, &num_nodes_added); - if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added) + if (status || num_nodes[i] != num_nodes_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next @@ -2789,7 +2792,7 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) } } - return ICE_SUCCESS; + return 0; } /** @@ -2808,13 +2811,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * resources and remove aggregator ID. * This function needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap) { struct ice_sched_agg_info *agg_info; - enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; agg_info = ice_get_agg_info(hw, agg_id); @@ -2870,12 +2873,12 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, * * This function configures aggregator node(s). */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap) { ice_bitmap_t bitmap = tc_bitmap; - enum ice_status status; + int status; ice_acquire_lock(&pi->sched_lock); status = ice_sched_cfg_agg(pi, agg_id, agg_type, @@ -2943,7 +2946,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) * Save VSI to aggregator TC bitmap. This function needs to call with scheduler * lock held. */ -static enum ice_status +static int ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, ice_bitmap_t *tc_bitmap) { @@ -2959,7 +2962,7 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, return ICE_ERR_PARAM; ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); - return ICE_SUCCESS; + return 0; } /** @@ -2973,14 +2976,14 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, * already associated to the aggregator node then no operation is performed on * the tree. This function needs to be called with scheduler lock held. 
*/ -static enum ice_status +static int ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, ice_bitmap_t *tc_bitmap) { struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL; struct ice_sched_agg_info *agg_info, *old_agg_info; - enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) @@ -3071,14 +3074,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw) * returns success or error on config sched element failure. The caller * needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data buf; - enum ice_status status; u16 elem_cfgd = 0; u16 num_elems = 1; + int status; buf = *info; /* For TC nodes, CIR config is not supported */ @@ -3116,13 +3119,13 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, * * This function configures node element's BW allocation. */ -static enum ice_status +static int ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 bw_alloc) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; buf = node->info; data = &buf.data; @@ -3150,12 +3153,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, * * Move or associate VSI to a new or default aggregator node. */ -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap) { ice_bitmap_t bitmap = tc_bitmap; - enum ice_status status; + int status; ice_acquire_lock(&pi->sched_lock); status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, @@ -3175,10 +3178,10 @@ ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, * This function removes aggregator reference to VSI and delete aggregator ID * info. It removes the aggregator configuration completely. */ -enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) +int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) { struct ice_sched_agg_info *agg_info; - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; ice_acquire_lock(&pi->sched_lock); @@ -3257,7 +3260,7 @@ ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) * * Save BW alloc information of VSI type node for post replay use. */ -static enum ice_status +static int ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { @@ -3280,7 +3283,7 @@ ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, default: return ICE_ERR_PARAM; } - return ICE_SUCCESS; + return 0; } /** @@ -3350,7 +3353,7 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) * * Save BW information of VSI type node for post replay use. */ -static enum ice_status +static int ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -3374,7 +3377,7 @@ ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, default: return ICE_ERR_PARAM; } - return ICE_SUCCESS; + return 0; } /** @@ -3402,7 +3405,7 @@ static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio) * * Save priority information of VSI type node for post replay use. 
*/ -static enum ice_status +static int ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 prio) { @@ -3416,7 +3419,7 @@ ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, if (tc >= ICE_MAX_TRAFFIC_CLASS) return ICE_ERR_PARAM; ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio); - return ICE_SUCCESS; + return 0; } /** @@ -3429,7 +3432,7 @@ ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * * Save BW alloc information of AGG type node for post replay use. */ -static enum ice_status +static int ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { @@ -3450,7 +3453,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, default: return ICE_ERR_PARAM; } - return ICE_SUCCESS; + return 0; } /** @@ -3463,7 +3466,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, * * Save BW information of AGG type node for post replay use. */ -static enum ice_status +static int ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -3487,7 +3490,7 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, default: return ICE_ERR_PARAM; } - return ICE_SUCCESS; + return 0; } /** @@ -3501,11 +3504,11 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, * This function configures BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, ICE_AGG_TYPE_VSI, @@ -3528,11 +3531,11 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function configures default BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, ICE_AGG_TYPE_VSI, @@ -3558,11 +3561,11 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function applies BW limit to aggregator scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, tc, rl_type, bw); @@ -3584,11 +3587,11 @@ ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, * This function applies default BW limit to aggregator scheduling node based * on TC information. */ -enum ice_status +int ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type) { - enum ice_status status; + int status; status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, tc, rl_type, @@ -3613,7 +3616,7 @@ ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic * classes for VSI matching handle. 
*/ -enum ice_status +int ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw) { @@ -3629,7 +3632,7 @@ ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, * This function removes the shared rate limiter(SRL) of all VSI type nodes * across all traffic classes for VSI matching handle. */ -enum ice_status +int ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, @@ -3649,7 +3652,7 @@ ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle) * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ -enum ice_status +int ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw) { @@ -3665,7 +3668,7 @@ ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, * This function removes the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ -enum ice_status +int ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id) { return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW, @@ -3685,7 +3688,7 @@ ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id) * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ -enum ice_status +int ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { @@ -3702,7 +3705,7 @@ ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ -enum ice_status +int ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc) { return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, @@ -3721,11 +3724,11 @@ ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc) * This function configures the queue node priority (Sibling Priority) of the * passed in VSI's queue(s) for a given traffic class (TC). */ -enum ice_status +int ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, u8 *q_prio) { - enum ice_status status = ICE_ERR_PARAM; + int status = ICE_ERR_PARAM; u16 i; ice_acquire_lock(&pi->sched_lock); @@ -3761,17 +3764,17 @@ ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, * This function configures the node priority (Sibling Priority) of the * passed in VSI's for a given traffic class (TC) of an Aggregator ID. */ -enum ice_status +int ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id, u16 num_vsis, u16 *vsi_handle_arr, u8 *node_prio, u8 tc) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_node *tc_node, *agg_node; - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_agg_info *agg_info; bool agg_id_present = false; struct ice_hw *hw = pi->hw; + int status = ICE_ERR_PARAM; u16 i; ice_acquire_lock(&pi->sched_lock); @@ -3848,11 +3851,11 @@ exit_agg_priority_per_tc: * This function configures the BW allocation of the passed in VSI's * node(s) for enabled traffic class. 
*/ -enum ice_status +int ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc) { - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) @@ -3900,14 +3903,14 @@ ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, * This function configures the BW allocation of passed in aggregator for * enabled traffic class(s). */ -enum ice_status +int ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc) { struct ice_sched_agg_info *agg_info; bool agg_id_present = false; - enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; ice_acquire_lock(&pi->sched_lock); @@ -4002,12 +4005,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) * * This function converts the BW to profile structure format. */ -static enum ice_status +static int ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, struct ice_aqc_rl_profile_elem *profile) { - enum ice_status status = ICE_ERR_PARAM; s64 bytes_per_sec, ts_rate, mv_tmp; + int status = ICE_ERR_PARAM; bool found = false; s32 encode = 0; s64 mv = 0; @@ -4052,7 +4055,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, profile->rl_multiply = CPU_TO_LE16(mv); profile->wake_up_calc = CPU_TO_LE16(wm); profile->rl_encode = CPU_TO_LE16(encode); - status = ICE_SUCCESS; + status = 0; } else { status = ICE_ERR_DOES_NOT_EXIST; } @@ -4080,8 +4083,8 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type, struct ice_aqc_rl_profile_info *rl_prof_elem; u16 profiles_added = 0, num_profiles = 1; struct ice_aqc_rl_profile_elem *buf; - enum ice_status status; u8 profile_type; + int status; if (!hw || layer_num >= hw->num_tx_sched_layers) return NULL; @@ -4114,7 +4117,7 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type, return NULL; status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile); - if (status != ICE_SUCCESS) + if (status) goto exit_add_rl_prof; rl_prof_elem->bw = bw; @@ -4149,7 +4152,7 @@ exit_add_rl_prof: * * This function configures node element's BW limit. */ -static enum ice_status +static int ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 rl_prof_id) { @@ -4293,12 +4296,12 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold * scheduler lock. */ -static enum ice_status +static int ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, u16 profile_id) { struct ice_aqc_rl_profile_info *rl_prof_elem; - enum ice_status status = ICE_SUCCESS; + int status = 0; if (!hw || layer_num >= hw->num_tx_sched_layers) return ICE_ERR_PARAM; @@ -4319,7 +4322,7 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, break; } if (status == ICE_ERR_IN_USE) - status = ICE_SUCCESS; + status = 0; return status; } @@ -4334,15 +4337,15 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. 
*/ -static enum ice_status +static int ice_sched_set_node_bw_dflt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u8 layer_num) { - enum ice_status status; struct ice_hw *hw; u8 profile_type; u16 rl_prof_id; + int status; u16 old_id; hw = pi->hw; @@ -4373,7 +4376,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, /* Remove stale RL profile ID */ if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || old_id == ICE_SCHED_INVAL_PROF_ID) - return ICE_SUCCESS; + return 0; return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id); } @@ -4390,14 +4393,14 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile * ID from local database. The caller needs to hold scheduler lock. */ -enum ice_status +int ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { struct ice_aqc_rl_profile_info *rl_prof_info; - enum ice_status status = ICE_ERR_PARAM; struct ice_hw *hw = pi->hw; u16 old_id, rl_prof_id; + int status = ICE_ERR_PARAM; rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num); if (!rl_prof_info) @@ -4419,7 +4422,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, /* Check for old ID removal */ if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) - return ICE_SUCCESS; + return 0; return ice_sched_rm_rl_profile(hw, layer_num, rl_prof_info->profile.flags & @@ -4434,7 +4437,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, * * This function sets priority of a node among it's siblings. */ -enum ice_status +int ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, u16 priority) { @@ -4459,7 +4462,7 @@ ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *nod * * This function sets weight of the node for WFQ algorithm. */ -enum ice_status +int ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight) { struct ice_aqc_txsched_elem_data buf; @@ -4491,7 +4494,7 @@ ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, * NOTE: Caller provides the correct SRL node in case of shared profile * settings. */ -enum ice_status +int ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw) { @@ -4524,7 +4527,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type) @@ -4542,7 +4545,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, * behalf of the requested node (first argument). This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) { /* SRL profiles are not available on all layers. 
Check if the @@ -4555,7 +4558,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) node->num_children == 1) || ((sel_layer == node->tx_sched_layer - 1) && (node->parent && node->parent->num_children == 1))) - return ICE_SUCCESS; + return 0; return ICE_ERR_CFG; } @@ -4568,7 +4571,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) * * Save BW information of queue type node for post replay use. */ -static enum ice_status +static int ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) { switch (rl_type) { @@ -4584,7 +4587,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) default: return ICE_ERR_PARAM; } - return ICE_SUCCESS; + return 0; } /** @@ -4598,13 +4601,13 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) * * This function sets BW limit of queue scheduling node. */ -static enum ice_status +static int ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; struct ice_q_ctx *q_ctx; + int status = ICE_ERR_PARAM; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; @@ -4661,7 +4664,7 @@ exit_q_bw_lmt: * * This function configures BW limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { @@ -4679,7 +4682,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * * This function configures BW default limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type) { @@ -4697,7 +4700,7 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function saves the modified values of bandwidth settings for later * replay purpose (restore) after reset. */ -static enum ice_status +static int ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -4716,9 +4719,12 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, default: return ICE_ERR_PARAM; } - return ICE_SUCCESS; + return 0; } +#define ICE_SCHED_GENERIC_STRICT_MODE BIT(4) +#define ICE_SCHED_GENERIC_PRIO_S 1 + /** * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit * @pi: port information structure @@ -4728,12 +4734,14 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, * * This function configures bandwidth limit of TC node. 
*/ -static enum ice_status +static int ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; struct ice_sched_node *tc_node; + int status = ICE_ERR_PARAM; if (tc >= ICE_MAX_TRAFFIC_CLASS) return status; @@ -4741,6 +4749,17 @@ ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) goto exit_set_tc_node_bw; + + /* update node's generic field */ + buf = tc_node->info; + data = &buf.data; + data->valid_sections = ICE_AQC_ELEM_VALID_GENERIC; + data->generic = (tc << ICE_SCHED_GENERIC_PRIO_S) | + ICE_SCHED_GENERIC_STRICT_MODE; + status = ice_sched_update_elem(pi->hw, tc_node, &buf); + if (status) + goto exit_set_tc_node_bw; + if (bw == ICE_SCHED_DFLT_BW) status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type); else @@ -4763,7 +4782,7 @@ exit_set_tc_node_bw: * This function configures BW limit of TC node. * Note: The minimum guaranteed reservation is done via DCBX. */ -enum ice_status +int ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -4778,7 +4797,7 @@ ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, * * This function configures BW default limit of TC node. */ -enum ice_status +int ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type) { @@ -4794,7 +4813,7 @@ ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, * * Save BW alloc information of VSI type node for post replay use. */ -static enum ice_status +static int ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { @@ -4812,7 +4831,7 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, default: return ICE_ERR_PARAM; } - return ICE_SUCCESS; + return 0; } /** @@ -4826,12 +4845,12 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, * changed settings for replay purpose, and return success if it succeeds * in modifying bandwidth alloc setting. */ -static enum ice_status +static int ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *tc_node; + int status = ICE_ERR_PARAM; if (tc >= ICE_MAX_TRAFFIC_CLASS) return status; @@ -4860,7 +4879,7 @@ exit_set_tc_node_bw_alloc: * This function configures BW limit of TC node. * Note: The minimum guaranteed reservation is done via DCBX. */ -enum ice_status +int ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc) { @@ -4876,11 +4895,11 @@ ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, * and sets node's BW limit to default. This function needs to be * called with the scheduler lock held. */ -enum ice_status +int ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle) { struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) @@ -4992,13 +5011,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, * This function sets BW limit of VSI or Aggregator scheduling node * based on TC information from passed in argument BW. 
*/ -enum ice_status +int ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; + int status = ICE_ERR_PARAM; if (!pi) return status; @@ -5031,7 +5050,7 @@ exit_set_node_bw_lmt_per_tc: * different than the VSI node layer on all TC(s).This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) { u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; @@ -5044,7 +5063,7 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *vsi_node; enum ice_rl_type rl_type = ICE_SHARED_BW; - enum ice_status status; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) @@ -5070,7 +5089,7 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) if (status) return status; } - return ICE_SUCCESS; + return 0; } /** @@ -5086,12 +5105,12 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) * class, and saves those value for later use for replaying purposes. The * caller holds the scheduler lock. */ -static enum ice_status +static int ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, struct ice_sched_node *srl_node, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; if (bw == ICE_SCHED_DFLT_BW) { status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); @@ -5118,13 +5137,13 @@ ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, * is passed, it removes the corresponding bw from the node. The caller * holds scheduler lock. */ -static enum ice_status +static int ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { struct ice_sched_node *tc_node, *vsi_node, *cfg_node; - enum ice_status status; u8 layer_num; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) @@ -5172,11 +5191,11 @@ ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle, * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is * passed, it removes those value(s) from the node. */ -enum ice_status +int ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw) { - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; if (!pi) @@ -5222,13 +5241,13 @@ exit_set_vsi_bw_shared_lmt: * different than the AGG node layer on all TC(s).This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) { u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; struct ice_sched_agg_info *agg_info; bool agg_id_present = false; - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info, @@ -5277,13 +5296,13 @@ ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) * * This function validates aggregator id. Caller holds the scheduler lock. 
*/ -static enum ice_status +static int ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) { struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *tmp; bool agg_id_present = false; - enum ice_status status; + int status; status = ice_sched_validate_agg_srl_node(pi, agg_id); if (status) @@ -5299,7 +5318,7 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) if (!agg_id_present) return ICE_ERR_PARAM; - return ICE_SUCCESS; + return 0; } /** @@ -5315,12 +5334,12 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) * requested traffic class, and saves those value for later use for * replaying purposes. The caller holds the scheduler lock. */ -static enum ice_status +static int ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, struct ice_sched_node *srl_node, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status; + int status; if (bw == ICE_SCHED_DFLT_BW) { status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); @@ -5347,13 +5366,13 @@ ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller * holds the scheduler lock. */ -static enum ice_status +static int ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { struct ice_sched_node *tc_node, *agg_node, *cfg_node; enum ice_rl_type rl_type = ICE_SHARED_BW; - enum ice_status status = ICE_ERR_CFG; + int status = ICE_ERR_CFG; u8 layer_num; tc_node = ice_sched_get_tc_node(pi, tc); @@ -5402,11 +5421,11 @@ ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the * node(s). */ -enum ice_status +int ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw) { - enum ice_status status; + int status; u8 tc; if (!pi) @@ -5454,12 +5473,12 @@ exit_agg_bw_shared_lmt: * node for a given traffic class for aggregator matching agg_id. When BW * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. */ -enum ice_status +int ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { - enum ice_status status; + int status; if (!pi) return ICE_ERR_PARAM; @@ -5485,14 +5504,14 @@ exit_agg_bw_shared_lmt_per_tc: * This function configures node element's sibling priority only. This * function needs to be called with scheduler lock held. */ -enum ice_status +int ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; if (!hw) return ICE_ERR_PARAM; @@ -5518,7 +5537,7 @@ ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, * burst size value is used for future rate limit calls. It doesn't change the * existing or previously created RL profiles. 
*/ -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) { u16 burst_size_to_prog; @@ -5547,7 +5566,7 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) burst_size_to_prog |= (u16)(bytes / 1024); } hw->max_burst_size = burst_size_to_prog; - return ICE_SUCCESS; + return 0; } /** @@ -5559,13 +5578,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) * This function configures node element's priority value. It * needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; buf = node->info; data = &buf.data; @@ -5586,18 +5605,18 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, * This function restores node's BW from bw_t_info. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, struct ice_bw_type_info *bw_t_info) { struct ice_port_info *pi = hw->port_info; - enum ice_status status = ICE_ERR_PARAM; + int status = ICE_ERR_PARAM; u16 bw_alloc; if (!node) return status; if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) - return ICE_SUCCESS; + return 0; if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) { status = ice_sched_replay_node_prio(hw, node, bw_t_info->generic); @@ -5644,11 +5663,11 @@ ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, * This function re-creates aggregator type nodes. The caller needs to hold * the scheduler lock. */ -static enum ice_status +static int ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info) { struct ice_sched_node *tc_node, *agg_node; - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; if (!agg_info) @@ -5721,7 +5740,7 @@ void ice_sched_replay_agg(struct ice_hw *hw) ICE_MAX_TRAFFIC_CLASS)) { ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); - enum ice_status status; + int status; ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); ice_sched_get_ena_tc_bitmap(pi, @@ -5777,9 +5796,9 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) * * Replay root node BW settings. */ -enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) +int ice_sched_replay_root_node_bw(struct ice_port_info *pi) { - enum ice_status status = ICE_SUCCESS; + int status = 0; if (!pi->hw) return ICE_ERR_PARAM; @@ -5797,9 +5816,9 @@ enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) * * This function replay TC nodes. */ -enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) +int ice_sched_replay_tc_node_bw(struct ice_port_info *pi) { - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; if (!pi->hw) @@ -5829,7 +5848,7 @@ enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) * This function replays VSI type nodes bandwidth. This function needs to be * called with scheduler lock held. 
*/ -static enum ice_status +static int ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, ice_bitmap_t *tc_bitmap) { @@ -5837,7 +5856,7 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, struct ice_port_info *pi = hw->port_info; struct ice_bw_type_info *bw_t_info; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = ICE_SUCCESS; + int status = 0; u8 tc; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); @@ -5869,24 +5888,24 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, * their node bandwidth information. This function needs to be called with * scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; - enum ice_status status; + int status; ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; agg_info = ice_get_vsi_agg_info(hw, vsi_handle); if (!agg_info) - return ICE_SUCCESS; /* Not present in list - default Agg case */ + return 0; /* Not present in list - default Agg case */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) - return ICE_SUCCESS; /* Not present in list - default Agg case */ + return 0; /* Not present in list - default Agg case */ ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, replay_bitmap); /* Replay aggregator node associated to vsi_handle */ @@ -5920,10 +5939,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays association of VSI to aggregator type nodes, and * node bandwidth information. */ -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { struct ice_port_info *pi = hw->port_info; - enum ice_status status; + int status; ice_acquire_lock(&pi->sched_lock); status = ice_sched_replay_vsi_agg(hw, vsi_handle); @@ -5939,7 +5958,7 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays queue type node bandwidth. This function needs to be * called with scheduler lock held. */ -enum ice_status +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) { struct ice_sched_node *q_node; |
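Beyond the return-type conversion, the diff carries a few functional changes worth noting: ice_sched_find_node_by_teid() now guards against a NULL start_node, ice_sched_add_root_node() copies the element info with ICE_NONDMA_TO_NONDMA instead of ICE_DMA_TO_NONDMA, the loop counter in ice_sched_query_res_alloc() is widened from u8 to u16, and ice_sched_set_tc_node_bw_lmt() now programs the TC node's generic field before applying the rate limit. For that generic field, the TC number is shifted left by ICE_SCHED_GENERIC_PRIO_S (1) and strict-priority mode is selected with ICE_SCHED_GENERIC_STRICT_MODE (BIT(4)); as a worked example, assuming TC 3:

	(3 << ICE_SCHED_GENERIC_PRIO_S) | ICE_SCHED_GENERIC_STRICT_MODE
	= (3 << 1) | BIT(4)
	= 0x06 | 0x10
	= 0x16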