diff --git a/drivers/net/ub/unic/Makefile b/drivers/net/ub/unic/Makefile index 041815219e0ca790a86610e637e350e71cdfc534..7d0a2632d00d92e5f089d9acd6c4626784ebdd10 100644 --- a/drivers/net/ub/unic/Makefile +++ b/drivers/net/ub/unic/Makefile @@ -8,6 +8,7 @@ ccflags-y += -I$(srctree)/drivers/net/ub/unic/debugfs obj-$(CONFIG_UB_UNIC) += unic.o unic-objs = unic_main.o unic_ethtool.o unic_hw.o unic_guid.o unic_netdev.o unic_dev.o unic_qos_hw.o unic_event.o unic_crq.o -unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_rack_ip.o unic_stats.o +unic-objs += unic_channel.o debugfs/unic_debugfs.o unic_rx.o unic_tx.o unic_txrx.o unic_comm_addr.o unic_ip.o unic_stats.o +unic-objs += unic_lb.o unic_vlan.o unic_mac.o unic-objs += debugfs/unic_ctx_debugfs.o unic_reset.o debugfs/unic_qos_debugfs.o debugfs/unic_entry_debugfs.o unic-$(CONFIG_UB_UNIC_DCB) += unic_dcbnl.o diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.c b/drivers/net/ub/unic/debugfs/unic_debugfs.c index 4c7bf3bb83fccde9b25efbeefad09565f8314ae0..093e3de5e65774c73b4b01efce4f5bd38b64737c 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.c @@ -99,12 +99,21 @@ static const struct unic_dbg_cap_bit_info { bool (*get_bit)(struct unic_dev *dev); } unic_cap_bits[] = { {"\tsupport_ubl: %u\n", &unic_dev_ubl_supported}, + {"\tsupport_pfc: %u\n", &unic_dev_pfc_supported}, {"\tsupport_ets: %u\n", &unic_dev_ets_supported}, {"\tsupport_fec: %u\n", &unic_dev_fec_supported}, + {"\tsupport_pause: %u\n", &unic_dev_pause_supported}, + {"\tsupport_eth: %u\n", &unic_dev_eth_supported}, {"\tsupport_tc_speed_limit: %u\n", &unic_dev_tc_speed_limit_supported}, {"\tsupport_tx_csum_offload: %u\n", &unic_dev_tx_csum_offload_supported}, {"\tsupport_rx_csum_offload: %u\n", &unic_dev_rx_csum_offload_supported}, + {"\tsupport_app_lb: %u\n", &unic_dev_app_lb_supported}, + {"\tsupport_external_lb: %u\n", &unic_dev_external_lb_supported}, + 
{"\tsupport_serial_serdes_lb: %u\n", &unic_dev_serial_serdes_lb_supported}, + {"\tsupport_parallel_serdes_lb: %u\n", &unic_dev_parallel_serdes_lb_supported}, {"\tsupport_fec_stats: %u\n", &unic_dev_fec_stats_supported}, + {"\tsupport_cfg_mac: %u\n", &unic_dev_cfg_mac_supported}, + {"\tsupport_cfg_vlan_filter: %u\n", &unic_dev_cfg_vlan_filter_supported}, }; static void unic_dbg_dump_caps_bits(struct unic_dev *unic_dev, @@ -125,6 +134,10 @@ static void unic_dbg_dump_caps(struct unic_dev *unic_dev, struct seq_file *s) u32 caps_info; } unic_caps_info[] = { {"\ttotal_ip_tbl_size: %hu\n", unic_caps->total_ip_tbl_size}, + {"\tuc_mac_tbl_size: %u\n", unic_caps->uc_mac_tbl_size}, + {"\tmc_mac_tbl_size: %u\n", unic_caps->mc_mac_tbl_size}, + {"\tvlan_tbl_size: %u\n", unic_caps->vlan_tbl_size}, + {"\tmng_tbl_size: %u\n", unic_caps->mng_tbl_size}, {"\tmax_trans_unit: %hu\n", unic_caps->max_trans_unit}, {"\tmin_trans_unit: %hu\n", unic_caps->min_trans_unit}, {"\tvport_buf_size: %u\n", unic_caps->vport_buf_size}, @@ -314,25 +327,33 @@ static bool unic_dbg_dentry_support(struct device *dev, u32 property) static struct ubase_dbg_dentry_info unic_dbg_dentry[] = { { .name = "ip_tbl", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, { .name = "context", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, { .name = "vport", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, { .name = "qos", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, + .support = unic_dbg_dentry_support, + }, { + .name = "vlan_tbl", + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + }, { + .name = "mac_tbl", + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, .support = 
unic_dbg_dentry_support, }, /* keep unic at the bottom and add new directory above */ { .name = "unic", - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, }, }; @@ -352,38 +373,59 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_ip_tbl_list, + }, { + .name = "uc_mac_tbl_list", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_uc_mac_tbl_list, + }, { + .name = "mc_mac_tbl_list", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_mc_mac_tbl_list, + }, { + .name = "mac_tbl_spec", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_mac_tbl_spec, }, { .name = "jfs_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfs_ctx_sw, }, { .name = "jfr_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfr_ctx_sw, }, { .name = "sq_jfc_context", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_sq_jfc_ctx_sw, }, { .name = "rq_jfc_context", 
.dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_rq_jfc_ctx_sw, }, { .name = "dev_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_dev_info, @@ -404,98 +446,119 @@ static struct ubase_dbg_cmd_info unic_dbg_cmd[] = { }, { .name = "caps_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_caps_info, + }, { + .name = "mac_tbl_list_hw", + .dentry_index = UNIC_DBG_DENTRY_MAC, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_mac_tbl_list_hw, + }, { + .name = "vlan_tbl_list_hw", + .dentry_index = UNIC_DBG_DENTRY_VLAN, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_vlan_tbl_list_hw, }, { .name = "page_pool_info", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_page_pool_info, }, { .name = "jfs_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfs_context_hw, }, { .name = "jfr_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | 
UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_jfr_context_hw, }, { .name = "sq_jfc_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_sq_jfc_context_hw, }, { .name = "rq_jfc_context_hw", .dentry_index = UNIC_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_rq_jfc_context_hw, }, { .name = "vl_queue", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_vl_queue, }, { .name = "rss_cfg_hw", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_rss_cfg_hw, }, { .name = "promisc_cfg_hw", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_promisc_cfg_hw, }, { .name = "dscp_vl_map", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_dscp_vl_map, }, { .name = "prio_vl_map", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support 
= unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_prio_vl_map, + }, { + .name = "pfc_info", + .dentry_index = UNIC_DBG_DENTRY_QOS, + .property = UBASE_SUP_UNIC | UBASE_SUP_ETH, + .support = unic_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = unic_dbg_dump_pfc_param, }, { .name = "dscp_prio", .dentry_index = UNIC_DBG_DENTRY_QOS, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_dump_dscp_prio, }, { .name = "link_status_record", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_query_link_record, }, { .name = "clear_link_status_record", .dentry_index = UNIC_DBG_DENTRY_ROOT, - .property = UBASE_SUP_UNIC | UBASE_SUP_UBL, + .property = UBASE_SUP_UNIC | UBASE_SUP_UBL_ETH, .support = unic_dbg_dentry_support, .init = ubase_dbg_seq_file_init, .read_func = unic_dbg_clear_link_record, diff --git a/drivers/net/ub/unic/debugfs/unic_debugfs.h b/drivers/net/ub/unic/debugfs/unic_debugfs.h index 853597b90f45911a263d00d47e25107ae10740de..869be0526ae98d15aecc67961467a05491d581f0 100644 --- a/drivers/net/ub/unic/debugfs/unic_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_debugfs.h @@ -17,6 +17,8 @@ enum unic_dbg_dentry_type { UNIC_DBG_DENTRY_CONTEXT, UNIC_DBG_DENTRY_VPORT, UNIC_DBG_DENTRY_QOS, + UNIC_DBG_DENTRY_VLAN, + UNIC_DBG_DENTRY_MAC, /* must be the last entry. 
*/ UNIC_DBG_DENTRY_ROOT }; diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c index 74b5fdb95aaa311966557beb15122daf471224b3..aecae53cce9ad6f33a66c3a142487dc254e98dd6 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.c @@ -16,6 +16,14 @@ static const char * const unic_entry_state_str[] = { "TO_ADD", "TO_DEL", "ACTIVE" }; +static int unic_dbg_check_dev_state(struct unic_dev *unic_dev) +{ + if (__unic_resetting(unic_dev)) + return -EBUSY; + + return 0; +} + int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data) { struct unic_dev *unic_dev = dev_get_drvdata(s->private); @@ -32,6 +40,93 @@ int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data) return 0; } +static int unic_common_query_addr_list(struct unic_dev *unic_dev, u32 total_size, + u32 size, struct list_head *list, + int (*query_list)(struct unic_dev *, u32 *, + struct list_head *, + bool *complete)) +{ +#define UNIC_LOOP_COUNT(total_size, size) ((total_size) / (size) + 1) + + u32 idx = 0, cnt = 0; + bool complete; + int ret = 0; + + while (cnt < UNIC_LOOP_COUNT(total_size, size)) { + complete = false; + ret = query_list(unic_dev, &idx, list, &complete); + if (ret) { + unic_err(unic_dev, + "failed to query addr list, ret = %d.\n", ret); + break; + } + + if (complete) + break; + cnt++; + } + + return ret == -EPERM ? -EOPNOTSUPP : ret; +} + +static int unic_dbg_dump_mac_tbl_list(struct seq_file *s, void *data, + bool is_unicast) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_vport *vport = &unic_dev->vport; + struct unic_comm_addr_node *mac_node, *tmp; + struct list_head *list; + int i = 0; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + seq_printf(s, "%s mac_list:\n", is_unicast ? "unicast" : "multicast"); + seq_printf(s, "No. %-28sSTATE\n", "MAC_ADDR"); + + list = is_unicast ? 
+ &vport->addr_tbl.uc_mac_list : &vport->addr_tbl.mc_mac_list; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + list_for_each_entry_safe(mac_node, tmp, list, node) { + seq_printf(s, "%-8d", i++); + seq_printf(s, "%-28pM", mac_node->mac_addr); + seq_printf(s, "%s\n", unic_entry_state_str[mac_node->state]); + } + + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + return 0; +} + +int unic_dbg_dump_uc_mac_tbl_list(struct seq_file *s, void *data) +{ + return unic_dbg_dump_mac_tbl_list(s, data, true); +} + +int unic_dbg_dump_mc_mac_tbl_list(struct seq_file *s, void *data) +{ + return unic_dbg_dump_mac_tbl_list(s, data, false); +} + +int unic_dbg_dump_mac_tbl_spec(struct seq_file *s, void *data) +{ + u32 mac_tbl_size, priv_uc_mac_tbl_size, priv_mc_mac_tbl_size; + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + priv_mc_mac_tbl_size = unic_dev->caps.mc_mac_tbl_size; + priv_uc_mac_tbl_size = unic_dev->caps.uc_mac_tbl_size; + mac_tbl_size = priv_mc_mac_tbl_size + priv_uc_mac_tbl_size; + + seq_printf(s, "mac_tbl_size\t: %u\n", mac_tbl_size); + seq_printf(s, "priv_uc_mac_tbl_size\t: %u\n", priv_uc_mac_tbl_size); + seq_printf(s, "priv_mc_mac_tbl_size\t: %u\n", priv_mc_mac_tbl_size); + + return 0; +} + int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) { struct unic_dev *unic_dev = dev_get_drvdata(s->private); @@ -46,7 +141,7 @@ int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) list = &ip_tbl->ip_list; spin_lock_bh(&ip_tbl->ip_list_lock); list_for_each_entry(ip_node, list, node) { - seq_printf(s, "%-4d", i++); + seq_printf(s, "%-4u", i++); seq_printf(s, "%-43pI6c", &ip_node->ip_addr.s6_addr); seq_printf(s, "%-9s", unic_entry_state_str[ip_node->state]); seq_printf(s, "%-3u", ip_node->node_mask); @@ -57,3 +152,195 @@ int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data) return 0; } + +static int unic_query_mac_list_hw(struct unic_dev *unic_dev, u32 *mac_idx, + struct 
list_head *list, bool *complete) +{ + struct unic_dbg_comm_addr_node *mac_node; + struct unic_dbg_mac_entry *mac_entry; + struct unic_dbg_mac_head req = {0}; + struct unic_dbg_mac_head *head; + struct ubase_cmd_buf in, out; + int ret; + u8 i; + + head = kzalloc(UNIC_QUERY_MAC_LEN, GFP_ATOMIC); + if (!head) + return -ENOMEM; + + mac_entry = head->mac_entry; + req.mac_idx = cpu_to_le32(*mac_idx); + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_MAC_TBL, true, sizeof(req), + &req); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_MAC_TBL, true, + UNIC_QUERY_MAC_LEN, head); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) { + unic_err(unic_dev, + "failed to query mac hw tbl, ret = %d.\n", ret); + goto err_out; + } + + if (head->cur_mac_cnt > UNIC_DBG_MAC_NUM) { + ret = -EINVAL; + unic_err(unic_dev, + "invalid cur_mac_cnt(%u).\n", head->cur_mac_cnt); + goto err_out; + } + + for (i = 0; i < head->cur_mac_cnt; i++) { + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + ret = -ENOMEM; + goto err_out; + } + + memcpy(&mac_node->mac_addr, &mac_entry[i].mac_addr, + sizeof(mac_node->mac_addr)); + mac_node->eport = le32_to_cpu(mac_entry[i].eport); + list_add_tail(&mac_node->node, list); + } + + *complete = head->cur_mac_cnt < UNIC_DBG_MAC_NUM; + + *mac_idx = le32_to_cpu(head->mac_idx); + +err_out: + kfree(head); + + return ret; +} + +int unic_dbg_dump_mac_tbl_list_hw(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_dbg_comm_addr_node *mac_node, *next_node; + struct unic_caps *caps = &unic_dev->caps; + struct list_head list; + int ret, cnt = 0; + u32 size; + + ret = unic_dbg_check_dev_state(unic_dev); + if (ret) + return ret; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + size = caps->uc_mac_tbl_size + caps->mc_mac_tbl_size; + + INIT_LIST_HEAD(&list); + ret = unic_common_query_addr_list(unic_dev, size, + UNIC_DBG_MAC_NUM, &list, + unic_query_mac_list_hw); + 
if (ret) + goto release_list; + + seq_printf(s, "No %-28sEXTEND_INFO\n", "MAC_ADDR"); + + list_for_each_entry(mac_node, &list, node) { + seq_printf(s, "%-7d", cnt++); + seq_printf(s, "%-28pM", &mac_node->mac_addr); + seq_printf(s, "0x%08x\n", mac_node->eport); + } + +release_list: + list_for_each_entry_safe(mac_node, next_node, &list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } + + return ret; +} + +static int unic_query_vlan_list_hw(struct unic_dev *unic_dev, u32 *idx, + struct list_head *list, bool *complete) +{ + struct unic_dbg_vlan_entry *vlan_entry; + struct unic_dbg_vlan_node *vlan_node; + struct unic_dbg_vlan_head req = {0}; + struct unic_dbg_vlan_head *resp; + struct ubase_cmd_buf in, out; + u16 vlan_cnt, i; + int ret; + + resp = kzalloc(UNIC_QUERY_VLAN_LEN, GFP_ATOMIC); + if (!resp) + return -ENOMEM; + + vlan_entry = resp->vlan_entry; + req.idx = cpu_to_le16((u16)*idx); + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_VLAN_TBL, true, + sizeof(req), &req); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_VLAN_TBL, true, + UNIC_QUERY_VLAN_LEN, resp); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret && ret != -EPERM) { + unic_err(unic_dev, "failed to query vlan hw tbl, ret = %d.\n", + ret); + goto err_out; + } + + vlan_cnt = le16_to_cpu(resp->vlan_cnt); + if (vlan_cnt > UNIC_DBG_VLAN_NUM) { + ret = -EINVAL; + unic_err(unic_dev, "invalid vlan_cnt(%u).\n", vlan_cnt); + goto err_out; + } + + for (i = 0; i < vlan_cnt; i++) { + vlan_node = kzalloc(sizeof(*vlan_node), GFP_ATOMIC); + if (!vlan_node) { + ret = -ENOMEM; + goto err_out; + } + vlan_node->ue_id = le16_to_cpu(vlan_entry[i].ue_id); + vlan_node->vlan_id = le16_to_cpu(vlan_entry[i].vlan_id); + list_add_tail(&vlan_node->node, list); + } + + *idx = le16_to_cpu(resp->idx); + +err_out: + kfree(resp); + + return ret; +} + +int unic_dbg_dump_vlan_tbl_list_hw(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct unic_dbg_vlan_node 
*vlan_node, *tmp_node; + struct list_head list; + int ret, cnt = 0; + u32 size; + + ret = unic_dbg_check_dev_state(unic_dev); + if (ret) + return ret; + + INIT_LIST_HEAD(&list); + size = unic_dev->caps.vlan_tbl_size; + ret = unic_common_query_addr_list(unic_dev, size, UNIC_DBG_VLAN_NUM, + &list, unic_query_vlan_list_hw); + if (ret) + goto release_list; + + seq_puts(s, "No UE_ID VLAN_ID\n"); + + list_for_each_entry(vlan_node, &list, node) { + seq_printf(s, "%-7d", cnt++); + seq_printf(s, "%-13u", vlan_node->ue_id); + seq_printf(s, "%-12u\n", vlan_node->vlan_id); + } + +release_list: + list_for_each_entry_safe(vlan_node, tmp_node, &list, node) { + list_del(&vlan_node->node); + kfree(vlan_node); + } + + return ret; +} diff --git a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h index 73ab85f4d5f36b8e2a3fc17a065e8b8dcfdf99d4..706180f70078dbf10ce595d7198c30298d48f39e 100644 --- a/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_entry_debugfs.h @@ -8,9 +8,74 @@ #define __UNIC_ENTRY_DEBUGFS_H__ #include +#include #include +#ifndef UBL_ALEN +#define UBL_ALEN 16 +#endif + +#define UNIC_BITMAP_LEN 8 +#define UNIC_DBG_MAC_NUM 16 +#define UNIC_DBG_VLAN_NUM 250 +#define UNIC_QUERY_MAC_LEN (sizeof(struct unic_dbg_mac_head) + \ + sizeof(struct unic_dbg_mac_entry) * UNIC_DBG_MAC_NUM) +#define UNIC_QUERY_VLAN_LEN (sizeof(struct unic_dbg_vlan_head) + \ + sizeof(struct unic_dbg_vlan_entry) * UNIC_DBG_VLAN_NUM) + +struct unic_dbg_mac_entry { + u8 mac_addr[ETH_ALEN]; + __le32 eport; +}; + +struct unic_dbg_mac_head { + __le32 mac_idx; + u8 cur_mac_cnt; + u8 rsv[3]; + struct unic_dbg_mac_entry mac_entry[]; +}; + +struct unic_dbg_vlan_entry { + __le16 ue_id; + __le16 vlan_id; +}; + +struct unic_dbg_vlan_head { + __le16 idx; + __le16 vlan_cnt; + struct unic_dbg_vlan_entry vlan_entry[]; +}; + +struct unic_dbg_vlan_node { + struct list_head node; + u16 ue_id; + u16 vlan_id; +}; + +struct 
unic_dbg_comm_addr_node { + struct list_head node; + u16 ue_id; + u32 ue_bitmap[UNIC_BITMAP_LEN]; + u32 port_bitmap; + union { + u8 guid[UBL_ALEN]; + struct { + struct in6_addr ip_addr; + u32 extend_info; + }; + struct { + u8 mac_addr[ETH_ALEN]; + u32 eport; + }; + }; +}; + int unic_dbg_dump_ip_tbl_spec(struct seq_file *s, void *data); +int unic_dbg_dump_mac_tbl_spec(struct seq_file *s, void *data); +int unic_dbg_dump_mc_mac_tbl_list(struct seq_file *s, void *data); +int unic_dbg_dump_uc_mac_tbl_list(struct seq_file *s, void *data); int unic_dbg_dump_ip_tbl_list(struct seq_file *s, void *data); +int unic_dbg_dump_vlan_tbl_list_hw(struct seq_file *s, void *data); +int unic_dbg_dump_mac_tbl_list_hw(struct seq_file *s, void *data); #endif /* _UNIC_ENTRY_DEBUGFS_H */ diff --git a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c index 85b3288f0becee866d30fd6559d5def6595438f4..8b56fb4b7c8d41790a18c1a2037303365c404755 100644 --- a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c +++ b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.c @@ -146,3 +146,47 @@ int unic_dbg_dump_dscp_prio(struct seq_file *s, void *data) return 0; } + +int unic_dbg_dump_pfc_param(struct seq_file *s, void *data) +{ + struct unic_dev *unic_dev = dev_get_drvdata(s->private); + struct ubase_eth_mac_stats eth_stats = {0}; + u64 stats_tx[IEEE_8021QAZ_MAX_TCS]; + u64 stats_rx[IEEE_8021QAZ_MAX_TCS]; + u8 pfc_cap, pfc_en; + int i, ret; + + if (!unic_dev_pfc_supported(unic_dev)) + return -EOPNOTSUPP; + + if (__unic_resetting(unic_dev)) + return -EBUSY; + + ret = ubase_get_eth_port_stats(unic_dev->comdev.adev, ð_stats); + if (ret) + return ret; + + pfc_en = unic_dev->channels.vl.pfc_info.pfc_en; + pfc_cap = UNIC_MAX_PRIO_NUM; + + seq_printf(s, "mac_pfc_capacity: %d\n", pfc_cap); + seq_puts(s, "mac_pfc_enable: "); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + seq_printf(s, "%d", + (pfc_en >> (IEEE_8021QAZ_MAX_TCS - i - 1)) & 1); + + seq_puts(s, "\n"); + + for (i = 
0; i < IEEE_8021QAZ_MAX_TCS; i++) { + stats_tx[i] = unic_get_pfc_tx_pkts(ð_stats, i); + seq_printf(s, "mac_tx_pri%d_pfc_pkts: %llu\n", i, stats_tx[i]); + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + stats_rx[i] = unic_get_pfc_rx_pkts(ð_stats, i); + seq_printf(s, "mac_rx_pri%d_pfc_pkts: %llu\n", i, stats_rx[i]); + } + + return 0; +} diff --git a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h index f55616ab16174a9428a1e7f60441602a51751b52..91f136e99cff308363bb2f61ca15e3a0c13d9340 100644 --- a/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h +++ b/drivers/net/ub/unic/debugfs/unic_qos_debugfs.h @@ -11,5 +11,6 @@ int unic_dbg_dump_vl_queue(struct seq_file *s, void *data); int unic_dbg_dump_dscp_vl_map(struct seq_file *s, void *data); int unic_dbg_dump_prio_vl_map(struct seq_file *s, void *data); int unic_dbg_dump_dscp_prio(struct seq_file *s, void *data); +int unic_dbg_dump_pfc_param(struct seq_file *s, void *data); #endif diff --git a/drivers/net/ub/unic/unic.h b/drivers/net/ub/unic/unic.h index e63ee6e900ffe436f6f4e9d7fbb0e3770be16ec4..f2aef47451bf23ed509bd89a8bd639d41412a721 100644 --- a/drivers/net/ub/unic/unic.h +++ b/drivers/net/ub/unic/unic.h @@ -47,10 +47,23 @@ enum { #define UNIC_USER_BPE BIT(2) /* broadcast promisc enabled by user */ #define UNIC_OVERFLOW_MGP BIT(3) /* mulitcast guid overflow */ #define UNIC_OVERFLOW_IPP BIT(4) /* unicast ip overflow */ +#define UNIC_OVERFLOW_UP_MAC BIT(5) /* unicast mac overflow */ +#define UNIC_OVERFLOW_MP_MAC BIT(6) /* multicast mac overflow */ #define UNIC_UPE (UNIC_USER_UPE | \ - UNIC_OVERFLOW_IPP) + UNIC_OVERFLOW_IPP | \ + UNIC_OVERFLOW_UP_MAC) #define UNIC_MPE (UNIC_USER_MPE | \ - UNIC_OVERFLOW_MGP) + UNIC_OVERFLOW_MGP | \ + UNIC_OVERFLOW_MP_MAC) + +#define UNIC_SUPPORT_APP_LB BIT(0) +#define UNIC_SUPPORT_SERIAL_SERDES_LB BIT(1) +#define UNIC_SUPPORT_PARALLEL_SERDES_LB BIT(2) +#define UNIC_SUPPORT_EXTERNAL_LB BIT(3) +#define UNIC_LB_TEST_FLAGS (UNIC_SUPPORT_APP_LB | 
\ + UNIC_SUPPORT_SERIAL_SERDES_LB | \ + UNIC_SUPPORT_PARALLEL_SERDES_LB | \ + UNIC_SUPPORT_EXTERNAL_LB) #define UNIC_RSS_MAX_VL_NUM UBASE_NIC_MAX_VL_NUM #define UNIC_INVALID_PRIORITY (0xff) diff --git a/drivers/net/ub/unic/unic_cmd.h b/drivers/net/ub/unic/unic_cmd.h index ac571815be6aef863e9db67076a6f4a44e391e91..92e051fc603c61c4c0bb05b3b9198ac47b33e222 100644 --- a/drivers/net/ub/unic/unic_cmd.h +++ b/drivers/net/ub/unic/unic_cmd.h @@ -52,6 +52,14 @@ struct unic_ld_config_mode_cmd { u8 rsv[20]; }; +enum unic_link_fail_code { + UNIC_LF_NORMAL, + UNIC_LF_REF_CLOCK_LOST, + UNIC_LF_XSFP_TX_DISABLE, + UNIC_LF_XSFP_ABSENT, + UNIC_LF_REF_MAX +}; + struct unic_link_status_cmd_resp { u8 status; u8 link_fail_code; @@ -144,6 +152,31 @@ struct unic_query_flush_status_resp { u8 rsv[23]; }; +struct unic_query_mac_addr_resp { + u8 mac[ETH_ALEN]; + u8 rsv[18]; +}; + +struct unic_mac_tbl_entry_cmd { + u8 resp_code; + u8 mac_type; + u8 is_pfc; + u8 rsvd0; + u8 mac_addr[ETH_ALEN]; + u8 rsvd1[14]; +}; + +struct unic_vlan_filter_cfg_cmd { + u16 vlan_id; + u8 is_add; + u8 rsv[21]; +}; + +struct unic_vlan_filter_ctrl_cmd { + u8 filter_en; + u8 rsv[23]; +}; + enum unic_vl_map_type { UNIC_PRIO_VL_MAP, UNIC_DSCP_VL_MAP, @@ -164,4 +197,39 @@ struct unic_config_vl_speed_cmd { u8 resv1[20]; }; +enum unic_lb_en_sub_cmd { + UNIC_LB_APP = 0, + UNIC_LB_SERIAL_SERDES, + UNIC_LB_PARALLEL_SERDES, + UNIC_LB_EXTERNAL, + UNIC_LB_MAX, +}; + +struct unic_lb_en_cfg { + u8 sub_cmd; + u8 lb_en : 1; + u8 rsvd : 7; + u8 result; + u8 rsv[21]; +}; + +struct unic_cfg_mac_pause_cmd { + __le32 tx_en; + __le32 rx_en; + u8 rsv[16]; +}; + +struct unic_cfg_pfc_pause_cmd { + u8 tx_enable : 1; + u8 rx_enable : 1; + u8 rsvd0 : 6; + u8 pri_bitmap; + u8 rsv1[22]; +}; + +struct unic_query_link_diagnosis_resp { + __le32 status_code; + u8 rsv[20]; +}; + #endif diff --git a/drivers/net/ub/unic/unic_comm_addr.c b/drivers/net/ub/unic/unic_comm_addr.c index 
cc2822453614e20630caf8a5854bcd81b4ea09fa..9edfd9c351aeb3f528a3afa12b8ed23736248115 100644 --- a/drivers/net/ub/unic/unic_comm_addr.c +++ b/drivers/net/ub/unic/unic_comm_addr.c @@ -180,6 +180,7 @@ bool unic_comm_sync_addr_table(struct unic_vport *vport, memcpy(new_node->unic_addr, addr_node->unic_addr, UNIC_ADDR_LEN); new_node->state = addr_node->state; + new_node->is_pfc = addr_node->is_pfc; new_node->node_mask = addr_node->node_mask; list_add_tail(&new_node->node, &tmp_add_list); break; diff --git a/drivers/net/ub/unic/unic_comm_addr.h b/drivers/net/ub/unic/unic_comm_addr.h index ae390e4a7f079077d4e5d6ca8b85c86a501e5468..fd142d51b21c256bc11dc911b17d49223d99b517 100644 --- a/drivers/net/ub/unic/unic_comm_addr.h +++ b/drivers/net/ub/unic/unic_comm_addr.h @@ -22,16 +22,32 @@ enum UNIC_COMM_ADDR_STATE { #define UNIC_IPV4_PREFIX 0xffff0000 #define UNIC_ADDR_LEN 16 + struct unic_comm_addr_node { struct list_head node; enum UNIC_COMM_ADDR_STATE state; union { u8 unic_addr[UNIC_ADDR_LEN]; struct in6_addr ip_addr; + u8 mac_addr[ETH_ALEN]; }; + u8 is_pfc; u16 node_mask; }; +#define UNIC_FORMAT_MAC_LEN 18 +#define UNIC_FORMAT_MAC_OFFSET_0 0 +#define UNIC_FORMAT_MAC_OFFSET_4 4 +#define UNIC_FORMAT_MAC_OFFSET_5 5 +static inline void unic_comm_format_mac_addr(char *format_mac, const u8 *mac) +{ + (void)snprintf(format_mac, UNIC_FORMAT_MAC_LEN, + "%02x:**:**:**:%02x:%02x", + mac[UNIC_FORMAT_MAC_OFFSET_0], + mac[UNIC_FORMAT_MAC_OFFSET_4], + mac[UNIC_FORMAT_MAC_OFFSET_5]); +} + static inline bool unic_comm_addr_equal(const u8 *addr1, const u8 *addr2, u16 mask1, u16 mask2) { diff --git a/drivers/net/ub/unic/unic_crq.c b/drivers/net/ub/unic/unic_crq.c index 29a27c6c547d21fbcc2f6ec59a6ad9ddb1c386fb..fa01e880a005223fbd2c3c5f23daf5c11b6f5dc0 100644 --- a/drivers/net/ub/unic/unic_crq.c +++ b/drivers/net/ub/unic/unic_crq.c @@ -37,6 +37,31 @@ static void __unic_handle_link_status_event(struct auxiliary_device *adev, clear_bit(UNIC_STATE_LINK_UPDATING, &unic_dev->state); } +static void 
unic_link_fail_parse(struct auxiliary_device *adev, + u8 link_fail_code) +{ + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + static const struct { + u8 link_fail_code; + const char *str; + } codes[] = { + {UNIC_LF_REF_CLOCK_LOST, "Reference clock lost!\n"}, + {UNIC_LF_XSFP_TX_DISABLE, "SFP tx is disabled!\n"}, + {UNIC_LF_XSFP_ABSENT, "SFP is absent!\n"} + }; + + if (link_fail_code == UNIC_LF_NORMAL) + return; + + if (link_fail_code >= UNIC_LF_REF_MAX) { + unic_warn(unic_dev, "unknown fail code, fail_code = %u.\n", + link_fail_code); + return; + } + + unic_warn(unic_dev, "link fail cause: %s", codes[link_fail_code - 1].str); +} + int unic_handle_link_status_event(void *dev, void *data, u32 len) { struct unic_link_status_cmd_resp *resp = data; @@ -45,5 +70,8 @@ int unic_handle_link_status_event(void *dev, void *data, u32 len) __unic_handle_link_status_event(adev, hw_link_status); + if (!hw_link_status && !ubase_adev_ubl_supported(adev)) + unic_link_fail_parse(adev, resp->link_fail_code); + return 0; } diff --git a/drivers/net/ub/unic/unic_dcbnl.c b/drivers/net/ub/unic/unic_dcbnl.c index 5f14cebac540e64251c4a094e9b113dedd3f6c90..82746e3781f3c480b54eea8531ed5f04dec1bd3f 100644 --- a/drivers/net/ub/unic/unic_dcbnl.c +++ b/drivers/net/ub/unic/unic_dcbnl.c @@ -121,7 +121,7 @@ static int unic_setets_preconditions(struct net_device *net_dev) if (netif_running(net_dev)) { unic_err(unic_dev, - "failed to set ets, due to network interface is up, pls down it first and try again.\n"); + "failed to set ets, due to network interface is up, please down it first and try again.\n"); return -EBUSY; } @@ -252,6 +252,108 @@ static int unic_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) return unic_setets_config(ndev, ets, changed, vl_num); } +static int unic_check_pfc_preconditions(struct net_device *net_dev) +{ + struct unic_dev *unic_dev = netdev_priv(net_dev); + + if (!unic_dev_pfc_supported(unic_dev)) + return -EOPNOTSUPP; + + if 
(unic_resetting(net_dev)) + return -EBUSY; + + if (!(unic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + return 0; +} + +static int unic_pfc_down(struct unic_dev *unic_dev, struct ieee_pfc *pfc) +{ + struct unic_pfc_info *pfc_info = &unic_dev->channels.vl.pfc_info; + u8 tx_pause, rx_pause; + int ret; + + ret = unic_pfc_pause_cfg(unic_dev, pfc->pfc_en); + if (ret) + return ret; + + pfc_info->fc_mode &= ~(UNIC_FC_PFC_EN); + pfc_info->pfc_en = pfc->pfc_en; + + tx_pause = pfc_info->fc_mode & UNIC_TX_PAUSE_EN ? 1 : 0; + rx_pause = pfc_info->fc_mode & UNIC_RX_PAUSE_EN ? 1 : 0; + + return unic_mac_pause_en_cfg(unic_dev, tx_pause, rx_pause); +} + +static int unic_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct ubase_eth_mac_stats eth_stats = {0}; + int i, ret; + + ret = unic_check_pfc_preconditions(ndev); + if (ret) + return ret; + + ret = ubase_get_eth_port_stats(unic_dev->comdev.adev, ð_stats); + if (ret) + return ret; + + memset(pfc, 0, sizeof(*pfc)); + pfc->pfc_en = unic_dev->channels.vl.pfc_info.pfc_en; + pfc->pfc_cap = UNIC_MAX_PRIO_NUM; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + pfc->requests[i] = unic_get_pfc_tx_pkts(ð_stats, i); + pfc->indications[i] = unic_get_pfc_rx_pkts(ð_stats, i); + } + + return 0; +} + +static int unic_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_pfc_info *pfc_info; + int ret; + + pfc_info = &unic_dev->channels.vl.pfc_info; + + ret = unic_check_pfc_preconditions(ndev); + if (ret) + return ret; + + if (pfc->pfc_en == pfc_info->pfc_en) + return 0; + + if (!pfc->pfc_en) + return unic_pfc_down(unic_dev, pfc); + + if (!(pfc_info->fc_mode & UNIC_FC_PFC_EN)) { + ret = unic_mac_pause_en_cfg(unic_dev, false, false); + if (ret) { + unic_info(unic_dev, "failed to disable pause, ret = %d.\n", + ret); + return ret; + } + } + + ret = unic_pfc_pause_cfg(unic_dev, 
pfc->pfc_en); + if (ret) { + unic_info(unic_dev, + "failed to set pfc tx rx enable or priority, ret = %d.\n", + ret); + return ret; + } + + pfc_info->fc_mode |= UNIC_FC_PFC_EN; + pfc_info->pfc_en = pfc->pfc_en; + + return ret; +} + static int unic_dscp_prio_check(struct net_device *netdev, struct dcb_app *app) { struct unic_dev *unic_dev = netdev_priv(netdev); @@ -260,8 +362,7 @@ static int unic_dscp_prio_check(struct net_device *netdev, struct dcb_app *app) return -EOPNOTSUPP; if (netif_running(netdev)) { - unic_err(unic_dev, - "failed to set dscp-prio, due to network interface is up, pls down it first and try again.\n"); + unic_err(unic_dev, "failed to set dscp-prio, due to network interface is up, please down it first and try again.\n"); return -EBUSY; } @@ -482,6 +583,8 @@ static const struct dcbnl_rtnl_ops unic_dcbnl_ops = { .ieee_setets = unic_dcbnl_ieee_setets, .ieee_getmaxrate = unic_ieee_getmaxrate, .ieee_setmaxrate = unic_ieee_setmaxrate, + .ieee_getpfc = unic_dcbnl_ieee_getpfc, + .ieee_setpfc = unic_dcbnl_ieee_setpfc, .ieee_setapp = unic_dcbnl_ieee_setapp, .ieee_delapp = unic_dcbnl_ieee_delapp, .getdcbx = unic_dcbnl_getdcbx, @@ -492,7 +595,8 @@ void unic_set_dcbnl_ops(struct net_device *netdev) { struct unic_dev *unic_dev = netdev_priv(netdev); - if (!unic_dev_ets_supported(unic_dev)) + if (!unic_dev_ets_supported(unic_dev) && + !unic_dev_pfc_supported(unic_dev)) return; netdev->dcbnl_ops = &unic_dcbnl_ops; diff --git a/drivers/net/ub/unic/unic_dcbnl.h b/drivers/net/ub/unic/unic_dcbnl.h index a721fa51d6e4c97973b5b523037363c7010e54a7..fc8219ba40f5989b992f7ca3941051bdeeae1d81 100644 --- a/drivers/net/ub/unic/unic_dcbnl.h +++ b/drivers/net/ub/unic/unic_dcbnl.h @@ -8,6 +8,28 @@ #define __UNIC_DCBNL_H__ #include +#include + +#include "unic_stats.h" +#include "unic_ethtool.h" + +static inline u64 unic_get_pfc_tx_pkts(struct ubase_eth_mac_stats *eth_stats, + u32 pri) +{ + u16 offset = UNIC_ETH_MAC_STATS_FIELD_OFF(tx_pri0_pfc_pkts) + + pri * 
sizeof(eth_stats->tx_pri0_pfc_pkts); + + return UNIC_STATS_READ(eth_stats, offset); +} + +static inline u64 unic_get_pfc_rx_pkts(struct ubase_eth_mac_stats *eth_stats, + u32 pri) +{ + u16 offset = UNIC_ETH_MAC_STATS_FIELD_OFF(rx_pri0_pfc_pkts) + + pri * sizeof(eth_stats->rx_pri0_pfc_pkts); + + return UNIC_STATS_READ(eth_stats, offset); +} #ifdef CONFIG_UB_UNIC_DCB void unic_set_dcbnl_ops(struct net_device *netdev); diff --git a/drivers/net/ub/unic/unic_dev.c b/drivers/net/ub/unic/unic_dev.c index a0b25e52695e3b48bb876cd65e0c4f1419ebbd5a..c63ea8116ca8ef27191b27640f70c026869fcd3b 100644 --- a/drivers/net/ub/unic/unic_dev.c +++ b/drivers/net/ub/unic/unic_dev.c @@ -23,9 +23,11 @@ #include "unic_event.h" #include "unic_guid.h" #include "unic_hw.h" +#include "unic_ip.h" #include "unic_qos_hw.h" +#include "unic_mac.h" #include "unic_netdev.h" -#include "unic_rack_ip.h" +#include "unic_vlan.h" #include "unic_dev.h" #define UNIC_WATCHDOG_TIMEOUT (5 * HZ) @@ -232,6 +234,43 @@ static int unic_init_vl_maxrate(struct unic_dev *unic_dev) unic_dev->channels.vl.vl_bitmap); } +static int unic_init_pause(struct unic_dev *unic_dev) +{ + struct unic_pfc_info *pfc_info = &unic_dev->channels.vl.pfc_info; + int ret; + + if (!unic_dev_pause_supported(unic_dev)) + return 0; + + ret = unic_mac_pause_en_cfg(unic_dev, UNIC_RX_TX_PAUSE_ON, + UNIC_RX_TX_PAUSE_ON); + if (ret) + return ret; + + pfc_info->fc_mode = UNIC_TX_PAUSE_EN | UNIC_RX_PAUSE_EN; + + return ret; +} + +static int unic_init_pfc(struct unic_dev *unic_dev) +{ + if (!unic_dev_pfc_supported(unic_dev)) + return 0; + + return unic_pfc_pause_cfg(unic_dev, 0); +} + +static int unic_init_fc_mode(struct unic_dev *unic_dev) +{ + int ret; + + ret = unic_init_pause(unic_dev); + if (ret) + return ret; + + return unic_init_pfc(unic_dev); +} + static int unic_init_vl_info(struct unic_dev *unic_dev) { int ret; @@ -543,6 +582,14 @@ static void unic_set_netdev_attr(struct net_device *netdev) if (unic_dev_ubl_supported(unic_dev)) { 
netdev->features |= NETIF_F_VLAN_CHALLENGED; netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + } else { + netdev->flags |= IFF_BROADCAST | IFF_MULTICAST; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if (!unic_dev_cfg_vlan_filter_supported(unic_dev)) { + netdev->features |= NETIF_F_VLAN_CHALLENGED; + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } if (unic_dev_tx_csum_offload_supported(unic_dev)) @@ -599,6 +646,10 @@ static int unic_init_mac(struct unic_dev *unic_dev) return ret; } + ret = unic_init_fc_mode(unic_dev); + if (ret) + return ret; + mutex_init(&record->lock); return 0; } @@ -610,6 +661,12 @@ static void unic_uninit_mac(struct unic_dev *unic_dev) mutex_destroy(&record->lock); } +static void unic_uninit_dev_addr(struct unic_dev *unic_dev) +{ + if (unic_dev_eth_mac_supported(unic_dev)) + unic_uninit_mac_addr(unic_dev); +} + int unic_set_mtu(struct unic_dev *unic_dev, int new_mtu) { u16 max_frame_size; @@ -654,8 +711,13 @@ static void unic_periodic_service_task(struct unic_dev *unic_dev) unic_link_status_update(unic_dev); unic_update_port_info(unic_dev); - unic_sync_rack_ip_table(unic_dev); + unic_sync_ip_table(unic_dev); + + if (unic_dev_eth_mac_supported(unic_dev)) + unic_sync_mac_table(unic_dev); + unic_sync_promisc_mode(unic_dev); + unic_sync_vlan_filter(unic_dev); if (!(unic_dev->serv_processed_cnt % UNIC_UPDATE_STATS_TIMER_INTERVAL)) unic_update_stats_for_all(unic_dev); @@ -680,6 +742,12 @@ static void unic_init_vport_info(struct unic_dev *unic_dev) spin_lock_init(&unic_dev->vport.addr_tbl.tmp_ip_lock); INIT_LIST_HEAD(&unic_dev->vport.addr_tbl.ip_list); spin_lock_init(&unic_dev->vport.addr_tbl.ip_list_lock); + + if (unic_dev_eth_mac_supported(unic_dev)) { + INIT_LIST_HEAD(&unic_dev->vport.addr_tbl.uc_mac_list); + INIT_LIST_HEAD(&unic_dev->vport.addr_tbl.mc_mac_list); + spin_lock_init(&unic_dev->vport.addr_tbl.mac_list_lock); + } } static int unic_alloc_vport_buf(struct unic_dev *unic_dev) @@ -764,12 +832,22 @@ static int 
unic_init_vport(struct unic_dev *unic_dev) unic_init_vport_info(unic_dev); + ret = unic_init_vlan_config(unic_dev); + if (ret) + unic_uninit_vport_buf(unic_dev); + return ret; } static void unic_uninit_vport(struct unic_dev *unic_dev) { - unic_uninit_rack_ip_table(unic_dev); + unic_uninit_ip_table(unic_dev); + + if (unic_dev_eth_mac_supported(unic_dev)) { + unic_uninit_mac_table(unic_dev); + unic_uninit_vlan_config(unic_dev); + } + unic_uninit_vport_buf(unic_dev); } @@ -790,7 +868,7 @@ static int unic_init_dev_addr(struct unic_dev *unic_dev) if (unic_dev_ubl_supported(unic_dev)) return unic_init_guid(unic_dev); - return 0; + return unic_init_mac_addr(unic_dev); } static int unic_init_netdev_priv(struct net_device *netdev, @@ -830,7 +908,7 @@ static int unic_init_netdev_priv(struct net_device *netdev, ret = unic_init_channels_attr(priv); if (ret) - goto unic_unint_mac; + goto err_uninit_dev_addr; ret = unic_init_channels(priv, priv->channels.num); if (ret) { @@ -844,6 +922,8 @@ static int unic_init_netdev_priv(struct net_device *netdev, err_uninit_channels_attr: unic_uninit_channels_attr(priv); +err_uninit_dev_addr: + unic_uninit_dev_addr(priv); unic_unint_mac: unic_uninit_mac(priv); err_uninit_vport: @@ -860,6 +940,7 @@ static void unic_uninit_netdev_priv(struct net_device *netdev) unic_uninit_channels(priv); unic_uninit_channels_attr(priv); + unic_uninit_dev_addr(priv); unic_uninit_mac(priv); unic_uninit_vport(priv); mutex_destroy(&priv->act_info.mutex); @@ -969,6 +1050,11 @@ static struct net_device *unic_alloc_netdev(struct auxiliary_device *adev) dev_warn(adev->dev.parent, "failed to alloc netdev because of ubl macro is not enabled.\n"); #endif + } else { + snprintf(name, IFNAMSIZ, "ethc%ud%ue%u", caps->chip_id, + caps->die_id, caps->ue_id); + netdev = alloc_netdev_mq(sizeof(struct unic_dev), name, + NET_NAME_USER, ether_setup, channel_num); } return netdev; @@ -1010,7 +1096,7 @@ int unic_dev_init(struct auxiliary_device *adev) goto err_unregister_event; } - 
unic_query_rack_ip(adev); + unic_query_ip_by_ctrlq(adev); unic_start_dev_period_task(netdev); return 0; diff --git a/drivers/net/ub/unic/unic_dev.h b/drivers/net/ub/unic/unic_dev.h index 51708850e38d8791ca596555ea62292607495cdc..c8368040fa9205a85025c1adf3a7e39932b5d374 100644 --- a/drivers/net/ub/unic/unic_dev.h +++ b/drivers/net/ub/unic/unic_dev.h @@ -43,6 +43,8 @@ enum unic_vport_state { UNIC_VPORT_STATE_ALIVE, UNIC_VPORT_STATE_PROMISC_CHANGE, UNIC_VPORT_STATE_IP_TBL_CHANGE, + UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, + UNIC_VPORT_STATE_MAC_TBL_CHANGE, UNIC_VPORT_STATE_IP_QUERYING, }; @@ -124,6 +126,11 @@ struct unic_coal_txrx { struct unic_coalesce rx_coal; }; +struct unic_pfc_info { + u8 fc_mode; + u8 pfc_en; +}; + struct unic_vl { u8 vl_num; u8 dscp_app_cnt; @@ -136,6 +143,7 @@ struct unic_vl { u8 vl_sl[UBASE_MAX_VL_NUM]; u64 vl_maxrate[UBASE_MAX_VL_NUM]; u16 vl_bitmap; + struct unic_pfc_info pfc_info; }; struct unic_channels { @@ -161,7 +169,10 @@ struct unic_channels { struct unic_caps { u16 rx_buff_len; u16 total_ip_tbl_size; - u32 rsvd0[5]; + u32 uc_mac_tbl_size; + u32 mc_mac_tbl_size; + u32 vlan_tbl_size; + u32 mng_tbl_size; u16 max_trans_unit; u16 min_trans_unit; u32 vport_buf_size; /* unit: byte */ @@ -204,6 +215,22 @@ struct unic_addr_tbl { spinlock_t tmp_ip_lock; /* protect ip address from controller */ struct list_head tmp_ip_list; /* Store temprary ip table */ + + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; /* store unicast mac table */ + struct list_head mc_mac_list; /* store multicast mac table */ +}; + +struct unic_vlan_tbl { + bool cur_vlan_fltr_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + struct list_head vlan_list; /* Store vlan table */ + spinlock_t vlan_lock; /* protect vlan list */ +}; + +struct unic_vlan_cfg { + struct list_head node; + u16 vlan_id; }; struct unic_vport_buf { @@ -214,6 +241,7 @@ struct unic_vport_buf { struct unic_vport { struct unic_dev *back; 
struct unic_addr_tbl addr_tbl; + struct unic_vlan_tbl vlan_tbl; u8 overflow_promisc_flags; u8 last_promisc_flags; unsigned long state; @@ -283,6 +311,11 @@ static inline bool unic_dev_eth_mac_supported(struct unic_dev *unic_dev) return ubase_adev_eth_mac_supported(unic_dev->comdev.adev); } +static inline bool unic_dev_pfc_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PFC_B); +} + static inline bool unic_dev_ets_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_ETS_B); @@ -293,6 +326,21 @@ static inline bool unic_dev_fec_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_B); } +static inline bool unic_dev_pause_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PAUSE_B); +} + +static inline bool unic_dev_eth_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_ETH_B); +} + +static inline bool unic_dev_serial_serdes_lb_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_SERIAL_SERDES_LB_B); +} + static inline bool unic_dev_tc_speed_limit_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_TC_SPEED_LIMIT_B); @@ -308,11 +356,36 @@ static inline bool unic_dev_rx_csum_offload_supported(struct unic_dev *unic_dev) return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_RX_CSUM_OFFLOAD_B); } +static inline bool unic_dev_app_lb_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_APP_LB_B); +} + static inline bool unic_dev_fec_stats_supported(struct unic_dev *unic_dev) { return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_FEC_STATS_B); } +static inline bool unic_dev_external_lb_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_EXTERNAL_LB_B); +} + +static inline bool unic_dev_parallel_serdes_lb_supported(struct unic_dev *unic_dev) +{ + return 
unic_get_cap_bit(unic_dev, UNIC_SUPPORT_PARALLEL_SERDES_LB_B); +} + +static inline bool unic_dev_cfg_vlan_filter_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_CFG_VLAN_FILTER_B); +} + +static inline bool unic_dev_cfg_mac_supported(struct unic_dev *unic_dev) +{ + return unic_get_cap_bit(unic_dev, UNIC_SUPPORT_CFG_MAC_B); +} + static inline bool __unic_removing(struct unic_dev *unic_dev) { return test_bit(UNIC_STATE_REMOVING, &unic_dev->state); diff --git a/drivers/net/ub/unic/unic_ethtool.c b/drivers/net/ub/unic/unic_ethtool.c index f2cfa3df1126095085b770f1550cbaa24264fc48..886f6d6917337c6be147fe55927e89613a8c9217 100644 --- a/drivers/net/ub/unic/unic_ethtool.c +++ b/drivers/net/ub/unic/unic_ethtool.c @@ -8,11 +8,12 @@ #include #include "unic.h" +#include "unic_channel.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_lb.h" #include "unic_netdev.h" #include "unic_stats.h" -#include "unic_channel.h" #include "unic_ethtool.h" static u32 unic_get_link_status(struct net_device *netdev) @@ -82,6 +83,85 @@ static int unic_get_link_ksettings(struct net_device *netdev, return 0; } +static bool unic_speed_supported(struct unic_dev *unic_dev, u32 speed, u32 lanes) +{ + u32 speed_bit = 0; + + if (unic_get_speed_bit(speed, lanes, &speed_bit)) + return false; + + return !!(speed_bit & unic_dev->hw.mac.speed_ability); +} + +static int unic_check_ksettings_param(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_mac *mac = &unic_dev->hw.mac; + u32 lanes; + + if (cmd->base.autoneg && !mac->support_autoneg) { + unic_err(unic_dev, "hw not support autoneg.\n"); + return -EINVAL; + } + + /* when autoneg is on, hw not support specified speed params, + * unnecessary to check them. + */ + if (cmd->base.autoneg) + return 0; + + /* if user not specify lanes, use current lanes */ + lanes = cmd->lanes ? 
cmd->lanes : mac->lanes; + if (!unic_speed_supported(unic_dev, cmd->base.speed, lanes)) { + unic_err(unic_dev, "speed(%u) and lanes(%u) is not supported.\n", + cmd->base.speed, lanes); + return -EINVAL; + } + + if (cmd->base.duplex != DUPLEX_FULL) { + unic_err(unic_dev, "only support full duplex.\n"); + return -EINVAL; + } + + return 0; +} + +static bool unic_link_ksettings_changed(struct unic_mac *mac, + const struct ethtool_link_ksettings *cmd) +{ + /* when autoneg is disabled and lanes not specified, lanes is 0. */ + if (cmd->base.autoneg == mac->autoneg && + cmd->base.duplex == mac->duplex && + cmd->base.speed == mac->speed && + (cmd->lanes == mac->lanes || (!cmd->lanes && !cmd->base.autoneg))) + return false; + + return true; +} + +static int unic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_mac *mac = &unic_dev->hw.mac; + int ret; + + if (!unic_link_ksettings_changed(mac, cmd)) + return 0; + + ret = unic_check_ksettings_param(netdev, cmd); + if (ret) + return ret; + + unic_info(unic_dev, + "set link: autoneg = %u, speed = %u, duplex = %u, lanes = %u.\n", + cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex, cmd->lanes); + + return unic_set_mac_link_ksettings(unic_dev, cmd); +} + static void unic_get_driver_info(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -108,6 +188,88 @@ static void unic_get_driver_info(struct net_device *netdev, u32_get_bits(fw_version, UBASE_FW_VERSION_BYTE0_MASK)); } +static void unic_update_pause_state(u8 pause_mode, + struct ethtool_pauseparam *eth_pauseparam) +{ + eth_pauseparam->rx_pause = UNIC_RX_TX_PAUSE_OFF; + eth_pauseparam->tx_pause = UNIC_RX_TX_PAUSE_OFF; + + if (pause_mode & UNIC_TX_PAUSE_EN) + eth_pauseparam->tx_pause = UNIC_RX_TX_PAUSE_ON; + + if (pause_mode & UNIC_RX_PAUSE_EN) + eth_pauseparam->rx_pause = UNIC_RX_TX_PAUSE_ON; +} + +static void unic_record_user_pauseparam(struct unic_dev 
*unic_dev, + struct ethtool_pauseparam *eth_pauseparam) +{ + struct unic_pfc_info *pfc_info = &unic_dev->channels.vl.pfc_info; + u32 rx_en = eth_pauseparam->rx_pause; + u32 tx_en = eth_pauseparam->tx_pause; + + pfc_info->fc_mode = 0; + + if (tx_en) + pfc_info->fc_mode = UNIC_TX_PAUSE_EN; + + if (rx_en) + pfc_info->fc_mode |= UNIC_RX_PAUSE_EN; +} + +static void unic_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *eth_pauseparam) +{ +#define PAUSE_AUTONEG_OFF 0 + + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (!unic_dev_pause_supported(unic_dev)) + return; + + eth_pauseparam->autoneg = PAUSE_AUTONEG_OFF; + + if (unic_dev->channels.vl.pfc_info.fc_mode & UNIC_FC_PFC_EN) { + eth_pauseparam->rx_pause = UNIC_RX_TX_PAUSE_OFF; + eth_pauseparam->tx_pause = UNIC_RX_TX_PAUSE_OFF; + return; + } + + unic_update_pause_state(unic_dev->channels.vl.pfc_info.fc_mode, + eth_pauseparam); +} + +static int unic_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *eth_pauseparam) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret; + + if (!unic_dev_pause_supported(unic_dev)) + return -EOPNOTSUPP; + + if (eth_pauseparam->autoneg) { + unic_warn(unic_dev, + "failed to set pause, set autoneg not supported.\n"); + return -EOPNOTSUPP; + } + + if (unic_dev->channels.vl.pfc_info.fc_mode & UNIC_FC_PFC_EN) { + unic_warn(unic_dev, + "failed to set pause, priority flow control enabled.\n"); + return -EOPNOTSUPP; + } + + ret = unic_mac_pause_en_cfg(unic_dev, eth_pauseparam->tx_pause, + eth_pauseparam->rx_pause); + if (ret) + return ret; + + unic_record_user_pauseparam(unic_dev, eth_pauseparam); + + return ret; +} + static int unic_get_fecparam(struct net_device *ndev, struct ethtool_fecparam *fec) { @@ -276,6 +438,12 @@ static int unic_set_coalesce(struct net_device *netdev, struct unic_coalesce old_tx_coal, old_rx_coal; int ret, ret1; + if (netif_running(netdev)) { + unic_err(unic_dev, + "failed to set coalesce param, due to network interface 
is up, please down it first and try again.\n"); + return -EBUSY; + } + if (unic_resetting(netdev)) return -EBUSY; @@ -345,6 +513,70 @@ static int unic_reset(struct net_device *ndev, u32 *flags) return 0; } +struct unic_ethtool_link_ext_state_mapping { + u32 status_code; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct unic_ethtool_link_ext_state_mapping +unic_link_ext_state_map[] = { + {516, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS}, + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, +}; + +static int unic_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *info) +{ + const struct unic_ethtool_link_ext_state_mapping *map; + struct unic_query_link_diagnosis_resp resp = {0}; + struct unic_dev *unic_dev = netdev_priv(netdev); + struct ubase_cmd_buf in, out; + u32 status_code; + int ret; + u8 i; + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + if (unic_dev_ubl_supported(unic_dev)) + return -EOPNOTSUPP; + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_LINK_DIAGNOSIS, true, 0, NULL); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_LINK_DIAGNOSIS, false, + sizeof(resp), &resp); + + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) { + unic_err(unic_dev, "failed to query link diagnosis, ret = %d.\n", + ret); + return ret; + } + + status_code = le32_to_cpu(resp.status_code); + if (!status_code) + return -ENODATA; + + for (i = 0; i < ARRAY_SIZE(unic_link_ext_state_map); i++) { + map = &unic_link_ext_state_map[i]; + if (map->status_code == status_code) { + info->link_ext_state = map->link_ext_state; + info->__link_ext_substate = map->link_ext_substate; + return 0; + } + } + + unic_warn(unic_dev, 
"unknown link failure status_code = %u.\n", + status_code); + + return -ENODATA; +} + #define UNIC_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \ ETHTOOL_RING_USE_TX_PUSH) #define UNIC_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ @@ -357,7 +589,10 @@ static const struct ethtool_ops unic_ethtool_ops = { .supported_coalesce_params = UNIC_ETHTOOL_COALESCE, .get_link = unic_get_link_status, .get_link_ksettings = unic_get_link_ksettings, + .set_link_ksettings = unic_set_link_ksettings, .get_drvinfo = unic_get_driver_info, + .get_pauseparam = unic_get_pauseparam, + .set_pauseparam = unic_set_pauseparam, .get_regs_len = unic_get_regs_len, .get_regs = unic_get_regs, .get_ethtool_stats = unic_get_stats, @@ -367,12 +602,14 @@ static const struct ethtool_ops unic_ethtool_ops = { .set_channels = unic_set_channels, .get_ringparam = unic_get_channels_param, .set_ringparam = unic_set_channels_param, + .self_test = unic_self_test, .get_fecparam = unic_get_fecparam, .set_fecparam = unic_set_fecparam, .get_fec_stats = unic_get_fec_stats, .get_coalesce = unic_get_coalesce, .set_coalesce = unic_set_coalesce, .reset = unic_reset, + .get_link_ext_state = unic_get_link_ext_state, }; void unic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ub/unic/unic_ethtool.h b/drivers/net/ub/unic/unic_ethtool.h index 4ba10f0b54b2d3ce90d251c3b84af230749f94ca..fe9e06b3e4e960f1a9f2be19c1be259f7a532b97 100644 --- a/drivers/net/ub/unic/unic_ethtool.h +++ b/drivers/net/ub/unic/unic_ethtool.h @@ -9,9 +9,17 @@ #include #include +#include #define UNIC_TXRX_MIN_DEPTH 64 +#define UNIC_RX_TX_PAUSE_ON 1 +#define UNIC_RX_TX_PAUSE_OFF 0 + +#define UNIC_TX_PAUSE_EN BIT(0) +#define UNIC_RX_PAUSE_EN BIT(1) +#define UNIC_FC_PFC_EN BIT(2) + struct unic_reset_type_map { enum ethtool_reset_flags reset_flags; enum ubase_reset_type reset_type; diff --git a/drivers/net/ub/unic/unic_event.c b/drivers/net/ub/unic/unic_event.c index 
1785e0aad7f1e07f5acf82d64a58372527c4883f..5e0df058d06786a04f100fa1fa06a6da99311066 100644 --- a/drivers/net/ub/unic/unic_event.c +++ b/drivers/net/ub/unic/unic_event.c @@ -20,9 +20,10 @@ #include "unic_dcbnl.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_ip.h" +#include "unic_mac.h" #include "unic_netdev.h" #include "unic_qos_hw.h" -#include "unic_rack_ip.h" #include "unic_reset.h" #include "unic_event.h" @@ -86,6 +87,9 @@ static void unic_activate_event_process(struct unic_dev *unic_dev) else clear_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &unic_dev->vport.state); + if (unic_dev_eth_mac_supported(unic_dev)) + unic_activate_mac_table(unic_dev); + out: mutex_lock(&act_info->mutex); act_info->deactivate = false; @@ -119,6 +123,9 @@ static void unic_deactivate_event_process(struct unic_dev *unic_dev) act_info->deactivate = true; mutex_unlock(&act_info->mutex); + if (unic_dev_eth_mac_supported(unic_dev)) + unic_deactivate_mac_table(unic_dev); + ret = unic_activate_promisc_mode(unic_dev, false); if (ret) unic_warn(unic_dev, "failed to close promisc, ret = %d.\n", ret); @@ -160,6 +167,18 @@ static void unic_rack_port_reset(struct unic_dev *unic_dev, bool link_up) unic_dev->hw.mac.link_status = UNIC_LINK_STATUS_DOWN; } +static void unic_port_reset(struct net_device *netdev, bool link_up) +{ + rtnl_lock(); + + if (link_up) + unic_net_open(netdev); + else + unic_net_stop(netdev); + + rtnl_unlock(); +} + static void unic_port_handler(struct auxiliary_device *adev, bool link_up) { struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); @@ -168,7 +187,10 @@ static void unic_port_handler(struct auxiliary_device *adev, bool link_up) if (!netif_running(netdev)) return; - unic_rack_port_reset(unic_dev, link_up); + if (unic_dev_ubl_supported(unic_dev)) + unic_rack_port_reset(unic_dev, link_up); + else + unic_port_reset(netdev, link_up); } static struct ubase_ctrlq_event_nb unic_ctrlq_events[] = { diff --git a/drivers/net/ub/unic/unic_hw.c b/drivers/net/ub/unic/unic_hw.c 
index 565ac56fb638105c52c46d23a26ff7bae87e590d..9d43c07569f0bdad88efe9796f19cff4eac4e595 100644 --- a/drivers/net/ub/unic/unic_hw.c +++ b/drivers/net/ub/unic/unic_hw.c @@ -31,6 +31,21 @@ static const struct unic_speed_bit_map speed_bit_map[] = { {UNIC_MAC_SPEED_10G, UNIC_LANES_1, UNIC_SUPPORT_10G_X1_BIT}, }; +int unic_get_speed_bit(u32 speed, u32 lanes, u32 *speed_bit) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { + if (speed == speed_bit_map[i].speed && + lanes == speed_bit_map[i].lanes) { + *speed_bit = speed_bit_map[i].speed_bit; + return 0; + } + } + + return -EINVAL; +} + static int unic_get_port_info(struct unic_dev *unic_dev) { struct unic_query_port_info_resp resp = {0}; @@ -113,6 +128,36 @@ int unic_set_mac_speed_duplex(struct unic_dev *unic_dev, u32 speed, u8 duplex, return ret; } +int unic_set_mac_link_ksettings(struct unic_dev *unic_dev, + const struct ethtool_link_ksettings *cmd) +{ + /* if user not specify lanes, use current lanes */ + u32 lanes = cmd->lanes ? cmd->lanes : unic_dev->hw.mac.lanes; + int ret; + + ret = unic_set_mac_autoneg(unic_dev, cmd->base.autoneg); + if (ret) + return ret; + + /* when autoneg is on, hw not support specified speed params. 
*/ + if (cmd->base.autoneg) { + unic_info(unic_dev, + "autoneg is on, ignore other speed params.\n"); + return 0; + } + + ret = unic_set_mac_speed_duplex(unic_dev, cmd->base.speed, + cmd->base.duplex, lanes); + if (ret) + return ret; + + unic_dev->hw.mac.speed = cmd->base.speed; + unic_dev->hw.mac.duplex = cmd->base.duplex; + unic_dev->hw.mac.lanes = lanes; + + return 0; +} + static void unic_set_fec_ability(struct unic_mac *mac) { linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); @@ -250,6 +295,20 @@ static void unic_update_fec_advertising(struct unic_mac *mac) mac->advertising); } +static void unic_update_pause_advertising(struct unic_dev *unic_dev) +{ + u8 fc_mode = unic_dev->channels.vl.pfc_info.fc_mode; + struct unic_mac *mac = &unic_dev->hw.mac; + bool rx_en = false, tx_en = false; + + if (!(fc_mode & UNIC_FC_PFC_EN)) { + rx_en = !!(fc_mode & UNIC_RX_PAUSE_EN); + tx_en = !!(fc_mode & UNIC_TX_PAUSE_EN); + } + + linkmode_set_pause(mac->advertising, tx_en, rx_en); +} + static void unic_update_advertising(struct unic_dev *unic_dev) { struct unic_mac *mac = &unic_dev->hw.mac; @@ -258,6 +317,9 @@ static void unic_update_advertising(struct unic_dev *unic_dev) unic_update_speed_advertising(mac); unic_update_fec_advertising(mac); + + if (unic_dev_pause_supported(unic_dev)) + unic_update_pause_advertising(unic_dev); } static void unic_update_port_capability(struct unic_dev *unic_dev) @@ -297,6 +359,10 @@ static void unic_setup_promisc_req(struct unic_promisc_cfg_cmd *req, req->promisc_mc_ind = 1; req->promisc_rx_mc_en = promisc_en->en_mc; + + req->promisc_rx_uc_mac_en = promisc_en->en_uc_mac; + req->promisc_rx_mc_mac_en = promisc_en->en_mc_mac; + req->promisc_rx_bc_en = promisc_en->en_bc; } int unic_get_promisc_mode(struct unic_dev *unic_dev, @@ -327,6 +393,9 @@ int unic_set_promisc_mode(struct unic_dev *unic_dev, u32 time_out; int ret; + if (!unic_dev_ubl_supported(unic_dev)) + promisc_en->en_bc = 1; + unic_setup_promisc_req(&req, promisc_en); 
ubase_fill_inout_buf(&in, UBASE_OPC_CFG_PROMISC_MODE, false, @@ -345,6 +414,8 @@ void unic_fill_promisc_en(struct unic_promisc_en *promisc_en, u8 flags) { promisc_en->en_uc_ip = !!(flags & UNIC_UPE); promisc_en->en_mc = !!(flags & UNIC_MPE); + promisc_en->en_uc_mac = !!(flags & UNIC_UPE); + promisc_en->en_mc_mac = !!(flags & UNIC_MPE); } int unic_activate_promisc_mode(struct unic_dev *unic_dev, bool activate) @@ -408,6 +479,9 @@ static void unic_parse_fiber_link_mode(struct unic_dev *unic_dev, unic_set_linkmode_lr(speed_ability, mac->supported); unic_set_linkmode_cr(speed_ability, mac->supported); + if (unic_dev_pause_supported(unic_dev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } @@ -419,6 +493,9 @@ static void unic_parse_backplane_link_mode(struct unic_dev *unic_dev, unic_set_linkmode_kr(speed_ability, mac->supported); + if (unic_dev_pause_supported(unic_dev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } @@ -480,6 +557,10 @@ static void unic_parse_dev_caps(struct unic_dev *unic_dev, caps->rx_buff_len = le16_to_cpu(resp->rx_buff_len); caps->total_ip_tbl_size = le16_to_cpu(resp->total_ip_tbl_size); + caps->uc_mac_tbl_size = le32_to_cpu(resp->uc_mac_tbl_size); + caps->mc_mac_tbl_size = le32_to_cpu(resp->mc_mac_tbl_size); + caps->vlan_tbl_size = le32_to_cpu(resp->vlan_tbl_size); + caps->mng_tbl_size = le32_to_cpu(resp->mng_tbl_size); caps->max_trans_unit = le16_to_cpu(resp->max_trans_unit); caps->min_trans_unit = le16_to_cpu(resp->min_trans_unit); caps->vport_buf_size = le16_to_cpu(resp->vport_buf_size) * KB; @@ -780,6 +861,51 @@ int unic_update_fec_stats(struct unic_dev *unic_dev) return ret; } +int unic_set_vlan_filter_hw(struct unic_dev *unic_dev, bool 
filter_en) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct unic_vlan_filter_ctrl_cmd req = {0}; + struct ubase_cmd_buf in; + u32 time_out; + int ret; + + req.filter_en = filter_en ? 1 : 0; + + ubase_fill_inout_buf(&in, UBASE_OPC_VLAN_FILTER_CTRL, false, + sizeof(req), &req); + + time_out = unic_cmd_timeout(unic_dev); + ret = ubase_cmd_send_in_ex(unic_dev->comdev.adev, &in, time_out); + if (ret) + dev_err(adev->dev.parent, + "failed to set vlan filter, ret = %d.\n", ret); + + return ret; +} + +int unic_set_port_vlan_hw(struct unic_dev *unic_dev, u16 vlan_id, bool is_add) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct unic_vlan_filter_cfg_cmd req = {0}; + struct ubase_cmd_buf in; + u32 time_out; + int ret; + + req.vlan_id = cpu_to_le16(vlan_id); + req.is_add = is_add ? 1 : 0; + + ubase_fill_inout_buf(&in, UBASE_OPC_VLAN_FILTER_CFG, false, sizeof(req), + &req); + + time_out = unic_cmd_timeout(unic_dev); + ret = ubase_cmd_send_in_ex(unic_dev->comdev.adev, &in, time_out); + if (ret) + dev_err(adev->dev.parent, + "failed to send port vlan command, ret = %d.\n", ret); + + return ret; +} + static void unic_set_rss_tc0_param(struct unic_channels *channels, u16 jfr_cnt, __le16 *jfr_idx) { diff --git a/drivers/net/ub/unic/unic_hw.h b/drivers/net/ub/unic/unic_hw.h index ba64d398e44b2ea7855d0f96b31a4edc4c82912a..59ac0ee4fa36ae0c79cead49256d033cc17cba1b 100644 --- a/drivers/net/ub/unic/unic_hw.h +++ b/drivers/net/ub/unic/unic_hw.h @@ -55,6 +55,9 @@ struct unic_promisc_en { u8 en_uc_ip; u8 en_uc_guid; u8 en_mc; + u8 en_uc_mac; + u8 en_mc_mac; + u8 en_bc; }; #define UNIC_RSS_MAX_CNT 10U @@ -71,11 +74,15 @@ static inline bool unic_is_port_down(struct unic_dev *unic_dev) return unic_dev->hw.mac.link_status == UNIC_LINK_STATUS_DOWN; } +int unic_get_speed_bit(u32 speed, u32 lanes, u32 *speed_bit); + int unic_update_port_info(struct unic_dev *unic_dev); int unic_set_mac_speed_duplex(struct unic_dev *unic_dev, u32 speed, u8 duplex, u8 lanes); 
int unic_set_mac_autoneg(struct unic_dev *unic_dev, u8 autoneg); +int unic_set_mac_link_ksettings(struct unic_dev *unic_dev, + const struct ethtool_link_ksettings *cmd); int unic_query_dev_res(struct unic_dev *unic_dev); @@ -99,6 +106,8 @@ int unic_query_vport_ctx(struct unic_dev *unic_dev, u16 offset, struct unic_vport_ctx_cmd *resp); int unic_set_fec_mode(struct unic_dev *unic_dev, u32 fec_mode); int unic_update_fec_stats(struct unic_dev *unic_dev); +int unic_set_vlan_filter_hw(struct unic_dev *unic_dev, bool filter_en); +int unic_set_port_vlan_hw(struct unic_dev *unic_dev, u16 vlan_id, bool is_kill); int unic_set_rss_tc_mode(struct unic_dev *unic_dev, u8 tc_vaild); int unic_query_rss_cfg(struct unic_dev *unic_dev, struct unic_cfg_rss_cmd *resp); diff --git a/drivers/net/ub/unic/unic_rack_ip.c b/drivers/net/ub/unic/unic_ip.c similarity index 99% rename from drivers/net/ub/unic/unic_rack_ip.c rename to drivers/net/ub/unic/unic_ip.c index 529856dcff7339ba753196bce585570a8e4b6004..e83baddded87e46210e62fceb2bded07cd749c38 100644 --- a/drivers/net/ub/unic/unic_rack_ip.c +++ b/drivers/net/ub/unic/unic_ip.c @@ -13,7 +13,7 @@ #include "unic_comm_addr.h" #include "unic_trace.h" -#include "unic_rack_ip.h" +#include "unic_ip.h" static void unic_update_rack_addr_state(struct unic_vport *vport, struct unic_comm_addr_node *addr_node, @@ -334,7 +334,7 @@ static void unic_rack_sync_addr_table(struct unic_vport *vport, unic_sync_rack_ip_list(vport, &tmp_add_list, UNIC_CTRLQ_ADD_IP); } -void unic_sync_rack_ip_table(struct unic_dev *unic_dev) +void unic_sync_ip_table(struct unic_dev *unic_dev) { struct unic_vport *vport = &unic_dev->vport; @@ -655,7 +655,7 @@ static void unic_update_rack_ip_list(struct unic_vport *vport, spin_unlock_bh(&vport->addr_tbl.ip_list_lock); } -void unic_query_rack_ip(struct auxiliary_device *adev) +void unic_query_ip_by_ctrlq(struct auxiliary_device *adev) { #define UNIC_LOOP_COUNT(total_size, size) ((total_size) / (size) + 1) @@ -706,7 +706,7 @@ void 
unic_query_rack_ip(struct auxiliary_device *adev) } } -void unic_uninit_rack_ip_table(struct unic_dev *unic_dev) +void unic_uninit_ip_table(struct unic_dev *unic_dev) { struct unic_vport *vport = &unic_dev->vport; struct list_head *list = &vport->addr_tbl.ip_list; diff --git a/drivers/net/ub/unic/unic_rack_ip.h b/drivers/net/ub/unic/unic_ip.h similarity index 88% rename from drivers/net/ub/unic/unic_rack_ip.h rename to drivers/net/ub/unic/unic_ip.h index 48f62eb0fb708058a43afa2f3b475d7088389f56..a73e8490536cd2ec241e00b6a6ddf41863d3b053 100644 --- a/drivers/net/ub/unic/unic_rack_ip.h +++ b/drivers/net/ub/unic/unic_ip.h @@ -4,8 +4,8 @@ * */ -#ifndef __UNIC_RACK_IP_H__ -#define __UNIC_RACK_IP_H__ +#ifndef __UNIC_IP_H__ +#define __UNIC_IP_H__ #include "unic_dev.h" #include "unic_comm_addr.h" @@ -72,11 +72,11 @@ static inline void unic_format_masked_ip_addr(char *format_masked_ip_addr, ip_addr[IP_START_BYTE + 2], ip_addr[IP_START_BYTE + 3]); } -void unic_sync_rack_ip_table(struct unic_dev *unic_dev); +void unic_sync_ip_table(struct unic_dev *unic_dev); int unic_handle_notify_ip_event(struct auxiliary_device *adev, u8 service_ver, void *data, u16 len, u16 seq); -void unic_query_rack_ip(struct auxiliary_device *adev); -void unic_uninit_rack_ip_table(struct unic_dev *unic_dev); +void unic_query_ip_by_ctrlq(struct auxiliary_device *adev); +void unic_uninit_ip_table(struct unic_dev *unic_dev); int unic_add_ip_addr(struct unic_dev *unic_dev, struct sockaddr *addr, u16 ip_mask); int unic_rm_ip_addr(struct unic_dev *unic_dev, struct sockaddr *addr, diff --git a/drivers/net/ub/unic/unic_lb.c b/drivers/net/ub/unic/unic_lb.c new file mode 100644 index 0000000000000000000000000000000000000000..70a427715835dc954d3de8ff62fe164940bfa0f6 --- /dev/null +++ b/drivers/net/ub/unic/unic_lb.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#include +#include + +#include "unic.h" +#include "unic_cmd.h" +#include "unic_dev.h" +#include "unic_hw.h" +#include "unic_netdev.h" +#include "unic_lb.h" + +#define UNIC_LB_TEST_CHANNEL_ID 0 +#define UNIC_LB_TEST_PKT_NUM 1 +#define UNIC_LB_TEST_UNEXECUTED 1 +#define UNIC_LB_TEST_PACKET_SIZE 128 + +#define UNIC_SW_TYPE_LEN 1 +#define UNIC_HEX 16 +#define UNIC_DHCPV4_PROTO 0x0100 + +static void unic_set_selftest_param(struct unic_dev *unic_dev, int *st_param) +{ + st_param[UNIC_LB_APP] = + unic_dev->loopback_flags & UNIC_SUPPORT_APP_LB; + st_param[UNIC_LB_SERIAL_SERDES] = + unic_dev->loopback_flags & UNIC_SUPPORT_SERIAL_SERDES_LB; + st_param[UNIC_LB_PARALLEL_SERDES] = + unic_dev->loopback_flags & UNIC_SUPPORT_PARALLEL_SERDES_LB; + st_param[UNIC_LB_EXTERNAL] = + unic_dev->loopback_flags & UNIC_SUPPORT_EXTERNAL_LB; +} + +static int unic_set_lb_mode(struct unic_dev *unic_dev, bool en, int loop_type) +{ + struct unic_lb_en_cfg req = {0}; + struct ubase_cmd_buf in; + int ret; + + req.lb_en = en ? 1 : 0; + req.sub_cmd = loop_type; + + ubase_fill_inout_buf(&in, UBASE_OPC_DL_CONFIG_LB, false, sizeof(req), + &req); + + ret = ubase_cmd_send_in(unic_dev->comdev.adev, &in); + if (ret) + unic_err(unic_dev, + "failed to config loopback mode, ret = %d, loop_type = %d.\n", + ret, loop_type); + + return ret; +} + +static int unic_lb_link_status_wait(struct unic_dev *unic_dev, bool en) +{ +#define UNIC_LINK_STATUS_MS 100 +#define UNIC_MAC_LINK_STATUS_NUM 100 + + u8 link_status = UNIC_LINK_STATUS_DOWN; + u8 link_ret; + int i = 0; + int ret; + + link_ret = en ? 
UNIC_LINK_STATUS_UP : UNIC_LINK_STATUS_DOWN; + + do { + ret = unic_query_link_status(unic_dev, &link_status); + if (ret) + return ret; + if (link_status == link_ret) + return 0; + + msleep(UNIC_LINK_STATUS_MS); + } while (++i < UNIC_MAC_LINK_STATUS_NUM); + + unic_warn(unic_dev, "query mac link status timeout, en = %d.\n", en); + return -EBUSY; +} + +static int unic_enable_serdes_lb(struct unic_dev *unic_dev, int loop_type) +{ + int ret; + + ret = unic_mac_cfg(unic_dev, true); + if (ret) + return ret; + + ret = unic_set_lb_mode(unic_dev, true, loop_type); + if (ret) + return ret; + + return unic_lb_link_status_wait(unic_dev, true); +} + +static int unic_disable_serdes_lb(struct unic_dev *unic_dev, int loop_type) +{ + int ret; + + ret = unic_set_lb_mode(unic_dev, false, loop_type); + if (ret) + return ret; + + ret = unic_mac_cfg(unic_dev, false); + if (ret) + return ret; + + return unic_lb_link_status_wait(unic_dev, false); +} + +static int unic_set_serdes_lb(struct unic_dev *unic_dev, bool en, int loop_type) +{ + if (!unic_dev_parallel_serdes_lb_supported(unic_dev)) + return -EOPNOTSUPP; + + return en ? 
unic_enable_serdes_lb(unic_dev, loop_type) : + unic_disable_serdes_lb(unic_dev, loop_type); +} + +static int unic_set_app_lb(struct unic_dev *unic_dev, bool en) +{ + int ret; + + if (!unic_dev_app_lb_supported(unic_dev)) + return -EOPNOTSUPP; + + ret = unic_mac_cfg(unic_dev, en); + if (ret) + return ret; + + return unic_lb_link_status_wait(unic_dev, en); +} + +static int unic_lb_config(struct net_device *ndev, int loop_type, bool en, + struct unic_promisc_en *promisc_en) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret = 0; + + switch (loop_type) { + case UNIC_LB_APP: + ret = unic_set_app_lb(unic_dev, en); + break; + case UNIC_LB_SERIAL_SERDES: + case UNIC_LB_PARALLEL_SERDES: + ret = unic_set_serdes_lb(unic_dev, en, loop_type); + break; + case UNIC_LB_EXTERNAL: + break; + default: + unic_info(unic_dev, + "loop_type is not supported, loop_type = %d.\n", + loop_type); + return -EOPNOTSUPP; + } + + if (ret && ret != -EOPNOTSUPP) + unic_err(unic_dev, + "lb_config return error, ret = %d, enable = %d.\n", + ret, en); + + unic_set_promisc_mode(unic_dev, promisc_en); + + return ret; +} + +static int unic_selftest_prepare(struct net_device *ndev, bool if_running, + u8 autoneg) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret; + + ret = if_running ? unic_net_stop(ndev) : 0; + if (ret) { + unic_err(unic_dev, "failed to stop net, ret = %d.\n", ret); + return ret; + } + + ret = autoneg ? unic_set_mac_autoneg(unic_dev, false) : 0; + if (ret) { + unic_err(unic_dev, "failed to set mac autoneg, ret = %d.\n", ret); + goto restore_net; + } + + set_bit(UNIC_STATE_TESTING, &unic_dev->state); + + return 0; + +restore_net: + ret = if_running ? 
unic_net_open(ndev) : 0; + if (ret) + unic_err(unic_dev, "failed to restore net, ret = %d.\n", ret); + + return ret; +} + +static void unic_eth_lb_check_skb_data(struct unic_channel *c, + struct sk_buff *skb) +{ + struct unic_dev *unic_dev = netdev_priv(skb->dev); + struct net_device *ndev = skb->dev; + struct unic_rq *rq = c->rq; + u32 len = skb_headlen(skb); + u8 *packet = skb->data; + struct ethhdr *ethh; + u32 i; + + if (ZERO_OR_NULL_PTR(packet)) { + unic_err(unic_dev, "eth packet content is null.\n"); + goto out; + } + + if (len != UNIC_LB_TEST_PACKET_SIZE) { + unic_err(unic_dev, + "eth test packet size error, len = %u.\n", len); + goto out; + } + + ethh = (struct ethhdr *)(skb->data - ETH_HLEN); + if (memcmp(ethh->h_dest, ndev->dev_addr, ETH_ALEN) || + memcmp(ethh->h_source, ndev->dev_addr, ETH_ALEN) || + ethh->h_proto != htons(ETH_P_ARP)) { + unic_err(unic_dev, "eth segment error.\n"); + goto out; + } + + for (i = 0; i < len; i++) { + if (packet[i] != (i & 0xff)) { + unic_err(unic_dev, + "eth packet content error, i = %u.\n", i); + goto out; + } + } + + dev_kfree_skb_any(skb); + return; +out: + /* Due to the fact that incorrect packet content in the poll rx process + * can also increase packet and byte counts, the statistics should be + * subtracted when counting if the packets are incorrect. 
+ */ + u64_stats_update_begin(&rq->syncp); + rq->stats.packets--; + rq->stats.bytes -= skb->len; + u64_stats_update_end(&rq->syncp); + print_hex_dump(KERN_ERR, "eth selftest:", DUMP_PREFIX_OFFSET, + UNIC_HEX, 1, skb->data, len, true); + dev_kfree_skb_any(skb); +} + +static u32 unic_lb_check_rx(struct unic_dev *unic_dev, u32 budget, + struct sk_buff *skb) +{ + struct unic_channel *c; + u64 pre_pkt, pre_byte; + u32 pkt_total = 0; + u32 i; + + for (i = 0; i < unic_dev->channels.num; i++) { + c = &unic_dev->channels.c[i]; + pre_pkt = c->rq->stats.packets; + pre_byte = c->rq->stats.bytes; + + preempt_disable(); + unic_poll_rx(c, budget, unic_eth_lb_check_skb_data); + preempt_enable(); + + pkt_total += (c->rq->stats.packets - pre_pkt); + c->rq->stats.packets = pre_pkt; + c->rq->stats.bytes = pre_byte; + } + return pkt_total; +} + +static void unic_eth_lb_setup_skb(struct sk_buff *skb) +{ + struct net_device *ndev = skb->dev; + struct ethhdr *ethh; + u8 *packet; + u32 i; + + skb_reserve(skb, NET_IP_ALIGN); + ethh = skb_put(skb, sizeof(struct ethhdr)); + packet = skb_put(skb, UNIC_LB_TEST_PACKET_SIZE); + + memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + memcpy(ethh->h_source, ndev->dev_addr, ETH_ALEN); + + ethh->h_proto = htons(ETH_P_ARP); + + for (i = 0; i < UNIC_LB_TEST_PACKET_SIZE; i++) + packet[i] = (i & 0xff); +} + +static struct sk_buff *unic_lb_skb_prepare(struct net_device *ndev) +{ + u32 size = UNIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN; + struct sk_buff *skb; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return NULL; + + skb->dev = ndev; + skb->queue_mapping = UNIC_LB_TEST_CHANNEL_ID; + + unic_eth_lb_setup_skb(skb); + + return skb; +} + +static void unic_lb_poll_tx(struct unic_dev *unic_dev, struct sk_buff *skb) +{ + u64 pre_pkt, pre_byte; + struct unic_sq *sq; + + sq = unic_dev->channels.c[skb->queue_mapping].sq; + + pre_pkt = sq->stats.packets; + pre_byte = sq->stats.bytes; + + unic_poll_tx(sq, 0); + if (sq->pi != sq->ci) { + unic_err(unic_dev, 
"cqe error, sp pi doesn't match sp ci.\n"); + kfree_skb(skb); + } + + sq->stats.packets = pre_pkt; + sq->stats.bytes = pre_byte; +} + +static int unic_lb_run_test(struct net_device *ndev, int loop_mode) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct sk_buff *skb; + netdev_tx_t tx_ret; + int ret_val = 0; + u32 i, cnt = 0; + + /* Avoid loopback failure caused by receiving packets after mac_en + * takes effect but before loopback_en takes effect. + */ + for (i = 0; i < unic_dev->channels.num; i++) + unic_clear_rq(unic_dev->channels.c[i].rq); + + skb = unic_lb_skb_prepare(ndev); + if (!skb) { + unic_err(unic_dev, "failed to alloc skb.\n"); + return -ENOMEM; + } + + /* Used to handle the release of skb in different situations of xmit. + * 1. skb is released through poll_tx and kfree in success situation. + * 2. skb is released through dev_kfree_skb_any in dropped situation. + * 3. skb is released through kfree in busy situation. + */ + skb_get(skb); + + tx_ret = unic_start_xmit(skb, ndev); + if (tx_ret == NETDEV_TX_OK) { + cnt++; + } else { + kfree_skb(skb); + unic_err(unic_dev, "failed to xmit loopback skb, ret = %d.\n", + tx_ret); + } + + if (cnt != UNIC_LB_TEST_PKT_NUM) { + ret_val = -EBUSY; + unic_err(unic_dev, "mode %d sent fail, cnt = %u, budget = %d.\n", + loop_mode, cnt, UNIC_LB_TEST_PKT_NUM); + goto out; + } + + /* Allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + cnt = unic_lb_check_rx(unic_dev, UNIC_LB_TEST_PKT_NUM, skb); + if (cnt != UNIC_LB_TEST_PKT_NUM) { + ret_val = -EINVAL; + unic_err(unic_dev, "mode %d recv fail, cnt = %u, budget = %d.\n", + loop_mode, cnt, UNIC_LB_TEST_PKT_NUM); + } + +out: + unic_lb_poll_tx(unic_dev, skb); + kfree_skb(skb); + return ret_val; +} + +static void unic_external_selftest_prepare(struct net_device *ndev) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (test_and_set_bit(UNIC_STATE_DOWN, &unic_dev->state)) + return; + + netif_carrier_off(ndev); + netif_tx_disable(ndev); + + 
unic_disable_channels(unic_dev); + + unic_clear_all_queue(ndev); + + unic_reset_tx_queue(ndev); +} + +static void unic_do_external_selftest(struct net_device *ndev, int *st_param, + struct ethtool_test *eth_test, u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_vport *vport = &unic_dev->vport; + struct unic_promisc_en promisc_all_en; + struct unic_promisc_en promisc_en; + + if (!st_param[UNIC_LB_EXTERNAL]) + return; + + unic_fill_promisc_en(&promisc_en, + unic_dev->netdev_flags | vport->last_promisc_flags); + memset(&promisc_all_en, 1, sizeof(promisc_all_en)); + data[UNIC_LB_EXTERNAL] = unic_lb_config(ndev, UNIC_LB_EXTERNAL, + true, &promisc_all_en); + if (!data[UNIC_LB_EXTERNAL]) + data[UNIC_LB_EXTERNAL] = unic_lb_run_test(ndev, UNIC_LB_EXTERNAL); + unic_lb_config(ndev, UNIC_LB_EXTERNAL, false, &promisc_en); + + if (data[UNIC_LB_EXTERNAL]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; +} + +static void unic_external_selftest_restore(struct net_device *ndev) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (unic_resetting(ndev)) + return; + + if (!test_bit(UNIC_STATE_DOWN, &unic_dev->state)) + return; + + unic_clear_all_queue(ndev); + + unic_enable_channels(unic_dev); + + netif_tx_wake_all_queues(ndev); + + clear_bit(UNIC_STATE_DOWN, &unic_dev->state); +} + +static void unic_do_selftest(struct net_device *ndev, int *st_param, + struct ethtool_test *eth_test, u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_vport *vport = &unic_dev->vport; + struct unic_promisc_en promisc_all_en; + struct unic_promisc_en promisc_en; + int lb_type; + + unic_fill_promisc_en(&promisc_en, + unic_dev->netdev_flags | vport->last_promisc_flags); + memset(&promisc_all_en, 1, sizeof(promisc_all_en)); + for (lb_type = UNIC_LB_APP; lb_type < UNIC_LB_EXTERNAL; lb_type++) { + if (!st_param[lb_type]) + continue; + + data[lb_type] = unic_lb_config(ndev, lb_type, true, + 
&promisc_all_en); + if (!data[lb_type]) + data[lb_type] = unic_lb_run_test(ndev, lb_type); + + unic_lb_config(ndev, lb_type, false, &promisc_en); + + if (data[lb_type]) + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static void unic_selftest_restore(struct net_device *ndev, bool if_running, + u8 autoneg) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + int ret; + + clear_bit(UNIC_STATE_TESTING, &unic_dev->state); + + ret = autoneg ? unic_set_mac_autoneg(unic_dev, true) : 0; + if (ret) + unic_err(unic_dev, "failed to restore mac autoneg, ret = %d.\n", + ret); + + ret = if_running ? unic_net_open(ndev) : 0; + if (ret) + unic_err(unic_dev, "failed to restore unic ndev, ret = %d.\n", + ret); +} + +static bool unic_self_test_is_unexecuted(struct net_device *ndev, + struct ethtool_test *eth_test, + u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + + if (unic_dev_ubl_supported(unic_dev)) { + unic_err(unic_dev, + "failed to self test, due to in ub mode.\n"); + return true; + } + + if (test_bit(UNIC_STATE_DEACTIVATE, &unic_dev->state)) { + unic_err(unic_dev, + "failed to self test, due to dev deactivate.\n"); + return true; + } + + if (unic_resetting(ndev)) { + unic_err(unic_dev, + "failed to self test, due to dev resetting.\n"); + return true; + } + + if (!(eth_test->flags & ETH_TEST_FL_OFFLINE)) { + unic_err(unic_dev, + "failed to self test, due to disable test flags.\n"); + return true; + } + + if (unic_dev->loopback_flags & UNIC_SUPPORT_EXTERNAL_LB) + data[UNIC_LB_EXTERNAL] = UNIC_LB_TEST_UNEXECUTED; + + return false; +} + +int unic_get_selftest_count(struct unic_dev *unic_dev) +{ + int count = 0; + + /* clear loopback bit flags at first */ + unic_dev->loopback_flags &= (~UNIC_LB_TEST_FLAGS); + + if (unic_dev_app_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_APP_LB; + count++; + } + + if (unic_dev_serial_serdes_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_SERIAL_SERDES_LB; + count++; + } + + if 
(unic_dev_parallel_serdes_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_PARALLEL_SERDES_LB; + count++; + } + + if (unic_dev_external_lb_supported(unic_dev)) { + unic_dev->loopback_flags |= UNIC_SUPPORT_EXTERNAL_LB; + count++; + } + + return count == 0 ? -EOPNOTSUPP : UNIC_LB_MAX; +} + +void unic_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct unic_dev *unic_dev = netdev_priv(ndev); + struct unic_mac *mac = &unic_dev->hw.mac; + bool if_running = netif_running(ndev); + int st_param[UNIC_LB_MAX]; + int ret, i; + + ret = unic_get_selftest_count(unic_dev); + if (ret <= 0) { + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + /* initialize the loopback test result, avoiding mark not support loopback + * test as PASS. + */ + for (i = 0; i < UNIC_LB_MAX; i++) + data[i] = -EOPNOTSUPP; + + if (unic_self_test_is_unexecuted(ndev, eth_test, data)) { + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + unic_set_selftest_param(unic_dev, st_param); + + if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { + if (if_running) { + unic_external_selftest_prepare(ndev); + unic_do_external_selftest(ndev, st_param, eth_test, data); + unic_external_selftest_restore(ndev); + } else { + unic_warn(unic_dev, + "not to run external selftest, due to link down.\n"); + } + } + + ret = unic_selftest_prepare(ndev, if_running, mac->autoneg); + if (ret) + return; + + unic_do_selftest(ndev, st_param, eth_test, data); + unic_selftest_restore(ndev, if_running, mac->autoneg); +} diff --git a/drivers/ub/ubase/ubase_ctrlq_tp.h b/drivers/net/ub/unic/unic_lb.h similarity index 34% rename from drivers/ub/ubase/ubase_ctrlq_tp.h rename to drivers/net/ub/unic/unic_lb.h index 3d647d5d1625c905d1824818037e70be6d2e32bf..97e1e87763ef878eb6322d917b667fe278a24964 100644 --- a/drivers/ub/ubase/ubase_ctrlq_tp.h +++ b/drivers/net/ub/unic/unic_lb.h @@ -4,13 +4,14 @@ * */ -#ifndef __UBASE_CTRLQ_TP_H__ -#define __UBASE_CTRLQ_TP_H__ +#ifndef 
__UNIC_LB_H__ +#define __UNIC_LB_H__ -#include "ubase_dev.h" +#include +#include -int ubase_notify_tp_fd_by_ctrlq(struct ubase_dev *udev, u32 tp_num); -void ubase_dev_uninit_rack_tp_tpg(struct ubase_dev *udev); -int ubase_dev_init_rack_tp_tpg(struct ubase_dev *udev); +int unic_get_selftest_count(struct unic_dev *unic_dev); +void unic_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data); #endif diff --git a/drivers/net/ub/unic/unic_mac.c b/drivers/net/ub/unic/unic_mac.c new file mode 100644 index 0000000000000000000000000000000000000000..e3eb43f366dfdbb913c89b35de85479604fee939 --- /dev/null +++ b/drivers/net/ub/unic/unic_mac.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei UNIC Linux driver + * Copyright (c) 2024-2025 Hisilicon Limited. + * + */ + +#define dev_fmt(fmt) "unic: (pid %d) " fmt, current->pid + +#include +#include + +#include "unic.h" +#include "unic_comm_addr.h" +#include "unic_cmd.h" +#include "unic_mac.h" + +int unic_cfg_mac_address(struct unic_dev *unic_dev, u8 *mac_addr) +{ + struct unic_comm_addr_node *new_node, *old_node; + struct unic_vport *vport = &unic_dev->vport; + u8 *old_mac = unic_dev->hw.mac.mac_addr; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + struct list_head *list; + + list = &vport->addr_tbl.uc_mac_list; + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + new_node = unic_comm_find_addr_node(list, mac_addr, + UNIC_COMM_ADDR_NO_MASK); + if (new_node) { + if (new_node->state == UNIC_COMM_ADDR_TO_DEL) + new_node->state = UNIC_COMM_ADDR_ACTIVE; + + /* make sure the new addr is in the list head, avoid dev + * addr may be not re-added into mac table for the umv space + * limitation after reset. 
+ */ + new_node->is_pfc = 1; + list_move(&new_node->node, list); + } else { + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) { + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + return -ENOMEM; + } + + new_node->state = UNIC_COMM_ADDR_TO_ADD; + new_node->is_pfc = 1; + ether_addr_copy(new_node->mac_addr, mac_addr); + list_add_tail(&new_node->node, list); + } + + ether_addr_copy(unic_addr, old_mac); + old_node = unic_comm_find_addr_node(list, unic_addr, + UNIC_COMM_ADDR_NO_MASK); + if (old_node) { + if (old_node->state == UNIC_COMM_ADDR_TO_ADD) { + list_del(&old_node->node); + kfree(old_node); + } else { + old_node->state = UNIC_COMM_ADDR_TO_DEL; + old_node->is_pfc = 0; + } + } + + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + ether_addr_copy(unic_dev->hw.mac.mac_addr, mac_addr); + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + + return 0; +} + +static int unic_add_mac_addr_common(struct unic_vport *vport, u8 *mac_addr, + enum unic_mac_addr_type mac_type, + u8 is_pfc) +{ + struct unic_mac_promisc { + const char *type_str; + u8 promisc_mode; + } promisc[] = { + [UNIC_MAC_ADDR_UC] = {"unicast", UNIC_OVERFLOW_UP_MAC}, + [UNIC_MAC_ADDR_MC] = {"multicast", UNIC_OVERFLOW_MP_MAC}, + }; + + struct auxiliary_device *adev = vport->back->comdev.adev; + struct unic_dev *unic_dev = dev_get_drvdata(&adev->dev); + struct unic_mac_tbl_entry_cmd resp = {0}; + struct unic_mac_tbl_entry_cmd req = {0}; + u8 format_mac[UNIC_FORMAT_MAC_LEN]; + struct ubase_cmd_buf in, out; + int ret; + + req.mac_type = mac_type; + req.is_pfc = is_pfc; + ether_addr_copy(req.mac_addr, mac_addr); + unic_comm_format_mac_addr(format_mac, mac_addr); + ubase_fill_inout_buf(&in, UBASE_OPC_ADD_MAC_TBL, false, sizeof(req), &req); + ubase_fill_inout_buf(&out, UBASE_OPC_ADD_MAC_TBL, true, sizeof(resp), &resp); + ret = ubase_cmd_send_inout(adev, &in, &out); + ret = ret ? 
ret : -resp.resp_code; + if (!ret) { + return 0; + } else if (ret == -EEXIST && mac_type == UNIC_MAC_ADDR_UC) { + unic_info(unic_dev, "mac addr(%s) exists.\n", format_mac); + return -EEXIST; + } else if (ret != -ENOSPC) { + unic_err(unic_dev, + "failed to add mac addr(%s), ret = %d.\n", format_mac, + ret); + return ret; + } + + if (!(vport->overflow_promisc_flags & promisc[mac_type].promisc_mode)) + unic_warn(unic_dev, "%s mac table is full.\n", + promisc[mac_type].type_str); + + return ret; +} + +static int unic_del_mac_addr_common(struct unic_vport *vport, u8 *mac_addr, + enum unic_mac_addr_type mac_type, + u8 is_pfc) +{ + struct auxiliary_device *adev = vport->back->comdev.adev; + struct unic_mac_tbl_entry_cmd resp = {0}; + struct unic_mac_tbl_entry_cmd req = {0}; + u8 format_mac[UNIC_FORMAT_MAC_LEN]; + struct ubase_cmd_buf in, out; + u32 time_out; + int ret; + + req.mac_type = mac_type; + req.is_pfc = is_pfc; + ether_addr_copy(req.mac_addr, mac_addr); + ubase_fill_inout_buf(&in, UBASE_OPC_DEL_MAC_TBL, false, sizeof(req), &req); + ubase_fill_inout_buf(&out, UBASE_OPC_DEL_MAC_TBL, true, sizeof(resp), &resp); + time_out = unic_cmd_timeout(vport->back); + ret = ubase_cmd_send_inout_ex(adev, &in, &out, time_out); + ret = ret ? 
ret : -resp.resp_code; + if (ret) { + unic_comm_format_mac_addr(format_mac, mac_addr); + dev_err(adev->dev.parent, "failed to rm mac addr(%s), ret = %d.\n", + format_mac, ret); + } + + return ret; +} + +static void unic_sync_mac_list(struct unic_vport *vport, struct list_head *list, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unic_add_mac_addr_common(vport, mac_node->mac_addr, mac_type, + mac_node->is_pfc); + if (!ret) { + mac_node->state = UNIC_COMM_ADDR_ACTIVE; + } else { + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + if ((mac_type == UNIC_MAC_ADDR_UC && ret != -EEXIST) || + (mac_type == UNIC_MAC_ADDR_MC && ret != -ENOSPC)) + break; + } + } +} + +static void unic_unsync_mac_list(struct unic_vport *vport, + struct list_head *list, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unic_del_mac_addr_common(vport, mac_node->mac_addr, mac_type, + mac_node->is_pfc); + if (!ret) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + break; + } + } +} + +static void unic_sync_uc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_sync_mac_list(vport, list, UNIC_MAC_ADDR_UC); +} + +static void unic_sync_mc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_sync_mac_list(vport, list, UNIC_MAC_ADDR_MC); +} + +static void unic_unsync_uc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_unsync_mac_list(vport, list, UNIC_MAC_ADDR_UC); +} + +static void unic_unsync_mc_mac_list(struct unic_vport *vport, + struct list_head *list) +{ + unic_unsync_mac_list(vport, list, UNIC_MAC_ADDR_MC); +} + +static void unic_sync_mac_table_common(struct unic_vport *vport, + enum unic_mac_addr_type mac_type) +{ + void 
(*unsync)(struct unic_vport *vport, struct list_head *list); + void (*sync)(struct unic_vport *vport, struct list_head *list); + struct list_head *mac_list; + bool all_added; + + if (mac_type == UNIC_MAC_ADDR_UC) { + mac_list = &vport->addr_tbl.uc_mac_list; + sync = unic_sync_uc_mac_list; + unsync = unic_unsync_uc_mac_list; + } else { + mac_list = &vport->addr_tbl.mc_mac_list; + sync = unic_sync_mc_mac_list; + unsync = unic_unsync_mc_mac_list; + } + + all_added = unic_comm_sync_addr_table(vport, mac_list, + &vport->addr_tbl.mac_list_lock, + sync, unsync); + if (mac_type == UNIC_MAC_ADDR_UC) { + if (all_added) + vport->overflow_promisc_flags &= ~UNIC_OVERFLOW_UP_MAC; + else + vport->overflow_promisc_flags |= UNIC_OVERFLOW_UP_MAC; + } else { + if (all_added) + vport->overflow_promisc_flags &= ~UNIC_OVERFLOW_MP_MAC; + else + vport->overflow_promisc_flags |= UNIC_OVERFLOW_MP_MAC; + } +} + +void unic_sync_mac_table(struct unic_dev *unic_dev) +{ + struct unic_act_info *act_info = &unic_dev->act_info; + struct unic_vport *vport = &unic_dev->vport; + + if (!mutex_trylock(&act_info->mutex)) + return; + + if (act_info->deactivate) + goto out; + + if (!test_and_clear_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) + goto out; + + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_UC); + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_MC); + +out: + mutex_unlock(&act_info->mutex); +} + +static int unic_update_mac_list(struct net_device *netdev, + enum UNIC_COMM_ADDR_STATE state, + enum unic_mac_addr_type mac_type, + const u8 *mac_addr) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + struct unic_vport *vport = &unic_dev->vport; + char format_mac[UNIC_FORMAT_MAC_LEN]; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + struct list_head *list; + bool valid; + int ret; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + if (mac_type == UNIC_MAC_ADDR_UC) { + list = &vport->addr_tbl.uc_mac_list; + valid = is_valid_ether_addr(mac_addr); + } else { + list = 
&vport->addr_tbl.mc_mac_list; + valid = is_multicast_ether_addr(mac_addr); + } + + unic_comm_format_mac_addr(format_mac, mac_addr); + if (!valid) { + unic_err(unic_dev, "failed to %s %s mac addr(%s).\n", + state == UNIC_COMM_ADDR_TO_ADD ? "add" : "del", + mac_type == UNIC_MAC_ADDR_UC ? "uc" : "mc", format_mac); + return -EINVAL; + } + + ether_addr_copy(unic_addr, mac_addr); + ret = unic_comm_update_addr_list(list, &vport->addr_tbl.mac_list_lock, + state, unic_addr); + if (ret) { + unic_err(unic_dev, + "failed to update mac addr(%s). mac_type = %s.\n", + format_mac, mac_type == UNIC_MAC_ADDR_UC ? "uc" : "mc"); + return ret; + } + + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + return ret; +} + +int unic_add_uc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_ADD, + UNIC_MAC_ADDR_UC, mac_addr); +} + +int unic_del_uc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + if (ether_addr_equal(mac_addr, netdev->dev_addr)) + return 0; + + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_DEL, + UNIC_MAC_ADDR_UC, mac_addr); +} + +int unic_add_mc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_ADD, + UNIC_MAC_ADDR_MC, mac_addr); +} + +int unic_del_mc_mac(struct net_device *netdev, const u8 *mac_addr) +{ + return unic_update_mac_list(netdev, UNIC_COMM_ADDR_TO_DEL, + UNIC_MAC_ADDR_MC, mac_addr); +} + +static int unic_get_mac_addr(struct unic_dev *unic_dev, u8 *p) +{ + struct unic_query_mac_addr_resp resp = {0}; + struct ubase_cmd_buf in, out; + int ret; + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_MAC, true, 0, NULL); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_MAC, false, ETH_ALEN, + &resp); + ret = ubase_cmd_send_inout(unic_dev->comdev.adev, &in, &out); + if (ret) { + dev_err(unic_dev->comdev.adev->dev.parent, + "failed to get mac address, ret = %d.\n", ret); + return ret; + } + + ether_addr_copy(p, resp.mac); + + return 0; +} + 
+int unic_init_mac_addr(struct unic_dev *unic_dev) +{ + struct net_device *netdev = unic_dev->comdev.netdev; + char format_mac[UNIC_FORMAT_MAC_LEN]; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + int ret; + + ret = unic_get_mac_addr(unic_dev, unic_addr); + if (ret) + return ret; + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(unic_addr)) { + eth_hw_addr_random(netdev); + ether_addr_copy(unic_addr, netdev->dev_addr); + unic_comm_format_mac_addr(format_mac, unic_addr); + dev_warn(unic_dev->comdev.adev->dev.parent, + "using random MAC address %s.\n", format_mac); + } else if (!ether_addr_equal(netdev->dev_addr, unic_addr)) { + dev_addr_set(netdev, unic_addr); + ether_addr_copy(netdev->perm_addr, unic_addr); + } else { + return 0; + } + + if (!unic_dev_cfg_mac_supported(unic_dev)) { + ether_addr_copy(unic_dev->hw.mac.mac_addr, unic_addr); + return 0; + } + + ret = unic_cfg_mac_address(unic_dev, unic_addr); + if (ret) { + dev_err(unic_dev->comdev.adev->dev.parent, + "failed to cfg MAC address, ret = %d!\n", ret); + return ret; + } + + ubase_set_dev_mac(unic_dev->comdev.adev, netdev->dev_addr, + netdev->addr_len); + + return 0; +} + +void unic_uninit_mac_addr(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + struct unic_comm_addr_node *mac_node; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + ether_addr_copy(unic_addr, unic_dev->comdev.netdev->dev_addr); + mac_node = unic_comm_find_addr_node(&vport->addr_tbl.uc_mac_list, + unic_addr, UNIC_COMM_ADDR_NO_MASK); + if (mac_node) { + if (mac_node->state == UNIC_COMM_ADDR_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = UNIC_COMM_ADDR_TO_DEL; + } + } + + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); +} + +static void unic_uninit_mac_table_common(struct unic_dev *unic_dev, + enum unic_mac_addr_type mac_type) +{ + struct 
unic_vport *vport = &unic_dev->vport; + struct unic_comm_addr_node *mac_node, *tmp; + struct list_head tmp_del_list, *list; + + INIT_LIST_HEAD(&tmp_del_list); + + list = (mac_type == UNIC_MAC_ADDR_UC) ? + &vport->addr_tbl.uc_mac_list : &vport->addr_tbl.mc_mac_list; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case UNIC_COMM_ADDR_TO_DEL: + case UNIC_COMM_ADDR_ACTIVE: + list_move_tail(&mac_node->node, &tmp_del_list); + break; + case UNIC_COMM_ADDR_TO_ADD: + list_del(&mac_node->node); + kfree(mac_node); + break; + default: + break; + } + } + + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + + unic_unsync_mac_list(vport, &tmp_del_list, mac_type); + + if (!list_empty(&tmp_del_list)) + dev_warn(unic_dev->comdev.adev->dev.parent, + "uninit mac list not completely.\n"); + + list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } +} + +void unic_uninit_mac_table(struct unic_dev *unic_dev) +{ + unic_uninit_mac_table_common(unic_dev, UNIC_MAC_ADDR_UC); + unic_uninit_mac_table_common(unic_dev, UNIC_MAC_ADDR_MC); +} + +static void unic_deactivate_unsync_mac_list(struct unic_vport *vport, + struct list_head *list, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unic_del_mac_addr_common(vport, mac_node->mac_addr, + mac_type, mac_node->is_pfc); + if (ret) + break; + + if (mac_node->state == UNIC_COMM_ADDR_ACTIVE) { + mac_node->state = UNIC_COMM_ADDR_TO_ADD; + } else if (mac_node->state == UNIC_COMM_ADDR_TO_DEL) { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +static void unic_deactivate_update_mac_state(struct unic_comm_addr_node *mac_node, + enum UNIC_COMM_ADDR_STATE state) +{ + switch (state) { + case UNIC_COMM_ADDR_TO_ADD: + if (mac_node->state == UNIC_COMM_ADDR_TO_DEL) { + 
list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == UNIC_COMM_ADDR_ACTIVE) { + mac_node->state = UNIC_COMM_ADDR_TO_ADD; + } + break; + case UNIC_COMM_ADDR_TO_DEL: + mac_node->state = UNIC_COMM_ADDR_ACTIVE; + break; + default: + break; + } +} + +static void unic_deactivate_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct unic_comm_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = unic_comm_find_addr_node(mac_list, + mac_node->unic_addr, + UNIC_COMM_ADDR_NO_MASK); + if (new_node) { + unic_deactivate_update_mac_state(new_node, + mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_move_tail(&mac_node->node, mac_list); + } + } +} + +static void unic_deactivate_sync_mac_table(struct unic_vport *vport, + enum unic_mac_addr_type mac_type) +{ + struct unic_comm_addr_node *mac_node, *tmp, *new_node; + struct list_head *mac_list, tmp_list; + + INIT_LIST_HEAD(&tmp_list); + + if (mac_type == UNIC_MAC_ADDR_UC) + mac_list = &vport->addr_tbl.uc_mac_list; + else + mac_list = &vport->addr_tbl.mc_mac_list; + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, mac_list, node) { + switch (mac_node->state) { + case UNIC_COMM_ADDR_TO_DEL: + list_move_tail(&mac_node->node, &tmp_list); + break; + case UNIC_COMM_ADDR_ACTIVE: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + memcpy(new_node->unic_addr, mac_node->unic_addr, + UNIC_ADDR_LEN); + new_node->state = mac_node->state; + new_node->is_pfc = mac_node->is_pfc; + list_add_tail(&new_node->node, &tmp_list); + break; + default: + break; + } + } + +stop_traverse: + spin_unlock_bh(&vport->addr_tbl.mac_list_lock); + unic_deactivate_unsync_mac_list(vport, &tmp_list, mac_type); + + spin_lock_bh(&vport->addr_tbl.mac_list_lock); + unic_deactivate_sync_from_del_list(&tmp_list, mac_list); + 
spin_unlock_bh(&vport->addr_tbl.mac_list_lock); +} + +void unic_deactivate_mac_table(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + + unic_deactivate_sync_mac_table(vport, UNIC_MAC_ADDR_UC); + unic_deactivate_sync_mac_table(vport, UNIC_MAC_ADDR_MC); + set_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); +} + +void unic_activate_mac_table(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + + clear_bit(UNIC_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_UC); + unic_sync_mac_table_common(vport, UNIC_MAC_ADDR_MC); +} diff --git a/drivers/net/ub/unic/unic_mac.h b/drivers/net/ub/unic/unic_mac.h new file mode 100644 index 0000000000000000000000000000000000000000..3c45d4ab368718965ddee2ce21e3614881c85eb8 --- /dev/null +++ b/drivers/net/ub/unic/unic_mac.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei UNIC Linux driver + * Copyright (c) 2024-2025 Hisilicon Limited. + * + */ + +#ifndef UNIC_MAC_H +#define UNIC_MAC_H + +enum unic_mac_addr_type { + UNIC_MAC_ADDR_UC, + UNIC_MAC_ADDR_MC, +}; + +int unic_cfg_mac_address(struct unic_dev *unic_dev, u8 *mac_addr); +int unic_add_uc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_del_uc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_add_mc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_del_mc_mac(struct net_device *netdev, const u8 *mac_addr); +int unic_init_mac_addr(struct unic_dev *unic_dev); +void unic_uninit_mac_addr(struct unic_dev *unic_dev); +void unic_sync_mac_table(struct unic_dev *unic_dev); +void unic_uninit_mac_table(struct unic_dev *unic_dev); +void unic_deactivate_mac_table(struct unic_dev *unic_dev); +void unic_activate_mac_table(struct unic_dev *unic_dev); + +#endif diff --git a/drivers/net/ub/unic/unic_netdev.c b/drivers/net/ub/unic/unic_netdev.c index a9d8240d59b0342bdab84f66439e40cb621b0a7e..566496e8eb13b4f17adc009319ef2d23c5c6ee6e 100644 --- 
a/drivers/net/ub/unic/unic_netdev.c +++ b/drivers/net/ub/unic/unic_netdev.c @@ -24,11 +24,13 @@ #include "unic_dev.h" #include "unic_event.h" #include "unic_hw.h" +#include "unic_ip.h" +#include "unic_mac.h" #include "unic_rx.h" #include "unic_tx.h" #include "unic_txrx.h" +#include "unic_vlan.h" #include "unic_netdev.h" -#include "unic_rack_ip.h" static int unic_netdev_set_tcs(struct net_device *netdev) { @@ -206,6 +208,7 @@ void unic_link_status_change(struct net_device *netdev, bool linkup) netif_tx_wake_all_queues(netdev); netif_carrier_on(netdev); unic_clear_fec_stats(unic_dev); + ubase_clear_eth_port_stats(unic_dev->comdev.adev); } } else { netif_carrier_off(netdev); @@ -331,6 +334,7 @@ int unic_net_open_no_link_change(struct net_device *netdev) netif_tx_wake_all_queues(netdev); netif_carrier_on(netdev); unic_clear_fec_stats(unic_dev); + ubase_clear_eth_port_stats(unic_dev->comdev.adev); } return 0; @@ -526,6 +530,42 @@ static int unic_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +static int unic_set_mac_address(struct net_device *netdev, void *addr) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + char format_mac[UNIC_FORMAT_MAC_LEN]; + u8 unic_addr[UNIC_ADDR_LEN] = {0}; + struct sockaddr *mac_addr = addr; + int ret; + + if (!unic_dev_cfg_mac_supported(unic_dev)) + return -EOPNOTSUPP; + + if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) { + unic_err(unic_dev, "invalid user mac.\n"); + return -EADDRNOTAVAIL; + } + + unic_comm_format_mac_addr(format_mac, mac_addr->sa_data); + if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { + unic_info(unic_dev, "already using mac(%s).\n", format_mac); + return 0; + } + + ether_addr_copy(unic_addr, mac_addr->sa_data); + ret = unic_cfg_mac_address(unic_dev, unic_addr); + if (ret) { + unic_err(unic_dev, "failed to set mac address, ret = %d.\n", ret); + return ret; + } + + dev_addr_set(netdev, unic_addr); + ubase_set_dev_mac(unic_dev->comdev.adev, netdev->dev_addr, + 
netdev->addr_len); + + return ret; +} + static u8 unic_get_netdev_flags(struct net_device *netdev) { struct unic_dev *unic_dev = netdev_priv(netdev); @@ -534,6 +574,8 @@ static u8 unic_get_netdev_flags(struct net_device *netdev) if (netdev->flags & IFF_PROMISC) { if (unic_dev_ubl_supported(unic_dev)) flags = UNIC_USER_UPE | UNIC_USER_MPE; + else + flags = UNIC_USER_UPE | UNIC_USER_MPE | UNIC_USER_BPE; } else if (netdev->flags & IFF_ALLMULTI) { flags = UNIC_USER_MPE; } @@ -545,9 +587,19 @@ static void unic_set_rx_mode(struct net_device *netdev) { struct unic_dev *unic_dev = netdev_priv(netdev); struct unic_vport *vport = &unic_dev->vport; + u8 promisc_changed; u8 new_flags; new_flags = unic_get_netdev_flags(netdev); + if (unic_dev_eth_mac_supported(unic_dev)) { + __dev_uc_sync(netdev, unic_add_uc_mac, unic_del_uc_mac); + __dev_mc_sync(netdev, unic_add_mc_mac, unic_del_mc_mac); + promisc_changed = unic_dev->netdev_flags ^ new_flags; + if (promisc_changed & UNIC_USER_UPE) + set_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, + &vport->state); + } + unic_dev->netdev_flags = new_flags; set_bit(UNIC_VPORT_STATE_PROMISC_CHANGE, &vport->state); @@ -604,6 +656,44 @@ static u16 unic_select_queue(struct net_device *netdev, struct sk_buff *skb, return netdev_pick_tx(netdev, skb, sb_dev); } +static int unic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vlan_id) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + + return unic_set_vlan_table(unic_dev, proto, vlan_id, true); +} + +static int unic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vlan_id) +{ + struct unic_dev *unic_dev = netdev_priv(netdev); + + return unic_set_vlan_table(unic_dev, proto, vlan_id, false); +} + +static int unic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct unic_dev *unic_dev = netdev_priv(netdev); + bool enable; + int ret; + + if (unic_dev_ubl_supported(unic_dev) || + 
!unic_dev_cfg_vlan_filter_supported(unic_dev)) + return 0; + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + ret = unic_set_vlan_filter(unic_dev, enable); + if (ret) + return ret; + } + + return 0; +} + static const struct net_device_ops unic_netdev_ops = { .ndo_get_stats64 = unic_get_stats64, .ndo_start_xmit = unic_start_xmit, @@ -611,8 +701,12 @@ static const struct net_device_ops unic_netdev_ops = { .ndo_change_mtu = unic_change_mtu, .ndo_open = unic_net_open, .ndo_stop = unic_net_stop, + .ndo_set_mac_address = unic_set_mac_address, .ndo_set_rx_mode = unic_set_rx_mode, .ndo_select_queue = unic_select_queue, + .ndo_vlan_rx_add_vid = unic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = unic_vlan_rx_kill_vid, + .ndo_set_features = unic_set_features, }; void unic_set_netdev_ops(struct net_device *netdev) @@ -625,15 +719,31 @@ static bool unic_port_dev_check(const struct net_device *dev) return dev->netdev_ops == &unic_netdev_ops; } -static int unic_ipaddr_event(struct notifier_block *nb, unsigned long event, - struct sockaddr *sa, struct net_device *ndev, - u16 ip_mask) +static int unic_eth_ip_event(struct sockaddr *sa, struct net_device *ndev, + u16 ip_mask, unsigned long event) { - struct unic_dev *unic_dev; - int ret; + enum UNIC_COMM_ADDR_STATE state; + int ret = NOTIFY_OK; - if (ndev->type != ARPHRD_UB) + switch (event) { + case NETDEV_UP: + state = UNIC_COMM_ADDR_TO_ADD; + break; + case NETDEV_DOWN: + state = UNIC_COMM_ADDR_TO_DEL; + break; + default: return NOTIFY_DONE; + } + + return ret; +} + +static int unic_ub_ip_event(struct sockaddr *sa, struct net_device *ndev, + u16 ip_mask, unsigned long event) +{ + struct unic_dev *unic_dev; + int ret; if (!unic_port_dev_check(ndev)) return NOTIFY_DONE; @@ -658,6 +768,18 @@ static int unic_ipaddr_event(struct notifier_block *nb, unsigned long event, return NOTIFY_OK; } +static int unic_ipaddr_event(struct notifier_block *nb, unsigned long event, + struct sockaddr 
*sa, struct net_device *ndev, + u16 ip_mask) +{ + if (ndev->type == ARPHRD_ETHER) + return unic_eth_ip_event(sa, ndev, ip_mask, event); + else if (ndev->type == ARPHRD_UB) + return unic_ub_ip_event(sa, ndev, ip_mask, event); + else + return NOTIFY_DONE; +} + static int unic_inetaddr_event(struct notifier_block *nb, unsigned long event, void *ptr) { diff --git a/drivers/net/ub/unic/unic_netdev.h b/drivers/net/ub/unic/unic_netdev.h index fd8fd16f1e922c614698e5838c7dac2749ecc8df..c3683361dd55df0a6c657bcad1c2c57fb843f360 100644 --- a/drivers/net/ub/unic/unic_netdev.h +++ b/drivers/net/ub/unic/unic_netdev.h @@ -20,6 +20,8 @@ void unic_link_status_update(struct unic_dev *unic_dev); int unic_register_ipaddr_notifier(void); void unic_unregister_ipaddr_notifier(void); void unic_link_status_change(struct net_device *netdev, bool linkup); +void unic_enable_channels(struct unic_dev *unic_dev); +void unic_disable_channels(struct unic_dev *unic_dev); int unic_query_link_status(struct unic_dev *unic_dev, u8 *link_status); #endif diff --git a/drivers/net/ub/unic/unic_qos_hw.c b/drivers/net/ub/unic/unic_qos_hw.c index bba05964156b664c9751fe75f45894c8b7918172..171de80357c3ba548a21d045133704fe6345368b 100644 --- a/drivers/net/ub/unic/unic_qos_hw.c +++ b/drivers/net/ub/unic/unic_qos_hw.c @@ -6,6 +6,9 @@ #define dev_fmt(fmt) "unic: (pid %d) " fmt, current->pid +#include + +#include "unic_cmd.h" #include "unic_hw.h" #include "unic_qos_hw.h" @@ -77,3 +80,52 @@ int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, return ret; } + +int unic_mac_pause_en_cfg(struct unic_dev *unic_dev, u32 tx_pause, u32 rx_pause) +{ + struct unic_cfg_mac_pause_cmd req = {0}; + struct ubase_cmd_buf in; + int ret; + + req.tx_en = cpu_to_le32(tx_pause); + req.rx_en = cpu_to_le32(rx_pause); + + ubase_fill_inout_buf(&in, UBASE_OPC_CFG_MAC_PAUSE_EN, false, sizeof(req), &req); + + ret = ubase_cmd_send_in(unic_dev->comdev.adev, &in); + if (ret) + dev_err(unic_dev->comdev.adev->dev.parent, + 
"failed to config pause on|off, ret = %d.\n", ret); + + return ret; +} + +int unic_pfc_pause_cfg(struct unic_dev *unic_dev, u8 pfc_en) +{ +#define UNIC_PFC_TX_RX_ON 1 +#define UNIC_PFC_TX_RX_OFF 0 + + struct unic_cfg_pfc_pause_cmd req = {0}; + struct ubase_cmd_buf in; + int ret; + + req.pri_bitmap = pfc_en; + + if (pfc_en) { + req.tx_enable = UNIC_PFC_TX_RX_ON; + req.rx_enable = UNIC_PFC_TX_RX_ON; + } else { + req.tx_enable = UNIC_PFC_TX_RX_OFF; + req.rx_enable = UNIC_PFC_TX_RX_OFF; + } + + ubase_fill_inout_buf(&in, UBASE_OPC_CFG_PFC_PAUSE_EN, false, sizeof(req), + &req); + + ret = ubase_cmd_send_in(unic_dev->comdev.adev, &in); + if (ret) + dev_err(unic_dev->comdev.adev->dev.parent, + "failed to config pfc enable, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/net/ub/unic/unic_qos_hw.h b/drivers/net/ub/unic/unic_qos_hw.h index 82822cc871b60f8920e5d695478359839acf3eaf..67c76d5bfaac7787e3ad3d504055b13bf854fcc6 100644 --- a/drivers/net/ub/unic/unic_qos_hw.h +++ b/drivers/net/ub/unic/unic_qos_hw.h @@ -17,5 +17,8 @@ int unic_query_vl_map(struct unic_dev *unic_dev, struct unic_config_vl_map_cmd *resp); int unic_config_vl_rate_limit(struct unic_dev *unic_dev, u64 *vl_maxrate, u16 vl_bitmap); +int unic_mac_pause_en_cfg(struct unic_dev *unic_dev, u32 tx_pause, + u32 rx_pause); +int unic_pfc_pause_cfg(struct unic_dev *unic_dev, u8 pfc_en); #endif diff --git a/drivers/net/ub/unic/unic_reset.c b/drivers/net/ub/unic/unic_reset.c index ff1239cc5131b160f2adadc65731bb99a3fff675..69995449b2dfffdf4f12d5706c958bacc1cf789a 100644 --- a/drivers/net/ub/unic/unic_reset.c +++ b/drivers/net/ub/unic/unic_reset.c @@ -9,8 +9,9 @@ #include "unic_cmd.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_ip.h" +#include "unic_mac.h" #include "unic_netdev.h" -#include "unic_rack_ip.h" #include "unic_reset.h" static void unic_dev_suspend(struct unic_dev *unic_dev) @@ -42,6 +43,9 @@ static void unic_reset_down(struct auxiliary_device *adev) * to prevent that concurrent 
deactivate event ubable to close promisc * when resetting */ + if (unic_dev_eth_mac_supported(priv)) + unic_deactivate_mac_table(priv); + ret = unic_activate_promisc_mode(priv, false); if (ret) unic_warn(priv, "failed to close promisc, ret = %d.\n", ret); @@ -90,7 +94,7 @@ static void unic_reset_init(struct auxiliary_device *adev) if (ret) goto err_unic_resume; - unic_query_rack_ip(adev); + unic_query_ip_by_ctrlq(adev); unic_start_period_task(netdev); if_running = netif_running(netdev); diff --git a/drivers/net/ub/unic/unic_stats.c b/drivers/net/ub/unic/unic_stats.c index 4647ba5e3e5ed682ec71ccff92f0a7a47efc8fb0..045ecf43ae67897ce20cc84a91562fca7e4cfa49 100644 --- a/drivers/net/ub/unic/unic_stats.c +++ b/drivers/net/ub/unic/unic_stats.c @@ -6,11 +6,13 @@ #include #include +#include #include #include "unic.h" #include "unic_dev.h" #include "unic_hw.h" +#include "unic_lb.h" #include "unic_netdev.h" #include "unic_stats.h" @@ -108,6 +110,110 @@ static const struct unic_stats_desc unic_rq_stats_str[] = { {"csum_complete", UNIC_RQ_STATS_FIELD_OFF(csum_complete)}, }; +static const struct unic_mac_stats_desc unic_eth_stats_str[] = { + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pause_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri0_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri1_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri2_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri3_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri4_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri5_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri6_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_pri7_pfc_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pause_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri0_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri1_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri2_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri3_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri4_pfc_pkts), + 
UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri5_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri6_pfc_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_pri7_pfc_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_64_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_65_127_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_128_255_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_256_511_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_512_1023_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1024_1518_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1519_2047_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_2048_4095_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_4096_8191_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_8192_9216_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_9217_12287_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_12288_16383_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1519_max_octets_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1519_max_octets_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_oversize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_jabber_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_bad_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_good_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_total_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_total_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_unicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_multicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_broadcast_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_fragment_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_undersize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_undermin_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_mac_ctrl_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_unfilter_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_1588_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_err_all_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_from_app_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_from_app_bad_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_64_octets_pkts), + 
UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_65_127_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_128_255_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_256_511_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_512_1023_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1024_1518_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1519_2047_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_2048_4095_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_4096_8191_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_8192_9216_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_9217_12287_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_12288_16383_octets_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1519_max_octets_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_1519_max_octets_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_oversize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_jabber_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_bad_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_bad_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_good_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_total_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_total_octets), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_unicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_multicast_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_broadcast_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_fragment_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_undersize_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_undermin_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_mac_ctrl_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_unfilter_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_symbol_err_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_fcs_err_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_send_app_good_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_send_app_bad_pkts), + + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_merge_frame_ass_error_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_merge_frame_ass_ok_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(tx_merge_frame_frag_count), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_ass_error_pkts), + 
UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_ass_ok_pkts), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_frag_count), + UNIC_ETH_MAC_STATS_FLD_CAP_1(rx_merge_frame_smd_error_pkts), +}; + static int unic_get_dfx_reg_num(struct unic_dev *unic_dev, u32 *reg_num, u32 reg_arr_size) { @@ -346,6 +452,36 @@ static u64 *unic_get_queues_stats(struct unic_dev *unic_dev, return data; } +static void unic_get_mac_stats(struct unic_dev *unic_dev, u64 *data) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + const struct unic_mac_stats_desc *stats_desc; + struct ubase_eth_mac_stats mac_stats = {0}; + u32 stats_num = caps->mac_stats_num; + u32 i, stats_desc_num; + u8 *stats; + int ret; + + if (unic_dev_ubl_supported(unic_dev)) + return; + + stats_desc = unic_eth_stats_str; + stats_desc_num = ARRAY_SIZE(unic_eth_stats_str); + ret = ubase_get_eth_port_stats(adev, &mac_stats); + if (ret) + return; + + stats = (u8 *)&mac_stats; + for (i = 0; i < stats_desc_num; i++) { + if (stats_desc[i].stats_num > stats_num) + continue; + + *data = UNIC_STATS_READ(stats, stats_desc[i].offset); + data++; + } +} + void unic_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { @@ -365,6 +501,7 @@ void unic_get_stats(struct net_device *netdev, p = unic_get_queues_stats(unic_dev, unic_rq_stats_str, ARRAY_SIZE(unic_rq_stats_str), UNIC_QUEUE_TYPE_RQ, p); + unic_get_mac_stats(unic_dev, p); } static u8 *unic_get_strings(u8 *data, const char *prefix, u32 num, @@ -405,20 +542,74 @@ static u8 *unic_get_queues_strings(struct unic_dev *unic_dev, u8 *data) return data; } +static void +unic_get_mac_strings(struct unic_dev *unic_dev, u8 *data, + const struct unic_mac_stats_desc *strs, u32 size) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + u32 stats_num = caps->mac_stats_num; + u32 i; + + if (!ubase_adev_mac_stats_supported(adev)) + return; + + for (i = 0; i < 
size; i++) { + if (strs[i].stats_num > stats_num) + continue; + + (void)snprintf(data, ETH_GSTRING_LEN, "%s", strs[i].desc); + data += ETH_GSTRING_LEN; + } +} + void unic_get_stats_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct unic_dev *unic_dev = netdev_priv(netdev); + char unic_test_strs[][ETH_GSTRING_LEN] = { + "App Loopback test ", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", + "External Loopback test", + }; u8 *p = data; switch (stringset) { case ETH_SS_STATS: p = unic_get_queues_strings(unic_dev, p); + if (unic_dev_ubl_supported(unic_dev)) + break; + + unic_get_mac_strings(unic_dev, p, unic_eth_stats_str, + ARRAY_SIZE(unic_eth_stats_str)); + break; + case ETH_SS_TEST: + memcpy(data, unic_test_strs, sizeof(unic_test_strs)); break; default: break; } } +static int unic_get_mac_count(struct unic_dev *unic_dev, + const struct unic_mac_stats_desc strs[], u32 size) +{ + struct auxiliary_device *adev = unic_dev->comdev.adev; + struct ubase_caps *caps = ubase_get_dev_caps(adev); + u32 stats_num = caps->mac_stats_num; + int count = 0; + u32 i; + + if (!ubase_adev_mac_stats_supported(adev)) + return 0; + + for (i = 0; i < size; i++) + if (strs[i].stats_num <= stats_num) + count++; + + return count; +} + int unic_get_sset_count(struct net_device *netdev, int stringset) { struct unic_dev *unic_dev = netdev_priv(netdev); @@ -429,6 +620,14 @@ int unic_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_STATS: count = ARRAY_SIZE(unic_sq_stats_str) * channel_num; count += ARRAY_SIZE(unic_rq_stats_str) * channel_num; + if (unic_dev_ubl_supported(unic_dev)) + break; + + count += unic_get_mac_count(unic_dev, unic_eth_stats_str, + ARRAY_SIZE(unic_eth_stats_str)); + break; + case ETH_SS_TEST: + count = unic_get_selftest_count(unic_dev); break; default: return -EOPNOTSUPP; @@ -450,6 +649,32 @@ static void unic_get_fec_stats_total(struct unic_dev *unic_dev, u8 stats_flags, fec_stats->corrected_bits.total = 
total->corr_bits; } +static void unic_get_fec_stats_lanes(struct unic_dev *unic_dev, u8 stats_flags, + struct ethtool_fec_stats *fec_stats) +{ + u8 lane_num = unic_dev->stats.fec_stats.lane_num; + u8 i; + + if (lane_num == 0 || lane_num > UNIC_FEC_STATS_MAX_LANE) { + unic_err(unic_dev, + "fec stats lane number is invalid, lane_num = %u.\n", + lane_num); + return; + } + + for (i = 0; i < lane_num; i++) { + if (stats_flags & UNIC_FEC_CORR_BLOCKS) + fec_stats->corrected_blocks.lanes[i] = + unic_dev->stats.fec_stats.lane[i].corr_blocks; + if (stats_flags & UNIC_FEC_UNCORR_BLOCKS) + fec_stats->uncorrectable_blocks.lanes[i] = + unic_dev->stats.fec_stats.lane[i].uncorr_blocks; + if (stats_flags & UNIC_FEC_CORR_BITS) + fec_stats->corrected_bits.lanes[i] = + unic_dev->stats.fec_stats.lane[i].corr_bits; + } +} + static void unic_get_ubl_fec_stats(struct unic_dev *unic_dev, struct ethtool_fec_stats *fec_stats) { @@ -469,6 +694,26 @@ static void unic_get_ubl_fec_stats(struct unic_dev *unic_dev, } } +static void unic_get_eth_fec_stats(struct unic_dev *unic_dev, + struct ethtool_fec_stats *fec_stats) +{ + u32 fec_mode = unic_dev->hw.mac.fec_mode; + u8 stats_flags = 0; + + switch (fec_mode) { + case ETHTOOL_FEC_RS: + stats_flags = UNIC_FEC_CORR_BLOCKS | UNIC_FEC_UNCORR_BLOCKS; + unic_get_fec_stats_total(unic_dev, stats_flags, fec_stats); + unic_get_fec_stats_lanes(unic_dev, UNIC_FEC_CORR_BITS, fec_stats); + break; + default: + unic_err(unic_dev, + "fec stats is not supported in mode(0x%x).\n", + fec_mode); + break; + } +} + void unic_get_fec_stats(struct net_device *ndev, struct ethtool_fec_stats *fec_stats) { @@ -483,4 +728,6 @@ void unic_get_fec_stats(struct net_device *ndev, if (unic_dev_ubl_supported(unic_dev)) unic_get_ubl_fec_stats(unic_dev, fec_stats); + else + unic_get_eth_fec_stats(unic_dev, fec_stats); } diff --git a/drivers/net/ub/unic/unic_stats.h b/drivers/net/ub/unic/unic_stats.h index 
623b732f3d8e067cc7529731316002a78c43e1ff..0aef76abe3b7f469a0283748fe394f351ffb34c3 100644 --- a/drivers/net/ub/unic/unic_stats.h +++ b/drivers/net/ub/unic/unic_stats.h @@ -12,6 +12,13 @@ #include #include +struct unic_dev; + +#define UNIC_ETH_MAC_STATS_CAP_1 95 + +#define UNIC_ETH_MAC_STATS_FIELD_OFF(fld) offsetof(struct ubase_eth_mac_stats, fld) +#define UNIC_ETH_MAC_STATS_FLD_CAP_1(fld) {#fld, UNIC_ETH_MAC_STATS_CAP_1, \ + UNIC_ETH_MAC_STATS_FIELD_OFF(fld)} #define UNIC_SQ_STATS_FIELD_OFF(fld) (offsetof(struct unic_sq, stats) + \ offsetof(struct unic_sq_stats, fld)) #define UNIC_RQ_STATS_FIELD_OFF(fld) (offsetof(struct unic_rq, stats) + \ @@ -100,6 +107,12 @@ struct unic_stats_desc { u16 offset; }; +struct unic_mac_stats_desc { + char desc[ETH_GSTRING_LEN]; + u32 stats_num; + u16 offset; +}; + int unic_get_regs_len(struct net_device *netdev); void unic_get_regs(struct net_device *netdev, struct ethtool_regs *cmd, void *data); diff --git a/drivers/net/ub/unic/unic_tx.h b/drivers/net/ub/unic/unic_tx.h index b472065af4d83f8fd25856aae74670c0d8f5f291..755795d5ce81870f05a103b535a449502f02841f 100644 --- a/drivers/net/ub/unic/unic_tx.h +++ b/drivers/net/ub/unic/unic_tx.h @@ -87,7 +87,6 @@ struct unic_sq_stats { u64 pad_err; u64 bytes; u64 packets; - u64 map_err; u64 busy; u64 more; u64 restart_queue; diff --git a/drivers/net/ub/unic/unic_vlan.c b/drivers/net/ub/unic/unic_vlan.c new file mode 100644 index 0000000000000000000000000000000000000000..4a2e10f32272618bf12de379144b3d2ad5186fd3 --- /dev/null +++ b/drivers/net/ub/unic/unic_vlan.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#define dev_fmt(fmt) "unic: (pid %d) " fmt, current->pid + +#include "unic_hw.h" +#include "unic_vlan.h" + +static int unic_init_vlan_filter(struct unic_dev *unic_dev) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + + vlan_tbl->cur_vlan_fltr_en = false; + + INIT_LIST_HEAD(&vlan_tbl->vlan_list); + + return unic_set_vlan_filter_hw(unic_dev, false); +} + +int unic_init_vlan_config(struct unic_dev *unic_dev) +{ + int ret; + + if (unic_dev_ubl_supported(unic_dev) || + !unic_dev_cfg_vlan_filter_supported(unic_dev)) + return 0; + + ret = unic_init_vlan_filter(unic_dev); + if (ret) + return ret; + + return unic_set_vlan_table(unic_dev, htons(ETH_P_8021Q), 0, true); +} + +void unic_uninit_vlan_config(struct unic_dev *unic_dev) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + struct list_head tmp_del_list; + + if (unic_dev_ubl_supported(unic_dev) || + !unic_dev_cfg_vlan_filter_supported(unic_dev)) + return; + + INIT_LIST_HEAD(&tmp_del_list); + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) + list_move_tail(&vlan->node, &tmp_del_list); + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &tmp_del_list, node) { + (void)unic_set_port_vlan_hw(unic_dev, vlan->vlan_id, false); + list_del(&vlan->node); + kfree(vlan); + } +} + +static bool unic_need_update_port_vlan(struct unic_dev *unic_dev, u16 vlan_id, + bool is_add) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + bool exist = false; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) + if (vlan->vlan_id == vlan_id) { + exist = true; + break; + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + /* vlan 0 may be added twice when 8021q module is enabled */ + if (is_add && !vlan_id && exist) + return false; + + if (is_add && exist) { + 
dev_warn(unic_dev->comdev.adev->dev.parent, + "failed to add port vlan(%u), which is already in hw.\n", + vlan_id); + return false; + } + + if (!is_add && !exist) { + dev_warn(unic_dev->comdev.adev->dev.parent, + "failed to delete port vlan(%u), which is not in hw.\n", + vlan_id); + return false; + } + + return true; +} + +static int unic_set_port_vlan(struct unic_dev *unic_dev, u16 vlan_id, + bool is_add) +{ + if (!is_add && !vlan_id) + return 0; + + if (!unic_need_update_port_vlan(unic_dev, vlan_id, is_add)) + return 0; + + return unic_set_port_vlan_hw(unic_dev, vlan_id, is_add); +} + +static void unic_add_vlan_table(struct unic_dev *unic_dev, u16 vlan_id) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) { + if (vlan->vlan_id == vlan_id) + goto out; + } + + vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); + if (!vlan) + goto out; + + vlan->vlan_id = vlan_id; + + list_add_tail(&vlan->node, &vlan_tbl->vlan_list); + +out: + spin_unlock_bh(&vlan_tbl->vlan_lock); +} + +static void unic_rm_vlan_table(struct unic_dev *unic_dev, u16 vlan_id) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + list_del(&vlan->node); + kfree(vlan); + break; + } + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); +} + +static void unic_set_vlan_filter_change(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + + if (unic_dev_cfg_vlan_filter_supported(unic_dev)) + set_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, &vport->state); +} + +int unic_set_vlan_table(struct unic_dev *unic_dev, __be16 proto, u16 vlan_id, + bool is_add) +{ +#define UNIC_MAX_VLAN_ID 4095 + + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + int ret; + 
+ if (vlan_id > UNIC_MAX_VLAN_ID) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + spin_lock_bh(&vlan_tbl->vlan_lock); + + if (is_add && test_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap)) { + clear_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + } else if (test_bit(UNIC_STATE_RESETTING, &unic_dev->state) && + !is_add) { + set_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + spin_unlock_bh(&vlan_tbl->vlan_lock); + return -EBUSY; + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + ret = unic_set_port_vlan(unic_dev, vlan_id, is_add); + if (!ret) { + if (is_add) + unic_add_vlan_table(unic_dev, vlan_id); + else if (!is_add && vlan_id != 0) + unic_rm_vlan_table(unic_dev, vlan_id); + } else if (!is_add) { + /* when removing the hw vlan filter entry fails, record the vlan id, + * and try to remove it from hw later, to be consistent + * with the stack. + */ + spin_lock_bh(&vlan_tbl->vlan_lock); + set_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + spin_unlock_bh(&vlan_tbl->vlan_lock); + } + + unic_set_vlan_filter_change(unic_dev); + + return ret; +} + +static bool unic_need_enable_vlan_filter(struct unic_dev *unic_dev, bool enable) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + struct unic_vlan_cfg *vlan, *tmp; + + if ((unic_dev->netdev_flags & UNIC_USER_UPE) || !enable) + return false; + + spin_lock_bh(&vlan_tbl->vlan_lock); + list_for_each_entry_safe(vlan, tmp, &vlan_tbl->vlan_list, node) { + if (vlan->vlan_id != 0) { + spin_unlock_bh(&vlan_tbl->vlan_lock); + return true; + } + } + + spin_unlock_bh(&vlan_tbl->vlan_lock); + + return false; +} + +int unic_set_vlan_filter(struct unic_dev *unic_dev, bool enable) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + bool need_en; + int ret = 0; + + need_en = unic_need_enable_vlan_filter(unic_dev, enable); + if (need_en == vlan_tbl->cur_vlan_fltr_en) + return ret; + + ret = unic_set_vlan_filter_hw(unic_dev, need_en); + if (ret) + return ret; + + vlan_tbl->cur_vlan_fltr_en = need_en; 
+ + return ret; +} + +static void unic_sync_vlan_filter_state(struct unic_dev *unic_dev) +{ + struct unic_vport *vport = &unic_dev->vport; + int ret; + + if (!test_and_clear_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, + &vport->state)) + return; + + ret = unic_set_vlan_filter(unic_dev, true); + if (ret) { + unic_err(unic_dev, + "failed to sync vlan filter state, ret = %d.\n", ret); + set_bit(UNIC_VPORT_STATE_VLAN_FILTER_CHANGE, &vport->state); + } +} + +static u16 unic_find_del_fail_vlan(struct unic_dev *unic_dev) +{ + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + u16 vlan_id; + + spin_lock_bh(&vlan_tbl->vlan_lock); + vlan_id = find_first_bit(vlan_tbl->vlan_del_fail_bmap, VLAN_N_VID); + spin_unlock_bh(&vlan_tbl->vlan_lock); + + return vlan_id; +} + +void unic_sync_vlan_filter(struct unic_dev *unic_dev) +{ +#define UNIC_MAX_SYNC_COUNT 60 + + struct unic_vlan_tbl *vlan_tbl = &unic_dev->vport.vlan_tbl; + int ret, sync_cnt = 0; + u16 vlan_id; + + if (unic_dev_ubl_supported(unic_dev) || + !unic_dev_cfg_vlan_filter_supported(unic_dev)) + return; + + vlan_id = unic_find_del_fail_vlan(unic_dev); + while (vlan_id != VLAN_N_VID) { + ret = unic_set_port_vlan(unic_dev, vlan_id, false); + if (ret) + break; + + clear_bit(vlan_id, vlan_tbl->vlan_del_fail_bmap); + unic_rm_vlan_table(unic_dev, vlan_id); + unic_set_vlan_filter_change(unic_dev); + + if (++sync_cnt >= UNIC_MAX_SYNC_COUNT) + break; + + vlan_id = unic_find_del_fail_vlan(unic_dev); + } + + unic_sync_vlan_filter_state(unic_dev); +} diff --git a/drivers/net/ub/unic/unic_vlan.h b/drivers/net/ub/unic/unic_vlan.h new file mode 100644 index 0000000000000000000000000000000000000000..3842bfd05db2519120e5d86de9bb6f18b30ec01b --- /dev/null +++ b/drivers/net/ub/unic/unic_vlan.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef __UNIC_VLAN_H__ +#define __UNIC_VLAN_H__ + +#include "unic_dev.h" + +int unic_init_vlan_config(struct unic_dev *unic_dev); +void unic_uninit_vlan_config(struct unic_dev *unic_dev); +int unic_set_vlan_table(struct unic_dev *unic_dev, __be16 proto, u16 vlan_id, + bool is_add); +int unic_set_vlan_filter(struct unic_dev *unic_dev, bool enable); +void unic_sync_vlan_filter(struct unic_dev *unic_dev); + +#endif diff --git a/drivers/ub/ubase/Makefile b/drivers/ub/ubase/Makefile index 67440b08bcfe6e5ad1cb04af14745cc0f6f076b1..f266263aa3af551ac79de4273a79bdc952b951e2 100644 --- a/drivers/ub/ubase/Makefile +++ b/drivers/ub/ubase/Makefile @@ -12,8 +12,8 @@ MODULE_NAME := ubase UBASE_OBJS := ubase_main.o ubase_dev.o ubase_hw.o ubase_cmd.o ubase_ctrlq.o \ debugfs/ubase_debugfs.o ubase_eq.o ubase_mailbox.o ubase_ubus.o \ debugfs/ubase_ctx_debugfs.o debugfs/ubase_qos_debugfs.o \ - ubase_qos_hw.o ubase_tp.o ubase_ctrlq_tp.o ubase_reset.o \ - ubase_err_handle.o ubase_pmem.o ubase_stats.o ubase_arq.o + ubase_qos_hw.o ubase_tp.o ubase_reset.o ubase_err_handle.o \ + ubase_stats.o ubase_arq.o ubase_pmem.o $(MODULE_NAME)-objs := $(UBASE_OBJS) obj-$(CONFIG_UB_UBASE) := ubase.o diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c index 63f27093aec12985340dfe01b53b1860455bc91e..7f0e5ca1848492325abfdf318ecfbbb8f66133ae 100644 --- a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c @@ -51,26 +51,6 @@ static void ubase_dump_ceq_ctx(struct seq_file *s, struct ubase_dev *udev, u32 i ubase_dump_eq_ctx(s, eq); } -static void ubase_tpg_ctx_titles_print(struct seq_file *s) -{ - seq_puts(s, "CHANNEL_ID TPGN TP_SHIFT VALID_TP "); - seq_puts(s, "START_TPN TPG_STATE TP_CNT\n"); -} - -static void ubase_dump_tpg_ctx(struct seq_file *s, struct ubase_dev *udev, u32 idx) -{ - struct ubase_tpg *tpg = &udev->tp_ctx.tpg[idx]; - - seq_printf(s, "%-12u", idx); - seq_printf(s, "%-9u", 
tpg->mb_tpgn); - seq_printf(s, "%-10u", tpg->tp_shift); - seq_printf(s, "%-10lu", tpg->valid_tp); - seq_printf(s, "%-11u", tpg->start_tpn); - seq_printf(s, "%-11u", tpg->tpg_state); - seq_printf(s, "%-8u", tpg->tp_cnt); - seq_puts(s, "\n"); -} - enum ubase_dbg_ctx_type { UBASE_DBG_AEQ_CTX = 0, UBASE_DBG_CEQ_CTX, @@ -118,26 +98,14 @@ static int ubase_dbg_dump_context(struct seq_file *s, } dbg_ctx[] = { {ubase_eq_ctx_titles_print, ubase_dump_aeq_ctx}, {ubase_eq_ctx_titles_print, ubase_dump_ceq_ctx}, - {ubase_tpg_ctx_titles_print, ubase_dump_tpg_ctx}, }; struct ubase_dev *udev = dev_get_drvdata(s->private); - struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; - unsigned long port_bitmap; - u32 tp_pos, i; + u32 i; dbg_ctx[ctx_type].print_ctx_titles(s); - port_bitmap = unic_caps->utp_port_bitmap; - for (i = 0; i < ubase_get_ctx_num(udev, ctx_type, UBASE_DEFAULT_CTXGN); i++) { - if (ctx_type != UBASE_DBG_TP_CTX) { - dbg_ctx[ctx_type].get_ctx(s, udev, i); - continue; - } - - tp_pos = (i % unic_caps->tpg.depth) * UBASE_TP_PORT_BITMAP_STEP; - if (test_bit(tp_pos, &port_bitmap)) - dbg_ctx[ctx_type].get_ctx(s, udev, i); - } + for (i = 0; i < ubase_get_ctx_num(udev, ctx_type, UBASE_DEFAULT_CTXGN); i++) + dbg_ctx[ctx_type].get_ctx(s, udev, i); return 0; } @@ -152,10 +120,8 @@ struct ubase_ctx_info { static inline u32 ubase_get_ctx_group_num(struct ubase_dev *udev, enum ubase_dbg_ctx_type ctx_type) { - if (ctx_type == UBASE_DBG_TP_CTX) - return udev->caps.unic_caps.tpg.max_cnt; - - return 1; + return ctx_type == UBASE_DBG_TP_CTX ? 
udev->caps.unic_caps.tpg.max_cnt : + 1; } static void ubase_get_ctx_info(struct ubase_dev *udev, @@ -177,7 +143,7 @@ static void ubase_get_ctx_info(struct ubase_dev *udev, break; case UBASE_DBG_TPG_CTX: ctx_info->start_idx = udev->caps.unic_caps.tpg.start_idx; - ctx_info->ctx_size = udev->ctx_buf.tpg.entry_size; + ctx_info->ctx_size = UBASE_TPG_CTX_SIZE; ctx_info->op = UBASE_MB_QUERY_TPG_CONTEXT; ctx_info->ctx_name = "tpg"; break; @@ -187,7 +153,7 @@ static void ubase_get_ctx_info(struct ubase_dev *udev, udev->tp_ctx.tpg[ctxgn].start_tpn : 0; spin_unlock(&udev->tp_ctx.tpg_lock); - ctx_info->ctx_size = udev->ctx_buf.tp.entry_size; + ctx_info->ctx_size = UBASE_TP_CTX_SIZE; ctx_info->op = UBASE_MB_QUERY_TP_CONTEXT; ctx_info->ctx_name = "tp"; break; @@ -348,31 +314,6 @@ int ubase_dbg_dump_ceq_context(struct seq_file *s, void *data) return ret; } -int ubase_dbg_dump_tpg_ctx(struct seq_file *s, void *data) -{ - struct ubase_dev *udev = dev_get_drvdata(s->private); - int ret; - - if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) - return -EBUSY; - - if (!ubase_get_ctx_num(udev, UBASE_DBG_TPG_CTX, UBASE_DEFAULT_CTXGN)) - return -EOPNOTSUPP; - - if (!spin_trylock(&udev->tp_ctx.tpg_lock)) - return -EBUSY; - - if (!udev->tp_ctx.tpg) { - spin_unlock(&udev->tp_ctx.tpg_lock); - return -EBUSY; - } - - ret = ubase_dbg_dump_context(s, UBASE_DBG_TPG_CTX); - spin_unlock(&udev->tp_ctx.tpg_lock); - - return ret; -} - int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data) { struct ubase_dev *udev = dev_get_drvdata(s->private); @@ -380,9 +321,6 @@ int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data) if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) return -EBUSY; - if (!ubase_get_ctx_num(udev, UBASE_DBG_TPG_CTX, UBASE_DEFAULT_CTXGN)) - return -EOPNOTSUPP; - return ubase_dbg_dump_ctx_hw(s, data, UBASE_DBG_TPG_CTX); } diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h index 
532665141fc8d3e8d40b433224e3d70e39269ed1..824c289b97002af170ad8b55a236ab2b7f7b1306 100644 --- a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.h @@ -11,7 +11,6 @@ struct device; int ubase_dbg_dump_aeq_context(struct seq_file *s, void *data); int ubase_dbg_dump_ceq_context(struct seq_file *s, void *data); -int ubase_dbg_dump_tpg_ctx(struct seq_file *s, void *data); int ubase_dbg_dump_tp_ctx_hw(struct seq_file *s, void *data); int ubase_dbg_dump_tpg_ctx_hw(struct seq_file *s, void *data); int ubase_dbg_dump_aeq_ctx_hw(struct seq_file *s, void *data); diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 3b66f255884576932e106604c3ca102516dfc142..e7cb577444a925dc32168c3c00eb7531215a3ec4 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -130,12 +130,8 @@ static void ubase_dbg_dump_adev_caps(struct seq_file *s, {"\tjfr_depth: %u\n", caps->jfr.depth}, {"\tjfc_max_cnt: %u\n", caps->jfc.max_cnt}, {"\tjfc_depth: %u\n", caps->jfc.depth}, - {"\ttp_max_cnt: %u\n", caps->tp.max_cnt}, - {"\ttp_depth: %u\n", caps->tp.depth}, {"\ttpg_max_cnt: %u\n", caps->tpg.max_cnt}, - {"\ttpg_depth: %u\n", caps->tpg.depth}, {"\tcqe_size: %hu\n", caps->cqe_size}, - {"\tutp_port_bitmap: 0x%x\n", caps->utp_port_bitmap}, {"\tjtg_max_cnt: %u\n", caps->jtg_max_cnt}, {"\trc_max_cnt: %u\n", caps->rc_max_cnt}, {"\trc_depth: %u\n", caps->rc_que_depth}, @@ -535,14 +531,6 @@ static struct ubase_dbg_cmd_info ubase_dbg_cmd[] = { .init = __ubase_dbg_seq_file_init, .read_func = ubase_dbg_dump_activate_record, }, - { - .name = "tpg_context", - .dentry_index = UBASE_DBG_DENTRY_CONTEXT, - .property = UBASE_SUP_URMA | UBASE_SUP_UBL_ETH, - .support = __ubase_dbg_dentry_support, - .init = __ubase_dbg_seq_file_init, - .read_func = ubase_dbg_dump_tpg_ctx, - }, { .name = "tp_context_hw", .dentry_index = UBASE_DBG_DENTRY_CONTEXT, diff --git 
a/drivers/ub/ubase/ubase_ctrlq_tp.c b/drivers/ub/ubase/ubase_ctrlq_tp.c deleted file mode 100644 index 5ebcb609a205f74c8208e4b07f0ac888b2553341..0000000000000000000000000000000000000000 --- a/drivers/ub/ubase/ubase_ctrlq_tp.c +++ /dev/null @@ -1,226 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. - * - */ - -#include - -#include "ubase_ctrlq.h" -#include "ubase_dev.h" -#include "ubase_tp.h" -#include "ubase_ctrlq_tp.h" - -#define UBASE_TRANS_TYPE_UM_TP 0x2 -#define UBASE_TPG_FLAG 0x1 - -int ubase_notify_tp_fd_by_ctrlq(struct ubase_dev *udev, u32 tpn) -{ - struct ubase_tpg *tpg = udev->tp_ctx.tpg; - struct ubase_ctrlq_tp_fd_req req = {0}; - struct ubase_ctrlq_msg msg = {0}; - int ret, tmp_resp; - u32 i; - - spin_lock(&udev->tp_ctx.tpg_lock); - if (!tpg) { - spin_unlock(&udev->tp_ctx.tpg_lock); - ubase_warn(udev, - "ubase tpg res does not exist, tpn = %u.\n", tpn); - return 0; - } - - for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { - if (tpn >= tpg[i].start_tpn && - tpn < tpg[i].start_tpn + tpg[i].tp_cnt) { - ubase_dbg(udev, - "receive tp flush done AE, tpn:%u, tpgn:%u.\n", - tpn, i); - break; - } - } - spin_unlock(&udev->tp_ctx.tpg_lock); - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = UBASE_CTRLQ_OPC_TP_FLUSH_DONE; - msg.need_resp = 1; - msg.in_size = sizeof(req); - msg.in = &req; - msg.out_size = sizeof(tmp_resp); - msg.out = &tmp_resp; - req.tpn = cpu_to_le32(tpn); - - ret = __ubase_ctrlq_send(udev, &msg, NULL); - if (ret) - ubase_err(udev, "failed to notify tp flush done, ret = %d.\n", - ret); - - spin_lock(&udev->tp_ctx.tpg_lock); - if (udev->tp_ctx.tpg && i < udev->caps.unic_caps.tpg.max_cnt) - atomic_inc(&tpg[i].tp_fd_cnt); - else - ubase_warn(udev, - "ubase tpg res does not exist, tpn = %u.\n", tpn); - spin_unlock(&udev->tp_ctx.tpg_lock); - - return ret; -} - -static int ubase_create_tp_tpg_by_ctrlq(struct 
ubase_dev *udev, u32 vl) -{ - struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; - struct ubase_ctrlq_create_tp_resp resp = {0}; - struct ubase_ctrlq_create_tp_req req = {0}; - struct ubase_ctrlq_msg msg = {0}; - int ret; - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = UBASE_CTRLQ_OPC_CREATE_TP; - msg.need_resp = 1; - msg.in_size = sizeof(req); - msg.in = &req; - msg.out_size = sizeof(resp); - msg.out = &resp; - - req.trans_type = UBASE_TRANS_TYPE_UM_TP; - req.vl = (u8)vl; - - ret = __ubase_ctrlq_send(udev, &msg, NULL); - if (ret && ret != -EEXIST) { - ubase_err(udev, "failed to alloc tp tpg, ret = %d.\n", ret); - return ret; - } - - tp_ctx->tpg[vl].mb_tpgn = le32_to_cpu(resp.tpgn); - tp_ctx->tpg[vl].start_tpn = le32_to_cpu(resp.start_tpn); - tp_ctx->tpg[vl].tp_cnt = resp.tpn_cnt; - - if (tp_ctx->tpg[vl].mb_tpgn != vl) - ubase_warn(udev, "unexpected tpgn, vl = %u, tpgn = %u.\n", - vl, tp_ctx->tpg[vl].mb_tpgn); - - return 0; -} - -static void ubase_wait_tp_flush_done_by_ctrlq(struct ubase_dev *udev, u32 vl) -{ - struct ubase_tpg *tpg = &udev->tp_ctx.tpg[vl]; - int i; - - for (i = 0; i < UBASE_WAIT_TP_FLUSH_TOTAL_STEPS; i++) { - msleep(1 << i); - - if (atomic_read(&tpg->tp_fd_cnt) == tpg->tp_cnt) - return; - } - - ubase_warn(udev, - "wait tp flush done timeout, tpgn = %u, tp_fd_cnt = %u.\n", - vl, atomic_read(&tpg->tp_fd_cnt)); -} - -static void ubase_destroy_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) -{ - struct ubase_ctrlq_destroy_tp_req req = {0}; - struct ubase_ctrlq_msg msg = {0}; - int tmp_resp, ret; - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = UBASE_CTRLQ_OPC_DESTROY_TP; - msg.need_resp = 1; - msg.in_size = sizeof(req); - msg.in = &req; - msg.out_size = sizeof(tmp_resp); - msg.out = &tmp_resp; - - req.vl = (u8)vl; - req.trans_type = UBASE_TRANS_TYPE_UM_TP; - - ret = __ubase_ctrlq_send(udev, &msg, NULL); - if (ret) { - 
ubase_err(udev, - "failed to send destroy tp tpg request, tpgn = %u, ret = %d.\n", - vl, ret); - return; - } - - ubase_wait_tp_flush_done_by_ctrlq(udev, vl); -} - -static void ubase_destroy_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 num) -{ - u32 idx; - - for (idx = 0; idx < num; idx++) - ubase_destroy_tp_tpg_by_ctrlq(udev, idx); -} - -static int ubase_create_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev) -{ - int ret; - u32 i; - - for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { - atomic_set(&udev->tp_ctx.tpg[i].tp_fd_cnt, 0); - ret = ubase_create_tp_tpg_by_ctrlq(udev, i); - if (ret) { - ubase_err(udev, "failed to create tp tpg, tpgn = %u, ret = %d.\n", - i, ret); - goto err_create_tp_tpg; - } - } - - return 0; - -err_create_tp_tpg: - ubase_destroy_multi_tp_tpg_by_ctrlq(udev, i); - - return ret; -} - -void ubase_dev_uninit_rack_tp_tpg(struct ubase_dev *udev) -{ - struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; - struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; - u32 num = unic_caps->tpg.max_cnt; - - if (!tp_ctx->tpg) - return; - - if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) - ubase_destroy_multi_tp_tpg_by_ctrlq(udev, num); - - spin_lock(&tp_ctx->tpg_lock); - devm_kfree(udev->dev, tp_ctx->tpg); - tp_ctx->tpg = NULL; - spin_unlock(&tp_ctx->tpg_lock); -} - -int ubase_dev_init_rack_tp_tpg(struct ubase_dev *udev) -{ - struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; - struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; - int ret; - - spin_lock(&tp_ctx->tpg_lock); - tp_ctx->tpg = devm_kcalloc(udev->dev, unic_caps->tpg.max_cnt, - sizeof(struct ubase_tpg), GFP_ATOMIC); - if (!tp_ctx->tpg) { - spin_unlock(&tp_ctx->tpg_lock); - return -ENOMEM; - } - spin_unlock(&tp_ctx->tpg_lock); - - ret = ubase_create_multi_tp_tpg_by_ctrlq(udev); - if (ret) { - spin_lock(&tp_ctx->tpg_lock); - devm_kfree(udev->dev, tp_ctx->tpg); - tp_ctx->tpg = NULL; - spin_unlock(&tp_ctx->tpg_lock); - } - - return ret; -} diff --git 
a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index f3f9ab6c7e968683dcd963d8e1b49938cace0954..4765610b7f094069fdf929f1f9c4d6e3b544efdf 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -4,6 +4,7 @@ * */ +#include #include #include @@ -299,6 +300,21 @@ static void ubase_uninit_aux_devices(struct ubase_dev *udev) mutex_destroy(&udev->priv.uadev_lock); } +static void ubase_update_stats_for_all(struct ubase_dev *udev) +{ + int ret; + + if (ubase_dev_unic_supported(udev) && + ubase_dev_eth_mac_supported(udev) && + ubase_dev_mac_stats_supported(udev)) { + ret = ubase_update_eth_stats_trylock(udev); + if (ret) + ubase_err(udev, + "failed to update stats for eth, ret = %d.\n", + ret); + } +} + static void ubase_cancel_period_service_task(struct ubase_dev *udev) { if (udev->period_service_task.service_task.work.func) @@ -321,7 +337,6 @@ static int ubase_enable_period_service_task(struct ubase_dev *udev) static void ubase_period_service_task(struct work_struct *work) { #define UBASE_STATS_TIMER_INTERVAL (300000 / (UBASE_PERIOD_100MS)) -#define UBASE_QUERY_SL_TIMER_INTERVAL (1000 / (UBASE_PERIOD_100MS)) struct ubase_delay_work *ubase_work = container_of(work, struct ubase_delay_work, service_task.work); @@ -333,6 +348,10 @@ static void ubase_period_service_task(struct work_struct *work) return; } + if (test_bit(UBASE_STATE_INITED_B, &udev->state_bits) && + !(udev->serv_proc_cnt % UBASE_STATS_TIMER_INTERVAL)) + ubase_update_stats_for_all(udev); + udev->serv_proc_cnt++; ubase_enable_period_service_task(udev); } @@ -1329,6 +1348,15 @@ struct ubase_adev_qos *ubase_get_adev_qos(struct auxiliary_device *adev) } EXPORT_SYMBOL(ubase_get_adev_qos); +bool ubase_adev_mac_stats_supported(struct auxiliary_device *adev) +{ + if (!adev) + return false; + + return ubase_dev_mac_stats_supported(__ubase_get_udev_by_adev(adev)); +} +EXPORT_SYMBOL(ubase_adev_mac_stats_supported); + static void ubase_activate_notify(struct ubase_dev *udev, struct 
auxiliary_device *adev, bool activate) { @@ -1634,3 +1662,55 @@ int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid) return __ubase_get_bus_eid(udev, eid); } EXPORT_SYMBOL(ubase_get_bus_eid); + +/** + * ubase_set_dev_mac() - Record the MAC address of the device + * @adev: auxiliary device + * @dev_addr: MAC address of the device + * @addr_len: MAC address length + * + * This function is used to record the MAC address of the device, and store the + * MAC address in the ubase_dev structure. + * + * Context: Any context. + * Return: 0 on success, negative error code otherwise + */ +int ubase_set_dev_mac(struct auxiliary_device *adev, const u8 *dev_addr, + u8 addr_len) +{ + struct ubase_dev *udev; + + if (!adev || !dev_addr || addr_len < ETH_ALEN) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(adev); + ether_addr_copy(udev->dev_mac, dev_addr); + + return 0; +} +EXPORT_SYMBOL(ubase_set_dev_mac); + +/** + * ubase_get_dev_mac() - Obtain the device MAC address and output it. + * @adev: auxiliary device + * @dev_addr: Output parameter, save the obtained MAC address array. + * @addr_len: Length of the array for storing MAC addresses + * + * This function is used to get the device MAC address from ubase_dev. + * + * Context: Any context. 
+ * Return: 0 on success, negative error code otherwise + */ +int ubase_get_dev_mac(struct auxiliary_device *adev, u8 *dev_addr, u8 addr_len) +{ + struct ubase_dev *udev; + + if (!adev || !dev_addr || addr_len < ETH_ALEN) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(adev); + ether_addr_copy(dev_addr, udev->dev_mac); + + return 0; +} +EXPORT_SYMBOL(ubase_get_dev_mac); diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index 45605409de6b1c8b6f68dbd6ccb085186c3dbac4..ee7c5f605e65753a12ce72954f5dec5efe593855 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -291,6 +292,7 @@ struct ubase_dev { struct ubase_act_ctx act_ctx; struct ubase_arq_msg_ring arq; struct ubase_prealloc_mem_info pmem_info; + u8 dev_mac[ETH_ALEN]; }; #define UBASE_ERR_MSG_LEN 128 diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c index 2b68ba5884f51ca6bc815b51f4a41e86b53b7cf9..c473d12ddf723aa55b66e807ee589b8ade111f4e 100644 --- a/drivers/ub/ubase/ubase_hw.c +++ b/drivers/ub/ubase/ubase_hw.c @@ -162,7 +162,6 @@ static void ubase_parse_dev_caps_unic(struct ubase_dev *udev, unic_caps->jfc.max_cnt = le32_to_cpu(resp->nic_jfc_max_cnt); unic_caps->jfc.depth = le32_to_cpu(resp->nic_jfc_depth); unic_caps->cqe_size = le16_to_cpu(resp->nic_cqe_size); - unic_caps->utp_port_bitmap = le32_to_cpu(resp->port_bitmap); } static void ubase_parse_dev_caps_udma(struct ubase_dev *udev, @@ -585,7 +584,7 @@ static void ubase_uninit_dma_buf(struct ubase_dev *udev, buf->addr = NULL; } -static int ubase_init_ta_tp_ext_buf(struct ubase_dev *udev) +static int ubase_init_ta_ext_buf(struct ubase_dev *udev) { UBASE_DEFINE_DMA_BUFS(udev); int i, ret; @@ -609,7 +608,7 @@ static int ubase_init_ta_tp_ext_buf(struct ubase_dev *udev) return ret; } -static void ubase_uninit_ta_tp_ext_buf(struct ubase_dev *udev) +static void ubase_uninit_ta_ext_buf(struct ubase_dev *udev) { 
UBASE_DEFINE_DMA_BUFS(udev); int i; @@ -860,9 +859,9 @@ int ubase_hw_init(struct ubase_dev *udev) return ret; } - ret = ubase_init_ta_tp_ext_buf(udev); + ret = ubase_init_ta_ext_buf(udev); if (ret) - goto err_init_ta_tp_ext_buf; + goto err_init_ta_ext_buf; ret = ubase_dev_init_tp_tpg(udev); if (ret) { @@ -875,8 +874,8 @@ int ubase_hw_init(struct ubase_dev *udev) return 0; err_init_tp_tpg: - ubase_uninit_ta_tp_ext_buf(udev); -err_init_ta_tp_ext_buf: + ubase_uninit_ta_ext_buf(udev); +err_init_ta_ext_buf: ubase_uninit_ctx_buf(udev); return ret; @@ -887,7 +886,7 @@ void ubase_hw_uninit(struct ubase_dev *udev) clear_bit(UBASE_STATE_CTX_READY_B, &udev->state_bits); ubase_dev_uninit_tp_tpg(udev); - ubase_uninit_ta_tp_ext_buf(udev); + ubase_uninit_ta_ext_buf(udev); if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) { ubase_ctrlq_disable_remote(udev); diff --git a/drivers/ub/ubase/ubase_hw.h b/drivers/ub/ubase/ubase_hw.h index 2c7ed2264aabffc4f743fabfcbbed52e342056b0..e292053905ec0d8e6f7209a6b2406df2f3ae7347 100644 --- a/drivers/ub/ubase/ubase_hw.h +++ b/drivers/ub/ubase/ubase_hw.h @@ -70,39 +70,33 @@ struct ubase_res_cmd_resp { __le32 nic_jfc_max_cnt; u8 rsvd11[4]; __le32 nic_jfc_depth; - __le32 nic_tp_max_cnt; - __le32 nic_tp_reserved_cnt; - __le32 nic_tp_depth; - __le32 nic_tpg_max_cnt; + u8 rsvd12[16]; - __le32 nic_tpg_reserved_cnt; - __le32 nic_tpg_depth; + u8 rsvd13[8]; __le32 total_ue_num; - u8 rsvd12[16]; + u8 rsvd14[16]; __le16 rsvd_jetty_cnt; __le16 mac_stats_num; __le32 ta_extdb_buf_size; __le32 ta_timer_buf_size; __le32 public_jetty_cnt; - __le32 tp_extdb_buf_size; - __le32 tp_timer_buf_size; - u8 resv13; + u8 rsvd15[9]; u8 udma_vl_num; u8 udma_tp_resp_vl_offset; u8 ue_num; __le32 port_bitmap; - u8 rsvd14[4]; + u8 rsvd16[4]; /* include udma tp and ctp req vl */ u8 udma_req_vl[UBASE_MAX_REQ_VL_NUM]; __le32 udma_rc_depth; - u8 rsvd15[4]; + u8 rsvd17[4]; __le32 jtg_max_cnt; __le32 rc_max_cnt_per_vl; - u8 rsvd16[8]; + u8 rsvd18[8]; - u8 rsvd17[32]; + u8 
rsvd19[32]; }; struct ubase_query_oor_resp { diff --git a/drivers/ub/ubase/ubase_qos_hw.c b/drivers/ub/ubase/ubase_qos_hw.c index b67b7f7e5c912fcd3d2f93d6b194476a8169c902..8dff50e1a374f760b8852eaa446c39265ab660b8 100644 --- a/drivers/ub/ubase/ubase_qos_hw.c +++ b/drivers/ub/ubase/ubase_qos_hw.c @@ -745,7 +745,7 @@ static int ubase_assign_urma_vl(struct ubase_dev *udev, u8 *urma_sl, return 0; } -static int ubase_parse_rack_nic_vl(struct ubase_dev *udev) +static int ubase_parse_nic_vl(struct ubase_dev *udev) { return ubase_assign_urma_vl(udev, udev->qos.nic_sl, udev->qos.nic_sl_num, udev->qos.nic_vl, &udev->qos.nic_vl_num); @@ -812,7 +812,7 @@ static int ubase_parse_rack_cdma_req_sl_vl(struct ubase_dev *udev) return 0; } -static int ubase_parse_rack_cdma_sl_vl(struct ubase_dev *udev) +static int ubase_parse_cdma_sl_vl(struct ubase_dev *udev) { int ret; @@ -828,16 +828,16 @@ static int ubase_parse_rack_cdma_sl_vl(struct ubase_dev *udev) return 0; } -static inline int ubase_parse_rack_nic_sl_vl(struct ubase_dev *udev) +static inline int ubase_parse_nic_sl_vl(struct ubase_dev *udev) { - return ubase_parse_rack_nic_vl(udev); + return ubase_parse_nic_vl(udev); } -static int ubase_parse_rack_urma_sl_vl(struct ubase_dev *udev) +static int ubase_parse_urma_sl_vl(struct ubase_dev *udev) { int ret; - ret = ubase_parse_rack_nic_sl_vl(udev); + ret = ubase_parse_nic_sl_vl(udev); if (ret) return ret; @@ -851,13 +851,13 @@ static int ubase_parse_rack_urma_sl_vl(struct ubase_dev *udev) return 0; } -static int ubase_parse_rack_adev_sl_vl(struct ubase_dev *udev) +static int ubase_parse_adev_sl_vl(struct ubase_dev *udev) { if (ubase_dev_cdma_supported(udev)) - return ubase_parse_rack_cdma_sl_vl(udev); + return ubase_parse_cdma_sl_vl(udev); if (ubase_dev_urma_supported(udev)) - return ubase_parse_rack_urma_sl_vl(udev); + return ubase_parse_urma_sl_vl(udev); return 0; } @@ -913,7 +913,7 @@ static int ubase_parse_sl_vl(struct ubase_dev *udev) if (ret) return ret; - ret = 
ubase_parse_adev_sl_vl(udev); if (ret) return ret; diff --git a/drivers/ub/ubase/ubase_stats.c b/drivers/ub/ubase/ubase_stats.c index 4d6e4678686d9078d61e755da538f3523ee8667b..78ce9b6835d1cbccfb5e3bcbd53e8b51b171ff76 100644 --- a/drivers/ub/ubase/ubase_stats.c +++ b/drivers/ub/ubase/ubase_stats.c @@ -51,6 +51,33 @@ static int ubase_update_mac_stats(struct ubase_dev *udev, u16 port_id, u64 *data return ret; } +/** + * ubase_clear_eth_port_stats() - clear eth port stats + * @adev: auxiliary device + * + * The function is used to clear eth port stats. + * + * Context: Process context. Takes and releases the stats mutex. May sleep. + * Return: none. + */ +void ubase_clear_eth_port_stats(struct auxiliary_device *adev) +{ + struct ubase_eth_mac_stats *eth_stats; + struct ubase_dev *udev; + + if (!adev) + return; + + udev = __ubase_get_udev_by_adev(adev); + eth_stats = &udev->stats.eth_stats; + if (ubase_dev_eth_mac_supported(udev)) { + mutex_lock(&udev->stats.stats_lock); + memset(eth_stats, 0, sizeof(*eth_stats)); + mutex_unlock(&udev->stats.stats_lock); + } +} +EXPORT_SYMBOL(ubase_clear_eth_port_stats); + /** * ubase_get_ub_port_stats() - get ub port stats * @adev: auxiliary device @@ -77,6 +104,41 @@ int ubase_get_ub_port_stats(struct auxiliary_device *adev, u16 port_id, } EXPORT_SYMBOL(ubase_get_ub_port_stats); +int __ubase_get_eth_port_stats(struct ubase_dev *udev, + struct ubase_eth_mac_stats *data) +{ + struct ubase_eth_mac_stats *eth_stats = &udev->stats.eth_stats; + u32 stats_num = sizeof(*eth_stats) / sizeof(u64); + int ret; + + mutex_lock(&udev->stats.stats_lock); + ret = ubase_update_mac_stats(udev, udev->caps.dev_caps.io_port_logic_id, + (u64 *)eth_stats, stats_num, true); + if (ret) { + mutex_unlock(&udev->stats.stats_lock); + return ret; + } + + memcpy(data, &udev->stats.eth_stats, sizeof(*data)); + mutex_unlock(&udev->stats.stats_lock); + + return 0; +} + +int 
ubase_get_eth_port_stats(struct auxiliary_device *adev, + struct ubase_eth_mac_stats *data) +{ + struct ubase_dev *udev; + + if (!adev || !data) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(adev); + + return __ubase_get_eth_port_stats(udev, data); +} +EXPORT_SYMBOL(ubase_get_eth_port_stats); + void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, int result) { @@ -98,3 +160,19 @@ void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, mutex_unlock(&record->lock); } + +int ubase_update_eth_stats_trylock(struct ubase_dev *udev) +{ + struct ubase_eth_mac_stats *eth_stats = &udev->stats.eth_stats; + u32 stats_num = sizeof(*eth_stats) / sizeof(u64); + int ret; + + if (!mutex_trylock(&udev->stats.stats_lock)) + return 0; + + ret = ubase_update_mac_stats(udev, udev->caps.dev_caps.io_port_logic_id, + (u64 *)eth_stats, stats_num, true); + mutex_unlock(&udev->stats.stats_lock); + + return ret; +} diff --git a/drivers/ub/ubase/ubase_stats.h b/drivers/ub/ubase/ubase_stats.h index a6826dd461c76ffaf03dc4ea78fac6dfe31d8494..7b221f29474f2c7453bdc55c1d8a5761c3c85bc6 100644 --- a/drivers/ub/ubase/ubase_stats.h +++ b/drivers/ub/ubase/ubase_stats.h @@ -15,6 +15,9 @@ struct ubase_query_mac_stats_cmd { __le64 stats_val[]; }; +int __ubase_get_eth_port_stats(struct ubase_dev *udev, + struct ubase_eth_mac_stats *data); +int ubase_update_eth_stats_trylock(struct ubase_dev *udev); void ubase_update_activate_stats(struct ubase_dev *udev, bool activate, int result); diff --git a/drivers/ub/ubase/ubase_tp.c b/drivers/ub/ubase/ubase_tp.c index 2b11b39fb6a402a0cb98b094becd155c96efd4f8..f18854fdc319ddf56ef7b25f82066382ff71e06e 100644 --- a/drivers/ub/ubase/ubase_tp.c +++ b/drivers/ub/ubase/ubase_tp.c @@ -4,10 +4,66 @@ * */ -#include "ubase_ctrlq_tp.h" +#include + +#include "ubase_ctrlq.h" #include "ubase_reset.h" #include "ubase_tp.h" +int ubase_notify_tp_fd_by_ctrlq(struct ubase_dev *udev, u32 tpn) +{ + struct ubase_ctrlq_tp_fd_req req = {0}; + struct 
ubase_ctrlq_msg msg = {0}; + struct ubase_tpg *tpg; + int ret, tmp_resp; + u32 i; + + spin_lock(&udev->tp_ctx.tpg_lock); + tpg = udev->tp_ctx.tpg; + if (!tpg) { + spin_unlock(&udev->tp_ctx.tpg_lock); + ubase_warn(udev, + "ubase tpg res does not exist, tpn = %u.\n", tpn); + return 0; + } + + for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; i++) { + if (tpn >= tpg[i].start_tpn && + tpn < tpg[i].start_tpn + tpg[i].tp_cnt) { + ubase_dbg(udev, + "receive tp flush done AE, tpn:%u, tpgn:%u.\n", + tpn, i); + break; + } + } + spin_unlock(&udev->tp_ctx.tpg_lock); + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg.opcode = UBASE_CTRLQ_OPC_TP_FLUSH_DONE; + msg.need_resp = 1; + msg.in_size = sizeof(req); + msg.in = &req; + msg.out_size = sizeof(tmp_resp); + msg.out = &tmp_resp; + req.tpn = cpu_to_le32(tpn); + + ret = __ubase_ctrlq_send(udev, &msg, NULL); + if (ret) + ubase_err(udev, "failed to notify tp flush done, ret = %d.\n", + ret); + + spin_lock(&udev->tp_ctx.tpg_lock); + if (udev->tp_ctx.tpg && i < udev->caps.unic_caps.tpg.max_cnt) + atomic_inc(&tpg[i].tp_fd_cnt); + else + ubase_warn(udev, + "ubase tpg res does not exist, tpn = %u.\n", tpn); + spin_unlock(&udev->tp_ctx.tpg_lock); + + return ret; +} + int ubase_ae_tp_flush_done(struct notifier_block *nb, unsigned long event, void *data) { @@ -40,18 +96,165 @@ int ubase_ae_tp_level_error(struct notifier_block *nb, unsigned long event, return 0; } +static int ubase_create_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) +{ + struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; + struct ubase_ctrlq_create_tp_resp resp = {0}; + struct ubase_ctrlq_create_tp_req req = {0}; + struct ubase_ctrlq_msg msg = {0}; + int ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg.opcode = UBASE_CTRLQ_OPC_CREATE_TP; + msg.need_resp = 1; + msg.in_size = sizeof(req); + msg.in = &req; + msg.out_size = sizeof(resp); + msg.out = &resp; + + 
req.trans_type = UBASE_TRANS_TYPE_UM_TP; + req.vl = (u8)vl; + + ret = __ubase_ctrlq_send(udev, &msg, NULL); + if (ret && ret != -EEXIST) { + ubase_err(udev, "failed to alloc tp tpg, ret = %d.\n", ret); + return ret; + } + + tp_ctx->tpg[vl].mb_tpgn = le32_to_cpu(resp.tpgn); + tp_ctx->tpg[vl].start_tpn = le32_to_cpu(resp.start_tpn); + tp_ctx->tpg[vl].tp_cnt = resp.tpn_cnt; + + if (tp_ctx->tpg[vl].mb_tpgn != vl) + ubase_warn(udev, "unexpected tpgn, vl = %u, tpgn = %u.\n", + vl, tp_ctx->tpg[vl].mb_tpgn); + + return 0; +} + +static void ubase_wait_tp_flush_done_by_ctrlq(struct ubase_dev *udev, u32 vl) +{ + struct ubase_tpg *tpg = &udev->tp_ctx.tpg[vl]; + int i; + + for (i = 0; i < UBASE_WAIT_TP_FLUSH_TOTAL_STEPS; i++) { + msleep(1 << i); + + if (atomic_read(&tpg->tp_fd_cnt) == tpg->tp_cnt) + return; + } + + ubase_warn(udev, + "wait tp flush done timeout, tpgn = %u, tp_fd_cnt = %u.\n", + vl, atomic_read(&tpg->tp_fd_cnt)); +} + +static void ubase_destroy_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 vl) +{ + struct ubase_ctrlq_destroy_tp_req req = {0}; + struct ubase_ctrlq_msg msg = {0}; + int tmp_resp, ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; + msg.opcode = UBASE_CTRLQ_OPC_DESTROY_TP; + msg.need_resp = 1; + msg.in_size = sizeof(req); + msg.in = &req; + msg.out_size = sizeof(tmp_resp); + msg.out = &tmp_resp; + + req.vl = (u8)vl; + req.trans_type = UBASE_TRANS_TYPE_UM_TP; + + ret = __ubase_ctrlq_send(udev, &msg, NULL); + if (ret) { + ubase_err(udev, + "failed to send destroy tp tpg request, tpgn = %u, ret = %d.\n", + vl, ret); + return; + } + + ubase_wait_tp_flush_done_by_ctrlq(udev, vl); +} + +static void ubase_destroy_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev, u32 num) +{ + u32 idx; + + for (idx = 0; idx < num; idx++) + ubase_destroy_tp_tpg_by_ctrlq(udev, idx); +} + +static int ubase_create_multi_tp_tpg_by_ctrlq(struct ubase_dev *udev) +{ + int ret; + u32 i; + + for (i = 0; i < udev->caps.unic_caps.tpg.max_cnt; 
i++) { + ret = ubase_create_tp_tpg_by_ctrlq(udev, i); + if (ret) { + ubase_err(udev, "failed to create tp tpg, tpgn = %u, ret = %d.\n", + i, ret); + goto err_create_tp_tpg; + } + } + + return 0; + +err_create_tp_tpg: + ubase_destroy_multi_tp_tpg_by_ctrlq(udev, i); + + return ret; +} + int ubase_dev_init_tp_tpg(struct ubase_dev *udev) { + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; + int ret; + if (!ubase_utp_supported(udev) || !ubase_dev_urma_supported(udev)) return 0; - return ubase_dev_init_rack_tp_tpg(udev); + spin_lock(&tp_ctx->tpg_lock); + tp_ctx->tpg = devm_kcalloc(udev->dev, unic_caps->tpg.max_cnt, + sizeof(struct ubase_tpg), GFP_ATOMIC); + if (!tp_ctx->tpg) { + spin_unlock(&tp_ctx->tpg_lock); + return -ENOMEM; + } + spin_unlock(&tp_ctx->tpg_lock); + + ret = ubase_create_multi_tp_tpg_by_ctrlq(udev); + if (ret) { + spin_lock(&tp_ctx->tpg_lock); + devm_kfree(udev->dev, tp_ctx->tpg); + tp_ctx->tpg = NULL; + spin_unlock(&tp_ctx->tpg_lock); + } + + return ret; } void ubase_dev_uninit_tp_tpg(struct ubase_dev *udev) { + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + struct ubase_tp_layer_ctx *tp_ctx = &udev->tp_ctx; + u32 num = unic_caps->tpg.max_cnt; + if (!ubase_utp_supported(udev) || !ubase_dev_urma_supported(udev)) return; - ubase_dev_uninit_rack_tp_tpg(udev); + if (!tp_ctx->tpg) + return; + + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + ubase_destroy_multi_tp_tpg_by_ctrlq(udev, num); + + spin_lock(&tp_ctx->tpg_lock); + devm_kfree(udev->dev, tp_ctx->tpg); + tp_ctx->tpg = NULL; + spin_unlock(&tp_ctx->tpg_lock); } diff --git a/drivers/ub/ubase/ubase_tp.h b/drivers/ub/ubase/ubase_tp.h index 0506e77c98f082e68bd0bc439067120b76403e80..42a3cb4eb8c57ee7a178f2569e2dbe9dc94e5d00 100644 --- a/drivers/ub/ubase/ubase_tp.h +++ b/drivers/ub/ubase/ubase_tp.h @@ -11,6 +11,8 @@ #include "ubase_dev.h" +#define UBASE_TRANS_TYPE_UM_TP 0x2 + #define UBASE_TP_PORT_BITMAP_STEP 2 
#define UBASE_WAIT_TP_FLUSH_TOTAL_STEPS 12 diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index cadc707a23e6ed23428bb1b48aa6c59583d11fbc..4efbf8402d9d7f97783aede33d6e53c3001555c1 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -47,12 +47,19 @@ enum ubase_opcode_type { UBASE_OPC_QUERY_UBCL_CONFIG = 0x0050, /* NL commands */ + UBASE_OPC_VLAN_FILTER_CTRL = 0x2100, + UBASE_OPC_VLAN_FILTER_CFG = 0x2101, + UBASE_OPC_QUERY_VLAN_TBL = 0x2102, UBASE_OPC_CFG_VL_MAP = 0x2206, UBASE_OPC_CFG_ETS_TC_INFO = 0x2340, UBASE_OPC_QUERY_ETS_TCG_INFO = 0x2341, UBASE_OPC_QUERY_ETS_PORT_INFO = 0x2342, UBASE_OPC_QUERY_VL_AGEING_EN = 0x2343, UBASE_OPC_CFG_PROMISC_MODE = 0x240A, + UBASE_OPC_QUERY_MAC = 0x241A, + UBASE_OPC_ADD_MAC_TBL = 0x241B, + UBASE_OPC_DEL_MAC_TBL = 0x241C, + UBASE_OPC_QUERY_MAC_TBL = 0x241E, /* TP commands */ UBASE_OPC_TP_TIMER_VA_CONFIG = 0x3007, @@ -76,6 +83,7 @@ enum ubase_opcode_type { /* DL commands */ UBASE_OPC_DL_CONFIG_MODE = 0x5100, + UBASE_OPC_DL_CONFIG_LB = 0x5101, UBASE_OPC_QUERY_FLUSH_STATUS = 0x5102, UBASE_OPC_START_PERF_STATS = 0x5103, UBASE_OPC_STOP_PERF_STATS = 0x5104, @@ -87,6 +95,9 @@ enum ubase_opcode_type { UBASE_OPC_QUERY_PORT_INFO = 0x6200, UBASE_OPC_QUERY_CHIP_INFO = 0x6201, UBASE_OPC_QUERY_FEC_STATS = 0x6202, + UBASE_OPC_QUERY_LINK_DIAGNOSIS = 0x6203, + UBASE_OPC_CFG_MAC_PAUSE_EN = 0x6300, + UBASE_OPC_CFG_PFC_PAUSE_EN = 0x6301, UBASE_OPC_HIMAC_RESET = 0x6302, /* Mailbox commands */ diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index 8dfbb2dc91bdb273609978db473e1a7af8176375..37950410345e3c194bcec59e651ee1ddb054e43d 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -168,10 +168,8 @@ struct ubase_pmem_caps { * @jfs: jfs resource capabilities * @jfr: jfr resource capabilities * @jfc: jfc resource capabilities - * @tp: tp resource capabilities * @tpg: tp group resource capabilities * @pmem: physical 
memory capabilities - * @utp_port_bitmap: utp port bitmap * @jtg_max_cnt: jetty group max count * @rc_max_cnt: rc max count * @rc_que_depth: rc queue depth @@ -185,10 +183,8 @@ struct ubase_adev_caps { struct ubase_res_caps jfs; struct ubase_res_caps jfr; struct ubase_res_caps jfc; - struct ubase_res_caps tp; struct ubase_res_caps tpg; struct ubase_pmem_caps pmem; - u32 utp_port_bitmap; /* utp port bitmap */ u32 jtg_max_cnt; u32 rc_max_cnt; u32 rc_que_depth; @@ -222,8 +218,6 @@ struct ubase_ctx_buf_cap { * @jfc: jfc context buffer capability * @jtg: jetty group context buffer capability * @rc: rc context buffer capability - * @tp: tp context buffer capability - * @tpg: tp group context buffer capability */ struct ubase_ctx_buf { struct ubase_ctx_buf_cap jfs; @@ -231,9 +225,6 @@ struct ubase_ctx_buf { struct ubase_ctx_buf_cap jfc; struct ubase_ctx_buf_cap jtg; struct ubase_ctx_buf_cap rc; - - struct ubase_ctx_buf_cap tp; - struct ubase_ctx_buf_cap tpg; }; struct net_device; @@ -356,6 +347,7 @@ struct ubase_bus_eid { bool ubase_adev_ubl_supported(struct auxiliary_device *adev); bool ubase_adev_ctrlq_supported(struct auxiliary_device *adev); bool ubase_adev_eth_mac_supported(struct auxiliary_device *adev); +bool ubase_adev_mac_stats_supported(struct auxiliary_device *aux_dev); bool ubase_adev_prealloc_supported(struct auxiliary_device *aux_dev); struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev); struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev); @@ -387,4 +379,8 @@ int ubase_activate_dev(struct auxiliary_device *adev); int ubase_deactivate_dev(struct auxiliary_device *adev); int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid); +int ubase_get_dev_mac(struct auxiliary_device *adev, u8 *dev_addr, u8 addr_len); +int ubase_set_dev_mac(struct auxiliary_device *adev, const u8 *dev_addr, + u8 addr_len); + #endif diff --git a/include/ub/ubase/ubase_comm_hw.h b/include/ub/ubase/ubase_comm_hw.h 
index 2efac24e326856564ac7c2f8d8d3e3aa039f2406..0a572b59fc1088256feddb8c53ed25730cadee6b 100644 --- a/include/ub/ubase/ubase_comm_hw.h +++ b/include/ub/ubase/ubase_comm_hw.h @@ -17,6 +17,8 @@ #define UBASE_JFC_CTX_SIZE 128 #define UBASE_RC_CTX_SIZE 256 #define UBASE_JTG_CTX_SIZE 8 +#define UBASE_TP_CTX_SIZE 256 +#define UBASE_TPG_CTX_SIZE 64 #define UBASE_DESC_DATA_LEN 6 diff --git a/include/ub/ubase/ubase_comm_stats.h b/include/ub/ubase/ubase_comm_stats.h index 52a766e7bab0b754cd6b730e84f52d9235cfb2ea..32b3d717d69b74ea36a32ec9be85a10ba95f933c 100644 --- a/include/ub/ubase/ubase_comm_stats.h +++ b/include/ub/ubase/ubase_comm_stats.h @@ -232,8 +232,11 @@ struct ubase_perf_stats_result { u32 rx_vl_bw[UBASE_STATS_MAX_VL_NUM]; }; +void ubase_clear_eth_port_stats(struct auxiliary_device *adev); int ubase_get_ub_port_stats(struct auxiliary_device *adev, u16 port_id, struct ubase_ub_dl_stats *data); +int ubase_get_eth_port_stats(struct auxiliary_device *adev, + struct ubase_eth_mac_stats *data); int ubase_perf_stats(struct auxiliary_device *adev, u64 port_bitmap, u32 period, struct ubase_perf_stats_result *data, u32 data_size);