diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 948e67ef30fdf409afbbb5703ea0dd4c56dc0fd9..21a736174fdaf2bf34c2999c948f111bd954d0a4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ + HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ @@ -70,6 +71,10 @@ enum hclge_mbx_vlan_cfg_subcode { HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */ }; +enum hclge_mbx_tbl_cfg_subcode { + HCLGE_MBX_VPORT_LIST_CLEAR, +}; + #define HCLGE_MBX_MAX_MSG_SIZE 14 #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U #define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 5587605d6deb26a7771e3553b7ad45069e501307..6291aa9f06b084ab9629020eac533e42afc1aeed 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -233,7 +233,6 @@ struct hnae3_ae_dev { struct list_head node; u32 flag; unsigned long hw_err_reset_req; - enum hnae3_reset_type reset_type; void *priv; }; @@ -270,6 +269,8 @@ struct hnae3_ae_dev { * Set loopback * set_promisc_mode * Set promisc mode + * request_update_promisc_mode + * request to hclge(vf) to update promisc mode * set_mtu() * set mtu * get_pauseparam() @@ -354,8 +355,6 @@ struct hnae3_ae_dev { * Set vlan filter config of Ports * set_vf_vlan_filter() * Set vlan filter config of vf - * restore_vlan_table() - * Restore vlan filter entries after reset * enable_hw_strip_rxvtag() * Enable/disable hardware strip vlan tag of packets received * set_gro_en @@ -408,6 +407,7 @@ struct hnae3_ae_ops { int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, bool en_mc_pmc); + void (*request_update_promisc_mode)(struct hnae3_handle *handle); int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); void (*get_pauseparam)(struct hnae3_handle *handle, @@ -525,7 +525,6 @@ struct hnae3_ae_ops { struct ethtool_rxnfc *cmd); int (*get_fd_all_rules)(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd, u32 *rule_locs); - int (*restore_fd_rules)(struct hnae3_handle *handle); void (*enable_fd)(struct hnae3_handle *handle, bool enable); int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id, u16 flow_id, struct flow_keys *fkeys); @@ -539,7 +538,6 @@ struct hnae3_ae_ops { void (*set_timer_task)(struct hnae3_handle *handle, bool enable); int (*mac_connect_phy)(struct hnae3_handle *handle); void (*mac_disconnect_phy)(struct hnae3_handle *handle); - void (*restore_vlan_table)(struct hnae3_handle *handle); int (*get_vf_config)(struct hnae3_handle *handle, int vf, struct ifla_vf_info *ivf); int (*set_vf_link_state)(struct hnae3_handle *handle, int vf, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index c934f328c040277a0518ebdb7efce5c3428d445d..fe7fb565da19743bdc833705bcf54d2822b88f42 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -262,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h) 
dev_info(&h->pdev->dev, "dump mac tnl status\n"); dev_info(&h->pdev->dev, "dump loopback\n"); dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n"); + dev_info(&h->pdev->dev, "dump uc mac list \n"); + dev_info(&h->pdev->dev, "dump mc mac list \n"); memset(printf_buf, 0, HNS3_DBG_BUF_LEN); strncat(printf_buf, "dump reg [[bios common] [ssu ]", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ac3a48a24d867cfad686bb60623140e0f6f7dcec..c79d6a391105550190af3b3039e72383e5aa580d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -40,7 +40,6 @@ } while (0) static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); -static void hns3_remove_hw_addr(struct net_device *netdev); static const char hns3_driver_name[] = "hns3"; static const char hns3_driver_string[] = @@ -548,6 +547,13 @@ static int hns3_nic_uc_unsync(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); + /* need ignore the request of removing device address, because + * we store the device address and other addresses of uc list + * in the function's mac filter list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + if (h->ae_algo->ops->rm_uc_addr) return h->ae_algo->ops->rm_uc_addr(h, addr); @@ -595,34 +601,25 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); u8 new_flags; - int ret; new_flags = hns3_get_netdev_flags(netdev); - ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); - if (ret) { - netdev_err(netdev, "sync uc address fail\n"); - if (ret == -ENOSPC) - new_flags |= HNAE3_OVERFLOW_UPE; - } - - if (netdev->flags & IFF_MULTICAST) { - ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, - hns3_nic_mc_unsync); - if (ret) { - netdev_err(netdev, "sync mc address fail\n"); - if (ret == -ENOSPC) - new_flags |= HNAE3_OVERFLOW_MPE; - } - } + __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); /* User mode Promisc mode enable and vlan filtering is disabled to - * let all packets in. MAC-VLAN Table overflow Promisc enabled and - * vlan fitering is enabled + * let all packets in. 
*/ - hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); h->netdev_flags = new_flags; - hns3_update_promisc_mode(netdev, new_flags); + hns3_request_update_promisc_mode(h); +} + +void hns3_request_update_promisc_mode(struct hnae3_handle *handle) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (ops->request_update_promisc_mode) + ops->request_update_promisc_mode(handle); } int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) @@ -2105,7 +2102,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->pdev = pdev; ae_dev->flag = ent->driver_data; - ae_dev->reset_type = HNAE3_NONE_RESET; hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); @@ -3907,9 +3903,11 @@ static int hns3_init_mac_addr(struct net_device *netdev) eth_hw_addr_random(netdev); dev_warn(priv->dev, "using random MAC address %pM\n", netdev->dev_addr); - } else { + } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { ether_addr_copy(netdev->dev_addr, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); + } else { + return 0; } if (h->ae_algo->ops->set_mac_addr) @@ -3937,17 +3935,6 @@ static void hns3_uninit_phy(struct net_device *netdev) h->ae_algo->ops->mac_disconnect_phy(h); } -static int hns3_restore_fd_rules(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = 0; - - if (h->ae_algo->ops->restore_fd_rules) - ret = h->ae_algo->ops->restore_fd_rules(h); - - return ret; -} - static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -4119,8 +4106,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_remove_hw_addr(netdev); - if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); @@ -4191,56 +4176,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) return hns3_nic_set_real_num_queue(ndev); } -static int hns3_recover_hw_addr(struct net_device *ndev) -{ - struct netdev_hw_addr_list *list; - struct netdev_hw_addr *ha, *tmp; - int ret = 0; - - netif_addr_lock_bh(ndev); - /* go through and sync uc_addr entries to the device */ - list = &ndev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) { - ret = hns3_nic_uc_sync(ndev, ha->addr); - if (ret) - goto out; - } - - /* go through and sync mc_addr entries to the device */ - list = &ndev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) { - ret = hns3_nic_mc_sync(ndev, ha->addr); - if (ret) - goto out; - } - -out: - netif_addr_unlock_bh(ndev); - return ret; -} - -static void hns3_remove_hw_addr(struct net_device *netdev) -{ - struct netdev_hw_addr_list *list; - struct netdev_hw_addr *ha, *tmp; - - hns3_nic_uc_unsync(netdev, netdev->dev_addr); - - netif_addr_lock_bh(netdev); - /* go through and unsync uc_addr entries to the device */ - list = &netdev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_uc_unsync(netdev, ha->addr); - - /* go through and unsync mc_addr entries to the device */ - list = &netdev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - if (ha->refcount > 1) - hns3_nic_mc_unsync(netdev, ha->addr); - - netif_addr_unlock_bh(netdev); -} - static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { @@ -4399,7 +4334,6 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv) static int 
hns3_reset_notify_down_enet(struct hnae3_handle *handle) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; struct hns3_nic_priv *priv = netdev_priv(ndev); @@ -4407,15 +4341,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) return 0; - /* it is cumbersome for hardware to pick-and-choose entries for deletion - * from table space. Hence, for function reset software intervention is - * required to delete the entries - */ - if (hns3_dev_ongoing_func_reset(ae_dev)) { - hns3_remove_hw_addr(ndev); - hns3_del_all_fd_rules(ndev, false); - } - if (!netif_running(ndev)) return 0; @@ -4482,6 +4407,9 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) goto err_init_irq_fail; } + if (!hns3_is_phys_func(handle->pdev)) + hns3_init_mac_addr(netdev); + ret = hns3_client_start(handle); if (ret) { dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); @@ -4507,33 +4435,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) return ret; } -static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle) -{ - struct net_device *netdev = handle->kinfo.netdev; - bool vlan_filter_enable; - int ret; - - ret = hns3_init_mac_addr(netdev); - if (ret) - return ret; - - ret = hns3_recover_hw_addr(netdev); - if (ret) - return ret; - - ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); - if (ret) - return ret; - - vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true; - hns3_enable_vlan_filter(netdev, vlan_filter_enable); - - if (handle->ae_algo->ops->restore_vlan_table) - handle->ae_algo->ops->restore_vlan_table(handle); - - return hns3_restore_fd_rules(netdev); -} - static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; @@ -4583,9 +4484,6 @@ static int hns3_reset_notify(struct hnae3_handle *handle, case HNAE3_UNINIT_CLIENT: ret = hns3_reset_notify_uninit_enet(handle); break; - case HNAE3_RESTORE_CLIENT: - ret = hns3_reset_notify_restore_enet(handle); - break; default: break; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 4b3f0abf07155bf8b4002aa693d574070596e93d..240ba06cd0eb5186f5f0773a4c4cdb679629e5ed 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -576,15 +576,6 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) writel(value, reg_addr + reg); } -static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) -{ - return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET || - ae_dev->reset_type == HNAE3_FLR_RESET || - ae_dev->reset_type == HNAE3_VF_FUNC_RESET || - ae_dev->reset_type == HNAE3_VF_FULL_RESET || - ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET)); -} - #define hns3_read_dev(a, reg) \ hns3_read_reg((a)->io_base, (reg)) @@ -658,6 +649,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); +void hns3_request_update_promisc_mode(struct hnae3_handle *handle); #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6a0734be4a1a46bf330e8ed65bba6e4615cfb3b1..4d9c85f049dc31d92d22e365e431051cc5b1d2cb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -99,7 +99,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) h->ae_algo->ops->set_promisc_mode(h, true, true); } else { /* recover promisc mode before loopback test */ - hns3_update_promisc_mode(ndev, h->netdev_flags); + hns3_request_update_promisc_mode(h); vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true; hns3_enable_vlan_filter(ndev, vlan_filter_enable); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 66c1ad3a156bfcbf35261de6923678ab1a66b1be..6cfa8253eefc30855267269afa9aeda0e872daf0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1441,6 +1441,49 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev, hclge_dbg_dump_qs_shaper_single(hdev, qsid); } +static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf, + bool is_unicast) +{ + struct hclge_mac_node *mac_node, *tmp; + struct hclge_vport *vport; + struct list_head *list; + u32 func_id; + int ret; + + ret = kstrtouint(cmd_buf, 0, &func_id); + if (ret < 0) { + dev_err(&hdev->pdev->dev, + "dump mac list: bad command string, ret = %d\n", ret); + return -EINVAL; + } + + if (func_id >= hdev->num_alloc_vport) { + dev_err(&hdev->pdev->dev, + "function id(%u) is out of range(0-%u)\n", func_id, + hdev->num_alloc_vport - 1); + return -EINVAL; + } + + vport = &hdev->vport[func_id]; + + list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list; + + dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n", + func_id, is_unicast ? 
"uc" : "mc"); + dev_info(&hdev->pdev->dev, "mac address state\n"); + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + dev_info(&hdev->pdev->dev, "%pM %d\n", + mac_node->mac_addr, mac_node->state); + } + + spin_unlock_bh(&vport->mac_list_lock); + + return 0; +} + int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) { #define DUMP_REG "dump reg" @@ -1485,6 +1528,14 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { hclge_dbg_dump_qs_shaper(hdev, &cmd_buf[sizeof("dump qs shaper")]); + } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) { + hclge_dbg_dump_mac_list(hdev, + &cmd_buf[sizeof("dump uc mac list")], + true); + } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) { + hclge_dbg_dump_mac_list(hdev, + &cmd_buf[sizeof("dump mc mac list")], + false); } else { dev_info(&hdev->pdev->dev, "unknown command\n"); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 0618f22e6f14780e595f92d78facd37e7438835e..c74990a59e10c878c9783281eb2485e571d76b95 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -62,14 +62,16 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev); static void hclge_sync_vlan_filter(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); -static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, - u16 *allocated_size, bool is_alloc); static void hclge_rfs_filter_expire(struct hclge_dev *hdev); static void hclge_clear_arfs_rules(struct hnae3_handle *handle); static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, unsigned long *addr); static int hclge_set_default_loopback(struct hclge_dev *hdev); +static void hclge_sync_mac_table(struct hclge_dev *hdev); +static void hclge_restore_hw_table(struct hclge_dev *hdev); +static void hclge_sync_promisc_mode(struct hclge_dev *hdev); + static struct hnae3_ae_algo ae_algo; static struct workqueue_struct *hclge_wq; @@ -1687,6 +1689,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) INIT_LIST_HEAD(&vport->vlan_list); INIT_LIST_HEAD(&vport->uc_mac_list); INIT_LIST_HEAD(&vport->mc_mac_list); + spin_lock_init(&vport->mac_list_lock); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -3729,22 +3732,13 @@ static int hclge_reset_stack(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - if (ret) - return ret; - - return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT); + return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); } static int hclge_reset_prepare(struct hclge_dev *hdev) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; - /* Initialize ae_dev reset status as well, in case enet layer wants to - * know if device is undergoing reset - */ - ae_dev->reset_type = hdev->reset_type; hdev->rst_stats.reset_cnt++; /* perform reset of the stack & ae device for a client */ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); @@ -3806,7 +3800,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev) hdev->last_reset_time = jiffies; hdev->rst_stats.reset_fail_cnt = 0; hdev->rst_stats.reset_done_cnt++; - ae_dev->reset_type = HNAE3_NONE_RESET; clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); /* 
if default_reset_request has a higher level reset request, @@ -3973,6 +3966,8 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev) * updated when it is triggered by mbx. */ hclge_update_link_status(hdev); + hclge_sync_mac_table(hdev); + hclge_sync_promisc_mode(hdev); if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { delta = jiffies - hdev->last_serv_processed; @@ -4722,7 +4717,8 @@ static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, - "Set promisc mode fail, status is %d.\n", ret); + "failed to set vport %d promisc mode, ret = %d.\n", + param->vf_id, ret); return ret; } @@ -4772,6 +4768,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, en_bc_pmc); } +static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); +} + static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) { struct hclge_get_fd_mode_cmd *req; @@ -6924,8 +6928,22 @@ static void hclge_ae_stop(struct hnae3_handle *handle) int hclge_vport_start(struct hclge_vport *vport) { + struct hclge_dev *hdev = vport->back; + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); vport->last_active_jiffies = jiffies; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) { + if (vport->vport_id) { + hclge_restore_mac_table_common(vport); + hclge_restore_vport_vlan_table(vport); + } else { + hclge_restore_hw_table(hdev); + } + } + + clear_bit(vport->vport_id, hdev->vport_config_block); + return 0; } @@ -6962,17 +6980,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, } if (op == HCLGE_MAC_VLAN_ADD) { - if ((!resp_code) || (resp_code == 1)) { + if (!resp_code || resp_code == 1) return 0; - } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) { - dev_err(&hdev->pdev->dev, - "add mac addr failed for uc_overflow.\n"); - return -ENOSPC; - } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) { - dev_err(&hdev->pdev->dev, - "add mac addr failed for mc_overflow.\n"); + else if (resp_code == HCLGE_ADD_UC_OVERFLOW || + resp_code == HCLGE_ADD_MC_OVERFLOW) return -ENOSPC; - } dev_err(&hdev->pdev->dev, "add mac addr failed for undefined, code=%u.\n", @@ -7196,52 +7208,8 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, return cfg_status; } -static int hclge_init_umv_space(struct hclge_dev *hdev) -{ - u16 allocated_size = 0; - int ret; - - ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, - true); - if (ret) - return ret; - - if (allocated_size < hdev->wanted_umv_size) - dev_warn(&hdev->pdev->dev, - "Alloc umv space failed, want %u, get %u\n", - hdev->wanted_umv_size, allocated_size); - - mutex_init(&hdev->umv_mutex); - hdev->max_umv_size = allocated_size; - /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to - * preserve some unicast mac vlan table entries shared by pf - * and its vfs. 
- */ - hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); - hdev->share_umv_size = hdev->priv_umv_size + - hdev->max_umv_size % (hdev->num_req_vfs + 2); - - return 0; -} - -static int hclge_uninit_umv_space(struct hclge_dev *hdev) -{ - int ret; - - if (hdev->max_umv_size > 0) { - ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, - false); - if (ret) - return ret; - hdev->max_umv_size = 0; - } - mutex_destroy(&hdev->umv_mutex); - - return 0; -} - static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, - u16 *allocated_size, bool is_alloc) + u16 *allocated_size) { struct hclge_umv_spc_alc_cmd *req; struct hclge_desc desc; @@ -7249,25 +7217,44 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, req = (struct hclge_umv_spc_alc_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); - if (!is_alloc) - hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1); req->space_size = cpu_to_le32(space_size); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, - "%s umv space failed for cmd_send, ret =%d\n", - is_alloc ? "allocate" : "free", ret); + dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", + ret); return ret; } - if (is_alloc && allocated_size) + if (allocated_size) *allocated_size = le32_to_cpu(desc.data[1]); return 0; } +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "failed to alloc umv space, want %u, get %u\n", + hdev->wanted_umv_size, allocated_size); + + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + + return 0; +} + static void hclge_reset_umv_space(struct hclge_dev *hdev) { struct hclge_vport *vport; @@ -7278,21 +7265,25 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev) vport->used_umv_num = 0; } - mutex_lock(&hdev->umv_mutex); + mutex_lock(&hdev->vport_lock); hdev->share_umv_size = hdev->priv_umv_size + - hdev->max_umv_size % (hdev->num_req_vfs + 2); - mutex_unlock(&hdev->umv_mutex); + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + mutex_unlock(&hdev->vport_lock); } -static bool hclge_is_umv_space_full(struct hclge_vport *vport) +static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) { struct hclge_dev *hdev = vport->back; bool is_full; - mutex_lock(&hdev->umv_mutex); + if (need_lock) + mutex_lock(&hdev->vport_lock); + is_full = (vport->used_umv_num >= hdev->priv_umv_size && hdev->share_umv_size == 0); - mutex_unlock(&hdev->umv_mutex); + + if (need_lock) + mutex_unlock(&hdev->vport_lock); return is_full; } @@ -7301,7 +7292,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) { struct hclge_dev *hdev = vport->back; - mutex_lock(&hdev->umv_mutex); if (is_free) { if (vport->used_umv_num > hdev->priv_umv_size) hdev->share_umv_size++; @@ -7314,7 +7304,99 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) hdev->share_umv_size--; vport->used_umv_num++; } - mutex_unlock(&hdev->umv_mutex); +} + +static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, + const u8 *mac_addr) +{ + struct hclge_mac_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, 
list, node) + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + + return NULL; +} + +static void hclge_update_mac_node(struct hclge_mac_node *mac_node, + enum HCLGE_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGE_MAC_TO_ADD: + if (mac_node->state == HCLGE_MAC_TO_DEL) + mac_node->state = HCLGE_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGE_MAC_TO_DEL: + if (mac_node->state == HCLGE_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGE_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * ACTIVE. + */ + case HCLGE_MAC_ACTIVE: + if (mac_node->state == HCLGE_MAC_TO_ADD) + mac_node->state = HCLGE_MAC_ACTIVE; + + break; + } +} + +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_node *mac_node; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * state, or just remove it, or do nothing. + */ + mac_node = hclge_find_mac_node(list, addr); + if (mac_node) { + hclge_update_mac_node(mac_node, state); + spin_unlock_bh(&vport->mac_list_lock); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + return 0; + } + + /* if this address is never added, unnecessary to delete */ + if (state == HCLGE_MAC_TO_DEL) { + spin_unlock_bh(&vport->mac_list_lock); + dev_err(&hdev->pdev->dev, + "failed to delete address %pM from mac list\n", + addr); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&vport->mac_list_lock); + return -ENOMEM; + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&vport->mac_list_lock); + + return 0; } static int hclge_add_uc_addr(struct hnae3_handle *handle, @@ -7322,7 +7404,8 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_add_uc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, + addr); } int hclge_add_uc_addr_common(struct hclge_vport *vport, @@ -7361,15 +7444,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, */ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); if (ret == -ENOENT) { - if (!hclge_is_umv_space_full(vport)) { + mutex_lock(&hdev->vport_lock); + if (!hclge_is_umv_space_full(vport, false)) { ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); if (!ret) hclge_update_umv_space(vport, false); + mutex_unlock(&hdev->vport_lock); return ret; } + mutex_unlock(&hdev->vport_lock); - dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", - hdev->priv_umv_size); + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); return -ENOSPC; } @@ -7393,7 +7480,8 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_rm_uc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
HCLGE_MAC_ADDR_UC, + addr); } int hclge_rm_uc_addr_common(struct hclge_vport *vport, @@ -7416,8 +7504,13 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); - if (!ret) + if (!ret) { + mutex_lock(&hdev->vport_lock); hclge_update_umv_space(vport, true); + mutex_unlock(&hdev->vport_lock); + } else if (ret == -ENOENT) { + ret = 0; + } return ret; } @@ -7427,7 +7520,8 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_add_mc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, + addr); } int hclge_add_mc_addr_common(struct hclge_vport *vport, @@ -7459,7 +7553,9 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - if (status == -ENOSPC) + /* if already overflow, not to print each time */ + if (status == -ENOSPC && + !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); return status; @@ -7470,7 +7566,8 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_rm_mc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, + addr); } int hclge_rm_mc_addr_common(struct hclge_vport *vport, @@ -7505,111 +7602,354 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); - } else { - /* Maybe this mac address is in mta table, but it cannot be - * deleted here because an entry of mta represents an address - * range rather than a specific address. the delete action to - * all entries will take effect in update_mta_status called by - * hns3_nic_set_rx_mode. - */ + } else if (status == -ENOENT) { status = 0; } return status; } -void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - enum HCLGE_MAC_ADDR_TYPE mac_type) +static void hclge_sync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + int (*sync)(struct hclge_vport *, + const unsigned char *)) { - struct hclge_vport_mac_addr_cfg *mac_cfg; - struct list_head *list; + struct hclge_mac_node *mac_node, *tmp; + int ret; - if (!vport->vport_id) - return; + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = sync(vport, mac_node->mac_addr); + if (!ret) { + mac_node->state = HCLGE_MAC_ACTIVE; + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} - mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL); - if (!mac_cfg) - return; +static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + int (*unsync)(struct hclge_vport *, + const unsigned char *)) +{ + struct hclge_mac_node *mac_node, *tmp; + int ret; - mac_cfg->hd_tbl_status = true; - memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unsync(vport, mac_node->mac_addr); + if (!ret || ret == -ENOENT) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} - list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
- &vport->uc_mac_list : &vport->mc_mac_list; +static bool hclge_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + bool all_added = true; - list_add_tail(&mac_cfg->node, list); + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + if (mac_node->state == HCLGE_MAC_TO_ADD) + all_added = false; + + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means we received a TO_DEL request + * during the time window of adding the mac address into mac + * table. If the mac_node state is ACTIVE, then change it to TO_DEL, + * then it will be removed next time. Else it must be TO_ADD, + * this address hasn't been added into the mac table, + * so just remove the mac node. + */ + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclge_update_mac_node(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_DEL; + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } + + return all_added; } -void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - bool is_write_tbl, - enum HCLGE_MAC_ADDR_TYPE mac_type) +static void hclge_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) { - struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; - struct list_head *list; - bool uc_flag, mc_flag; + struct hclge_mac_node *mac_node, *tmp, *new_node; - list = (mac_type == HCLGE_MAC_ADDR_UC) ? - &vport->uc_mac_list : &vport->mc_mac_list; + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr exists in the mac list, it means we + * received a new TO_ADD request during the time window + * of configuring the mac address. If the mac node + * state is TO_ADD, and the address is already in + * the hardware (due to delete fail), we just need + * to change the mac node state to ACTIVE.
+ */ + new_node->state = HCLGE_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } + } +} - uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; - mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; +static void hclge_update_overflow_flags(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type, + bool is_all_added) +{ + if (mac_type == HCLGE_MAC_ADDR_UC) { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; + else + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; + } else { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; + else + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; + } +} - list_for_each_entry_safe(mac_cfg, tmp, list, node) { - if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) { - if (uc_flag && mac_cfg->hd_tbl_status) - hclge_rm_uc_addr_common(vport, mac_addr); +static void hclge_sync_vport_mac_table(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; + bool all_added; - if (mc_flag && mac_cfg->hd_tbl_status) - hclge_rm_mc_addr_common(vport, mac_addr); + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); - list_del(&mac_cfg->node); - kfree(mac_cfg); + /* move the mac addr to the tmp_add_list and tmp_del_list, then + * we can add/delete these mac addr outside the spin lock + */ + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: break; } } + +stop_traverse: + spin_unlock_bh(&vport->mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + if (mac_type == HCLGE_MAC_ADDR_UC) { + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_uc_addr_common); + hclge_sync_vport_mac_list(vport, &tmp_add_list, + hclge_add_uc_addr_common); + } else { + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_mc_addr_common); + hclge_sync_vport_mac_list(vport, &tmp_add_list, + hclge_add_mc_addr_common); + } + + /* if some mac addresses failed to be added/deleted, move them back to the + * mac_list, and retry next time.
+ */ + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + all_added = hclge_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_update_overflow_flags(vport, mac_type, all_added); +} + +static bool hclge_need_sync_mac_table(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) + return false; + + if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) + return true; + + return false; +} + +static void hclge_sync_mac_table(struct hclge_dev *hdev) +{ + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (!hclge_need_sync_mac_table(vport)) + continue; + + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); + } } void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, enum HCLGE_MAC_ADDR_TYPE mac_type) { - struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; - struct list_head *list; + int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_mac_node *mac_cfg, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + int ret; - list = (mac_type == HCLGE_MAC_ADDR_UC) ? - &vport->uc_mac_list : &vport->mc_mac_list; + if (mac_type == HCLGE_MAC_ADDR_UC) { + list = &vport->uc_mac_list; + unsync = hclge_rm_uc_addr_common; + } else { + list = &vport->mc_mac_list; + unsync = hclge_rm_mc_addr_common; + } - list_for_each_entry_safe(mac_cfg, tmp, list, node) { - if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status) - hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr); + INIT_LIST_HEAD(&tmp_del_list); - if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status) - hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr); + if (!is_del_list) + set_bit(vport->vport_id, hdev->vport_config_block); - mac_cfg->hd_tbl_status = false; - if (is_del_list) { + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + switch (mac_cfg->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: list_del(&mac_cfg->node); - kfree(mac_cfg); + list_add_tail(&mac_cfg->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + break; + } + } + + spin_unlock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) { + ret = unsync(vport, mac_cfg->mac_addr); + if (!ret || ret == -ENOENT) { + /* clear all mac addr from hardware, but keep these + * mac addr in the mac list, and restore them after + * vf reset finished. + */ + if (!is_del_list && + mac_cfg->state == HCLGE_MAC_ACTIVE) { + mac_cfg->state = HCLGE_MAC_TO_ADD; + } else { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + } else if (is_del_list) { + mac_cfg->state = HCLGE_MAC_TO_DEL; + } + } + + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + + spin_unlock_bh(&vport->mac_list_lock); +} + +/* remove all mac addresses when uninitializing */ +static void hclge_uninit_vport_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + + INIT_LIST_HEAD(&tmp_del_list); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + list_del(&mac_node->node); + kfree(mac_node); + break; } } + + spin_unlock_bh(&vport->mac_list_lock); + + if (mac_type == HCLGE_MAC_ADDR_UC) + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_uc_addr_common); + else + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_mc_addr_common); + + if (!list_empty(&tmp_del_list)) + dev_warn(&hdev->pdev->dev, + "uninit %s mac list for vport %u not completely.\n", + mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc", + vport->vport_id); + + list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } } -void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) +static void hclge_uninit_mac_table(struct hclge_dev *hdev) { - struct hclge_vport_mac_addr_cfg *mac, *tmp; struct hclge_vport *vport; int i; for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) { - list_del(&mac->node); - kfree(mac); - } - - list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) { - list_del(&mac->node); - kfree(mac); - } + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); } } @@ -7773,12 +8113,57 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr) +{ + struct list_head *list = &vport->uc_mac_list; + struct hclge_mac_node *old_node, *new_node; + + new_node = hclge_find_mac_node(list, new_addr); + if (!new_node) { + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + return -ENOMEM; + + new_node->state = HCLGE_MAC_TO_ADD; + ether_addr_copy(new_node->mac_addr, new_addr); + list_add(&new_node->node, list); + } else { + if (new_node->state == HCLGE_MAC_TO_DEL) + new_node->state = HCLGE_MAC_ACTIVE; + + /* make sure the new addr is in the list head, avoid dev + * addr may be not re-added into mac table for the umv space + * limitation after global/imp reset which will clear mac + * table by hardware. + */ + list_move(&new_node->node, list); + } + + if (old_addr && !ether_addr_equal(old_addr, new_addr)) { + old_node = hclge_find_mac_node(list, old_addr); + if (old_node) { + if (old_node->state == HCLGE_MAC_TO_ADD) { + list_del(&old_node->node); + kfree(old_node); + } else { + old_node->state = HCLGE_MAC_TO_DEL; + } + } + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + return 0; +} + static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + unsigned char *old_addr = NULL; int ret; /* mac addr check */ @@ -7786,39 +8171,42 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, is_broadcast_ether_addr(new_addr) || is_multicast_ether_addr(new_addr)) { dev_err(&hdev->pdev->dev, - "Change uc mac err! invalid mac:%pM.\n", + "change uc mac err! 
invalid mac: %pM.\n", new_addr); return -EINVAL; } - if ((!is_first || is_kdump_kernel()) && - hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) - dev_warn(&hdev->pdev->dev, - "remove old uc mac address fail.\n"); - - ret = hclge_add_uc_addr(handle, new_addr); + ret = hclge_pause_addr_cfg(hdev, new_addr); if (ret) { dev_err(&hdev->pdev->dev, - "add uc mac address fail, ret =%d.\n", + "failed to configure mac pause address, ret = %d\n", ret); - - if (!is_first && - hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) - dev_err(&hdev->pdev->dev, - "restore uc mac address fail.\n"); - - return -EIO; + return ret; } - ret = hclge_pause_addr_cfg(hdev, new_addr); + if (!is_first) + old_addr = hdev->hw.mac.mac_addr; + + spin_lock_bh(&vport->mac_list_lock); + ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); if (ret) { dev_err(&hdev->pdev->dev, - "configure mac pause address fail, ret =%d.\n", - ret); - return -EIO; - } + "failed to change the mac addr:%pM, ret = %d\n", + new_addr, ret); + spin_unlock_bh(&vport->mac_list_lock); + if (!is_first) + hclge_pause_addr_cfg(hdev, old_addr); + + return ret; + } + /* we must update dev addr with spin lock protection, preventing dev addr + * being removed by set_rx_mode path. + */ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + spin_unlock_bh(&vport->mac_list_lock); + + hclge_task_schedule(hdev, 0); return 0; } @@ -8398,42 +8786,80 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) } } -static void hclge_restore_vlan_table(struct hnae3_handle *handle) +void hclge_restore_vport_vlan_table(struct hclge_vport *vport) { - struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; u16 vlan_proto; - u16 state, vlan_id; - int i; + u16 vlan_id; + u16 state; + int ret; - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; - vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - state = vport->port_base_vlan_cfg.state; + vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; + vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; + state = vport->port_base_vlan_cfg.state; - if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { - hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), - vport->vport_id, vlan_id, - false); - continue; - } + if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { + clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); + hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), + vport->vport_id, vlan_id, + false); + return; + } - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { - int ret; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; + vlan->hd_tbl_status = true; + } +} - if (!vlan->hd_tbl_status) - continue; - ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), - vport->vport_id, - vlan->vlan_id, false); - if (ret) - break; +/* For global reset and imp reset, hardware will clear the mac table, + * so we change the mac address state from ACTIVE to TO_ADD, then they + * can be restored in the service task after reset complete. Furthermore, + * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to + * be restored after reset, so just remove these mac nodes from mac_list.
+ */ +static void hclge_mac_node_convert_for_reset(struct list_head *list) +{ + struct hclge_mac_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_ADD; + } else if (mac_node->state == HCLGE_MAC_TO_DEL) { + list_del(&mac_node->node); + kfree(mac_node); } } } +void hclge_restore_mac_table_common(struct hclge_vport *vport) +{ + spin_lock_bh(&vport->mac_list_lock); + + hclge_mac_node_convert_for_reset(&vport->uc_mac_list); + hclge_mac_node_convert_for_reset(&vport->mc_mac_list); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + spin_unlock_bh(&vport->mac_list_lock); +} + +static void hclge_restore_hw_table(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + + hclge_restore_mac_table_common(vport); + hclge_restore_vport_vlan_table(vport); + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + + hclge_restore_fd_entries(handle); +} + int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -9748,7 +10174,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, dev_warn(&hdev->pdev->dev, "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", vf); - else if (enable && hclge_is_umv_space_full(vport)) + else if (enable && hclge_is_umv_space_full(vport, true)) dev_warn(&hdev->pdev->dev, "vf %d mac table is full, enable spoof check may cause its packet send fail\n", vf); @@ -9925,8 +10351,16 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); hclge_stats_clear(hdev); - memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); - memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); + /* NOTE: pf reset needn't clear or restore the pf and vf table entries, + * so the tables in memory should not be cleaned here.
+ */ + if (hdev->reset_type == HNAE3_IMP_RESET || + hdev->reset_type == HNAE3_GLOBAL_RESET) { + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); + memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); + bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); + hclge_reset_umv_space(hdev); + } ret = hclge_cmd_init(hdev); if (ret) { @@ -9940,8 +10374,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - hclge_reset_umv_space(hdev); - ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -10037,12 +10469,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_clear_vf_vlan(hdev); hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); + hclge_uninit_mac_table(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); - hclge_uninit_umv_space(hdev); - /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); @@ -10056,7 +10487,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); - hclge_uninit_vport_mac_table(hdev); hclge_uninit_vport_vlan_table(hdev); ae_dev->priv = NULL; } @@ -10666,6 +11096,30 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) return hclge_config_gro(hdev, enable); } +static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + u8 tmp_flags = 0; + int ret; + + if (vport->last_promisc_flags != vport->overflow_promisc_flags) { + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + vport->last_promisc_flags = vport->overflow_promisc_flags; + } + + if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) { + tmp_flags = handle->netdev_flags | vport->last_promisc_flags; + ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, + tmp_flags & HNAE3_MPE); + if (!ret) { + clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + hclge_enable_vlan_filter(handle, + tmp_flags & HNAE3_VLAN_FLTR); + } + } +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -10678,6 +11132,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_vector = hclge_get_vector, .put_vector = hclge_put_vector, .set_promisc_mode = hclge_set_promisc_mode, + .request_update_promisc_mode = hclge_request_update_promisc_mode, .set_loopback = hclge_set_loopback, .start = hclge_ae_start, .stop = hclge_ae_stop, @@ -10739,7 +11194,6 @@ static const struct hnae3_ae_ops hclge_ops = { .get_fd_rule_cnt = hclge_get_fd_rule_cnt, .get_fd_rule_info = hclge_get_fd_rule_info, .get_fd_all_rules = hclge_get_all_rules, - .restore_fd_rules = hclge_restore_fd_entries, .enable_fd = hclge_enable_fd, .add_arfs_entry = hclge_add_fd_entry_by_arfs, .dbg_run_cmd = hclge_dbg_run_cmd, @@ -10752,7 +11206,6 @@ static const struct hnae3_ae_ops hclge_ops = { .set_timer_task = hclge_set_timer_task, .mac_connect_phy = hclge_mac_connect_phy, .mac_disconnect_phy = hclge_mac_disconnect_phy, - .restore_vlan_table = hclge_restore_vlan_table, .get_vf_config = hclge_get_vf_config, .set_vf_link_state = hclge_set_vf_link_state, .set_vf_spoofchk = hclge_set_vf_spoofchk, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index a58c26200ea059d36abb1c63a374229d6168bf71..913c4f67740480f73dff2c08f1cf243e5566f3f9 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -217,6 +217,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_STATISTICS_UPDATING, HCLGE_STATE_CMD_DISABLE, HCLGE_STATE_LINK_UPDATING, + HCLGE_STATE_PROMISC_CHANGED, HCLGE_STATE_RST_FAIL, HCLGE_STATE_MAX }; @@ -630,9 +631,15 @@ struct hclge_fd_ad_data { u16 rule_id; }; -struct hclge_vport_mac_addr_cfg { +enum HCLGE_MAC_NODE_STATE { + HCLGE_MAC_TO_ADD, + HCLGE_MAC_TO_DEL, + HCLGE_MAC_ACTIVE +}; + +struct hclge_mac_node { struct list_head node; - int hd_tbl_status; + enum HCLGE_MAC_NODE_STATE state; u8 mac_addr[ETH_ALEN]; }; @@ -805,6 +812,8 @@ struct hclge_dev { unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + struct hclge_fd_cfg fd_cfg; struct hlist_head fd_rule_list; spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */ @@ -822,7 +831,6 @@ struct hclge_dev { u16 priv_umv_size; /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; - struct mutex umv_mutex; /* protect share_umv_size */ DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); @@ -866,6 +874,7 @@ struct hclge_rss_tuple_cfg { enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_ALIVE, + HCLGE_VPORT_STATE_MAC_TBL_CHANGE, HCLGE_VPORT_STATE_MAX }; @@ -922,6 +931,10 @@ struct hclge_vport { u32 mps; /* Max packet size */ struct hclge_vf_info vf_info; + u8 overflow_promisc_flags; + u8 last_promisc_flags; + + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ struct list_head uc_mac_list; /* Store VF unicast table */ struct list_head mc_mac_list; /* Store VF multicast table */ struct list_head vlan_list; /* Store VF vlan table */ @@ -977,16 +990,18 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf); u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); int hclge_notify_client(struct hclge_dev *hdev, enum hnae3_reset_notify_type type); -void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - enum HCLGE_MAC_ADDR_TYPE mac_type); -void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - bool is_write_tbl, - enum HCLGE_MAC_ADDR_TYPE mac_type); +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr); +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr); void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, enum HCLGE_MAC_ADDR_TYPE mac_type); -void hclge_uninit_vport_mac_table(struct hclge_dev *hdev); void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); +void hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_vlan_table(struct hclge_vport *vport); int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info); int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 103c2ec777b09342c05cd8bdc20d19c215f25fa5..ac70fafd15d5168136ad9ef4d197540fa8677786 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -275,26 +275,17 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, if (!is_valid_ether_addr(mac_addr)) return -EINVAL; - hclge_rm_uc_addr_common(vport, old_addr); - status = hclge_add_uc_addr_common(vport, mac_addr); - if (status) { - hclge_add_uc_addr_common(vport, old_addr); - } else { - hclge_rm_vport_mac_table(vport, mac_addr, - false, HCLGE_MAC_ADDR_UC); - hclge_add_vport_mac_table(vport, mac_addr, - HCLGE_MAC_ADDR_UC); - } + spin_lock_bh(&vport->mac_list_lock); + status = hclge_update_mac_node_for_dev_addr(vport, old_addr, + mac_addr); + spin_unlock_bh(&vport->mac_list_lock); + hclge_task_schedule(hdev, 0); } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) { - status = hclge_add_uc_addr_common(vport, mac_addr); - if (!status) - hclge_add_vport_mac_table(vport, mac_addr, - HCLGE_MAC_ADDR_UC); + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_UC, mac_addr); } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { - status = hclge_rm_uc_addr_common(vport, mac_addr); - if (!status) - hclge_rm_vport_mac_table(vport, mac_addr, - false, HCLGE_MAC_ADDR_UC); + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_UC, mac_addr); } else { dev_err(&hdev->pdev->dev, "failed to set unicast mac addr, unknown subcode %u\n", @@ -310,18 +301,13 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, { const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); struct hclge_dev *hdev = vport->back; - int status; if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) { - status = hclge_add_mc_addr_common(vport, mac_addr); - if (!status) - hclge_add_vport_mac_table(vport, mac_addr, - HCLGE_MAC_ADDR_MC); + hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_MC, mac_addr); } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { - status = hclge_rm_mc_addr_common(vport, mac_addr); - if (!status) - hclge_rm_vport_mac_table(vport, mac_addr, - false, HCLGE_MAC_ADDR_MC); + hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_MC, mac_addr); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %u\n", @@ -329,7 +315,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, return -EIO; } - return status; + return 0; } int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, @@ -643,6 +629,23 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev) ae_dev->ops->reset_event(hdev->pdev, NULL); } +static void hclge_handle_vf_tbl(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vf_vlan_cfg *msg_cmd; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) { + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, true); + } else { + dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n", + msg_cmd->subcode); + } +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; @@ -650,6 +653,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) struct hclge_mbx_vf_to_pf_cmd *req; struct hclge_vport *vport; struct hclge_desc *desc; + bool is_del = false; unsigned int flag; int ret = 0; @@ -767,11 +771,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) break; case HCLGE_MBX_GET_VF_FLR_STATUS: case HCLGE_MBX_VF_UNINIT: - 
hclge_rm_vport_all_mac_table(vport, true, + is_del = req->msg.code == HCLGE_MBX_VF_UNINIT; + hclge_rm_vport_all_mac_table(vport, is_del, HCLGE_MAC_ADDR_UC); - hclge_rm_vport_all_mac_table(vport, true, + hclge_rm_vport_all_mac_table(vport, is_del, HCLGE_MAC_ADDR_MC); - hclge_rm_vport_all_vlan_table(vport, true); + hclge_rm_vport_all_vlan_table(vport, is_del); break; case HCLGE_MBX_GET_MEDIA_TYPE: hclge_get_vf_media_type(vport, &resp_msg); @@ -785,6 +790,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev) case HCLGE_MBX_NCSI_ERROR: hclge_handle_ncsi_error(hdev); break; + case HCLGE_MBX_HANDLE_VF_TBL: + hclge_handle_vf_tbl(vport, req); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %u\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e02d427131eeb759eeea79509cd0179064a3f526..32341dcaa6c1aec77b7ab40c886b70083a619ad7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1164,6 +1164,27 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, en_bc_pmc); } +static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); +} + +static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->nic; + bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; + bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; + int ret; + + if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { + ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); + if (!ret) + clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + } +} + static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, int stream_id, bool enable) { @@ -1245,10 +1266,12 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, int status; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); - send_msg.subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : - HCLGE_MBX_MAC_VLAN_UC_MODIFY; + send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; ether_addr_copy(send_msg.data, new_mac_addr); - ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); + if (is_first && !hdev->has_pf_mac) + eth_zero_addr(&send_msg.data[ETH_ALEN]); + else + ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); if (!status) ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); @@ -1256,54 +1279,302 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, return status; } -static int hclgevf_add_uc_addr(struct hnae3_handle *handle, - const unsigned char *addr) +static struct hclgevf_mac_addr_node * +hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + + return NULL; +} + +static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGEVF_MAC_TO_ADD: + if (mac_node->state == HCLGEVF_MAC_TO_DEL) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGEVF_MAC_TO_DEL: + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGEVF_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * HCLGEVF_MAC_ACTIVE + */ + case HCLGEVF_MAC_ACTIVE: + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + } +} + +static int hclgevf_update_mac_list(struct hnae3_handle *handle, + enum HCLGEVF_MAC_NODE_STATE state, + enum HCLGEVF_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclge_vf_to_pf_msg send_msg; + struct hclgevf_mac_addr_node *mac_node; + struct list_head *list; - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_ADD); - ether_addr_copy(send_msg.data, addr); - return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; + + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * state, or just remove it, or do nothing.
+ */ + mac_node = hclgevf_find_mac_node(list, addr); + if (mac_node) { + hclgevf_update_mac_node(mac_node, state); + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; + } + /* if this address is never added, unnecessary to delete */ + if (state == HCLGEVF_MAC_TO_DEL) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOMEM; + } + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; +} + +static int hclgevf_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_UC, addr); } static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, const unsigned char *addr) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclge_vf_to_pf_msg send_msg; - - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_REMOVE); - ether_addr_copy(send_msg.data, addr); - return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_UC, addr); } static int hclgevf_add_mc_addr(struct hnae3_handle *handle, const unsigned char *addr) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclge_vf_to_pf_msg send_msg; - - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_ADD); - ether_addr_copy(send_msg.data, addr); - return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_MC, addr); } static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, const unsigned char *addr) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_MC, addr); +} + +static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, + struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ struct hclge_vf_to_pf_msg send_msg; + u8 code, subcode; - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_REMOVE); - ether_addr_copy(send_msg.data, addr); + if (mac_type == HCLGEVF_MAC_ADDR_UC) { + code = HCLGE_MBX_SET_UNICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; + } else { + code = HCLGE_MBX_SET_MULTICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; + } + + hclgevf_build_send_msg(&send_msg, code, subcode); + ether_addr_copy(send_msg.data, mac_node->mac_addr); return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } +static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, + struct list_head *list, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to configure mac %pM, state = %d, ret = %d\n", + mac_node->mac_addr, mac_node->state, ret); + return; + } + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { + mac_node->state = HCLGEVF_MAC_ACTIVE; + } else { + list_del(&mac_node->node); + 
kfree(mac_node); + } + } +} + +static void hclgevf_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means a TO_DEL request was received + * during the time window of sending the mac config request + * to the PF. If the mac_node state is ACTIVE, change it to + * TO_DEL so it gets removed next time; if it is TO_ADD, the + * TO_ADD request failed, so just remove the mac node. + */ + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclgevf_update_mac_node(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { + mac_node->state = HCLGEVF_MAC_TO_DEL; + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +static void hclgevf_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr exists in the mac list, it means a + * new TO_ADD request was received during the time + * window of sending the mac addr configure request to + * the PF, so just change the mac state to ACTIVE. + */ + new_node->state = HCLGEVF_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } + } +} + +static void hclgevf_clear_list(struct list_head *list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } +} + +static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; + + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + /* move the mac addrs to the tmp_add_list and tmp_del_list, so + * they can be added/deleted outside the spin lock + */ + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; + + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGEVF_MAC_TO_DEL: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGEVF_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: + break; + } + } + +stop_traverse: + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); + hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); + + /* if adding/deleting some mac addresses failed, move them back + * to the mac_list and retry on the next service task.
+ */ + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + hclgevf_sync_from_del_list(&tmp_del_list, list); + hclgevf_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); +} + +static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) +{ + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); +} + +static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) +{ + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + hclgevf_clear_list(&hdev->mac_table.uc_mac_list); + hclgevf_clear_list(&hdev->mac_table.mc_mac_list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); +} + static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) @@ -1506,10 +1777,6 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) if (ret) return ret; - ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); - if (ret) - return ret; - /* clear handshake status with IMP */ hclgevf_reset_handshake(hdev, false); @@ -1589,13 +1856,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; - /* Initialize ae_dev reset status as well, in case enet layer wants to - * know if device is undergoing reset - */ - ae_dev->reset_type = hdev->reset_type; hdev->rst_stats.rst_cnt++; rtnl_lock(); @@ -1610,7 +1872,6 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; hdev->rst_stats.hw_rst_done_cnt++; @@ -1625,7 +1886,6 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) } hdev->last_reset_time = jiffies; - ae_dev->reset_type = HNAE3_NONE_RESET; hdev->rst_stats.rst_done_cnt++; hdev->rst_stats.rst_fail_cnt = 0; clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); @@ -1951,6 +2211,10 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) hclgevf_sync_vlan_filter(hdev); + hclgevf_sync_mac_table(hdev); + + hclgevf_sync_promisc_mode(hdev); + hdev->last_serv_processed = jiffies; out: @@ -2313,6 +2577,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) mutex_init(&hdev->mbx_resp.mbx_mutex); sema_init(&hdev->reset_sem, 1); + spin_lock_init(&hdev->mac_table.mac_list_lock); + INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); + INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); + /* bring the device down */ set_bit(HCLGEVF_STATE_DOWN, &hdev->state); } @@ -2695,6 +2963,15 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev) return ret; } +static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, + HCLGE_MBX_VPORT_LIST_CLEAR); + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -2730,6 +3007,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + dev_info(&hdev->pdev->dev, "Reset done\n"); return 0; @@ -2802,6 +3081,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + /* ensure vf tbl list is empty before init */ + ret = hclgevf_clear_vport_list(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to clear tbl list configuration, ret = %d.\n", + ret); + goto err_config; + } + ret =
hclgevf_init_vlan_config(hdev); if (ret) { dev_err(&hdev->pdev->dev, @@ -2846,6 +3134,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) hclgevf_pci_uninit(hdev); hclgevf_cmd_uninit(hdev); + hclgevf_uninit_mac_list(hdev); } static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -3213,6 +3502,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_timer_task = hclgevf_set_timer_task, .get_link_mode = hclgevf_get_link_mode, .set_promisc_mode = hclgevf_set_promisc_mode, + .request_update_promisc_mode = hclgevf_request_update_promisc_mode, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 3b88d866facc257169b24dd75ca25899d9d9a361..f19583c4bc9b8ef2c794f1cbea4608093c5f032e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -148,6 +148,7 @@ enum hclgevf_states { HCLGEVF_STATE_MBX_HANDLING, HCLGEVF_STATE_CMD_DISABLE, HCLGEVF_STATE_LINK_UPDATING, + HCLGEVF_STATE_PROMISC_CHANGED, HCLGEVF_STATE_RST_FAIL, }; @@ -234,6 +235,29 @@ struct hclgevf_rst_stats { u32 rst_fail_cnt; /* the number of VF reset fail */ }; +enum HCLGEVF_MAC_ADDR_TYPE { + HCLGEVF_MAC_ADDR_UC, + HCLGEVF_MAC_ADDR_MC +}; + +enum HCLGEVF_MAC_NODE_STATE { + HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ACTIVE +}; + +struct hclgevf_mac_addr_node { + struct list_head node; + enum HCLGEVF_MAC_NODE_STATE state; + u8 mac_addr[ETH_ALEN]; +}; + +struct hclgevf_mac_table_cfg { + spinlock_t mac_list_lock; /* protect mac addresses to be added/deleted */ + struct list_head uc_mac_list; + struct list_head mc_mac_list; +}; + struct hclgevf_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -282,6 +306,8 @@ struct hclgevf_dev { unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + struct hclgevf_mac_table_cfg mac_table; + bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
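
Note: the three-state bookkeeping introduced above (HCLGEVF_MAC_TO_ADD / HCLGEVF_MAC_TO_DEL / HCLGEVF_MAC_ACTIVE) is easiest to follow as a small state machine. The following is a minimal userspace sketch of the transition rules in hclgevf_update_mac_node(); it is illustrative only, not driver code: MAC_REMOVED is an invented stand-in for the node being freed, and the list handling, locking and mailbox traffic of the real driver are omitted.

/* Illustrative model of the VF MAC-node state transitions (assumption:
 * a simplified stand-alone sketch, not part of the hns3 driver).
 */
#include <stdio.h>

enum mac_node_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE, MAC_REMOVED };

/* Mirror of the rules in hclgevf_update_mac_node():
 *  - a TO_ADD request on a node pending deletion cancels the deletion;
 *  - a TO_DEL request on a node the PF never saw (still TO_ADD) simply
 *    drops the node (modelled here as MAC_REMOVED);
 *  - ACTIVE is only applied from the tmp_add_list path and promotes a
 *    successfully configured TO_ADD node.
 */
static enum mac_node_state transition(enum mac_node_state cur,
				      enum mac_node_state req)
{
	switch (req) {
	case MAC_TO_ADD:
		return (cur == MAC_TO_DEL) ? MAC_ACTIVE : cur;
	case MAC_TO_DEL:
		return (cur == MAC_TO_ADD) ? MAC_REMOVED : MAC_TO_DEL;
	case MAC_ACTIVE:
		return (cur == MAC_TO_ADD) ? MAC_ACTIVE : cur;
	default:
		return cur;
	}
}

int main(void)
{
	static const char * const name[] = {
		"TO_ADD", "TO_DEL", "ACTIVE", "REMOVED"
	};

	/* add request arrives while a delete is still pending */
	printf("TO_DEL + add  -> %s\n", name[transition(MAC_TO_DEL, MAC_TO_ADD)]);
	/* delete request for an address the PF never saw */
	printf("TO_ADD + del  -> %s\n", name[transition(MAC_TO_ADD, MAC_TO_DEL)]);
	/* periodic service task confirms a pending add */
	printf("TO_ADD + sync -> %s\n", name[transition(MAC_TO_ADD, MAC_ACTIVE)]);
	return 0;
}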
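Note: the promiscuous-mode handling follows the same deferred pattern: .request_update_promisc_mode only sets HCLGEVF_STATE_PROMISC_CHANGED, and hclgevf_sync_promisc_mode() applies it from the periodic service task, clearing the bit only when the mailbox call succeeds so a failure is retried on the next pass. Below is a rough userspace model of that retry behaviour; apply_promisc() is a hypothetical stand-in for hclgevf_set_promisc_mode() and is assumed to fail transiently on its first call.

/* Userspace sketch of the "set a flag, apply later, retry on failure"
 * pattern (assumption: not driver code; the flag replaces the hdev
 * state bit and the mailbox call is faked).
 */
#include <stdbool.h>
#include <stdio.h>

static bool promisc_changed;

/* pretend the first mailbox attempt fails and the second succeeds */
static int apply_promisc(bool en_uc, bool en_mc)
{
	static int calls;

	printf("apply promisc uc=%d mc=%d\n", en_uc, en_mc);
	return ++calls < 2 ? -1 : 0;
}

static void request_update_promisc_mode(void)
{
	promisc_changed = true;	/* just mark it, no mailbox traffic here */
}

static void sync_promisc_mode(bool en_uc, bool en_mc)
{
	/* clear the flag only on success so the change is retried */
	if (promisc_changed && !apply_promisc(en_uc, en_mc))
		promisc_changed = false;
}

int main(void)
{
	request_update_promisc_mode();
	sync_promisc_mode(true, false);	/* fails, flag stays set */
	sync_promisc_mode(true, false);	/* succeeds, flag cleared */
	printf("pending=%d\n", promisc_changed);
	return 0;
}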