diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index eda9c5b971dee6..c454590078025c 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -19,8 +19,10 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd, pd->vport_use_count--; WARN_ON(pd->vport_use_count < 0); - if (!pd->vport_use_count) + if (!pd->vport_use_count) { + mana_destroy_eq(mpc); mana_uncfg_vport(mpc); + } mutex_unlock(&pd->vport_mutex); } @@ -54,15 +56,21 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd, return err; } - mutex_unlock(&pd->vport_mutex); pd->tx_shortform_allowed = mpc->tx_shortform_allowed; pd->tx_vp_offset = mpc->tx_vp_offset; + err = mana_create_eq(mpc); + if (err) { + mana_uncfg_vport(mpc); + pd->vport_use_count--; + } + + mutex_unlock(&pd->vport_mutex); ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n", mpc->port_handle, pd->pdn, doorbell_id); - return 0; + return err; } int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) @@ -730,6 +738,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) { struct gdma_context *gc = mdev_to_gc(mdev); struct gdma_queue_spec spec = {}; + struct gdma_irq_context *gic; int err, i; spec.type = GDMA_EQ; @@ -740,6 +749,8 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; spec.eq.msix_index = 0; + gic = gdma_get_gic(gc, false, 0, 0, &spec.eq.msix_index); + err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq); if (err) return err; @@ -753,6 +764,9 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) spec.eq.callback = NULL; for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) { spec.eq.msix_index = (i + 1) % gc->num_msix_usable; + + gic = gdma_get_gic(gc, false, 0, 0, &spec.eq.msix_index); + err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]); if (err) goto destroy_eqs; @@ -772,12 +786,16 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) void mana_ib_destroy_eqs(struct mana_ib_dev *mdev) { struct gdma_context *gc = mdev_to_gc(mdev); - int i; + int i, msi; mana_gd_destroy_queue(gc, mdev->fatal_err_eq); + gdma_put_gic(gc, false, 0); - for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) + for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) { mana_gd_destroy_queue(gc, mdev->eqs[i]); + msi = (i + 1) % gc->num_msix_usable; + gdma_put_gic(gc, false, msi); + } kfree(mdev->eqs); } diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c index c928af58f38bfe..9bdd413b807172 100644 --- a/drivers/infiniband/hw/mana/qp.c +++ b/drivers/infiniband/hw/mana/qp.c @@ -189,7 +189,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, cq_spec.gdma_region = cq->queue.gdma_region; cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE; cq_spec.modr_ctx_id = 0; - eq = &mpc->ac->eqs[cq->comp_vector]; + eq = &mpc->eqs[cq->comp_vector % mpc->num_queues]; cq_spec.attached_eq = eq->eq->id; ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ, @@ -341,7 +341,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE; cq_spec.modr_ctx_id = 0; eq_vec = send_cq->comp_vector; - eq = &mpc->ac->eqs[eq_vec]; + eq = &mpc->eqs[eq_vec % mpc->num_queues]; cq_spec.attached_eq = eq->eq->id; err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec, diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 
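/*
 * A minimal, self-contained model (not part of the patch) of the pattern the
 * mana_ib_cfg_vport()/mana_ib_uncfg_vport() hunks above follow: the per-vPort
 * EQ is created while the vport use count is held, rolled back if EQ creation
 * fails, and torn down only when the last user unconfigures the vport.  All
 * *_model names and the file-scope lock are invented for this sketch.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;

struct vport_model {
	int use_count;
	bool eq_created;
};

static int create_eq_model(struct vport_model *v)   { v->eq_created = true;  return 0; }
static void destroy_eq_model(struct vport_model *v) { v->eq_created = false; }

int cfg_vport_model(struct vport_model *v)
{
	int err;

	pthread_mutex_lock(&vport_lock);
	v->use_count++;			/* vport configured for this user */
	err = create_eq_model(v);
	if (err)
		v->use_count--;		/* roll back, mirroring the error path above */
	pthread_mutex_unlock(&vport_lock);
	return err;
}

void uncfg_vport_model(struct vport_model *v)
{
	pthread_mutex_lock(&vport_lock);
	if (--v->use_count == 0)
		destroy_eq_model(v);	/* last user: the EQ goes away with the vport */
	pthread_mutex_unlock(&vport_lock);
}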
f4b82bccd1727a..19f56a22f4ca80 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -70,6 +70,8 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev) struct gdma_context *gc = pci_get_drvdata(pdev); struct gdma_query_max_resources_resp resp = {}; struct gdma_general_req req = {}; + unsigned int max_num_queues; + u16 num_ports; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES, @@ -115,6 +117,30 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev) if (gc->max_num_queues > gc->num_msix_usable - 1) gc->max_num_queues = gc->num_msix_usable - 1; + err = mana_gd_query_device_cfg(gc, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, + MANA_MICRO_VERSION, &num_ports); + if (err) + return err; + + /* + * Adjust gc->max_num_queues returned from the SOC to allow dedicated MSIx + * for each vPort. Reduce max_num_queues to no less than 16 if necessary + */ + max_num_queues = (gc->num_msix_usable - 1) / num_ports; + max_num_queues = roundup_pow_of_two(max_num_queues); + if (max_num_queues < 16) + max_num_queues = 16; + + /* + * Use dedicated MSIx for EQs whenever possible, use MSIx sharing for + * Ethernet EQs when (max_num_queues * num_ports > num_msix_usable - 1) + */ + gc->max_num_queues = min(gc->max_num_queues, max_num_queues); + if (gc->max_num_queues * num_ports > gc->num_msix_usable - 1) + gc->msi_sharing = true; + + dev_info(gc->dev, "MSI sharing mode %d max queues %d\n", gc->msi_sharing, gc->max_num_queues); + return 0; } @@ -497,6 +523,7 @@ static void mana_gd_process_eq_events(void *arg) struct gdma_queue *eq = arg; struct gdma_context *gc; struct gdma_eqe *eqe; + unsigned int arm_bit; u32 head, num_eqe; int i; @@ -536,16 +563,48 @@ static void mana_gd_process_eq_events(void *arg) eq->head++; } + /* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. 
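/*
 * Compilable model (not part of the patch) of the queue/MSI-X sizing logic in
 * the mana_gd_query_max_resources() hunk above: give each vPort a power-of-two
 * number of vectors (at least 16), cap it at what the SoC reports, and fall
 * back to MSI sharing when the per-port demand exceeds the usable vectors
 * (one vector is reserved for the HWC).  rup_pow2() is a userspace stand-in
 * for the kernel's roundup_pow_of_two(); all other names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int rup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

unsigned int size_queues_model(unsigned int soc_max, unsigned int num_msix_usable,
			       unsigned int num_ports, bool *msi_sharing)
{
	unsigned int max_q = rup_pow2((num_msix_usable - 1) / num_ports);

	if (max_q < 16)
		max_q = 16;
	if (max_q > soc_max)
		max_q = soc_max;

	/* sharing is needed if dedicated vectors for every port don't fit */
	*msi_sharing = max_q * num_ports > num_msix_usable - 1;
	return max_q;
}

int main(void)
{
	bool sharing;
	unsigned int q = size_queues_model(32, 65, 4, &sharing);

	printf("max queues %u, sharing %d\n", q, sharing);	/* prints: 16, sharing 0 */
	return 0;
}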
*/ + if (mana_gd_is_hwc(eq->gdma_dev)) { + arm_bit = SET_ARM_BIT; + } else if (eq->eq.work_done < eq->eq.budget && + napi_complete_done(&eq->eq.napi, eq->eq.work_done)) { + arm_bit = SET_ARM_BIT; + } else { + arm_bit = 0; + } + head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS); mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id, - head, SET_ARM_BIT); + head, arm_bit); +} + +static int mana_poll(struct napi_struct *napi, int budget) +{ + struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi); + + eq->eq.work_done = 0; + eq->eq.budget = budget; + + mana_gd_process_eq_events(eq); + + return min(eq->eq.work_done, budget); +} + +static void mana_gd_schedule_napi(void *arg) +{ + struct gdma_queue *eq = arg; + struct napi_struct *napi; + + napi = &eq->eq.napi; + napi_schedule_irqoff(napi); } static int mana_gd_register_irq(struct gdma_queue *queue, const struct gdma_queue_spec *spec) { struct gdma_dev *gd = queue->gdma_dev; + bool is_mana = mana_gd_is_mana(gd); struct gdma_irq_context *gic; struct gdma_context *gc; unsigned int msi_index; @@ -570,6 +629,14 @@ static int mana_gd_register_irq(struct gdma_queue *queue, if (WARN_ON(!gic)) return -EINVAL; + if (is_mana) { + netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll); +// netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll, +// NAPI_POLL_WEIGHT); + napi_enable(&queue->eq.napi); + gic->handler = mana_gd_schedule_napi; + } + spin_lock_irqsave(&gic->lock, flags); list_add_rcu(&queue->entry, &gic->eq_list); spin_unlock_irqrestore(&gic->lock, flags); @@ -606,7 +673,6 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue) } spin_unlock_irqrestore(&gic->lock, flags); - queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; synchronize_rcu(); } @@ -720,6 +786,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd, out: dev_err(dev, "Failed to create EQ: %d\n", err); mana_gd_destroy_eq(gc, false, queue); + queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; return err; } @@ -1358,6 +1425,129 @@ static irqreturn_t mana_gd_intr(int irq, void *arg) return IRQ_HANDLED; } +void gdma_put_gic(struct gdma_context *gc, bool use_bitmap, int msi) +{ + struct pci_dev *dev = to_pci_dev(gc->dev); + struct msi_map irq_map; + struct gdma_irq_context *gic; + int irq; + + mutex_lock(&gc->gic_mutex); + + gic = xa_load(&gc->irq_contexts, msi); + if (WARN_ON(!gic)) { + mutex_unlock(&gc->gic_mutex); + return; + } + + if (!refcount_dec_and_test(&gic->refcount)) + goto clear_bitmap; + + irq = pci_irq_vector(dev, msi); + + irq_update_affinity_hint(irq, NULL); + free_irq(irq, gic); + + irq_map.virq = irq; + irq_map.index = msi; + pci_msix_free_irq(dev, irq_map); + + xa_erase(&gc->irq_contexts, msi); + kfree(gic); + +clear_bitmap: + if (use_bitmap) + clear_bit(msi, gc->msi_bitmap); + + mutex_unlock(&gc->gic_mutex); +} +EXPORT_SYMBOL_NS(gdma_put_gic, "NET_MANA"); + +struct gdma_irq_context *gdma_get_gic(struct gdma_context *gc, bool use_bitmap, + u16 port_index, int queue_index, + int *msi_requested) +{ + struct gdma_irq_context *gic; + struct pci_dev *dev = to_pci_dev(gc->dev); + struct msi_map irq_map; + int irq; + int msi; + int err; + + mutex_lock(&gc->gic_mutex); + + if (use_bitmap) { + msi = find_first_zero_bit(gc->msi_bitmap, gc->num_msix_usable); + *msi_requested = msi; + } else { + msi = *msi_requested; + } + + gic = xa_load(&gc->irq_contexts, msi); + if (gic) { + refcount_inc(&gic->refcount); + if (use_bitmap) + set_bit(msi, gc->msi_bitmap); + goto out; + } + + irq = pci_irq_vector(dev, msi); + if (irq == -EINVAL) { + irq_map = 
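/*
 * Minimal model (not part of the patch) of the re-arm decision added to
 * mana_gd_process_eq_events() above, now that MANA EQs are driven by NAPI
 * (the hard IRQ only calls napi_schedule_irqoff() via mana_gd_schedule_napi()):
 * the HWC event queue is always re-armed, while a MANA EQ is re-armed only
 * once the poll loop finished under budget and napi_complete_done() returned
 * true.  SET_ARM_BIT_MODEL and eq_arm_bit() are names invented for the sketch.
 */
#include <stdbool.h>

#define SET_ARM_BIT_MODEL 1u	/* placeholder for the hardware SET_ARM_BIT value */

unsigned int eq_arm_bit(bool is_hwc, int work_done, int budget, bool napi_completed)
{
	if (is_hwc)
		return SET_ARM_BIT_MODEL;
	if (work_done < budget && napi_completed)
		return SET_ARM_BIT_MODEL;
	return 0;	/* budget exhausted: NAPI will poll again, leave the EQ un-armed */
}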
pci_msix_alloc_irq_at(dev, msi, NULL); + if (!irq_map.virq) { + err = irq_map.index; + dev_err(gc->dev, + "Failed to alloc irq_map msi %d err %d\n", + msi, err); + gic = NULL; + goto out; + } + irq = irq_map.virq; + msi = irq_map.index; + } + + gic = kzalloc(sizeof(*gic), GFP_KERNEL); + if (!gic) { + dev_err(gc->dev, "Failed to allocate gic\n"); + goto out; + } + gic->handler = mana_gd_process_eq_events; + gic->msi = msi; + gic->irq = irq; + INIT_LIST_HEAD(&gic->eq_list); + spin_lock_init(&gic->lock); + + if (!gic->msi) + snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s", + pci_name(dev)); + else if (use_bitmap) + snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_p%dq%d@pci:%s", + port_index, queue_index, pci_name(dev)); + else + snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", + queue_index, pci_name(dev)); + + err = request_irq(irq, mana_gd_intr, 0, gic->name, gic); + if (err) { + dev_err(gc->dev, "Failed to request irq %d %s\n", + irq, gic->name); + kfree(gic); + gic = NULL; + goto out; + } + + refcount_set(&gic->refcount, 1); + xa_store(&gc->irq_contexts, msi, gic, GFP_KERNEL); + + if (use_bitmap) + set_bit(msi, gc->msi_bitmap); + +out: + mutex_unlock(&gc->gic_mutex); + return gic; +} +EXPORT_SYMBOL_NS(gdma_get_gic, "NET_MANA"); + int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r) { r->map = bitmap_zalloc(res_avail, GFP_KERNEL); @@ -1473,17 +1663,11 @@ static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec) * further used in irq_setup() */ for (i = 1; i <= nvec; i++) { - gic = kzalloc(sizeof(*gic), GFP_KERNEL); + gic = gdma_get_gic(gc, false, 0, i, &i); if (!gic) { err = -ENOMEM; goto free_irq; } - gic->handler = mana_gd_process_eq_events; - INIT_LIST_HEAD(&gic->eq_list); - spin_lock_init(&gic->lock); - - snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", - i - 1, pci_name(pdev)); /* one pci vector is already allocated for HWC */ irqs[i - 1] = pci_irq_vector(pdev, i); @@ -1491,12 +1675,6 @@ static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec) err = irqs[i - 1]; goto free_current_gic; } - - err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic); - if (err) - goto free_current_gic; - - xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); } /* @@ -1523,14 +1701,8 @@ static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec) free_irq: for (i -= 1; i > 0; i--) { irq = pci_irq_vector(pdev, i); - gic = xa_load(&gc->irq_contexts, i); - if (WARN_ON(!gic)) - continue; - irq_update_affinity_hint(irq, NULL); - free_irq(irq, gic); - xa_erase(&gc->irq_contexts, i); - kfree(gic); + gdma_put_gic(gc, false, i); } kfree(irqs); return err; @@ -1551,34 +1723,11 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec) start_irqs = irqs; for (i = 0; i < nvec; i++) { - gic = kzalloc(sizeof(*gic), GFP_KERNEL); + gic = gdma_get_gic(gc, false, 0, i, &i); if (!gic) { err = -ENOMEM; goto free_irq; } - - gic->handler = mana_gd_process_eq_events; - INIT_LIST_HEAD(&gic->eq_list); - spin_lock_init(&gic->lock); - - if (!i) - snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s", - pci_name(pdev)); - else - snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", - i - 1, pci_name(pdev)); - - irqs[i] = pci_irq_vector(pdev, i); - if (irqs[i] < 0) { - err = irqs[i]; - goto free_current_gic; - } - - err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic); - if (err) - goto free_current_gic; - - xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); } /* If number of IRQ is one extra than number of online CPUs, @@ -1607,19 +1756,11 @@ static int 
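/*
 * Self-contained model (not part of the patch) of the refcounted IRQ-context
 * cache implemented by gdma_get_gic()/gdma_put_gic() above.  The driver keys
 * contexts by MSI index in an xarray, allocates the vector and requests the
 * IRQ on first use, and frees both on the last put; the optional bitmap hands
 * out dedicated (unshared) indices.  This sketch models only the get/put
 * refcounting and the bitmap; all *_model names are invented here.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define MODEL_MAX_MSI 64

struct gic_model {
	int refcount;
	/* the real context also carries the irq number, handler, EQ list, name */
};

static struct gic_model *gic_table[MODEL_MAX_MSI];	/* xarray in the driver */
static bool msi_used[MODEL_MAX_MSI];			/* gc->msi_bitmap in the driver */
static pthread_mutex_t gic_lock = PTHREAD_MUTEX_INITIALIZER;

struct gic_model *gic_get_model(bool use_bitmap, int *msi)
{
	struct gic_model *gic = NULL;
	int i = *msi;

	pthread_mutex_lock(&gic_lock);
	if (use_bitmap) {	/* dedicated mode: take the first free vector */
		for (i = 0; i < MODEL_MAX_MSI && msi_used[i]; i++)
			;
		if (i == MODEL_MAX_MSI)
			goto out;
		*msi = i;
	}
	gic = gic_table[i];
	if (gic) {
		gic->refcount++;	/* vector already wired up: share it */
	} else {
		/* first user: the driver allocates the MSI-X vector and
		 * requests the IRQ here before publishing the context
		 */
		gic = calloc(1, sizeof(*gic));
		if (!gic)
			goto out;
		gic->refcount = 1;
		gic_table[i] = gic;
	}
	if (use_bitmap)
		msi_used[i] = true;
out:
	pthread_mutex_unlock(&gic_lock);
	return gic;
}

void gic_put_model(bool use_bitmap, int msi)
{
	pthread_mutex_lock(&gic_lock);
	if (gic_table[msi] && --gic_table[msi]->refcount == 0) {
		free(gic_table[msi]);	/* last user: free_irq() + vector release here */
		gic_table[msi] = NULL;
	}
	if (use_bitmap)
		msi_used[msi] = false;
	pthread_mutex_unlock(&gic_lock);
}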
mana_gd_setup_irqs(struct pci_dev *pdev, int nvec) kfree(start_irqs); return 0; -free_current_gic: - kfree(gic); free_irq: for (i -= 1; i >= 0; i--) { irq = pci_irq_vector(pdev, i); - gic = xa_load(&gc->irq_contexts, i); - if (WARN_ON(!gic)) - continue; - irq_update_affinity_hint(irq, NULL); - free_irq(irq, gic); - xa_erase(&gc->irq_contexts, i); - kfree(gic); + gdma_put_gic(gc, false, i); } kfree(start_irqs); @@ -1639,6 +1780,7 @@ static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev) /* Need 1 interrupt for HWC */ max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1; min_irqs = 2; + gc->msi_sharing = true; } nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX); @@ -1693,26 +1835,17 @@ static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev) static void mana_gd_remove_irqs(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); - struct gdma_irq_context *gic; int irq, i; if (gc->max_num_msix < 1) return; - for (i = 0; i < gc->max_num_msix; i++) { - irq = pci_irq_vector(pdev, i); - if (irq < 0) - continue; - - gic = xa_load(&gc->irq_contexts, i); - if (WARN_ON(!gic)) - continue; - + for (i = 0; i < (gc->msi_sharing ? gc->max_num_msix : 1); i++) { /* Need to clear the hint before free_irq */ + irq = pci_irq_vector(pdev, i); irq_update_affinity_hint(irq, NULL); - free_irq(irq, gic); - xa_erase(&gc->irq_contexts, i); - kfree(gic); + + gdma_put_gic(gc, !gc->msi_sharing, i); } pci_free_irq_vectors(pdev); @@ -1744,20 +1877,30 @@ static int mana_gd_setup(struct pci_dev *pdev) if (err) goto destroy_hwc; - err = mana_gd_query_max_resources(pdev); + err = mana_gd_detect_devices(pdev); if (err) goto destroy_hwc; - err = mana_gd_setup_remaining_irqs(pdev); - if (err) { - dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err); - goto destroy_hwc; - } - - err = mana_gd_detect_devices(pdev); + err = mana_gd_query_max_resources(pdev); if (err) goto destroy_hwc; + if (!gc->msi_sharing) { + gc->msi_bitmap = bitmap_zalloc(gc->num_msix_usable, GFP_KERNEL); + if (!gc->msi_bitmap) { + err = -ENOMEM; + goto destroy_hwc; + } + // Set bit for HWC + set_bit(0, gc->msi_bitmap); + } else { + err = mana_gd_setup_remaining_irqs(pdev); + if (err) { + dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err); + goto destroy_hwc; + } + } + dev_dbg(&pdev->dev, "mana gdma setup successful\n"); return 0; @@ -1819,6 +1962,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto release_region; mutex_init(&gc->eq_test_event_mutex); + mutex_init(&gc->gic_mutex); pci_set_drvdata(pdev, gc); gc->bar0_pa = pci_resource_start(pdev, 0); diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 2bac6be8f6a09c..935ca00b413080 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -758,10 +758,9 @@ static int mana_init_port_context(struct mana_port_context *apc) return !apc->rxqs ? 
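/*
 * Sketch (not part of the patch) of the setup-time branch added to
 * mana_gd_setup() above: in sharing mode the remaining vectors are registered
 * up front, while in dedicated mode only a bitmap is allocated and bit 0 is
 * reserved for the HWC, so ports claim vectors lazily through gdma_get_gic().
 * setup_remaining_irqs_model() and the byte-per-vector bitmap are stand-ins
 * invented for this sketch.
 */
#include <stdbool.h>
#include <stdlib.h>

struct gdma_ctx_model {
	bool msi_sharing;
	unsigned int num_msix_usable;
	unsigned char *msi_bitmap;	/* one byte per vector, for simplicity */
};

static int setup_remaining_irqs_model(struct gdma_ctx_model *gc) { (void)gc; return 0; }

int setup_irq_mode_model(struct gdma_ctx_model *gc)
{
	if (gc->msi_sharing)
		return setup_remaining_irqs_model(gc);

	gc->msi_bitmap = calloc(gc->num_msix_usable, 1);
	if (!gc->msi_bitmap)
		return -1;		/* -ENOMEM in the kernel */
	gc->msi_bitmap[0] = 1;		/* vector 0 stays with the HWC */
	return 0;
}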
-ENOMEM : 0; } -static int mana_send_request(struct mana_context *ac, void *in_buf, - u32 in_len, void *out_buf, u32 out_len) +static int gdma_mana_send_request(struct gdma_context *gc, void *in_buf, + u32 in_len, void *out_buf, u32 out_len) { - struct gdma_context *gc = ac->gdma_dev->gdma_context; struct gdma_resp_hdr *resp = out_buf; struct gdma_req_hdr *req = in_buf; struct device *dev = gc->dev; @@ -790,6 +789,14 @@ static int mana_send_request(struct mana_context *ac, void *in_buf, return 0; } +static int mana_send_request(struct mana_context *ac, void *in_buf, + u32 in_len, void *out_buf, u32 out_len) +{ + struct gdma_context *gc = ac->gdma_dev->gdma_context; + + return gdma_mana_send_request(gc, in_buf, in_len, out_buf, out_len); +} + static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr, const enum mana_command_code expected_code, const u32 min_size) @@ -919,11 +926,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc) err, resp.hdr.status); } -static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, - u32 proto_minor_ver, u32 proto_micro_ver, - u16 *max_num_vports) +int mana_gd_query_device_cfg(struct gdma_context *gc, u32 proto_major_ver, + u32 proto_minor_ver, u32 proto_micro_ver, + u16 *max_num_vports) { - struct gdma_context *gc = ac->gdma_dev->gdma_context; struct mana_query_device_cfg_resp resp = {}; struct mana_query_device_cfg_req req = {}; struct device *dev = gc->dev; @@ -938,7 +944,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, req.proto_minor_ver = proto_minor_ver; req.proto_micro_ver = proto_micro_ver; - err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp)); + err = gdma_mana_send_request(gc, &req, sizeof(req), &resp, sizeof(resp)); if (err) { dev_err(dev, "Failed to query config: %d", err); return err; @@ -961,8 +967,6 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, else gc->adapter_mtu = ETH_FRAME_LEN; - debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu); - return 0; } @@ -1239,79 +1243,105 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, } EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA"); -static void mana_destroy_eq(struct mana_context *ac) +static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf) { + int i; + + for (i = 0; i < CQE_POLLING_BUFFER; i++) + memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp)); +} + +void mana_destroy_eq(struct mana_port_context *apc) +{ + struct mana_context *ac = apc->ac; struct gdma_context *gc = ac->gdma_dev->gdma_context; struct gdma_queue *eq; int i; - if (!ac->eqs) + if (!apc->eqs) return; - debugfs_remove_recursive(ac->mana_eqs_debugfs); - ac->mana_eqs_debugfs = NULL; + debugfs_remove_recursive(apc->mana_eqs_debugfs); + apc->mana_eqs_debugfs = NULL; - for (i = 0; i < gc->max_num_queues; i++) { - eq = ac->eqs[i].eq; + for (i = 0; i < apc->num_queues; i++) { + eq = apc->eqs[i].eq; if (!eq) continue; mana_gd_destroy_queue(gc, eq); + gdma_put_gic(gc, !gc->msi_sharing, eq->eq.msix_index); } - kfree(ac->eqs); - ac->eqs = NULL; + kfree(apc->eqs); + apc->eqs = NULL; } +EXPORT_SYMBOL_NS(mana_destroy_eq, "NET_MANA"); -static void mana_create_eq_debugfs(struct mana_context *ac, int i) +static void mana_create_eq_debugfs(struct mana_port_context *apc, int i) { - struct mana_eq eq = ac->eqs[i]; + struct mana_eq eq = apc->eqs[i]; char eqnum[32]; sprintf(eqnum, "eq%d", i); - eq.mana_eq_debugfs = debugfs_create_dir(eqnum, 
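/*
 * Sketch (not part of the patch) of the ownership rule behind the new
 * per-port mana_destroy_eq() above: EQs belong to the port context, so
 * tearing a port down destroys each EQ and drops the reference on its
 * interrupt context, consulting the bitmap only when vectors are dedicated
 * (MSI sharing off).  The sketch caches the MSI index before the queue is
 * destroyed so nothing is read from freed memory; all *_model names are
 * invented here.
 */
#include <stdbool.h>
#include <stdlib.h>

struct eq_model { int msix_index; };

struct port_model {
	unsigned int num_queues;
	struct eq_model **eqs;
};

/* stand-ins for mana_gd_destroy_queue() and gdma_put_gic() */
static void destroy_queue_model(struct eq_model *eq) { free(eq); }
static void put_gic_model(bool use_bitmap, int msi)  { (void)use_bitmap; (void)msi; }

void destroy_port_eqs_model(struct port_model *p, bool msi_sharing)
{
	unsigned int i;

	if (!p->eqs)
		return;
	for (i = 0; i < p->num_queues; i++) {
		int msi;

		if (!p->eqs[i])
			continue;
		msi = p->eqs[i]->msix_index;	/* cache before the queue is freed */
		destroy_queue_model(p->eqs[i]);
		put_gic_model(!msi_sharing, msi);
	}
	free(p->eqs);
	p->eqs = NULL;
}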
ac->mana_eqs_debugfs); + eq.mana_eq_debugfs = debugfs_create_dir(eqnum, apc->mana_eqs_debugfs); debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head); debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail); + debugfs_create_u32("irq", 0400, eq.mana_eq_debugfs, &eq.eq->eq.irq); debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops); } -static int mana_create_eq(struct mana_context *ac) +int mana_create_eq(struct mana_port_context *apc) { - struct gdma_dev *gd = ac->gdma_dev; + struct gdma_dev *gd = apc->ac->gdma_dev; struct gdma_context *gc = gd->gdma_context; struct gdma_queue_spec spec = {}; int err; int i; + struct gdma_irq_context *gic; - ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), - GFP_KERNEL); - if (!ac->eqs) + WARN_ON(apc->eqs); + apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq), + GFP_KERNEL); + if (!apc->eqs) return -ENOMEM; spec.type = GDMA_EQ; spec.monitor_avl_buf = false; spec.queue_size = EQ_SIZE; spec.eq.callback = NULL; - spec.eq.context = ac->eqs; + spec.eq.context = apc->eqs; spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; + spec.eq.ndev = apc->ndev; + + apc->mana_eqs_debugfs = debugfs_create_dir("EQs", apc->mana_port_debugfs); + + for (i = 0; i < apc->num_queues; i++) { + mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll); - ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs); + if (gc->msi_sharing) + spec.eq.msix_index = (i + 1) % gc->num_msix_usable; - for (i = 0; i < gc->max_num_queues; i++) { - spec.eq.msix_index = (i + 1) % gc->num_msix_usable; - err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); + gic = gdma_get_gic(gc, !gc->msi_sharing, apc->port_idx, i, + &spec.eq.msix_index); + if (!gic) + goto out; + + err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq); if (err) { dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err); goto out; } - mana_create_eq_debugfs(ac, i); + apc->eqs[i].eq->eq.irq = gic->irq; + mana_create_eq_debugfs(apc, i); } return 0; out: - mana_destroy_eq(ac); + mana_destroy_eq(apc); return err; } +EXPORT_SYMBOL_NS(mana_create_eq, "NET_MANA"); static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) { @@ -1404,6 +1434,7 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) static void mana_poll_tx_cq(struct mana_cq *cq) { + struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent; struct gdma_comp *completions = cq->gdma_comp_buf; struct gdma_posted_wqe_info *wqe_info; unsigned int pkt_transmitted = 0; @@ -1425,9 +1456,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, CQE_POLLING_BUFFER); - if (comp_read < 1) - return; - for (i = 0; i < comp_read; i++) { struct mana_tx_comp_oob *cqe_oob; @@ -1483,7 +1511,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq) mana_unmap_skb(skb, apc); - napi_consume_skb(skb, cq->budget); + napi_consume_skb(skb, gdma_eq->eq.budget); pkt_transmitted++; } @@ -1512,8 +1540,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) WARN_ON_ONCE(1); - - cq->work_done = pkt_transmitted; } static void mana_post_pkt_rxq(struct mana_rxq *rxq) @@ -1566,15 +1592,19 @@ static void mana_rx_skb(void *buf_va, bool from_pool, struct mana_stats_rx *rx_stats = &rxq->stats; struct net_device *ndev = rxq->ndev; uint pkt_len = cqe->ppi[0].pkt_len; + struct mana_port_context *apc; u16 rxq_idx = rxq->rxq_idx; struct napi_struct *napi; + struct gdma_queue *eq; struct xdp_buff xdp = 
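/*
 * Compilable sketch (not part of the patch) of the vector-selection policy in
 * the per-port mana_create_eq() above: with MSI sharing, queue i of any port
 * reuses vector (i + 1) modulo the usable count (vector 0 belongs to the HWC);
 * with dedicated vectors, the index is whatever the bitmap allocator inside
 * gdma_get_gic() hands out.  pick_msix_index_model() is a name invented here.
 */
#include <stdbool.h>

int pick_msix_index_model(bool msi_sharing, unsigned int queue_idx,
			  unsigned int num_msix_usable, int bitmap_choice)
{
	if (msi_sharing)
		return (int)((queue_idx + 1) % num_msix_usable);
	return bitmap_choice;	/* dedicated mode: chosen when the gic was acquired */
}
/*
 * e.g. with 9 usable vectors and sharing enabled, queues 0..7 of every port
 * land on vectors 1..8, which is the pre-existing shared-EQ behaviour.
 */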
{}; struct sk_buff *skb; u32 hash_value; u32 act; - rxq->rx_cq.work_done++; - napi = &rxq->rx_cq.napi; + apc = netdev_priv(ndev); + eq = apc->eqs[rxq_idx].eq; + eq->eq.work_done++; + napi = &eq->eq.napi; if (!buf_va) { ++ndev->stats.rx_dropped; @@ -1818,10 +1848,10 @@ static void mana_poll_rx_cq(struct mana_cq *cq) xdp_do_flush(); } -static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) +static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue) { struct mana_cq *cq = context; - int w; +// int w; WARN_ON_ONCE(cq->gdma_cq != gdma_queue); @@ -1830,6 +1860,8 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) else mana_poll_tx_cq(cq); + mana_gd_ring_cq(gdma_queue, SET_ARM_BIT); +#if 0 w = cq->work_done; cq->work_done_since_doorbell += w; @@ -1849,26 +1881,7 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) } return w; -} - -static int mana_poll(struct napi_struct *napi, int budget) -{ - struct mana_cq *cq = container_of(napi, struct mana_cq, napi); - int w; - - cq->work_done = 0; - cq->budget = budget; - - w = mana_cq_handler(cq, cq->gdma_cq); - - return min(w, budget); -} - -static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue) -{ - struct mana_cq *cq = context; - - napi_schedule_irqoff(&cq->napi); +#endif } static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) @@ -1893,7 +1906,6 @@ static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) static void mana_destroy_txq(struct mana_port_context *apc) { - struct napi_struct *napi; int i; if (!apc->tx_qp) @@ -1903,13 +1915,6 @@ static void mana_destroy_txq(struct mana_port_context *apc) debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs); apc->tx_qp[i].mana_tx_debugfs = NULL; - napi = &apc->tx_qp[i].tx_cq.napi; - if (apc->tx_qp[i].txq.napi_initialized) { - napi_synchronize(napi); - napi_disable(napi); - netif_napi_del(napi); - apc->tx_qp[i].txq.napi_initialized = false; - } mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); @@ -1938,8 +1943,8 @@ static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx) &tx_qp->tx_cq.gdma_cq->head); debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs, &tx_qp->tx_cq.gdma_cq->tail); - debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs, - &tx_qp->tx_cq.budget); +// debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs, +// &tx_qp->tx_cq.budget); debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs, tx_qp->txq.gdma_sq, &mana_dbg_q_fops); debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs, @@ -2005,6 +2010,7 @@ static int mana_create_txq(struct mana_port_context *apc, /* Create SQ's CQ */ cq = &apc->tx_qp[i].tx_cq; + cq->gdma_comp_buf = apc->eqs[i].cqe_poll; cq->type = MANA_CQ_TYPE_TX; cq->txq = txq; @@ -2013,8 +2019,8 @@ static int mana_create_txq(struct mana_port_context *apc, spec.type = GDMA_CQ; spec.monitor_avl_buf = false; spec.queue_size = cq_size; - spec.cq.callback = mana_schedule_napi; - spec.cq.parent_eq = ac->eqs[i].eq; + spec.cq.callback = mana_cq_handler; + spec.cq.parent_eq = apc->eqs[i].eq; spec.cq.context = cq; err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); if (err) @@ -2059,10 +2065,6 @@ static int mana_create_txq(struct mana_port_context *apc, mana_create_txq_debugfs(apc, i); - netif_napi_add_tx(net, &cq->napi, mana_poll); - napi_enable(&cq->napi); - txq->napi_initialized = true; - mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); 
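/*
 * Model (not part of the patch) of the accounting change visible above: each
 * received packet now charges the NAPI budget of the EQ that owns the RX
 * queue's completion queue (eq->eq.work_done++), instead of a per-CQ budget,
 * so a single poll loop per EQ covers every CQ attached to it.  The struct
 * and function names below are invented for this sketch.
 */
struct eq_napi_model {
	int work_done;
	int budget;
};

/* called once per RX completion, mirroring mana_rx_skb() above */
void charge_rx_completion_model(struct eq_napi_model *eq)
{
	eq->work_done++;
}

/* the EQ poll loop stops re-arming once its budget is consumed */
int eq_budget_exhausted_model(const struct eq_napi_model *eq)
{
	return eq->work_done >= eq->budget;
}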
} @@ -2074,6 +2076,21 @@ static int mana_create_txq(struct mana_port_context *apc, return err; } +static void mana_napi_sync_for_rx(struct mana_rxq *rxq) +{ + struct net_device *ndev = rxq->ndev; + struct mana_port_context *apc; + u16 rxq_idx = rxq->rxq_idx; + struct napi_struct *napi; + struct gdma_queue *eq; + + apc = netdev_priv(ndev); + eq = apc->eqs[rxq_idx].eq; + napi = &eq->eq.napi; + + napi_synchronize(napi); +} + static void mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq, bool napi_initialized) @@ -2081,7 +2098,6 @@ static void mana_destroy_rxq(struct mana_port_context *apc, struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct mana_recv_buf_oob *rx_oob; struct device *dev = gc->dev; - struct napi_struct *napi; struct page *page; int i; @@ -2091,17 +2107,11 @@ static void mana_destroy_rxq(struct mana_port_context *apc, debugfs_remove_recursive(rxq->mana_rx_debugfs); rxq->mana_rx_debugfs = NULL; - napi = &rxq->rx_cq.napi; - - if (napi_initialized) { - napi_synchronize(napi); - - napi_disable(napi); - - netif_napi_del(napi); - } xdp_rxq_info_unreg(&rxq->xdp_rxq); + if (napi_initialized) + mana_napi_sync_for_rx(rxq); + mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); mana_deinit_cq(apc, &rxq->rx_cq); @@ -2227,11 +2237,13 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) { struct mana_port_context *mpc = netdev_priv(rxq->ndev); struct page_pool_params pprm = {}; + u16 rxq_idx = rxq->rxq_idx; + struct gdma_queue *eq = mpc->eqs[rxq_idx].eq; int ret; pprm.pool_size = mpc->rx_queue_size; pprm.nid = gc->numa_node; - pprm.napi = &rxq->rx_cq.napi; + pprm.napi = &eq->eq.napi; pprm.netdev = rxq->ndev; pprm.order = get_order(rxq->alloc_size); @@ -2300,6 +2312,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, /* Create RQ's CQ */ cq = &rxq->rx_cq; + cq->gdma_comp_buf = eq->cqe_poll; cq->type = MANA_CQ_TYPE_RX; cq->rxq = rxq; @@ -2307,7 +2320,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, spec.type = GDMA_CQ; spec.monitor_avl_buf = false; spec.queue_size = cq_size; - spec.cq.callback = mana_schedule_napi; + spec.cq.callback = mana_cq_handler; spec.cq.parent_eq = eq->eq; spec.cq.context = cq; err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); @@ -2349,15 +2362,11 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); - WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, - cq->napi.napi_id)); + eq->eq->eq.napi.napi_id)); WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, rxq->page_pool)); - napi_enable(&cq->napi); - mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); out: if (!err) @@ -2389,7 +2398,7 @@ static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx) &rxq->rx_cq.gdma_cq->head); debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.gdma_cq->tail); - debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget); +// debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget); debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops); debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq, &mana_dbg_q_fops); @@ -2398,13 +2407,12 @@ static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx) static int mana_add_rx_queues(struct mana_port_context *apc, struct net_device *ndev) { - struct mana_context 
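/*
 * Sketch (not part of the patch) of the wiring change in mana_create_txq() /
 * mana_create_rxq() above: the CQ no longer owns a NAPI instance or its own
 * completion buffer; it borrows the owning EQ's cqe_poll array and registers
 * a handler (mana_cq_handler) that runs synchronously from the EQ's poll
 * loop and re-arms the CQ itself.  All *_model names are invented here.
 */
struct comp_model { unsigned long long cqe_data[4]; };

struct cq_model {
	struct comp_model *comp_buf;		/* points into the owning EQ's buffer */
	void (*handler)(struct cq_model *cq);
};

struct eq_owner_model {
	struct comp_model cqe_poll[512];	/* CQE_POLLING_BUFFER in the patch */
};

static void cq_handler_model(struct cq_model *cq) { (void)cq; /* poll CQ, re-arm */ }

void attach_cq_to_eq_model(struct cq_model *cq, struct eq_owner_model *eq)
{
	cq->comp_buf = eq->cqe_poll;
	cq->handler = cq_handler_model;
}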
*ac = apc->ac; struct mana_rxq *rxq; int err = 0; int i; for (i = 0; i < apc->num_queues; i++) { - rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); + rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev); if (!rxq) { err = -ENOMEM; netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err); @@ -2423,9 +2431,8 @@ static int mana_add_rx_queues(struct mana_port_context *apc, return err; } -static void mana_destroy_vport(struct mana_port_context *apc) +static void mana_destroy_rxqs(struct mana_port_context *apc) { - struct gdma_dev *gd = apc->ac->gdma_dev; struct mana_rxq *rxq; u32 rxq_idx; @@ -2437,8 +2444,12 @@ static void mana_destroy_vport(struct mana_port_context *apc) mana_destroy_rxq(apc, rxq, true); apc->rxqs[rxq_idx] = NULL; } +} + +static void mana_destroy_vport(struct mana_port_context *apc) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; - mana_destroy_txq(apc); mana_uncfg_vport(apc); if (gd->gdma_context->is_pf) @@ -2459,11 +2470,7 @@ static int mana_create_vport(struct mana_port_context *apc, return err; } - err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); - if (err) - return err; - - return mana_create_txq(apc, net); + return mana_cfg_vport(apc, gd->pdid, gd->doorbell); } static int mana_rss_table_alloc(struct mana_port_context *apc) @@ -2655,21 +2662,36 @@ int mana_alloc_queues(struct net_device *ndev) err = mana_create_vport(apc, ndev); if (err) { - netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err); + netdev_err(ndev, "Failed to create vPort %u : %d\n", + apc->port_idx, err); return err; } + err = mana_create_eq(apc); + if (err) { + netdev_err(ndev, "Failed to create EQ on vPort %u: %d\n", + apc->port_idx, err); + goto destroy_vport; + } + + err = mana_create_txq(apc, ndev); + if (err) { + netdev_err(ndev, "Failed to create TXQ on vPort %u: %d\n", + apc->port_idx, err); + goto destroy_eq; + } + err = netif_set_real_num_tx_queues(ndev, apc->num_queues); if (err) { netdev_err(ndev, "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n", apc->num_queues, err); - goto destroy_vport; + goto destroy_txq; } err = mana_add_rx_queues(apc, ndev); if (err) - goto destroy_vport; + goto destroy_txq; apc->rss_state = apc->num_queues > 1 ? 
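/*
 * Compilable illustration (not part of the patch) of the setup/unwind order
 * that mana_alloc_queues() above now follows: vport, then EQs, then TX
 * queues, then RX queues, then RSS, with each failure label unwinding exactly
 * the stages already built (the same reverse order is reused by
 * mana_dealloc_queues()).  The *_m stubs are stand-ins invented here.
 */
static int create_vport_m(void)   { return 0; }
static int create_eq_m(void)      { return 0; }
static int create_txq_m(void)     { return 0; }
static int create_rxq_m(void)     { return 0; }
static int config_rss_m(void)     { return 0; }
static void destroy_vport_m(void) { }
static void destroy_eq_m(void)    { }
static void destroy_txq_m(void)   { }
static void destroy_rxq_m(void)   { }

int alloc_queues_model(void)
{
	int err;

	err = create_vport_m();
	if (err)
		return err;
	err = create_eq_m();
	if (err)
		goto destroy_vport;
	err = create_txq_m();
	if (err)
		goto destroy_eq;
	err = create_rxq_m();
	if (err)
		goto destroy_txq;
	err = config_rss_m();
	if (err)
		goto destroy_rxq;
	return 0;

destroy_rxq:
	destroy_rxq_m();
destroy_txq:
	destroy_txq_m();
destroy_eq:
	destroy_eq_m();
destroy_vport:
	destroy_vport_m();
	return err;
}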
TRI_STATE_TRUE : TRI_STATE_FALSE; @@ -2678,7 +2700,7 @@ int mana_alloc_queues(struct net_device *ndev) netdev_err(ndev, "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n", apc->num_queues, err); - goto destroy_vport; + goto destroy_rxq; } mana_rss_table_init(apc); @@ -2686,19 +2708,25 @@ int mana_alloc_queues(struct net_device *ndev) err = mana_config_rss(apc, TRI_STATE_TRUE, true, true); if (err) { netdev_err(ndev, "Failed to configure RSS table: %d\n", err); - goto destroy_vport; + goto destroy_rxq; } if (gd->gdma_context->is_pf) { err = mana_pf_register_filter(apc); if (err) - goto destroy_vport; + goto destroy_rxq; } mana_chn_setxdp(apc, mana_xdp_get(apc)); return 0; +destroy_rxq: + mana_destroy_rxqs(apc); +destroy_txq: + mana_destroy_txq(apc); +destroy_eq: + mana_destroy_eq(apc); destroy_vport: mana_destroy_vport(apc); return err; @@ -2805,6 +2833,9 @@ static int mana_dealloc_queues(struct net_device *ndev) return err; } + mana_destroy_rxqs(apc); + mana_destroy_txq(apc); + mana_destroy_eq(apc); mana_destroy_vport(apc); return 0; @@ -3019,17 +3050,13 @@ int mana_probe(struct gdma_dev *gd, bool resuming) gd->driver_data = ac; } - err = mana_create_eq(ac); - if (err) { - dev_err(dev, "Failed to create EQs: %d\n", err); - goto out; - } - - err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, - MANA_MICRO_VERSION, &num_ports); + err = mana_gd_query_device_cfg(gc, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, + MANA_MICRO_VERSION, &num_ports); if (err) goto out; + debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu); + if (!resuming) { ac->num_ports = num_ports; } else { @@ -3138,7 +3165,6 @@ void mana_remove(struct gdma_dev *gd, bool suspending) free_netdev(ndev); } - mana_destroy_eq(ac); out: mana_gd_deregister_device(gd); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 918411d4153bce..52dcb4a8c7e4fa 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -267,8 +267,10 @@ struct gdma_event { struct gdma_queue; +#define CQE_POLLING_BUFFER 512 struct mana_eq { struct gdma_queue *eq; + struct gdma_comp cqe_poll[CQE_POLLING_BUFFER]; struct dentry *mana_eq_debugfs; }; @@ -317,8 +319,14 @@ struct gdma_queue { void *context; unsigned int msix_index; + unsigned int irq; u32 log2_throttle_limit; + + /* NAPI data */ + struct napi_struct napi; + int work_done; + int budget; } eq; struct { @@ -343,6 +351,8 @@ struct gdma_queue_spec { unsigned long log2_throttle_limit; unsigned int msix_index; + + struct net_device *ndev; } eq; struct { @@ -363,6 +373,9 @@ struct gdma_irq_context { spinlock_t lock; struct list_head eq_list; char name[MANA_IRQ_NAME_SZ]; + unsigned int msi; + unsigned int irq; + refcount_t refcount; }; struct gdma_context { @@ -409,6 +422,10 @@ struct gdma_context { /* Azure RDMA adapter */ struct gdma_dev mana_ib; + + struct mutex gic_mutex; + bool msi_sharing; + unsigned long *msi_bitmap; }; static inline bool mana_gd_is_mana(struct gdma_dev *gd) @@ -564,6 +581,8 @@ enum { #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17) /* Driver supports dynamic MSI-X vector allocation */ #define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13) +/* Driver supports separate EQ/MSIs for each vPort */ +#define GDMA_DRV_CAP_FLAG_1_EQ_MSI_UNSHARE_MULTI_VPORT BIT(19) #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ @@ -572,7 +591,8 @@ enum { GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \ 
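/*
 * Small, self-contained example (not part of the patch) of how the new
 * capability bit declared above composes with the existing driver flags: the
 * driver ORs BIT(19) into its capability word so the SoC knows unshared
 * per-vPort EQ MSIs are supported, and either side can test it with a mask.
 * The BIT_M()/CAP_* macros are userspace stand-ins for the kernel macros.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_M(n)				(1u << (n))
#define CAP_DYNAMIC_IRQ_ALLOC			BIT_M(13)	/* existing flag */
#define CAP_HANDLE_RECONFIG_EQE			BIT_M(17)	/* existing flag */
#define CAP_EQ_MSI_UNSHARE_MULTI_VPORT		BIT_M(19)	/* added by this patch */

int main(void)
{
	uint32_t caps = CAP_DYNAMIC_IRQ_ALLOC | CAP_HANDLE_RECONFIG_EQE |
			CAP_EQ_MSI_UNSHARE_MULTI_VPORT;

	printf("unshared EQ MSI supported: %s\n",
	       (caps & CAP_EQ_MSI_UNSHARE_MULTI_VPORT) ? "yes" : "no");
	return 0;
}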
GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \ - GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT) + GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \ + GDMA_DRV_CAP_FLAG_1_EQ_MSI_UNSHARE_MULTI_VPORT) #define GDMA_DRV_CAP_FLAGS2 0 @@ -902,4 +922,11 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle); void mana_register_debugfs(void); void mana_unregister_debugfs(void); +struct gdma_irq_context *gdma_get_gic(struct gdma_context *gc, bool use_bitmap, + u16 port_index, int queue_index, + int *msi_requested); +void gdma_put_gic(struct gdma_context *gc, bool use_bitmap, int msi); +int mana_gd_query_device_cfg(struct gdma_context *gc, u32 proto_major_ver, + u32 proto_minor_ver, u32 proto_micro_ver, + u16 *max_num_vports); #endif /* _GDMA_H */ diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 0f78065de8fe42..5e8d1f9e606e05 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -283,14 +283,8 @@ struct mana_cq { */ struct mana_txq *txq; - /* Buffer which the CQ handler can copy the CQE's into. */ - struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; - - /* NAPI data */ - struct napi_struct napi; - int work_done; - int work_done_since_doorbell; - int budget; + /* Pointer to a buffer which the CQ handler can copy the CQE's into. */ + struct gdma_comp *gdma_comp_buf; }; struct mana_recv_buf_oob { @@ -409,9 +403,6 @@ struct mana_context { u16 num_ports; - struct mana_eq *eqs; - struct dentry *mana_eqs_debugfs; - struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; }; @@ -421,6 +412,9 @@ struct mana_port_context { u8 mac_addr[ETH_ALEN]; + struct mana_eq *eqs; + struct dentry *mana_eqs_debugfs; + enum TRI_STATE rss_state; mana_handle_t default_rxobj; @@ -826,6 +820,8 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, u32 doorbell_pg_id); void mana_uncfg_vport(struct mana_port_context *apc); +int mana_create_eq(struct mana_port_context *apc); +void mana_destroy_eq(struct mana_port_context *apc); struct net_device *mana_get_primary_netdev(struct mana_context *ac, u32 port_index,
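/*
 * Before/after sketch (not part of the patch) of the ownership move declared
 * in mana.h above: the EQ array and its debugfs directory leave the
 * adapter-wide mana_context and become per-port state, and mana_cq keeps only
 * a pointer to the completion buffer that now lives inside the owning
 * mana_eq; the per-CQ NAPI, work_done and budget fields move to the EQ side.
 * The *_m structs below are simplified stand-ins invented for this sketch.
 */
struct gdma_comp_m { unsigned long long cqe_data[4]; };

struct mana_eq_m {
	void *eq;				/* struct gdma_queue * in the driver */
	struct gdma_comp_m cqe_poll[512];	/* CQE_POLLING_BUFFER */
};

struct mana_cq_m {
	struct gdma_comp_m *gdma_comp_buf;	/* points at the owning EQ's cqe_poll */
	/* NAPI state, work_done and budget now live with the EQ, not the CQ */
};

struct mana_port_context_m {
	unsigned int num_queues;
	struct mana_eq_m *eqs;			/* previously in struct mana_context */
};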