-rw-r--r--  drivers/infiniband/core/addr.c | 1
-rw-r--r--  drivers/infiniband/core/cache.c | 1
-rw-r--r--  drivers/infiniband/core/cma.c | 2
-rw-r--r--  drivers/infiniband/core/cma_configfs.c | 1
-rw-r--r--  drivers/infiniband/core/cq.c | 1
-rw-r--r--  drivers/infiniband/core/iwpm_util.h | 1
-rw-r--r--  drivers/infiniband/core/nldev.c | 3
-rw-r--r--  drivers/infiniband/core/sa_query.c | 1
-rw-r--r--  drivers/infiniband/core/verbs.c | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.c | 5
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/device.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/firmware.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.c | 97
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.h | 8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 71
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 26
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 458
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 20
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c | 5
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 50
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 106
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 68
-rw-r--r--  drivers/infiniband/hw/irdma/cm.h | 7
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c | 602
-rw-r--r--  drivers/infiniband/hw/irdma/defs.h | 8
-rw-r--r--  drivers/infiniband/hw/irdma/hmc.c | 105
-rw-r--r--  drivers/infiniband/hw/irdma/hmc.h | 53
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 192
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_hw.c | 1
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_if.c | 3
-rw-r--r--  drivers/infiniband/hw/irdma/main.c | 29
-rw-r--r--  drivers/infiniband/hw/irdma/main.h | 47
-rw-r--r--  drivers/infiniband/hw/irdma/osdep.h | 41
-rw-r--r--  drivers/infiniband/hw/irdma/pble.c | 77
-rw-r--r--  drivers/infiniband/hw/irdma/pble.h | 25
-rw-r--r--  drivers/infiniband/hw/irdma/protos.h | 90
-rw-r--r--  drivers/infiniband/hw/irdma/puda.c | 132
-rw-r--r--  drivers/infiniband/hw/irdma/puda.h | 43
-rw-r--r--  drivers/infiniband/hw/irdma/status.h | 71
-rw-r--r--  drivers/infiniband/hw/irdma/type.h | 113
-rw-r--r--  drivers/infiniband/hw/irdma/uda.c | 40
-rw-r--r--  drivers/infiniband/hw/irdma/uda.h | 46
-rw-r--r--  drivers/infiniband/hw/irdma/uk.c | 122
-rw-r--r--  drivers/infiniband/hw/irdma/user.h | 62
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 247
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 391
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.h | 15
-rw-r--r--  drivers/infiniband/hw/irdma/ws.c | 19
-rw-r--r--  drivers/infiniband/hw/irdma/ws.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_virt.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 104
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 19
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c | 4
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_debugfs.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_transport.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_vnic.c | 1
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 10
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 110
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_av.c | 19
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c | 8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c | 20
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 32
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mcast.c | 542
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mmap.c | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 15
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mw.c | 38
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 41
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c | 433
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.h | 105
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 57
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_queue.c | 10
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c | 26
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 71
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 170
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 108
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h | 27
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 13
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 58
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 69
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c | 1
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c | 3
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c | 42
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.h | 1
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.c | 1
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 11
106 files changed, 2870 insertions, 2875 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 65e3e7df8a4b..f253295795f0 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -37,7 +37,6 @@
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <linux/module.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f6aa1a964573..4084d05a4510 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -34,7 +34,6 @@
*/
#include <linux/if_vlan.h>
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 50c53409ceb6..fabca5e51e3d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2642,7 +2642,7 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
{
struct rdma_id_private *id_priv;
- if (id->qp_type != IB_QPT_RC)
+ if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 9ac16e0db761..de8a2d5d741c 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/configfs.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 433b426729d4..a70876a0a231 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -2,7 +2,6 @@
/*
* Copyright (c) 2015 HGST, a Western Digital Company.
*/
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 3a42ad43056e..d6fc8402158a 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -33,7 +33,6 @@
#ifndef _IWPM_UTIL_H
#define _IWPM_UTIL_H
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/in.h>
#include <linux/in6.h>
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index f5aacaf7fb8e..ca24ce34da76 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1951,9 +1951,10 @@ static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[],
u32 port)
{
struct rdma_hw_stats *stats;
- int rem, i, index, ret = 0;
struct nlattr *entry_attr;
unsigned long *target;
+ int rem, i, ret = 0;
+ u32 index;
stats = ib_get_hw_stats_port(device, port);
if (!stats)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 74ecd7456a11..8dc7d1f4b35d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c18634bec212..a9819c40a140 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -268,9 +268,6 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
return ERR_PTR(-ENOMEM);
pd->device = device;
- pd->uobject = NULL;
- pd->__internal_mr = NULL;
- atomic_set(&pd->usecnt, 0);
pd->flags = flags;
rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
@@ -341,11 +338,6 @@ int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
pd->__internal_mr = NULL;
}
- /* uverbs manipulates usecnt with proper locking, while the kabi
- * requires the caller to guarantee we can't race here.
- */
- WARN_ON(atomic_read(&pd->usecnt));
-
ret = pd->device->ops.dealloc_pd(pd, udata);
if (ret)
return ret;
@@ -2153,6 +2145,7 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return mr;
mr->device = pd->device;
+ mr->type = IB_MR_TYPE_USER;
mr->pd = pd;
mr->dm = NULL;
atomic_inc(&pd->usecnt);
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 98c813ba4304..877f8e84a672 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -5,7 +5,6 @@
#include <linux/topology.h>
#include <linux/cpumask.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/numa.h>
@@ -667,7 +666,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
* engines, use the same CPU cores as general/control
* context.
*/
- if (cpumask_weight(&entry->def_intr.mask) == 0)
+ if (cpumask_empty(&entry->def_intr.mask))
cpumask_copy(&entry->def_intr.mask,
&entry->general_intr_mask);
}
@@ -687,7 +686,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
* vectors, use the same CPU core as the general/control
* context.
*/
- if (cpumask_weight(&entry->comp_vect_mask) == 0)
+ if (cpumask_empty(&entry->comp_vect_mask))
cpumask_copy(&entry->comp_vect_mask,
&entry->general_intr_mask);
}
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 22a3cdb940be..80ba1e53c068 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -7,7 +7,6 @@
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ratelimit.h>
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index 68a184c39941..8ceff7141baf 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -4,7 +4,6 @@
*/
#include <linux/cdev.h>
-#include <linux/module.h>
#include <linux/device.h>
#include <linux/fs.h>
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index e2e4f9f6fae2..3af77a0840ab 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -6,7 +6,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitmap.h>
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 31e63e245ea9..aa15a5cc7cf3 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -5,7 +5,6 @@
#include <linux/firmware.h>
#include <linux/mutex.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/crc32.h>
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index dc9211f3a009..99d0743133ca 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1397,8 +1397,7 @@ static int query_port(struct rvt_dev_info *rdi, u32 port_num,
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
- props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu :
- ib_mtu_enum_to_int(props->max_mtu);
+ props->phys_mtu = hfi1_max_mtu;
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 4b693d542ace..864413607571 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -38,45 +38,36 @@
#define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32
-static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier,
- u8 op_modifier, u16 op, u16 token,
- int event)
+static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
{
- return hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, token, event);
+ return hr_dev->hw->post_mbox(hr_dev, mbox_msg);
}
/* this should be called with "poll_sem" */
-static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, unsigned long in_modifier,
- u8 op_modifier, u16 op,
- unsigned int timeout)
+static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
{
int ret;
- ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- CMD_POLL_TOKEN, 0);
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
if (ret) {
dev_err_ratelimited(hr_dev->dev,
"failed to post mailbox 0x%x in poll mode, ret = %d.\n",
- op, ret);
+ mbox_msg->cmd, ret);
return ret;
}
- return hr_dev->hw->poll_mbox_done(hr_dev, timeout);
+ return hr_dev->hw->poll_mbox_done(hr_dev);
}
-static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, unsigned long in_modifier,
- u8 op_modifier, u16 op, unsigned int timeout)
+static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
{
int ret;
down(&hr_dev->cmd.poll_sem);
- ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, timeout);
+ ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg);
up(&hr_dev->cmd.poll_sem);
return ret;
@@ -100,10 +91,8 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
complete(&context->done);
}
-static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, unsigned long in_modifier,
- u8 op_modifier, u16 op,
- unsigned int timeout)
+static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct hns_roce_cmd_context *context;
@@ -124,20 +113,19 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
reinit_completion(&context->done);
- ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- context->token, 1);
+ mbox_msg->token = context->token;
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
if (ret) {
dev_err_ratelimited(dev,
"failed to post mailbox 0x%x in event mode, ret = %d.\n",
- op, ret);
+ mbox_msg->cmd, ret);
goto out;
}
if (!wait_for_completion_timeout(&context->done,
- msecs_to_jiffies(timeout))) {
+ msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS))) {
dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
- context->token, op);
+ context->token, mbox_msg->cmd);
ret = -EBUSY;
goto out;
}
@@ -145,45 +133,50 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
ret = context->result;
if (ret)
dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
- context->token, op, ret);
+ context->token, mbox_msg->cmd, ret);
out:
context->busy = 0;
return ret;
}
-static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, unsigned long in_modifier,
- u8 op_modifier, u16 op, unsigned int timeout)
+static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
{
int ret;
down(&hr_dev->cmd.event_sem);
- ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, timeout);
+ ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg);
up(&hr_dev->cmd.event_sem);
return ret;
}
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
- unsigned long in_modifier, u8 op_modifier, u16 op,
- unsigned int timeout)
+ u8 cmd, unsigned long tag)
{
+ struct hns_roce_mbox_msg mbox_msg = {};
bool is_busy;
if (hr_dev->hw->chk_mbox_avail)
if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
return is_busy ? -EBUSY : 0;
- if (hr_dev->cmd.use_events)
- return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
- else
- return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
+ mbox_msg.in_param = in_param;
+ mbox_msg.out_param = out_param;
+ mbox_msg.cmd = cmd;
+ mbox_msg.tag = tag;
+
+ if (hr_dev->cmd.use_events) {
+ mbox_msg.event_en = 1;
+
+ return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg);
+ } else {
+ mbox_msg.event_en = 0;
+ mbox_msg.token = CMD_POLL_TOKEN;
+
+ return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg);
+ }
}
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
@@ -269,3 +262,15 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
+
+int hns_roce_create_hw_ctx(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ u8 cmd, unsigned long idx)
+{
+ return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx);
+}
+
+int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, unsigned long idx)
+{
+ return hns_roce_cmd_mbox(dev, 0, 0, cmd, idx);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 8025e7f657fa..052a3d60905a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -140,12 +140,16 @@ enum {
};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
- unsigned long in_modifier, u8 op_modifier, u16 op,
- unsigned int timeout);
+ u8 cmd, unsigned long tag);
struct hns_roce_cmd_mailbox *
hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox);
+int hns_roce_create_hw_ctx(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ u8 cmd, unsigned long idx);
+int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd,
+ unsigned long idx);
#endif /* _HNS_ROCE_CMD_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 55057dcbb2dc..8acd599ffac1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -100,12 +100,39 @@ static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
mutex_unlock(&cq_table->bank_mutex);
}
+static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq,
+ u64 *mtts, dma_addr_t dma_handle)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
+ return PTR_ERR(mailbox);
+ }
+
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
+
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
+ hr_cq->cqn);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
+ hr_cq->cqn, ret);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_cmd_mailbox *mailbox;
- u64 mtts[MTT_MIN_COUNT] = { 0 };
+ u64 mtts[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle;
int ret;
@@ -121,7 +148,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
if (ret) {
ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
hr_cq->cqn, ret);
- goto err_out;
+ return ret;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
@@ -130,41 +157,17 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
goto err_put;
}
- /* Allocate mailbox memory */
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
- goto err_xa;
- }
-
- hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
-
- /* Send mailbox to hw */
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
- HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret) {
- ibdev_err(ibdev,
- "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
- hr_cq->cqn, ret);
+ ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
+ if (ret)
goto err_xa;
- }
-
- hr_cq->cons_index = 0;
- hr_cq->arm_sn = 1;
-
- refcount_set(&hr_cq->refcount, 1);
- init_completion(&hr_cq->free);
return 0;
err_xa:
xa_erase(&cq_table->array, hr_cq->cqn);
-
err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
-err_out:
return ret;
}
@@ -174,9 +177,8 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
struct device *dev = hr_dev->dev;
int ret;
- ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
- HNS_ROCE_CMD_DESTROY_CQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
+ hr_cq->cqn);
if (ret)
dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
@@ -413,6 +415,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cqc;
}
+ hr_cq->cons_index = 0;
+ hr_cq->arm_sn = 1;
+ refcount_set(&hr_cq->refcount, 1);
+ init_completion(&hr_cq->free);
+
return 0;
err_cqc:
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 1e0bae136997..3083d6db1d68 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -561,6 +561,15 @@ struct hns_roce_cmd_mailbox {
dma_addr_t dma;
};
+struct hns_roce_mbox_msg {
+ u64 in_param;
+ u64 out_param;
+ u8 cmd;
+ u32 tag;
+ u16 token;
+ u8 event_en;
+};
+
struct hns_roce_dev;
struct hns_roce_rinl_sge {
@@ -624,6 +633,7 @@ struct hns_roce_qp {
u32 next_sge;
enum ib_mtu path_mtu;
u32 max_inline_data;
+ u8 free_mr_en;
/* 0: flush needed, 1: unneeded */
unsigned long flush_flag;
@@ -851,11 +861,9 @@ struct hns_roce_hw {
int (*hw_profile)(struct hns_roce_dev *hr_dev);
int (*hw_init)(struct hns_roce_dev *hr_dev);
void (*hw_exit)(struct hns_roce_dev *hr_dev);
- int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
- u16 token, int event);
- int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
- unsigned int timeout);
+ int (*post_mbox)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg);
+ int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
@@ -873,15 +881,16 @@ struct hns_roce_hw {
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle);
int (*set_hem)(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj, int step_idx);
+ struct hns_roce_hem_table *table, int obj, u32 step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
- int step_idx);
+ u32 step_idx);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
+ void (*dereg_mr)(struct hns_roce_dev *hr_dev);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
@@ -1145,9 +1154,6 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 8917365cc6b8..ce1a0d2792a3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -488,7 +488,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_index *index)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
- int step_idx;
+ u32 step_idx;
int ret = 0;
if (index->inited & HEM_INDEX_L0) {
@@ -618,7 +618,7 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
struct ib_device *ibdev = &hr_dev->ib_dev;
u32 hop_num = mhop->hop_num;
u32 chunk_ba_num;
- int step_idx;
+ u32 step_idx;
index->inited = HEM_INDEX_BUF;
chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b33e948fd060..2b0cef17ad45 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1344,17 +1344,17 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
return ret;
}
-static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
- dma_addr_t base_addr, u16 op)
+static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
+ dma_addr_t base_addr, u8 cmd, unsigned long tag)
{
- struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ struct hns_roce_cmd_mailbox *mbox;
int ret;
+ mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mbox))
return PTR_ERR(mbox);
- ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
hns_roce_free_cmd_mailbox(hr_dev, mbox);
return ret;
}
@@ -2664,6 +2664,194 @@ static void free_dip_list(struct hns_roce_dev *hr_dev)
spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
}
+static void free_mr_exit(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ if (free_mr->rsv_qp[i]) {
+ ret = ib_destroy_qp(free_mr->rsv_qp[i]);
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to destroy qp in free mr.\n");
+
+ free_mr->rsv_qp[i] = NULL;
+ }
+ }
+
+ if (free_mr->rsv_cq) {
+ ib_destroy_cq(free_mr->rsv_cq);
+ free_mr->rsv_cq = NULL;
+ }
+
+ if (free_mr->rsv_pd) {
+ ib_dealloc_pd(free_mr->rsv_pd);
+ free_mr->rsv_pd = NULL;
+ }
+}
+
+static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_cq_init_attr cq_init_attr = {};
+ struct ib_qp_init_attr qp_init_attr = {};
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+ int ret;
+ int i;
+
+ pd = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(pd)) {
+ ibdev_err(ibdev, "failed to create pd for free mr.\n");
+ return PTR_ERR(pd);
+ }
+ free_mr->rsv_pd = pd;
+
+ cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
+ cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr);
+ if (IS_ERR(cq)) {
+ ibdev_err(ibdev, "failed to create cq for free mr.\n");
+ ret = PTR_ERR(cq);
+ goto create_failed;
+ }
+ free_mr->rsv_cq = cq;
+
+ qp_init_attr.qp_type = IB_QPT_RC;
+ qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ qp_init_attr.send_cq = free_mr->rsv_cq;
+ qp_init_attr.recv_cq = free_mr->rsv_cq;
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
+ qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
+ qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
+ qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
+
+ qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr);
+ if (IS_ERR(qp)) {
+ ibdev_err(ibdev, "failed to create qp for free mr.\n");
+ ret = PTR_ERR(qp);
+ goto create_failed;
+ }
+
+ free_mr->rsv_qp[i] = qp;
+ }
+
+ return 0;
+
+create_failed:
+ free_mr_exit(hr_dev);
+
+ return ret;
+}
+
+static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ struct ib_qp_attr *attr, int sl_num)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_qp *hr_qp;
+ int loopback;
+ int mask;
+ int ret;
+
+ hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]);
+ hr_qp->free_mr_en = 1;
+
+ mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
+ attr->qp_state = IB_QPS_INIT;
+ attr->port_num = 1;
+ attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
+ ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ loopback = hr_dev->loop_idc;
+ /* Set qpc lbi = 1 to indicate loopback IO */
+ hr_dev->loop_idc = 1;
+
+ mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
+ IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
+ attr->qp_state = IB_QPS_RTR;
+ attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ attr->path_mtu = IB_MTU_256;
+ attr->dest_qp_num = hr_qp->qpn;
+ attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;
+
+ rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
+
+ ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
+ hr_dev->loop_idc = loopback;
+ if (ret) {
+ ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
+ IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
+ attr->qp_state = IB_QPS_RTS;
+ attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
+ attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
+ attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
+ ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
+ if (ret)
+ ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_qp_attr attr = {};
+ int ret;
+ int i;
+
+ rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
+ rdma_ah_set_static_rate(&attr.ah_attr, 3);
+ rdma_ah_set_port_num(&attr.ah_attr, 1);
+
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ ret = free_mr_modify_rsv_qp(hr_dev, &attr, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int free_mr_init(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+
+ ret = free_mr_alloc_res(hr_dev);
+ if (ret)
+ return ret;
+
+ ret = free_mr_modify_qp(hr_dev);
+ if (ret)
+ goto err_modify_qp;
+
+ return 0;
+
+err_modify_qp:
+ free_mr_exit(hr_dev);
+
+ return ret;
+}
+
static int get_hem_table(struct hns_roce_dev *hr_dev)
{
unsigned int qpc_count;
@@ -2780,21 +2968,21 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
free_dip_list(hr_dev);
}
-static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier,
- u16 op, u16 token, int event)
+static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
- mb->in_param_l = cpu_to_le32(in_param);
- mb->in_param_h = cpu_to_le32(in_param >> 32);
- mb->out_param_l = cpu_to_le32(out_param);
- mb->out_param_h = cpu_to_le32(out_param >> 32);
- mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
- mb->token_event_en = cpu_to_le32(event << 16 | token);
+ mb->in_param_l = cpu_to_le32(mbox_msg->in_param);
+ mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32);
+ mb->out_param_l = cpu_to_le32(mbox_msg->out_param);
+ mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32);
+ mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd);
+ mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 |
+ mbox_msg->token);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
@@ -2847,9 +3035,8 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
return ret;
}
-static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier,
- u16 op, u16 token, int event)
+static int v2_post_mbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mbox_msg *mbox_msg)
{
u8 status = 0;
int ret;
@@ -2865,8 +3052,7 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
}
/* Post new message to mbox */
- ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, token, event);
+ ret = hns_roce_mbox_post(hr_dev, mbox_msg);
if (ret)
dev_err_ratelimited(hr_dev->dev,
"failed to post mailbox, ret = %d.\n", ret);
@@ -2874,12 +3060,13 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
return ret;
}
-static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout)
+static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev)
{
u8 status = 0;
int ret;
- ret = v2_wait_mbox_complete(hr_dev, timeout, &status);
+ ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS,
+ &status);
if (!ret) {
if (status != MB_ST_COMPLETE_SUCC)
return -EBUSY;
@@ -3245,6 +3432,98 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
return 0;
}
+static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ const struct ib_send_wr *bad_wr;
+ struct ib_rdma_wr rdma_wr = {};
+ struct ib_send_wr *send_wr;
+ int ret;
+
+ send_wr = &rdma_wr.wr;
+ send_wr->opcode = IB_WR_RDMA_WRITE;
+
+ ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc);
+
+static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+ struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_qp *hr_qp;
+ unsigned long end;
+ int cqe_cnt = 0;
+ int npolled;
+ int ret;
+ int i;
+
+ /*
+ * If the device initialization is not complete or in the uninstall
+ * process, then there is no need to execute free mr.
+ */
+ if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
+ priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
+ hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
+ return;
+
+ mutex_lock(&free_mr->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ hr_qp = to_hr_qp(free_mr->rsv_qp[i]);
+
+ ret = free_mr_post_send_lp_wqe(hr_qp);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
+ hr_qp->qpn, ret);
+ break;
+ }
+
+ cqe_cnt++;
+ }
+
+ end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
+ while (cqe_cnt) {
+ npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc);
+ if (npolled < 0) {
+ ibdev_err(ibdev,
+ "failed to poll cqe for free mr, remain %d cqe.\n",
+ cqe_cnt);
+ goto out;
+ }
+
+ if (time_after(jiffies, end)) {
+ ibdev_err(ibdev,
+ "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
+ cqe_cnt);
+ goto out;
+ }
+ cqe_cnt -= npolled;
+ }
+
+out:
+ mutex_unlock(&free_mr->mutex);
+}
+
+static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
+{
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_send_cmd_to_hw(hr_dev);
+}
+
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
@@ -3818,38 +4097,38 @@ out:
}
static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
- int step_idx, u16 *mbox_op)
+ u32 step_idx, u8 *mbox_cmd)
{
- u16 op;
+ u8 cmd;
switch (type) {
case HEM_TYPE_QPC:
- op = HNS_ROCE_CMD_WRITE_QPC_BT0;
+ cmd = HNS_ROCE_CMD_WRITE_QPC_BT0;
break;
case HEM_TYPE_MTPT:
- op = HNS_ROCE_CMD_WRITE_MPT_BT0;
+ cmd = HNS_ROCE_CMD_WRITE_MPT_BT0;
break;
case HEM_TYPE_CQC:
- op = HNS_ROCE_CMD_WRITE_CQC_BT0;
+ cmd = HNS_ROCE_CMD_WRITE_CQC_BT0;
break;
case HEM_TYPE_SRQC:
- op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0;
break;
case HEM_TYPE_SCCC:
- op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+ cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0;
break;
case HEM_TYPE_QPC_TIMER:
- op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
+ cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
break;
case HEM_TYPE_CQC_TIMER:
- op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
+ cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
break;
default:
dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
return -EINVAL;
}
- *mbox_op = op + step_idx;
+ *mbox_cmd = cmd + step_idx;
return 0;
}
@@ -3872,10 +4151,10 @@ static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
}
static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
- dma_addr_t base_addr, u32 hem_type, int step_idx)
+ dma_addr_t base_addr, u32 hem_type, u32 step_idx)
{
int ret;
- u16 op;
+ u8 cmd;
if (unlikely(hem_type == HEM_TYPE_GMV))
return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
@@ -3883,16 +4162,16 @@ static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
return 0;
- ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op);
+ ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd);
if (ret < 0)
return ret;
- return config_hem_ba_to_hw(hr_dev, obj, base_addr, op);
+ return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj);
}
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
- int step_idx)
+ u32 step_idx)
{
struct hns_roce_hem_iter iter;
struct hns_roce_hem_mhop mhop;
@@ -3950,29 +4229,29 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj,
- int step_idx)
+ struct hns_roce_hem_table *table,
+ int tag, u32 step_idx)
{
- struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ u8 cmd = 0xff;
int ret;
- u16 op = 0xff;
if (!hns_roce_check_whether_mhop(hr_dev, table->type))
return 0;
switch (table->type) {
case HEM_TYPE_QPC:
- op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
+ cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0;
break;
case HEM_TYPE_MTPT:
- op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
+ cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0;
break;
case HEM_TYPE_CQC:
- op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0;
break;
case HEM_TYPE_SRQC:
- op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
break;
case HEM_TYPE_SCCC:
case HEM_TYPE_QPC_TIMER:
@@ -3985,15 +4264,13 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
return 0;
}
- op += step_idx;
+ cmd += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- /* configure the tag and op */
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
@@ -4017,9 +4294,8 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
memcpy(mailbox->buf, context, qpc_size);
memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
- HNS_ROCE_CMD_MODIFY_QPC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
+ HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -4667,6 +4943,18 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
u8 hr_port;
int ret;
+ /*
+ * If free_mr_en of qp is set, it means that this qp comes from
+ * free mr. This qp will perform the loopback operation.
+ * In the loopback scenario, only sl needs to be set.
+ */
+ if (hr_qp->free_mr_en) {
+ hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
+ hr_reg_clear(qpc_mask, QPC_SL);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ return 0;
+ }
+
ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
hr_port = ib_port - 1;
is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
@@ -5092,9 +5380,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
- HNS_ROCE_CMD_QUERY_QPC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
+ hr_qp->qpn);
if (ret)
goto out;
@@ -5460,9 +5747,8 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
- HNS_ROCE_CMD_MODIFY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
+ HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
ibdev_err(&hr_dev->ib_dev,
@@ -5488,9 +5774,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return PTR_ERR(mailbox);
srq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
- HNS_ROCE_CMD_QUERY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+ HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
if (ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to process cmd of querying SRQ, ret = %d.\n",
@@ -5540,9 +5825,8 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
- HNS_ROCE_CMD_MODIFY_CQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
+ HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
ibdev_err(&hr_dev->ib_dev,
@@ -5869,15 +6153,14 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
{
struct device *dev = hr_dev->dev;
int ret;
+ u8 cmd;
if (eqn < hr_dev->caps.num_comp_vectors)
- ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
- 0, HNS_ROCE_CMD_DESTROY_CEQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ cmd = HNS_ROCE_CMD_DESTROY_CEQC;
else
- ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
- 0, HNS_ROCE_CMD_DESTROY_AEQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ cmd = HNS_ROCE_CMD_DESTROY_AEQC;
+
+ ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
if (ret)
dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
}
@@ -5983,16 +6266,15 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
}
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq,
- unsigned int eq_cmd)
+ struct hns_roce_eq *eq, u8 eq_cmd)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR_OR_NULL(mailbox))
- return -ENOMEM;
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
ret = alloc_eq_buf(hr_dev, eq);
if (ret)
@@ -6002,8 +6284,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
if (ret)
goto err_cmd_mbox;
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
- eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn);
if (ret) {
dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
goto err_cmd_mbox;
@@ -6114,14 +6395,14 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
struct device *dev = hr_dev->dev;
struct hns_roce_eq *eq;
- unsigned int eq_cmd;
- int irq_num;
- int eq_num;
int other_num;
int comp_num;
int aeq_num;
- int i;
+ int irq_num;
+ int eq_num;
+ u8 eq_cmd;
int ret;
+ int i;
other_num = hr_dev->caps.num_other_vectors;
comp_num = hr_dev->caps.num_comp_vectors;
@@ -6258,6 +6539,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
.modify_qp = hns_roce_v2_modify_qp,
+ .dereg_mr = hns_roce_v2_dereg_mr,
.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
.init_eq = hns_roce_v2_init_eq_table,
.cleanup_eq = hns_roce_v2_cleanup_eq_table,
@@ -6339,14 +6621,25 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
ret = hns_roce_init(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
- goto error_failed_get_cfg;
+ goto error_failed_cfg;
+ }
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ ret = free_mr_init(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to init free mr!\n");
+ goto error_failed_roce_init;
+ }
}
handle->priv = hr_dev;
return 0;
-error_failed_get_cfg:
+error_failed_roce_init:
+ hns_roce_exit(hr_dev);
+
+error_failed_cfg:
kfree(hr_dev->priv);
error_failed_kzalloc:
@@ -6368,6 +6661,9 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
hns_roce_handle_device_err(hr_dev);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_exit(hr_dev);
+
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 12be85f0986e..0d87b627601e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -139,6 +139,18 @@ enum {
#define CMD_CSQ_DESC_NUM 1024
#define CMD_CRQ_DESC_NUM 1024
+/* Free mr used parameters */
+#define HNS_ROCE_FREE_MR_USED_CQE_NUM 128
+#define HNS_ROCE_FREE_MR_USED_QP_NUM 0x8
+#define HNS_ROCE_FREE_MR_USED_PSN 0x0808
+#define HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT 0x7
+#define HNS_ROCE_FREE_MR_USED_QP_TIMEOUT 0x12
+#define HNS_ROCE_FREE_MR_USED_SQWQE_NUM 128
+#define HNS_ROCE_FREE_MR_USED_SQSGE_NUM 0x2
+#define HNS_ROCE_FREE_MR_USED_RQWQE_NUM 128
+#define HNS_ROCE_FREE_MR_USED_RQSGE_NUM 0x2
+#define HNS_ROCE_V2_FREE_MR_TIMEOUT 4500
+
enum {
NO_ARMED = 0x0,
REG_NXT_CEQE = 0x2,
@@ -1418,10 +1430,18 @@ struct hns_roce_link_table {
#define HNS_ROCE_EXT_LLM_ENTRY(addr, id) (((id) << (64 - 12)) | ((addr) >> 12))
#define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)
+struct hns_roce_v2_free_mr {
+ struct ib_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
+ struct ib_cq *rsv_cq;
+ struct ib_pd *rsv_pd;
+ struct mutex mutex;
+};
+
struct hns_roce_v2_priv {
struct hnae3_handle *handle;
struct hns_roce_v2_cmq cmq;
struct hns_roce_link_table ext_llm;
+ struct hns_roce_v2_free_mr free_mr;
};
struct hns_roce_dip {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
index 5a97b5a0b7be..f7a75a7cda74 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
@@ -18,9 +18,8 @@ int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
- HNS_ROCE_CMD_QUERY_CQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC,
+ cqn);
if (ret) {
dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
goto err_mailbox;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 2ee06b906b60..b389738d157f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -47,24 +47,6 @@ unsigned long key_to_hw_index(u32 key)
return (key << 24) | (key >> 8);
}
-static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
-{
- return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
- HNS_ROCE_CMD_CREATE_MPT,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
-{
- return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
- mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
@@ -137,14 +119,13 @@ static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}
-static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr)
+static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
if (mr->enabled) {
- ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
key_to_hw_index(mr->key) &
(hr_dev->caps.num_mtpts - 1));
if (ret)
@@ -166,10 +147,8 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
- return ret;
- }
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
@@ -180,7 +159,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
@@ -303,13 +282,14 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
return ERR_CAST(mailbox);
mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
- HNS_ROCE_CMD_QUERY_MPT,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
+ mtpt_idx);
if (ret)
goto free_cmd_mbox;
- ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
+ mtpt_idx);
if (ret)
ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
@@ -339,7 +319,8 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
goto free_cmd_mbox;
}
- ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
+ mtpt_idx);
if (ret) {
ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
goto free_cmd_mbox;
@@ -361,6 +342,9 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
struct hns_roce_mr *mr = to_hr_mr(ibmr);
int ret = 0;
+ if (hr_dev->hw->dereg_mr)
+ hr_dev->hw->dereg_mr(hr_dev);
+
hns_roce_mr_free(hr_dev, mr);
kfree(mr);
@@ -480,7 +464,7 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
int ret;
if (mw->enabled) {
- ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
key_to_hw_index(mw->rkey) &
(hr_dev->caps.num_mtpts - 1));
if (ret)
@@ -520,7 +504,7 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index e64ef6903fb4..8dae98f827eb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -59,58 +59,39 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
}
}
-static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
- HNS_ROCE_CMD_CREATE_SRQ,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
-{
- return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
-{
- struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
int id;
id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
GFP_KERNEL);
if (id < 0) {
- ibdev_err(ibdev, "failed to alloc srq(%d).\n", id);
+ ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);
return -ENOMEM;
}
- srq->srqn = (unsigned long)id;
- ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
- if (ret) {
- ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
- goto err_out;
- }
+ srq->srqn = id;
- ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
- if (ret) {
- ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
- goto err_put;
- }
+ return 0;
+}
+
+static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn);
+}
+
+static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR_OR_NULL(mailbox)) {
+ if (IS_ERR(mailbox)) {
ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
- ret = -ENOMEM;
- goto err_xa;
+ return PTR_ERR(mailbox);
}
ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
@@ -119,24 +100,44 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
goto err_mbox;
}
- ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
- if (ret) {
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ,
+ srq->srqn);
+ if (ret)
ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
- goto err_mbox;
- }
+err_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
+ if (ret) {
+ ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ if (ret) {
+ ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
+ goto err_put;
+ }
+
+ ret = hns_roce_create_srqc(hr_dev, srq);
+ if (ret)
+ goto err_xa;
return 0;
-err_mbox:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_xa:
xa_erase(&srq_table->xa, srq->srqn);
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
-err_out:
- ida_free(&srq_ida->ida, id);
return ret;
}
@@ -146,7 +147,8 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
- ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
+ ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ,
+ srq->srqn);
if (ret)
dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn);
@@ -158,7 +160,6 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
wait_for_completion(&srq->free);
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
- ida_free(&srq_table->srq_ida.ida, (int)srq->srqn);
}
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
@@ -406,10 +407,14 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
if (ret)
return ret;
- ret = alloc_srqc(hr_dev, srq);
+ ret = alloc_srqn(hr_dev, srq);
if (ret)
goto err_srq_buf;
+ ret = alloc_srqc(hr_dev, srq);
+ if (ret)
+ goto err_srqn;
+
if (udata) {
resp.srqn = srq->srqn;
if (ib_copy_to_udata(udata, &resp,
@@ -428,6 +433,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
err_srqc:
free_srqc(hr_dev, srq);
+err_srqn:
+ free_srqn(hr_dev, srq);
err_srq_buf:
free_srq_buf(hr_dev, srq);
@@ -440,6 +447,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
free_srqc(hr_dev, srq);
+ free_srqn(hr_dev, srq);
free_srq_buf(hr_dev, srq);
return 0;
}
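
The reworked SRQ paths above allocate the software SRQ number first (alloc_srqn) and the hardware context second (alloc_srqc), then release them in the reverse order on both the error and destroy paths. A minimal stand-alone sketch of that goto-unwind ordering, using hypothetical stand-in functions (alloc_id, alloc_ctx, free_ctx, free_id) rather than the driver's API:

#include <stdio.h>

/* Hypothetical stand-ins for the two allocation stages. */
static int alloc_id(void)  { puts("alloc id");  return 0; }
static int alloc_ctx(void) { puts("alloc ctx"); return 0; }
static void free_ctx(void) { puts("free ctx"); }
static void free_id(void)  { puts("free id"); }

static int create(void)
{
	int ret;

	ret = alloc_id();
	if (ret)
		return ret;

	ret = alloc_ctx();
	if (ret)
		goto err_id;

	return 0;

err_id:
	free_id();
	return ret;
}

static void destroy(void)
{
	/* Tear down in reverse order of creation. */
	free_ctx();
	free_id();
}

int main(void)
{
	if (!create())
		destroy();
	return 0;
}

Keeping each stage's cleanup label directly below the stage that needs it is what lets hns_roce_create_srq() above unwind with a plain sequence of gotos (err_srqc, err_srqn, err_srq_buf).
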
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 6dea0a49d171..dedb3b7edd8d 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1501,15 +1501,14 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
* @cm_info: CM info for parent listen node
* @cm_parent_listen_node: The parent listen node
*/
-static enum irdma_status_code
-irdma_del_multiple_qhash(struct irdma_device *iwdev,
- struct irdma_cm_info *cm_info,
- struct irdma_cm_listener *cm_parent_listen_node)
+static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
{
struct irdma_cm_listener *child_listen_node;
- enum irdma_status_code ret = IRDMA_ERR_CFG;
struct list_head *pos, *tpos;
unsigned long flags;
+ int ret = -EINVAL;
spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
list_for_each_safe (pos, tpos,
@@ -1618,16 +1617,16 @@ u16 irdma_get_vlan_ipv4(u32 *addr)
* Adds a qhash and a child listen node for every IPv6 address
* on the adapter and adds the associated qhash filter
*/
-static enum irdma_status_code
-irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
- struct irdma_cm_listener *cm_parent_listen_node)
+static int irdma_add_mqh_6(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
{
struct net_device *ip_dev;
struct inet6_dev *idev;
struct inet6_ifaddr *ifp, *tmp;
- enum irdma_status_code ret = 0;
struct irdma_cm_listener *child_listen_node;
unsigned long flags;
+ int ret = 0;
rtnl_lock();
for_each_netdev(&init_net, ip_dev) {
@@ -1653,7 +1652,7 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
child_listen_node);
if (!child_listen_node) {
ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
- ret = IRDMA_ERR_NO_MEMORY;
+ ret = -ENOMEM;
goto exit;
}
@@ -1700,16 +1699,16 @@ exit:
* Adds a qhash and a child listen node for every IPv4 address
* on the adapter and adds the associated qhash filter
*/
-static enum irdma_status_code
-irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
- struct irdma_cm_listener *cm_parent_listen_node)
+static int irdma_add_mqh_4(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
{
struct net_device *ip_dev;
struct in_device *idev;
struct irdma_cm_listener *child_listen_node;
- enum irdma_status_code ret = 0;
unsigned long flags;
const struct in_ifaddr *ifa;
+ int ret = 0;
rtnl_lock();
for_each_netdev(&init_net, ip_dev) {
@@ -1734,7 +1733,7 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
if (!child_listen_node) {
ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
in_dev_put(idev);
- ret = IRDMA_ERR_NO_MEMORY;
+ ret = -ENOMEM;
goto exit;
}
@@ -1781,9 +1780,9 @@ exit:
* @cm_info: CM info for parent listen node
* @cm_listen_node: The parent listen node
*/
-static enum irdma_status_code
-irdma_add_mqh(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
- struct irdma_cm_listener *cm_listen_node)
+static int irdma_add_mqh(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_listen_node)
{
if (cm_info->ipv4)
return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
@@ -2200,7 +2199,7 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
/* set our node specific transport info */
cm_node->ipv4 = cm_info->ipv4;
cm_node->vlan_id = cm_info->vlan_id;
- if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb)
+ if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
cm_node->vlan_id = 0;
cm_node->tos = cm_info->tos;
cm_node->user_pri = cm_info->user_pri;
@@ -2209,8 +2208,12 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
ibdev_warn(&iwdev->ibdev,
"application TOS[%d] and remote client TOS[%d] mismatch\n",
listener->tos, cm_info->tos);
- cm_node->tos = max(listener->tos, cm_info->tos);
- cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ if (iwdev->vsi.dscp_mode) {
+ cm_node->user_pri = listener->user_pri;
+ } else {
+ cm_node->tos = max(listener->tos, cm_info->tos);
+ cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ }
ibdev_dbg(&iwdev->ibdev,
"DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
cm_node->user_pri);
@@ -3201,8 +3204,7 @@ static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
* @iwdev: iwarp device structure
* @rdma_ver: HW version
*/
-enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
- u8 rdma_ver)
+int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
{
struct irdma_cm_core *cm_core = &iwdev->cm_core;
@@ -3212,7 +3214,7 @@ enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
/* Handles CM event work items send to Iwarp core */
cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
if (!cm_core->event_wq)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
INIT_LIST_HEAD(&cm_core->listen_list);
@@ -3835,7 +3837,11 @@ int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_info.cm_id = cm_id;
cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
cm_info.tos = cm_id->tos;
- cm_info.user_pri = rt_tos2priority(cm_id->tos);
+ if (iwdev->vsi.dscp_mode)
+ cm_info.user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
+ else
+ cm_info.user_pri = rt_tos2priority(cm_id->tos);
if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
return -ENOMEM;
@@ -3915,10 +3921,10 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
struct irdma_device *iwdev;
struct irdma_cm_listener *cm_listen_node;
struct irdma_cm_info cm_info = {};
- enum irdma_status_code err;
struct sockaddr_in *laddr;
struct sockaddr_in6 *laddr6;
bool wildcard = false;
+ int err;
iwdev = to_iwdev(cm_id->device);
if (!iwdev)
@@ -3959,7 +3965,7 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
}
}
- if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb)
+ if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
cm_info.vlan_id = 0;
cm_info.backlog = backlog;
cm_info.cm_id = cm_id;
@@ -3977,7 +3983,11 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = cm_listen_node;
cm_listen_node->tos = cm_id->tos;
- cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+ if (iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
+ else
+ cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
cm_info.user_pri = cm_listen_node->user_pri;
if (!cm_listen_node->reused_node) {
if (wildcard) {
@@ -4325,11 +4335,11 @@ static void irdma_qhash_ctrl(struct irdma_device *iwdev,
struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
struct irdma_cm_listener *child_listen_node;
struct list_head *pos, *tpos;
- enum irdma_status_code err;
bool node_allocated = false;
enum irdma_quad_hash_manage_type op = ifup ?
IRDMA_QHASH_MANAGE_TYPE_ADD :
IRDMA_QHASH_MANAGE_TYPE_DELETE;
+ int err;
list_for_each_safe (pos, tpos, child_listen_list) {
child_listen_node = list_entry(pos, struct irdma_cm_listener,
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
index 3bf42728e9b7..19c284975fc7 100644
--- a/drivers/infiniband/hw/irdma/cm.h
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -384,6 +384,13 @@ int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
struct irdma_puda_buf *sqbuf,
enum irdma_timer_type type, int send_retrans,
int close_when_complete);
+
+static inline u8 irdma_tos2dscp(u8 tos)
+{
+#define IRDMA_DSCP_VAL GENMASK(7, 2)
+ return (u8)FIELD_GET(IRDMA_DSCP_VAL, tos);
+}
+
int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
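
The new irdma_tos2dscp() helper extracts the 6-bit DSCP field from bits 7:2 of the IP ToS/traffic-class byte; the connect and listen paths above use it to index the VSI's dscp_map when DSCP mode is enabled. A small user-space sketch of the same bit extraction, without the kernel's GENMASK()/FIELD_GET() helpers (the function name and sample value here are only illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* DSCP occupies bits 7:2 of the ToS byte; ECN uses bits 1:0. */
static uint8_t tos2dscp(uint8_t tos)
{
	return tos >> 2;
}

int main(void)
{
	/* DSCP 46 (EF) with the ECN bits clear encodes as ToS 0xb8. */
	assert(tos2dscp(0xb8) == 46);
	printf("tos 0x%02x -> dscp %u\n", 0xb8, (unsigned)tos2dscp(0xb8));
	return 0;
}

Indexing dscp_map with this value lets the driver pick a user priority per DSCP codepoint instead of deriving it from the full ToS byte via rt_tos2priority(), which remains the fallback when DSCP mode is off.
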
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 3141a9c85de5..58c0e181ca2b 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3,7 +3,6 @@
#include <linux/etherdevice.h>
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -70,6 +69,31 @@ void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
}
}
+static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2p)
+{
+ u8 i;
+
+ vsi->qos_rel_bw = l2p->vsi_rel_bw;
+ vsi->qos_prio_type = l2p->vsi_prio_type;
+ vsi->dscp_mode = l2p->dscp_mode;
+ if (l2p->dscp_mode) {
+ memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ l2p->up2tc[i] = i;
+ }
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
+ vsi->qos[i].traffic_class = l2p->up2tc[i];
+ vsi->qos[i].rel_bw =
+ l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
+ vsi->qos[i].prio_type =
+ l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
+ vsi->qos[i].valid = false;
+ }
+}
+
/**
* irdma_change_l2params - given the new l2 parameters, change all qp
* @vsi: RDMA VSI pointer
@@ -88,6 +112,7 @@ void irdma_change_l2params(struct irdma_sc_vsi *vsi,
return;
vsi->tc_change_pending = false;
+ irdma_set_qos_info(vsi, l2params);
irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
}
@@ -154,17 +179,16 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
- struct irdma_add_arp_cache_entry_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_add_arp_cache_entry_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8, info->reach_max);
set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
@@ -192,16 +216,15 @@ irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
* @arp_index: arp index to delete arp entry
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- u16 arp_index, bool post_sq)
+static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 arp_index, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = arp_index |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
@@ -226,17 +249,16 @@ irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
- struct irdma_apbvt_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_apbvt_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->port);
@@ -274,7 +296,7 @@ irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
* quad hash entry in the hardware will point to iwarp's qp
* number and requires no calls from the driver.
*/
-static enum irdma_status_code
+static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
struct irdma_qhash_table_info *info,
u64 scratch, bool post_sq)
@@ -287,7 +309,7 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
@@ -350,10 +372,9 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
* @qp: sc qp
* @info: initialization qp info
*/
-enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
- struct irdma_qp_init_info *info)
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
{
- enum irdma_status_code ret_code;
+ int ret_code;
u32 pble_obj_cnt;
u16 wqe_size;
@@ -361,7 +382,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
info->qp_uk_init_info.max_rq_frag_cnt >
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
qp->dev = info->pd->dev;
qp->vsi = info->vsi;
@@ -384,7 +405,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
(info->virtual_map && info->rq_pa >= pble_obj_cnt))
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
@@ -424,8 +445,8 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -433,12 +454,12 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
cqp = qp->dev->cqp;
if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
- qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
- return IRDMA_ERR_INVALID_QP_ID;
+ qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
+ return -EINVAL;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -475,9 +496,8 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
- struct irdma_modify_qp_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -488,7 +508,7 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
cqp = qp->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
if (info->dont_send_fin)
@@ -546,9 +566,8 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
* @ignore_mw_bnd: memory window bind flag
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
- bool remove_hash_idx, bool ignore_mw_bnd,
- bool post_sq)
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -557,7 +576,7 @@ enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
cqp = qp->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -739,16 +758,15 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- bool post_sq)
+static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
@@ -774,17 +792,16 @@ irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
- struct irdma_local_mac_entry_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_local_mac_entry_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 header;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
@@ -813,16 +830,16 @@ irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
* @ignore_ref_count: to force mac address delete
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- u16 entry_idx, u8 ignore_ref_count, bool post_sq)
+static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 entry_idx, u8 ignore_ref_count,
+ bool post_sq)
{
__le64 *wqe;
u64 header;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE,
IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
@@ -1035,10 +1052,9 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
- struct irdma_allocate_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_allocate_stag_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -1055,7 +1071,7 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
@@ -1097,10 +1113,9 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
- struct irdma_reg_ns_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ struct irdma_reg_ns_stag_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 fbo;
@@ -1118,7 +1133,7 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
else if (info->page_size == 0x1000)
page_size = IRDMA_PAGE_SIZE_4K;
else
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
@@ -1128,12 +1143,12 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
fbo = info->va & (info->page_size - 1);
set_64bit_val(wqe, 0,
@@ -1186,10 +1201,9 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
- struct irdma_dealloc_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_dealloc_stag_info *info,
+ u64 scratch, bool post_sq)
{
u64 hdr;
__le64 *wqe;
@@ -1198,7 +1212,7 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1227,9 +1241,9 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
+ struct irdma_mw_alloc_info *info, u64 scratch,
+ bool post_sq)
{
u64 hdr;
struct irdma_sc_cqp *cqp;
@@ -1238,7 +1252,7 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1268,9 +1282,9 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
* @info: fast mr info
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code
-irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
- struct irdma_fast_reg_stag_info *info, bool post_sq)
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq)
{
u64 temp, hdr;
__le64 *wqe;
@@ -1292,7 +1306,7 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(&qp->qp_uk, wqe_idx);
@@ -1821,8 +1835,7 @@ void irdma_terminate_received(struct irdma_sc_qp *qp,
}
}
-static enum irdma_status_code irdma_null_ws_add(struct irdma_sc_vsi *vsi,
- u8 user_pri)
+static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{
return 0;
}
@@ -1845,7 +1858,6 @@ static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_init_info *info)
{
- struct irdma_l2params *l2p;
int i;
vsi->dev = info->dev;
@@ -1858,18 +1870,8 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
vsi->fcn_id = info->dev->hmc_fn_id;
- l2p = info->params;
- vsi->qos_rel_bw = l2p->vsi_rel_bw;
- vsi->qos_prio_type = l2p->vsi_prio_type;
+ irdma_set_qos_info(vsi, info->params);
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
- if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
- vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
- vsi->qos[i].traffic_class = info->params->up2tc[i];
- vsi->qos[i].rel_bw =
- l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
- vsi->qos[i].prio_type =
- l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
- vsi->qos[i].valid = false;
mutex_init(&vsi->qos[i].qos_mutex);
INIT_LIST_HEAD(&vsi->qos[i].qplist);
}
@@ -1918,8 +1920,8 @@ static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
* @vsi: pointer to the vsi structure
* @info: The info structure used for initialization
*/
-enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
- struct irdma_vsi_stats_info *info)
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info)
{
u8 fcn_id = info->fcn_id;
struct irdma_dma_mem *stats_buff_mem;
@@ -1934,7 +1936,7 @@ enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
&stats_buff_mem->pa,
GFP_KERNEL);
if (!stats_buff_mem->va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
vsi->pestat->gather_info.last_gather_stats_va =
@@ -1961,7 +1963,7 @@ stats_error:
stats_buff_mem->va, stats_buff_mem->pa);
stats_buff_mem->va = NULL;
- return IRDMA_ERR_CQP_COMPL_ERROR;
+ return -EIO;
}
/**
@@ -2023,19 +2025,19 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
* @info: gather stats info structure
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
- struct irdma_stats_gather_info *info, u64 scratch)
+static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_gather_info *info,
+ u64 scratch)
{
__le64 *wqe;
u64 temp;
if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
- return IRDMA_ERR_BUF_TOO_SHORT;
+ return -ENOMEM;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
@@ -2070,17 +2072,16 @@ irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
* @alloc: alloc vs. delete flag
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
- struct irdma_stats_inst_info *info, bool alloc,
- u64 scratch)
+static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_inst_info *info,
+ bool alloc, u64 scratch)
{
__le64 *wqe;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
@@ -2108,9 +2109,8 @@ irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
* @info: User priority map info
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
- struct irdma_up_info *info,
- u64 scratch)
+static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+ struct irdma_up_info *info, u64 scratch)
{
__le64 *wqe;
u64 temp = 0;
@@ -2118,7 +2118,7 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
temp |= (u64)info->map[i] << (i * 8);
@@ -2151,17 +2151,16 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
* @node_op: 0 for add 1 for modify, 2 for delete
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
- struct irdma_ws_node_info *info,
- enum irdma_ws_node_op node_op, u64 scratch)
+static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+ struct irdma_ws_node_info *info,
+ enum irdma_ws_node_op node_op, u64 scratch)
{
__le64 *wqe;
u64 temp = 0;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 32,
FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
@@ -2194,9 +2193,9 @@ irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
- struct irdma_qp_flush_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq)
{
u64 temp = 0;
__le64 *wqe;
@@ -2215,13 +2214,13 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
ibdev_dbg(to_ibdev(qp->dev),
"CQP: Additional flush request ignored for qp %x\n",
qp->qp_uk.qp_id);
- return IRDMA_ERR_FLUSHED_Q;
+ return -EALREADY;
}
cqp = qp->pd->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
if (info->userflushcode) {
if (flush_rq)
@@ -2268,9 +2267,9 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
- struct irdma_gen_ae_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, u64 scratch,
+ bool post_sq)
{
u64 temp;
__le64 *wqe;
@@ -2280,7 +2279,7 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
cqp = qp->pd->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
info->ae_src);
@@ -2308,10 +2307,9 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
- struct irdma_upload_context_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+ struct irdma_upload_context_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -2320,7 +2318,7 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->buf_pa);
@@ -2349,21 +2347,20 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
- struct irdma_cqp_manage_push_page_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_manage_push_page_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
if (info->free_page &&
info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
- return IRDMA_ERR_INVALID_PUSH_PAGE_INDEX;
+ return -EINVAL;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->qs_handle);
hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
@@ -2389,16 +2386,15 @@ irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
* @qp: sc qp struct
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
- struct irdma_sc_qp *qp,
- u64 scratch)
+static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
{
u64 hdr;
__le64 *wqe;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
@@ -2420,16 +2416,15 @@ static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
* @qp: sc qp struct
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_resume_qp(struct irdma_sc_cqp *cqp,
- struct irdma_sc_qp *qp,
- u64 scratch)
+static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
{
u64 hdr;
__le64 *wqe;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
@@ -2462,14 +2457,13 @@ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
* @cq: cq struct
* @info: cq initialization info
*/
-enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
- struct irdma_cq_init_info *info)
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
{
u32 pble_obj_cnt;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cq->cq_pa = info->cq_base_pa;
cq->dev = info->dev;
@@ -2500,23 +2494,21 @@ enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
* @check_overflow: flag for overflow check
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
- u64 scratch,
- bool check_overflow,
- bool post_sq)
+static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
+ bool check_overflow, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
u64 hdr;
struct irdma_sc_ceq *ceq;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
cqp = cq->dev->cqp;
- if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
- return IRDMA_ERR_INVALID_CQ_ID;
+ if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
+ return -EINVAL;
- if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
- return IRDMA_ERR_INVALID_CEQ_ID;
+ if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
@@ -2529,7 +2521,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
if (!wqe) {
if (ceq && ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, cq);
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
}
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
@@ -2575,8 +2567,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
- bool post_sq)
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -2586,7 +2577,7 @@ enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
@@ -2642,9 +2633,9 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag to post to sq
*/
-static enum irdma_status_code
-irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+ struct irdma_modify_cq_info *info, u64 scratch,
+ bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -2654,12 +2645,12 @@ irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->cq_resize && info->virtual_map &&
info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, info->cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -2733,8 +2724,8 @@ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
* @tail: wqtail register value
* @count: how many times to try for completion
*/
-static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
- u32 tail, u32 count)
+static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+ u32 count)
{
u32 i = 0;
u32 newtail, error, val;
@@ -2746,7 +2737,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
ibdev_dbg(to_ibdev(cqp->dev),
"CQP: CQPERRCODES error_code[x%08X]\n",
error);
- return IRDMA_ERR_CQP_COMPL_ERROR;
+ return -EIO;
}
if (newtail != tail) {
/* SUCCESS */
@@ -2757,7 +2748,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
udelay(cqp->dev->hw_attrs.max_sleep_count);
}
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
}
/**
@@ -2912,10 +2903,9 @@ static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
* parses fpm query buffer and copy max_cnt and
* size value of hmc objects in hmc_info
*/
-static enum irdma_status_code
-irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
- struct irdma_hmc_info *hmc_info,
- struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
{
struct irdma_hmc_obj_info *obj_info;
u64 temp;
@@ -2954,7 +2944,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[IRDMA_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
if (!hmc_fpm_misc->xf_block_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
get_64bit_val(buf, 80, &temp);
@@ -2963,7 +2953,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
if (!hmc_fpm_misc->q1_block_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
@@ -2987,7 +2977,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
if (!hmc_fpm_misc->rrf_block_size &&
obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
@@ -2999,7 +2989,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
if (!hmc_fpm_misc->ooiscf_block_size &&
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -3027,8 +3017,7 @@ static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
* @ceq: ceq sc structure
* @cq: cq sc structure
*/
-enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
- struct irdma_sc_cq *cq)
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
{
unsigned long flags;
@@ -3036,7 +3025,7 @@ enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
if (ceq->reg_cq_size == ceq->elem_cnt) {
spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
- return IRDMA_ERR_REG_CQ_FULL;
+ return -ENOMEM;
}
ceq->reg_cq[ceq->reg_cq_size++] = cq;
@@ -3077,15 +3066,15 @@ exit:
*
* Initializes the object and context buffers for a control Queue Pair.
*/
-enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
- struct irdma_cqp_init_info *info)
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info)
{
u8 hw_sq_size;
if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
((info->sq_size & (info->sq_size - 1))))
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
IRDMA_QUEUE_TYPE_CQP);
@@ -3135,13 +3124,12 @@ enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
* @maj_err: If error, major err number
* @min_err: If error, minor err number
*/
-enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
- u16 *min_err)
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
{
u64 temp;
u8 hw_rev;
u32 cnt = 0, p1, p2, val = 0, err_code;
- enum irdma_status_code ret_code;
+ int ret_code;
hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
@@ -3150,7 +3138,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
cqp->sdbuf.size, &cqp->sdbuf.pa,
GFP_KERNEL);
if (!cqp->sdbuf.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
spin_lock_init(&cqp->dev->cqp_lock);
@@ -3205,7 +3193,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
do {
if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
- ret_code = IRDMA_ERR_TIMEOUT;
+ ret_code = -ETIMEDOUT;
goto err;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3213,7 +3201,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
} while (!val);
if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
- ret_code = IRDMA_ERR_DEVICE_NOT_SUPPORTED;
+ ret_code = -EOPNOTSUPP;
goto err;
}
@@ -3254,7 +3242,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
u32 *wqe_idx)
{
__le64 *wqe = NULL;
- enum irdma_status_code ret_code;
+ int ret_code;
if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
ibdev_dbg(to_ibdev(cqp->dev),
@@ -3281,16 +3269,16 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
* irdma_sc_cqp_destroy - destroy cqp during close
* @cqp: struct for cqp hw
*/
-enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
{
u32 cnt = 0, val;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
do {
if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
- ret_code = IRDMA_ERR_TIMEOUT;
+ ret_code = -ETIMEDOUT;
break;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3335,8 +3323,8 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
* @ccq: ccq sc struct
* @info: completion q entry to return
*/
-enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
- struct irdma_ccq_cqe_info *info)
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info)
{
u64 qp_ctx, temp, temp1;
__le64 *cqe;
@@ -3344,7 +3332,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
u32 wqe_idx;
u32 error;
u8 polarity;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3354,7 +3342,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
get_64bit_val(cqe, 24, &temp);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
if (polarity != ccq->cq_uk.polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
get_64bit_val(cqe, 8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
@@ -3401,25 +3389,25 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
* @op_code: cqp opcode for completion
* @compl_info: completion q entry to return
*/
-enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
- struct irdma_ccq_cqe_info *compl_info)
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
+ struct irdma_ccq_cqe_info *compl_info)
{
struct irdma_ccq_cqe_info info = {};
struct irdma_sc_cq *ccq;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 cnt = 0;
ccq = cqp->dev->ccq;
while (1) {
if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
udelay(cqp->dev->hw_attrs.max_sleep_count);
continue;
}
if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
- ret_code = IRDMA_ERR_CQP_COMPL_ERROR;
+ ret_code = -EIO;
break;
}
/* make sure op code matches*/
@@ -3443,17 +3431,16 @@ enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u
* @info: info for the manage function table operation
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
- struct irdma_hmc_fcn_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
+ struct irdma_hmc_fcn_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, 0);
set_64bit_val(wqe, 8, 0);
@@ -3486,8 +3473,7 @@ irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
* for fpm commit
* @cqp: struct for cqp hw
*/
-static enum irdma_status_code
-irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
{
return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
NULL);
@@ -3502,19 +3488,19 @@ irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
* @post_sq: flag for cqp db to ring
* @wait_type: poll ccq or cqp registers for cqp completion
*/
-static enum irdma_status_code
-irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
- struct irdma_dma_mem *commit_fpm_mem, bool post_sq,
- u8 wait_type)
+static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *commit_fpm_mem,
+ bool post_sq, u8 wait_type)
{
__le64 *wqe;
u64 hdr;
u32 tail, val, error;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, hmc_fn_id);
set_64bit_val(wqe, 32, commit_fpm_mem->pa);
@@ -3548,8 +3534,7 @@ irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
* query fpm
* @cqp: struct for cqp hw
*/
-static enum irdma_status_code
-irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
{
return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
NULL);
@@ -3564,19 +3549,19 @@ irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
* @post_sq: flag for cqp db to ring
* @wait_type: poll ccq or cqp registers for cqp completion
*/
-static enum irdma_status_code
-irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
- struct irdma_dma_mem *query_fpm_mem, bool post_sq,
- u8 wait_type)
+static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *query_fpm_mem,
+ bool post_sq, u8 wait_type)
{
__le64 *wqe;
u64 hdr;
u32 tail, val, error;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, hmc_fn_id);
set_64bit_val(wqe, 32, query_fpm_mem->pa);
@@ -3608,21 +3593,21 @@ irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
* @ceq: ceq sc structure
* @info: ceq initialization info
*/
-enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
- struct irdma_ceq_init_info *info)
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info)
{
u32 pble_obj_cnt;
if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
- if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
- return IRDMA_ERR_INVALID_CEQ_ID;
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
ceq->size = sizeof(*ceq);
ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
@@ -3655,8 +3640,8 @@ enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
- bool post_sq)
+static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -3665,7 +3650,7 @@ static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64
cqp = ceq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, ceq->elem_cnt);
set_64bit_val(wqe, 32,
(ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
@@ -3697,8 +3682,7 @@ static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64
* irdma_sc_cceq_create_done - poll for control ceq wqe to complete
* @ceq: ceq sc structure
*/
-static enum irdma_status_code
-irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
+static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
{
struct irdma_sc_cqp *cqp;
@@ -3711,7 +3695,7 @@ irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
* irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
* @ceq: ceq sc structure
*/
-enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
{
struct irdma_sc_cqp *cqp;
@@ -3730,9 +3714,9 @@ enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
* @ceq: ceq sc structure
* @scratch: u64 saved to be used during cqp completion
*/
-enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
{
- enum irdma_status_code ret_code;
+ int ret_code;
struct irdma_sc_dev *dev = ceq->dev;
dev->ccq->vsi = ceq->vsi;
@@ -3755,8 +3739,7 @@ enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratc
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
- bool post_sq)
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -3765,7 +3748,7 @@ enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratc
cqp = ceq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, ceq->elem_cnt);
set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
@@ -3884,19 +3867,19 @@ void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
* @aeq: aeq structure ptr
* @info: aeq initialization info
*/
-enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
- struct irdma_aeq_init_info *info)
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info)
{
u32 pble_obj_cnt;
if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
aeq->size = sizeof(*aeq);
aeq->polarity = 1;
@@ -3921,8 +3904,8 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
- u64 scratch, bool post_sq)
+static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -3931,7 +3914,7 @@ static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
cqp = aeq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, aeq->elem_cnt);
set_64bit_val(wqe, 32,
(aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
@@ -3960,8 +3943,8 @@ static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
- u64 scratch, bool post_sq)
+static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -3974,7 +3957,7 @@ static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, aeq->elem_cnt);
set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
@@ -3997,8 +3980,8 @@ static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
* @aeq: aeq structure ptr
* @info: aeqe info to be returned
*/
-enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
- struct irdma_aeqe_info *info)
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info)
{
u64 temp, compl_ctx;
__le64 *aeqe;
@@ -4012,7 +3995,7 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
if (aeq->polarity != polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
@@ -4157,22 +4140,21 @@ void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
* @cq: sc's cq struct
* @info: info for control cq initialization
*/
-enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
- struct irdma_ccq_init_info *info)
+int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
{
u32 pble_obj_cnt;
if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
- if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
- return IRDMA_ERR_INVALID_CEQ_ID;
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cq->cq_pa = info->cq_pa;
cq->cq_uk.cq_base = info->cq_base;
@@ -4209,7 +4191,7 @@ enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
* irdma_sc_ccq_create_done - poll cqp for ccq create
* @ccq: ccq sc struct
*/
-static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
+static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
{
struct irdma_sc_cqp *cqp;
@@ -4225,10 +4207,10 @@ static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq
* @check_overflow: overflow flag for ccq
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
- bool check_overflow, bool post_sq)
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq)
{
- enum irdma_status_code ret_code;
+ int ret_code;
ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
if (ret_code)
@@ -4250,19 +4232,18 @@ enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
- bool post_sq)
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
u64 hdr;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 tail, val, error;
cqp = ccq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
@@ -4301,13 +4282,12 @@ enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch
* @dev : ptr to irdma_dev struct
* @hmc_fn_id: hmc function id
*/
-enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
- u8 hmc_fn_id)
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
{
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
struct irdma_dma_mem query_fpm_mem;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u8 wait_type;
hmc_info = dev->hmc_info;
@@ -4338,14 +4318,13 @@ enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
* @dev : ptr to irdma_dev struct
* @hmc_fn_id: hmc function id
*/
-static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
- u8 hmc_fn_id)
+static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
{
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_obj_info *obj_info;
__le64 *buf;
struct irdma_dma_mem commit_fpm_mem;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u8 wait_type;
hmc_info = dev->hmc_info;
@@ -4408,9 +4387,8 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
* @info: sd info for wqe
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
- u64 scratch)
+static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
+ struct irdma_update_sds_info *info, u64 scratch)
{
u64 data;
u64 hdr;
@@ -4422,7 +4400,7 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
mem_entries = info->cnt - wqe_entries;
@@ -4488,12 +4466,11 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
* @info: sd info for sd's
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_update_pe_sds(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info, u64 scratch)
+static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info, u64 scratch)
{
struct irdma_sc_cqp *cqp = dev->cqp;
- enum irdma_status_code ret_code;
+ int ret_code;
ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
if (!ret_code)
@@ -4507,13 +4484,12 @@ irdma_update_pe_sds(struct irdma_sc_dev *dev,
* @dev: sc device struct
* @info: sd info for sd's
*/
-enum irdma_status_code
-irdma_update_sds_noccq(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info)
+int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info)
{
u32 error, val, tail;
struct irdma_sc_cqp *cqp = dev->cqp;
- enum irdma_status_code ret_code;
+ int ret_code;
ret_code = cqp_sds_wqe_fill(cqp, info, 0);
if (ret_code)
@@ -4534,10 +4510,9 @@ irdma_update_sds_noccq(struct irdma_sc_dev *dev,
* @post_sq: flag for cqp db to ring
* @poll_registers: flag to poll register for cqp completion
*/
-enum irdma_status_code
-irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id, bool post_sq,
- bool poll_registers)
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers)
{
u64 hdr;
__le64 *wqe;
@@ -4545,7 +4520,7 @@ irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
@@ -4620,8 +4595,7 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
* irdma_sc_query_rdma_features_done - poll cqp for query features done
* @cqp: struct for cqp hw
*/
-static enum irdma_status_code
-irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
{
return irdma_sc_poll_for_cqp_op_done(cqp,
IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
@@ -4634,16 +4608,15 @@ irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
* @buf: buffer to hold query info
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
- struct irdma_dma_mem *buf, u64 scratch)
+static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
+ struct irdma_dma_mem *buf, u64 scratch)
{
__le64 *wqe;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
temp = buf->pa;
set_64bit_val(wqe, 32, temp);
@@ -4667,9 +4640,9 @@ irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
* irdma_get_rdma_features - get RDMA features
* @dev: sc device struct
*/
-enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
+int irdma_get_rdma_features(struct irdma_sc_dev *dev)
{
- enum irdma_status_code ret_code;
+ int ret_code;
struct irdma_dma_mem feat_buf;
u64 temp;
u16 byte_idx, feat_type, feat_cnt, feat_idx;
@@ -4679,7 +4652,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
&feat_buf.pa, GFP_KERNEL);
if (!feat_buf.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
if (!ret_code)
@@ -4690,7 +4663,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
get_64bit_val(feat_buf.va, 0, &temp);
feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
if (feat_cnt < 2) {
- ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
+ ret_code = -EINVAL;
goto exit;
} else if (feat_cnt > IRDMA_MAX_FEATURES) {
ibdev_dbg(to_ibdev(dev),
@@ -4704,7 +4677,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
feat_buf.size, &feat_buf.pa,
GFP_KERNEL);
if (!feat_buf.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
if (!ret_code)
@@ -4715,7 +4688,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
get_64bit_val(feat_buf.va, 0, &temp);
feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
if (feat_cnt < 2) {
- ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
+ ret_code = -EINVAL;
goto exit;
}
}
@@ -4794,7 +4767,7 @@ static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
* @dev: sc device struct
* @qp_count: desired qp count
*/
-enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
+int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
{
struct irdma_virt_mem virt_mem;
u32 i, mem_size;
@@ -4805,7 +4778,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
u32 loop_count = 0;
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
hmc_info = dev->hmc_info;
hmc_fpm_misc = &dev->hmc_fpm_misc;
@@ -4932,7 +4905,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
ibdev_dbg(to_ibdev(dev),
"HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
- return IRDMA_ERR_CFG;
+ return -EINVAL;
}
if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
@@ -4968,7 +4941,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (!virt_mem.va) {
ibdev_dbg(to_ibdev(dev),
"HMC: failed to allocate memory for sd_entry buffer\n");
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
hmc_info->sd_table.sd_entry = virt_mem.va;
@@ -4980,10 +4953,10 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
* @dev: rdma device
* @pcmdinfo: cqp command info
*/
-static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
- struct cqp_cmds_info *pcmdinfo)
+static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
{
- enum irdma_status_code status;
+ int status;
struct irdma_dma_mem val_mem;
bool alloc = false;
@@ -5245,7 +5218,7 @@ static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
pcmdinfo->in.u.mc_modify.scratch);
break;
default:
- status = IRDMA_NOT_SUPPORTED;
+ status = -EOPNOTSUPP;
break;
}
@@ -5257,10 +5230,10 @@ static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
* @dev: sc device struct
* @pcmdinfo: cqp command info
*/
-enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
- struct cqp_cmds_info *pcmdinfo)
+int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
{
- enum irdma_status_code status = 0;
+ int status = 0;
unsigned long flags;
spin_lock_irqsave(&dev->cqp_lock, flags);
@@ -5276,9 +5249,9 @@ enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
* irdma_process_bh - called from tasklet for cqp list
* @dev: sc device struct
*/
-enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev)
+int irdma_process_bh(struct irdma_sc_dev *dev)
{
- enum irdma_status_code status = 0;
+ int status = 0;
struct cqp_cmds_info *pcmdinfo;
unsigned long flags;
@@ -5366,12 +5339,11 @@ static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
* @dev: Device pointer
* @info: Device init info
*/
-enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
- struct irdma_sc_dev *dev,
- struct irdma_device_init_info *info)
+int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info)
{
u32 val;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u8 db_size;
INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
@@ -5415,7 +5387,7 @@ enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
irdma_sc_init_hw(dev);
if (irdma_wait_pe_ready(dev))
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
@@ -5423,7 +5395,7 @@ enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
ibdev_dbg(to_ibdev(dev),
"DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
val, db_size);
- return IRDMA_ERR_PE_DOORBELL_NOT_ENA;
+ return -ENODEV;
}
dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
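The ctrl.c hunks above finish moving the privileged command path from enum irdma_status_code to plain kernel errnos: a full CQP ring now yields -ENOMEM, parameter checks return -EINVAL, and the PE-ready and doorbell failures map to -ETIMEDOUT and -ENODEV. Below is a minimal sketch of the resulting call-site pattern; irdma_bringup_ccq() is a hypothetical wrapper written only to show that callers can now propagate the errno unchanged, assuming the driver's internal headers are in scope.

/* Hypothetical helper (not part of this patch); illustrates errno propagation. */
static int irdma_bringup_ccq(struct irdma_sc_cq *ccq,
                             struct irdma_ccq_init_info *info)
{
        int ret;

        ret = irdma_sc_ccq_init(ccq, info);     /* -EINVAL on bad sizes/ids */
        if (ret)
                return ret;

        /* -ENOMEM here means the CQP SQ ring was full (old IRDMA_ERR_RING_FULL) */
        return irdma_sc_ccq_create(ccq, 0, true, true);
}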
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index cc3d9a365b35..e03e03082a5f 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -964,7 +964,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + 1) % size; \
(_retcode) = 0; \
} else { \
- (_retcode) = IRDMA_ERR_RING_FULL; \
+ (_retcode) = -ENOMEM; \
} \
}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
@@ -975,7 +975,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + (_count)) % size; \
(_retcode) = 0; \
} else { \
- (_retcode) = IRDMA_ERR_RING_FULL; \
+ (_retcode) = -ENOMEM; \
} \
}
#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
@@ -986,7 +986,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + 1) % size; \
(_retcode) = 0; \
} else { \
- (_retcode) = IRDMA_ERR_RING_FULL; \
+ (_retcode) = -ENOMEM; \
} \
}
#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
@@ -997,7 +997,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + (_count)) % size; \
(_retcode) = 0; \
} else { \
- (_retcode) = IRDMA_ERR_RING_FULL; \
+ (_retcode) = -ENOMEM; \
} \
}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
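The defs.h macros keep their behaviour and only swap the failure value from IRDMA_ERR_RING_FULL to -ENOMEM. The snippet below is a stand-alone analogue of that contract, not the macro itself; the real macros operate on struct irdma_ring and use the driver's own ring-full test, so the demo_ring layout here is purely illustrative.

/* Illustration only: advance the head while the ring has room, else -ENOMEM. */
struct demo_ring {
        u32 head;
        u32 size;
        u32 used;       /* entries currently outstanding */
};

static int demo_ring_move_head(struct demo_ring *ring)
{
        if (ring->used + 1 >= ring->size)       /* ring full */
                return -ENOMEM;

        ring->head = (ring->head + 1) % ring->size;
        ring->used++;
        return 0;
}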
diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c
index ecffcb93c05a..49307ce8c4da 100644
--- a/drivers/infiniband/hw/irdma/hmc.c
+++ b/drivers/infiniband/hw/irdma/hmc.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -121,10 +120,8 @@ static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_i
* @type: paged or direct sd
* @setsd: flag to set or clear sd
*/
-enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
- u64 pa, u32 sd_idx,
- enum irdma_sd_entry_type type,
- bool setsd)
+int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type, bool setsd)
{
struct irdma_update_sds_info sdinfo;
@@ -145,16 +142,15 @@ enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
* @sd_cnt: number of sd entries
* @setsd: flag to set or clear sd
*/
-static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
- struct irdma_hmc_info *hmc_info,
- u32 sd_index, u32 sd_cnt,
- bool setsd)
+static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ u32 sd_cnt, bool setsd)
{
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_update_sds_info sdinfo = {};
u64 pa;
u32 i;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
for (i = sd_index; i < sd_index + sd_cnt; i++) {
@@ -196,16 +192,15 @@ static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
* @dev: pointer to the device structure
* @info: create obj info
*/
-static enum irdma_status_code
-irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
- struct irdma_hmc_create_obj_info *info)
+static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
{
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
- return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
+ return -EINVAL;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt)
- return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
+ return -EINVAL;
if (!info->add_sd_cnt)
return 0;
@@ -222,9 +217,8 @@ irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
*/
-enum irdma_status_code
-irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
- struct irdma_hmc_create_obj_info *info)
+int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
{
struct irdma_hmc_sd_entry *sd_entry;
u32 sd_idx, sd_lmt;
@@ -232,10 +226,10 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 i, j;
bool pd_error = false;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
- return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
+ return -EINVAL;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
@@ -243,7 +237,7 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
"HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
info->rsrc_type, info->start_idx, info->count,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
- return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
+ return -EINVAL;
}
irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
@@ -251,7 +245,7 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
&sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
- return IRDMA_ERR_INVALID_SD_INDEX;
+ return -EINVAL;
}
irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
@@ -312,7 +306,7 @@ exit_sd_error:
irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
break;
default:
- ret_code = IRDMA_ERR_INVALID_SD_TYPE;
+ ret_code = -EINVAL;
break;
}
j--;
@@ -327,12 +321,12 @@ exit_sd_error:
* @info: delete obj info
* @reset: true if called before reset
*/
-static enum irdma_status_code
-irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
- struct irdma_hmc_del_obj_info *info, bool reset)
+static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info,
+ bool reset)
{
struct irdma_hmc_sd_entry *sd_entry;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 i, sd_idx;
struct irdma_dma_mem *mem;
@@ -373,22 +367,21 @@ irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
* caller should deallocate memory allocated previously for
* book-keeping information about PDs and backing storage.
*/
-enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
- struct irdma_hmc_del_obj_info *info,
- bool reset)
+int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info, bool reset)
{
struct irdma_hmc_pd_table *pd_table;
u32 sd_idx, sd_lmt;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 i, j;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
ibdev_dbg(to_ibdev(dev),
"HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
info->start_idx, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
- return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
+ return -EINVAL;
}
if ((info->start_idx + info->count) >
@@ -397,7 +390,7 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
"HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
info->start_idx, info->count, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
- return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
+ return -EINVAL;
}
irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
@@ -433,7 +426,7 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
- return IRDMA_ERR_INVALID_SD_INDEX;
+ return -EINVAL;
}
for (i = sd_idx; i < sd_lmt; i++) {
@@ -477,11 +470,9 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
*/
-enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
- struct irdma_hmc_info *hmc_info,
- u32 sd_index,
- enum irdma_sd_entry_type type,
- u64 direct_mode_sz)
+int irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_dma_mem dma_mem;
@@ -499,7 +490,7 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
&dma_mem.pa, GFP_KERNEL);
if (!dma_mem.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
if (type == IRDMA_SD_TYPE_PAGED) {
struct irdma_virt_mem *vmem =
&sd_entry->u.pd_table.pd_entry_virt_mem;
@@ -510,7 +501,7 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
dma_free_coherent(hw->device, dma_mem.size,
dma_mem.va, dma_mem.pa);
dma_mem.va = NULL;
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
sd_entry->u.pd_table.pd_entry = vmem->va;
@@ -549,10 +540,9 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
* aligned on 4K boundary and zeroed memory.
* 2. It should be 4K in size.
*/
-enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
- struct irdma_hmc_info *hmc_info,
- u32 pd_index,
- struct irdma_dma_mem *rsrc_pg)
+int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg)
{
struct irdma_hmc_pd_table *pd_table;
struct irdma_hmc_pd_entry *pd_entry;
@@ -563,7 +553,7 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
u64 page_desc;
if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
- return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
+ return -EINVAL;
sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
@@ -584,7 +574,7 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
page->size, &page->pa,
GFP_KERNEL);
if (!page->va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
pd_entry->rsrc_pg = false;
}
@@ -621,9 +611,8 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
*/
-enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
- struct irdma_hmc_info *hmc_info,
- u32 idx)
+int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 idx)
{
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_hmc_pd_table *pd_table;
@@ -635,11 +624,11 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
if (sd_idx >= hmc_info->sd_table.sd_cnt)
- return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
+ return -EINVAL;
sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
- return IRDMA_ERR_INVALID_SD_TYPE;
+ return -EINVAL;
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
@@ -656,7 +645,7 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
if (!pd_entry->rsrc_pg) {
mem = &pd_entry->bp.addr;
if (!mem || !mem->va)
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
dma_free_coherent(dev->hw->device, mem->size, mem->va,
mem->pa);
@@ -673,14 +662,13 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
*/
-enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
- u32 idx)
+int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
struct irdma_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (--sd_entry->u.bp.use_cnt)
- return IRDMA_ERR_NOT_READY;
+ return -EBUSY;
hmc_info->sd_table.use_cnt--;
sd_entry->valid = false;
@@ -693,15 +681,14 @@ enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
*/
-enum irdma_status_code
-irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
+int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
struct irdma_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (sd_entry->u.pd_table.use_cnt)
- return IRDMA_ERR_NOT_READY;
+ return -EBUSY;
sd_entry->valid = false;
hmc_info->sd_table.use_cnt--;
diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h
index e2139c788b1b..f5c5dacc7021 100644
--- a/drivers/infiniband/hw/irdma/hmc.h
+++ b/drivers/infiniband/hw/irdma/hmc.h
@@ -141,40 +141,29 @@ struct irdma_hmc_del_obj_info {
bool privileged;
};
-enum irdma_status_code irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
- struct irdma_dma_mem *src_mem,
- u64 src_offset, u64 size);
-enum irdma_status_code
-irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
- struct irdma_hmc_create_obj_info *info);
-enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
- struct irdma_hmc_del_obj_info *info,
- bool reset);
-enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
- u64 pa, u32 sd_idx,
- enum irdma_sd_entry_type type,
- bool setsd);
-enum irdma_status_code
-irdma_update_sds_noccq(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info);
+int irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
+ struct irdma_dma_mem *src_mem, u64 src_offset, u64 size);
+int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info);
+int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info, bool reset);
+int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type,
+ bool setsd);
+int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
-enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
- struct irdma_hmc_info *hmc_info,
- u32 sd_index,
- enum irdma_sd_entry_type type,
- u64 direct_mode_sz);
-enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
- struct irdma_hmc_info *hmc_info,
- u32 pd_index,
- struct irdma_dma_mem *rsrc_pg);
-enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
- struct irdma_hmc_info *hmc_info,
- u32 idx);
-enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
- u32 idx);
-enum irdma_status_code
-irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
+int irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ enum irdma_sd_entry_type type, u64 direct_mode_sz);
+int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg);
+int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 idx);
+int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx);
+int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
#endif /* IRDMA_HMC_H */
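All of the hmc.h prototypes above now return int. The sketch below shows how an HMC object-creation path can consume that; irdma_demo_create_qp_objs() is hypothetical and only touches the irdma_hmc_create_obj_info fields that are visible in this patch.

/* Hypothetical wrapper; only illustrates the new int returns from hmc.h. */
static int irdma_demo_create_qp_objs(struct irdma_sc_dev *dev)
{
        struct irdma_hmc_create_obj_info info = {};
        int ret;

        info.hmc_info = dev->hmc_info;
        info.rsrc_type = IRDMA_HMC_IW_QP;
        info.start_idx = 0;
        info.count = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
        info.privileged = true;

        ret = irdma_sc_create_hmc_obj(dev, &info);      /* -EINVAL or -ENOMEM */
        if (ret)
                ibdev_dbg(to_ibdev(dev), "HMC: create obj failed %d\n", ret);

        return ret;
}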
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 89234d04cc65..3dc9b5801da1 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -75,12 +75,12 @@ static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
struct irdma_sc_cq *cq)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
- enum irdma_status_code status;
u32 compl_error;
+ int status;
do {
status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
- if (status == IRDMA_ERR_Q_EMPTY)
+ if (status == -ENOENT)
break;
if (status) {
ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
@@ -456,7 +456,7 @@ static void irdma_ceq_dpc(struct tasklet_struct *t)
* Allocate iwdev msix table and copy the msix info to the table
* Return 0 if successful, otherwise return error
*/
-static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf)
+static int irdma_save_msix_info(struct irdma_pci_f *rf)
{
struct irdma_qvlist_info *iw_qvlist;
struct irdma_qv_info *iw_qvinfo;
@@ -466,13 +466,13 @@ static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf)
size_t size;
if (!rf->msix_count)
- return IRDMA_ERR_NO_INTR;
+ return -EINVAL;
size = sizeof(struct irdma_msix_vector) * rf->msix_count;
size += struct_size(iw_qvlist, qv_info, rf->msix_count);
rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
if (!rf->iw_msixtbl)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
rf->iw_qvlist = (struct irdma_qvlist_info *)
(&rf->iw_msixtbl[rf->msix_count]);
@@ -564,9 +564,9 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
*/
static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
{
- enum irdma_status_code status = 0;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cqp *cqp = &rf->cqp;
+ int status = 0;
if (rf->cqp_cmpl_wq)
destroy_workqueue(rf->cqp_cmpl_wq);
@@ -606,9 +606,9 @@ static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
*/
static void irdma_destroy_aeq(struct irdma_pci_f *rf)
{
- enum irdma_status_code status = IRDMA_ERR_NOT_READY;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_aeq *aeq = &rf->aeq;
+ int status = -EBUSY;
if (!rf->msix_shared) {
rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
@@ -642,8 +642,8 @@ exit:
*/
static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
{
- enum irdma_status_code status;
struct irdma_sc_dev *dev = &rf->sc_dev;
+ int status;
if (rf->reset)
goto exit;
@@ -733,7 +733,7 @@ static void irdma_destroy_ccq(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_ccq *ccq = &rf->ccq;
- enum irdma_status_code status = 0;
+ int status = 0;
if (!rf->reset)
status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
@@ -796,9 +796,8 @@ static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
* @dev: hardware control device structure
* @info: information for the hmc object to create
*/
-static enum irdma_status_code
-irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
- struct irdma_hmc_create_obj_info *info)
+static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
{
return irdma_sc_create_hmc_obj(dev, info);
}
@@ -812,13 +811,12 @@ irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
* Create the device hmc objects and allocate hmc pages
* Return 0 if successful, otherwise clean up and return error
*/
-static enum irdma_status_code
-irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers vers)
+static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
+ enum irdma_vers vers)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_hmc_create_obj_info info = {};
- enum irdma_status_code status = 0;
- int i;
+ int i, status = 0;
info.hmc_info = dev->hmc_info;
info.privileged = privileged;
@@ -868,9 +866,9 @@ irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers v
* update the memptr to point to the new aligned memory
* Return 0 if successful, otherwise return no memory error
*/
-static enum irdma_status_code
-irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr,
- u32 size, u32 mask)
+static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
+ struct irdma_dma_mem *memptr, u32 size,
+ u32 mask)
{
unsigned long va, newva;
unsigned long extra;
@@ -884,7 +882,7 @@ irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr,
memptr->pa = rf->obj_next.pa + extra;
memptr->size = size;
if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
rf->obj_next.va = (u8 *)memptr->va + size;
rf->obj_next.pa = memptr->pa + size;
@@ -899,25 +897,24 @@ irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr,
* Return 0, if the cqp and all the resources associated with it
* are successfully created, otherwise return error
*/
-static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf)
+static int irdma_create_cqp(struct irdma_pci_f *rf)
{
- enum irdma_status_code status;
u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
struct irdma_dma_mem mem;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cqp_init_info cqp_init_info = {};
struct irdma_cqp *cqp = &rf->cqp;
u16 maj_err, min_err;
- int i;
+ int i, status;
cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
if (!cqp->cqp_requests)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
if (!cqp->scratch_array) {
kfree(cqp->cqp_requests);
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
dev->cqp = &cqp->sc_cqp;
@@ -929,7 +926,7 @@ static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf)
if (!cqp->sq.va) {
kfree(cqp->scratch_array);
kfree(cqp->cqp_requests);
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
@@ -999,12 +996,12 @@ exit:
* Return 0, if the ccq and the resources associated with it
* are successfully created, otherwise return error
*/
-static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf)
+static int irdma_create_ccq(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
- enum irdma_status_code status;
struct irdma_ccq_init_info info = {};
struct irdma_ccq *ccq = &rf->ccq;
+ int status;
dev->ccq = &ccq->sc_cq;
dev->ccq->dev = dev;
@@ -1015,7 +1012,7 @@ static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf)
ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
&ccq->mem_cq.pa, GFP_KERNEL);
if (!ccq->mem_cq.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
ccq->shadow_area.size,
@@ -1054,9 +1051,9 @@ exit:
* Allocate a mac ip entry and add it to the hw table Return 0
* if successful, otherwise return error
*/
-static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)
+static int irdma_alloc_set_mac(struct irdma_device *iwdev)
{
- enum irdma_status_code status;
+ int status;
status = irdma_alloc_local_mac_entry(iwdev->rf,
&iwdev->mac_ip_table_idx);
@@ -1082,9 +1079,8 @@ static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)
* Allocate interrupt resources and enable irq handling
* Return 0 if successful, otherwise return error
*/
-static enum irdma_status_code
-irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
- u32 ceq_id, struct irdma_msix_vector *msix_vec)
+static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ u32 ceq_id, struct irdma_msix_vector *msix_vec)
{
int status;
@@ -1103,7 +1099,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
- return IRDMA_ERR_CFG;
+ return status;
}
msix_vec->ceq_id = ceq_id;
@@ -1119,7 +1115,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
* Allocate interrupt resources and enable irq handling
* Return 0 if successful, otherwise return error
*/
-static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
+static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
u32 ret = 0;
@@ -1131,7 +1127,7 @@ static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
}
if (ret) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
- return IRDMA_ERR_CFG;
+ return -EINVAL;
}
rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
@@ -1149,12 +1145,10 @@ static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
* Return 0, if the ceq and the resources associated with it
* are successfully created, otherwise return error
*/
-static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,
- struct irdma_ceq *iwceq,
- u32 ceq_id,
- struct irdma_sc_vsi *vsi)
+static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ u32 ceq_id, struct irdma_sc_vsi *vsi)
{
- enum irdma_status_code status;
+ int status;
struct irdma_ceq_init_info info = {};
struct irdma_sc_dev *dev = &rf->sc_dev;
u64 scratch;
@@ -1169,7 +1163,7 @@ static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,
iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
&iwceq->mem.pa, GFP_KERNEL);
if (!iwceq->mem.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
info.ceq_id = ceq_id;
info.ceqe_base = iwceq->mem.va;
@@ -1205,18 +1199,18 @@ static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,
* Create the ceq 0 and configure its msix interrupt vector
* Return 0, if successfully set up, otherwise return error
*/
-static enum irdma_status_code irdma_setup_ceq_0(struct irdma_pci_f *rf)
+static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
{
struct irdma_ceq *iwceq;
struct irdma_msix_vector *msix_vec;
u32 i;
- enum irdma_status_code status = 0;
+ int status = 0;
u32 num_ceqs;
num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
if (!rf->ceqlist) {
- status = IRDMA_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto exit;
}
@@ -1262,14 +1256,13 @@ exit:
* Create the ceq's and configure their msix interrupt vectors
* Return 0, if ceqs are successfully set up, otherwise return error
*/
-static enum irdma_status_code irdma_setup_ceqs(struct irdma_pci_f *rf,
- struct irdma_sc_vsi *vsi)
+static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
{
u32 i;
u32 ceq_id;
struct irdma_ceq *iwceq;
struct irdma_msix_vector *msix_vec;
- enum irdma_status_code status;
+ int status;
u32 num_ceqs;
num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
@@ -1303,22 +1296,21 @@ del_ceqs:
return status;
}
-static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf,
- u32 size)
+static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
{
- enum irdma_status_code status = IRDMA_ERR_NO_MEMORY;
struct irdma_aeq *aeq = &rf->aeq;
dma_addr_t *pg_arr;
u32 pg_cnt;
+ int status;
if (rf->rdma_ver < IRDMA_GEN_2)
- return IRDMA_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
aeq->mem.va = vzalloc(aeq->mem.size);
if (!aeq->mem.va)
- return status;
+ return -ENOMEM;
pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
@@ -1345,15 +1337,15 @@ static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf,
* Return 0, if the aeq and the resources associated with it
* are successfully created, otherwise return error
*/
-static enum irdma_status_code irdma_create_aeq(struct irdma_pci_f *rf)
+static int irdma_create_aeq(struct irdma_pci_f *rf)
{
- enum irdma_status_code status;
struct irdma_aeq_init_info info = {};
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_aeq *aeq = &rf->aeq;
struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
u32 aeq_size;
u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
+ int status;
aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
@@ -1412,10 +1404,10 @@ err:
* Create the aeq and configure its msix interrupt vector
* Return 0 if successful, otherwise return error
*/
-static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf)
+static int irdma_setup_aeq(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
- enum irdma_status_code status;
+ int status;
status = irdma_create_aeq(rf);
if (status)
@@ -1439,10 +1431,10 @@ static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf)
*
* Return 0 if successful, otherwise return error
*/
-static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev)
+static int irdma_initialize_ilq(struct irdma_device *iwdev)
{
struct irdma_puda_rsrc_info info = {};
- enum irdma_status_code status;
+ int status;
info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
info.cq_id = 1;
@@ -1469,10 +1461,10 @@ static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev)
*
* Return 0 if successful, otherwise return error
*/
-static enum irdma_status_code irdma_initialize_ieq(struct irdma_device *iwdev)
+static int irdma_initialize_ieq(struct irdma_device *iwdev)
{
struct irdma_puda_rsrc_info info = {};
- enum irdma_status_code status;
+ int status;
info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
info.cq_id = 2;
@@ -1515,9 +1507,9 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
* the hmc objects and create the objects
* Return 0 if successful, otherwise return error
*/
-static enum irdma_status_code irdma_hmc_setup(struct irdma_pci_f *rf)
+static int irdma_hmc_setup(struct irdma_pci_f *rf)
{
- enum irdma_status_code status;
+ int status;
u32 qpcnt;
if (rf->rdma_ver == IRDMA_GEN_1)
@@ -1570,9 +1562,9 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
* Return 0 if successful, otherwise clean up the resources
* and return error
*/
-static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf)
+static int irdma_initialize_dev(struct irdma_pci_f *rf)
{
- enum irdma_status_code status;
+ int status;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_device_init_info info = {};
struct irdma_dma_mem mem;
@@ -1584,7 +1576,7 @@ static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf)
rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
if (!rf->hmc_info_mem)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
dev->hmc_info = &rf->hw.hmc;
@@ -1608,7 +1600,7 @@ static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf)
info.fpm_commit_buf = mem.va;
info.bar0 = rf->hw.hw_addr;
- info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn);
+ info.hmc_fn_id = rf->pf_id;
info.hw = &rf->hw;
status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
if (status)
@@ -1667,9 +1659,9 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
destroy_workqueue(iwdev->cleanup_wq);
}
-static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf)
+static int irdma_setup_init_state(struct irdma_pci_f *rf)
{
- enum irdma_status_code status;
+ int status;
status = irdma_save_msix_info(rf);
if (status)
@@ -1680,7 +1672,7 @@ static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf)
rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
&rf->obj_mem.pa, GFP_KERNEL);
if (!rf->obj_mem.va) {
- status = IRDMA_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto clean_msixtbl;
}
@@ -1763,14 +1755,14 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
* Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
* device resource objects.
*/
-enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
- struct irdma_l2params *l2params)
+int irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params)
{
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_sc_dev *dev = &rf->sc_dev;
- enum irdma_status_code status;
struct irdma_vsi_init_info vsi_info = {};
struct irdma_vsi_stats_info stats_info = {};
+ int status;
vsi_info.dev = dev;
vsi_info.back_vsi = iwdev;
@@ -1788,7 +1780,7 @@ enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
if (!stats_info.pestat) {
irdma_cleanup_cm_core(&iwdev->cm_core);
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
stats_info.fcn_id = dev->hmc_fn_id;
status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
@@ -1850,7 +1842,7 @@ enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
if (!iwdev->cleanup_wq)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
irdma_get_used_rsrc(iwdev);
init_waitqueue_head(&iwdev->suspend_wq);
@@ -1870,10 +1862,10 @@ enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
*
* Create admin queues, HMC objects and RF resource objects
*/
-enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
+int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
- enum irdma_status_code status;
+ int status;
do {
status = irdma_setup_init_state(rf);
if (status)
@@ -1915,7 +1907,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
WQ_HIGHPRI | WQ_UNBOUND);
if (!rf->cqp_cmpl_wq) {
- status = IRDMA_ERR_NO_MEMORY;
+ status = -ENOMEM;
break;
}
INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
@@ -2202,11 +2194,11 @@ int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 id
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_info->post_sq = 1;
@@ -2238,11 +2230,11 @@ int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status = 0;
+ int status = 0;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
@@ -2264,18 +2256,17 @@ int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
* @accel_local_port: port for apbvt
* @add_port: add or delete port
*/
-static enum irdma_status_code
-irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, u16 accel_local_port,
- bool add_port)
+static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
+ u16 accel_local_port, bool add_port)
{
struct irdma_apbvt_info *info;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
@@ -2429,22 +2420,21 @@ static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
* @cmnode: cmnode associated with connection
* @wait: wait for completion
*/
-enum irdma_status_code
-irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
- enum irdma_quad_entry_type etype,
- enum irdma_quad_hash_manage_type mtype, void *cmnode,
- bool wait)
+int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait)
{
struct irdma_qhash_table_info *info;
- enum irdma_status_code status;
struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_cm_node *cm_node = cmnode;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_qhash_table_entry.info;
@@ -2558,12 +2548,10 @@ static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
* @info: info for flush
* @wait: flag wait for completion
*/
-enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
- struct irdma_sc_qp *qp,
- struct irdma_qp_flush_info *info,
- bool wait)
+int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, bool wait)
{
- enum irdma_status_code status;
+ int status;
struct irdma_qp_flush_info *hw_info;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -2571,7 +2559,7 @@ enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
if (!wait)
@@ -2619,7 +2607,7 @@ enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
info->sq = true;
new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!new_req) {
- status = IRDMA_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto put_cqp;
}
cqp_info = &new_req->info;
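The hw.c hunks repeat the same control path: allocate a CQP request, fill in cqp_info, and let irdma_handle_cqp_op() return the int status that is now passed straight up. A condensed sketch of that pattern follows; the operation-specific union fields are left out because they differ per command, and the irdma_put_cqp_request() release call is taken from elsewhere in the driver rather than from these hunks.

/* Sketch of the common CQP command pattern with a placeholder operation. */
static int irdma_demo_cqp_cmd(struct irdma_pci_f *rf, bool wait)
{
        struct irdma_cqp_request *cqp_request;
        struct cqp_cmds_info *cqp_info;
        int status;

        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
        if (!cqp_request)
                return -ENOMEM; /* replaces IRDMA_ERR_NO_MEMORY */

        cqp_info = &cqp_request->info;
        cqp_info->post_sq = 1;
        /* cqp_info->cqp_cmd and the in.u.<op> members would be filled here */

        status = irdma_handle_cqp_op(rf, cqp_request);
        irdma_put_cqp_request(&rf->cqp, cqp_request);

        return status;
}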
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index 64148ad8a604..e46fc110004d 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -3,7 +3,6 @@
#include "osdep.h"
#include "type.h"
#include "i40iw_hw.h"
-#include "status.h"
#include "protos.h"
static u32 i40iw_regs[IRDMA_MAX_REGS] = {
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index 43e962b97d6a..4053ead32416 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -77,6 +77,7 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info
rf->rdma_ver = IRDMA_GEN_1;
rf->gen_ops.request_reset = i40iw_request_reset;
rf->pcidev = cdev_info->pcidev;
+ rf->pf_id = cdev_info->fid;
rf->hw.hw_addr = cdev_info->hw_addr;
rf->cdev = cdev_info;
rf->msix_count = cdev_info->msix_count;
@@ -138,7 +139,7 @@ static int i40iw_open(struct i40e_info *cdev_info, struct i40e_client *client)
if (last_qset == IRDMA_NO_QSET)
last_qset = qset;
else if ((qset != last_qset) && (qset != IRDMA_NO_QSET))
- iwdev->dcb = true;
+ iwdev->dcb_vlan_mode = true;
}
if (irdma_rt_init_hw(iwdev, &l2params)) {
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 9fab29039f1c..514453777e07 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -79,6 +79,10 @@ static void irdma_fill_qos_info(struct irdma_l2params *l2params,
}
for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
l2params->up2tc[i] = qos_info->up2tc[i];
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
+ l2params->dscp_mode = true;
+ memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
+ }
}
static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
@@ -108,8 +112,9 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event
l2params.tc_changed = true;
ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
ice_get_qos_params(pf, &qos_info);
- iwdev->dcb = qos_info.num_tc > 1;
irdma_fill_qos_info(&l2params, &qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;
irdma_change_l2params(&iwdev->vsi, &l2params);
} else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
@@ -157,8 +162,8 @@ static void irdma_request_reset(struct irdma_pci_f *rf)
* @vsi: vsi structure
* @tc_node: Traffic class node
*/
-static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node)
+static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct ice_pf *pf = iwdev->rf->cdev;
@@ -171,7 +176,7 @@ static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
ret = ice_add_rdma_qset(pf, &qset);
if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
- return IRDMA_ERR_REG_QSET;
+ return ret;
}
tc_node->l2_sched_node_id = qset.teid;
@@ -226,6 +231,7 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
rf->hw.hw_addr = pf->hw.hw_addr;
rf->pcidev = pf->pdev;
rf->msix_count = pf->num_rdma_msix;
+ rf->pf_id = pf->hw.pf_id;
rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
rf->default_vsi.vsi_idx = vsi->vsi_num;
rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
@@ -236,7 +242,7 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
rf->gen_ops.request_reset = irdma_request_reset;
rf->limits_sel = 7;
rf->iwdev = iwdev;
-
+ mutex_init(&iwdev->ah_tbl_lock);
iwdev->netdev = vsi->netdev;
iwdev->vsi_num = vsi->vsi_num;
iwdev->init_state = INITIAL_STATE;
@@ -275,18 +281,19 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
irdma_fill_device_info(iwdev, pf, vsi);
rf = iwdev->rf;
- if (irdma_ctrl_init_hw(rf)) {
- err = -EIO;
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
goto err_ctrl_init;
- }
l2params.mtu = iwdev->netdev->mtu;
ice_get_qos_params(pf, &qos_info);
irdma_fill_qos_info(&l2params, &qos_info);
- if (irdma_rt_init_hw(iwdev, &l2params)) {
- err = -EIO;
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ err = irdma_rt_init_hw(iwdev, &l2params);
+ if (err)
goto err_rt_init;
- }
err = irdma_ib_register_device(iwdev);
if (err)
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index cb218cab79ac..5123f5feaa2f 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -40,7 +40,6 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
-#include "status.h"
#include "osdep.h"
#include "defs.h"
#include "hmc.h"
@@ -242,8 +241,8 @@ struct irdma_qvlist_info {
struct irdma_gen_ops {
void (*request_reset)(struct irdma_pci_f *rf);
- enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node);
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
};
@@ -257,6 +256,7 @@ struct irdma_pci_f {
u8 *mem_rsrc;
u8 rdma_ver;
u8 rst_to;
+ u8 pf_id;
enum irdma_protocol_used protocol_used;
u32 sd_type;
u32 msix_count;
@@ -332,6 +332,8 @@ struct irdma_device {
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
+ DECLARE_HASHTABLE(ah_hash_tbl, 8);
+ struct mutex ah_tbl_lock; /* protect AH hash table access */
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
@@ -345,7 +347,7 @@ struct irdma_device {
u8 iw_status;
bool roce_mode:1;
bool roce_dcqcn_en:1;
- bool dcb:1;
+ bool dcb_vlan_mode:1;
bool iw_ooo:1;
enum init_completion_state init_state;
@@ -457,10 +459,10 @@ static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
spin_unlock_irqrestore(&rf->rsrc_lock, flags);
}
-enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf);
+int irdma_ctrl_init_hw(struct irdma_pci_f *rf);
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
-enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
- struct irdma_l2params *l2params);
+int irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params);
void irdma_rt_deinit_hw(struct irdma_device *iwdev);
void irdma_qp_add_ref(struct ib_qp *ibqp);
void irdma_qp_rem_ref(struct ib_qp *ibqp);
@@ -489,9 +491,8 @@ void irdma_cm_disconn(struct irdma_qp *qp);
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
u16 maj_err_code, u16 min_err_code);
-enum irdma_status_code
-irdma_handle_cqp_op(struct irdma_pci_f *rf,
- struct irdma_cqp_request *cqp_request);
+int irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request);
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
@@ -500,21 +501,17 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
-enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
- struct irdma_qp *iwqp,
- struct irdma_modify_qp_info *info,
- bool wait);
-enum irdma_status_code irdma_qp_suspend_resume(struct irdma_sc_qp *qp,
- bool suspend);
-enum irdma_status_code
-irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
- enum irdma_quad_entry_type etype,
- enum irdma_quad_hash_manage_type mtype, void *cmnode,
- bool wait);
+int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info, bool wait);
+int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend);
+int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait);
void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
-enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
+int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
u8 term_len);
@@ -523,10 +520,8 @@ int irdma_send_reset(struct irdma_cm_node *cm_node);
struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
u16 rem_port, u32 *rem_addr, u16 loc_port,
u32 *loc_addr, u16 vlan_id);
-enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
- struct irdma_sc_qp *qp,
- struct irdma_qp_flush_info *info,
- bool wait);
+int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, bool wait);
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
struct irdma_gen_ae_info *info, bool wait);
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
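main.h also grows an AH hash table (ah_hash_tbl) guarded by ah_tbl_lock. The patch does not include the lookup code in this excerpt, so the entry layout and key below are illustrative assumptions about how such a table is typically searched under the mutex using the standard linux/hashtable.h helpers.

/* Illustrative only: the entry struct and key derivation are assumptions. */
struct irdma_demo_ah_entry {
        struct hlist_node hlist;
        u32 key;                /* assumed: hash of the destination address */
        refcount_t refcnt;
};

static struct irdma_demo_ah_entry *
irdma_demo_ah_find(struct irdma_device *iwdev, u32 key)
{
        struct irdma_demo_ah_entry *entry;

        mutex_lock(&iwdev->ah_tbl_lock);
        hash_for_each_possible(iwdev->ah_hash_tbl, entry, hlist, key) {
                if (entry->key == key) {
                        refcount_inc(&entry->refcnt);
                        mutex_unlock(&iwdev->ah_tbl_lock);
                        return entry;
                }
        }
        mutex_unlock(&iwdev->ah_tbl_lock);

        return NULL;
}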
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index 63d8bb3a6903..fc1ba2a3e6fb 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/bitfield.h>
+#include <linux/net/intel/iidc.h>
#include <crypto/hash.h>
#include <rdma/ib_verbs.h>
@@ -42,32 +43,28 @@ enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
void irdma_add_dev_ref(struct irdma_sc_dev *dev);
void irdma_put_dev_ref(struct irdma_sc_dev *dev);
-enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
- void *addr, u32 len, u32 val);
+int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
+ u32 val);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf);
void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
u32 seqnum);
void irdma_free_hash_desc(struct shash_desc *hash_desc);
-enum irdma_status_code irdma_init_hash_desc(struct shash_desc **hash_desc);
-enum irdma_status_code
-irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
- struct irdma_puda_buf *buf);
-enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info);
-enum irdma_status_code
-irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
- struct irdma_hmc_fcn_info *hmcfcninfo,
- u16 *pmf_idx);
-enum irdma_status_code
-irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-enum irdma_status_code
-irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *mem);
+int irdma_init_hash_desc(struct shash_desc **hash_desc);
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
+int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
u8 term_len);
@@ -79,7 +76,7 @@ void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi);
void wr32(struct irdma_hw *hw, u32 reg, u32 val);
u32 rd32(struct irdma_hw *hw, u32 reg);
u64 rd64(struct irdma_hw *hw, u32 reg);
-enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
- dma_addr_t *pg_dma, u32 pg_cnt);
+int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
+ u32 pg_cnt);
void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt);
#endif /* IRDMA_OSDEP_H */
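With the osdep.h prototypes above returning plain int, a caller can hand the value straight back up the stack instead of translating a driver-private status code first. A minimal sketch of that pattern, assuming the irdma headers from this series are in scope; the helper name example_map_pages is hypothetical and not part of the patch:

static int example_map_pages(struct irdma_hw *hw, void *va,
			     dma_addr_t *pg_dma, u32 pg_cnt)
{
	int err;

	/* Returns 0 or a negative errno (e.g. -ENOMEM) after this series. */
	err = irdma_map_vm_page_list(hw, va, pg_dma, pg_cnt);
	if (err)
		return err;

	/* ... use the mapped pages ... */

	irdma_unmap_vm_page_list(hw, pg_dma, pg_cnt);
	return 0;
}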
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index fed49da770f3..cdc0b8a6ed48 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -1,15 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"
-static enum irdma_status_code
-add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
+static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
/**
* irdma_destroy_pble_prm - destroy prm during module unload
@@ -35,13 +33,12 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
* @dev: irdma_sc_dev struct
* @pble_rsrc: pble resources
*/
-enum irdma_status_code
-irdma_hmc_init_pble(struct irdma_sc_dev *dev,
- struct irdma_hmc_pble_rsrc *pble_rsrc)
+int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_hmc_info *hmc_info;
u32 fpm_idx = 0;
- enum irdma_status_code status = 0;
+ int status = 0;
hmc_info = dev->hmc_info;
pble_rsrc->dev = dev;
@@ -60,7 +57,7 @@ irdma_hmc_init_pble(struct irdma_sc_dev *dev,
INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
if (add_pble_prm(pble_rsrc)) {
irdma_destroy_pble_prm(pble_rsrc);
- status = IRDMA_ERR_NO_MEMORY;
+ status = -ENOMEM;
}
return status;
@@ -84,12 +81,11 @@ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resource ptr
* @info: page info for sd
*/
-static enum irdma_status_code
-add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
- struct irdma_add_page_info *info)
+static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
struct sd_pd_idx *idx = &info->idx;
struct irdma_chunk *chunk = info->chunk;
struct irdma_hmc_info *hmc_info = info->hmc_info;
@@ -137,9 +133,8 @@ static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
* @pble_rsrc: pble resource management
* @info: page info for sd
*/
-static enum irdma_status_code
-add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
- struct irdma_add_page_info *info)
+static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
u8 *addr;
@@ -148,13 +143,13 @@ add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
struct irdma_hmc_info *hmc_info = info->hmc_info;
struct irdma_chunk *chunk = info->chunk;
- enum irdma_status_code status = 0;
+ int status = 0;
u32 rel_pd_idx = info->idx.rel_pd_idx;
u32 pd_idx = info->idx.pd_idx;
u32 i;
if (irdma_pble_get_paged_mem(chunk, info->pages))
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
IRDMA_SD_TYPE_PAGED,
@@ -207,8 +202,7 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
* add_pble_prm - add an sd entry for pble resource
* @pble_rsrc: pble resource management
*/
-static enum irdma_status_code
-add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
struct irdma_hmc_sd_entry *sd_entry;
@@ -216,22 +210,22 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
struct irdma_chunk *chunk;
struct irdma_add_page_info info;
struct sd_pd_idx *idx = &info.idx;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
enum irdma_sd_entry_type sd_entry_type;
u64 sd_reg_val = 0;
struct irdma_virt_mem chunkmem;
u32 pages;
if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
if (pble_rsrc->next_fpm_addr & 0xfff)
- return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
+ return -EINVAL;
chunkmem.size = sizeof(*chunk);
chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
if (!chunkmem.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
chunk = chunkmem.va;
chunk->chunkmem = chunkmem;
@@ -337,9 +331,8 @@ static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation
*/
-static enum irdma_status_code
-get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
- struct irdma_pble_alloc *palloc)
+static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
{
u32 lf4k, lflast, total, i;
u32 pblcnt = PBLE_PER_PAGE;
@@ -347,7 +340,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_level2 *lvl2 = &palloc->level2;
struct irdma_pble_info *root = &lvl2->root;
struct irdma_pble_info *leaf;
- enum irdma_status_code ret_code;
+ int ret_code;
u64 fpm_addr;
/* number of full 512 (4K) leafs) */
@@ -359,7 +352,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
lvl2->leafmem.size = (sizeof(*leaf) * total);
lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
if (!lvl2->leafmem.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
lvl2->leaf = lvl2->leafmem.va;
leaf = lvl2->leaf;
@@ -368,7 +361,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
if (ret_code) {
kfree(lvl2->leafmem.va);
lvl2->leaf = NULL;
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
@@ -397,7 +390,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
error:
free_lvl2(pble_rsrc, palloc);
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -405,11 +398,10 @@ error:
* @pble_rsrc: pble resource management
* @palloc: level 1 pble allocation
*/
-static enum irdma_status_code
-get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
- struct irdma_pble_alloc *palloc)
+static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
{
- enum irdma_status_code ret_code;
+ int ret_code;
u64 fpm_addr;
struct irdma_pble_info *lvl1 = &palloc->level1;
@@ -417,7 +409,7 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
palloc->total_cnt << 3, &lvl1->addr,
&fpm_addr);
if (ret_code)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
palloc->level = PBLE_LEVEL_1;
lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
@@ -433,11 +425,10 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @palloc: contains all information regarding pble (idx + pble addr)
* @level1_only: flag for a level 1 PBLE
*/
-static enum irdma_status_code
-get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
- struct irdma_pble_alloc *palloc, bool level1_only)
+static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, bool level1_only)
{
- enum irdma_status_code status = 0;
+ int status = 0;
status = get_lvl1_pble(pble_rsrc, palloc);
if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
@@ -455,11 +446,11 @@ get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_cnt: #of pbles requested
* @level1_only: true if only pble level 1 to acquire
*/
-enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
- struct irdma_pble_alloc *palloc,
- u32 pble_cnt, bool level1_only)
+int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u32 pble_cnt,
+ bool level1_only)
{
- enum irdma_status_code status = 0;
+ int status = 0;
int max_sds = 0;
int i;
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
index d0d4f2b77d34..29d295463559 100644
--- a/drivers/infiniband/hw/irdma/pble.h
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -108,20 +108,18 @@ struct irdma_hmc_pble_rsrc {
};
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
-enum irdma_status_code
-irdma_hmc_init_pble(struct irdma_sc_dev *dev,
- struct irdma_hmc_pble_rsrc *pble_rsrc);
+int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc);
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc);
-enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
- struct irdma_pble_alloc *palloc,
- u32 pble_cnt, bool level1_only);
-enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
- struct irdma_chunk *pchunk);
-enum irdma_status_code
-irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
- struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
- u64 **vaddr, u64 *fpm_addr);
+int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u32 pble_cnt,
+ bool level1_only);
+int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk);
+int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr);
void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
struct irdma_pble_chunkinfo *chunkinfo);
void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
@@ -129,7 +127,6 @@ void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
unsigned long *flags);
void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
-enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk,
- u32 pg_cnt);
+int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt);
void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
#endif /* IRDMA_PBLE_H */
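The pble.h declarations above follow the same convention. A hedged usage sketch (hypothetical caller, assuming the converted pble.c from this diff) showing that failures now surface as standard errnos rather than IRDMA_ERR_* values:

static int example_alloc_pbles(struct irdma_hmc_pble_rsrc *pble_rsrc,
			       struct irdma_pble_alloc *palloc, u32 cnt)
{
	int err;

	/* Negative errno on failure, e.g. -ENOMEM per the pble.c hunks above. */
	err = irdma_get_pble(pble_rsrc, palloc, cnt, false);
	if (err)
		return err;

	/* ... program the hardware with palloc ... */

	irdma_free_pble(pble_rsrc, palloc);
	return 0;
}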
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index a17c0ffb0cc8..9b6e919ae2a9 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -12,58 +12,51 @@
#define CQP_TIMEOUT_THRESHOLD 500
/* init operations */
-enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
- struct irdma_sc_dev *dev,
- struct irdma_device_init_info *info);
+int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info);
void irdma_sc_rt_init(struct irdma_sc_dev *dev);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
-enum irdma_status_code
-irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
- struct irdma_fast_reg_stag_info *info, bool post_sq);
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq);
/* HMC/FPM functions */
-enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
- u8 hmc_fn_id);
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id);
/* stats misc */
-enum irdma_status_code
-irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
- struct irdma_vsi_pestat *pestat, bool wait);
+int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait);
void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat);
void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
struct irdma_dev_hw_stats *stats_values,
u64 *hw_stats_regs_32, u64 *hw_stats_regs_64,
u8 hw_rev);
-enum irdma_status_code
-irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_ws_node_info *node_info);
-enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_ceq *sc_ceq, u8 op);
-enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_aeq *sc_aeq, u8 op);
-enum irdma_status_code
-irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
- struct irdma_stats_inst_info *stats_info);
+int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info);
+int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
+ u8 op);
+int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
+ u8 op);
+int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info);
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
struct irdma_gather_stats *gather_stats,
struct irdma_gather_stats *last_gather_stats);
/* vsi functions */
-enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
- struct irdma_vsi_stats_info *info);
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info);
void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi);
void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_init_info *info);
-enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
- struct irdma_sc_cq *cq);
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
/* misc L2 param change functions */
void irdma_change_l2params(struct irdma_sc_vsi *vsi,
struct irdma_l2params *l2params);
void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend);
-enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp,
- u8 cmd);
+int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 cmd);
void irdma_qp_add_qos(struct irdma_sc_qp *qp);
void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
@@ -81,31 +74,26 @@ void irdma_terminate_received(struct irdma_sc_qp *qp,
/* misc */
u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
-enum irdma_status_code
-irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id, bool post_sq,
- bool poll_registers);
-enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev,
- u32 qp_count);
-enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev);
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers);
+int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count);
+int irdma_get_rdma_features(struct irdma_sc_dev *dev);
void free_sd_mem(struct irdma_sc_dev *dev);
-enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
- struct cqp_cmds_info *pcmdinfo);
-enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev);
-enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info);
-enum irdma_status_code
-irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-enum irdma_status_code
-irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *mem);
-enum irdma_status_code
-irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
- struct irdma_hmc_fcn_info *hmcfcninfo,
- u16 *pmf_idx);
+int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo);
+int irdma_process_bh(struct irdma_sc_dev *dev);
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
+int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
void irdma_add_dev_ref(struct irdma_sc_dev *dev);
void irdma_put_dev_ref(struct irdma_sc_dev *dev);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
index 58e7d875643b..397f3d070f90 100644
--- a/drivers/infiniband/hw/irdma/puda.c
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -114,8 +113,7 @@ static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
* @rsrc: resource to use for buffer
* @initial: flag if during init time
*/
-static enum irdma_status_code
-irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
+static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
{
u32 i;
u32 invalid_cnt = rsrc->rxq_invalid_cnt;
@@ -124,7 +122,7 @@ irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
for (i = 0; i < invalid_cnt; i++) {
buf = irdma_puda_get_bufpool(rsrc);
if (!buf)
- return IRDMA_ERR_list_empty;
+ return -ENOBUFS;
irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
rsrc->rxq_invalid_cnt--;
@@ -194,7 +192,7 @@ static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
u32 *wqe_idx)
{
__le64 *wqe = NULL;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
if (!*wqe_idx)
@@ -213,8 +211,8 @@ static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
* @cq: cq for poll
* @info: info return for successful completion
*/
-static enum irdma_status_code
-irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)
+static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
+ struct irdma_puda_cmpl_info *info)
{
struct irdma_cq_uk *cq_uk = &cq->cq_uk;
u64 qword0, qword2, qword3, qword6;
@@ -233,7 +231,7 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)
get_64bit_val(cqe, 24, &qword3);
valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (valid_bit != cq_uk->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
@@ -246,7 +244,7 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)
if (!peek_head)
polarity ^= 1;
if (polarity != cq_uk->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
@@ -267,7 +265,7 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)
major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
info->compl_error = major_err << 16 | minor_err;
- return IRDMA_ERR_CQ_COMPL_ERROR;
+ return -EIO;
}
get_64bit_val(cqe, 0, &qword0);
@@ -319,14 +317,13 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)
* @cq: cq getting interrupt
* @compl_err: return any completion err
*/
-enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,
- struct irdma_sc_cq *cq,
- u32 *compl_err)
+int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
+ u32 *compl_err)
{
struct irdma_qp_uk *qp;
struct irdma_cq_uk *cq_uk = &cq->cq_uk;
struct irdma_puda_cmpl_info info = {};
- enum irdma_status_code ret = 0;
+ int ret = 0;
struct irdma_puda_buf *buf;
struct irdma_puda_rsrc *rsrc;
u8 cq_type = cq->cq_type;
@@ -337,24 +334,24 @@ enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,
cq->vsi->ieq;
} else {
ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n");
- return IRDMA_ERR_BAD_PTR;
+ return -EINVAL;
}
ret = irdma_puda_poll_info(cq, &info);
*compl_err = info.compl_error;
- if (ret == IRDMA_ERR_Q_EMPTY)
+ if (ret == -ENOENT)
return ret;
if (ret)
goto done;
qp = info.qp;
if (!qp || !rsrc) {
- ret = IRDMA_ERR_BAD_PTR;
+ ret = -EFAULT;
goto done;
}
if (qp->qp_id != rsrc->qp_id) {
- ret = IRDMA_ERR_BAD_PTR;
+ ret = -EFAULT;
goto done;
}
@@ -422,8 +419,7 @@ done:
* @qp: puda qp for send
* @info: buffer information for transmit
*/
-enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,
- struct irdma_puda_send_info *info)
+int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
{
__le64 *wqe;
u32 iplen, l4len;
@@ -443,7 +439,7 @@ enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,
wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
/* Third line of WQE descriptor */
@@ -503,7 +499,7 @@ void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf)
{
struct irdma_puda_send_info info;
- enum irdma_status_code ret = 0;
+ int ret = 0;
unsigned long flags;
spin_lock_irqsave(&rsrc->bufpool_lock, flags);
@@ -603,19 +599,18 @@ static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
* @dev: Device
* @qp: Resource qp
*/
-static enum irdma_status_code irdma_puda_qp_wqe(struct irdma_sc_dev *dev,
- struct irdma_sc_qp *qp)
+static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
u64 hdr;
struct irdma_ccq_cqe_info compl_info;
- enum irdma_status_code status = 0;
+ int status = 0;
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -643,11 +638,11 @@ static enum irdma_status_code irdma_puda_qp_wqe(struct irdma_sc_dev *dev,
* irdma_puda_qp_create - create qp for resource
* @rsrc: resource to use for buffer
*/
-static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
+static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
{
struct irdma_sc_qp *qp = &rsrc->qp;
struct irdma_qp_uk *ukqp = &qp->qp_uk;
- enum irdma_status_code ret = 0;
+ int ret = 0;
u32 sq_size, rq_size;
struct irdma_dma_mem *mem;
@@ -659,7 +654,7 @@ static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
rsrc->qpmem.size, &rsrc->qpmem.pa,
GFP_KERNEL);
if (!rsrc->qpmem.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
mem = &rsrc->qpmem;
memset(mem->va, 0, rsrc->qpmem.size);
@@ -722,19 +717,18 @@ static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
* @dev: Device
* @cq: resource for cq
*/
-static enum irdma_status_code irdma_puda_cq_wqe(struct irdma_sc_dev *dev,
- struct irdma_sc_cq *cq)
+static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
u64 hdr;
struct irdma_ccq_cqe_info compl_info;
- enum irdma_status_code status = 0;
+ int status = 0;
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -775,11 +769,11 @@ static enum irdma_status_code irdma_puda_cq_wqe(struct irdma_sc_dev *dev,
* irdma_puda_cq_create - create cq for resource
* @rsrc: resource for which cq to create
*/
-static enum irdma_status_code irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
+static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
{
struct irdma_sc_dev *dev = rsrc->dev;
struct irdma_sc_cq *cq = &rsrc->cq;
- enum irdma_status_code ret = 0;
+ int ret = 0;
u32 cqsize;
struct irdma_dma_mem *mem;
struct irdma_cq_init_info info = {};
@@ -792,7 +786,7 @@ static enum irdma_status_code irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
&rsrc->cqmem.pa, GFP_KERNEL);
if (!rsrc->cqmem.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
mem = &rsrc->cqmem;
info.dev = dev;
@@ -833,7 +827,7 @@ error:
*/
static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
{
- enum irdma_status_code ret;
+ int ret;
struct irdma_ccq_cqe_info compl_info;
struct irdma_sc_dev *dev = rsrc->dev;
@@ -865,7 +859,7 @@ static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
*/
static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
{
- enum irdma_status_code ret;
+ int ret;
struct irdma_ccq_cqe_info compl_info;
struct irdma_sc_dev *dev = rsrc->dev;
@@ -967,8 +961,7 @@ void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
* @rsrc: resource for buffer allocation
* @count: number of buffers to create
*/
-static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc,
- u32 count)
+static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
{
u32 i;
struct irdma_puda_buf *buf;
@@ -978,7 +971,7 @@ static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc,
buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
if (!buf) {
rsrc->stats_buf_alloc_fail++;
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
irdma_puda_ret_bufpool(rsrc, buf);
rsrc->alloc_buf_count++;
@@ -1001,11 +994,11 @@ static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc,
* @vsi: sc VSI struct
* @info: resource information
*/
-enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
- struct irdma_puda_rsrc_info *info)
+int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info)
{
struct irdma_sc_dev *dev = vsi->dev;
- enum irdma_status_code ret = 0;
+ int ret = 0;
struct irdma_puda_rsrc *rsrc;
u32 pudasize;
u32 sqwridsize, rqwridsize;
@@ -1023,12 +1016,12 @@ enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
vmem = &vsi->ieq_mem;
break;
default:
- return IRDMA_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
vmem->size = pudasize + sqwridsize + rqwridsize;
vmem->va = kzalloc(vmem->size, GFP_KERNEL);
if (!vmem->va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
rsrc = vmem->va;
spin_lock_init(&rsrc->bufpool_lock);
@@ -1046,7 +1039,7 @@ enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
rsrc->xmit_complete = irdma_ieq_tx_compl;
break;
default:
- return IRDMA_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
rsrc->type = info->type;
@@ -1323,12 +1316,12 @@ static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
* @buf: first receive buffer
* @fpdu_len: total length of fpdu
*/
-static enum irdma_status_code
-irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist,
- struct list_head *pbufl, struct irdma_puda_buf *buf,
- u16 fpdu_len)
+static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct irdma_puda_buf *buf, u16 fpdu_len)
{
- enum irdma_status_code status = 0;
+ int status = 0;
struct irdma_puda_buf *nextbuf;
u32 nextseqnum;
u16 plen = fpdu_len - buf->datalen;
@@ -1338,13 +1331,13 @@ irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist,
do {
nextbuf = irdma_puda_get_listbuf(rxlist);
if (!nextbuf) {
- status = IRDMA_ERR_list_empty;
+ status = -ENOBUFS;
break;
}
list_add_tail(&nextbuf->list, pbufl);
if (nextbuf->seqnum != nextseqnum) {
pfpdu->bad_seq_num++;
- status = IRDMA_ERR_SEQ_NUM;
+ status = -ERANGE;
break;
}
if (nextbuf->datalen >= plen) {
@@ -1366,11 +1359,11 @@ irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist,
* @buf: receive buffer
* @fpdu_len: fpdu len in the buffer
*/
-static enum irdma_status_code
-irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,
- struct irdma_puda_buf *buf, u16 fpdu_len)
+static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
+ struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf, u16 fpdu_len)
{
- enum irdma_status_code status = 0;
+ int status = 0;
u8 *crcptr;
u32 mpacrc;
u32 seqnum = buf->seqnum;
@@ -1390,7 +1383,7 @@ irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,
txbuf = irdma_puda_get_bufpool(ieq);
if (!txbuf) {
pfpdu->no_tx_bufs++;
- status = IRDMA_ERR_NO_TXBUFS;
+ status = -ENOBUFS;
goto error;
}
@@ -1434,9 +1427,9 @@ error:
* @pfpdu: partial management per user qp
* @buf: receive buffer
*/
-static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
- struct irdma_pfpdu *pfpdu,
- struct irdma_puda_buf *buf)
+static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
+ struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf)
{
u16 fpdu_len = 0;
u16 datalen = buf->datalen;
@@ -1450,7 +1443,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
bool partial = false;
struct irdma_puda_buf *txbuf;
struct list_head *rxlist = &pfpdu->rxlist;
- enum irdma_status_code ret = 0;
+ int ret = 0;
ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
while (datalen) {
@@ -1459,7 +1452,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
ibdev_dbg(to_ibdev(ieq->dev),
"IEQ: error bad fpdu len\n");
list_add(&buf->list, rxlist);
- return IRDMA_ERR_MPA_CRC;
+ return -EINVAL;
}
if (datalen < fpdu_len) {
@@ -1475,7 +1468,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
list_add(&buf->list, rxlist);
ibdev_dbg(to_ibdev(ieq->dev),
"ERR: IRDMA_ERR_MPA_CRC\n");
- return IRDMA_ERR_MPA_CRC;
+ return -EINVAL;
}
full++;
pfpdu->fpdu_processed++;
@@ -1490,7 +1483,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
if (!txbuf) {
pfpdu->no_tx_bufs++;
list_add(&buf->list, rxlist);
- return IRDMA_ERR_NO_TXBUFS;
+ return -ENOBUFS;
}
/* modify txbuf's buffer header */
irdma_ieq_setup_tx_buf(buf, txbuf);
@@ -1539,7 +1532,7 @@ void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
struct irdma_pfpdu *pfpdu = &qp->pfpdu;
struct list_head *rxlist = &pfpdu->rxlist;
struct irdma_puda_buf *buf;
- enum irdma_status_code status;
+ int status;
do {
if (list_empty(rxlist))
@@ -1557,7 +1550,7 @@ void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
}
/* keep processing buffers from the head of the list */
status = irdma_ieq_process_buf(ieq, pfpdu, buf);
- if (status == IRDMA_ERR_MPA_CRC) {
+ if (status == -EINVAL) {
pfpdu->mpa_crc_err = true;
while (!list_empty(rxlist)) {
buf = irdma_puda_get_listbuf(rxlist);
@@ -1576,8 +1569,7 @@ void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
* @qp: qp pointer
* @buf: buf received on IEQ used to create AH
*/
-static enum irdma_status_code irdma_ieq_create_ah(struct irdma_sc_qp *qp,
- struct irdma_puda_buf *buf)
+static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
{
struct irdma_ah_info ah_info = {};
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
index db3a51170020..5f5124db6ddf 100644
--- a/drivers/infiniband/hw/irdma/puda.h
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -151,42 +151,33 @@ void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
-enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,
- struct irdma_puda_send_info *info);
-enum irdma_status_code
-irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
- struct irdma_puda_rsrc_info *info);
+int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info);
+int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info);
void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
bool reset);
-enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,
- struct irdma_sc_cq *cq,
- u32 *compl_err);
+int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
+ u32 *compl_err);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf);
-enum irdma_status_code
-irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
- struct irdma_puda_buf *buf);
-enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
- void *addr, u32 len, u32 val);
-enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc);
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, u32 val);
+int irdma_init_hash_desc(struct shash_desc **desc);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_free_hash_desc(struct shash_desc *desc);
-void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
- u32 seqnum);
-enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_qp *qp);
-enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_cq *cq);
-enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum);
+int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
+int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
struct irdma_ah_info *ah_info);
-enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,
- struct irdma_ah_info *ah_info,
- bool wait, enum puda_rsrc_type type,
- void *cb_param,
- struct irdma_sc_ah **ah);
+int irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info, bool wait,
+ enum puda_rsrc_type type, void *cb_param,
+ struct irdma_sc_ah **ah);
void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
struct irdma_puda_rsrc *ieq);
diff --git a/drivers/infiniband/hw/irdma/status.h b/drivers/infiniband/hw/irdma/status.h
deleted file mode 100644
index 22ea3888253a..000000000000
--- a/drivers/infiniband/hw/irdma/status.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
-/* Copyright (c) 2015 - 2020 Intel Corporation */
-#ifndef IRDMA_STATUS_H
-#define IRDMA_STATUS_H
-
-/* Error Codes */
-enum irdma_status_code {
- IRDMA_SUCCESS = 0,
- IRDMA_ERR_NVM = -1,
- IRDMA_ERR_NVM_CHECKSUM = -2,
- IRDMA_ERR_CFG = -4,
- IRDMA_ERR_PARAM = -5,
- IRDMA_ERR_DEVICE_NOT_SUPPORTED = -6,
- IRDMA_ERR_RESET_FAILED = -7,
- IRDMA_ERR_SWFW_SYNC = -8,
- IRDMA_ERR_NO_MEMORY = -9,
- IRDMA_ERR_BAD_PTR = -10,
- IRDMA_ERR_INVALID_PD_ID = -11,
- IRDMA_ERR_INVALID_QP_ID = -12,
- IRDMA_ERR_INVALID_CQ_ID = -13,
- IRDMA_ERR_INVALID_CEQ_ID = -14,
- IRDMA_ERR_INVALID_AEQ_ID = -15,
- IRDMA_ERR_INVALID_SIZE = -16,
- IRDMA_ERR_INVALID_ARP_INDEX = -17,
- IRDMA_ERR_INVALID_FPM_FUNC_ID = -18,
- IRDMA_ERR_QP_INVALID_MSG_SIZE = -19,
- IRDMA_ERR_QP_TOOMANY_WRS_POSTED = -20,
- IRDMA_ERR_INVALID_FRAG_COUNT = -21,
- IRDMA_ERR_Q_EMPTY = -22,
- IRDMA_ERR_INVALID_ALIGNMENT = -23,
- IRDMA_ERR_FLUSHED_Q = -24,
- IRDMA_ERR_INVALID_PUSH_PAGE_INDEX = -25,
- IRDMA_ERR_INVALID_INLINE_DATA_SIZE = -26,
- IRDMA_ERR_TIMEOUT = -27,
- IRDMA_ERR_OPCODE_MISMATCH = -28,
- IRDMA_ERR_CQP_COMPL_ERROR = -29,
- IRDMA_ERR_INVALID_VF_ID = -30,
- IRDMA_ERR_INVALID_HMCFN_ID = -31,
- IRDMA_ERR_BACKING_PAGE_ERROR = -32,
- IRDMA_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
- IRDMA_ERR_INVALID_PBLE_INDEX = -34,
- IRDMA_ERR_INVALID_SD_INDEX = -35,
- IRDMA_ERR_INVALID_PAGE_DESC_INDEX = -36,
- IRDMA_ERR_INVALID_SD_TYPE = -37,
- IRDMA_ERR_MEMCPY_FAILED = -38,
- IRDMA_ERR_INVALID_HMC_OBJ_INDEX = -39,
- IRDMA_ERR_INVALID_HMC_OBJ_COUNT = -40,
- IRDMA_ERR_BUF_TOO_SHORT = -43,
- IRDMA_ERR_BAD_IWARP_CQE = -44,
- IRDMA_ERR_NVM_BLANK_MODE = -45,
- IRDMA_ERR_NOT_IMPL = -46,
- IRDMA_ERR_PE_DOORBELL_NOT_ENA = -47,
- IRDMA_ERR_NOT_READY = -48,
- IRDMA_NOT_SUPPORTED = -49,
- IRDMA_ERR_FIRMWARE_API_VER = -50,
- IRDMA_ERR_RING_FULL = -51,
- IRDMA_ERR_MPA_CRC = -61,
- IRDMA_ERR_NO_TXBUFS = -62,
- IRDMA_ERR_SEQ_NUM = -63,
- IRDMA_ERR_list_empty = -64,
- IRDMA_ERR_INVALID_MAC_ADDR = -65,
- IRDMA_ERR_BAD_STAG = -66,
- IRDMA_ERR_CQ_COMPL_ERROR = -67,
- IRDMA_ERR_Q_DESTROYED = -68,
- IRDMA_ERR_INVALID_FEAT_CNT = -69,
- IRDMA_ERR_REG_CQ_FULL = -70,
- IRDMA_ERR_VF_MSG_ERROR = -71,
- IRDMA_ERR_NO_INTR = -72,
- IRDMA_ERR_REG_QSET = -73,
-};
-#endif /* IRDMA_STATUS_H */
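With status.h deleted outright, the hunks in this patch replace each removed code with a standard errno at the point of use. A hedged summary of the mapping as it appears in the hunks shown here; other call sites in the series may choose differently:

/*
 * IRDMA_ERR_NO_MEMORY, IRDMA_ERR_RING_FULL,
 * IRDMA_ERR_QP_TOOMANY_WRS_POSTED                 -> -ENOMEM
 * IRDMA_ERR_PARAM, IRDMA_ERR_MPA_CRC,
 * IRDMA_ERR_INVALID_FRAG_COUNT,
 * IRDMA_ERR_INVALID_INLINE_DATA_SIZE,
 * IRDMA_ERR_INVALID_PAGE_DESC_INDEX               -> -EINVAL
 * IRDMA_ERR_BAD_PTR                               -> -EINVAL or -EFAULT
 * IRDMA_ERR_Q_EMPTY                               -> -ENOENT
 * IRDMA_ERR_CQ_COMPL_ERROR                        -> -EIO
 * IRDMA_ERR_list_empty, IRDMA_ERR_NO_TXBUFS       -> -ENOBUFS
 * IRDMA_ERR_SEQ_NUM                               -> -ERANGE
 * IRDMA_NOT_SUPPORTED                             -> -EOPNOTSUPP
 */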
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 9483bb3e10ea..9e7b8ecb137a 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -2,7 +2,6 @@
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H
-#include "status.h"
#include "osdep.h"
#include "irdma.h"
#include "user.h"
@@ -402,8 +401,8 @@ struct irdma_sc_cqp {
u64 host_ctx_pa;
void *back_cqp;
struct irdma_sc_dev *dev;
- enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info);
+ int (*process_cqp_sds)(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
struct irdma_dma_mem sdbuf;
struct irdma_ring sq_ring;
struct irdma_cqp_quanta *sq_base;
@@ -605,12 +604,14 @@ struct irdma_sc_vsi {
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_vsi_pestat *pestat;
atomic_t qp_suspend_reqs;
- enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node);
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
u8 qos_rel_bw;
u8 qos_prio_type;
+ u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ bool dscp_mode:1;
};
struct irdma_sc_dev {
@@ -655,7 +656,7 @@ struct irdma_sc_dev {
bool vchnl_up:1;
bool ceq_valid:1;
u8 pci_rev;
- enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
+ int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_reset)(struct irdma_sc_vsi *vsi);
};
@@ -735,11 +736,13 @@ struct irdma_l2params {
u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
u16 mtu;
u8 up2tc[IRDMA_MAX_USER_PRIORITY];
+ u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
u8 num_tc;
u8 vsi_rel_bw;
u8 vsi_prio_type;
bool mtu_changed:1;
bool tc_changed:1;
+ bool dscp_mode:1;
};
struct irdma_vsi_init_info {
@@ -750,8 +753,8 @@ struct irdma_vsi_init_info {
u16 pf_data_vsi_num;
enum irdma_vm_vf_type vm_vf_type;
u16 vm_id;
- enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node);
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
};
@@ -1198,29 +1201,27 @@ struct irdma_irq_ops {
};
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
-enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
- bool check_overflow, bool post_sq);
-enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
- bool post_sq);
-enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
- struct irdma_ccq_cqe_info *info);
-enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
- struct irdma_ccq_init_info *info);
-
-enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
-enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
-
-enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
- bool post_sq);
-enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
- struct irdma_ceq_init_info *info);
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq);
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info);
+int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_init_info *info);
+
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
+
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
-enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
- struct irdma_aeq_init_info *info);
-enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
- struct irdma_aeqe_info *info);
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info);
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
@@ -1228,31 +1229,27 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev);
-enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
- u16 *min_err);
-enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
-enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
- struct irdma_cqp_init_info *info);
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
-enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
- struct irdma_ccq_cqe_info *cmpl_info);
-enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp,
- struct irdma_fast_reg_stag_info *info,
- bool post_sq);
-enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp,
- struct irdma_create_qp_info *info,
- u64 scratch, bool post_sq);
-enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp,
- u64 scratch, bool remove_hash_idx,
- bool ignore_mw_bnd, bool post_sq);
-enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
- struct irdma_qp_flush_info *info,
- u64 scratch, bool post_sq);
-enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
- struct irdma_qp_init_info *info);
-enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
- struct irdma_modify_qp_info *info,
- u64 scratch, bool post_sq);
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
+ struct irdma_ccq_cqe_info *cmpl_info);
+int irdma_sc_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info, bool post_sq);
+int irdma_sc_qp_create(struct irdma_sc_qp *qp,
+ struct irdma_create_qp_info *info, u64 scratch,
+ bool post_sq);
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq);
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
+ struct irdma_modify_qp_info *info, u64 scratch,
+ bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag stag);
@@ -1261,14 +1258,12 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info);
-enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
- bool post_sq);
-enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
- struct irdma_cq_init_info *info);
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
-enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp,
- u64 scratch, u8 hmc_fn_id,
- bool post_sq, bool poll_registers);
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers);
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
struct cqp_info {
diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c
index 7a9988ddbd01..284cec2a74de 100644
--- a/drivers/infiniband/hw/irdma/uda.c
+++ b/drivers/infiniband/hw/irdma/uda.c
@@ -3,7 +3,6 @@
#include <linux/etherdevice.h>
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -18,16 +17,15 @@
* @op: Operation
* @scratch: u64 saved to be used during cqp completion
*/
-enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
- struct irdma_ah_info *info,
- u32 op, u64 scratch)
+int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
+ u32 op, u64 scratch)
{
__le64 *wqe;
u64 qw1, qw2;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
@@ -86,8 +84,7 @@ enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
* irdma_create_mg_ctx() - create a mcg context
* @info: multicast group context info
*/
-static enum irdma_status_code
-irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
+static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
u8 idx = 0; /* index in the array */
@@ -106,8 +103,6 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
ctx_idx++;
}
}
-
- return 0;
}
/**
@@ -117,27 +112,24 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
* @op: operation to perform
* @scratch: u64 saved to be used during cqp completion
*/
-enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
- struct irdma_mcast_grp_info *info,
- u32 op, u64 scratch)
+int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info, u32 op,
+ u64 scratch)
{
__le64 *wqe;
- enum irdma_status_code ret_code = 0;
if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
}
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe) {
ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
}
- ret_code = irdma_create_mg_ctx(info);
- if (ret_code)
- return ret_code;
+ irdma_create_mg_ctx(info);
set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
set_64bit_val(wqe, 16,
@@ -198,8 +190,8 @@ static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
* @ctx: Multicast group context
* @mg: Multicast group info
*/
-enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
- struct irdma_mcast_grp_ctx_entry_info *mg)
+int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
{
u32 idx;
bool free_entry_found = false;
@@ -228,7 +220,7 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
return 0;
}
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -239,8 +231,8 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
* Finds and removes a specific multicast group from context, all
* parameters must match to remove a multicast group.
*/
-enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
- struct irdma_mcast_grp_ctx_entry_info *mg)
+int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
{
u32 idx;
@@ -269,5 +261,5 @@ enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
}
}
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
}
diff --git a/drivers/infiniband/hw/irdma/uda.h b/drivers/infiniband/hw/irdma/uda.h
index a4ad0367dc96..fe4820ff0cca 100644
--- a/drivers/infiniband/hw/irdma/uda.h
+++ b/drivers/infiniband/hw/irdma/uda.h
@@ -32,56 +32,54 @@ struct irdma_sc_ah {
struct irdma_ah_info ah_info;
};
-enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
- struct irdma_mcast_grp_ctx_entry_info *mg);
-enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
- struct irdma_mcast_grp_ctx_entry_info *mg);
-enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
- u32 op, u64 scratch);
-enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
- struct irdma_mcast_grp_info *info,
- u32 op, u64 scratch);
+int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
+ u32 op, u64 scratch);
+int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info, u32 op,
+ u64 scratch);
static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
{
ah->dev = dev;
}
-static inline enum irdma_status_code irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
- struct irdma_ah_info *info,
- u64 scratch)
+static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info, u64 scratch)
{
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
scratch);
}
-static inline enum irdma_status_code irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
- struct irdma_ah_info *info,
- u64 scratch)
+static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info, u64 scratch)
{
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
scratch);
}
-static inline enum irdma_status_code irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
- struct irdma_mcast_grp_info *info,
- u64 scratch)
+static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
scratch);
}
-static inline enum irdma_status_code irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
- struct irdma_mcast_grp_info *info,
- u64 scratch)
+static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,
scratch);
}
-static inline enum irdma_status_code irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
- struct irdma_mcast_grp_info *info,
- u64 scratch)
+static inline int irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP,
scratch);
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 57a9444e9ea7..daeab5daed5b 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
-#include "status.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"
@@ -56,7 +55,7 @@ static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
* irdma_nop_1 - insert a NOP wqe
* @qp: hw qp ptr
*/
-static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
+static int irdma_nop_1(struct irdma_qp_uk *qp)
{
u64 hdr;
__le64 *wqe;
@@ -64,7 +63,7 @@ static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
bool signaled = false;
if (!qp->sq_ring.head)
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
wqe = qp->sq_base[wqe_idx].elem;
@@ -245,7 +244,7 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
__le64 *wqe;
- enum irdma_status_code ret_code;
+ int ret_code;
if (IRDMA_RING_FULL_ERR(qp->rq_ring))
return NULL;
@@ -268,16 +267,15 @@ __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
{
u64 hdr;
__le64 *wqe;
struct irdma_rdma_write *op_info;
u32 i, wqe_idx;
u32 total_size = 0, byte_off;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 frag_cnt, addl_frag_cnt;
bool read_fence = false;
u16 quanta;
@@ -286,7 +284,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
op_info = &info->op.rdma_write;
if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
total_size += op_info->lo_sg_list[i].length;
@@ -305,7 +303,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -370,12 +368,11 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
* @inv_stag: flag for inv_stag
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool inv_stag, bool post_sq)
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq)
{
struct irdma_rdma_read *op_info;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 i, byte_off, total_size = 0;
bool local_fence = false;
u32 addl_frag_cnt;
@@ -388,7 +385,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
op_info = &info->op.rdma_read;
if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
total_size += op_info->lo_sg_list[i].length;
@@ -400,7 +397,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -457,15 +454,14 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_post_send *op_info;
u64 hdr;
u32 i, wqe_idx, total_size = 0, byte_off;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 frag_cnt, addl_frag_cnt;
bool read_fence = false;
u16 quanta;
@@ -474,7 +470,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
op_info = &info->op.send;
if (qp->max_sq_frag_cnt < op_info->num_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_sges; i++)
total_size += op_info->sg_list[i].length;
@@ -490,7 +486,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -678,9 +674,8 @@ static u16 irdma_inline_data_size_to_quanta(u32 data_size)
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code
-irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
struct irdma_inline_rdma_write *op_info;
@@ -693,13 +688,13 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
op_info = &info->op.inline_rdma_write;
if (op_info->len > qp->max_inline_data)
- return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+ return -EINVAL;
quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -745,9 +740,8 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
struct irdma_post_inline_send *op_info;
@@ -760,13 +754,13 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
op_info = &info->op.inline_send;
if (op_info->len > qp->max_inline_data)
- return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+ return -EINVAL;
quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -817,9 +811,9 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code
-irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info, bool post_sq)
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_inv_local_stag *op_info;
@@ -835,7 +829,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -871,8 +865,8 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
* @qp: hw qp ptr
* @info: post rq information
*/
-enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
- struct irdma_post_rq_info *info)
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info)
{
u32 wqe_idx, i, byte_off;
u32 addl_frag_cnt;
@@ -880,11 +874,11 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
u64 hdr;
if (qp->max_rq_frag_cnt < info->num_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
qp->rq_wrid_array[wqe_idx] = info->wr_id;
addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
@@ -1000,15 +994,15 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
* @cq: hw cq
* @info: cq poll information returned
*/
-enum irdma_status_code
-irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info)
{
u64 comp_ctx, qword0, qword2, qword3;
__le64 *cqe;
struct irdma_qp_uk *qp;
struct irdma_ring *pring = NULL;
u32 wqe_idx, q_type;
- enum irdma_status_code ret_code;
+ int ret_code;
bool move_cq_head = true;
u8 polarity;
bool ext_valid;
@@ -1022,7 +1016,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
get_64bit_val(cqe, 24, &qword3);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (polarity != cq->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
/* Ensure CQE contents are read after valid bit is checked */
dma_rmb();
@@ -1045,7 +1039,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
polarity ^= 1;
}
if (polarity != cq->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
/* Ensure ext CQE contents are read after ext valid bit is checked */
dma_rmb();
@@ -1112,7 +1106,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (!qp || qp->destroy_pending) {
- ret_code = IRDMA_ERR_Q_DESTROYED;
+ ret_code = -EFAULT;
goto exit;
}
wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
@@ -1126,7 +1120,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
- ret_code = IRDMA_ERR_Q_EMPTY;
+ ret_code = -ENOENT;
goto exit;
}
@@ -1186,7 +1180,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
} else {
if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
- ret_code = IRDMA_ERR_Q_EMPTY;
+ ret_code = -ENOENT;
goto exit;
}
@@ -1303,15 +1297,15 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
* @sqdepth: depth of SQ
*
*/
-enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 sq_size, u8 shift, u32 *sqdepth)
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *sqdepth)
{
*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -1323,15 +1317,15 @@ enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
* @shift: shift which determines size of WQE
* @rqdepth: depth of RQ
*/
-enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 rq_size, u8 shift, u32 *rqdepth)
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *rqdepth)
{
*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
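
Both depth helpers above follow the same arithmetic: scale the requested ring size by the WQE shift, add a small reserve, round up, then either clamp to the software minimum or fail with -EINVAL past the hardware maximum. A standalone sketch of that clamp, assuming power-of-two rounding and with the reserve and limits passed in as placeholders for the driver's IRDMA_* constants:

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/types.h>

/* Illustrative only: rsvd, min_wqsize and max_quanta stand in for
 * IRDMA_SQ_RSVD/IRDMA_RQ_RSVD, IRDMA_QP_SW_MIN_WQSIZE and the
 * uk_attrs max_hw_*_quanta fields. Power-of-two rounding is assumed.
 */
static int example_get_wqdepth(u32 size, u8 shift, u32 rsvd, u32 min_wqsize,
                               u32 max_quanta, u32 *depth)
{
        *depth = roundup_pow_of_two((size << shift) + rsvd);

        if (*depth < (min_wqsize << shift))
                *depth = min_wqsize << shift;
        else if (*depth > max_quanta)
                return -EINVAL;

        return 0;
}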
@@ -1381,17 +1375,16 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
* allowed. Then size of wqe * the number of wqes should be the
* amount of memory allocated for sq and rq.
*/
-enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
- struct irdma_qp_uk_init_info *info)
+int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 sq_ring_size;
u8 sqshift, rqshift;
qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
@@ -1502,8 +1495,7 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
* @signaled: signaled for completion
* @post_sq: ring doorbell
*/
-enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
- bool signaled, bool post_sq)
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
__le64 *wqe;
u64 hdr;
@@ -1515,7 +1507,7 @@ enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, &info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -1541,7 +1533,7 @@ enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
* @frag_cnt: number of fragments
* @quanta: quanta for frag_cnt
*/
-enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
switch (frag_cnt) {
case 0:
@@ -1577,7 +1569,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
*quanta = 8;
break;
default:
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
}
return 0;
@@ -1588,7 +1580,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
* @frag_cnt: number of fragments
* @wqe_size: size in bytes given frag_cnt
*/
-enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
switch (frag_cnt) {
case 0:
@@ -1615,7 +1607,7 @@ enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
*wqe_size = 256;
break;
default:
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
}
return 0;
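
All of the uk.c entry points above now fail with standard errnos -- -EINVAL for bad fragment counts or inline sizes, -ENOMEM when no WQE slot is free, -ENOENT for an empty CQ and -EFAULT for a destroyed QP -- instead of driver-private irdma_status_code values, so upper layers can hand the result straight back to the ib core. A minimal sketch of the resulting caller pattern (the wrapper is hypothetical; the two irdma_uk_* calls are the ones converted above):

/* Sketch: propagate the errno from the uk library layer directly
 * instead of remapping it into a second error namespace. */
static int example_post_one_send(struct irdma_qp_uk *ukqp,
                                 struct irdma_post_sq_info *info)
{
        int err;

        err = irdma_uk_send(ukqp, info, false); /* -EINVAL or -ENOMEM */
        if (err)
                return err;                     /* no translation step */

        irdma_uk_qp_post_wr(ukqp);              /* ring the doorbell */
        return 0;
}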
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 3c811fb88404..ddd0ebbdd7d5 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -270,29 +270,24 @@ struct irdma_cq_poll_info {
bool imm_valid:1;
};
-enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq);
-enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq);
-
-enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id,
- bool signaled, bool post_sq);
-enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
- struct irdma_post_rq_info *info);
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
+ bool post_sq);
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
-enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool inv_stag, bool post_sq);
-enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq);
-enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info, bool post_sq);
-enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq);
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq);
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
@@ -303,16 +298,16 @@ struct irdma_wqe_uk_ops {
struct irdma_bind_window *op_info);
};
-enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
- struct irdma_cq_poll_info *info);
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
-enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
- struct irdma_qp_uk_init_info *info);
+int irdma_uk_qp_init(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@@ -413,16 +408,15 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
-enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
- bool signaled, bool post_sq);
-enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
-enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
-enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 sq_size, u8 shift, u32 *wqdepth);
-enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 rq_size, u8 shift, u32 *wqdepth);
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *wqdepth);
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *wqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 398736d8c78a..346c2c5dabdf 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -150,31 +150,35 @@ int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
void *ptr)
{
struct in_ifaddr *ifa = ptr;
- struct net_device *netdev = ifa->ifa_dev->dev;
+ struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
struct irdma_device *iwdev;
struct ib_device *ibdev;
u32 local_ipaddr;
- ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
if (!ibdev)
return NOTIFY_DONE;
iwdev = to_iwdev(ibdev);
local_ipaddr = ntohl(ifa->ifa_address);
ibdev_dbg(&iwdev->ibdev,
- "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", netdev,
- event, &local_ipaddr, netdev->dev_addr);
+ "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
+ event, &local_ipaddr, real_dev->dev_addr);
switch (event) {
case NETDEV_DOWN:
- irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
+ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
&local_ipaddr, true, IRDMA_ARP_DELETE);
- irdma_if_notify(iwdev, netdev, &local_ipaddr, true, false);
+ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
irdma_gid_change_event(&iwdev->ibdev);
break;
case NETDEV_UP:
case NETDEV_CHANGEADDR:
- irdma_add_arp(iwdev->rf, &local_ipaddr, true, netdev->dev_addr);
- irdma_if_notify(iwdev, netdev, &local_ipaddr, true, true);
+ irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
+ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
irdma_gid_change_event(&iwdev->ibdev);
break;
default:
@@ -196,32 +200,36 @@ int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
void *ptr)
{
struct inet6_ifaddr *ifa = ptr;
- struct net_device *netdev = ifa->idev->dev;
+ struct net_device *real_dev, *netdev = ifa->idev->dev;
struct irdma_device *iwdev;
struct ib_device *ibdev;
u32 local_ipaddr6[4];
- ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
if (!ibdev)
return NOTIFY_DONE;
iwdev = to_iwdev(ibdev);
irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
ibdev_dbg(&iwdev->ibdev,
- "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", netdev,
- event, local_ipaddr6, netdev->dev_addr);
+ "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
+ event, local_ipaddr6, real_dev->dev_addr);
switch (event) {
case NETDEV_DOWN:
- irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
+ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
local_ipaddr6, false, IRDMA_ARP_DELETE);
- irdma_if_notify(iwdev, netdev, local_ipaddr6, false, false);
+ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
irdma_gid_change_event(&iwdev->ibdev);
break;
case NETDEV_UP:
case NETDEV_CHANGEADDR:
irdma_add_arp(iwdev->rf, local_ipaddr6, false,
- netdev->dev_addr);
- irdma_if_notify(iwdev, netdev, local_ipaddr6, false, true);
+ real_dev->dev_addr);
+ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
irdma_gid_change_event(&iwdev->ibdev);
break;
default:
@@ -243,14 +251,18 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
void *ptr)
{
struct neighbour *neigh = ptr;
+ struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
struct irdma_device *iwdev;
struct ib_device *ibdev;
__be32 *p;
u32 local_ipaddr[4] = {};
bool ipv4 = true;
- ibdev = ib_device_get_by_netdev((struct net_device *)neigh->dev,
- RDMA_DRIVER_IRDMA);
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
if (!ibdev)
return NOTIFY_DONE;
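
Each of the three notifier hooks above now maps a VLAN upper device to its underlying real netdev before calling ib_device_get_by_netdev(), because the irdma ib_device is registered against the physical port rather than the VLAN interface. The repeated three-line lookup could equally sit in a small helper; a sketch (the helper name is invented here -- the patch keeps it open-coded):

#include <rdma/ib_addr.h>

/* Hypothetical helper mirroring the open-coded lookup above. */
static struct net_device *example_get_real_dev(struct net_device *netdev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(netdev);

        return real_dev ? real_dev : netdev;
}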
@@ -551,12 +563,12 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
* @rf: RDMA PCI function
* @cqp_request: cqp request to wait
*/
-static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf,
- struct irdma_cqp_request *cqp_request)
+static int irdma_wait_event(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
{
struct irdma_cqp_timeout cqp_timeout = {};
bool cqp_error = false;
- enum irdma_status_code err_code = 0;
+ int err_code = 0;
cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
do {
@@ -575,12 +587,12 @@ static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf,
rf->reset = true;
rf->gen_ops.request_reset(rf);
}
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
} while (1);
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
- err_code = IRDMA_ERR_CQP_COMPL_ERROR;
+ err_code = -EIO;
if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
cqp_request->compl_info.min_err_code == 0x8029) {
if (!rf->reset) {
@@ -680,16 +692,16 @@ bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
* @rf: RDMA PCI function
* @cqp_request: cqp request to process
*/
-enum irdma_status_code irdma_handle_cqp_op(struct irdma_pci_f *rf,
- struct irdma_cqp_request *cqp_request)
+int irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
struct cqp_cmds_info *info = &cqp_request->info;
- enum irdma_status_code status;
+ int status;
bool put_cqp_request = true;
if (rf->reset)
- return IRDMA_ERR_NOT_READY;
+ return -EBUSY;
irdma_get_cqp_request(cqp_request);
status = irdma_process_cqp_cmd(dev, info);
@@ -791,17 +803,17 @@ void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
* @sdinfo: information for sd cqp
*
*/
-enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *sdinfo)
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *sdinfo)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
@@ -822,19 +834,18 @@ enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
* @qp: hardware control qp
* @op: suspend or resume
*/
-enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp,
- u8 op)
+int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
{
struct irdma_sc_dev *dev = qp->dev;
struct irdma_cqp_request *cqp_request;
struct irdma_sc_cqp *cqp = dev->cqp;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_info->cqp_cmd = op;
@@ -940,18 +951,17 @@ void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
* @val_mem: buffer for fpm
* @hmc_fn_id: function id for fpm
*/
-enum irdma_status_code
-irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_request->param = NULL;
@@ -975,18 +985,17 @@ irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
* @val_mem: buffer with fpm values
* @hmc_fn_id: function id for fpm
*/
-enum irdma_status_code
-irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_request->param = NULL;
@@ -1009,18 +1018,17 @@ irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
* @dev: device pointer
* @cq: pointer to created cq
*/
-enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_cq *cq)
+int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
struct irdma_pci_f *rf = dev_to_rf(dev);
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
@@ -1039,19 +1047,18 @@ enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
* @dev: device pointer
* @qp: pointer to created qp
*/
-enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_qp *qp)
+int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
struct irdma_pci_f *rf = dev_to_rf(dev);
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_create_qp_info *qp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
qp_info = &cqp_request->info.in.u.qp_create.info;
@@ -1079,7 +1086,7 @@ static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
return;
@@ -1179,12 +1186,10 @@ static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
* @info: info for modify qp
* @wait: flag to wait or not for modify qp completion
*/
-enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
- struct irdma_qp *iwqp,
- struct irdma_modify_qp_info *info,
- bool wait)
+int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info, bool wait)
{
- enum irdma_status_code status;
+ int status;
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -1192,7 +1197,7 @@ enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
if (!wait) {
cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
@@ -1230,7 +1235,7 @@ enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
wait);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
m_info = &cqp_info->in.u.qp_modify.info;
@@ -1271,17 +1276,17 @@ void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
* @dev: device pointer
* @qp: pointer to qp
*/
-enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
struct irdma_pci_f *rf = dev_to_rf(dev);
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
memset(cqp_info, 0, sizeof(*cqp_info));
@@ -1317,20 +1322,20 @@ void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
* irdma_init_hash_desc - initialize hash for crc calculation
* @desc: cryption type
*/
-enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc)
+int irdma_init_hash_desc(struct shash_desc **desc)
{
struct crypto_shash *tfm;
struct shash_desc *tdesc;
tfm = crypto_alloc_shash("crc32c", 0, 0);
if (IS_ERR(tfm))
- return IRDMA_ERR_MPA_CRC;
+ return -EINVAL;
tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
GFP_KERNEL);
if (!tdesc) {
crypto_free_shash(tfm);
- return IRDMA_ERR_MPA_CRC;
+ return -EINVAL;
}
tdesc->tfm = tfm;
@@ -1358,19 +1363,19 @@ void irdma_free_hash_desc(struct shash_desc *desc)
* @len: length of buffer
* @val: value to be compared
*/
-enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
- void *addr, u32 len, u32 val)
+int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
+ u32 val)
{
u32 crc = 0;
int ret;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
crypto_shash_init(desc);
ret = crypto_shash_update(desc, addr, len);
if (!ret)
crypto_shash_final(desc, (u8 *)&crc);
if (crc != val)
- ret_code = IRDMA_ERR_MPA_CRC;
+ ret_code = -EINVAL;
return ret_code;
}
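
irdma_init_hash_desc() and irdma_ieq_check_mpacrc() above drive the kernel crypto API: allocate a "crc32c" synchronous hash once, run init/update/final over the MPA frame, and compare the result with the received CRC, reporting -EINVAL on mismatch. A compact sketch of the same check, collapsed into a single crypto_shash_digest() call with the tfm allocated on the spot rather than cached:

#include <crypto/hash.h>
#include <linux/errno.h>

/* Sketch: one-shot crc32c over a buffer, compared with an expected value. */
static int example_check_crc32c(const u8 *buf, u32 len, u32 expected)
{
        struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);
        u32 crc = 0;
        int err;

        if (IS_ERR(tfm))
                return -EINVAL;

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                err = crypto_shash_digest(desc, buf, len, (u8 *)&crc);
        }
        crypto_free_shash(tfm);

        if (err)
                return err;
        return crc == expected ? 0 : -EINVAL;
}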
@@ -1524,9 +1529,8 @@ void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
* @info: to get information
* @buf: puda buffer
*/
-static enum irdma_status_code
-irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
- struct irdma_puda_buf *buf)
+static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
{
struct iphdr *iph;
struct ipv6hdr *ip6h;
@@ -1563,7 +1567,7 @@ irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
ibdev_dbg(to_ibdev(buf->vsi->dev),
"ERR: payload_len = 0x%x totallen expected0x%x\n",
info->payload_len, buf->totallen);
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
}
buf->tcphlen = tcph->doff << 2;
@@ -1580,9 +1584,8 @@ irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
* @info: to get information
* @buf: puda buffer
*/
-enum irdma_status_code
-irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
- struct irdma_puda_buf *buf)
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
{
struct tcphdr *tcph;
u32 pkt_len;
@@ -1861,20 +1864,19 @@ static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
* @pestat: pointer to stats info
* @wait: flag to wait or not wait for stats
*/
-enum irdma_status_code
-irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
- struct irdma_vsi_pestat *pestat, bool wait)
+int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait)
{
struct irdma_pci_f *rf = dev_to_rf(dev);
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
memset(cqp_info, 0, sizeof(*cqp_info));
@@ -1900,22 +1902,21 @@ irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
* @cmd: command to allocate or free
* @stats_info: pointer to allocate stats info
*/
-enum irdma_status_code
-irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
- struct irdma_stats_inst_info *stats_info)
+int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info)
{
struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
bool wait = false;
if (cmd == IRDMA_OP_STATS_ALLOCATE)
wait = true;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
memset(cqp_info, 0, sizeof(*cqp_info));
@@ -1938,17 +1939,17 @@ irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
* @sc_ceq: pointer to ceq structure
* @op: Create or Destroy
*/
-enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_ceq *sc_ceq, u8 op)
+int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
+ u8 op)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_info->post_sq = 1;
@@ -1968,17 +1969,17 @@ enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
* @sc_aeq: pointer to aeq structure
* @op: Create or Destroy
*/
-enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
- struct irdma_sc_aeq *sc_aeq, u8 op)
+int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
+ u8 op)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_info->post_sq = 1;
@@ -1998,16 +1999,15 @@ enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
* @cmd: Add, modify or delete
* @node_info: pointer to ws node info
*/
-enum irdma_status_code
-irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_ws_node_info *node_info)
+int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info)
{
struct irdma_pci_f *rf = dev_to_rf(dev);
struct irdma_cqp *iwcqp = &rf->cqp;
struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
bool poll;
if (!rf->sc_dev.ceq_valid)
@@ -2017,7 +2017,7 @@ irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
cqp_info = &cqp_request->info;
memset(cqp_info, 0, sizeof(*cqp_info));
@@ -2066,7 +2066,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
return -EINVAL;
@@ -2148,11 +2148,10 @@ static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
* @ah_ret: Returned pointer to address handle if created
*
*/
-enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,
- struct irdma_ah_info *ah_info,
- bool wait, enum puda_rsrc_type type,
- void *cb_param,
- struct irdma_sc_ah **ah_ret)
+int irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info, bool wait,
+ enum puda_rsrc_type type, void *cb_param,
+ struct irdma_sc_ah **ah_ret)
{
struct irdma_sc_ah *ah;
struct irdma_pci_f *rf = dev_to_rf(dev);
@@ -2161,7 +2160,7 @@ enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
*ah_ret = ah;
if (!ah)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
&ah_info->ah_idx, &rf->next_ah);
@@ -2187,7 +2186,7 @@ error:
err_free:
kfree(ah);
*ah_ret = NULL;
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -2229,19 +2228,19 @@ void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
* @pprm: pble resource manager
* @pchunk: chunk of memory to add
*/
-enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
- struct irdma_chunk *pchunk)
+int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk)
{
u64 sizeofbitmap;
if (pchunk->size & 0xfff)
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
if (!pchunk->bitmapbuf)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
pchunk->sizeofbitmap = sizeofbitmap;
/* each pble is 8 bytes hence shift by 3 */
@@ -2259,10 +2258,9 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
* @vaddr: returns virtual address of pble memory
* @fpm_addr: returns fpm address of pble memory
*/
-enum irdma_status_code
-irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
- struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
- u64 **vaddr, u64 *fpm_addr)
+int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr)
{
u64 bits_needed;
u64 bit_idx = PBLE_INVALID_IDX;
@@ -2290,7 +2288,7 @@ irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
spin_unlock_irqrestore(&pprm->prm_lock, flags);
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
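
The PBLE resource manager above tracks each chunk with a plain bitmap: irdma_prm_add_pble_mem() sizes it as chunk size >> pble_shift bits via bitmap_zalloc(), and irdma_prm_get_pbles() hands out runs of bits under the prm spinlock, failing with -ENOMEM when no chunk has room. A reduced sketch of that reserve step for a single chunk (locking and the fpm address math are omitted; the helper name is illustrative):

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Sketch: reserve 'need' contiguous units from a chunk's bitmap. */
static int example_prm_reserve(unsigned long *bitmap, unsigned long nbits,
                               unsigned int need, unsigned long *first_bit)
{
        unsigned long idx = bitmap_find_next_zero_area(bitmap, nbits, 0,
                                                       need, 0);

        if (idx >= nbits)
                return -ENOMEM;         /* no free run of 'need' units */

        bitmap_set(bitmap, idx, need);
        *first_bit = idx;
        return 0;
}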
@@ -2325,8 +2323,8 @@ void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
spin_unlock_irqrestore(&pprm->prm_lock, flags);
}
-enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
- dma_addr_t *pg_dma, u32 pg_cnt)
+int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
+ u32 pg_cnt)
{
struct page *vm_page;
int i;
@@ -2350,7 +2348,7 @@ enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
err:
irdma_unmap_vm_page_list(hw, pg_dma, i);
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
@@ -2386,15 +2384,14 @@ done:
* @chunk: chunk to add for paged memory
* @pg_cnt: number of pages needed
*/
-enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk,
- u32 pg_cnt)
+int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
{
u32 size;
void *va;
chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
if (!chunk->dmainfo.dmaaddrs)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
size = PAGE_SIZE * pg_cnt;
va = vmalloc(size);
@@ -2416,7 +2413,7 @@ err:
kfree(chunk->dmainfo.dmaaddrs);
chunk->dmainfo.dmaaddrs = NULL;
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 460e757d3fe6..46f475394af5 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -256,7 +256,7 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
struct cqp_cmds_info *cqp_info;
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_qp *qp = &iwqp->sc_qp;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request)
@@ -592,7 +592,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
u32 sqdepth, rqdepth;
u8 sqshift, rqshift;
u32 size;
- enum irdma_status_code status;
+ int status;
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
@@ -603,7 +603,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
&sqdepth);
if (status)
- return -ENOMEM;
+ return status;
if (uk_attrs->hw_rev == IRDMA_GEN_1)
rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
@@ -614,7 +614,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
&rqdepth);
if (status)
- return -ENOMEM;
+ return status;
iwqp->kqp.sq_wrid_mem =
kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
@@ -668,7 +668,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_create_qp_info *qp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
@@ -688,7 +688,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
- return status ? -ENOMEM : 0;
+ return status;
}
static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
@@ -806,7 +806,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_create_qp_req req;
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
- enum irdma_status_code ret;
int err_code;
int sq_size;
int rq_size;
@@ -936,9 +935,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
- ret = irdma_sc_qp_init(qp, &init_info);
- if (ret) {
- err_code = -EPROTO;
+ err_code = irdma_sc_qp_init(qp, &init_info);
+ if (err_code) {
ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
goto error;
}
@@ -1189,7 +1187,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (ret)
return ret;
- if (vlan_id >= VLAN_N_VID && iwdev->dcb)
+ if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
vlan_id = 0;
if (vlan_id < VLAN_N_VID) {
udp_info->insert_vlan_tag = true;
@@ -1202,7 +1200,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
av->attrs = attr->ah_attr;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
- if (av->sgid_addr.saddr.sa_family == AF_INET6) {
+ if (av->net_type == RDMA_NETWORK_IPV6) {
__be32 *daddr =
av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
__be32 *saddr =
@@ -1218,7 +1216,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
&local_ip[0],
false, NULL,
IRDMA_ARP_RESOLVE);
- } else {
+ } else if (av->net_type == RDMA_NETWORK_IPV4) {
__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
@@ -1792,7 +1790,6 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
struct irdma_cq_buf *cq_buf = NULL;
- enum irdma_status_code status = 0;
unsigned long flags;
int ret;
@@ -1885,12 +1882,10 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
cqp_info->post_sq = 1;
- status = irdma_handle_cqp_op(rf, cqp_request);
+ ret = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
- if (status) {
- ret = -EPROTO;
+ if (ret)
goto error;
- }
spin_lock_irqsave(&iwcq->lock, flags);
if (cq_buf) {
@@ -1945,7 +1940,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
struct irdma_sc_cq *cq;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cq_init_info info = {};
- enum irdma_status_code status;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
@@ -2095,12 +2089,10 @@ static int irdma_create_cq(struct ib_cq *ibcq,
cqp_info->in.u.cq_create.cq = cq;
cqp_info->in.u.cq_create.check_overflow = true;
cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
- status = irdma_handle_cqp_op(rf, cqp_request);
+ err_code = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
- if (status) {
- err_code = -ENOMEM;
+ if (err_code)
goto cq_free_rsrc;
- }
if (udata) {
struct irdma_create_cq_resp resp = {};
@@ -2309,14 +2301,14 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_pble_info *pinfo;
u64 *pbl;
- enum irdma_status_code status;
+ int status;
enum irdma_pble_level level = PBLE_LEVEL_1;
if (use_pbles) {
status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
false);
if (status)
- return -ENOMEM;
+ return status;
iwpbl->pbl_allocated = true;
level = palloc->level;
@@ -2434,7 +2426,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request)
@@ -2457,7 +2449,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
- return status ? -ENOMEM : 0;
+ return status;
}
/**
@@ -2509,7 +2501,7 @@ static int irdma_dealloc_mw(struct ib_mw *ibmw)
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.dealloc_stag.info;
memset(info, 0, sizeof(*info));
- info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
+ info->pd_id = iwpd->sc_pd.pd_id;
info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
info->mr = false;
cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
@@ -2533,8 +2525,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
{
struct irdma_allocate_stag_info *info;
struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
- enum irdma_status_code status;
- int err = 0;
+ int status;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -2556,10 +2547,8 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
- if (status)
- err = -ENOMEM;
- return err;
+ return status;
}
/**
@@ -2575,9 +2564,8 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
struct irdma_pble_alloc *palloc;
struct irdma_pbl *iwpbl;
struct irdma_mr *iwmr;
- enum irdma_status_code status;
u32 stag;
- int err_code = -ENOMEM;
+ int err_code;
iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
if (!iwmr)
@@ -2599,9 +2587,9 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
- status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
- true);
- if (status)
+ err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
+ true);
+ if (err_code)
goto err_get_pble;
err_code = irdma_hw_alloc_stag(iwdev, iwmr);
@@ -2672,10 +2660,9 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
struct irdma_reg_ns_stag_info *stag_info;
struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
- enum irdma_status_code status;
- int err = 0;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
+ int ret;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request)
@@ -2712,12 +2699,10 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
cqp_info->post_sq = 1;
cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
- status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
- if (status)
- err = -ENOMEM;
- return err;
+ return ret;
}
/**
@@ -2897,7 +2882,6 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_pbl *iwpbl;
struct irdma_mr *iwmr;
- enum irdma_status_code status;
u32 stag;
int ret;
@@ -2925,10 +2909,9 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
iwmr->pgaddrmem[0] = addr;
iwmr->len = size;
iwmr->page_size = SZ_4K;
- status = irdma_hwreg_mr(iwdev, iwmr, access);
- if (status) {
+ ret = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (ret) {
irdma_free_stag(iwdev, stag);
- ret = -ENOMEM;
goto err;
}
@@ -3021,7 +3004,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.dealloc_stag.info;
memset(info, 0, sizeof(*info));
- info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
+ info->pd_id = iwpd->sc_pd.pd_id;
info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
info->mr = true;
if (iwpbl->pbl_allocated)
@@ -3057,7 +3040,6 @@ static int irdma_post_send(struct ib_qp *ibqp,
struct irdma_qp_uk *ukqp;
struct irdma_sc_dev *dev;
struct irdma_post_sq_info info;
- enum irdma_status_code ret;
int err = 0;
unsigned long flags;
bool inv_stag;
@@ -3116,7 +3098,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
}
- ret = irdma_uk_inline_send(ukqp, &info, false);
+ err = irdma_uk_inline_send(ukqp, &info, false);
} else {
info.op.send.num_sges = ib_wr->num_sge;
info.op.send.sg_list = ib_wr->sg_list;
@@ -3127,14 +3109,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
}
- ret = irdma_uk_send(ukqp, &info, false);
- }
-
- if (ret) {
- if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
+ err = irdma_uk_send(ukqp, &info, false);
}
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3160,20 +3135,13 @@ static int irdma_post_send(struct ib_qp *ibqp,
rdma_wr(ib_wr)->remote_addr;
info.op.inline_rdma_write.rem_addr.lkey =
rdma_wr(ib_wr)->rkey;
- ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
+ err = irdma_uk_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
- ret = irdma_uk_rdma_write(ukqp, &info, false);
- }
-
- if (ret) {
- if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
+ err = irdma_uk_rdma_write(ukqp, &info, false);
}
break;
case IB_WR_RDMA_READ_WITH_INV:
@@ -3190,21 +3158,12 @@ static int irdma_post_send(struct ib_qp *ibqp,
info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
-
- ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
- if (ret) {
- if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
- }
+ err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
break;
case IB_WR_LOCAL_INV:
info.op_type = IRDMA_OP_TYPE_INV_STAG;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
- ret = irdma_uk_stag_local_invalidate(ukqp, &info, true);
- if (ret)
- err = -ENOMEM;
+ err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
break;
case IB_WR_REG_MR: {
struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
@@ -3226,10 +3185,8 @@ static int irdma_post_send(struct ib_qp *ibqp,
stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
stag_info.chunk_size = 1;
- ret = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
+ err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
true);
- if (ret)
- err = -ENOMEM;
break;
}
default:
@@ -3274,7 +3231,6 @@ static int irdma_post_recv(struct ib_qp *ibqp,
struct irdma_qp *iwqp;
struct irdma_qp_uk *ukqp;
struct irdma_post_rq_info post_recv = {};
- enum irdma_status_code ret = 0;
unsigned long flags;
int err = 0;
bool reflush = false;
@@ -3289,14 +3245,10 @@ static int irdma_post_recv(struct ib_qp *ibqp,
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
post_recv.sg_list = ib_wr->sg_list;
- ret = irdma_uk_post_receive(ukqp, &post_recv);
- if (ret) {
+ err = irdma_uk_post_receive(ukqp, &post_recv);
+ if (err) {
ibdev_dbg(&iwqp->iwdev->ibdev,
- "VERBS: post_recv err %d\n", ret);
- if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
+ "VERBS: post_recv err %d\n", err);
goto out;
}
@@ -3483,7 +3435,7 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
struct irdma_cq_buf *last_buf = NULL;
struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
struct irdma_cq_buf *cq_buf;
- enum irdma_status_code ret;
+ int ret;
struct irdma_device *iwdev;
struct irdma_cq_uk *ukcq;
bool cq_new_cqe = false;
@@ -3503,10 +3455,10 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
cq_new_cqe = true;
continue;
}
- if (ret == IRDMA_ERR_Q_EMPTY)
+ if (ret == -ENOENT)
break;
/* QP using the CQ is destroyed. Skip reporting this CQE */
- if (ret == IRDMA_ERR_Q_DESTROYED) {
+ if (ret == -EFAULT) {
cq_new_cqe = true;
continue;
}
@@ -3528,10 +3480,10 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
continue;
}
- if (ret == IRDMA_ERR_Q_EMPTY)
+ if (ret == -ENOENT)
break;
/* QP using the CQ is destroyed. Skip reporting this CQE */
- if (ret == IRDMA_ERR_Q_DESTROYED) {
+ if (ret == -EFAULT) {
cq_new_cqe = true;
continue;
}
@@ -3553,7 +3505,7 @@ error:
ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
__func__, ret);
- return -EINVAL;
+ return ret;
}
/**
@@ -3859,7 +3811,7 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
{
struct cqp_cmds_info *cqp_info;
struct irdma_cqp_request *cqp_request;
- enum irdma_status_code status;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request)
@@ -3873,10 +3825,8 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
- if (status)
- return -ENOMEM;
- return 0;
+ return status;
}
/**
@@ -3932,11 +3882,7 @@ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
int ret = 0;
bool ipv4;
u16 vlan_id;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr;
+ union irdma_sockaddr sgid_addr;
unsigned char dmac[ETH_ALEN];
rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
@@ -4072,11 +4018,7 @@ static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
int ret;
unsigned long flags;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr;
+ union irdma_sockaddr sgid_addr;
rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
@@ -4132,17 +4074,47 @@ static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
return 0;
}
-/**
- * irdma_create_ah - create address handle
- * @ibah: address handle
- * @attr: address handle attributes
- * @udata: User data
- *
- * returns 0 on success, error otherwise
- */
-static int irdma_create_ah(struct ib_ah *ibah,
- struct rdma_ah_init_attr *attr,
- struct ib_udata *udata)
+static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+ int err;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
+ &rf->next_ah);
+ if (err)
+ return err;
+
+ err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
+ irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
+
+ if (err) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
+ goto err_ah_create;
+ }
+
+ if (!sleep) {
+ int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
+
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ mdelay(1);
+ } while (!ah->sc_ah.ah_info.ah_valid && --cnt);
+
+ if (!cnt) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
+ err = -ETIMEDOUT;
+ goto err_ah_create;
+ }
+ }
+ return 0;
+
+err_ah_create:
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
+
+ return err;
+}
+
+static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
{
struct irdma_pd *pd = to_iwpd(ibah->pd);
struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
@@ -4151,25 +4123,13 @@ static int irdma_create_ah(struct ib_ah *ibah,
struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_sc_ah *sc_ah;
- u32 ah_id = 0;
struct irdma_ah_info *ah_info;
- struct irdma_create_ah_resp uresp;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr, dgid_addr;
+ union irdma_sockaddr sgid_addr, dgid_addr;
int err;
u8 dmac[ETH_ALEN];
- err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id,
- &rf->next_ah);
- if (err)
- return err;
-
ah->pd = pd;
sc_ah = &ah->sc_ah;
- sc_ah->ah_info.ah_idx = ah_id;
sc_ah->ah_info.vsi = &iwdev->vsi;
irdma_sc_init_ah(&rf->sc_dev, sc_ah);
ah->sgid_index = ah_attr->grh.sgid_index;
@@ -4179,10 +4139,7 @@ static int irdma_create_ah(struct ib_ah *ibah,
rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
ah->av.attrs = *ah_attr;
ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
- ah->av.sgid_addr.saddr = sgid_addr.saddr;
- ah->av.dgid_addr.saddr = dgid_addr.saddr;
ah_info = &sc_ah->ah_info;
- ah_info->ah_idx = ah_id;
ah_info->pd_idx = pd->sc_pd.pd_id;
if (ah_attr->ah_flags & IB_AH_GRH) {
ah_info->flow_label = ah_attr->grh.flow_label;
@@ -4191,7 +4148,7 @@ static int irdma_create_ah(struct ib_ah *ibah,
}
ether_addr_copy(dmac, ah_attr->roce.dmac);
- if (rdma_gid_attr_network_type(sgid_attr) == RDMA_NETWORK_IPV4) {
+ if (ah->av.net_type == RDMA_NETWORK_IPV4) {
ah_info->ipv4_valid = true;
ah_info->dest_ip_addr[0] =
ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
@@ -4219,17 +4176,15 @@ static int irdma_create_ah(struct ib_ah *ibah,
err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
ah_info->mac_addr);
if (err)
- goto error;
+ return err;
ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
ah_info->ipv4_valid, dmac);
- if (ah_info->dst_arpindex == -1) {
- err = -EINVAL;
- goto error;
- }
+ if (ah_info->dst_arpindex == -1)
+ return -EINVAL;
- if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb)
+ if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
ah_info->vlan_tag = 0;
if (ah_info->vlan_tag < VLAN_N_VID) {
@@ -4238,43 +4193,38 @@ static int irdma_create_ah(struct ib_ah *ibah,
rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
}
- err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
- attr->flags & RDMA_CREATE_AH_SLEEPABLE,
- irdma_gsi_ud_qp_ah_cb, sc_ah);
-
- if (err) {
- ibdev_dbg(&iwdev->ibdev,
- "VERBS: CQP-OP Create AH fail");
- goto error;
- }
-
- if (!(attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
- int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
-
- do {
- irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
- mdelay(1);
- } while (!sc_ah->ah_info.ah_valid && --cnt);
+ return 0;
+}
- if (!cnt) {
- ibdev_dbg(&iwdev->ibdev,
- "VERBS: CQP create AH timed out");
- err = -ETIMEDOUT;
- goto error;
+/**
+ * irdma_ah_exists - Check for existing identical AH
+ * @iwdev: irdma device
+ * @new_ah: AH to check for
+ *
+ * returns true if AH is found, false if not found.
+ */
+static bool irdma_ah_exists(struct irdma_device *iwdev,
+ struct irdma_ah *new_ah)
+{
+ struct irdma_ah *ah;
+ u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+ hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
+		/* Set ah_valid and ah_idx the same so memcmp can work */
+ new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
+ new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
+ if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
+ sizeof(ah->sc_ah.ah_info))) {
+ refcount_inc(&ah->refcnt);
+ new_ah->parent_ah = ah;
+ return true;
}
}
- if (udata) {
- uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
- err = ib_copy_to_udata(udata, &uresp,
- min(sizeof(uresp), udata->outlen));
- }
- return 0;
-
-error:
- irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
-
- return err;
+ return false;
}
/**
@@ -4287,6 +4237,17 @@ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
struct irdma_device *iwdev = to_iwdev(ibah->device);
struct irdma_ah *ah = to_iwah(ibah);
+ if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
+ mutex_lock(&iwdev->ah_tbl_lock);
+ if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
+ mutex_unlock(&iwdev->ah_tbl_lock);
+ return 0;
+ }
+ hash_del(&ah->parent_ah->list);
+ kfree(ah->parent_ah);
+ mutex_unlock(&iwdev->ah_tbl_lock);
+ }
+
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
false, NULL, ah);
@@ -4297,6 +4258,80 @@ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
}
/**
+ * irdma_create_user_ah - create user address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: User data
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_user_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ struct irdma_create_ah_resp uresp;
+ struct irdma_ah *parent_ah;
+ int err;
+
+ err = irdma_setup_ah(ibah, attr);
+ if (err)
+ return err;
+ mutex_lock(&iwdev->ah_tbl_lock);
+ if (!irdma_ah_exists(iwdev, ah)) {
+ err = irdma_create_hw_ah(iwdev, ah, true);
+ if (err) {
+ mutex_unlock(&iwdev->ah_tbl_lock);
+ return err;
+ }
+ /* Add new AH to list */
+ parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
+ if (parent_ah) {
+ u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+ ah->parent_ah = parent_ah;
+ hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
+ refcount_set(&parent_ah->refcnt, 1);
+ }
+ }
+ mutex_unlock(&iwdev->ah_tbl_lock);
+
+ uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
+ err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
+ if (err)
+ irdma_destroy_ah(ibah, attr->flags);
+
+ return err;
+}
+
+/**
+ * irdma_create_ah - create address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: NULL
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ int err;
+
+ err = irdma_setup_ah(ibah, attr);
+ if (err)
+ return err;
+ err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+
+ return err;
+}
+
+/**
* irdma_query_ah - Query address handle
* @ibah: pointer to address handle
* @ah_attr: address handle attributes
@@ -4329,7 +4364,7 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
static const struct ib_device_ops irdma_roce_dev_ops = {
.attach_mcast = irdma_attach_mcast,
.create_ah = irdma_create_ah,
- .create_user_ah = irdma_create_ah,
+ .create_user_ah = irdma_create_user_ah,
.destroy_ah = irdma_destroy_ah,
.detach_mcast = irdma_detach_mcast,
.get_link_layer = irdma_get_link_layer,
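
The hunks above split AH creation into irdma_setup_ah()/irdma_create_hw_ah() and add a per-device cache of user AHs, keyed by XOR-folding the four destination-address words and deduplicated with a memcmp() of the AH attributes. A minimal sketch of that key-and-compare pattern follows, using simplified stand-in types rather than irdma's own structures:

#include <linux/hashtable.h>
#include <linux/refcount.h>
#include <linux/string.h>

struct demo_ah_info {			/* stand-in for irdma_ah_info */
	u32 dest_ip_addr[4];
	u32 pd_idx;
};

struct demo_ah {			/* stand-in for irdma_ah */
	struct demo_ah_info info;
	refcount_t refcnt;
	struct hlist_node list;
};

static DEFINE_HASHTABLE(demo_ah_tbl, 8);

static u32 demo_ah_key(const struct demo_ah_info *info)
{
	/* XOR-fold the destination address into a single hash key */
	return info->dest_ip_addr[0] ^ info->dest_ip_addr[1] ^
	       info->dest_ip_addr[2] ^ info->dest_ip_addr[3];
}

/* Return an existing AH with identical attributes (ref taken), or NULL. */
static struct demo_ah *demo_ah_find(const struct demo_ah_info *info)
{
	struct demo_ah *ah;

	hash_for_each_possible(demo_ah_tbl, ah, list, demo_ah_key(info)) {
		if (!memcmp(&ah->info, info, sizeof(*info))) {
			refcount_inc(&ah->refcnt);
			return ah;
		}
	}
	return NULL;
}

As in the patch, hash-bucket collisions are resolved by the full memcmp(), so two AHs that fold to the same key but differ in any attribute are still treated as distinct.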
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index d0fdef8d09ea..08ba24d0b843 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -25,14 +25,16 @@ struct irdma_pd {
struct irdma_sc_pd sc_pd;
};
+union irdma_sockaddr {
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+};
+
struct irdma_av {
u8 macaddr[16];
struct rdma_ah_attr attrs;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr, dgid_addr;
+ union irdma_sockaddr sgid_addr;
+ union irdma_sockaddr dgid_addr;
u8 net_type;
};
@@ -43,6 +45,9 @@ struct irdma_ah {
struct irdma_av av;
u8 sgid_index;
union ib_gid dgid;
+ struct hlist_node list;
+ refcount_t refcnt;
+ struct irdma_ah *parent_ah; /* AH from cached list */
};
struct irdma_hmc_pble {
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
index b0d6ee0739f5..20bc8d0d7f1f 100644
--- a/drivers/infiniband/hw/irdma/ws.c
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2017 - 2021 Intel Corporation */
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -87,8 +86,8 @@ static void irdma_free_node(struct irdma_sc_vsi *vsi,
* @node: pointer to node
* @cmd: add, remove or modify
*/
-static enum irdma_status_code
-irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)
+static int irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *node, u8 cmd)
{
struct irdma_ws_node_info node_info = {};
@@ -106,7 +105,7 @@ irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)
node_info.enable = node->enable;
if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n");
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
@@ -234,18 +233,18 @@ static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
* @vsi: vsi pointer
* @user_pri: user priority
*/
-enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{
struct irdma_ws_node *ws_tree_root;
struct irdma_ws_node *vsi_node;
struct irdma_ws_node *tc_node;
u16 traffic_class;
- enum irdma_status_code ret = 0;
+ int ret = 0;
int i;
mutex_lock(&vsi->dev->ws_mutex);
if (vsi->tc_change_pending) {
- ret = IRDMA_ERR_NOT_READY;
+ ret = -EBUSY;
goto exit;
}
@@ -258,7 +257,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
ws_tree_root = irdma_alloc_node(vsi, user_pri,
WS_NODE_TYPE_PARENT, NULL);
if (!ws_tree_root) {
- ret = IRDMA_ERR_NO_MEMORY;
+ ret = -ENOMEM;
goto exit;
}
@@ -283,7 +282,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
ws_tree_root);
if (!vsi_node) {
- ret = IRDMA_ERR_NO_MEMORY;
+ ret = -ENOMEM;
goto vsi_add_err;
}
@@ -310,7 +309,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
vsi_node);
if (!tc_node) {
- ret = IRDMA_ERR_NO_MEMORY;
+ ret = -ENOMEM;
goto leaf_add_err;
}
diff --git a/drivers/infiniband/hw/irdma/ws.h b/drivers/infiniband/hw/irdma/ws.h
index f0e16f630701..d431e3327d26 100644
--- a/drivers/infiniband/hw/irdma/ws.h
+++ b/drivers/infiniband/hw/irdma/ws.h
@@ -34,7 +34,7 @@ struct irdma_ws_node {
};
struct irdma_sc_vsi;
-enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_reset(struct irdma_sc_vsi *vsi);
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index e2e1f5daddc4..111fa88a3be4 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -38,7 +38,6 @@
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 6a381751c0d8..c4cf91235eee 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -320,7 +320,6 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
- nreq = 0;
goto out;
}
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index fc036b4794fd..2a2a9e9afc9d 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1895,8 +1895,10 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
key_level2,
obj_event,
GFP_KERNEL);
- if (err)
+ if (err) {
+ kfree(obj_event);
return err;
+ }
INIT_LIST_HEAD(&obj_event->obj_sub_list);
}
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index f2f62875d072..afeb5e53254f 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 844545064c9e..6191aa833ac2 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index cbc20e400be0..4f04bb55c4c6 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -763,9 +763,9 @@ struct mlx5_cache_ent {
char name[4];
u32 order;
- u32 xlt;
u32 access_mode;
u32 page;
+ unsigned int ndescs;
u8 disabled:1;
u8 fill_to_high_water:1;
@@ -788,7 +788,6 @@ struct mlx5_cache_ent {
u32 miss;
struct mlx5_ib_dev *dev;
- struct work_struct work;
struct delayed_work dwork;
};
@@ -1344,7 +1343,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- unsigned int entry, int access_flags);
+ struct mlx5_cache_ent *ent,
+ int access_flags);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -1539,12 +1539,6 @@ static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_suppor
MLX5_UARS_IN_PAGE : 1;
}
-static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi)
-{
- return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
-}
-
extern void *xlt_emergency_page;
int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32cb7068f0ca..956f8e875daa 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -68,7 +68,6 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- bool ro_pci_enabled = pcie_relaxed_ordering_enabled(dev->mdev->pdev);
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
@@ -76,12 +75,13 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
- MLX5_SET(mkc, mkc, relaxed_ordering_write,
- (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
- MLX5_SET(mkc, mkc, relaxed_ordering_read,
- (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
+ if ((acc & IB_ACCESS_RELAXED_ORDERING) &&
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev)) {
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ }
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
@@ -189,6 +189,25 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
spin_unlock_irqrestore(&ent->lock, flags);
}
+static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
+{
+ int ret = 0;
+
+ switch (access_mode) {
+ case MLX5_MKC_ACCESS_MODE_MTT:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_mtt));
+ break;
+ case MLX5_MKC_ACCESS_MODE_KSM:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_klm));
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
+}
+
static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
{
struct mlx5_ib_mr *mr;
@@ -204,7 +223,8 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_mkc_octo_size(ent->access_mode, ent->ndescs));
MLX5_SET(mkc, mkc, log_page_size, ent->page);
return mr;
}
@@ -478,14 +498,14 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
return;
if (ent->available_mrs < ent->limit) {
ent->fill_to_high_water = true;
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->fill_to_high_water &&
ent->available_mrs + ent->pending < 2 * ent->limit) {
/*
* Once we start populating due to hitting a low water mark
* continue until we pass the high water mark.
*/
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->available_mrs == 2 * ent->limit) {
ent->fill_to_high_water = false;
} else if (ent->available_mrs > 2 * ent->limit) {
@@ -495,7 +515,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(1000));
else
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
}
@@ -571,33 +591,20 @@ static void delayed_cache_work_func(struct work_struct *work)
__cache_work_func(ent);
}
-static void cache_work_func(struct work_struct *work)
-{
- struct mlx5_cache_ent *ent;
-
- ent = container_of(work, struct mlx5_cache_ent, work);
- __cache_work_func(ent);
-}
-
-/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- unsigned int entry, int access_flags)
+ struct mlx5_cache_ent *ent,
+ int access_flags)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
- if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
- entry >= ARRAY_SIZE(cache->ent)))
- return ERR_PTR(-EINVAL);
-
/* Matches access in alloc_cache_mr() */
if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
return ERR_PTR(-EOPNOTSUPP);
- ent = &cache->ent[entry];
spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
+ queue_adjust_cache_locked(ent);
+ ent->miss++;
spin_unlock_irq(&ent->lock);
mr = create_cache_mr(ent);
if (IS_ERR(mr))
@@ -611,32 +618,9 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
mlx5_clear_mr(mr);
}
- mr->access_flags = access_flags;
return mr;
}
-/* Return a MR already available in the cache */
-static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
-{
- struct mlx5_ib_mr *mr = NULL;
- struct mlx5_cache_ent *ent = req_ent;
-
- spin_lock_irq(&ent->lock);
- if (!list_empty(&ent->head)) {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
- mlx5_clear_mr(mr);
- return mr;
- }
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
- req_ent->miss++;
- return NULL;
-}
-
static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
@@ -739,7 +723,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->dev = dev;
ent->limit = 0;
- INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
if (i > MR_CACHE_LAST_STD_ENTRY) {
@@ -751,8 +734,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
continue;
ent->page = PAGE_SHIFT;
- ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
- MLX5_IB_UMR_OCTOWORD;
+ ent->ndescs = 1 << ent->order;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
!dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
@@ -783,7 +765,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
spin_lock_irq(&ent->lock);
ent->disabled = true;
spin_unlock_irq(&ent->lock);
- cancel_work_sync(&ent->work);
cancel_delayed_work_sync(&ent->dwork);
}
@@ -972,16 +953,9 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
return mr;
}
- mr = get_cache_mr(ent);
- if (!mr) {
- mr = create_cache_mr(ent);
- /*
- * The above already tried to do the same stuff as reg_create(),
- * no reason to try it again.
- */
- if (IS_ERR(mr))
- return mr;
- }
+ mr = mlx5_mr_cache_alloc(dev, ent, access_flags);
+ if (IS_ERR(mr))
+ return mr;
mr->ibmr.pd = pd;
mr->umem = umem;
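
get_mkc_octo_size() above derives translations_octword_size from a descriptor count instead of caching a precomputed xlt value in the cache entry. A small worked sketch of the arithmetic, with illustrative sizes (16-byte octoword, 8-byte MTT descriptor) rather than values taken from the mlx5 headers:

#include <linux/kernel.h>

#define DEMO_UMR_OCTOWORD	16	/* assumed octoword size, bytes */
#define DEMO_MTT_SIZE		8	/* assumed MTT descriptor size, bytes */

static inline int demo_mtt_octo_size(unsigned int ndescs)
{
	/* e.g. ndescs = 5 -> DIV_ROUND_UP(5, 16 / 8) = 3 octowords */
	return DIV_ROUND_UP(ndescs, DEMO_UMR_OCTOWORD / DEMO_MTT_SIZE);
}

Storing ndescs and converting at mkey-creation time is what lets the MTT and KSM cases, whose descriptor sizes differ, share the switch in get_mkc_octo_size().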
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 86842cd580ba..41c964a45f89 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -407,6 +407,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
unsigned long idx)
{
+ struct mlx5_ib_dev *dev = mr_to_mdev(imr);
struct ib_umem_odp *odp;
struct mlx5_ib_mr *mr;
struct mlx5_ib_mr *ret;
@@ -418,13 +419,14 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
if (IS_ERR(odp))
return ERR_CAST(odp);
- mr = mlx5_mr_cache_alloc(
- mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
+ mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
+ imr->access_flags);
if (IS_ERR(mr)) {
ib_umem_odp_release(odp);
return mr;
}
+ mr->access_flags = imr->access_flags;
mr->ibmr.pd = imr->ibmr.pd;
mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
mr->umem = &odp->umem;
@@ -493,12 +495,15 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
- imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
+ imr = mlx5_mr_cache_alloc(dev,
+ &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
+ access_flags);
if (IS_ERR(imr)) {
ib_umem_odp_release(umem_odp);
return imr;
}
+ imr->access_flags = access_flags;
imr->ibmr.pd = &pd->ibpd;
imr->ibmr.iova = 0;
imr->umem = &umem_odp->umem;
@@ -1593,18 +1598,14 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
switch (ent->order - 2) {
case MLX5_IMR_MTT_CACHE_ENTRY:
ent->page = PAGE_SHIFT;
- ent->xlt = MLX5_IMR_MTT_ENTRIES *
- sizeof(struct mlx5_mtt) /
- MLX5_IB_UMR_OCTOWORD;
+ ent->ndescs = MLX5_IMR_MTT_ENTRIES;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
ent->limit = 0;
break;
case MLX5_IMR_KSM_CACHE_ENTRY:
ent->page = MLX5_KSM_PAGE_SHIFT;
- ent->xlt = mlx5_imr_ksm_entries *
- sizeof(struct mlx5_klm) /
- MLX5_IB_UMR_OCTOWORD;
+ ent->ndescs = mlx5_imr_ksm_entries;
ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
ent->limit = 0;
break;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index b7fe47107d76..3f467557d34e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -31,7 +31,6 @@
*/
#include <linux/etherdevice.h>
-#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
@@ -615,7 +614,8 @@ enum {
static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
- return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
+ bfregi->num_static_sys_pages * MLX5_NON_FP_BFREGS_PER_UAR;
}
static int num_med_bfreg(struct mlx5_ib_dev *dev,
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 191c4ee7db62..09b365a98bbf 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -3,7 +3,6 @@
* Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
*/
-#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index f507c4cd46d3..b54bc8865dae 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -939,12 +939,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
- goto err_free_res;
- }
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ goto err_free_res;
}
/* We can handle large RDMA requests, so allow larger segments. */
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 7ea970774839..69af65f1b332 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index a0c5f3bdc324..a973905afd13 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/mount.h>
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 80a8dd6c7814..37b628a162e0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -634,7 +634,7 @@ static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
"PCIe completion timeout"),
/*
- * In practice, it's unlikely wthat we'll see PCIe PLL, or bus
+ * In practice, it's unlikely that we'll see PCIe PLL, or bus
* parity or memory parity error failures, because most likely we
* won't be able to talk to the core of the chip. Nonetheless, we
* might see them, if they are in parts of the PCIe core that aren't
@@ -2988,7 +2988,7 @@ done:
* the utility. Names need to be 12 chars or less (w/o newline), for proper
* display by utility.
* Non-error counters are first.
- * Start of "error" conters is indicated by a leading "E " on the first
+ * Start of "error" counters is indicated by a leading "E " on the first
* "error" counter, and doesn't count in label length.
* The EgrOvfl list needs to be last so we truncate them at the configured
* context count for the device.
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
index e5a3f02fb078..10a8cd5ba076 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -32,7 +32,6 @@
*/
#include <linux/debugfs.h>
-#include <linux/module.h>
#include "usnic.h"
#include "usnic_log.h"
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 3b60fa9cb58d..59bfbfaee325 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -32,7 +32,6 @@
*/
#include <linux/bug.h>
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
#include "usnic_log.h"
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 7d868f033bbf..fdb63a8fb997 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -31,7 +31,6 @@
*
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 5a0e26cd648e..d3a9670bf971 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
index 82dd810bc000..dc37066900a5 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.c
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -32,7 +32,6 @@
*/
#include <linux/bitmap.h>
#include <linux/file.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <net/inet_sock.h>
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
index ebe08f348453..0c47f73aaed5 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.c
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -31,7 +31,6 @@
*
*/
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include "usnic_ib.h"
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 105f3a155939..343288b02792 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -811,12 +811,10 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
}
/* Enable 64-Bit DMA */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret != 0) {
- dev_err(&pdev->dev, "dma_set_mask failed\n");
- goto err_free_resource;
- }
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ goto err_free_resource;
}
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
pci_set_master(pdev);
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index fab291245366..2dae7538a2ea 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -28,8 +28,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
rxe_pool_cleanup(&rxe->cq_pool);
rxe_pool_cleanup(&rxe->mr_pool);
rxe_pool_cleanup(&rxe->mw_pool);
- rxe_pool_cleanup(&rxe->mc_grp_pool);
- rxe_pool_cleanup(&rxe->mc_elem_pool);
+
+ WARN_ON(!RB_EMPTY_ROOT(&rxe->mcg_tree));
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
@@ -114,106 +114,37 @@ static void rxe_init_ports(struct rxe_dev *rxe)
}
/* init pools of managed objects */
-static int rxe_init_pools(struct rxe_dev *rxe)
+static void rxe_init_pools(struct rxe_dev *rxe)
{
- int err;
-
- err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
- rxe->max_ucontext);
- if (err)
- goto err1;
-
- err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
- rxe->attr.max_pd);
- if (err)
- goto err2;
-
- err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
- rxe->attr.max_ah);
- if (err)
- goto err3;
-
- err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
- rxe->attr.max_srq);
- if (err)
- goto err4;
-
- err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
- rxe->attr.max_qp);
- if (err)
- goto err5;
-
- err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
- rxe->attr.max_cq);
- if (err)
- goto err6;
-
- err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
- rxe->attr.max_mr);
- if (err)
- goto err7;
-
- err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
- rxe->attr.max_mw);
- if (err)
- goto err8;
-
- err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
- rxe->attr.max_mcast_grp);
- if (err)
- goto err9;
-
- err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
- rxe->attr.max_total_mcast_qp_attach);
- if (err)
- goto err10;
-
- return 0;
-
-err10:
- rxe_pool_cleanup(&rxe->mc_grp_pool);
-err9:
- rxe_pool_cleanup(&rxe->mw_pool);
-err8:
- rxe_pool_cleanup(&rxe->mr_pool);
-err7:
- rxe_pool_cleanup(&rxe->cq_pool);
-err6:
- rxe_pool_cleanup(&rxe->qp_pool);
-err5:
- rxe_pool_cleanup(&rxe->srq_pool);
-err4:
- rxe_pool_cleanup(&rxe->ah_pool);
-err3:
- rxe_pool_cleanup(&rxe->pd_pool);
-err2:
- rxe_pool_cleanup(&rxe->uc_pool);
-err1:
- return err;
+ rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC);
+ rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD);
+ rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH);
+ rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ);
+ rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP);
+ rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ);
+ rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR);
+ rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW);
}
/* initialize rxe device state */
-static int rxe_init(struct rxe_dev *rxe)
+static void rxe_init(struct rxe_dev *rxe)
{
- int err;
-
/* init default device parameters */
rxe_init_device_param(rxe);
rxe_init_ports(rxe);
-
- err = rxe_init_pools(rxe);
- if (err)
- return err;
+ rxe_init_pools(rxe);
/* init pending mmap list */
spin_lock_init(&rxe->mmap_offset_lock);
spin_lock_init(&rxe->pending_lock);
INIT_LIST_HEAD(&rxe->pending_mmaps);
- mutex_init(&rxe->usdev_lock);
+ /* init multicast support */
+ spin_lock_init(&rxe->mcg_lock);
+ rxe->mcg_tree = RB_ROOT;
- return 0;
+ mutex_init(&rxe->usdev_lock);
}
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
@@ -235,12 +166,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
*/
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
{
- int err;
-
- err = rxe_init(rxe);
- if (err)
- return err;
-
+ rxe_init(rxe);
rxe_set_mtu(rxe, mtu);
return rxe_register_device(rxe, ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index fb9066e6f5f0..30fbdf3bc76a 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -12,7 +12,6 @@
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
#include <linux/skbuff.h>
#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 38c7b6fb39d7..3b05314ca739 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -99,11 +99,14 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
av->network_type = type;
}
-struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp)
{
struct rxe_ah *ah;
u32 ah_num;
+ if (ahp)
+ *ahp = NULL;
+
if (!pkt || !pkt->qp)
return NULL;
@@ -117,10 +120,22 @@ struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
if (ah_num) {
/* only new user provider or kernel client */
ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);
- if (!ah || ah->ah_num != ah_num || rxe_ah_pd(ah) != pkt->qp->pd) {
+ if (!ah) {
pr_warn("Unable to find AH matching ah_num\n");
return NULL;
}
+
+ if (rxe_ah_pd(ah) != pkt->qp->pd) {
+ pr_warn("PDs don't match for AH and QP\n");
+ rxe_put(ah);
+ return NULL;
+ }
+
+ if (ahp)
+ *ahp = ah;
+ else
+ rxe_put(ah);
+
return &ah->av;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index f363fe3fa414..138b3e7d3a5f 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -526,7 +526,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
struct rxe_queue *q = qp->sq.queue;
while ((skb = skb_dequeue(&qp->resp_pkts))) {
- rxe_drop_ref(qp);
+ rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}
@@ -548,7 +548,7 @@ static void free_pkt(struct rxe_pkt_info *pkt)
struct ib_device *dev = qp->ibqp.device;
kfree_skb(skb);
- rxe_drop_ref(qp);
+ rxe_put(qp);
ib_device_put(dev);
}
@@ -562,7 +562,7 @@ int rxe_completer(void *arg)
enum comp_state state;
int ret = 0;
- rxe_add_ref(qp);
+ rxe_get(qp);
if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
qp->req.state == QP_STATE_RESET) {
@@ -761,7 +761,7 @@ int rxe_completer(void *arg)
done:
if (pkt)
free_pkt(pkt);
- rxe_drop_ref(qp);
+ rxe_put(qp);
return ret;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 6baaaa34458e..642b52539ac3 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -42,13 +42,14 @@ err1:
static void rxe_send_complete(struct tasklet_struct *t)
{
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
+ unsigned long flags;
- spin_lock_bh(&cq->cq_lock);
+ spin_lock_irqsave(&cq->cq_lock, flags);
if (cq->is_dying) {
- spin_unlock_bh(&cq->cq_lock);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
return;
}
- spin_unlock_bh(&cq->cq_lock);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -107,12 +108,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
struct ib_event ev;
int full;
void *addr;
+ unsigned long flags;
- spin_lock_bh(&cq->cq_lock);
+ spin_lock_irqsave(&cq->cq_lock, flags);
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- spin_unlock_bh(&cq->cq_lock);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
@@ -128,7 +130,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
- spin_unlock_bh(&cq->cq_lock);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -141,9 +143,11 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
void rxe_cq_disable(struct rxe_cq *cq)
{
- spin_lock_bh(&cq->cq_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
cq->is_dying = true;
- spin_unlock_bh(&cq->cq_lock);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
}
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index b1e174afb1d4..2ffbe3390668 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -19,7 +19,7 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);
void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
-struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp);
/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
@@ -40,18 +40,10 @@ void rxe_cq_disable(struct rxe_cq *cq);
void rxe_cq_cleanup(struct rxe_pool_elem *arg);
/* rxe_mcast.c */
-int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
- struct rxe_mc_grp **grp_p);
-
-int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
- struct rxe_mc_grp *grp);
-
-int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
- union ib_gid *mgid);
-
-void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
-
-void rxe_mc_cleanup(struct rxe_pool_elem *arg);
+struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid);
+int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
+int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
+void rxe_cleanup_mcg(struct kref *kref);
/* rxe_mmap.c */
struct rxe_mmap_info {
@@ -102,35 +94,27 @@ void rxe_mw_cleanup(struct rxe_pool_elem *arg);
/* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb);
+int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb);
int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
struct sk_buff *skb);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
-int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
-int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
-
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct ib_qp_init_attr *init,
struct rxe_create_qp_resp __user *uresp,
struct ib_pd *ibpd, struct ib_udata *udata);
-
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
-
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_attr *attr, int mask);
-
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
int mask, struct ib_udata *udata);
-
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
-
void rxe_qp_error(struct rxe_qp *qp);
-
+int rxe_qp_chk_destroy(struct rxe_qp *qp);
void rxe_qp_destroy(struct rxe_qp *qp);
-
void rxe_qp_cleanup(struct rxe_pool_elem *elem);
static inline int qp_num(struct rxe_qp *qp)
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index bd1ac88b8700..ae8f11cb704a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -1,178 +1,488 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
+ * Copyright (c) 2022 Hewlett Packard Enterprise, Inc. All rights reserved.
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
+/*
+ * rxe_mcast.c implements driver support for multicast transport.
+ * It is based on two data structures struct rxe_mcg ('mcg') and
+ * struct rxe_mca ('mca'). An mcg is allocated each time a qp is
+ * attached to a new mgid for the first time. These are indexed by
+ * a red-black tree using the mgid. This data structure is searched
+ * for the mcg when a multicast packet is received and when another
+ * qp is attached to the same mgid. It is cleaned up when the last qp
+ * is detached from the mcg. Each time a qp is attached to an mcg an
+ * mca is created. It holds a pointer to the qp and is added to a list
+ * of qp's that are attached to the mcg. The qp_list is used to replicate
+ * mcast packets in the rxe receive path.
+ */
+
#include "rxe.h"
-#include "rxe_loc.h"
-/* caller should hold mc_grp_pool->pool_lock */
-static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
- struct rxe_pool *pool,
- union ib_gid *mgid)
+/**
+ * rxe_mcast_add - add multicast address to rxe device
+ * @rxe: rxe device object
+ * @mgid: multicast address as a gid
+ *
+ * Returns 0 on success else an error
+ */
+static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
- struct rxe_mc_grp *grp;
+ unsigned char ll_addr[ETH_ALEN];
- grp = rxe_alloc_locked(&rxe->mc_grp_pool);
- if (!grp)
- return ERR_PTR(-ENOMEM);
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- INIT_LIST_HEAD(&grp->qp_list);
- spin_lock_init(&grp->mcg_lock);
- grp->rxe = rxe;
- rxe_add_key_locked(grp, mgid);
+ return dev_mc_add(rxe->ndev, ll_addr);
+}
- err = rxe_mcast_add(rxe, mgid);
- if (unlikely(err)) {
- rxe_drop_key_locked(grp);
- rxe_drop_ref(grp);
- return ERR_PTR(err);
+/**
+ * rxe_mcast_delete - delete multicast address from rxe device
+ * @rxe: rxe device object
+ * @mgid: multicast address as a gid
+ *
+ * Returns 0 on success else an error
+ */
+static int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ unsigned char ll_addr[ETH_ALEN];
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+
+ return dev_mc_del(rxe->ndev, ll_addr);
+}
+
+/**
+ * __rxe_insert_mcg - insert an mcg into red-black tree (rxe->mcg_tree)
+ * @mcg: mcg object with an embedded red-black tree node
+ *
+ * Context: caller must hold a reference to mcg and rxe->mcg_lock and
+ * is responsible to avoid adding the same mcg twice to the tree.
+ */
+static void __rxe_insert_mcg(struct rxe_mcg *mcg)
+{
+ struct rb_root *tree = &mcg->rxe->mcg_tree;
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *node = NULL;
+ struct rxe_mcg *tmp;
+ int cmp;
+
+ while (*link) {
+ node = *link;
+ tmp = rb_entry(node, struct rxe_mcg, node);
+
+ cmp = memcmp(&tmp->mgid, &mcg->mgid, sizeof(mcg->mgid));
+ if (cmp > 0)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
}
- return grp;
+ rb_link_node(&mcg->node, node, link);
+ rb_insert_color(&mcg->node, tree);
}
-int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
- struct rxe_mc_grp **grp_p)
+/**
+ * __rxe_remove_mcg - remove an mcg from red-black tree holding lock
+ * @mcg: mcast group object with an embedded red-black tree node
+ *
+ * Context: caller must hold a reference to mcg and rxe->mcg_lock
+ */
+static void __rxe_remove_mcg(struct rxe_mcg *mcg)
{
- int err;
- struct rxe_mc_grp *grp;
- struct rxe_pool *pool = &rxe->mc_grp_pool;
+ rb_erase(&mcg->node, &mcg->rxe->mcg_tree);
+}
- if (rxe->attr.max_mcast_qp_attach == 0)
- return -EINVAL;
+/**
+ * __rxe_lookup_mcg - lookup mcg in rxe->mcg_tree while holding lock
+ * @rxe: rxe device object
+ * @mgid: multicast IP address
+ *
+ * Context: caller must hold rxe->mcg_lock
+ * Returns: mcg with a ref taken on success, else NULL
+ */
+static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
+ union ib_gid *mgid)
+{
+ struct rb_root *tree = &rxe->mcg_tree;
+ struct rxe_mcg *mcg;
+ struct rb_node *node;
+ int cmp;
- write_lock_bh(&pool->pool_lock);
+ node = tree->rb_node;
- grp = rxe_pool_get_key_locked(pool, mgid);
- if (grp)
- goto done;
+ while (node) {
+ mcg = rb_entry(node, struct rxe_mcg, node);
- grp = create_grp(rxe, pool, mgid);
- if (IS_ERR(grp)) {
- write_unlock_bh(&pool->pool_lock);
- err = PTR_ERR(grp);
- return err;
+ cmp = memcmp(&mcg->mgid, mgid, sizeof(*mgid));
+
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else
+ break;
}
-done:
- write_unlock_bh(&pool->pool_lock);
- *grp_p = grp;
+ if (node) {
+ kref_get(&mcg->ref_cnt);
+ return mcg;
+ }
+
+ return NULL;
+}
+
+/**
+ * rxe_lookup_mcg - look up mcg in red-black tree
+ * @rxe: rxe device object
+ * @mgid: multicast IP address
+ *
+ * Returns: mcg if found else NULL
+ */
+struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ struct rxe_mcg *mcg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
+ mcg = __rxe_lookup_mcg(rxe, mgid);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+
+ return mcg;
+}
+
+/**
+ * __rxe_init_mcg - initialize a new mcg
+ * @rxe: rxe device
+ * @mgid: multicast address as a gid
+ * @mcg: new mcg object
+ *
+ * Context: caller should hold rxe->mcg_lock
+ * Returns: 0 on success else an error
+ */
+static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mcg *mcg)
+{
+ int err;
+
+ err = rxe_mcast_add(rxe, mgid);
+ if (unlikely(err))
+ return err;
+
+ kref_init(&mcg->ref_cnt);
+ memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
+ INIT_LIST_HEAD(&mcg->qp_list);
+ mcg->rxe = rxe;
+
+ /* caller holds a ref on mcg but that will be
+ * dropped when mcg goes out of scope. We need to take a ref
+ * on the pointer that will be saved in the red-black tree
+ * by __rxe_insert_mcg and used to lookup mcg from mgid later.
+ * Inserting mcg makes it visible to outside so this should
+ * be done last after the object is ready.
+ */
+ kref_get(&mcg->ref_cnt);
+ __rxe_insert_mcg(mcg);
+
return 0;
}
-int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
- struct rxe_mc_grp *grp)
+/**
+ * rxe_get_mcg - lookup or allocate a mcg
+ * @rxe: rxe device object
+ * @mgid: multicast IP address as a gid
+ *
+ * Returns: mcg on success else ERR_PTR(error)
+ */
+static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
+ struct rxe_mcg *mcg, *tmp;
+ unsigned long flags;
int err;
- struct rxe_mc_elem *elem;
- /* check to see of the qp is already a member of the group */
- spin_lock_bh(&qp->grp_lock);
- spin_lock_bh(&grp->mcg_lock);
- list_for_each_entry(elem, &grp->qp_list, qp_list) {
- if (elem->qp == qp) {
- err = 0;
- goto out;
- }
- }
+ if (rxe->attr.max_mcast_grp == 0)
+ return ERR_PTR(-EINVAL);
- if (grp->num_qp >= rxe->attr.max_mcast_qp_attach) {
- err = -ENOMEM;
+ /* check to see if mcg already exists */
+ mcg = rxe_lookup_mcg(rxe, mgid);
+ if (mcg)
+ return mcg;
+
+ /* speculative alloc of new mcg */
+ mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
+ if (!mcg)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
+ /* re-check to see if someone else just added it */
+ tmp = __rxe_lookup_mcg(rxe, mgid);
+ if (tmp) {
+ kfree(mcg);
+ mcg = tmp;
goto out;
}
- elem = rxe_alloc_locked(&rxe->mc_elem_pool);
- if (!elem) {
+ if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
err = -ENOMEM;
- goto out;
+ goto err_dec;
+ }
+
+ err = __rxe_init_mcg(rxe, mgid, mcg);
+ if (err)
+ goto err_dec;
+out:
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ return mcg;
+
+err_dec:
+ atomic_dec(&rxe->mcg_num);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ kfree(mcg);
+ return ERR_PTR(err);
+}
+
+/**
+ * rxe_cleanup_mcg - cleanup mcg for kref_put
+ * @kref: struct kref embedded in mcg
+ */
+void rxe_cleanup_mcg(struct kref *kref)
+{
+ struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt);
+
+ kfree(mcg);
+}
+
+/**
+ * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
+ * @mcg: the mcg object
+ *
+ * Context: caller is holding rxe->mcg_lock
+ * no qp's are attached to mcg
+ */
+static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
+{
+ struct rxe_dev *rxe = mcg->rxe;
+
+ /* remove mcg from red-black tree then drop ref */
+ __rxe_remove_mcg(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
+ rxe_mcast_delete(mcg->rxe, &mcg->mgid);
+ atomic_dec(&rxe->mcg_num);
+}
+
+/**
+ * rxe_destroy_mcg - destroy mcg object
+ * @mcg: the mcg object
+ *
+ * Context: no qp's are attached to mcg
+ */
+static void rxe_destroy_mcg(struct rxe_mcg *mcg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcg->rxe->mcg_lock, flags);
+ __rxe_destroy_mcg(mcg);
+ spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
+}
+
+/**
+ * __rxe_init_mca - initialize a new mca holding lock
+ * @qp: qp object
+ * @mcg: mcg object
+ * @mca: empty space for new mca
+ *
+ * Context: caller must hold references on qp and mcg, rxe->mcg_lock
+ * and pass memory for new mca
+ *
+ * Returns: 0 on success else an error
+ */
+static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg,
+ struct rxe_mca *mca)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int n;
+
+ n = atomic_inc_return(&rxe->mcg_attach);
+ if (n > rxe->attr.max_total_mcast_qp_attach) {
+ atomic_dec(&rxe->mcg_attach);
+ return -ENOMEM;
}
- /* each qp holds a ref on the grp */
- rxe_add_ref(grp);
+ n = atomic_inc_return(&mcg->qp_num);
+ if (n > rxe->attr.max_mcast_qp_attach) {
+ atomic_dec(&mcg->qp_num);
+ atomic_dec(&rxe->mcg_attach);
+ return -ENOMEM;
+ }
+
+ atomic_inc(&qp->mcg_num);
- grp->num_qp++;
- elem->qp = qp;
- elem->grp = grp;
+ rxe_get(qp);
+ mca->qp = qp;
- list_add(&elem->qp_list, &grp->qp_list);
- list_add(&elem->grp_list, &qp->grp_list);
+ list_add_tail(&mca->qp_list, &mcg->qp_list);
- err = 0;
+ return 0;
+}
+
+/**
+ * rxe_attach_mcg - attach qp to mcg if not already attached
+ * @qp: qp object
+ * @mcg: mcg object
+ *
+ * Context: caller must hold reference on qp and mcg.
+ * Returns: 0 on success else an error
+ */
+static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = mcg->rxe;
+ struct rxe_mca *mca, *tmp;
+ unsigned long flags;
+ int err;
+
+ /* check to see if the qp is already a member of the group */
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
+ list_for_each_entry(mca, &mcg->qp_list, qp_list) {
+ if (mca->qp == qp) {
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+
+ /* speculative alloc new mca without using GFP_ATOMIC */
+ mca = kzalloc(sizeof(*mca), GFP_KERNEL);
+ if (!mca)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
+ /* re-check to see if someone else just attached qp */
+ list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
+ if (tmp->qp == qp) {
+ kfree(mca);
+ err = 0;
+ goto out;
+ }
+ }
+
+ err = __rxe_init_mca(qp, mcg, mca);
+ if (err)
+ kfree(mca);
out:
- spin_unlock_bh(&grp->mcg_lock);
- spin_unlock_bh(&qp->grp_lock);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
return err;
}
-int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
- union ib_gid *mgid)
+/**
+ * __rxe_cleanup_mca - cleanup mca object holding lock
+ * @mca: mca object
+ * @mcg: mcg object
+ *
+ * Context: caller must hold a reference to mcg and rxe->mcg_lock
+ */
+static void __rxe_cleanup_mca(struct rxe_mca *mca, struct rxe_mcg *mcg)
{
- struct rxe_mc_grp *grp;
- struct rxe_mc_elem *elem, *tmp;
+ list_del(&mca->qp_list);
- grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
- if (!grp)
- goto err1;
+ atomic_dec(&mcg->qp_num);
+ atomic_dec(&mcg->rxe->mcg_attach);
+ atomic_dec(&mca->qp->mcg_num);
+ rxe_put(mca->qp);
- spin_lock_bh(&qp->grp_lock);
- spin_lock_bh(&grp->mcg_lock);
+ kfree(mca);
+}
- list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
- if (elem->qp == qp) {
- list_del(&elem->qp_list);
- list_del(&elem->grp_list);
- grp->num_qp--;
+/**
+ * rxe_detach_mcg - detach qp from mcg
+ * @mcg: mcg object
+ * @qp: qp object
+ *
+ * Returns: 0 on success else an error if qp is not attached.
+ */
+static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
+{
+ struct rxe_dev *rxe = mcg->rxe;
+ struct rxe_mca *mca, *tmp;
+ unsigned long flags;
- spin_unlock_bh(&grp->mcg_lock);
- spin_unlock_bh(&qp->grp_lock);
- rxe_drop_ref(elem);
- rxe_drop_ref(grp); /* ref held by QP */
- rxe_drop_ref(grp); /* ref from get_key */
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
+ list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
+ if (mca->qp == qp) {
+ __rxe_cleanup_mca(mca, mcg);
+
+ /* if the number of qp's attached to the
+ * mcast group falls to zero go ahead and
+ * tear it down. This will not free the
+ * object since we are still holding a ref
+ * from the caller
+ */
+ if (atomic_read(&mcg->qp_num) <= 0)
+ __rxe_destroy_mcg(mcg);
+
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
return 0;
}
}
- spin_unlock_bh(&grp->mcg_lock);
- spin_unlock_bh(&qp->grp_lock);
- rxe_drop_ref(grp); /* ref from get_key */
-err1:
+ /* we didn't find the qp on the list */
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
return -EINVAL;
}
-void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
+/**
+ * rxe_attach_mcast - attach qp to multicast group (see IBA-11.3.1)
+ * @ibqp: (IB) qp object
+ * @mgid: multicast IP address
+ * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6)
+ *
+ * Returns: 0 on success else an errno
+ */
+int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
- struct rxe_mc_grp *grp;
- struct rxe_mc_elem *elem;
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_mcg *mcg;
- while (1) {
- spin_lock_bh(&qp->grp_lock);
- if (list_empty(&qp->grp_list)) {
- spin_unlock_bh(&qp->grp_lock);
- break;
- }
- elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
- grp_list);
- list_del(&elem->grp_list);
- spin_unlock_bh(&qp->grp_lock);
-
- grp = elem->grp;
- spin_lock_bh(&grp->mcg_lock);
- list_del(&elem->qp_list);
- grp->num_qp--;
- spin_unlock_bh(&grp->mcg_lock);
- rxe_drop_ref(grp);
- rxe_drop_ref(elem);
- }
+ /* takes a ref on mcg if successful */
+ mcg = rxe_get_mcg(rxe, mgid);
+ if (IS_ERR(mcg))
+ return PTR_ERR(mcg);
+
+ err = rxe_attach_mcg(mcg, qp);
+
+ /* if we failed to attach the first qp to mcg tear it down */
+ if (atomic_read(&mcg->qp_num) == 0)
+ rxe_destroy_mcg(mcg);
+
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
+ return err;
}
-void rxe_mc_cleanup(struct rxe_pool_elem *elem)
+/**
+ * rxe_detach_mcast - detach qp from multicast group (see IBA-11.3.2)
+ * @ibqp: address of (IB) qp object
+ * @mgid: multicast IP address
+ * @mlid: multicast LID, ignored for RoCEv2 (see IBA-A17.5.6)
+ *
+ * Returns: 0 on success else an errno
+ */
+int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
- struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem);
- struct rxe_dev *rxe = grp->rxe;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_mcg *mcg;
+ int err;
- rxe_drop_key(grp);
- rxe_mcast_delete(rxe, &grp->mgid);
+ mcg = rxe_lookup_mcg(rxe, mgid);
+ if (!mcg)
+ return -EINVAL;
+
+ err = rxe_detach_mcg(mcg, qp);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
+ return err;
}
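
The rewritten rxe_mcast.c replaces the old pool/key objects with a kref-counted struct rxe_mcg indexed by a red-black tree. A caller-side sketch of the resulting lookup/put discipline, assuming the rxe driver headers; the surrounding function is invented purely for illustration:

#include "rxe.h"

static int demo_mcast_lookup(struct rxe_dev *rxe, union ib_gid *mgid)
{
	struct rxe_mcg *mcg;

	/* rxe_lookup_mcg() returns the mcg with a kref already taken */
	mcg = rxe_lookup_mcg(rxe, mgid);
	if (!mcg)
		return -EINVAL;

	/* ... e.g. walk mcg->qp_list to replicate a multicast packet ... */

	/* drop the lookup reference; the mcg is freed once the last ref goes */
	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

	return 0;
}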
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 035f226af133..9149b6095429 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -4,7 +4,6 @@
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
-#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 453ef3c9d535..60a31b718774 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -459,7 +459,7 @@ int copy_data(
if (offset >= sge->length) {
if (mr) {
- rxe_drop_ref(mr);
+ rxe_put(mr);
mr = NULL;
}
sge++;
@@ -504,13 +504,13 @@ int copy_data(
dma->resid = resid;
if (mr)
- rxe_drop_ref(mr);
+ rxe_put(mr);
return 0;
err2:
if (mr)
- rxe_drop_ref(mr);
+ rxe_put(mr);
err1:
return err;
}
@@ -569,7 +569,7 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
(type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
mr_pd(mr) != pd || (access && !(access & mr->access)) ||
mr->state != RXE_MR_STATE_VALID)) {
- rxe_drop_ref(mr);
+ rxe_put(mr);
mr = NULL;
}
@@ -613,7 +613,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
ret = 0;
err_drop_ref:
- rxe_drop_ref(mr);
+ rxe_put(mr);
err:
return ret;
}
@@ -690,9 +690,8 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
mr->state = RXE_MR_STATE_INVALID;
- rxe_drop_ref(mr_pd(mr));
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
+ rxe_put(mr_pd(mr));
+ rxe_put(mr);
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 32dd8c0b8b9e..c86b2efd58f2 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -12,15 +12,14 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
struct rxe_dev *rxe = to_rdev(ibmw->device);
int ret;
- rxe_add_ref(pd);
+ rxe_get(pd);
ret = rxe_add_to_pool(&rxe->mw_pool, mw);
if (ret) {
- rxe_drop_ref(pd);
+ rxe_put(pd);
return ret;
}
- rxe_add_index(mw);
mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
@@ -36,14 +35,14 @@ static void rxe_do_dealloc_mw(struct rxe_mw *mw)
mw->mr = NULL;
atomic_dec(&mr->num_mw);
- rxe_drop_ref(mr);
+ rxe_put(mr);
}
if (mw->qp) {
struct rxe_qp *qp = mw->qp;
mw->qp = NULL;
- rxe_drop_ref(qp);
+ rxe_put(qp);
}
mw->access = 0;
@@ -61,8 +60,8 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
rxe_do_dealloc_mw(mw);
spin_unlock_bh(&mw->lock);
- rxe_drop_ref(mw);
- rxe_drop_ref(pd);
+ rxe_put(mw);
+ rxe_put(pd);
return 0;
}
@@ -171,7 +170,7 @@ static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
mw->length = wqe->wr.wr.mw.length;
if (mw->mr) {
- rxe_drop_ref(mw->mr);
+ rxe_put(mw->mr);
atomic_dec(&mw->mr->num_mw);
mw->mr = NULL;
}
@@ -179,11 +178,11 @@ static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (mw->length) {
mw->mr = mr;
atomic_inc(&mr->num_mw);
- rxe_add_ref(mr);
+ rxe_get(mr);
}
if (mw->ibmw.type == IB_MW_TYPE_2) {
- rxe_add_ref(qp);
+ rxe_get(qp);
mw->qp = qp;
}
}
@@ -234,9 +233,9 @@ err_unlock:
spin_unlock_bh(&mw->lock);
err_drop_mr:
if (mr)
- rxe_drop_ref(mr);
+ rxe_put(mr);
err_drop_mw:
- rxe_drop_ref(mw);
+ rxe_put(mw);
err:
return ret;
}
@@ -261,13 +260,13 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw)
/* valid type 2 MW will always have a QP pointer */
qp = mw->qp;
mw->qp = NULL;
- rxe_drop_ref(qp);
+ rxe_put(qp);
/* valid type 2 MW will always have an MR pointer */
mr = mw->mr;
mw->mr = NULL;
atomic_dec(&mr->num_mw);
- rxe_drop_ref(mr);
+ rxe_put(mr);
mw->access = 0;
mw->addr = 0;
@@ -302,7 +301,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
err_unlock:
spin_unlock_bh(&mw->lock);
err_drop_ref:
- rxe_drop_ref(mw);
+ rxe_put(mw);
err:
return ret;
}
@@ -323,16 +322,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
(mw->length == 0) ||
(access && !(access & mw->access)) ||
mw->state != RXE_MW_STATE_VALID)) {
- rxe_drop_ref(mw);
+ rxe_put(mw);
return NULL;
}
return mw;
}
-
-void rxe_mw_cleanup(struct rxe_pool_elem *elem)
-{
- struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
-
- rxe_drop_index(mw);
-}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index be72bdbfb4ba..c53f4529f098 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -20,24 +20,6 @@
static struct rxe_recv_sockets recv_sockets;
-int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
-{
- unsigned char ll_addr[ETH_ALEN];
-
- ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
-
- return dev_mc_add(rxe->ndev, ll_addr);
-}
-
-int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
-{
- unsigned char ll_addr[ETH_ALEN];
-
- ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
-
- return dev_mc_del(rxe->ndev, ll_addr);
-}
-
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
struct in_addr *saddr,
struct in_addr *daddr)
@@ -289,13 +271,13 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
bool xnet = false;
__be16 df = htons(IP_DF);
- struct rxe_av *av = rxe_get_av(pkt);
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
@@ -315,11 +297,11 @@ static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
-static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
- struct rxe_av *av = rxe_get_av(pkt);
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
@@ -340,16 +322,17 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
{
int err = 0;
if (skb->protocol == htons(ETH_P_IP))
- err = prepare4(pkt, skb);
+ err = prepare4(av, pkt, skb);
else if (skb->protocol == htons(ETH_P_IPV6))
- err = prepare6(pkt, skb);
+ err = prepare6(av, pkt, skb);
- if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
+ if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
pkt->mask |= RXE_LOOPBACK_MASK;
return err;
@@ -365,7 +348,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
rxe_run_task(&qp->req.task, 1);
- rxe_drop_ref(qp);
+ rxe_put(qp);
}
static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
@@ -375,7 +358,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
skb->destructor = rxe_skb_tx_dtor;
skb->sk = pkt->qp->sk->sk;
- rxe_add_ref(pkt->qp);
+ rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP)) {
@@ -385,7 +368,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
} else {
pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
atomic_dec(&pkt->qp->skb_out);
- rxe_drop_ref(pkt->qp);
+ rxe_put(pkt->qp);
kfree_skb(skb);
return -EINVAL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 4cb003885e00..87066d04ed18 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -12,129 +12,93 @@ static const struct rxe_type_info {
const char *name;
size_t size;
size_t elem_offset;
- void (*cleanup)(struct rxe_pool_elem *obj);
+ void (*cleanup)(struct rxe_pool_elem *elem);
enum rxe_pool_flags flags;
u32 min_index;
u32 max_index;
- size_t key_offset;
- size_t key_size;
+ u32 max_elem;
} rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
- .name = "rxe-uc",
+ .name = "uc",
.size = sizeof(struct rxe_ucontext),
.elem_offset = offsetof(struct rxe_ucontext, elem),
- .flags = RXE_POOL_NO_ALLOC,
+ .min_index = 1,
+ .max_index = UINT_MAX,
+ .max_elem = UINT_MAX,
},
[RXE_TYPE_PD] = {
- .name = "rxe-pd",
+ .name = "pd",
.size = sizeof(struct rxe_pd),
.elem_offset = offsetof(struct rxe_pd, elem),
- .flags = RXE_POOL_NO_ALLOC,
+ .min_index = 1,
+ .max_index = UINT_MAX,
+ .max_elem = UINT_MAX,
},
[RXE_TYPE_AH] = {
- .name = "rxe-ah",
+ .name = "ah",
.size = sizeof(struct rxe_ah),
.elem_offset = offsetof(struct rxe_ah, elem),
- .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_AH_INDEX,
.max_index = RXE_MAX_AH_INDEX,
+ .max_elem = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
},
[RXE_TYPE_SRQ] = {
- .name = "rxe-srq",
+ .name = "srq",
.size = sizeof(struct rxe_srq),
.elem_offset = offsetof(struct rxe_srq, elem),
- .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
+ .max_elem = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
},
[RXE_TYPE_QP] = {
- .name = "rxe-qp",
+ .name = "qp",
.size = sizeof(struct rxe_qp),
.elem_offset = offsetof(struct rxe_qp, elem),
.cleanup = rxe_qp_cleanup,
- .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX,
.max_index = RXE_MAX_QP_INDEX,
+ .max_elem = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
},
[RXE_TYPE_CQ] = {
- .name = "rxe-cq",
+ .name = "cq",
.size = sizeof(struct rxe_cq),
.elem_offset = offsetof(struct rxe_cq, elem),
- .flags = RXE_POOL_NO_ALLOC,
.cleanup = rxe_cq_cleanup,
+ .min_index = 1,
+ .max_index = UINT_MAX,
+ .max_elem = UINT_MAX,
},
[RXE_TYPE_MR] = {
- .name = "rxe-mr",
+ .name = "mr",
.size = sizeof(struct rxe_mr),
.elem_offset = offsetof(struct rxe_mr, elem),
.cleanup = rxe_mr_cleanup,
- .flags = RXE_POOL_INDEX,
+ .flags = RXE_POOL_ALLOC,
.min_index = RXE_MIN_MR_INDEX,
.max_index = RXE_MAX_MR_INDEX,
+ .max_elem = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
},
[RXE_TYPE_MW] = {
- .name = "rxe-mw",
+ .name = "mw",
.size = sizeof(struct rxe_mw),
.elem_offset = offsetof(struct rxe_mw, elem),
- .cleanup = rxe_mw_cleanup,
- .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_MW_INDEX,
.max_index = RXE_MAX_MW_INDEX,
- },
- [RXE_TYPE_MC_GRP] = {
- .name = "rxe-mc_grp",
- .size = sizeof(struct rxe_mc_grp),
- .elem_offset = offsetof(struct rxe_mc_grp, elem),
- .cleanup = rxe_mc_cleanup,
- .flags = RXE_POOL_KEY,
- .key_offset = offsetof(struct rxe_mc_grp, mgid),
- .key_size = sizeof(union ib_gid),
- },
- [RXE_TYPE_MC_ELEM] = {
- .name = "rxe-mc_elem",
- .size = sizeof(struct rxe_mc_elem),
- .elem_offset = offsetof(struct rxe_mc_elem, elem),
+ .max_elem = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
},
};
-static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
-{
- int err = 0;
-
- if ((max - min + 1) < pool->max_elem) {
- pr_warn("not enough indices for max_elem\n");
- err = -EINVAL;
- goto out;
- }
-
- pool->index.max_index = max;
- pool->index.min_index = min;
-
- pool->index.table = bitmap_zalloc(max - min + 1, GFP_KERNEL);
- if (!pool->index.table) {
- err = -ENOMEM;
- goto out;
- }
-
-out:
- return err;
-}
-
-int rxe_pool_init(
- struct rxe_dev *rxe,
- struct rxe_pool *pool,
- enum rxe_elem_type type,
- unsigned int max_elem)
+void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
+ enum rxe_elem_type type)
{
const struct rxe_type_info *info = &rxe_type_info[type];
- int err = 0;
memset(pool, 0, sizeof(*pool));
pool->rxe = rxe;
pool->name = info->name;
pool->type = type;
- pool->max_elem = max_elem;
+ pool->max_elem = info->max_elem;
pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
pool->elem_offset = info->elem_offset;
pool->flags = info->flags;
@@ -142,225 +106,31 @@ int rxe_pool_init(
atomic_set(&pool->num_elem, 0);
- rwlock_init(&pool->pool_lock);
-
- if (pool->flags & RXE_POOL_INDEX) {
- pool->index.tree = RB_ROOT;
- err = rxe_pool_init_index(pool, info->max_index,
- info->min_index);
- if (err)
- goto out;
- }
-
- if (pool->flags & RXE_POOL_KEY) {
- pool->key.tree = RB_ROOT;
- pool->key.key_offset = info->key_offset;
- pool->key.key_size = info->key_size;
- }
-
-out:
- return err;
+ xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
+ pool->limit.min = info->min_index;
+ pool->limit.max = info->max_index;
}
void rxe_pool_cleanup(struct rxe_pool *pool)
{
- if (atomic_read(&pool->num_elem) > 0)
- pr_warn("%s pool destroyed with unfree'd elem\n",
- pool->name);
-
- if (pool->flags & RXE_POOL_INDEX)
- bitmap_free(pool->index.table);
-}
-
-static u32 alloc_index(struct rxe_pool *pool)
-{
- u32 index;
- u32 range = pool->index.max_index - pool->index.min_index + 1;
-
- index = find_next_zero_bit(pool->index.table, range, pool->index.last);
- if (index >= range)
- index = find_first_zero_bit(pool->index.table, range);
-
- WARN_ON_ONCE(index >= range);
- set_bit(index, pool->index.table);
- pool->index.last = index;
- return index + pool->index.min_index;
-}
-
-static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
-{
- struct rb_node **link = &pool->index.tree.rb_node;
- struct rb_node *parent = NULL;
- struct rxe_pool_elem *elem;
-
- while (*link) {
- parent = *link;
- elem = rb_entry(parent, struct rxe_pool_elem, index_node);
-
- if (elem->index == new->index) {
- pr_warn("element already exists!\n");
- return -EINVAL;
- }
-
- if (elem->index > new->index)
- link = &(*link)->rb_left;
- else
- link = &(*link)->rb_right;
- }
-
- rb_link_node(&new->index_node, parent, link);
- rb_insert_color(&new->index_node, &pool->index.tree);
-
- return 0;
-}
-
-static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
-{
- struct rb_node **link = &pool->key.tree.rb_node;
- struct rb_node *parent = NULL;
- struct rxe_pool_elem *elem;
- int cmp;
-
- while (*link) {
- parent = *link;
- elem = rb_entry(parent, struct rxe_pool_elem, key_node);
-
- cmp = memcmp((u8 *)elem + pool->key.key_offset,
- (u8 *)new + pool->key.key_offset,
- pool->key.key_size);
-
- if (cmp == 0) {
- pr_warn("key already exists!\n");
- return -EINVAL;
- }
-
- if (cmp > 0)
- link = &(*link)->rb_left;
- else
- link = &(*link)->rb_right;
- }
-
- rb_link_node(&new->key_node, parent, link);
- rb_insert_color(&new->key_node, &pool->key.tree);
-
- return 0;
-}
-
-int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
-{
- struct rxe_pool *pool = elem->pool;
- int err;
-
- memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
- err = rxe_insert_key(pool, elem);
-
- return err;
-}
-
-int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
-{
- struct rxe_pool *pool = elem->pool;
- int err;
-
- write_lock_bh(&pool->pool_lock);
- err = __rxe_add_key_locked(elem, key);
- write_unlock_bh(&pool->pool_lock);
-
- return err;
-}
-
-void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
-
- rb_erase(&elem->key_node, &pool->key.tree);
-}
-
-void __rxe_drop_key(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
-
- write_lock_bh(&pool->pool_lock);
- __rxe_drop_key_locked(elem);
- write_unlock_bh(&pool->pool_lock);
-}
-
-int __rxe_add_index_locked(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
- int err;
-
- elem->index = alloc_index(pool);
- err = rxe_insert_index(pool, elem);
-
- return err;
-}
-
-int __rxe_add_index(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
- int err;
-
- write_lock_bh(&pool->pool_lock);
- err = __rxe_add_index_locked(elem);
- write_unlock_bh(&pool->pool_lock);
-
- return err;
-}
-
-void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
-
- clear_bit(elem->index - pool->index.min_index, pool->index.table);
- rb_erase(&elem->index_node, &pool->index.tree);
-}
-
-void __rxe_drop_index(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
-
- write_lock_bh(&pool->pool_lock);
- __rxe_drop_index_locked(elem);
- write_unlock_bh(&pool->pool_lock);
-}
-
-void *rxe_alloc_locked(struct rxe_pool *pool)
-{
- struct rxe_pool_elem *elem;
- void *obj;
-
- if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto out_cnt;
-
- obj = kzalloc(pool->elem_size, GFP_ATOMIC);
- if (!obj)
- goto out_cnt;
-
- elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
-
- elem->pool = pool;
- elem->obj = obj;
- kref_init(&elem->ref_cnt);
-
- return obj;
-
-out_cnt:
- atomic_dec(&pool->num_elem);
- return NULL;
+ WARN_ON(!xa_empty(&pool->xa));
}
void *rxe_alloc(struct rxe_pool *pool)
{
struct rxe_pool_elem *elem;
void *obj;
+ int err;
+
+ if (WARN_ON(!(pool->flags & RXE_POOL_ALLOC)))
+ return NULL;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto out_cnt;
+ goto err_cnt;
obj = kzalloc(pool->elem_size, GFP_KERNEL);
if (!obj)
- goto out_cnt;
+ goto err_cnt;
elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
@@ -368,127 +138,86 @@ void *rxe_alloc(struct rxe_pool *pool)
elem->obj = obj;
kref_init(&elem->ref_cnt);
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ &pool->next, GFP_KERNEL);
+ if (err)
+ goto err_free;
+
return obj;
-out_cnt:
+err_free:
+ kfree(obj);
+err_cnt:
atomic_dec(&pool->num_elem);
return NULL;
}
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
+ int err;
+
+ if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
+ return -EINVAL;
+
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto out_cnt;
+ goto err_cnt;
elem->pool = pool;
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ &pool->next, GFP_KERNEL);
+ if (err)
+ goto err_cnt;
+
return 0;
-out_cnt:
+err_cnt:
atomic_dec(&pool->num_elem);
return -EINVAL;
}
-void rxe_elem_release(struct kref *kref)
-{
- struct rxe_pool_elem *elem =
- container_of(kref, struct rxe_pool_elem, ref_cnt);
- struct rxe_pool *pool = elem->pool;
- void *obj;
-
- if (pool->cleanup)
- pool->cleanup(elem);
-
- if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
- obj = elem->obj;
- kfree(obj);
- }
-
- atomic_dec(&pool->num_elem);
-}
-
-void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- struct rb_node *node;
struct rxe_pool_elem *elem;
+ struct xarray *xa = &pool->xa;
+ unsigned long flags;
void *obj;
- node = pool->index.tree.rb_node;
-
- while (node) {
- elem = rb_entry(node, struct rxe_pool_elem, index_node);
-
- if (elem->index > index)
- node = node->rb_left;
- else if (elem->index < index)
- node = node->rb_right;
- else
- break;
- }
-
- if (node) {
- kref_get(&elem->ref_cnt);
+ xa_lock_irqsave(xa, flags);
+ elem = xa_load(xa, index);
+ if (elem && kref_get_unless_zero(&elem->ref_cnt))
obj = elem->obj;
- } else {
+ else
obj = NULL;
- }
-
- return obj;
-}
-
-void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
-{
- void *obj;
-
- read_lock_bh(&pool->pool_lock);
- obj = rxe_pool_get_index_locked(pool, index);
- read_unlock_bh(&pool->pool_lock);
+ xa_unlock_irqrestore(xa, flags);
return obj;
}
-void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
+static void rxe_elem_release(struct kref *kref)
{
- struct rb_node *node;
- struct rxe_pool_elem *elem;
- void *obj;
- int cmp;
-
- node = pool->key.tree.rb_node;
+ struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
+ struct rxe_pool *pool = elem->pool;
- while (node) {
- elem = rb_entry(node, struct rxe_pool_elem, key_node);
+ xa_erase(&pool->xa, elem->index);
- cmp = memcmp((u8 *)elem + pool->key.key_offset,
- key, pool->key.key_size);
+ if (pool->cleanup)
+ pool->cleanup(elem);
- if (cmp > 0)
- node = node->rb_left;
- else if (cmp < 0)
- node = node->rb_right;
- else
- break;
- }
+ if (pool->flags & RXE_POOL_ALLOC)
+ kfree(elem->obj);
- if (node) {
- kref_get(&elem->ref_cnt);
- obj = elem->obj;
- } else {
- obj = NULL;
- }
-
- return obj;
+ atomic_dec(&pool->num_elem);
}
-void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+int __rxe_get(struct rxe_pool_elem *elem)
{
- void *obj;
-
- read_lock_bh(&pool->pool_lock);
- obj = rxe_pool_get_key_locked(pool, key);
- read_unlock_bh(&pool->pool_lock);
+ return kref_get_unless_zero(&elem->ref_cnt);
+}
- return obj;
+int __rxe_put(struct rxe_pool_elem *elem)
+{
+ return kref_put(&elem->ref_cnt, rxe_elem_release);
}
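
The rxe_pool.c rework above replaces the red-black trees, key table and bitmap index allocator with a single xarray plus a per-element kref. A minimal sketch of the lookup-with-reference pattern it relies on follows; it is illustrative only, and "struct elem" stands in for struct rxe_pool_elem rather than being part of the patch.

#include <linux/kref.h>
#include <linux/xarray.h>

struct elem {
        struct kref ref;                /* plays the role of rxe_pool_elem.ref_cnt */
};

static struct elem *lookup_get(struct xarray *xa, unsigned long index)
{
        struct elem *e;
        unsigned long flags;

        xa_lock_irqsave(xa, flags);
        e = xa_load(xa, index);
        /* refuse objects whose last reference is already being dropped */
        if (e && !kref_get_unless_zero(&e->ref))
                e = NULL;
        xa_unlock_irqrestore(xa, flags);

        return e;
}

Allocation in the hunk above uses xa_alloc_cyclic() with the per-type xa_limit from rxe_type_info, so index reuse is delayed in much the same way the old "last index" bitmap scan delayed it.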
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 214279310f4d..24bcc786c1b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -8,9 +8,7 @@
#define RXE_POOL_H
enum rxe_pool_flags {
- RXE_POOL_INDEX = BIT(1),
- RXE_POOL_KEY = BIT(2),
- RXE_POOL_NO_ALLOC = BIT(4),
+ RXE_POOL_ALLOC = BIT(1),
};
enum rxe_elem_type {
@@ -22,8 +20,6 @@ enum rxe_elem_type {
RXE_TYPE_CQ,
RXE_TYPE_MR,
RXE_TYPE_MW,
- RXE_TYPE_MC_GRP,
- RXE_TYPE_MC_ELEM,
RXE_NUM_TYPES, /* keep me last */
};
@@ -32,20 +28,13 @@ struct rxe_pool_elem {
void *obj;
struct kref ref_cnt;
struct list_head list;
-
- /* only used if keyed */
- struct rb_node key_node;
-
- /* only used if indexed */
- struct rb_node index_node;
u32 index;
};
struct rxe_pool {
struct rxe_dev *rxe;
const char *name;
- rwlock_t pool_lock; /* protects pool add/del/search */
- void (*cleanup)(struct rxe_pool_elem *obj);
+ void (*cleanup)(struct rxe_pool_elem *elem);
enum rxe_pool_flags flags;
enum rxe_elem_type type;
@@ -54,36 +43,22 @@ struct rxe_pool {
size_t elem_size;
size_t elem_offset;
- /* only used if indexed */
- struct {
- struct rb_root tree;
- unsigned long *table;
- u32 last;
- u32 max_index;
- u32 min_index;
- } index;
-
- /* only used if keyed */
- struct {
- struct rb_root tree;
- size_t key_offset;
- size_t key_size;
- } key;
+ struct xarray xa;
+ struct xa_limit limit;
+ u32 next;
};
/* initialize a pool of objects with given limit on
* number of elements. gets parameters from rxe_type_info
* pool elements will be allocated out of a slab cache
*/
-int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
- enum rxe_elem_type type, u32 max_elem);
+void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
+ enum rxe_elem_type type);
/* free resources from object pool */
void rxe_pool_cleanup(struct rxe_pool *pool);
-/* allocate an object from pool holding and not holding the pool lock */
-void *rxe_alloc_locked(struct rxe_pool *pool);
-
+/* allocate an object from pool */
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
@@ -91,69 +66,17 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
-/* assign an index to an indexed object and insert object into
- * pool's rb tree holding and not holding the pool_lock
- */
-int __rxe_add_index_locked(struct rxe_pool_elem *elem);
-
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
-
-int __rxe_add_index(struct rxe_pool_elem *elem);
-
-#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
-
-/* drop an index and remove object from rb tree
- * holding and not holding the pool_lock
- */
-void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
-
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
-
-void __rxe_drop_index(struct rxe_pool_elem *elem);
-
-#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
-
-/* assign a key to a keyed object and insert object into
- * pool's rb tree holding and not holding pool_lock
- */
-int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key);
-
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key)
-
-int __rxe_add_key(struct rxe_pool_elem *elem, void *key);
-
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key)
-
-/* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_elem *elem);
-
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem)
-
-void __rxe_drop_key(struct rxe_pool_elem *elem);
-
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem)
-
-/* lookup an indexed object from index holding and not holding the pool_lock.
- * takes a reference on object
- */
-void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index);
-
+/* look up an indexed object by index; takes a reference on the object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
-/* lookup keyed object from key holding and not holding the pool_lock.
- * takes a reference on the objecti
- */
-void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key);
+int __rxe_get(struct rxe_pool_elem *elem);
-void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
+#define rxe_get(obj) __rxe_get(&(obj)->elem)
-/* cleanup an object when all references are dropped */
-void rxe_elem_release(struct kref *kref);
+int __rxe_put(struct rxe_pool_elem *elem);
-/* take a reference on an object */
-#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt)
+#define rxe_put(obj) __rxe_put(&(obj)->elem)
-/* drop a reference on an object */
-#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release)
+#define rxe_read(obj) kref_read(&(obj)->elem.ref_cnt)
#endif /* RXE_POOL_H */
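
With the header above, reference counting is reduced to rxe_get()/rxe_put() and indexed lookup to rxe_pool_get_index(). A hypothetical caller, not taken from the patch, would pair them as below; it assumes the rxe driver's internal headers and a qp_pool field on struct rxe_dev.

/* Hypothetical usage sketch; assumes "rxe.h"/"rxe_pool.h" and rxe->qp_pool. */
static int with_qp(struct rxe_dev *rxe, u32 qpn)
{
        struct rxe_qp *qp;

        /* takes a reference on success, returns NULL for an unused index */
        qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
        if (!qp)
                return -EINVAL;

        /* ... use qp ... */

        rxe_put(qp);            /* drop the lookup reference */
        return 0;
}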
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 5018b9387694..62acf890af6c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -135,12 +135,8 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
- if (res->type == RXE_ATOMIC_MASK) {
+ if (res->type == RXE_ATOMIC_MASK)
kfree_skb(res->atomic.skb);
- } else if (res->type == RXE_READ_MASK) {
- if (res->read.mr)
- rxe_drop_ref(res->read.mr);
- }
res->type = 0;
}
@@ -188,9 +184,6 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
break;
}
- INIT_LIST_HEAD(&qp->grp_list);
-
- spin_lock_init(&qp->grp_lock);
spin_lock_init(&qp->state_lock);
atomic_set(&qp->ssn, 0);
@@ -330,11 +323,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct rxe_cq *scq = to_rcq(init->send_cq);
struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
- rxe_add_ref(pd);
- rxe_add_ref(rcq);
- rxe_add_ref(scq);
+ rxe_get(pd);
+ rxe_get(rcq);
+ rxe_get(scq);
if (srq)
- rxe_add_ref(srq);
+ rxe_get(srq);
qp->pd = pd;
qp->rcq = rcq;
@@ -366,10 +359,10 @@ err1:
qp->srq = NULL;
if (srq)
- rxe_drop_ref(srq);
- rxe_drop_ref(scq);
- rxe_drop_ref(rcq);
- rxe_drop_ref(pd);
+ rxe_put(srq);
+ rxe_put(scq);
+ rxe_put(rcq);
+ rxe_put(pd);
return err;
}
@@ -528,7 +521,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
qp->resp.sent_psn_nak = 0;
if (qp->resp.mr) {
- rxe_drop_ref(qp->resp.mr);
+ rxe_put(qp->resp.mr);
qp->resp.mr = NULL;
}
@@ -770,6 +763,20 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
return 0;
}
+int rxe_qp_chk_destroy(struct rxe_qp *qp)
+{
+ /* See IBA o10-2.2.3
+ * An attempt to destroy a QP while attached to a mcast group
+ * will fail immediately.
+ */
+ if (atomic_read(&qp->mcg_num)) {
+ pr_debug("Attempt to destroy QP while attached to multicast group\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
@@ -798,28 +805,24 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
{
struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
- rxe_drop_all_mcast_groups(qp);
-
if (qp->sq.queue)
rxe_queue_cleanup(qp->sq.queue);
if (qp->srq)
- rxe_drop_ref(qp->srq);
+ rxe_put(qp->srq);
if (qp->rq.queue)
rxe_queue_cleanup(qp->rq.queue);
if (qp->scq)
- rxe_drop_ref(qp->scq);
+ rxe_put(qp->scq);
if (qp->rcq)
- rxe_drop_ref(qp->rcq);
+ rxe_put(qp->rcq);
if (qp->pd)
- rxe_drop_ref(qp->pd);
+ rxe_put(qp->pd);
- if (qp->resp.mr) {
- rxe_drop_ref(qp->resp.mr);
- qp->resp.mr = NULL;
- }
+ if (qp->resp.mr)
+ rxe_put(qp->resp.mr);
if (qp_type(qp) == IB_QPT_RC)
sk_dst_reset(qp->sk->sk);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index a1b283dd2d4c..dbd4971039c0 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -151,6 +151,8 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
struct rxe_queue *new_q;
unsigned int num_elem = *num_elem_p;
int err;
+ unsigned long producer_flags;
+ unsigned long consumer_flags;
new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
if (!new_q)
@@ -164,17 +166,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
goto err1;
}
- spin_lock_bh(consumer_lock);
+ spin_lock_irqsave(consumer_lock, consumer_flags);
if (producer_lock) {
- spin_lock_bh(producer_lock);
+ spin_lock_irqsave(producer_lock, producer_flags);
err = resize_finish(q, new_q, num_elem);
- spin_unlock_bh(producer_lock);
+ spin_unlock_irqrestore(producer_lock, producer_flags);
} else {
err = resize_finish(q, new_q, num_elem);
}
- spin_unlock_bh(consumer_lock);
+ spin_unlock_irqrestore(consumer_lock, consumer_flags);
rxe_queue_cleanup(new_q); /* new/old dep on err */
if (err)
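
The locking change above is one instance of a conversion, repeated in rxe_resp.c and rxe_verbs.c below, from the _bh spinlock variants to irqsave/irqrestore so the queues can be touched from any context. The general shape, shown here as an illustrative pattern rather than a hunk from this series:

#include <linux/spinlock.h>

static void touch_queue(spinlock_t *lock)
{
        unsigned long flags;

        /* safe whether called from process, softirq or hardirq context */
        spin_lock_irqsave(lock, flags);
        /* ... produce or consume queue entries ... */
        spin_unlock_irqrestore(lock, flags);
}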
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 6a6cc1fa90e4..d09a8b68c962 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -217,7 +217,7 @@ static int hdr_check(struct rxe_pkt_info *pkt)
return 0;
err2:
- rxe_drop_ref(qp);
+ rxe_put(qp);
err1:
return -EINVAL;
}
@@ -233,8 +233,8 @@ static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
- struct rxe_mc_grp *mcg;
- struct rxe_mc_elem *mce;
+ struct rxe_mcg *mcg;
+ struct rxe_mca *mca;
struct rxe_qp *qp;
union ib_gid dgid;
int err;
@@ -246,19 +246,19 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));
/* lookup mcast group corresponding to mgid, takes a ref */
- mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
+ mcg = rxe_lookup_mcg(rxe, &dgid);
if (!mcg)
goto drop; /* mcast group not registered */
- spin_lock_bh(&mcg->mcg_lock);
+ spin_lock_bh(&rxe->mcg_lock);
/* this is unreliable datagram service so we let
* failures to deliver a multicast packet to a
* single QP happen and just move on and try
* the rest of them on the list
*/
- list_for_each_entry(mce, &mcg->qp_list, qp_list) {
- qp = mce->qp;
+ list_for_each_entry(mca, &mcg->qp_list, qp_list) {
+ qp = mca->qp;
/* validate qp for incoming packet */
err = check_type_state(rxe, pkt, qp);
@@ -273,7 +273,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
* skb and pass to the QP. Pass the original skb to
* the last QP in the list.
*/
- if (mce->qp_list.next != &mcg->qp_list) {
+ if (mca->qp_list.next != &mcg->qp_list) {
struct sk_buff *cskb;
struct rxe_pkt_info *cpkt;
@@ -288,19 +288,19 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
cpkt = SKB_TO_PKT(cskb);
cpkt->qp = qp;
- rxe_add_ref(qp);
+ rxe_get(qp);
rxe_rcv_pkt(cpkt, cskb);
} else {
pkt->qp = qp;
- rxe_add_ref(qp);
+ rxe_get(qp);
rxe_rcv_pkt(pkt, skb);
skb = NULL; /* mark consumed */
}
}
- spin_unlock_bh(&mcg->mcg_lock);
+ spin_unlock_bh(&rxe->mcg_lock);
- rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
if (likely(!skb))
return;
@@ -397,7 +397,7 @@ void rxe_rcv(struct sk_buff *skb)
drop:
if (pkt->qp)
- rxe_drop_ref(pkt->qp);
+ rxe_put(pkt->qp);
kfree_skb(skb);
ib_device_put(&rxe->ib_dev);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 5eb89052dd66..ae5fbc79dd5c 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -358,14 +358,14 @@ static inline int get_mtu(struct rxe_qp *qp)
}
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ struct rxe_av *av,
struct rxe_send_wqe *wqe,
- int opcode, int payload,
+ int opcode, u32 payload,
struct rxe_pkt_info *pkt)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct sk_buff *skb;
struct rxe_send_wr *ibwr = &wqe->wr;
- struct rxe_av *av;
int pad = (-payload) & 0x3;
int paylen;
int solicited;
@@ -374,21 +374,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
/* length from start of bth to end of icrc */
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
-
- /* pkt->hdr, port_num and mask are initialized in ifc layer */
- pkt->rxe = rxe;
- pkt->opcode = opcode;
- pkt->qp = qp;
- pkt->psn = qp->req.psn;
- pkt->mask = rxe_opcode[opcode].mask;
- pkt->paylen = paylen;
- pkt->wqe = wqe;
+ pkt->paylen = paylen;
/* init skb */
- av = rxe_get_av(pkt);
- if (!av)
- return NULL;
-
skb = rxe_init_packet(rxe, av, paylen, pkt);
if (unlikely(!skb))
return NULL;
@@ -447,13 +435,13 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
return skb;
}
-static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- struct rxe_pkt_info *pkt, struct sk_buff *skb,
- int paylen)
+static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
+ struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 paylen)
{
int err;
- err = rxe_prepare(pkt, skb);
+ err = rxe_prepare(av, pkt, skb);
if (err)
return err;
@@ -497,7 +485,7 @@ static void update_wqe_state(struct rxe_qp *qp,
static void update_wqe_psn(struct rxe_qp *qp,
struct rxe_send_wqe *wqe,
struct rxe_pkt_info *pkt,
- int payload)
+ u32 payload)
{
/* number of packets left to send including current one */
int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
@@ -540,7 +528,7 @@ static void rollback_state(struct rxe_send_wqe *wqe,
}
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- struct rxe_pkt_info *pkt, int payload)
+ struct rxe_pkt_info *pkt)
{
qp->req.opcode = pkt->opcode;
@@ -608,19 +596,22 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
int rxe_requester(void *arg)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_pkt_info pkt;
struct sk_buff *skb;
struct rxe_send_wqe *wqe;
enum rxe_hdr_mask mask;
- int payload;
+ u32 payload;
int mtu;
int opcode;
int ret;
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;
struct rxe_queue *q = qp->sq.queue;
+ struct rxe_ah *ah;
+ struct rxe_av *av;
- rxe_add_ref(qp);
+ rxe_get(qp);
next_wqe:
if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -699,20 +690,34 @@ next_wqe:
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
__rxe_do_task(&qp->comp.task);
- rxe_drop_ref(qp);
+ rxe_put(qp);
return 0;
}
payload = mtu;
}
- skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
+ pkt.rxe = rxe;
+ pkt.opcode = opcode;
+ pkt.qp = qp;
+ pkt.psn = qp->req.psn;
+ pkt.mask = rxe_opcode[opcode].mask;
+ pkt.wqe = wqe;
+
+ av = rxe_get_av(&pkt, &ah);
+ if (unlikely(!av)) {
+ pr_err("qp#%d Failed no address vector\n", qp_num(qp));
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err_drop_ah;
+ }
+
+ skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err;
+ goto err_drop_ah;
}
- ret = finish_packet(qp, wqe, &pkt, skb, payload);
+ ret = finish_packet(qp, av, wqe, &pkt, skb, payload);
if (unlikely(ret)) {
pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
if (ret == -EFAULT)
@@ -720,9 +725,12 @@ next_wqe:
else
wqe->status = IB_WC_LOC_QP_OP_ERR;
kfree_skb(skb);
- goto err;
+ goto err_drop_ah;
}
+ if (ah)
+ rxe_put(ah);
+
/*
* To prevent a race on wqe access between requester and completer,
* wqe members state and psn need to be set before calling
@@ -747,15 +755,18 @@ next_wqe:
goto err;
}
- update_state(qp, wqe, &pkt, payload);
+ update_state(qp, wqe, &pkt);
goto next_wqe;
+err_drop_ah:
+ if (ah)
+ rxe_put(ah);
err:
wqe->state = wqe_state_error;
__rxe_do_task(&qp->comp.task);
exit:
- rxe_drop_ref(qp);
+ rxe_put(qp);
return -EAGAIN;
}
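
In the requester changes above, rxe_requester() now fills in the packet info itself and rxe_get_av() additionally returns the AH so a reference can be held across packet construction. A schematic of that usage, simplified from the hunk with error handling and the actual send elided:

/* Schematic only; mirrors the flow in rxe_requester() above. */
static int build_and_send(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        struct rxe_ah *ah = NULL;
        struct rxe_av *av;

        av = rxe_get_av(pkt, &ah);      /* may take a reference on an AH */
        if (!av)
                return -EINVAL;

        /* init_req_packet(qp, av, ...) and finish_packet(qp, av, ...) go here */

        if (ah)
                rxe_put(ah);            /* release the reference from rxe_get_av() */
        return 0;
}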
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index e8f435fa6e4d..16fc7ea1298d 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -99,7 +99,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
if (qp->resp.state == QP_STATE_ERROR) {
while ((skb = skb_dequeue(&qp->req_pkts))) {
- rxe_drop_ref(qp);
+ rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}
@@ -297,21 +297,22 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
struct ib_event ev;
unsigned int count;
size_t size;
+ unsigned long flags;
if (srq->error)
return RESPST_ERR_RNR;
- spin_lock_bh(&srq->rq.consumer_lock);
+ spin_lock_irqsave(&srq->rq.consumer_lock, flags);
wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
if (!wqe) {
- spin_unlock_bh(&srq->rq.consumer_lock);
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
return RESPST_ERR_RNR;
}
/* don't trust user space data */
if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
- spin_unlock_bh(&srq->rq.consumer_lock);
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
return RESPST_ERR_MALFORMED_WQE;
}
@@ -327,11 +328,11 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
goto event;
}
- spin_unlock_bh(&srq->rq.consumer_lock);
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
return RESPST_CHK_LENGTH;
event:
- spin_unlock_bh(&srq->rq.consumer_lock);
+ spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
@@ -463,8 +464,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
if (mw->access & IB_ZERO_BASED)
qp->resp.offset = mw->addr;
- rxe_drop_ref(mw);
- rxe_add_ref(mr);
+ rxe_put(mw);
+ rxe_get(mr);
} else {
mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
if (!mr) {
@@ -507,9 +508,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
err:
if (mr)
- rxe_drop_ref(mr);
+ rxe_put(mr);
if (mw)
- rxe_drop_ref(mw);
+ rxe_put(mw);
return state;
}
@@ -632,7 +633,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
if (ack->mask & RXE_ATMACK_MASK)
atmack_set_orig(ack, qp->resp.atomic_orig);
- err = rxe_prepare(ack, skb);
+ err = rxe_prepare(&qp->pri_av, ack, skb);
if (err) {
kfree_skb(skb);
return NULL;
@@ -641,6 +642,78 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
return skb;
}
+static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct resp_res *res;
+ u32 pkts;
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ rxe_advance_resp_resource(qp);
+ free_rd_atomic_resource(qp, res);
+
+ res->type = RXE_READ_MASK;
+ res->replay = 0;
+ res->read.va = qp->resp.va + qp->resp.offset;
+ res->read.va_org = qp->resp.va + qp->resp.offset;
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+ res->first_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+ res->state = rdatm_res_state_new;
+
+ return res;
+}
+
+/**
+ * rxe_recheck_mr - revalidate MR from rkey and get a reference
+ * @qp: the qp
+ * @rkey: the rkey
+ *
+ * This code allows the MR to be invalidated or deregistered, or the
+ * MW, if one was used, to be invalidated or deallocated.
+ * It is assumed that the access permissions, if originally good,
+ * are still OK and that the mappings are unchanged.
+ *
+ * Return: mr on success else NULL
+ */
+static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mr *mr;
+ struct rxe_mw *mw;
+
+ if (rkey_is_mw(rkey)) {
+ mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
+ if (!mw || mw->rkey != rkey)
+ return NULL;
+
+ if (mw->state != RXE_MW_STATE_VALID) {
+ rxe_put(mw);
+ return NULL;
+ }
+
+ mr = mw->mr;
+ rxe_put(mw);
+ } else {
+ mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ if (!mr || mr->rkey != rkey)
+ return NULL;
+ }
+
+ if (mr->state != RXE_MR_STATE_VALID) {
+ rxe_put(mr);
+ return NULL;
+ }
+
+ return mr;
+}
+
/* RDMA read response. If res is not NULL, then we have a current RDMA request
* being processed or replayed.
*/
@@ -655,53 +728,26 @@ static enum resp_states read_reply(struct rxe_qp *qp,
int opcode;
int err;
struct resp_res *res = qp->resp.res;
+ struct rxe_mr *mr;
if (!res) {
- /* This is the first time we process that request. Get a
- * resource
- */
- res = &qp->resp.resources[qp->resp.res_head];
-
- free_rd_atomic_resource(qp, res);
- rxe_advance_resp_resource(qp);
-
- res->type = RXE_READ_MASK;
- res->replay = 0;
-
- res->read.va = qp->resp.va +
- qp->resp.offset;
- res->read.va_org = qp->resp.va +
- qp->resp.offset;
-
- res->first_psn = req_pkt->psn;
-
- if (reth_len(req_pkt)) {
- res->last_psn = (req_pkt->psn +
- (reth_len(req_pkt) + mtu - 1) /
- mtu - 1) & BTH_PSN_MASK;
- } else {
- res->last_psn = res->first_psn;
- }
- res->cur_psn = req_pkt->psn;
-
- res->read.resid = qp->resp.resid;
- res->read.length = qp->resp.resid;
- res->read.rkey = qp->resp.rkey;
-
- /* note res inherits the reference to mr from qp */
- res->read.mr = qp->resp.mr;
- qp->resp.mr = NULL;
-
- qp->resp.res = res;
- res->state = rdatm_res_state_new;
+ res = rxe_prepare_read_res(qp, req_pkt);
+ qp->resp.res = res;
}
if (res->state == rdatm_res_state_new) {
+ mr = qp->resp.mr;
+ qp->resp.mr = NULL;
+
if (res->read.resid <= mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
else
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
} else {
+ mr = rxe_recheck_mr(qp, res->read.rkey);
+ if (!mr)
+ return RESPST_ERR_RKEY_VIOLATION;
+
if (res->read.resid > mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
else
@@ -717,10 +763,12 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb)
return RESPST_ERR_RNR;
- err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+ err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
payload, RXE_FROM_MR_OBJ);
if (err)
pr_err("Failed copying memory\n");
+ if (mr)
+ rxe_put(mr);
if (bth_pad(&ack_pkt)) {
u8 *pad = payload_addr(&ack_pkt) + payload;
@@ -814,6 +862,10 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
return RESPST_ERR_INVALIDATE_RKEY;
}
+ if (pkt->mask & RXE_END_MASK)
+ /* We successfully processed this new request. */
+ qp->resp.msn++;
+
/* next expected psn, read handles this separately */
qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
qp->resp.ack_psn = qp->resp.psn;
@@ -821,11 +873,9 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
qp->resp.opcode = pkt->opcode;
qp->resp.status = IB_WC_SUCCESS;
- if (pkt->mask & RXE_COMP_MASK) {
- /* We successfully processed this new request. */
- qp->resp.msn++;
+ if (pkt->mask & RXE_COMP_MASK)
return RESPST_COMPLETE;
- } else if (qp_type(qp) == IB_QPT_RC)
+ else if (qp_type(qp) == IB_QPT_RC)
return RESPST_ACKNOWLEDGE;
else
return RESPST_CLEANUP;
@@ -987,7 +1037,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
rc = rxe_xmit_packet(qp, &ack_pkt, skb);
if (rc) {
pr_err_ratelimited("Failed sending ack\n");
- rxe_drop_ref(qp);
+ rxe_put(qp);
}
out:
return rc;
@@ -1016,13 +1066,13 @@ static enum resp_states cleanup(struct rxe_qp *qp,
if (pkt) {
skb = skb_dequeue(&qp->req_pkts);
- rxe_drop_ref(qp);
+ rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}
if (qp->resp.mr) {
- rxe_drop_ref(qp->resp.mr);
+ rxe_put(qp->resp.mr);
qp->resp.mr = NULL;
}
@@ -1166,7 +1216,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
if (qp->resp.mr) {
- rxe_drop_ref(qp->resp.mr);
+ rxe_put(qp->resp.mr);
qp->resp.mr = NULL;
}
@@ -1180,7 +1230,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
struct rxe_queue *q = qp->rq.queue;
while ((skb = skb_dequeue(&qp->req_pkts))) {
- rxe_drop_ref(qp);
+ rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}
@@ -1200,7 +1250,7 @@ int rxe_responder(void *arg)
struct rxe_pkt_info *pkt = NULL;
int ret = 0;
- rxe_add_ref(qp);
+ rxe_get(qp);
qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
@@ -1387,6 +1437,6 @@ int rxe_responder(void *arg)
exit:
ret = -EAGAIN;
done:
- rxe_drop_ref(qp);
+ rxe_put(qp);
return ret;
}
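
rxe_recheck_mr() above and the MW allocation in rxe_mw.c rely on the same rkey layout: the pool index sits in the upper 24 bits and a variant key in the low byte, which is why lookups pass rkey >> 8 to rxe_pool_get_index(). A small sketch of that split, restating the hunks rather than adding driver API:

#include <linux/types.h>

static inline u32 make_rkey(u32 index, u8 key)
{
        return (index << 8) | key;      /* as in "(mw->elem.index << 8) | rxe_get_next_key(-1)" */
}

static inline u32 rkey_to_index(u32 rkey)
{
        return rkey >> 8;               /* what rxe_recheck_mr() feeds to rxe_pool_get_index() */
}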
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 915ad6664321..67184b0281a0 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -115,7 +115,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
struct rxe_ucontext *uc = to_ruc(ibuc);
- rxe_drop_ref(uc);
+ rxe_put(uc);
}
static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
@@ -149,7 +149,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
- rxe_drop_ref(pd);
+ rxe_put(pd);
return 0;
}
@@ -181,7 +181,6 @@ static int rxe_create_ah(struct ib_ah *ibah,
return err;
/* create index > 0 */
- rxe_add_index(ah);
ah->ah_num = ah->elem.index;
if (uresp) {
@@ -189,8 +188,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = copy_to_user(&uresp->ah_num, &ah->ah_num,
sizeof(uresp->ah_num));
if (err) {
- rxe_drop_index(ah);
- rxe_drop_ref(ah);
+ rxe_put(ah);
return -EFAULT;
}
} else if (ah->is_user) {
@@ -230,8 +228,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
- rxe_drop_index(ah);
- rxe_drop_ref(ah);
+ rxe_put(ah);
return 0;
}
@@ -306,7 +303,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (err)
goto err1;
- rxe_add_ref(pd);
+ rxe_get(pd);
srq->pd = pd;
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
@@ -316,8 +313,8 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
return 0;
err2:
- rxe_drop_ref(pd);
- rxe_drop_ref(srq);
+ rxe_put(pd);
+ rxe_put(srq);
err1:
return err;
}
@@ -374,8 +371,8 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
if (srq->rq.queue)
rxe_queue_cleanup(srq->rq.queue);
- rxe_drop_ref(srq->pd);
- rxe_drop_ref(srq);
+ rxe_put(srq->pd);
+ rxe_put(srq);
return 0;
}
@@ -384,8 +381,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
{
int err = 0;
struct rxe_srq *srq = to_rsrq(ibsrq);
+ unsigned long flags;
- spin_lock_bh(&srq->rq.producer_lock);
+ spin_lock_irqsave(&srq->rq.producer_lock, flags);
while (wr) {
err = post_one_recv(&srq->rq, wr);
@@ -394,7 +392,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_bh(&srq->rq.producer_lock);
+ spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
if (err)
*bad_wr = wr;
@@ -437,7 +435,6 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (err)
return err;
- rxe_add_index(qp);
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err)
goto qp_init;
@@ -445,8 +442,7 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
return 0;
qp_init:
- rxe_drop_index(qp);
- rxe_drop_ref(qp);
+ rxe_put(qp);
return err;
}
@@ -493,10 +489,14 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct rxe_qp *qp = to_rqp(ibqp);
+ int ret;
+
+ ret = rxe_qp_chk_destroy(qp);
+ if (ret)
+ return ret;
rxe_qp_destroy(qp);
- rxe_drop_index(qp);
- rxe_drop_ref(qp);
+ rxe_put(qp);
return 0;
}
@@ -638,18 +638,19 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
int err;
struct rxe_sq *sq = &qp->sq;
struct rxe_send_wqe *send_wqe;
+ unsigned long flags;
int full;
err = validate_send_wr(qp, ibwr, mask, length);
if (err)
return err;
- spin_lock_bh(&qp->sq.sq_lock);
+ spin_lock_irqsave(&qp->sq.sq_lock, flags);
full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
- spin_unlock_bh(&qp->sq.sq_lock);
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
return -ENOMEM;
}
@@ -658,7 +659,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
- spin_unlock_bh(&qp->sq.sq_lock);
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
return 0;
}
@@ -738,6 +739,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int err = 0;
struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_rq *rq = &qp->rq;
+ unsigned long flags;
if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
*bad_wr = wr;
@@ -751,7 +753,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
goto err1;
}
- spin_lock_bh(&rq->producer_lock);
+ spin_lock_irqsave(&rq->producer_lock, flags);
while (wr) {
err = post_one_recv(rq, wr);
@@ -762,7 +764,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_bh(&rq->producer_lock);
+ spin_unlock_irqrestore(&rq->producer_lock, flags);
if (qp->resp.state == QP_STATE_ERROR)
rxe_run_task(&qp->resp.task, 1);
@@ -807,7 +809,7 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
rxe_cq_disable(cq);
- rxe_drop_ref(cq);
+ rxe_put(cq);
return 0;
}
@@ -843,8 +845,9 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int i;
struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_cqe *cqe;
+ unsigned long flags;
- spin_lock_bh(&cq->cq_lock);
+ spin_lock_irqsave(&cq->cq_lock, flags);
for (i = 0; i < num_entries; i++) {
cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
@@ -853,7 +856,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
- spin_unlock_bh(&cq->cq_lock);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
return i;
}
@@ -873,8 +876,9 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
struct rxe_cq *cq = to_rcq(ibcq);
int ret = 0;
int empty;
+ unsigned long irq_flags;
- spin_lock_bh(&cq->cq_lock);
+ spin_lock_irqsave(&cq->cq_lock, irq_flags);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
@@ -883,7 +887,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
- spin_unlock_bh(&cq->cq_lock);
+ spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
return ret;
}
@@ -898,8 +902,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
if (!mr)
return ERR_PTR(-ENOMEM);
- rxe_add_index(mr);
- rxe_add_ref(pd);
+ rxe_get(pd);
rxe_mr_init_dma(pd, access, mr);
return &mr->ibmr;
@@ -922,9 +925,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
goto err2;
}
- rxe_add_index(mr);
- rxe_add_ref(pd);
+ rxe_get(pd);
err = rxe_mr_init_user(pd, start, length, iova, access, mr);
if (err)
@@ -933,9 +935,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
return &mr->ibmr;
err3:
- rxe_drop_ref(pd);
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
+ rxe_put(pd);
+ rxe_put(mr);
err2:
return ERR_PTR(err);
}
@@ -957,9 +958,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
goto err1;
}
- rxe_add_index(mr);
-
- rxe_add_ref(pd);
+ rxe_get(pd);
err = rxe_mr_init_fast(pd, max_num_sg, mr);
if (err)
@@ -968,9 +967,8 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
return &mr->ibmr;
err2:
- rxe_drop_ref(pd);
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
+ rxe_put(pd);
+ rxe_put(mr);
err1:
return ERR_PTR(err);
}
@@ -999,32 +997,6 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
return n;
}
-static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
-{
- int err;
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_qp *qp = to_rqp(ibqp);
- struct rxe_mc_grp *grp;
-
- /* takes a ref on grp if successful */
- err = rxe_mcast_get_grp(rxe, mgid, &grp);
- if (err)
- return err;
-
- err = rxe_mcast_add_grp_elem(rxe, qp, grp);
-
- rxe_drop_ref(grp);
- return err;
-}
-
-static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
-{
- struct rxe_dev *rxe = to_rdev(ibqp->device);
- struct rxe_qp *qp = to_rqp(ibqp);
-
- return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
-}
-
static ssize_t parent_show(struct device *device,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index e48969e8d4c8..e7eff1ca75e9 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -157,7 +157,6 @@ struct resp_res {
struct sk_buff *skb;
} atomic;
struct {
- struct rxe_mr *mr;
u64 va_org;
u32 rkey;
u32 length;
@@ -232,9 +231,7 @@ struct rxe_qp {
struct rxe_av pri_av;
struct rxe_av alt_av;
- /* list of mcast groups qp has joined (for cleanup) */
- struct list_head grp_list;
- spinlock_t grp_lock; /* guard grp_list */
+ atomic_t mcg_num;
struct sk_buff_head req_pkts;
struct sk_buff_head resp_pkts;
@@ -353,23 +350,20 @@ struct rxe_mw {
u64 length;
};
-struct rxe_mc_grp {
- struct rxe_pool_elem elem;
- spinlock_t mcg_lock; /* guard group */
+struct rxe_mcg {
+ struct rb_node node;
+ struct kref ref_cnt;
struct rxe_dev *rxe;
struct list_head qp_list;
union ib_gid mgid;
- int num_qp;
+ atomic_t qp_num;
u32 qkey;
u16 pkey;
};
-struct rxe_mc_elem {
- struct rxe_pool_elem elem;
+struct rxe_mca {
struct list_head qp_list;
- struct list_head grp_list;
struct rxe_qp *qp;
- struct rxe_mc_grp *grp;
};
struct rxe_port {
@@ -401,7 +395,12 @@ struct rxe_dev {
struct rxe_pool mr_pool;
struct rxe_pool mw_pool;
struct rxe_pool mc_grp_pool;
- struct rxe_pool mc_elem_pool;
+
+ /* multicast support */
+ spinlock_t mcg_lock;
+ struct rb_root mcg_tree;
+ atomic_t mcg_num;
+ atomic_t mcg_attach;
spinlock_t pending_lock; /* guard pending_mmaps */
struct list_head pending_mmaps;
@@ -482,6 +481,4 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
-void rxe_mc_cleanup(struct rxe_pool_elem *elem);
-
#endif /* RXE_VERBS_H */
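
The rxe_verbs.h changes above replace the keyed mc_grp/mc_elem pools with a per-device rb-tree of struct rxe_mcg, keyed by mgid, protected by mcg_lock and reference-counted with a kref. The tree walk behind rxe_lookup_mcg() is not part of this diff; a rough sketch of such a lookup, assuming the fields declared above and that the caller holds rxe->mcg_lock:

/* Illustrative only; the real lookup lives in rxe_mcast.c. */
static struct rxe_mcg *mcg_tree_find(struct rxe_dev *rxe, union ib_gid *mgid)
{
        struct rb_node *node = rxe->mcg_tree.rb_node;
        struct rxe_mcg *mcg;
        int cmp;

        while (node) {
                mcg = rb_entry(node, struct rxe_mcg, node);
                cmp = memcmp(&mcg->mgid, mgid, sizeof(*mgid));
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else {
                        kref_get(&mcg->ref_cnt);        /* dropped later via rxe_cleanup_mcg */
                        return mcg;
                }
        }

        return NULL;
}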
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 5b05cf3837da..ea16ba5d8da6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -32,7 +32,6 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h> /* For ARPHRD_xxx */
-#include <linux/module.h>
#include <net/rtnetlink.h>
#include "ipoib.h"
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 0322dc75396f..4bd161e86f8d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 20af46c4e954..7e4faf9c5e9e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -203,12 +203,12 @@ struct iser_reg_resources;
*
* @sge: memory region sg element
* @rkey: memory region remote key
- * @mem_h: pointer to registration context (FMR/Fastreg)
+ * @desc: pointer to fast registration context
*/
struct iser_mem_reg {
- struct ib_sge sge;
- u32 rkey;
- void *mem_h;
+ struct ib_sge sge;
+ u32 rkey;
+ struct iser_fr_desc *desc;
};
enum iser_desc_type {
@@ -531,13 +531,12 @@ int iser_post_recvm(struct iser_conn *iser_conn,
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum dma_data_direction dir);
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2490150d3085..bd5f3b5e1727 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -52,30 +52,17 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
struct iser_mem_reg *mem_reg;
int err;
struct iser_ctrl *hdr = &iser_task->desc.iser_header;
- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
err = iser_dma_map_task_data(iser_task,
- buf_in,
ISER_DIR_IN,
DMA_FROM_DEVICE);
if (err)
return err;
- if (scsi_prot_sg_count(iser_task->sc)) {
- struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
-
- err = iser_dma_map_task_data(iser_task,
- pbuf_in,
- ISER_DIR_IN,
- DMA_FROM_DEVICE);
- if (err)
- return err;
- }
-
err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
- return err;
+ goto out_err;
}
mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
@@ -88,6 +75,10 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
(unsigned long long)mem_reg->sge.addr);
return 0;
+
+out_err:
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
+ return err;
}
/* Register user buffer memory and initialize passive rdma
@@ -106,28 +97,16 @@ static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
- buf_out,
ISER_DIR_OUT,
DMA_TO_DEVICE);
if (err)
return err;
- if (scsi_prot_sg_count(iser_task->sc)) {
- struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
-
- err = iser_dma_map_task_data(iser_task,
- pbuf_out,
- ISER_DIR_OUT,
- DMA_TO_DEVICE);
- if (err)
- return err;
- }
-
err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
buf_out->data_len == imm_sz);
- if (err != 0) {
+ if (err) {
iser_err("Failed to register write cmd RDMA mem\n");
- return err;
+ goto out_err;
}
mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
@@ -154,6 +133,10 @@ static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
}
return 0;
+
+out_err:
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
+ return err;
}
/* creates a new tx descriptor and adds header regd buffer */
@@ -619,13 +602,13 @@ static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
struct iser_fr_desc *desc;
if (iser_task->dir[ISER_DIR_IN]) {
- desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
+ desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
if (unlikely(iser_inv_desc(desc, rkey)))
return -EINVAL;
}
if (iser_task->dir[ISER_DIR_OUT]) {
- desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
+ desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
if (unlikely(iser_inv_desc(desc, rkey)))
return -EINVAL;
}
@@ -740,27 +723,16 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- int prot_count = scsi_prot_sg_count(iser_task->sc);
if (iser_task->dir[ISER_DIR_IN]) {
iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
- iser_dma_unmap_task_data(iser_task,
- &iser_task->data[ISER_DIR_IN],
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
DMA_FROM_DEVICE);
- if (prot_count)
- iser_dma_unmap_task_data(iser_task,
- &iser_task->prot[ISER_DIR_IN],
- DMA_FROM_DEVICE);
}
if (iser_task->dir[ISER_DIR_OUT]) {
iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
- iser_dma_unmap_task_data(iser_task,
- &iser_task->data[ISER_DIR_OUT],
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
DMA_TO_DEVICE);
- if (prot_count)
- iser_dma_unmap_task_data(iser_task,
- &iser_task->prot[ISER_DIR_OUT],
- DMA_TO_DEVICE);
}
}
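
After the iSER changes above, a single iser_dma_map_task_data()/iser_dma_unmap_task_data() call per direction covers both the data and the protection scatterlists. A reduced sketch of the READ path already shown in the hunks, assuming the iser driver's headers and not adding new patch content:

static int map_and_register_read(struct iscsi_iser_task *task)
{
        int err;

        err = iser_dma_map_task_data(task, ISER_DIR_IN, DMA_FROM_DEVICE);
        if (err)
                return err;

        err = iser_reg_mem_fastreg(task, ISER_DIR_IN, false);
        if (err)        /* undo the mapping on registration failure */
                iser_dma_unmap_task_data(task, ISER_DIR_IN, DMA_FROM_DEVICE);

        return err;
}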
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 660982625488..29ae2c6a250a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -30,7 +30,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -71,10 +70,10 @@ static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
}
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
+ struct iser_data_buf *data = &iser_task->data[iser_dir];
struct ib_device *dev;
iser_task->dir[iser_dir] = 1;
@@ -85,17 +84,40 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
+
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
+
+ pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
+ if (unlikely(pdata->dma_nents == 0)) {
+ iser_err("protection dma_map_sg failed!!!\n");
+ goto out_unmap;
+ }
+ }
+
return 0;
+
+out_unmap:
+ ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
+ return -EINVAL;
}
+
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum dma_data_direction dir)
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
{
+ struct iser_data_buf *data = &iser_task->data[iser_dir];
struct ib_device *dev;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
- ib_dma_unmap_sg(dev, data->sg, data->size, dir);
+ ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
+
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
+
+ ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
+ }
}
static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
@@ -130,7 +152,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
struct iser_fr_desc *desc;
struct ib_mr_status mr_status;
- desc = reg->mem_h;
+ desc = reg->desc;
if (!desc)
return;
@@ -147,8 +169,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
&mr_status);
}
- iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
- reg->mem_h = NULL;
+ iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
+ reg->desc = NULL;
}
static void iser_set_dif_domain(struct scsi_cmnd *sc,
@@ -327,40 +349,26 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return 0;
}
-static int iser_reg_data_sg(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- struct iser_fr_desc *desc, bool use_dma_key,
- struct iser_mem_reg *reg)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- if (use_dma_key)
- return iser_reg_dma(device, mem, reg);
-
- return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
-}
-
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir dir,
bool all_imm)
{
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
- struct iser_fr_desc *desc = NULL;
+ struct iser_fr_desc *desc;
bool use_dma_key;
int err;
use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
+ if (use_dma_key)
+ return iser_reg_dma(device, mem, reg);
- if (!use_dma_key) {
- desc = iser_reg_desc_get_fr(ib_conn);
- reg->mem_h = desc;
- }
-
+ desc = iser_reg_desc_get_fr(ib_conn);
if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
- err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg);
+ err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
if (unlikely(err))
goto err_reg;
} else {
@@ -372,11 +380,12 @@ int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
desc->sig_protected = true;
}
+ reg->desc = desc;
+
return 0;
err_reg:
- if (desc)
- iser_reg_desc_put_fr(ib_conn, desc);
+ iser_reg_desc_put_fr(ib_conn, desc);
return err;
}
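
The net effect of the iser_memory.c hunks on the mapping helper, written out without diff markers (a condensed sketch; the iser_err() logging on the data-mapping failure is elided):

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct iser_data_buf *data = &iser_task->data[iser_dir];
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;

	iser_task->dir[iser_dir] = 1;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (unlikely(data->dma_nents == 0))
		return -EINVAL;

	/* T10-PI: the protection scatterlist is now mapped by the same helper */
	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pdata = &iser_task->prot[iser_dir];

		pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
		if (unlikely(pdata->dma_nents == 0)) {
			/* Keep the helper balanced: undo the data mapping */
			ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
			return -EINVAL;
		}
	}
	return 0;
}

iser_dma_unmap_task_data() mirrors this, unmapping the protection SG list under the same scsi_prot_sg_count() check, which is what lets the iser_initiator.c callers drop their separate protection-buffer handling.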
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8bf87b073d9b..5dbad68c7390 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -905,7 +904,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector)
{
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
- struct iser_fr_desc *desc = reg->mem_h;
+ struct iser_fr_desc *desc = reg->desc;
unsigned long sector_size = iser_task->sc->device->sector_size;
struct ib_mr_status mr_status;
int ret;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index aeff68f582d3..071f35711468 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -50,7 +50,6 @@
* netdev functionality.
*/
-#include <linux/module.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index b4fa473b7888..d3c436ead694 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -156,8 +156,7 @@ static DEVICE_ATTR_RW(mpath_policy);
static ssize_t add_path_show(struct device *dev,
struct device_attribute *attr, char *page)
{
- return sysfs_emit(
- page,
+ return sysfs_emit(page,
"Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
attr->attr.name);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 759b85f03331..c2c860d0c56e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -297,6 +297,7 @@ static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
return changed;
}
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
@@ -304,16 +305,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) {
- struct rtrs_clt_sess *clt = clt_path->clt;
- unsigned int delay_ms;
-
- /*
- * Normal scenario, reconnect if we were successfully connected
- */
- delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
- msecs_to_jiffies(delay_ms +
- prandom_u32() % RTRS_RECONNECT_SEED));
+ queue_work(rtrs_wq, &clt_path->err_recovery_work);
} else {
/*
* Error can happen just on establishing new connection,
@@ -917,7 +909,7 @@ static inline void path_it_deinit(struct path_it *it)
{
struct list_head *skip, *tmp;
/*
- * The skip_list is used only for the MIN_INFLIGHT policy.
+ * The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
* We need to remove paths from it, so that next IO can insert
* paths (->mp_skip_entry) into a skip_list again.
*/
@@ -1511,6 +1503,22 @@ static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);
+static void rtrs_clt_err_recovery_work(struct work_struct *work)
+{
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
+ int delay_ms;
+
+ clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
+ clt = clt_path->clt;
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
+ msecs_to_jiffies(delay_ms +
+ prandom_u32() %
+ RTRS_RECONNECT_SEED));
+}
+
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
const struct rtrs_addr *path,
size_t con_num, u32 nr_poll_queues)
@@ -1562,6 +1570,7 @@ static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
clt_path->state = RTRS_CLT_CONNECTING;
atomic_set(&clt_path->connected_cnt, 0);
INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
+ INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
rtrs_clt_init_hb(clt_path);
@@ -2326,6 +2335,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
clt_path = container_of(work, struct rtrs_clt_path, close_work);
+ cancel_work_sync(&clt_path->err_recovery_work);
cancel_delayed_work_sync(&clt_path->reconnect_dwork);
rtrs_clt_stop_and_destroy_conns(clt_path);
rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
@@ -2638,7 +2648,6 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
{
struct rtrs_clt_path *clt_path;
struct rtrs_clt_sess *clt;
- unsigned int delay_ms;
int err;
clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
@@ -2655,8 +2664,6 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
}
clt_path->reconnect_attempts++;
- /* Stop everything */
- rtrs_clt_stop_and_destroy_conns(clt_path);
msleep(RTRS_RECONNECT_BACKOFF);
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
err = init_path(clt_path);
@@ -2669,11 +2676,7 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
reconnect_again:
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
clt_path->stats->reconnects.fail_cnt++;
- delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
- msecs_to_jiffies(delay_ms +
- prandom_u32() %
- RTRS_RECONNECT_SEED));
+ queue_work(rtrs_wq, &clt_path->err_recovery_work);
}
}
@@ -2908,6 +2911,7 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
&old_state);
if (changed) {
clt_path->reconnect_attempts = 0;
+ rtrs_clt_stop_and_destroy_conns(clt_path);
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
}
if (changed || old_state == RTRS_CLT_RECONNECTING) {
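
Taken together, the rtrs-clt.c hunks funnel both failure paths into one worker. The new function, reproduced from the additions above without diff markers for readability:

static void rtrs_clt_err_recovery_work(struct work_struct *work)
{
	struct rtrs_clt_path *clt_path =
		container_of(work, struct rtrs_clt_path, err_recovery_work);
	struct rtrs_clt_sess *clt = clt_path->clt;
	int delay_ms = clt->reconnect_delay_sec * 1000;

	/* Tear the connections down in workqueue context, not in the error handler */
	rtrs_clt_stop_and_destroy_conns(clt_path);
	/* Then schedule the reconnect with a randomized back-off */
	queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
			   msecs_to_jiffies(delay_ms +
					    prandom_u32() % RTRS_RECONNECT_SEED));
}

rtrs_rdma_error_recovery() and the failed-reconnect path now simply queue_work() this item; the sysfs-triggered reconnect calls rtrs_clt_stop_and_destroy_conns() itself before queueing reconnect_dwork with no delay, and close_work cancels err_recovery_work ahead of the delayed reconnect work.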
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index d1b18a154ae0..f848c0392d98 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -134,6 +134,7 @@ struct rtrs_clt_path {
struct rtrs_clt_io_req *reqs;
struct delayed_work reconnect_dwork;
struct work_struct close_work;
+ struct work_struct err_recovery_work;
unsigned int reconnect_attempts;
bool established;
struct rtrs_rbuf *rbufs;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 4da889103a5f..60fa0b0160f4 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -479,7 +479,6 @@ static int rtrs_str_to_sockaddr(const char *addr, size_t len,
*/
int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
{
-
switch (addr->sa_family) {
case AF_IB:
return scnprintf(buf, len, "gid:%pI6",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index abccddeea1e3..55a575e2cace 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -92,6 +92,9 @@ enum srp_iu_type {
};
/*
+ * RDMA adapter in the initiator system.
+ *
+ * @dev_list: List of RDMA ports associated with this RDMA adapter (srp_host).
* @mr_page_mask: HCA memory registration page mask.
* @mr_page_size: HCA memory registration page size.
* @mr_max_size: Maximum size in bytes of a single FR registration request.
@@ -109,6 +112,12 @@ struct srp_device {
bool use_fast_reg;
};
+/*
+ * One port of an RDMA adapter in the initiator system.
+ *
+ * @target_list: List of connected target ports (struct srp_target_port).
+ * @target_lock: Protects @target_list.
+ */
struct srp_host {
struct srp_device *srp_dev;
u8 port;
@@ -183,7 +192,7 @@ struct srp_rdma_ch {
};
/**
- * struct srp_target_port
+ * struct srp_target_port - RDMA port in the SRP target system
* @comp_vector: Completion vector used by the first RDMA channel created for
* this target port.
*/