Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c  104
1 file changed, 39 insertions, 65 deletions
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32cb7068f0ca..956f8e875daa 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -68,7 +68,6 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- bool ro_pci_enabled = pcie_relaxed_ordering_enabled(dev->mdev->pdev);
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
@@ -76,12 +75,13 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
- MLX5_SET(mkc, mkc, relaxed_ordering_write,
- (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
- MLX5_SET(mkc, mkc, relaxed_ordering_read,
- (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
+ if ((acc & IB_ACCESS_RELAXED_ORDERING) &&
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev)) {
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ }
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
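
The hunk above hoists the IB_ACCESS_RELAXED_ORDERING / pcie_relaxed_ordering_enabled() test into a single guard, so the PCI query is skipped entirely when the caller did not ask for relaxed ordering. A minimal standalone sketch of that shape, using stand-in names rather than the real mlx5/IB symbols:

/* Standalone sketch (not the kernel code): the hoisted check means the
 * PCI relaxed-ordering query only runs when the caller actually asked
 * for relaxed ordering.  All names below are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define ACCESS_RELAXED_ORDERING (1u << 0)   /* stand-in for IB_ACCESS_RELAXED_ORDERING */

static bool pci_ro_enabled(void)            /* stand-in for pcie_relaxed_ordering_enabled() */
{
	puts("queried PCI relaxed-ordering state");
	return true;
}

static void set_mkey_ro(unsigned int acc, bool cap_ro_write, bool cap_ro_read,
			bool *ro_write, bool *ro_read)
{
	*ro_write = false;
	*ro_read = false;

	/* single guard: skip both the PCI query and the cap checks
	 * when relaxed ordering was not requested at all */
	if ((acc & ACCESS_RELAXED_ORDERING) && pci_ro_enabled()) {
		if (cap_ro_write)
			*ro_write = true;
		if (cap_ro_read)
			*ro_read = true;
	}
}

int main(void)
{
	bool w, r;

	set_mkey_ro(0, true, true, &w, &r);                      /* no PCI query here */
	set_mkey_ro(ACCESS_RELAXED_ORDERING, true, true, &w, &r);
	printf("ro_write=%d ro_read=%d\n", w, r);
	return 0;
}
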
@@ -189,6 +189,25 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
spin_unlock_irqrestore(&ent->lock, flags);
}
+static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
+{
+ int ret = 0;
+
+ switch (access_mode) {
+ case MLX5_MKC_ACCESS_MODE_MTT:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_mtt));
+ break;
+ case MLX5_MKC_ACCESS_MODE_KSM:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_klm));
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
+}
+
static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
{
struct mlx5_ib_mr *mr;
@@ -204,7 +223,8 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_mkc_octo_size(ent->access_mode, ent->ndescs));
MLX5_SET(mkc, mkc, log_page_size, ent->page);
return mr;
}
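
The new get_mkc_octo_size() helper derives translations_octword_size from the entry's descriptor count instead of storing a precomputed xlt value. A standalone sketch of the arithmetic, assuming the usual sizes purely for illustration (a 16-byte octoword, 8-byte MTT entries, 16-byte KLM entries):

/* Standalone sketch (not the kernel code): how many octowords a given
 * number of translation descriptors occupies.  Sizes are assumptions
 * for illustration, not taken from the mlx5 headers.
 */
#include <stdio.h>

#define OCTOWORD_BYTES 16u          /* assumed MLX5_IB_UMR_OCTOWORD */
#define MTT_BYTES      8u           /* assumed sizeof(struct mlx5_mtt) */
#define KLM_BYTES      16u          /* assumed sizeof(struct mlx5_klm) */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int octo_size_mtt(unsigned int ndescs)
{
	/* two 8-byte MTT entries fit in one 16-byte octoword */
	return DIV_ROUND_UP(ndescs, OCTOWORD_BYTES / MTT_BYTES);
}

static unsigned int octo_size_ksm(unsigned int ndescs)
{
	/* one 16-byte KLM entry per octoword */
	return DIV_ROUND_UP(ndescs, OCTOWORD_BYTES / KLM_BYTES);
}

int main(void)
{
	printf("MTT, 256 descs -> %u octowords\n", octo_size_mtt(256));
	printf("KSM, 256 descs -> %u octowords\n", octo_size_ksm(256));
	return 0;
}
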
@@ -478,14 +498,14 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
return;
if (ent->available_mrs < ent->limit) {
ent->fill_to_high_water = true;
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->fill_to_high_water &&
ent->available_mrs + ent->pending < 2 * ent->limit) {
/*
* Once we start populating due to hitting a low water mark
* continue until we pass the high water mark.
*/
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->available_mrs == 2 * ent->limit) {
ent->fill_to_high_water = false;
} else if (ent->available_mrs > 2 * ent->limit) {
@@ -495,7 +515,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(1000));
else
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
}
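
These hunks fold the immediate-work path into the existing delayed_work: mod_delayed_work(wq, &dwork, 0) runs the handler right away and also overrides any pending delayed run, so the separate work_struct becomes unnecessary. A minimal, hypothetical module sketch of that single-work-item pattern (not the mlx5 code):

/* Hypothetical module sketch: one delayed_work serves both the "run in
 * a while" and the "run now" paths.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *wq;
static struct delayed_work dw;

static void dw_func(struct work_struct *work)
{
	pr_info("cache maintenance ran\n");
}

static int __init sketch_init(void)
{
	wq = alloc_workqueue("sketch_wq", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&dw, dw_func);

	/* relaxed path: run in ~1s if nothing urgent happens */
	queue_delayed_work(wq, &dw, msecs_to_jiffies(1000));

	/* urgent path: pull the same work item forward to "now" */
	mod_delayed_work(wq, &dw, 0);
	return 0;
}

static void __exit sketch_exit(void)
{
	cancel_delayed_work_sync(&dw);
	destroy_workqueue(wq);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
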
@@ -571,33 +591,20 @@ static void delayed_cache_work_func(struct work_struct *work)
__cache_work_func(ent);
}
-static void cache_work_func(struct work_struct *work)
-{
- struct mlx5_cache_ent *ent;
-
- ent = container_of(work, struct mlx5_cache_ent, work);
- __cache_work_func(ent);
-}
-
-/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- unsigned int entry, int access_flags)
+ struct mlx5_cache_ent *ent,
+ int access_flags)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
- if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
- entry >= ARRAY_SIZE(cache->ent)))
- return ERR_PTR(-EINVAL);
-
/* Matches access in alloc_cache_mr() */
if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
return ERR_PTR(-EOPNOTSUPP);
- ent = &cache->ent[entry];
spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
+ queue_adjust_cache_locked(ent);
+ ent->miss++;
spin_unlock_irq(&ent->lock);
mr = create_cache_mr(ent);
if (IS_ERR(mr))
@@ -611,32 +618,9 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
mlx5_clear_mr(mr);
}
- mr->access_flags = access_flags;
return mr;
}
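
With the index-based entry point gone, mlx5_mr_cache_alloc() takes the cache entry directly, and on a miss it now bumps the miss counter and kicks the refill while still holding the lock before falling back to a synchronous create_cache_mr(). A standalone sketch of that hit/miss flow, with stand-in names and a pthread mutex in place of the entry spinlock:

/* Standalone sketch (illustrative only): account the miss and schedule
 * the refill under the lock, then let the caller allocate synchronously.
 */
#include <pthread.h>
#include <stdio.h>

struct cache_ent {
	pthread_mutex_t lock;
	int available;          /* MRs sitting on the free list */
	unsigned long miss;     /* cache-miss counter */
};

static void queue_refill_locked(struct cache_ent *ent)
{
	/* stand-in for queue_adjust_cache_locked(): schedule background refill */
	printf("refill scheduled (available=%d)\n", ent->available);
}

static int cache_alloc(struct cache_ent *ent)
{
	pthread_mutex_lock(&ent->lock);
	if (ent->available == 0) {
		/* miss path: account it and kick the refill before unlocking */
		queue_refill_locked(ent);
		ent->miss++;
		pthread_mutex_unlock(&ent->lock);
		return 1;               /* caller creates an MR synchronously */
	}
	ent->available--;
	queue_refill_locked(ent);       /* may top the free list back up */
	pthread_mutex_unlock(&ent->lock);
	return 0;                       /* reused a cached MR */
}

int main(void)
{
	struct cache_ent ent = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.available = 1,
		.miss = 0,
	};

	printf("first alloc: %s\n", cache_alloc(&ent) ? "miss" : "hit");
	printf("second alloc: %s\n", cache_alloc(&ent) ? "miss" : "hit");
	printf("misses: %lu\n", ent.miss);
	return 0;
}
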
-/* Return a MR already available in the cache */
-static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
-{
- struct mlx5_ib_mr *mr = NULL;
- struct mlx5_cache_ent *ent = req_ent;
-
- spin_lock_irq(&ent->lock);
- if (!list_empty(&ent->head)) {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
- mlx5_clear_mr(mr);
- return mr;
- }
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
- req_ent->miss++;
- return NULL;
-}
-
static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
@@ -739,7 +723,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->dev = dev;
ent->limit = 0;
- INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
if (i > MR_CACHE_LAST_STD_ENTRY) {
@@ -751,8 +734,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
continue;
ent->page = PAGE_SHIFT;
- ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
- MLX5_IB_UMR_OCTOWORD;
+ ent->ndescs = 1 << ent->order;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
!dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
@@ -783,7 +765,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
spin_lock_irq(&ent->lock);
ent->disabled = true;
spin_unlock_irq(&ent->lock);
- cancel_work_sync(&ent->work);
cancel_delayed_work_sync(&ent->dwork);
}
@@ -972,16 +953,9 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
return mr;
}
- mr = get_cache_mr(ent);
- if (!mr) {
- mr = create_cache_mr(ent);
- /*
- * The above already tried to do the same stuff as reg_create(),
- * no reason to try it again.
- */
- if (IS_ERR(mr))
- return mr;
- }
+ mr = mlx5_mr_cache_alloc(dev, ent, access_flags);
+ if (IS_ERR(mr))
+ return mr;
mr->ibmr.pd = pd;
mr->umem = umem;