/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
 */

#include <ib_smi.h>
#include <linux/mm.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

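/*
 * The query methods below all work the same way: build a Subnet
 * Management Packet (SMP), hand it to the device's own SMA through
 * the MAD_IFC firmware command, and pick the interesting fields out
 * of the response.  Offsets into out_mad->data follow the attribute
 * layouts (NodeInfo, PortInfo, ...) defined by the IB spec.
 */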
static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);

        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver = mdev->fw_ver;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->device_cap_flags = mdev->device_cap_flags;
        props->vendor_id        = be32_to_cpup((u32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id   = be16_to_cpup((u16 *) (out_mad->data + 30));
        props->hw_ver           = be16_to_cpup((u16 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
        memcpy(&props->node_guid,      out_mad->data + 12, 8);

        props->max_mr_size         = ~0ull;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = 0xffff;
        props->max_sge             = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = 0xffff;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = 1 << mdev->qp_table.rdb_shift;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;

        err = 0;
 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod      = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->lid            = be16_to_cpup((u16 *) (out_mad->data + 16));
        props->lmc            = out_mad->data[34] & 0x7;
        props->sm_lid         = be16_to_cpup((u16 *) (out_mad->data + 18));
        props->sm_sl          = out_mad->data[36] & 0xf;
        props->state          = out_mad->data[32] & 0xf;
        props->phys_state     = out_mad->data[33] >> 4;
        props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20));
        props->gid_tbl_len    = to_mdev(ibdev)->limits.gid_table_len;
        props->pkey_tbl_len   = to_mdev(ibdev)->limits.pkey_table_len;
        props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48));
        props->active_width   = out_mad->data[31] & 0xf;
        props->active_speed   = out_mad->data[35] >> 4;

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

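/*
 * The port capability mask is updated with a read-modify-write
 * (query the current flags, merge in set_port_cap_mask, clear
 * clr_port_cap_mask, then SET_IB), so cap_mask_mutex serializes
 * modifiers to keep two concurrent updates from clobbering each
 * other.
 */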
static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;
        u8 status;

        if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

out:
        up(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

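/*
 * The P_Key table is read 32 entries at a time: attr_mod selects the
 * 32-entry block (index / 32), and the requested entry is pulled out
 * of the response at position index % 32.
 */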
static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod      = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

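/*
 * A GID is the port's 8-byte subnet prefix followed by an 8-byte
 * GUID, so building one takes two queries: the prefix from PortInfo,
 * and the GUID from the GuidInfo block (eight GUIDs per block,
 * selected by index / 8).
 */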
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod      = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw, out_mad->data + 8, 8);

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod      = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        /* GUIDs are 8 bytes each, so stride by 8 within the block */
        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

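/*
 * A userspace context owns one UAR (the doorbell page the process
 * will mmap) and, on memfree HCAs, a table of userspace doorbell
 * records.  qp_tab_size and uarc_size are passed back so the
 * userspace library can size its data structures to match.
 */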
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext *context;
        int err;

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}

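/*
 * The only thing userspace may mmap is its UAR: exactly one page,
 * mapped uncached since it is doorbell MMIO space, not ordinary
 * memory.
 */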
static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (remap_pfn_range(vma, vma->vm_start,
                            to_mucontext(context)->uar.pfn,
                            PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        /*
         * AHs may be created from contexts where sleeping is not
         * allowed (e.g. completion handlers), so the allocation must
         * be atomic.
         */
        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}

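/*
 * RC/UC/UD QPs get their queue pair number from the allocator; the
 * special QPs (SMI/GSI) use the fixed numbers 0 and 1 and carry
 * extra state, hence the larger mthca_sqp allocation.
 */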
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_qp *qp;
        int err;

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->sq.max    = init_attr->cap.max_send_wr;
                qp->rq.max    = init_attr->cap.max_recv_wr;
                qp->sq.max_gs = init_attr->cap.max_send_sge;
                qp->rq.max_gs = init_attr->cap.max_recv_sge;

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     qp);
                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->sq.max    = init_attr->cap.max_send_wr;
                qp->rq.max    = init_attr->cap.max_recv_wr;
                qp->sq.max_gs = init_attr->cap.max_send_sge;
                qp->rq.max_gs = init_attr->cap.max_recv_sge;

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_inline_data = 0;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);
        return 0;
}

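/*
 * For a userspace CQ, the consumer-index and arm doorbells live in
 * pages supplied by the user and must be mapped before the CQ itself
 * is created; the error unwinding below undoes those mappings in
 * reverse order.  The CQE count is rounded up to the next power of
 * two, as the hardware requires.
 */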
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->mr.ibmr.lkey    = ucmd.lkey;
                cq->set_ci_db_index = ucmd.set_db_index;
                cq->arm_db_index    = ucmd.arm_db_index;
        }

        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

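/*
 * Translate IB access flags into the MPT flags the hardware
 * understands.  Local read access is always granted; the remaining
 * rights are granted only if requested.
 */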
static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        return &mr->ibmr;
}

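/*
 * Registering a list of physical buffers boils down to finding the
 * largest page shift that still covers every buffer, so the region
 * can be described to the hardware with as few translation entries
 * as possible.
 */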
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int num_phys_buf,
                                       int acc,
                                       u64 *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        u64 mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        mask = 0;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
                        return ERR_PTR(-EINVAL);
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return ERR_PTR(-EINVAL);

                total_size += buffer_list[i].size;
                if (i > 0)
                        mask |= buffer_list[i].addr;
        }

        /* Find largest page shift we can use to cover buffers */
        for (shift = PAGE_SHIFT; shift < 31; ++shift)
                if (num_phys_buf > 1) {
                        if ((1ULL << shift) & mask)
                                break;
                } else {
                        if (1ULL << shift >=
                            buffer_list[0].size +
                            (buffer_list[0].addr & ((1ULL << shift) - 1)))
                                break;
                }

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);

        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        return &mr->ibmr;
}

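/*
 * Userspace registration walks the pinned region's chunk list and
 * writes out MTT entries in batches of at most
 * PAGE_SIZE / sizeof (u64) - 2, leaving room in each WRITE_MTT
 * mailbox for its index and reserved fields.
 */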
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
                                       int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;

        shift = ffs(region->page_size) - 1;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        n = 0;
        list_for_each_entry(chunk, &region->chunk_list, list)
                n += chunk->nents;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        list_for_each_entry(chunk, &region->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        region->page_size * k;
                                /*
                                 * Be friendly to WRITE_MTT command
                                 * and leave two empty slots for the
                                 * index and reserved fields of the
                                 * mailbox.
                                 */
                                if (i == PAGE_SIZE / sizeof (u64) - 2) {
                                        err = mthca_write_mtt(dev, mr->mtt,
                                                              n, pages, i);
                                        if (err)
                                                goto mtt_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
                             region->length, convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err:
        kfree(mr);
        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);
        mthca_free_mr(to_mdev(mr->device), mmr);
        kfree(mmr);
        return 0;
}

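/*
 * Fast memory regions (FMRs) let a consumer remap a region onto new
 * pages without a full deregister/reregister cycle; unmaps are
 * deferred and made visible in bulk (see mthca_unmap_fmr below).
 */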
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

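/*
 * FMR unmaps only take effect at the SYNC_TPT command, so the whole
 * list is unmapped in one pass and synced once.  Every FMR in the
 * list must belong to the same device for that to work.
 */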
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        u8 status;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev, &status);
        if (err)
                return err;
        if (status)
                return -EINVAL;
        return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
        &class_device_attr_hw_rev,
        &class_device_attr_fw_ver,
        &class_device_attr_hca_type
};

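/*
 * Fill in the ib_device method table and register with the IB core.
 * The fast-path entry points (CQ arm, post send/receive) differ
 * between memfree and Tavor-mode hardware, so the right set is
 * chosen once, at registration time.
 */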
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner            = THIS_MODULE;

        dev->ib_dev.node_type        = IB_NODE_CA;
        dev->ib_dev.phys_port_cnt    = dev->limits.num_ports;
        dev->ib_dev.dma_device       = &dev->pdev->dev;
        dev->ib_dev.class_dev.dev    = &dev->pdev->dev;
        dev->ib_dev.query_device     = mthca_query_device;
        dev->ib_dev.query_port       = mthca_query_port;
        dev->ib_dev.modify_port      = mthca_modify_port;
        dev->ib_dev.query_pkey       = mthca_query_pkey;
        dev->ib_dev.query_gid        = mthca_query_gid;
        dev->ib_dev.alloc_ucontext   = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
        dev->ib_dev.mmap             = mthca_mmap_uar;
        dev->ib_dev.alloc_pd         = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd       = mthca_dealloc_pd;
        dev->ib_dev.create_ah        = mthca_ah_create;
        dev->ib_dev.destroy_ah       = mthca_ah_destroy;
        dev->ib_dev.create_qp        = mthca_create_qp;
        dev->ib_dev.modify_qp        = mthca_modify_qp;
        dev->ib_dev.destroy_qp       = mthca_destroy_qp;
        dev->ib_dev.create_cq        = mthca_create_cq;
        dev->ib_dev.destroy_cq       = mthca_destroy_cq;
        dev->ib_dev.poll_cq          = mthca_poll_cq;
        dev->ib_dev.get_dma_mr       = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr      = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr      = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr         = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr   = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr   = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast = mthca_multicast_attach;
        dev->ib_dev.detach_mcast = mthca_multicast_detach;
        dev->ib_dev.process_mad  = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send     = mthca_arbel_post_send;
                dev->ib_dev.post_recv     = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send     = mthca_tavor_post_send;
                dev->ib_dev.post_recv     = mthca_tavor_post_receive;
        }

        init_MUTEX(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
                ret = class_device_create_file(&dev->ib_dev.class_dev,
                                               mthca_class_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        ib_unregister_device(&dev->ib_dev);
}