/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

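/*
 * Send a LID-routed SubnTrap() SMP carrying @data to the subnet manager
 * via the port's send agent.  Traps are suppressed while the link is not
 * ACTIVE (o14-3.2.1) and while the repress interval from the previous
 * trap is still running (o14-2).
 */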
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->tid++;
	smp->tid = cpu_to_be64(ibp->tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->lock, flags);
	if (!ibp->sm_ah) {
		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;
			struct ib_ah_attr attr;

			memset(&attr, 0, sizeof attr);
			attr.dlid = ibp->sm_lid;
			attr.port_num = ppd_from_ibp(ibp)->port;
			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
			if (IS_ERR(ah))
				ret = -EINVAL;
			else {
				send_buf->ah = ah;
				ibp->sm_ah = to_iah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->trap_timeout = 0;
	}
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->pkey_violations++;
	else
		ibp->qkey_violations++;
	ibp->n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof data);
}

static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

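/*
 * Fill in a Subn Get(NodeInfo) reply.  GUID 0 is illegal, so an
 * unprogrammed port GUID (or a bad attribute modifier/port index) is
 * reported as an invalid field.
 */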
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 vendor, majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->vendorid;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}

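/*
 * Return a block of the port GUID table.  The attribute modifier selects
 * one of 32 blocks of 8 GUIDs; only block 0 is implemented, backed by the
 * read-only hardware GUID plus the writable ibp->guids[] entries.
 */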
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}

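/*
 * Validate the M_Key of an incoming SMP.  Returns 0 if the MAD may be
 * processed, 1 if it must be discarded: a Get when the protection level
 * is 2 or higher, or any Set/TrapRepress, with a mismatched key counts
 * as a violation and queues a Bad M_Key trap.
 */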
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->mkey_lease_timeout = 0;
		ibp->mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
	    ibp->mkey == smp->mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->mkey_lease_timeout &&
	    (smp->method == IB_MGMT_METHOD_GET ||
	     smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (smp->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->mkeyprot < 2)
				break;
			/* FALLTHROUGH */
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->mkey_violations != 0xFFFF)
				++ibp->mkey_violations;
			if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
				ibp->mkey_lease_timeout = jiffies +
					ibp->mkey_lease_period * HZ;
			/* Generate a trap notice. */
			qib_bad_mkey(ibp, smp);
			ret = 1;
		}
	}

	return ret;
}

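/*
 * Fill in a Subn Get(PortInfo) reply.  An attribute modifier of zero
 * means the port the SMP arrived on; a nonzero modifier names an
 * explicit port and is M_Key checked before any data is returned.
 */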
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret)
				ret = IB_MAD_RESULT_FAILURE;
			goto bail;
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->mkey != smp->mkey &&
	      ibp->mkeyprot == 1))
		pip->mkey = ibp->mkey;
	pip->gid_prefix = ibp->gid_prefix;
	pip->lid = cpu_to_be16(ppd->lid);
	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}

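/*
 * Return a block of the P_Key table.  The low 16 bits of the attribute
 * modifier select one of 64 blocks of 32 entries; only block 0 is
 * implemented.  Entries are converted to network byte order in place.
 */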
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->mkey = pip->mkey;
	ibp->gid_prefix = pip->gid_prefix;
	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
		spin_lock_irqsave(&ibp->lock, flags);
		if (ibp->sm_ah) {
			if (smlid != ibp->sm_lid)
				ibp->sm_ah->attr.dlid = smlid;
			if (msl != ibp->sm_sl)
				ibp->sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->lock, flags);
		if (smlid != ibp->sm_lid)
			ibp->sm_lid = smlid;
		if (msl != ibp->sm_sl)
			ibp->sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				ibp->vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	/* restore re-reg bit per o14-12.2.1 */
	pip->clientrereg_resv_subnetto |= clientrereg;

	goto get_only;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race.  Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.ibdev;
		event.element.port_num = 1;
		ib_dispatch_event(&event);
	}
	return 0;
}

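/*
 * Apply a Subn Set(P_KeyTable): byte-swap the incoming block in place,
 * push it into the context-0 table (updating hardware refcounts), and
 * reply with the resulting table contents.
 */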
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}

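/*
 * Get/Set the VL arbitration tables.  The upper 16 bits of the attribute
 * modifier select which table; only the low- and high-priority tables
 * for elements 0-31 are supported.
 */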
static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

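/*
 * Fill in a PerfMgt ClassPortInfo reply: extended-width counters are
 * supported, the high bit of CM2 advertises congestion statistics when
 * the hardware can count PortXmitWait, and AllPortSelect is not valid.
 */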
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
				 struct ib_device *ibdev)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	p->reserved[0] = dd->psxmitwait_supported << 7;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
	p->tag = cpu_to_be16(ibp->pma_tag);
	p->counter_select[0] = ibp->pma_counter_select[0];
	p->counter_select[1] = ibp->pma_counter_select[1];
	p->counter_select[2] = ibp->pma_counter_select[2];
	p->counter_select[3] = ibp->pma_counter_select[3];
	p->counter_select[4] = ibp->pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->pma_tag = be16_to_cpu(p->tag);
		ibp->pma_counter_select[0] = p->counter_select[0];
		ibp->pma_counter_select[1] = p->counter_select[1];
		ibp->pma_counter_select[2] = p->counter_select[2];
		ibp->pma_counter_select[3] = p->counter_select[3];
		ibp->pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
				      ibp->pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

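/*
 * Read one of the PS* sampling counters from the hardware, selected by
 * the PMA counter-select code.  Unknown selectors read as zero.
 */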
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 delta;

	delta = get_counter(&ppd->ibport_data, ppd,
			    IB_PMA_PORT_XMIT_WAIT);
	return ppd->cong_stats.counter + delta;
}

static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->cong_stats.counter_cache.psxmitdata;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->cong_stats.counter_cache.psrcvdata;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->cong_stats.counter_cache.psxmitpkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->cong_stats.counter_cache.psrcvpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->cong_stats.counter_cache.psxmitwait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

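/*
 * Report sample results from the cached counters.  When a sample run
 * completes, the hardware values are cached and the sampling timer is
 * handed back to the congestion-statistics code.
 */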
static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}

static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}

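/*
 * Handle a PortCounters Set: record baselines for the selected
 * counters so subsequent Gets report values relative to this point,
 * then return the (now cleared) counters via pma_get_portcounters().
 */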
static int pma_set_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}

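/*
 * Handle a congestion-class PortCounters Set.  For this attribute the
 * counter select is carried in the top byte of attr_mod; the response
 * is generated first so it reports the values being cleared.
 */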
static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}

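/*
 * Handle a PortCountersExt Set: save 64-bit baselines for the selected
 * data/packet counters and zero the software-maintained unicast and
 * multicast packet counts.
 */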
static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->n_multicast_rcv = 0;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}

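/*
 * process_subn - dispatch an incoming subnet management packet.
 * Checks the class version and M_Key, then routes the SMP to the
 * appropriate subn_get_ or subn_set_ handler based on method and
 * attribute ID.
 */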
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	ret = check_mkey(ibp, smp, mad_flags);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * For a PortInfo Get/Set, the M_Key has already been
		 * checked when the MAD is for another port and the M_Key
		 * is OK on the receiving port.  This check is needed to
		 * increment the error counters when the M_Key fails to
		 * match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		ret = IB_MAD_RESULT_FAILURE;
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}

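/*
 * process_perf - dispatch an incoming performance management packet
 * to the pma_get_ and pma_set_ handlers based on method and
 * attribute ID.
 */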
static int process_perf(struct ib_device *ibdev, u8 port,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}

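/* MAD send completion handler: release the send buffer. */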
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

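/*
 * Per-port timer callback, run once a second: if a hardware sample
 * just finished, cache its counters; then refresh the transmit-wait
 * delta and restart the sampling interval.  If a sample is still in
 * progress, just wait for the next tick.
 */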
static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}

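/*
 * Register a MAD send agent on QP0 for each port and start the
 * per-port congestion-statistics timer.  On failure, any agents
 * already registered are torn down again.
 */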
int qib_create_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;
	int ret;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
					      NULL, 0, send_handler,
					      NULL, NULL);
		if (IS_ERR(agent)) {
			ret = PTR_ERR(agent);
			goto err;
		}

		/* Initialize xmit_wait structure */
		dd->pport[p].cong_stats.counter = 0;
		init_timer(&dd->pport[p].cong_stats.timer);
		dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
		dd->pport[p].cong_stats.timer.data =
			(unsigned long)(&dd->pport[p]);
		dd->pport[p].cong_stats.timer.expires = 0;
		add_timer(&dd->pport[p].cong_stats.timer);

		ibp->send_agent = agent;
	}

	return 0;

err:
	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
	}

	return ret;
}

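/*
 * Undo qib_create_agents(): unregister the MAD agents, destroy any
 * cached SM address handle, and stop the congestion-statistics timers.
 */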
void qib_free_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
		if (ibp->sm_ah) {
			ib_destroy_ah(&ibp->sm_ah->ibah);
			ibp->sm_ah = NULL;
		}
		if (dd->pport[p].cong_stats.timer.data)
			del_timer_sync(&dd->pport[p].cong_stats.timer);
	}
}