/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct kmem_cache *isert_cmd_cache;

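/*
 * Async QP event handler. IB_EVENT_COMM_EST (typically raised when data
 * arrives before the CM considers the connection established) is forwarded
 * back into the RDMA CM via rdma_notify() so establishment can make
 * progress; all other events are only logged.
 */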
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

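/*
 * Create the RC QP for a new connection. The completion vector is chosen
 * by scanning cq_active_qps[] under device_list_mutex for the CQ pair
 * currently serving the fewest QPs, spreading connections across the
 * per-device RX/TX CQs set up in isert_create_device_ib_res().
 */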
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct ib_device_attr devattr;
	int ret, index, min_index = 0;

	memset(&devattr, 0, sizeof(struct ib_device_attr));
	ret = isert_query_device(cma_id->device, &devattr);
	if (ret)
		return ret;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, devattr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed: %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

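/*
 * Allocate and DMA-map the ring of ISERT_QP_MAX_RECV_DTOS receive
 * descriptors used for full-feature phase PDUs. Each descriptor is mapped
 * once here and only synced (not remapped) per completion; on a partial
 * mapping failure every already-mapped descriptor is unwound before the
 * array is freed.
 */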
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

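/*
 * One-time per struct ib_device setup: a PD, a local DMA MR, and one
 * RX/TX CQ pair per completion vector (capped by ISERT_MAX_CQ and the
 * number of online CPUs). CQ depth is clamped to the device's
 * dev_attr.max_cqe so ib_create_cq() is never asked for more CQEs than
 * the HCA supports.
 */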
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	int ret = 0, i, j;
	int max_rx_cqe, max_tx_cqe;
	struct ib_device_attr dev_attr;

	memset(&dev_attr, 0, sizeof(struct ib_device_attr));
	ret = isert_query_device(device->ib_device, &dev_attr);
	if (ret)
		return ret;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr.max_cqe);
	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr.max_cqe);

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_rx_cqe, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_tx_cqe, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	ib_dereg_mr(device->dev_mr);
	ib_dealloc_pd(device->dev_pd);
	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

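/*
 * Look up (or create) the isert_device for a new CM connection. Devices
 * are cached on device_list keyed by node_guid, so all connections
 * arriving on the same HCA share one PD/MR/CQ set; the reference taken
 * here is dropped again via isert_device_try_release().
 */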
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

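/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocates the isert_conn, the
 * contiguous login request/response buffer pair, and the QP, then parks
 * the connection on np_accept_list and wakes the iscsi_np accept thread
 * via np_sem. Nothing is accepted here; the accept path completes the
 * exchange later.
 */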
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->context;

	pr_info("conn %p\n", isert_conn);

	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->conn_kref);
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		/*
		 * No flush completions will occur as we didn't
		 * get to ISER_CONN_FULL_FEATURE yet, complete
		 * to allow teardown progress.
		 */
		complete(&isert_conn->conn_wait_comp_err);
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		pr_info("Terminating conn %p state %d\n",
			isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			pr_warn("Failed rdma_disconnect isert_conn %p\n",
				isert_conn);
		break;
	default:
		pr_warn("conn %p terminating in state %d\n",
			isert_conn, isert_conn->state);
	}
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn;

	if (!cma_id->qp) {
		struct isert_np *isert_np = cma_id->context;

		isert_np->np_cm_id = NULL;
		return -1;
	}

	isert_conn = (struct isert_conn *)cma_id->context;

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
}

static void
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

	isert_put_conn(isert_conn);
}

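/*
 * Single dispatch point for all RDMA CM events on both the listener and
 * per-connection cm_ids: connect requests build a new isert_conn,
 * ESTABLISHED moves it to ISER_CONN_UP, the disconnect-like events funnel
 * into isert_disconnected_handler(), and the failure events drop the
 * connection reference.
 */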
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		isert_connect_error(cma_id);
		break;
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

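/*
 * Post up to @count receive WRs, chained into a single ib_post_recv()
 * call. conn_rx_desc_head wraps with a mask, which relies on
 * ISERT_QP_MAX_RECV_DTOS being a power of two; the head is only advanced
 * once the post succeeds.
 */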
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->send_flags = IB_SEND_SIGNALED;
	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}

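/*
 * Send a login response PDU. The response payload is staged in the
 * pre-mapped login_rsp_buf and attached as a second SGE. Once the final
 * response goes out (login_complete), the full-feature RX descriptor ring
 * is allocated and pre-posted and the connection moves to
 * ISER_CONN_FULL_FEATURE; otherwise another single login receive is
 * posted for the next request in the exchange.
 */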
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		dump_stack();
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	complete(&isert_conn->conn_login_comp);
}

static void
isert_release_cmd(struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
						   iscsi_cmd);

	pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);

	kfree(cmd->buf_ptr);
	kfree(cmd->tmr_req);

	kmem_cache_free(isert_cmd_cache, isert_cmd);
}

static struct iscsi_cmd
*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;

	isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
	if (!isert_cmd) {
		pr_err("Unable to allocate isert_cmd\n");
		return NULL;
	}
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;

	return &isert_cmd->iscsi_cmd;
}

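/*
 * Handle an incoming SCSI command PDU. Any immediate data that arrived
 * with the PDU is copied straight from the receive descriptor into the
 * se_cmd scatterlist before the command is sequenced; remaining
 * unsolicited Data-Out PDUs are then expected separately.
 */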
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
		      unsigned char *buf)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

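/*
 * Dispatch a received PDU by iSCSI opcode. For SCSI commands the remote
 * stags/VAs parsed from the iSER header are stashed in the isert_cmd so
 * later RDMA_READ/RDMA_WRITE work requests can target the initiator's
 * buffers.
 */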
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

	if (wr->sge) {
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	kfree(wr->send_wr);
	wr->send_wr = NULL;

	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
}

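/*
 * Final per-opcode teardown of a command: unlink it from the connection
 * list, release transport resources, and hand it back to the core.
 * @comp_err marks the flush-error path, where a WRITE still in
 * TRANSPORT_WRITE_PENDING owes an extra target_put_sess_cmd() before
 * transport_generic_free_cmd().
 */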
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		isert_unmap_cmd(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		isert_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->sense_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
				    isert_cmd->sense_buf_len, DMA_TO_DEVICE);
		isert_cmd->sense_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

	iscsit_stop_dataout_timer(cmd);

	if (wr->sge) {
		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	if (isert_cmd->ib_sge) {
		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
		kfree(isert_cmd->ib_sge);
		isert_cmd->ib_sge = NULL;
	}

	cmd->write_data_done = se_cmd->data_length;
	wr->send_wr_num = 0;

	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

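/*
 * Error/flush completion handling. TX flush errors release the descriptor
 * (and command, if any) with comp_err = true; the RX side returns until
 * all posted receive buffers have drained, then quiesces outstanding
 * session commands, terminates the connection, and signals
 * conn_wait_comp_err so teardown can proceed.
 */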
static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

	if (!isert_cmd)
		isert_unmap_tx_desc(tx_desc, ib_dev);
	else
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}

static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	if (isert_conn->post_recv_buf_count)
		return;

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}

	while (atomic_read(&isert_conn->post_send_buf_count))
		msleep(3000);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

	complete(&isert_conn->conn_wait_comp_err);
}

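/*
 * CQ processing is deferred to workqueue context: the IB completion
 * callbacks below only queue cq_tx_work/cq_rx_work, and these workers
 * drain the CQ with ib_poll_cq() one WC at a time before re-arming
 * notification with ib_req_notify_cq().
 */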
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			atomic_dec(&isert_conn->post_send_buf_count);
			isert_cq_tx_comp_err(tx_desc, isert_conn);
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}

static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				pr_debug("RX wc.status: 0x%08x\n", wc.status);

			isert_conn->post_recv_buf_count--;
			isert_cq_rx_comp_err(isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
		return ret;
	}
	return ret;
}

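/*
 * Queue a SCSI response PDU. When sense data is present it is mapped and
 * attached as a second SGE, with the sense length prepended as a
 * big-endian u16 and the total padded to a 4-byte boundary as the iSCSI
 * data segment rules require.
 */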
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
			struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, sense_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

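		/*
		 * Round the sense payload up to a 4-byte boundary;
		 * -(len) & 3 yields the number of pad bytes needed.  The
		 * pad is carried in the SGE length but, per iSCSI rules,
		 * not counted in the header's dlength.
		 */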
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		sense_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, sense_len,
				DMA_TO_DEVICE);

		isert_cmd->sense_buf_len = sense_len;
		tx_dsg->addr = isert_cmd->sense_buf_dma;
		tx_dsg->length = sense_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

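/*
 * Queue a NOPIN PDU, either as a response to an initiator NOPOUT or as
 * a target-initiated ping, depending on nopout_response.
 */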
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
			struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
			struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
			struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

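/*
 * Queue a Reject PDU.  The header of the rejected PDU (cmd->buf_ptr)
 * is DMA-mapped and carried as the data segment in a second SGE.
 */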
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
			struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->sense_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->sense_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

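/*
 * Fill one RDMA work request with up to max_sge SGEs taken from the
 * command's scatterlist, starting at the given byte offset.  Returns
 * the number of SGEs consumed so the caller can advance its ib_sge
 * array between work requests.
 */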
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Map each TCM scatterlist entry into an ib_sge dma_addr + length
	 * pair, honoring the intra-page offset of the first entry.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
			 ib_sge->addr, ib_sge->length);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

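/*
 * Service a Data-IN request: DMA-map the command's scatterlist, carve
 * it into a chain of IB_WR_RDMA_WRITE work requests, chain the SCSI
 * response PDU behind the final write, and post the whole chain.
 */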
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
			struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *ib_sge;
	struct scatterlist *sg;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

	sg = &se_cmd->t_data_sg[0];
	sg_nents = se_cmd->t_data_nents;

	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map put_datain SGs\n");
		return -EINVAL;
	}
	wr->sge = sg;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
		 count, sg, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate datain ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Allocated ib_sge: %p from t_data_nents: %d for RDMA_WRITE\n",
		 ib_sge, se_cmd->t_data_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	cmd->stat_sn = conn->stat_sn++;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	data_left = se_cmd->data_length;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->send_flags = 0;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->next = &isert_cmd->tx_desc.send_wr;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		data_left -= data_len;
	}
	/*
	 * Build isert_conn->tx_desc for the iSCSI response PDU and attach
	 * it as the final work request behind the RDMA_WRITE chain.
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
	return 1;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	return ret;
}

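/*
 * Service a Data-OUT (solicited write) request: map the portion of the
 * scatterlist beyond write_data_done and post a chain of
 * IB_WR_RDMA_READ work requests, signalling only the last one.
 */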
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
			struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_sge *ib_sge;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, sg_nents, page_off, va_offset = 0;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, i, ib_sge_cnt;

	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
		 se_cmd->data_length, cmd->write_data_done);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	page_off = cmd->write_data_done % PAGE_SIZE;

	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
		 sg_off, sg_start, page_off);

	data_left = se_cmd->data_length - cmd->write_data_done;
	sg_nents = se_cmd->t_data_nents - sg_off;

	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
		 data_left, sg_nents);

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map get_dataout SGs\n");
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
		 count, sg_start, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate dataout ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Using ib_sge: %p from sg_nents: %d for RDMA_READ\n",
		 ib_sge, sg_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	wr->iser_ib_op = ISER_IB_RDMA_READ;
	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	offset = cmd->write_data_done;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->send_flags = IB_SEND_SIGNALED;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_READ memory for iSER Data WRITE\n");
	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	return ret;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

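/*
 * Dispatch a queued response based on the command's ISTATE_* value,
 * mapping each state onto the matching isert_put_*() helper.
 */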
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context during a pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

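/*
 * Network portal setup: allocate the isert_np context, create an RDMA
 * CM listener ID, bind it to the portal address, and start listening.
 */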
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from
	 * the iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

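/*
 * iscsit accept hook: sleep on the portal semaphore until a pending
 * RDMA connect request is queued, then pop it off np_accept_list, post
 * the login receive buffer, and accept the CM connection.  Gives up
 * after several consecutive empty wakeups.
 */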
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("np_thread_state %d for isert_accept_np\n",
			 np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}

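/*
 * Connection teardown hook: once the connection has left INIT state,
 * terminate it and wait for the error and disconnect completions
 * before marking the connection DOWN and dropping the reference.
 */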
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting\n");

	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);
	wait_for_completion(&isert_conn->conn_wait);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.owner = THIS_MODULE,
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_alloc_cmd = isert_alloc_cmd,
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
};

static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
			sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
			0, NULL);
	if (!isert_cmd_cache) {
		pr_err("Unable to create isert_cmd_cache\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);
destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	kmem_cache_destroy(isert_cmd_cache);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);