/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)

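/*
 * Global state shared across HCAs: a registry of isert_device instances
 * keyed by node GUID (guarded by device_list_mutex), the RX/TX completion
 * workqueues, and the slab cache backing isert_cmd allocations.
 */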
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct kmem_cache *isert_cmd_cache;

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

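/*
 * Create the RC queue pair for a new connection.  The completion queue
 * pair with the fewest active QPs is chosen, so connections spread
 * evenly across the per-device CQs and their completion vectors.
 */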
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct ib_device_attr devattr;
	int ret, index, min_index = 0;

	memset(&devattr, 0, sizeof(struct ib_device_attr));
	ret = isert_query_device(cma_id->device, &devattr);
	if (ret)
		return ret;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = devattr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed: %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

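/*
 * Allocate and DMA-map the ring of ISERT_QP_MAX_RECV_DTOS receive
 * descriptors for a connection.  On a mapping failure, everything
 * mapped so far is unwound before returning -ENOMEM.
 */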
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

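/*
 * Set up the per-HCA resources shared by all connections on a device:
 * one RX/TX completion queue pair per vector (bounded by the online CPU
 * count, the device's num_comp_vectors, and ISERT_MAX_CQ), a protection
 * domain, and a local-write DMA memory region.
 */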
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	int ret = 0, i, j;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	ib_dereg_mr(device->dev_mr);
	ib_dealloc_pd(device->dev_pd);
	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

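/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocates the isert_conn,
 * DMA-maps the login request/response buffers, binds the connection to
 * its device's PD/MR, creates the QP, and queues the connection on
 * np_accept_list for the login thread woken via np_sem.
 */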
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->context;

	kref_get(&isert_conn->conn_kref);
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

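/*
 * Deferred CM disconnect handling.  Moves an ISER_CONN_UP connection to
 * ISER_CONN_TERMINATING, calls rdma_disconnect() when a DREQ/DREP still
 * needs to go out to the initiator, then completes conn_wait and drops
 * this work's conn_kref reference.
 */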
static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state == ISER_CONN_UP)
		isert_conn->state = ISER_CONN_TERMINATING;

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}

	if (isert_conn->disconnect) {
		/* Send DREQ/DREP towards our initiator */
		rdma_disconnect(isert_conn->conn_cm_id);
	}

	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	complete(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
{
	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

	isert_conn->disconnect = disconnect;
	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
	schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;
	bool disconnect = false;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
		disconnect = true;
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		isert_disconnected_handler(cma_id, disconnect);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	if (ret != 0) {
		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
		       event->event, ret);
		dump_stack();
	}

	return ret;
}

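/*
 * Post 'count' receive work requests from the descriptor ring, chained
 * into a single ib_post_recv() call.  conn_rx_desc_head only advances
 * when the post succeeds; the head is advanced with a mask, which
 * assumes ISERT_QP_MAX_RECV_DTOS is a power of two.
 */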
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->send_flags = IB_SEND_SIGNALED;
	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}

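/*
 * Post the single login receive buffer.  Login exchanges run before the
 * normal RX descriptor ring exists, so the dedicated login_req_buf /
 * login_req_dma mapping set up at connect time is used instead.
 */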
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			isert_conn->state = ISER_CONN_UP;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		dump_stack();
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	complete(&isert_conn->conn_login_comp);
}

static void
isert_release_cmd(struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
						   iscsi_cmd);

	pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);

	kfree(cmd->buf_ptr);
	kfree(cmd->tmr_req);

	kmem_cache_free(isert_cmd_cache, isert_cmd);
}

static struct iscsi_cmd *
isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;

	isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
	if (!isert_cmd) {
		pr_err("Unable to allocate isert_cmd\n");
		return NULL;
	}
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;

	return &isert_cmd->iscsi_cmd;
}

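/*
 * Handle an iSCSI SCSI Command PDU.  Any immediate data that arrived in
 * the same receive is copied straight from the rx_desc payload into the
 * command's scatterlist before the command is sequenced.
 */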
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
		      unsigned char *buf)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

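/*
 * Parse the iSER header in front of the iSCSI PDU, extracting the
 * remote read/write STags and virtual addresses advertised via the
 * RSV/WSV flags, then hand the PDU to isert_rx_opcode() for
 * iSCSI-level dispatch.
 */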
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

	if (wr->sge) {
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	kfree(wr->send_wr);
	wr->send_wr = NULL;

	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
}

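/*
 * Final per-opcode teardown of an isert_cmd.  The command is unhooked
 * from the connection's command list and released through
 * transport_generic_free_cmd() where a se_cmd exists, otherwise through
 * isert_release_cmd().
 */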
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		isert_unmap_cmd(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		isert_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->sense_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
				    isert_cmd->sense_buf_len, DMA_TO_DEVICE);
		isert_cmd->sense_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

	iscsit_stop_dataout_timer(cmd);

	if (wr->sge) {
		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	if (isert_cmd->ib_sge) {
		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
		kfree(isert_cmd->ib_sge);
		isert_cmd->ib_sge = NULL;
	}

	cmd->write_data_done = se_cmd->data_length;
	wr->send_wr_num = 0;

	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

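/*
 * Dispatch a TX completion by rdma_wr operation type: ISER_IB_SEND
 * completions go through isert_response_completion(), while
 * ISER_IB_RDMA_READ completions kick target_execute_cmd() via
 * isert_completion_rdma_read().
 */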
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

	if (!isert_cmd)
		isert_unmap_tx_desc(tx_desc, ib_dev);
	else
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}

static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	if (isert_conn->post_recv_buf_count)
		return;

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}

	while (atomic_read(&isert_conn->post_send_buf_count))
		msleep(3000);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

	complete(&isert_conn->conn_wait_comp_err);
}

static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			atomic_dec(&isert_conn->post_send_buf_count);
			isert_cq_tx_comp_err(tx_desc, isert_conn);
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}

static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				pr_debug("RX wc.status: 0x%08x\n", wc.status);

			isert_conn->post_recv_buf_count--;
			isert_cq_rx_comp_err(isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
		return ret;
	}
	return ret;
}

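/*
 * Build and post a SCSI Response PDU.  When sense data is present it is
 * DMA-mapped and attached as a second SGE, with the two-byte sense
 * length prepended in big-endian per RFC 3720 and the payload padded to
 * a 4-byte boundary.
 */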
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, sense_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		sense_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, sense_len,
				DMA_TO_DEVICE);

		isert_cmd->sense_buf_len = sense_len;
		tx_dsg->addr = isert_cmd->sense_buf_dma;
		tx_dsg->length = sense_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
        struct isert_cmd *isert_cmd = container_of(cmd,
                                struct isert_cmd, iscsi_cmd);
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
        iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
                                &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
        isert_init_send_wr(isert_cmd, send_wr);

        pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

        return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
        struct isert_cmd *isert_cmd = container_of(cmd,
                                struct isert_cmd, iscsi_cmd);
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
        iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
                                  &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
        isert_init_send_wr(isert_cmd, send_wr);

        pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

        return isert_post_response(isert_conn, isert_cmd);
}

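/*
 * Reject PDUs echo the offending PDU header back to the initiator as a
 * data segment: the saved header in cmd->buf_ptr is DMA-mapped into the
 * second SGE, with sense_buf_dma/sense_buf_len reused to carry the
 * mapping.
 */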
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
        struct isert_cmd *isert_cmd = container_of(cmd,
                                struct isert_cmd, iscsi_cmd);
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
        struct iscsi_reject *hdr =
                (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
        iscsit_build_reject(cmd, conn, hdr);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

        hton24(hdr->dlength, ISCSI_HDR_LEN);
        isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
                        (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
                        DMA_TO_DEVICE);
        isert_cmd->sense_buf_len = ISCSI_HDR_LEN;
        tx_dsg->addr = isert_cmd->sense_buf_dma;
        tx_dsg->length = ISCSI_HDR_LEN;
        tx_dsg->lkey = isert_conn->conn_mr->lkey;
        isert_cmd->tx_desc.num_sge = 2;

        isert_init_send_wr(isert_cmd, send_wr);

        pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

        return isert_post_response(isert_conn, isert_cmd);
}

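/*
 * Translate a slice of the TCM scatterlist into the ib_sge array of one
 * RDMA work request.  Illustrative arithmetic, assuming 4 KiB pages: an
 * offset of 5000 bytes yields sg_off = 1 and page_off = 904, so the first
 * SGE begins 904 bytes into the second scatterlist entry.
 */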
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
                    u32 data_left, u32 offset)
{
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct scatterlist *sg_start, *tmp_sg;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        u32 sg_off, page_off;
        int i = 0, sg_nents;

        sg_off = offset / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
        page_off = offset % PAGE_SIZE;

        send_wr->sg_list = ib_sge;
        send_wr->num_sge = sg_nents;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        /*
         * Map each TCM scatterlist entry into an ib_sge dma_addr + length.
         */
        for_each_sg(sg_start, tmp_sg, sg_nents, i) {
                pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
                         (unsigned long long)tmp_sg->dma_address,
                         tmp_sg->length, page_off);

                ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
                ib_sge->length = min_t(u32, data_left,
                                       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
                ib_sge->lkey = isert_conn->conn_mr->lkey;

                pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
                         ib_sge->addr, ib_sge->length);
                page_off = 0;
                data_left -= ib_sge->length;
                ib_sge++;
                pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
        }

        pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
                 send_wr->sg_list, send_wr->num_sge);

        return sg_nents;
}

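/*
 * Map the command's data-in payload and post it as a chain of
 * IB_WR_RDMA_WRITE work requests with the SCSI response linked as the
 * final send WR.  Rough sizing sketch, assuming 4 KiB pages and
 * max_sge = 32: a 1 MiB payload spans 256 scatterlist entries, giving
 * rdma_write_max = 128 KiB and DIV_ROUND_UP(256, 32) = 8 work requests.
 */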
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = container_of(cmd,
                                struct isert_cmd, iscsi_cmd);
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct ib_send_wr *wr_failed, *send_wr;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_sge *ib_sge;
        struct scatterlist *sg;
        u32 offset = 0, data_len, data_left, rdma_write_max;
        int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

        pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

        sg = &se_cmd->t_data_sg[0];
        sg_nents = se_cmd->t_data_nents;

        count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
        if (unlikely(!count)) {
                pr_err("Unable to map put_datain SGs\n");
                return -EINVAL;
        }
        wr->sge = sg;
        wr->num_sge = sg_nents;
        pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
                 count, sg, sg_nents);

        ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
        if (!ib_sge) {
                pr_warn("Unable to allocate datain ib_sge\n");
                ret = -ENOMEM;
                goto unmap_sg;
        }
        isert_cmd->ib_sge = ib_sge;

        pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
                 ib_sge, se_cmd->t_data_nents);

        wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
        wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
                              GFP_KERNEL);
        if (!wr->send_wr) {
                pr_err("Unable to allocate wr->send_wr\n");
                ret = -ENOMEM;
                goto unmap_sg;
        }
        pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
                 wr->send_wr, wr->send_wr_num);

        iscsit_increment_maxcmdsn(cmd, conn->sess);
        cmd->stat_sn = conn->stat_sn++;

        wr->isert_cmd = isert_cmd;
        rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
        data_left = se_cmd->data_length;

        for (i = 0; i < wr->send_wr_num; i++) {
                send_wr = &isert_cmd->rdma_wr.send_wr[i];
                data_len = min(data_left, rdma_write_max);

                send_wr->opcode = IB_WR_RDMA_WRITE;
                send_wr->send_flags = 0;
                send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
                send_wr->wr.rdma.rkey = isert_cmd->read_stag;

                ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
                                        send_wr, data_len, offset);
                ib_sge += ib_sge_cnt;

                if (i + 1 == wr->send_wr_num)
                        send_wr->next = &isert_cmd->tx_desc.send_wr;
                else
                        send_wr->next = &wr->send_wr[i + 1];

                offset += data_len;
                data_left -= data_len;
        }
        /*
         * Build isert_cmd->tx_desc for the iSCSI response PDU and attach it
         * as the final WR of the RDMA_WRITE chain above.
         */
        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
        iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
                             &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
        isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

        atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
        if (rc) {
                pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
                atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
        }
        pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
        return 1;

unmap_sg:
        ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
        return ret;
}

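/*
 * Post IB_WR_RDMA_READ work requests to pull the outstanding WRITE
 * payload from the initiator.  The transfer resumes at
 * cmd->write_data_done; for example (4 KiB pages assumed), with 8 KiB of
 * a 64 KiB payload already received, mapping starts at scatterlist entry
 * 2 with 56 KiB left to read.
 */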
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = container_of(cmd,
                                struct isert_cmd, iscsi_cmd);
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct ib_send_wr *wr_failed, *send_wr;
        struct ib_sge *ib_sge;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct scatterlist *sg_start;
        u32 sg_off, sg_nents, page_off, va_offset = 0;
        u32 offset = 0, data_len, data_left, rdma_write_max;
        int rc, ret = 0, count, i, ib_sge_cnt;

        pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
                 se_cmd->data_length, cmd->write_data_done);

        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        page_off = cmd->write_data_done % PAGE_SIZE;

        pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
                 sg_off, sg_start, page_off);

        data_left = se_cmd->data_length - cmd->write_data_done;
        sg_nents = se_cmd->t_data_nents - sg_off;

        pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
                 data_left, sg_nents);

        count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
        if (unlikely(!count)) {
                pr_err("Unable to map get_dataout SGs\n");
                return -EINVAL;
        }
        wr->sge = sg_start;
        wr->num_sge = sg_nents;
        pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
                 count, sg_start, sg_nents);

        ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
        if (!ib_sge) {
                pr_warn("Unable to allocate dataout ib_sge\n");
                ret = -ENOMEM;
                goto unmap_sg;
        }
        isert_cmd->ib_sge = ib_sge;

        pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
                 ib_sge, sg_nents);

        wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
        wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
                              GFP_KERNEL);
        if (!wr->send_wr) {
                pr_err("Unable to allocate wr->send_wr\n");
                ret = -ENOMEM;
                goto unmap_sg;
        }
        pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
                 wr->send_wr, wr->send_wr_num);

        isert_cmd->tx_desc.isert_cmd = isert_cmd;

        wr->iser_ib_op = ISER_IB_RDMA_READ;
        wr->isert_cmd = isert_cmd;
        rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
        offset = cmd->write_data_done;

        for (i = 0; i < wr->send_wr_num; i++) {
                send_wr = &isert_cmd->rdma_wr.send_wr[i];
                data_len = min(data_left, rdma_write_max);

                send_wr->opcode = IB_WR_RDMA_READ;
                send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
                send_wr->wr.rdma.rkey = isert_cmd->write_stag;

                ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
                                        send_wr, data_len, offset);
                ib_sge += ib_sge_cnt;

                if (i + 1 == wr->send_wr_num)
                        send_wr->send_flags = IB_SEND_SIGNALED;
                else
                        send_wr->next = &wr->send_wr[i + 1];

                offset += data_len;
                va_offset += data_len;
                data_left -= data_len;
        }

        atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
        if (rc) {
                pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
                atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
        }
        pr_debug("Posted RDMA_READ memory for iSER Data WRITE\n");
        return 0;

unmap_sg:
        ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
        return ret;
}

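/*
 * iscsit queueing callbacks: immediate-queue states are dispatched here,
 * all other response states through isert_response_queue() below.
 */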
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
        int ret;

        switch (state) {
        case ISTATE_SEND_NOPIN_WANT_RESPONSE:
                ret = isert_put_nopin(cmd, conn, false);
                break;
        default:
                pr_err("Unknown immediate state: 0x%02x\n", state);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
        int ret;

        switch (state) {
        case ISTATE_SEND_LOGOUTRSP:
                ret = isert_put_logout_rsp(cmd, conn);
                if (!ret) {
                        pr_debug("Returning iSER Logout -EAGAIN\n");
                        ret = -EAGAIN;
                }
                break;
        case ISTATE_SEND_NOPIN:
                ret = isert_put_nopin(cmd, conn, true);
                break;
        case ISTATE_SEND_TASKMGTRSP:
                ret = isert_put_tm_rsp(cmd, conn);
                break;
        case ISTATE_SEND_REJECT:
                ret = isert_put_reject(cmd, conn);
                break;
        case ISTATE_SEND_STATUS:
                /*
                 * Special case for sending a non-GOOD SCSI status from the
                 * TX thread context during a pre se_cmd execution failure.
                 */
                ret = isert_put_response(conn, cmd);
                break;
        default:
                pr_err("Unknown response state: 0x%02x\n", state);
                ret = -EINVAL;
                break;
        }

        return ret;
}

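/*
 * Bring up the RDMA CM listener for a network portal: allocate the
 * isert_np context, create an RDMA_PS_TCP/IB_QPT_RC cm_id, then bind and
 * listen on the sockaddr handed down from the configfs layer.
 */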
static int
isert_setup_np(struct iscsi_np *np,
               struct __kernel_sockaddr_storage *ksockaddr)
{
        struct isert_np *isert_np;
        struct rdma_cm_id *isert_lid;
        struct sockaddr *sa;
        int ret;

        isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
        if (!isert_np) {
                pr_err("Unable to allocate struct isert_np\n");
                return -ENOMEM;
        }
        sema_init(&isert_np->np_sem, 0);
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);

        sa = (struct sockaddr *)ksockaddr;
        pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
        /*
         * Set up np->np_sockaddr from the sockaddr passed in from the
         * iscsi_target_configfs.c code.
         */
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct __kernel_sockaddr_storage));

        isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
                                   IB_QPT_RC);
        if (IS_ERR(isert_lid)) {
                pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
                       PTR_ERR(isert_lid));
                ret = PTR_ERR(isert_lid);
                goto out;
        }

        ret = rdma_bind_addr(isert_lid, sa);
        if (ret) {
                pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
                goto out_lid;
        }

        ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
        if (ret) {
                pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
                goto out_lid;
        }

        isert_np->np_cm_id = isert_lid;
        np->np_context = isert_np;
        pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

        return 0;

out_lid:
        rdma_destroy_id(isert_lid);
out:
        kfree(isert_np);
        return ret;
}

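/*
 * Accept an incoming RDMA connection.  rnr_retry_count = 7 selects
 * infinite receiver-not-ready retries as defined by the IB spec, and
 * retry_count = 7 is the maximum transport retry count.
 */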
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
        struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
        struct rdma_conn_param cp;
        int ret;

        memset(&cp, 0, sizeof(struct rdma_conn_param));
        cp.responder_resources = isert_conn->responder_resources;
        cp.initiator_depth = isert_conn->initiator_depth;
        cp.retry_count = 7;
        cp.rnr_retry_count = 7;

        pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

        ret = rdma_accept(cm_id, &cp);
        if (ret) {
                pr_err("rdma_accept() failed with: %d\n", ret);
                return ret;
        }

        pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

        return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        int ret;

        pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);

        ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
        if (ret)
                return ret;

        pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
        return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
                    struct isert_conn *isert_conn)
{
        struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
        struct rdma_route *cm_route = &cm_id->route;
        struct sockaddr_in *sock_in;
        struct sockaddr_in6 *sock_in6;

        conn->login_family = np->np_sockaddr.ss_family;

        if (np->np_sockaddr.ss_family == AF_INET6) {
                sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
                snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
                         &sock_in6->sin6_addr.in6_u);
                conn->login_port = ntohs(sock_in6->sin6_port);

                sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
                snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
                         &sock_in6->sin6_addr.in6_u);
                conn->local_port = ntohs(sock_in6->sin6_port);
        } else {
                sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
                sprintf(conn->login_ip, "%pI4",
                        &sock_in->sin_addr.s_addr);
                conn->login_port = ntohs(sock_in->sin_port);

                sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
                sprintf(conn->local_ip, "%pI4",
                        &sock_in->sin_addr.s_addr);
                conn->local_port = ntohs(sock_in->sin_port);
        }
}

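/*
 * Sleep on np_sem until the CM handler queues a new connection, then
 * hand it to the iscsit login path.  Wakeups that find the accept list
 * empty are retried, bailing out after five consecutive empty passes.
 */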
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
        struct isert_np *isert_np = (struct isert_np *)np->np_context;
        struct isert_conn *isert_conn;
        int max_accept = 0, ret;

accept_wait:
        ret = down_interruptible(&isert_np->np_sem);
        if (max_accept > 5)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                pr_debug("np_thread_state %d for isert_accept_np\n",
                         np->np_thread_state);
                /*
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail.
                 */
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        mutex_lock(&isert_np->np_accept_mutex);
        if (list_empty(&isert_np->np_accept_list)) {
                mutex_unlock(&isert_np->np_accept_mutex);
                max_accept++;
                goto accept_wait;
        }
        isert_conn = list_first_entry(&isert_np->np_accept_list,
                                      struct isert_conn, conn_accept_node);
        list_del_init(&isert_conn->conn_accept_node);
        mutex_unlock(&isert_np->np_accept_mutex);

        conn->context = isert_conn;
        isert_conn->conn = conn;
        max_accept = 0;

        ret = isert_rdma_post_recvl(isert_conn);
        if (ret)
                return ret;

        ret = isert_rdma_accept(isert_conn);
        if (ret)
                return ret;

        isert_set_conn_info(np, conn, isert_conn);

        pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
        return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
        struct isert_np *isert_np = (struct isert_np *)np->np_context;

        rdma_destroy_id(isert_np->np_cm_id);

        np->np_context = NULL;
        kfree(isert_np);
}

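/*
 * Connection teardown: disconnect the CM id under conn_mutex, move a
 * live connection to ISER_CONN_TERMINATING, then wait for the
 * completion-error handler and the final connection release before
 * returning control to iscsit.
 */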
static void isert_wait_conn(struct iscsi_conn *conn)
{
        struct isert_conn *isert_conn = conn->context;

        pr_debug("isert_wait_conn: Starting\n");

        mutex_lock(&isert_conn->conn_mutex);
        if (isert_conn->conn_cm_id) {
                pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
        }
        /*
         * Only wait for conn_wait_comp_err if the isert_conn made it
         * into full feature phase.
         */
        if (isert_conn->state == ISER_CONN_INIT) {
                mutex_unlock(&isert_conn->conn_mutex);
                return;
        }
        if (isert_conn->state == ISER_CONN_UP)
                isert_conn->state = ISER_CONN_TERMINATING;
        mutex_unlock(&isert_conn->conn_mutex);

        wait_for_completion(&isert_conn->conn_wait_comp_err);

        wait_for_completion(&isert_conn->conn_wait);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
        struct isert_conn *isert_conn = conn->context;

        isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
        .name = "IB/iSER",
        .transport_type = ISCSI_INFINIBAND,
        .owner = THIS_MODULE,
        .iscsit_setup_np = isert_setup_np,
        .iscsit_accept_np = isert_accept_np,
        .iscsit_free_np = isert_free_np,
        .iscsit_wait_conn = isert_wait_conn,
        .iscsit_free_conn = isert_free_conn,
        .iscsit_alloc_cmd = isert_alloc_cmd,
        .iscsit_get_login_rx = isert_get_login_rx,
        .iscsit_put_login_tx = isert_put_login_tx,
        .iscsit_immediate_queue = isert_immediate_queue,
        .iscsit_response_queue = isert_response_queue,
        .iscsit_get_dataout = isert_get_dataout,
        .iscsit_queue_data_in = isert_put_datain,
        .iscsit_queue_status = isert_put_response,
};

static int __init isert_init(void)
{
        int ret;

        isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
        if (!isert_rx_wq) {
                pr_err("Unable to allocate isert_rx_wq\n");
                return -ENOMEM;
        }

        isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
        if (!isert_comp_wq) {
                pr_err("Unable to allocate isert_comp_wq\n");
                ret = -ENOMEM;
                goto destroy_rx_wq;
        }

        isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
                        sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
                        0, NULL);
        if (!isert_cmd_cache) {
                pr_err("Unable to create isert_cmd_cache\n");
                ret = -ENOMEM;
                goto destroy_comp_wq;
        }

        iscsit_register_transport(&iser_target_transport);
        pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
        return 0;

destroy_comp_wq:
        destroy_workqueue(isert_comp_wq);
destroy_rx_wq:
        destroy_workqueue(isert_rx_wq);
        return ret;
}

static void __exit isert_exit(void)
{
        flush_scheduled_work();
        kmem_cache_destroy(isert_cmd_cache);
        destroy_workqueue(isert_comp_wq);
        destroy_workqueue(isert_rx_wq);
        iscsit_unregister_transport(&iser_target_transport);
        pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);