
TOMOYO Linux Cross Reference
Linux/net/sunrpc/xprtrdma/svc_rdma_transport.c

/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

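/*
 * All svcrdma transports share a single dispatch tasklet and a single
 * pending-transport list. dto_lock is the irqsave spinlock that
 * serializes additions to and removals from dto_xprt_q between the
 * interrupt-context completion handlers and dto_tasklet_func below.
 */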
static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
        .xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
        .xcl_ident = XPRT_TRANSPORT_RDMA,
};
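/*
 * svc_rdma_class is not used directly in this file; it is presumably
 * registered with the generic svc transport switch at module init time
 * (in svc_rdma.c), roughly:
 *
 *      svc_reg_xprt_class(&svc_rdma_class);
 *
 * after which an NFS/RDMA listener can be created by the name "rdma".
 */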
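/*
 * Allocate an op context used to track a posted WR. Allocation from
 * the context cache is treated as must-succeed: on memory pressure the
 * caller sleeps for 500ms and retries rather than failing the request.
 */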
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt;

        while (1) {
                ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
                if (ctxt)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        ctxt->xprt = xprt;
        INIT_LIST_HEAD(&ctxt->dto_q);
        ctxt->count = 0;
        ctxt->frmr = NULL;
        atomic_inc(&xprt->sc_ctxt_used);
        return ctxt;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
        struct svcxprt_rdma *xprt = ctxt->xprt;
        int i;

        for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
                /*
                 * Unmap the DMA addr in the SGE if the lkey matches
                 * the sc_dma_lkey, otherwise, ignore it since it is
                 * an FRMR lkey and will be unmapped later when the
                 * last WR that uses it completes.
                 */
                if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
                        atomic_dec(&xprt->sc_dma_used);
                        ib_dma_unmap_page(xprt->sc_cm_id->device,
                                          ctxt->sge[i].addr,
                                          ctxt->sge[i].length,
                                          ctxt->direction);
                }
        }
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
        struct svcxprt_rdma *xprt;
        int i;

        BUG_ON(!ctxt);
        xprt = ctxt->xprt;
        if (free_pages)
                for (i = 0; i < ctxt->count; i++)
                        put_page(ctxt->pages[i]);

        kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
        atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
        struct svc_rdma_req_map *map;

        while (1) {
                map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
                if (map)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        map->count = 0;
        return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
        kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        dprintk("svcrdma: received CQ event id=%d, context=%p\n",
                event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                dprintk("svcrdma: QP event %d received for QP=%p\n",
                        event->event, event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
                        "closing transport\n",
                        event->event, event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
        struct svcxprt_rdma *xprt;
        unsigned long flags;

        spin_lock_irqsave(&dto_lock, flags);
        while (!list_empty(&dto_xprt_q)) {
                xprt = list_entry(dto_xprt_q.next,
                                  struct svcxprt_rdma, sc_dto_q);
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);

                rq_cq_reap(xprt);
                sq_cq_reap(xprt);

                svc_xprt_put(&xprt->sc_xprt);
                spin_lock_irqsave(&dto_lock, flags);
        }
        spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an SQ
         * completion.
         */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
        int ret;
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;

        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);

        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                ctxt->wc_status = wc.status;
                ctxt->byte_len = wc.byte_len;
                svc_rdma_unmap_dma(ctxt);
                if (wc.status != IB_WC_SUCCESS) {
                        /* Close the transport */
                        dprintk("svcrdma: transport closing putting ctxt %p\n",
                                ctxt);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_put_context(ctxt, 1);
                        svc_xprt_put(&xprt->sc_xprt);
                        continue;
                }
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        /*
         * If data arrived before established event,
         * don't enqueue. This defers RPC I/O until the
         * RDMA connection is complete.
         */
        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
                            struct svc_rdma_op_ctxt *ctxt)
{
        svc_rdma_unmap_dma(ctxt);

        switch (ctxt->wr_op) {
        case IB_WR_SEND:
                BUG_ON(ctxt->frmr);
                svc_rdma_put_context(ctxt, 1);
                break;

        case IB_WR_RDMA_WRITE:
                BUG_ON(ctxt->frmr);
                svc_rdma_put_context(ctxt, 0);
                break;

        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_READ_WITH_INV:
                svc_rdma_put_frmr(xprt, ctxt->frmr);
                if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                        struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

                        BUG_ON(!read_hdr);
                        spin_lock_bh(&xprt->sc_rq_dto_lock);
                        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                        list_add_tail(&read_hdr->dto_q,
                                      &xprt->sc_read_complete_q);
                        spin_unlock_bh(&xprt->sc_rq_dto_lock);
                        svc_xprt_enqueue(&xprt->sc_xprt);
                }
                svc_rdma_put_context(ctxt, 0);
                break;

        default:
                printk(KERN_ERR "svcrdma: unexpected completion type, "
                       "opcode=%d\n",
                       ctxt->wr_op);
                BUG();
                break;
        }
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct ib_wc wc_a[6];
        struct ib_wc *wc;
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;

        memset(wc_a, 0, sizeof(wc_a));

        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
                int i;

                for (i = 0; i < ret; i++) {
                        wc = &wc_a[i];
                        if (wc->status != IB_WC_SUCCESS) {
                                dprintk("svcrdma: sq wc err status %d\n",
                                        wc->status);

                                /* Close the transport */
                                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        }

                        /* Decrement used SQ WR count */
                        atomic_dec(&xprt->sc_sq_count);
                        wake_up(&xprt->sc_send_wait);

                        ctxt = (struct svc_rdma_op_ctxt *)
                                (unsigned long)wc->wr_id;
                        if (ctxt)
                                process_context(xprt, ctxt);

                        svc_xprt_put(&xprt->sc_xprt);
                }
        }

        if (ctxt)
                atomic_inc(&rdma_stat_sq_prod);
}

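/*
 * Send Queue Completion Handler
 *
 * Mirrors rq_comp_handler: runs in interrupt context, so it only marks
 * the transport SQ-pending, queues it on the global DTO list, and
 * defers the actual CQ reaping to the tasklet.
 */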
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an RQ
         * completion.
         */
        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

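/*
 * Allocate and initialize an svcxprt_rdma: list heads, locks, the send
 * wait queue, and the module-wide tunables (ORD, request size and
 * count, SQ depth). Note the xprt is initialized against init_net.
 * Returns NULL on allocation failure.
 */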
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt)
                return NULL;
        svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
        spin_lock_init(&cma_xprt->sc_frmr_q_lock);

        cma_xprt->sc_ord = svcrdma_ord;

        cma_xprt->sc_max_req_size = svcrdma_max_req_size;
        cma_xprt->sc_max_requests = svcrdma_max_requests;
        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
        atomic_set(&cma_xprt->sc_sq_count, 0);
        atomic_set(&cma_xprt->sc_ctxt_used, 0);

        if (listener)
                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}

struct page *svc_rdma_get_page(void)
{
        struct page *page;

        while ((page = alloc_page(GFP_KERNEL)) == NULL) {
                /* If we can't get memory, wait a bit and try again */
                printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n");
                schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
        }
        return page;
}

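/*
 * Post a receive WR large enough for the biggest inline RPC message:
 * an SGE list of whole pages is built until sc_max_req_size bytes are
 * covered, each page DMA-mapped in the device-to-memory direction.
 */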
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        dma_addr_t pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                BUG_ON(sge_no >= xprt->sc_max_sge);
                page = svc_rdma_get_page();
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                        goto err_put_ctxt;
                atomic_inc(&xprt->sc_dma_used);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
                ctxt->count = sge_no + 1;
                buflen += PAGE_SIZE;
        }
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
                svc_xprt_put(&xprt->sc_xprt);
        }
        return ret;

 err_put_ctxt:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listener xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
        if (!newxprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return;
        }
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);

        /* Save client advertised inbound read limit for use later in accept. */
        newxprt->sc_ord = client_ird;

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events are
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct svcxprt_rdma *xprt = cma_id->context;
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, cma_id->context, event->event);
                handle_connect_req(cma_id,
                                   event->param.conn.initiator_depth);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt)
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                break;

        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }

        return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct svc_xprt *xprt = cma_id->context;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, xprt, event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                }
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }
        return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA socket\n");
        if (sa->sa_family != AF_INET) {
                dprintk("svcrdma: Address family %d is not supported.\n",
                        sa->sa_family);
                return ERR_PTR(-EAFNOSUPPORT);
        }
        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
                                   IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}

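/*
 * Allocate a fast-registration MR and its page list, both sized for
 * the largest RPC payload (RPCSVC_MAXPAGES pages). Any failure is
 * collapsed to ERR_PTR(-ENOMEM).
 */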
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
        struct ib_mr *mr;
        struct ib_fast_reg_page_list *pl;
        struct svc_rdma_fastreg_mr *frmr;

        frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
        if (!frmr)
                goto err;

        mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
        if (IS_ERR(mr))
                goto err_free_frmr;

        pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
                                         RPCSVC_MAXPAGES);
        if (IS_ERR(pl))
                goto err_free_mr;

        frmr->mr = mr;
        frmr->page_list = pl;
        INIT_LIST_HEAD(&frmr->frmr_list);
        return frmr;

 err_free_mr:
        ib_dereg_mr(mr);
 err_free_frmr:
        kfree(frmr);
 err:
        return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_fastreg_mr *frmr;

        while (!list_empty(&xprt->sc_frmr_q)) {
                frmr = list_entry(xprt->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                ib_dereg_mr(frmr->mr);
                ib_free_fast_reg_page_list(frmr->page_list);
                kfree(frmr);
        }
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_fastreg_mr *frmr = NULL;

        spin_lock_bh(&rdma->sc_frmr_q_lock);
        if (!list_empty(&rdma->sc_frmr_q)) {
                frmr = list_entry(rdma->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                frmr->map_len = 0;
                frmr->page_list_len = 0;
        }
        spin_unlock_bh(&rdma->sc_frmr_q_lock);
        if (frmr)
                return frmr;

        return rdma_alloc_frmr(rdma);
}

static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
                           struct svc_rdma_fastreg_mr *frmr)
{
        int page_no;

        for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
                dma_addr_t addr = frmr->page_list->page_list[page_no];

                if (ib_dma_mapping_error(frmr->mr->device, addr))
                        continue;
                atomic_dec(&xprt->sc_dma_used);
                ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
                                  frmr->direction);
        }
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
                       struct svc_rdma_fastreg_mr *frmr)
{
        if (frmr) {
                frmr_unmap_dma(rdma, frmr);
                spin_lock_bh(&rdma->sc_frmr_q_lock);
                BUG_ON(!list_empty(&frmr->frmr_list));
                list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
                spin_unlock_bh(&rdma->sc_frmr_q_lock);
        }
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int uninitialized_var(dma_mr_acc);
        int need_dma_mr;
        int ret;
        int i;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
        if (ret) {
                dprintk("svcrdma: could not query device attributes on "
                        "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
                goto errout;
        }

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_sge = min((size_t)devattr.max_sge,
                                  (size_t)RPCSVC_MAXPAGES);
        newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
                                       (size_t)svcrdma_max_requests);
        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

        /*
         * Limit ORD based on client limit, local device limit, and
         * configured svcrdma limit.
         */
        newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
        newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

        newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         sq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_sq_depth,
                                         0);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         rq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_max_requests,
                                         0);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
                "    cm_id->device=%p, sc_pd->device=%p\n"
                "    cap.max_send_wr = %d\n"
                "    cap.max_recv_wr = %d\n"
                "    cap.max_send_sge = %d\n"
                "    cap.max_recv_sge = %d\n",
                newxprt->sc_cm_id, newxprt->sc_pd,
                newxprt->sc_cm_id->device, newxprt->sc_pd->device,
                qp_attr.cap.max_send_wr,
                qp_attr.cap.max_recv_wr,
                qp_attr.cap.max_send_sge,
                qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                goto errout;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        /*
         * Use the most secure set of MR resources based on the
         * transport type and available memory management features in
         * the device. Here's the table implemented below:
         *
         *              Fast    Global  DMA     Remote WR
         *              Reg     LKEY    MR      Access
         *              Sup'd   Sup'd   Needed  Needed
         *
         * IWARP        N       N       Y       Y
         *              N       Y       Y       Y
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * IB           N       N       Y       N
         *              N       Y       N       -
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * NB:  iWARP requires remote write access for the data sink
         *      of an RDMA_READ. IB does not.
         */
        if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                newxprt->sc_frmr_pg_list_len =
                        devattr.max_fast_reg_page_list_len;
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
        }

        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
        switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
        case RDMA_TRANSPORT_IWARP:
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
                if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
                        need_dma_mr = 1;
                        dma_mr_acc =
                                (IB_ACCESS_LOCAL_WRITE |
                                 IB_ACCESS_REMOTE_WRITE);
                } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
                        need_dma_mr = 1;
                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                } else
                        need_dma_mr = 0;
                break;
        case RDMA_TRANSPORT_IB:
                if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
                        need_dma_mr = 1;
                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                } else if (!(devattr.device_cap_flags &
                             IB_DEVICE_LOCAL_DMA_LKEY)) {
                        need_dma_mr = 1;
                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                } else
                        need_dma_mr = 0;
                break;
        default:
                goto errout;
        }

        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
        if (need_dma_mr) {
                /* Register all of physical memory */
                newxprt->sc_phys_mr =
                        ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
                if (IS_ERR(newxprt->sc_phys_mr)) {
                        dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
                                ret);
                        goto errout;
                }
                newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
        } else
                newxprt->sc_dma_lkey =
                        newxprt->sc_cm_id->device->local_dma_lkey;
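        /*
         * Either way, sc_dma_lkey now holds an lkey that is valid for
         * the SGEs of locally posted send and receive WRs.
         */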

        /* Post receive buffers */
        for (i = 0; i < newxprt->sc_max_requests; i++) {
                ret = svc_rdma_post_recv(newxprt);
                if (ret) {
                        dprintk("svcrdma: failure posting receive buffers\n");
                        goto errout;
                }
        }

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /*
         * Arm the CQs for the SQ and RQ before accepting so we can't
         * miss the first message
         */
        ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = newxprt->sc_ord;
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret) {
                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
                        ret);
                goto errout;
        }

        dprintk("svcrdma: new connection %p accepted with the following "
                "attributes:\n"
                "    local_ip        : %pI4\n"
                "    local_port      : %d\n"
                "    remote_ip       : %pI4\n"
                "    remote_port     : %d\n"
                "    max_sge         : %d\n"
                "    sq_depth        : %d\n"
                "    max_requests    : %d\n"
                "    ord             : %d\n",
                newxprt,
                &((struct sockaddr_in *)&newxprt->sc_cm_id->
                         route.addr.src_addr)->sin_addr.s_addr,
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.src_addr)->sin_port),
                &((struct sockaddr_in *)&newxprt->sc_cm_id->
                         route.addr.dst_addr)->sin_addr.s_addr,
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.dst_addr)->sin_port),
                newxprt->sc_max_sge,
                newxprt->sc_sq_depth,
                newxprt->sc_max_requests,
                newxprt->sc_ord);

        return &newxprt->sc_xprt;

 errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                ib_destroy_qp(newxprt->sc_qp);
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
        return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        dprintk("svc: svc_rdma_detach(%p)\n", xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);

        dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

        /* We should only be called from kref_put */
        BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

        /*
         * Destroy queued, but not processed read completions. Note
         * that this cleanup has to be done before destroying the
         * cm_id because the device ptr is needed to unmap the dma in
         * svc_rdma_put_context.
         */
        while (!list_empty(&rdma->sc_read_complete_q)) {
                struct svc_rdma_op_ctxt *ctxt;

                ctxt = list_entry(rdma->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Destroy queued, but not processed recv completions */
        while (!list_empty(&rdma->sc_rq_dto_q)) {
                struct svc_rdma_op_ctxt *ctxt;

                ctxt = list_entry(rdma->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Warn if we leaked a resource or under-referenced */
        WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
        WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

        /* De-allocate fastreg mr */
        rdma_dealloc_frmr_q(rdma);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_destroy_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_destroy_cq(rdma->sc_rq_cq);

        if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
                ib_dereg_mr(rdma->sc_phys_mr);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}

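/*
 * An RDMA transport is always regarded as arriving on a "secure" port
 * for the purposes of the generic svc secure-port check.
 */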
static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
        return 1;
}

/*
 * Attempt to register the kvec representing the RPC memory with the
 * device.
 *
 * Returns zero if the FASTREG WR was successfully posted; otherwise
 * the error returned by svc_rdma_send().
 */
int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
                     struct svc_rdma_fastreg_mr *frmr)
{
        struct ib_send_wr fastreg_wr;
        u8 key;

        /* Bump the key */
        key = (u8)(frmr->mr->lkey & 0x000000FF);
        ib_update_fast_reg_key(frmr->mr, ++key);

        /* Prepare FASTREG WR */
        memset(&fastreg_wr, 0, sizeof fastreg_wr);
        fastreg_wr.opcode = IB_WR_FAST_REG_MR;
        fastreg_wr.send_flags = IB_SEND_SIGNALED;
        fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
        fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
        fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
        fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
        fastreg_wr.wr.fast_reg.length = frmr->map_len;
        fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
        fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
        return svc_rdma_send(xprt, &fastreg_wr);
}

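/*
 * Post a (possibly chained) signaled send WR with SQ flow control.
 * Callers build a WR and hand it off, as svc_rdma_send_error() does
 * below:
 *
 *      err_wr.opcode = IB_WR_SEND;
 *      err_wr.send_flags = IB_SEND_SIGNALED;
 *      ret = svc_rdma_send(xprt, &err_wr);
 *
 * SQ space is accounted under sc_lock: if the chain does not fit, the
 * SQ CQ is reaped opportunistically and the caller then sleeps on
 * sc_send_wait until completions free enough WR slots. One transport
 * reference is taken per WR and dropped by sq_cq_reap() when the WR
 * completes.
 */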
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
        struct ib_send_wr *bad_wr, *n_wr;
        int wr_count;
        int i;
        int ret;

        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                return -ENOTCONN;

        BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
        wr_count = 1;
        for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
                wr_count++;

        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                spin_lock_bh(&xprt->sc_lock);
                if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
                        spin_unlock_bh(&xprt->sc_lock);
                        atomic_inc(&rdma_stat_sq_starve);

                        /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);

                        /* Wait until SQ WR available if SQ still full */
                        wait_event(xprt->sc_send_wait,
                                   atomic_read(&xprt->sc_sq_count) <
                                   xprt->sc_sq_depth);
                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                                return -ENOTCONN;
                        continue;
                }
                /* Take a transport ref for each WR posted */
                for (i = 0; i < wr_count; i++)
                        svc_xprt_get(&xprt->sc_xprt);

                /* Bump used SQ WR count and post */
                atomic_add(wr_count, &xprt->sc_sq_count);
                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
                if (ret) {
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        atomic_sub(wr_count, &xprt->sc_sq_count);
                        for (i = 0; i < wr_count; i++)
                                svc_xprt_put(&xprt->sc_xprt);
                        dprintk("svcrdma: failed to post SQ WR rc=%d, "
                               "sc_sq_count=%d, sc_sq_depth=%d\n",
                               ret, atomic_read(&xprt->sc_sq_count),
                               xprt->sc_sq_depth);
                }
                spin_unlock_bh(&xprt->sc_lock);
                if (ret)
                        wake_up(&xprt->sc_send_wait);
                break;
        }
        return ret;
}

void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                         enum rpcrdma_errcode err)
{
        struct ib_send_wr err_wr;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
        u32 *va;
        int length;
        int ret;

        p = svc_rdma_get_page();
        va = page_address(p);

        /* XDR encode error */
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        ctxt->count = 1;
        ctxt->pages[0] = p;

        /* Prepare SGE for local address; this is an outbound Send, so
         * the page is mapped in the memory-to-device direction. */
        ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                            p, 0, length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
                put_page(p);
                svc_rdma_put_context(ctxt, 1);
                return;
        }
        atomic_inc(&xprt->sc_dma_used);
        ctxt->sge[0].lkey = xprt->sc_dma_lkey;
        ctxt->sge[0].length = length;

        /* Prepare SEND WR */
        memset(&err_wr, 0, sizeof err_wr);
        ctxt->wr_op = IB_WR_SEND;
        err_wr.wr_id = (unsigned long)ctxt;
        err_wr.sg_list = ctxt->sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;

        /* Post It */
        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
                dprintk("svcrdma: Error %d posting send for protocol error\n",
                        ret);
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
        }
}

