/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

static const char transfertypes[][12] = {
        "inline",       /* no chunks */
        "read list",    /* some argument via rdma read */
        "*read list",   /* entire request via rdma read */
        "write list",   /* some result via rdma write */
        "reply chunk"   /* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Read list size */
        maxsegs += 2;   /* segment for head and tail buffers */
        size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

        /* Minimal Read chunk size */
        size += sizeof(__be32); /* segment count */
        size += rpcrdma_segment_maxsz * sizeof(__be32);
        size += sizeof(__be32); /* list discriminator */

        dprintk("RPC:       %s: max call header size = %u\n",
                __func__, size);
        return size;
}

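/* Worked example (illustrative, not taken from the source): with
 * maxsegs = 8 and the usual protocol constants (RPCRDMA_HDRLEN_MIN is
 * 28 bytes; each Read list item is rpcrdma_readchunk_maxsz = 6 XDR
 * words; a plain segment is rpcrdma_segment_maxsz = 4 XDR words), the
 * computation above yields
 *
 *      28 + (8 + 2) * 6 * 4 + (4 + 16 + 4) = 292 bytes
 *
 * reserved for the transport header when sizing the inline buffers.
 */
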
/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Write list size */
        maxsegs += 2;   /* segment for head and tail buffers */
        size += sizeof(__be32);         /* segment count */
        size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
        size += sizeof(__be32); /* list discriminator */

        dprintk("RPC:       %s: max reply header size = %u\n",
                __func__, size);
        return size;
}

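/* Worked example (illustrative): with maxsegs = 8, the largest Reply
 * header is
 *
 *      28 + 4 + (8 + 2) * 4 * 4 + 4 = 196 bytes
 *
 * (fixed header, segment count, ten HLOO segments, list discriminator).
 */
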
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        unsigned int maxsegs = ia->ri_max_segs;

        ia->ri_max_inline_write = cdata->inline_wsize -
                                  rpcrdma_max_call_header_size(maxsegs);
        ia->ri_max_inline_read = cdata->inline_rsize -
                                 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
                                struct rpc_rqst *rqst)
{
        struct xdr_buf *xdr = &rqst->rq_snd_buf;
        unsigned int count, remaining, offset;

        if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
                return false;

        if (xdr->page_len) {
                remaining = xdr->page_len;
                offset = offset_in_page(xdr->page_base);
                count = RPCRDMA_MIN_SEND_SGES;
                while (remaining) {
                        remaining -= min_t(unsigned int,
                                           PAGE_SIZE - offset, remaining);
                        offset = 0;
                        if (++count > r_xprt->rx_ia.ri_max_send_sges)
                                return false;
                }
        }

        return true;
}

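/* Example (illustrative): a 9000-byte page list that starts 1000 bytes
 * into its first page touches three pages, so the loop above arrives
 * at RPCRDMA_MIN_SEND_SGES + 3. The Call may go inline only if the
 * device can handle at least that many Send SGEs.
 */
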
/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
                                   struct rpc_rqst *rqst)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
                     unsigned int *n)
{
        u32 remaining, page_offset;
        char *base;

        base = vec->iov_base;
        page_offset = offset_in_page(base);
        remaining = vec->iov_len;
        while (remaining) {
                seg->mr_page = NULL;
                seg->mr_offset = base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
                remaining -= seg->mr_len;
                base += seg->mr_len;
                ++seg;
                ++(*n);
                page_offset = 0;
        }
        return seg;
}

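/* Example (illustrative): a 6000-byte kvec whose iov_base sits 512
 * bytes into a 4KB page becomes two segments: 3584 bytes to the end
 * of the first page, then 2416 bytes at offset zero in the next one,
 * and *n is bumped by two.
 */
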
/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
                     unsigned int pos, enum rpcrdma_chunktype type,
                     struct rpcrdma_mr_seg *seg)
{
        unsigned long page_base;
        unsigned int len, n;
        struct page **ppages;

        n = 0;
        if (pos == 0)
                seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

        len = xdrbuf->page_len;
        ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
        page_base = offset_in_page(xdrbuf->page_base);
        while (len) {
                if (unlikely(!*ppages)) {
                        /* XXX: Certain upper layer operations do
                         *      not provide receive buffer pages.
                         */
                        *ppages = alloc_page(GFP_ATOMIC);
                        if (!*ppages)
                                return -EAGAIN;
                }
                seg->mr_page = *ppages;
                seg->mr_offset = (char *)page_base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
                len -= seg->mr_len;
                ++ppages;
                ++seg;
                ++n;
                page_base = 0;
        }

        /* When encoding a Read chunk, the tail iovec contains an
         * XDR pad and may be omitted.
         */
        if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        /* When encoding a Write chunk, some servers need to see an
         * extra segment for non-XDR-aligned Write chunks. The upper
         * layer provides space in the tail iovec that may be used
         * for this purpose.
         */
        if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        if (xdrbuf->tail[0].iov_len)
                seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
        if (unlikely(n > RPCRDMA_MAX_SEGS))
                return -EIO;
        return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_one;
        return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_zero;
        return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
        *iptr++ = cpu_to_be32(mr->mr_handle);
        *iptr++ = cpu_to_be32(mr->mr_length);
        xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        xdr_encode_rdma_segment(p, mr);
        return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
                    u32 position)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 6 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p++ = xdr_one;                 /* Item present */
        *p++ = cpu_to_be32(position);
        xdr_encode_rdma_segment(p, mr);
        return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                         struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        unsigned int pos;
        int nsegs;

        pos = rqst->rq_snd_buf.head[0].iov_len;
        if (rtype == rpcrdma_areadch)
                pos = 0;
        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
                                     rtype, seg);
        if (nsegs < 0)
                return nsegs;

        do {
                seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                   false, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);
                rpcrdma_mr_push(mr, &req->rl_registered);

                if (encode_read_segment(xdr, mr, pos) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
                r_xprt->rx_stats.read_chunk_count++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        return 0;
}

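/* Wire example (illustrative): a Read chunk registered as two MRs at
 * position 20 is emitted as
 *
 *      1, 20, H1, L1, OO1, 1, 20, H2, L2, OO2
 *
 * The terminating 0 is the "not present" discriminator that
 * rpcrdma_marshal_req() encodes after the Read list.
 */
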
/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                          struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        int nsegs, nchunks;
        __be32 *segcount;

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
                                     rqst->rq_rcv_buf.head[0].iov_len,
                                     wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                   true, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);
                rpcrdma_mr_push(mr, &req->rl_registered);

                if (encode_rdma_segment(xdr, mr) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
                r_xprt->rx_stats.write_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += mr->mr_length;
                nchunks++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        /* Update count of segments in this Write chunk */
        *segcount = cpu_to_be32(nchunks);

        return 0;
}

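/* Wire example (illustrative): a Write chunk registered as two MRs is
 * emitted as
 *
 *      1, 2, H1, L1, OO1, H2, L2, OO2
 *
 * The Write list's terminating 0 is encoded separately by
 * rpcrdma_marshal_req().
 */
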
/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                           struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        int nsegs, nchunks;
        __be32 *segcount;

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                   true, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);
                rpcrdma_mr_push(mr, &req->rl_registered);

                if (encode_rdma_segment(xdr, mr) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
                r_xprt->rx_stats.reply_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += mr->mr_length;
                nchunks++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        /* Update count of segments in the Reply chunk */
        *segcount = cpu_to_be32(nchunks);

        return 0;
}

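/* Wire example (illustrative): a Reply chunk looks like a one-chunk
 * Write list (1, N, then N HLOO segments) except that no 0 terminator
 * follows it; it is the final item in the transport header.
 */
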
/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
        struct ib_sge *sge;
        unsigned int count;

        /* The first two SGEs contain the transport header and
         * the inline buffer. These are always left mapped so
         * they can be cheaply re-used.
         */
        sge = &sc->sc_sges[2];
        for (count = sc->sc_unmap_count; count; ++sge, --count)
                ib_dma_unmap_page(ia->ri_device,
                                  sge->addr, sge->length, DMA_TO_DEVICE);

        if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
                smp_mb__after_atomic();
                wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
        }
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                        u32 len)
{
        struct rpcrdma_sendctx *sc = req->rl_sendctx;
        struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
        struct ib_sge *sge = sc->sc_sges;

        if (!rpcrdma_dma_map_regbuf(ia, rb))
                goto out_regbuf;
        sge->addr = rdmab_addr(rb);
        sge->length = len;
        sge->lkey = rdmab_lkey(rb);

        ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
                                      sge->length, DMA_TO_DEVICE);
        sc->sc_wr.num_sge++;
        return true;

out_regbuf:
        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
        return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                         struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
        struct rpcrdma_sendctx *sc = req->rl_sendctx;
        unsigned int sge_no, page_base, len, remaining;
        struct rpcrdma_regbuf *rb = req->rl_sendbuf;
        struct ib_device *device = ia->ri_device;
        struct ib_sge *sge = sc->sc_sges;
        u32 lkey = ia->ri_pd->local_dma_lkey;
        struct page *page, **ppages;

        /* The head iovec is straightforward, as it is already
         * DMA-mapped. Sync the content that has changed.
         */
        if (!rpcrdma_dma_map_regbuf(ia, rb))
                goto out_regbuf;
        sge_no = 1;
        sge[sge_no].addr = rdmab_addr(rb);
        sge[sge_no].length = xdr->head[0].iov_len;
        sge[sge_no].lkey = rdmab_lkey(rb);
        ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
                                      sge[sge_no].length, DMA_TO_DEVICE);

        /* If there is a Read chunk, the page list is being handled
         * via explicit RDMA, and thus is skipped here. However, the
         * tail iovec may include an XDR pad for the page list, as
         * well as additional content, and may not reside in the
         * same page as the head iovec.
         */
        if (rtype == rpcrdma_readch) {
                len = xdr->tail[0].iov_len;

                /* Do not include the tail if it is only an XDR pad */
                if (len < 4)
                        goto out;

                page = virt_to_page(xdr->tail[0].iov_base);
                page_base = offset_in_page(xdr->tail[0].iov_base);

                /* If the content in the page list is an odd length,
                 * xdr_write_pages() has added a pad at the beginning
                 * of the tail iovec. Force the tail's non-pad content
                 * to land at the next XDR position in the Send message.
                 */
                page_base += len & 3;
                len -= len & 3;
                goto map_tail;
        }

        /* If there is a page list present, temporarily DMA map
         * and prepare an SGE for each page to be sent.
         */
        if (xdr->page_len) {
                ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
                page_base = offset_in_page(xdr->page_base);
                remaining = xdr->page_len;
                while (remaining) {
                        sge_no++;
                        if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
                                goto out_mapping_overflow;

                        len = min_t(u32, PAGE_SIZE - page_base, remaining);
                        sge[sge_no].addr = ib_dma_map_page(device, *ppages,
                                                           page_base, len,
                                                           DMA_TO_DEVICE);
                        if (ib_dma_mapping_error(device, sge[sge_no].addr))
                                goto out_mapping_err;
                        sge[sge_no].length = len;
                        sge[sge_no].lkey = lkey;

                        sc->sc_unmap_count++;
                        ppages++;
                        remaining -= len;
                        page_base = 0;
                }
        }

        /* The tail iovec is not always constructed in the same
         * page where the head iovec resides (see, for example,
         * gss_wrap_req_priv). To neatly accommodate that case,
         * DMA map it separately.
         */
        if (xdr->tail[0].iov_len) {
                page = virt_to_page(xdr->tail[0].iov_base);
                page_base = offset_in_page(xdr->tail[0].iov_base);
                len = xdr->tail[0].iov_len;

map_tail:
                sge_no++;
                sge[sge_no].addr = ib_dma_map_page(device, page,
                                                   page_base, len,
                                                   DMA_TO_DEVICE);
                if (ib_dma_mapping_error(device, sge[sge_no].addr))
                        goto out_mapping_err;
                sge[sge_no].length = len;
                sge[sge_no].lkey = lkey;
                sc->sc_unmap_count++;
        }

out:
        sc->sc_wr.num_sge += sge_no;
        if (sc->sc_unmap_count)
                __set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
        return true;

out_regbuf:
        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
        return false;

out_mapping_overflow:
        rpcrdma_unmap_sendctx(sc);
        pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
        return false;

out_mapping_err:
        rpcrdma_unmap_sendctx(sc);
        pr_err("rpcrdma: Send mapping error\n");
        return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
                          struct rpcrdma_req *req, u32 hdrlen,
                          struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
        req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
        if (!req->rl_sendctx)
                return -ENOBUFS;
        req->rl_sendctx->sc_wr.num_sge = 0;
        req->rl_sendctx->sc_unmap_count = 0;
        req->rl_sendctx->sc_req = req;
        __clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

        if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
                return -EIO;

        if (rtype != rpcrdma_areadch)
                if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
                        return -EIO;

        return 0;
}

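/* The resulting Send WR therefore uses SGEs laid out as follows
 * (descriptive summary of the helpers above):
 *
 *      sc_sges[0]   - transport header (rl_rdmabuf, left DMA-mapped)
 *      sc_sges[1]   - RPC Call's head iovec (rl_sendbuf, left DMA-mapped)
 *      sc_sges[2..] - page list and/or tail, mapped per Send and later
 *                     unmapped by rpcrdma_unmap_sendctx()
 */
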
/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *      %0 if the RPC was sent successfully,
 *      %-ENOTCONN if the connection was lost,
 *      %-EAGAIN if not enough pages are available for on-demand reply buffer,
 *      %-ENOBUFS if no MRs are available to register chunks,
 *      %-EMSGSIZE if the transport header is too small,
 *      %-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct xdr_stream *xdr = &req->rl_stream;
        enum rpcrdma_chunktype rtype, wtype;
        bool ddp_allowed;
        __be32 *p;
        int ret;

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(xdr, &req->rl_hdrbuf,
                        req->rl_rdmabuf->rg_base);

        /* Fixed header fields */
        ret = -EMSGSIZE;
        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (!p)
                goto out_err;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

        /* When the ULP employs a GSS flavor that guarantees integrity
         * or privacy, direct data placement of individual data items
         * is not allowed.
         */
        ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
                                                RPCAUTH_AUTH_DATATOUCH);

        /*
         * Chunks needed for results?
         *
         * o If the expected result is under the inline threshold, all ops
         *   return as inline.
         * o Large read ops return data as write chunk(s), header as
         *   inline.
         * o Large non-read ops return as a single reply chunk.
         */
        if (rpcrdma_results_inline(r_xprt, rqst))
                wtype = rpcrdma_noch;
        else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
                wtype = rpcrdma_writech;
        else
                wtype = rpcrdma_replych;

        /*
         * Chunks needed for arguments?
         *
         * o If the total request is under the inline threshold, all ops
         *   are sent as inline.
         * o Large write ops transmit data as read chunk(s), header as
         *   inline.
         * o Large non-write ops are sent with the entire message as a
         *   single read chunk (protocol 0-position special case).
         *
         * This assumes that the upper layer does not present a request
         * that both has a data payload, and whose non-data arguments
         * by themselves are larger than the inline threshold.
         */
        if (rpcrdma_args_inline(r_xprt, rqst)) {
                *p++ = rdma_msg;
                rtype = rpcrdma_noch;
        } else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
                *p++ = rdma_msg;
                rtype = rpcrdma_readch;
        } else {
                r_xprt->rx_stats.nomsg_call_count++;
                *p++ = rdma_nomsg;
                rtype = rpcrdma_areadch;
        }

        /* If this is a retransmit, discard previously registered
         * chunks. Very likely the connection has been replaced,
         * so these registrations are invalid and unusable.
         */
        while (unlikely(!list_empty(&req->rl_registered))) {
                struct rpcrdma_mr *mr;

                mr = rpcrdma_mr_pop(&req->rl_registered);
                rpcrdma_mr_defer_recovery(mr);
        }

        /* This implementation supports the following combinations
         * of chunk lists in one RPC-over-RDMA Call message:
         *
         *   - Read list
         *   - Write list
         *   - Reply chunk
         *   - Read list + Reply chunk
         *
         * It might not yet support the following combinations:
         *
         *   - Read list + Write list
         *
         * It does not support the following combinations:
         *
         *   - Write list + Reply chunk
         *   - Read list + Write list + Reply chunk
         *
         * This implementation supports only a single chunk in each
         * Read or Write list. Thus for example the client cannot
         * send a Call message with a Position Zero Read chunk and a
         * regular Read chunk at the same time.
         */
        if (rtype != rpcrdma_noch) {
                ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
                if (ret)
                        goto out_err;
        }
        ret = encode_item_not_present(xdr);
        if (ret)
                goto out_err;

        if (wtype == rpcrdma_writech) {
                ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
                if (ret)
                        goto out_err;
        }
        ret = encode_item_not_present(xdr);
        if (ret)
                goto out_err;

        if (wtype != rpcrdma_replych)
                ret = encode_item_not_present(xdr);
        else
                ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
        if (ret)
                goto out_err;

        trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

        ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
                                        &rqst->rq_snd_buf, rtype);
        if (ret)
                goto out_err;
        return 0;

out_err:
        if (ret != -ENOBUFS) {
                pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
                r_xprt->rx_stats.failed_marshal_count++;
        }
        return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
        unsigned long fixup_copy_count;
        int i, npages, curlen;
        char *destp;
        struct page **ppages;
        int page_base;

        /* The head iovec is redirected to the RPC reply message
         * in the receive buffer, to avoid a memcopy.
         */
        rqst->rq_rcv_buf.head[0].iov_base = srcp;
        rqst->rq_private_buf.head[0].iov_base = srcp;

        /* The contents of the receive buffer that follow
         * head.iov_len bytes are copied into the page list.
         */
        curlen = rqst->rq_rcv_buf.head[0].iov_len;
        if (curlen > copy_len)
                curlen = copy_len;
        trace_xprtrdma_fixup(rqst, copy_len, curlen);
        srcp += curlen;
        copy_len -= curlen;

        ppages = rqst->rq_rcv_buf.pages +
                (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
        page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
        fixup_copy_count = 0;
        if (copy_len && rqst->rq_rcv_buf.page_len) {
                int pagelist_len;

                pagelist_len = rqst->rq_rcv_buf.page_len;
                if (pagelist_len > copy_len)
                        pagelist_len = copy_len;
                npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
                for (i = 0; i < npages; i++) {
                        curlen = PAGE_SIZE - page_base;
                        if (curlen > pagelist_len)
                                curlen = pagelist_len;

                        trace_xprtrdma_fixup_pg(rqst, i, srcp,
                                                copy_len, curlen);
                        destp = kmap_atomic(ppages[i]);
                        memcpy(destp + page_base, srcp, curlen);
                        flush_dcache_page(ppages[i]);
                        kunmap_atomic(destp);
                        srcp += curlen;
                        copy_len -= curlen;
                        fixup_copy_count += curlen;
                        pagelist_len -= curlen;
                        if (!pagelist_len)
                                break;
                        page_base = 0;
                }

                /* Implicit padding for the last segment in a Write
                 * chunk is inserted inline at the front of the tail
                 * iovec. The upper layer ignores the content of
                 * the pad. Simply ensure inline content in the tail
                 * that follows the Write chunk is properly aligned.
                 */
                if (pad)
                        srcp -= pad;
        }

        /* The tail iovec is redirected to the remaining data
         * in the receive buffer, to avoid a memcopy.
         */
        if (copy_len || pad) {
                rqst->rq_rcv_buf.tail[0].iov_base = srcp;
                rqst->rq_private_buf.tail[0].iov_base = srcp;
        }

        return fixup_copy_count;
}

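/* Example (illustrative): for a reply carrying 120 bytes of head, 8192
 * bytes destined for the page list, and a 4-byte tail, only the 8192
 * page-list bytes are memcopied; the head and tail iovecs are simply
 * redirected into the receive buffer, so the function returns 8192.
 */
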
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        if (rep->rr_proc != rdma_msg)
                return false;

        /* Peek at stream contents without advancing. */
        p = xdr_inline_decode(xdr, 0);

        /* Chunk lists */
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;

        /* RPC header */
        if (*p++ != rep->rr_xid)
                return false;
        if (*p != cpu_to_be32(RPC_CALL))
                return false;

        /* Now that we are sure this is a backchannel call,
         * advance to the RPC header.
         */
        p = xdr_inline_decode(xdr, 3 * sizeof(*p));
        if (unlikely(!p))
                goto out_short;

        rpcrdma_bc_receive_call(r_xprt, rep);
        return true;

out_short:
        pr_warn("RPC/RDMA short backward direction call\n");
        if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
                xprt_disconnect_done(&r_xprt->rx_xprt);
        return true;
}
#else   /* CONFIG_SUNRPC_BACKCHANNEL */
{
        return false;
}
#endif  /* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
        u32 handle;
        u64 offset;
        __be32 *p;

        p = xdr_inline_decode(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        handle = be32_to_cpup(p++);
        *length = be32_to_cpup(p++);
        xdr_decode_hyper(p, &offset);

        trace_xprtrdma_decode_seg(handle, *length, offset);
        return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
        u32 segcount, seglength;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        segcount = be32_to_cpup(p);
        while (segcount--) {
                if (decode_rdma_segment(xdr, &seglength))
                        return -EIO;
                *length += seglength;
        }

        return 0;
}

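/* Example (illustrative): decoding the wire words
 *
 *      2, H1, 1024, OO1, H2, 512, OO2
 *
 * sets *length to 1536, the total number of bytes the responder
 * claims to have written into this chunk.
 */
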
/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;
        if (unlikely(*p != xdr_zero))
                return -EIO;
        return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
        u32 chunklen;
        bool first;
        __be32 *p;

        *length = 0;
        first = true;
        do {
                p = xdr_inline_decode(xdr, sizeof(*p));
                if (unlikely(!p))
                        return -EIO;
                if (*p == xdr_zero)
                        break;
                if (!first)
                        return -EIO;

                if (decode_write_chunk(xdr, &chunklen))
                        return -EIO;
                *length += chunklen;
                first = false;
        } while (true);
        return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        if (*p != xdr_zero)
                if (decode_write_chunk(xdr, length))
                        return -EIO;
        return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                   struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk, rpclen;
        char *base;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_MSG sanity checks */
        if (unlikely(replychunk))
                return -EIO;

        /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
        base = (char *)xdr_inline_decode(xdr, 0);
        rpclen = xdr_stream_remaining(xdr);
        r_xprt->rx_stats.fixup_copy_count +=
                rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

        r_xprt->rx_stats.total_rdma_reply += writelist;
        return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_NOMSG sanity checks */
        if (unlikely(writelist))
                return -EIO;
        if (unlikely(!replychunk))
                return -EIO;

        /* Reply chunk buffer already is the reply vector */
        r_xprt->rx_stats.total_rdma_reply += replychunk;
        return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                     struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        switch (*p) {
        case err_vers:
                p = xdr_inline_decode(xdr, 2 * sizeof(*p));
                if (!p)
                        break;
                dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
                        rqst->rq_task->tk_pid, __func__,
                        be32_to_cpup(p), be32_to_cpu(*(p + 1)));
                break;
        case err_chunk:
                dprintk("RPC: %5u: %s: server reports header decoding error\n",
                        rqst->rq_task->tk_pid, __func__);
                break;
        default:
                dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
                        rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
        }

        r_xprt->rx_stats.bad_reply_count++;
        return -EREMOTEIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpc_rqst *rqst = rep->rr_rqst;
        unsigned long cwnd;
        int status;

        xprt->reestablish_timeout = 0;

        switch (rep->rr_proc) {
        case rdma_msg:
                status = rpcrdma_decode_msg(r_xprt, rep, rqst);
                break;
        case rdma_nomsg:
                status = rpcrdma_decode_nomsg(r_xprt, rep);
                break;
        case rdma_error:
                status = rpcrdma_decode_error(r_xprt, rep, rqst);
                break;
        default:
                status = -EIO;
        }
        if (status < 0)
                goto out_badheader;

out:
        spin_lock(&xprt->recv_lock);
        cwnd = xprt->cwnd;
        xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
        if (xprt->cwnd > cwnd)
                xprt_release_rqst_cong(rqst->rq_task);

        xprt_complete_rqst(rqst->rq_task, status);
        xprt_unpin_rqst(rqst);
        spin_unlock(&xprt->recv_lock);
        return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
        trace_xprtrdma_reply_hdr(rep);
        r_xprt->rx_stats.bad_reply_count++;
        status = -EIO;
        goto out;
}

void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        /* Invalidate and unmap the data payloads before waking
         * the waiting application. This guarantees the memory
         * regions are properly fenced from the server before the
         * application accesses the data. It also ensures proper
         * send flow control: waking the next RPC waits until this
         * RPC has relinquished all its Send Queue entries.
         */
        if (!list_empty(&req->rl_registered))
                r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
                                                    &req->rl_registered);

        /* Ensure that any DMA mapped pages associated with
         * the Send of the RPC Call have been unmapped before
         * allowing the RPC to complete. This protects argument
         * memory not controlled by the RPC client from being
         * re-used before we're done with it.
         */
        if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
                r_xprt->rx_stats.reply_waits_for_send++;
                out_of_line_wait_on_bit(&req->rl_flags,
                                        RPCRDMA_REQ_F_TX_RESOURCES,
                                        bit_wait,
                                        TASK_UNINTERRUPTIBLE);
        }
}

/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
        struct rpcrdma_rep *rep =
                        container_of(work, struct rpcrdma_rep, rr_work);
        struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

        trace_xprtrdma_defer_cmp(rep);
        if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
                r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
        rpcrdma_release_rqst(r_xprt, req);
        rpcrdma_complete_rqst(rep);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        u32 credits;
        __be32 *p;

        if (rep->rr_hdrbuf.head[0].iov_len == 0)
                goto out_badstatus;

        xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
                        rep->rr_hdrbuf.head[0].iov_base);

        /* Fixed transport header fields */
        p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
        if (unlikely(!p))
                goto out_shortreply;
        rep->rr_xid = *p++;
        rep->rr_vers = *p++;
        credits = be32_to_cpu(*p++);
        rep->rr_proc = *p++;

        if (rep->rr_vers != rpcrdma_version)
                goto out_badversion;

        if (rpcrdma_is_bcall(r_xprt, rep))
                return;

        /* Match incoming rpcrdma_rep to an rpcrdma_req to
         * get context for handling any incoming chunks.
         */
        spin_lock(&xprt->recv_lock);
        rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
        if (!rqst)
                goto out_norqst;
        xprt_pin_rqst(rqst);

        if (credits == 0)
                credits = 1;    /* don't deadlock */
        else if (credits > buf->rb_max_requests)
                credits = buf->rb_max_requests;
        buf->rb_credits = credits;

        spin_unlock(&xprt->recv_lock);

        req = rpcr_to_rdmar(rqst);
        req->rl_reply = rep;
        rep->rr_rqst = rqst;
        clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

        trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

        queue_work(rpcrdma_receive_wq, &rep->rr_work);
        return;

out_badstatus:
        rpcrdma_recv_buffer_put(rep);
        if (r_xprt->rx_ep.rep_connected == 1) {
                r_xprt->rx_ep.rep_connected = -EIO;
                rpcrdma_conn_func(&r_xprt->rx_ep);
        }
        return;

out_badversion:
        trace_xprtrdma_reply_vers(rep);
        goto repost;

/* The RPC transaction has already been terminated, or the header
 * is corrupt.
 */
out_norqst:
        spin_unlock(&xprt->recv_lock);
        trace_xprtrdma_reply_rqst(rep);
        goto repost;

out_shortreply:
        trace_xprtrdma_reply_short(rep);

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
        r_xprt->rx_stats.bad_reply_count++;
        if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
                rpcrdma_recv_buffer_put(rep);
}
