TOMOYO Linux Cross Reference
Linux/net/sunrpc/xdr.c

/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
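/*
 * xdr_encode_netobj
 * @p: current position in the XDR buffer
 * @obj: netobj (length-prefixed opaque data) to encode
 *
 * Writes the object length followed by its data, zeroing the
 * trailing padding bytes of the final word.  Returns the new
 * buffer position.
 */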
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
        unsigned int    quadlen = XDR_QUADLEN(obj->len);

        p[quadlen] = 0;         /* zero trailing bytes */
        *p++ = htonl(obj->len);
        memcpy(p, obj->data, obj->len);
        return p + XDR_QUADLEN(obj->len);
}

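/*
 * xdr_decode_netobj_fixed
 * @p: current position in the XDR buffer
 * @obj: destination buffer
 * @len: expected object length in bytes
 *
 * Decodes a netobj whose length must equal @len, copying the data
 * into @obj.  Returns the new position, or NULL on length mismatch.
 */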
u32 *
xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len)
{
        if (ntohl(*p++) != len)
                return NULL;
        memcpy(obj, p, len);
        return p + XDR_QUADLEN(len);
}

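/*
 * xdr_decode_netobj
 * @p: current position in the XDR buffer
 * @obj: netobj to fill in
 *
 * Decodes a netobj in place: @obj->data is left pointing into the
 * buffer rather than being copied.  Returns the new position, or
 * NULL if the encoded length exceeds XDR_MAX_NETOBJ.
 */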
u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
        unsigned int    len;

        if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
                return NULL;
        obj->len  = len;
        obj->data = (u8 *) p;
        return p + XDR_QUADLEN(len);
}

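/*
 * xdr_encode_array
 * @p: current position in the XDR buffer
 * @array: data to encode
 * @len: length of @array in bytes
 *
 * Encodes a variable-length opaque array: a length word, the data,
 * and zero padding up to the next word boundary.
 */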
u32 *
xdr_encode_array(u32 *p, const char *array, unsigned int len)
{
        int quadlen = XDR_QUADLEN(len);

        p[quadlen] = 0;
        *p++ = htonl(len);
        memcpy(p, array, len);
        return p + quadlen;
}

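/*
 * xdr_encode_string
 * @p: current position in the XDR buffer
 * @string: NUL-terminated string to encode
 *
 * Encodes a string as an opaque array of strlen(@string) bytes.
 */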
u32 *
xdr_encode_string(u32 *p, const char *string)
{
        return xdr_encode_array(p, string, strlen(string));
}

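/*
 * xdr_decode_string
 * @p: current position in the XDR buffer
 * @sp: where to store a pointer to the decoded string
 * @lenp: optional return of the string length
 * @maxlen: maximum permitted string length
 *
 * Decodes a string and NUL-terminates it in place.  When the length
 * is a multiple of four, the data is first shifted back one word so
 * the terminator does not overwrite the XDR data that follows.
 * Returns the new position, or NULL if the string exceeds @maxlen.
 */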
u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
        unsigned int    len;
        char            *string;

        if ((len = ntohl(*p++)) > maxlen)
                return NULL;
        if (lenp)
                *lenp = len;
        if ((len % 4) != 0) {
                string = (char *) p;
        } else {
                string = (char *) (p - 1);
                memmove(string, p, len);
        }
        string[len] = '\0';
        *sp = string;
        return p + XDR_QUADLEN(len);
}

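/*
 * xdr_decode_string_inplace
 * @p: current position in the XDR buffer
 * @sp: where to store a pointer to the string data
 * @lenp: return of the string length
 * @maxlen: maximum permitted string length
 *
 * Like xdr_decode_string(), but neither moves the data nor adds a
 * NUL terminator; the caller must use *@lenp to bound the string.
 */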
u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
        unsigned int    len;

        if ((len = ntohl(*p++)) > maxlen)
                return NULL;
        *lenp = len;
        *sp = (char *) p;
        return p + XDR_QUADLEN(len);
}

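/*
 * xdr_encode_pages
 * @xdr: xdr_buf to fill in
 * @pages: page array holding the data
 * @base: offset of the data within the first page
 * @len: length of the page data in bytes
 *
 * Attaches @pages to @xdr as its page data.  If @len is not
 * word-aligned, the tail iovec is pointed at a static run of zero
 * bytes to supply the XDR padding.
 */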
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
                 unsigned int len)
{
        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        if (len & 3) {
                struct iovec *iov = xdr->tail;
                unsigned int pad = 4 - (len & 3);

                iov->iov_base = (void *) "\0\0\0";
                iov->iov_len  = pad;
                len += pad;
        }
        xdr->len += len;
}

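/*
 * xdr_inline_pages
 * @xdr: xdr_buf to fill in
 * @offset: offset in the head iovec at which the page data begins
 * @pages: page array that will hold the data
 * @base: offset of the data within the first page
 * @len: length of the page data in bytes
 *
 * Splits the head iovec at @offset and inserts the page array
 * between the resulting head and tail, so that bulk data can land
 * directly in the caller's pages.
 */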
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
                 struct page **pages, unsigned int base, unsigned int len)
{
        struct iovec *head = xdr->head;
        struct iovec *tail = xdr->tail;
        char *buf = (char *)head->iov_base;
        unsigned int buflen = head->iov_len;

        head->iov_len  = offset;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        tail->iov_base = buf + offset;
        tail->iov_len = buflen - offset;

        xdr->len += len;
}

/*
 * Realign the iovec if the server missed out some reply elements
 * (such as post-op attributes,...)
 * Note: This is a simple implementation that assumes that
 *            len <= iov->iov_len !!!
 *       The RPC header (assumed to be the 1st element in the iov array)
 *            is not shifted.
 */
void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
{
        struct iovec *pvec;

        for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
                struct iovec *svec = pvec - 1;

                if (len > pvec->iov_len) {
                        printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
                        return;
                }
                memmove((char *)pvec->iov_base + len, pvec->iov_base,
                        pvec->iov_len - len);

                if (len > svec->iov_len) {
                        printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
                        return;
                }
                memcpy(pvec->iov_base,
                       (char *)svec->iov_base + svec->iov_len - len, len);
        }
}

/*
 * Map a struct xdr_buf into an iovec array.
 */
int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, size_t base)
{
        struct iovec    *iov = iov_base;
        struct page     **ppage = xdr->pages;
        unsigned int    len, pglen = xdr->page_len;

        len = xdr->head[0].iov_len;
        if (base < len) {
                iov->iov_len = len - base;
                iov->iov_base = (char *)xdr->head[0].iov_base + base;
                iov++;
                base = 0;
        } else
                base -= len;

        if (pglen == 0)
                goto map_tail;
        if (base >= pglen) {
                base -= pglen;
                goto map_tail;
        }
        if (base || xdr->page_base) {
                pglen -= base;
                base  += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                base &= ~PAGE_CACHE_MASK;
        }
        do {
                len = PAGE_CACHE_SIZE;
                iov->iov_base = kmap(*ppage);
                if (base) {
                        iov->iov_base += base;
                        len -= base;
                        base = 0;
                }
                if (pglen < len)
                        len = pglen;
                iov->iov_len = len;
                iov++;
                ppage++;
        } while ((pglen -= len) != 0);
map_tail:
        if (xdr->tail[0].iov_len) {
                iov->iov_len = xdr->tail[0].iov_len - base;
                iov->iov_base = (char *)xdr->tail[0].iov_base + base;
                iov++;
        }
        return (iov - iov_base);
}

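/*
 * xdr_kunmap
 * @xdr: xdr_buf whose pages were mapped by xdr_kmap
 * @base: the same base offset that was passed to xdr_kmap()
 *
 * Flushes the dcache for, and unmaps, the pages previously mapped
 * by xdr_kmap().
 */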
void xdr_kunmap(struct xdr_buf *xdr, size_t base)
{
        struct page     **ppage = xdr->pages;
        unsigned int    pglen = xdr->page_len;

        if (!pglen)
                return;
        if (base > xdr->head[0].iov_len)
                base -= xdr->head[0].iov_len;
        else
                base = 0;

        if (base >= pglen)
                return;
        if (base || xdr->page_base) {
                pglen -= base;
                base  += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                /* Note: The offset means that the length of the first
                 * page is really (PAGE_CACHE_SIZE - (base & ~PAGE_CACHE_MASK)).
                 * In order to avoid an extra test inside the loop,
                 * we bump pglen here, and just subtract PAGE_CACHE_SIZE... */
                pglen += base & ~PAGE_CACHE_MASK;
        }
        for (;;) {
                flush_dcache_page(*ppage);
                kunmap(*ppage);
                if (pglen <= PAGE_CACHE_SIZE)
                        break;
                pglen -= PAGE_CACHE_SIZE;
                ppage++;
        }
}

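/*
 * xdr_partial_copy_from_skb
 * @xdr: destination xdr_buf
 * @base: offset in @xdr at which to start copying
 * @desc: socket buffer descriptor to read from
 * @copy_actor: routine that copies data out of the skb
 *
 * Copies data from a socket buffer into the head, pages and tail of
 * @xdr, stopping early if @copy_actor copies fewer bytes than
 * requested or the skb data is exhausted.
 */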
void
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
                          skb_reader_t *desc,
                          skb_read_actor_t copy_actor)
{
        struct page     **ppage = xdr->pages;
        unsigned int    len, pglen = xdr->page_len;
        int             ret;

        len = xdr->head[0].iov_len;
        if (base < len) {
                len -= base;
                ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
                if (ret != len || !desc->count)
                        return;
                base = 0;
        } else
                base -= len;

        if (pglen == 0)
                goto copy_tail;
        if (base >= pglen) {
                base -= pglen;
                goto copy_tail;
        }
        if (base || xdr->page_base) {
                pglen -= base;
                base  += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                base &= ~PAGE_CACHE_MASK;
        }
        do {
                char *kaddr;

                len = PAGE_CACHE_SIZE;
                kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
                if (base) {
                        len -= base;
                        if (pglen < len)
                                len = pglen;
                        ret = copy_actor(desc, kaddr + base, len);
                        base = 0;
                } else {
                        if (pglen < len)
                                len = pglen;
                        ret = copy_actor(desc, kaddr, len);
                }
                flush_dcache_page(*ppage);
                kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
                if (ret != len || !desc->count)
                        return;
                ppage++;
        } while ((pglen -= len) != 0);
copy_tail:
        len = xdr->tail[0].iov_len;
        if (base < len)
                copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
}

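/*
 * xdr_sendpages
 * @sock: socket to transmit on
 * @addr: destination address, or NULL on a connected socket
 * @addrlen: size of @addr
 * @xdr: xdr_buf to transmit
 * @base: offset in @xdr at which to start sending
 * @msgflags: MSG_* flags for the transmission
 *
 * Sends the head and tail with sock_sendmsg() and the page data
 * with the socket's sendpage operation, falling back to
 * sock_no_sendpage() when the socket has no sendpage op or the
 * page lives in highmem.  Returns the number of bytes sent, or a
 * negative error if nothing could be sent.
 */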
int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
                struct xdr_buf *xdr, unsigned int base, int msgflags)
{
        struct page **ppage = xdr->pages;
        unsigned int len, pglen = xdr->page_len;
        int err, ret = 0;
        ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
        mm_segment_t oldfs;

        len = xdr->head[0].iov_len;
        if (base < len || (addr != NULL && base == 0)) {
                struct iovec iov = {
                        .iov_base = xdr->head[0].iov_base + base,
                        .iov_len  = len - base,
                };
                struct msghdr msg = {
                        .msg_name    = addr,
                        .msg_namelen = addrlen,
                        .msg_flags   = msgflags,
                };

                if (iov.iov_len != 0) {
                        msg.msg_iov     = &iov;
                        msg.msg_iovlen  = 1;
                }
                if (xdr->len > len)
                        msg.msg_flags |= MSG_MORE;
                oldfs = get_fs(); set_fs(get_ds());
                err = sock_sendmsg(sock, &msg, iov.iov_len);
                set_fs(oldfs);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != iov.iov_len)
                        goto out;
                base = 0;
        } else
                base -= len;

        if (pglen == 0)
                goto copy_tail;
        if (base >= pglen) {
                base -= pglen;
                goto copy_tail;
        }
        if (base || xdr->page_base) {
                pglen -= base;
                base  += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                base &= ~PAGE_CACHE_MASK;
        }

        sendpage = sock->ops->sendpage ? : sock_no_sendpage;
        do {
                int flags = msgflags;

                len = PAGE_CACHE_SIZE;
                if (base)
                        len -= base;
                if (pglen < len)
                        len = pglen;

                if (pglen != len || xdr->tail[0].iov_len != 0)
                        flags |= MSG_MORE;

                /* Hmm... We might be dealing with highmem pages */
                if (PageHighMem(*ppage))
                        sendpage = sock_no_sendpage;
                err = sendpage(sock, *ppage, base, len, flags);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != len)
                        goto out;
                base = 0;
                ppage++;
        } while ((pglen -= len) != 0);
copy_tail:
        len = xdr->tail[0].iov_len;
        if (base < len) {
                struct iovec iov = {
                        .iov_base = xdr->tail[0].iov_base + base,
                        .iov_len  = len - base,
                };
                struct msghdr msg = {
                        .msg_iov     = &iov,
                        .msg_iovlen  = 1,
                        .msg_flags   = msgflags,
                };
                oldfs = get_fs(); set_fs(get_ds());
                err = sock_sendmsg(sock, &msg, iov.iov_len);
                set_fs(oldfs);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
        }
out:
        return ret;
}

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
                size_t pgfrom_base, size_t len)
{
        struct page **pgfrom, **pgto;
        char *vfrom, *vto;
        size_t copy;

        BUG_ON(pgto_base <= pgfrom_base);

        pgto_base += len;
        pgfrom_base += len;

        pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

        pgto_base &= ~PAGE_CACHE_MASK;
        pgfrom_base &= ~PAGE_CACHE_MASK;

        do {
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
                        pgto_base = PAGE_CACHE_SIZE;
                        pgto--;
                }
                if (pgfrom_base == 0) {
                        pgfrom_base = PAGE_CACHE_SIZE;
                        pgfrom--;
                }

                copy = len;
                if (copy > pgto_base)
                        copy = pgto_base;
                if (copy > pgfrom_base)
                        copy = pgfrom_base;
                pgto_base -= copy;
                pgfrom_base -= copy;

                vto = kmap_atomic(*pgto, KM_USER0);
                vfrom = kmap_atomic(*pgfrom, KM_USER1);
                memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
                kunmap_atomic(vfrom, KM_USER1);
                kunmap_atomic(vto, KM_USER0);

        } while ((len -= copy) != 0);
}

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
        struct page **pgto;
        char *vto;
        size_t copy;

        pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        do {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vto = kmap_atomic(*pgto, KM_USER0);
                memcpy(vto + pgbase, p, copy);
                kunmap_atomic(vto, KM_USER0);

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        pgbase = 0;
                        pgto++;
                }
                p += copy;

        } while ((len -= copy) != 0);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
        struct page **pgfrom;
        char *vfrom;
        size_t copy;

        pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        do {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vfrom = kmap_atomic(*pgfrom, KM_USER0);
                memcpy(p, vfrom + pgbase, copy);
                kunmap_atomic(vfrom, KM_USER0);

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        pgbase = 0;
                        pgfrom++;
                }
                p += copy;

        } while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header iovec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
        struct iovec *head, *tail;
        size_t copy, offs;
        unsigned int pglen = buf->page_len;

        tail = buf->tail;
        head = buf->head;
        BUG_ON (len > head->iov_len);

        /* Shift the tail first */
        if (tail->iov_len != 0) {
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove((char *)tail->iov_base + len,
                                        tail->iov_base, copy);
                }
                /* Copy from the inlined pages into the tail */
                copy = len;
                if (copy > pglen)
                        copy = pglen;
                offs = len - copy;
                if (offs >= tail->iov_len)
                        copy = 0;
                else if (copy > tail->iov_len - offs)
                        copy = tail->iov_len - offs;
                if (copy != 0)
                        _copy_from_pages((char *)tail->iov_base + offs,
                                        buf->pages,
                                        buf->page_base + pglen + offs - len,
                                        copy);
                /* Do we also need to copy data from the head into the tail ? */
                if (len > pglen) {
                        offs = copy = len - pglen;
                        if (copy > tail->iov_len)
                                copy = tail->iov_len;
                        memcpy(tail->iov_base,
                                        (char *)head->iov_base +
                                        head->iov_len - offs,
                                        copy);
                }
        }
        /* Now handle pages */
        if (pglen != 0) {
                if (pglen > len)
                        _shift_data_right_pages(buf->pages,
                                        buf->page_base + len,
                                        buf->page_base,
                                        pglen - len);
                copy = len;
                if (len > pglen)
                        copy = pglen;
                _copy_to_pages(buf->pages, buf->page_base,
                                (char *)head->iov_base + head->iov_len - len,
                                copy);
        }
        head->iov_len -= len;
        buf->len -= len;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
        struct iovec *tail;
        size_t copy;
        char *p;
        unsigned int pglen = buf->page_len;

        tail = buf->tail;
        BUG_ON (len > pglen);

        /* Shift the tail first */
        if (tail->iov_len != 0) {
                p = (char *)tail->iov_base + len;
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove(p, tail->iov_base, copy);
                } else
                        buf->len -= len;
                /* Copy from the inlined pages into the tail */
                copy = len;
                if (copy > tail->iov_len)
                        copy = tail->iov_len;
                _copy_from_pages((char *)tail->iov_base,
                                buf->pages, buf->page_base + pglen - len,
                                copy);
        }
        buf->page_len -= len;
        buf->len -= len;
}

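/*
 * xdr_shift_buf
 * @buf: xdr_buf
 * @len: bytes to shift
 *
 * Shifts the buffer contents up by @len bytes by shrinking the
 * head iovec; see xdr_shrink_bufhead().
 */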
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
        xdr_shrink_bufhead(buf, len);
}

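/*
 * xdr_write_pages
 * @xdr: xdr_stream being encoded
 * @pages: page array holding the data
 * @base: offset of the data within the first page
 * @len: length of the page data in bytes
 *
 * Inserts @pages into the stream's xdr_buf and sets the tail iovec
 * at the current encode position.  When @len is not word-aligned,
 * a zero word is emitted so the tail supplies the XDR padding.
 */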
void
xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
                 unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct iovec *iov = buf->tail;
        buf->pages = pages;
        buf->page_base = base;
        buf->page_len = len;

        iov->iov_base = (char *)xdr->p;
        iov->iov_len  = 0;
        xdr->iov = iov;

        if (len & 3) {
                unsigned int pad = 4 - (len & 3);

                BUG_ON(xdr->p >= xdr->end);
                iov->iov_base = (char *)xdr->p + (len & 3);
                iov->iov_len  += pad;
                len += pad;
                *xdr->p++ = 0;
        }
        buf->len += len;
}

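/*
 * xdr_read_pages
 * @xdr: xdr_stream being decoded
 * @len: number of bytes of page data expected
 *
 * Aligns the page data with the current decode position: any head
 * data already consumed is shifted away, excess page data is moved
 * into the tail, and the stream is repositioned to continue
 * decoding from the tail, past the XDR padding.
 */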
void
xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct iovec *iov;
        ssize_t shift;
        int padding;

        /* Realign pages to current pointer position */
        iov  = buf->head;
        shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
        if (shift > 0)
                xdr_shrink_bufhead(buf, shift);

        /* Truncate page data and move it into the tail */
        if (buf->page_len > len)
                xdr_shrink_pagelen(buf, buf->page_len - len);
        padding = (XDR_QUADLEN(len) << 2) - len;
        xdr->iov = iov = buf->tail;
        xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
        xdr->end = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
}