/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

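/*
 * nfs_pgarray_set - set up the page vector for an nfs_page_array
 * @p: page array to initialise
 * @pagecount: number of pages the request will cover
 *
 * Uses the small embedded page_array when @pagecount fits, otherwise
 * falls back to a kcalloc()ed vector. Returns false (and resets
 * npages to 0) if that allocation fails.
 */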
bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
        p->npages = pagecount;
        if (pagecount <= ARRAY_SIZE(p->page_array))
                p->pagevec = p->page_array;
        else {
                p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
                if (!p->pagevec)
                        p->npages = 0;
        }
        return p->pagevec != NULL;
}

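/*
 * nfs_pgheader_init - initialise an I/O header from a pageio descriptor
 * @desc: descriptor whose first request seeds the header
 * @hdr: header to initialise
 * @release: callback invoked when the header is released
 *
 * Copies the inode, credential, starting offset, byte count and
 * direct-I/O/layout state from @desc, then gives the completion ops a
 * chance to do their own setup via ->init_hdr().
 */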
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr,
                       void (*release)(struct nfs_pgio_header *hdr))
{
        hdr->req = nfs_list_entry(desc->pg_list.next);
        hdr->inode = desc->pg_inode;
        hdr->cred = hdr->req->wb_context->cred;
        hdr->io_start = req_offset(hdr->req);
        hdr->good_bytes = desc->pg_count;
        hdr->dreq = desc->pg_dreq;
        hdr->layout_private = desc->pg_layout_private;
        hdr->release = release;
        hdr->completion_ops = desc->pg_completion_ops;
        if (hdr->completion_ops->init_hdr)
                hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

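/*
 * nfs_set_pgio_error - record an I/O error against a header
 * @hdr: header to update
 * @error: error code to record
 * @pos: file offset at which the failure occurred
 *
 * Under hdr->lock, remembers the first error and trims good_bytes so
 * that it never extends past the lowest failing offset; the EOF flag
 * is cleared since the transfer did not complete.
 */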
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
        spin_lock(&hdr->lock);
        if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
            || pos < hdr->io_start + hdr->good_bytes) {
                clear_bit(NFS_IOHDR_EOF, &hdr->flags);
                hdr->good_bytes = pos - hdr->io_start;
                hdr->error = error;
        }
        spin_unlock(&hdr->lock);
}

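/*
 * Slab allocation helpers for struct nfs_page. GFP_NOIO is used so
 * that memory reclaim triggered by this allocation cannot recurse
 * back into filesystem I/O.
 */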
static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
        if (p)
                INIT_LIST_HEAD(&p->wb_list);
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

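/*
 * Per-lock-context I/O counter. Each outstanding nfs_page takes a
 * reference with nfs_iocounter_inc(); when the last reference is
 * dropped, nfs_iocounter_dec() clears NFS_IO_INPROGRESS and wakes any
 * waiters. __nfs_iocounter_wait() sleeps killably until the count
 * reaches zero.
 */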
static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
        atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
        if (atomic_dec_and_test(&c->io_count)) {
                clear_bit(NFS_IO_INPROGRESS, &c->flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
        }
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
        wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
        DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
        int ret = 0;

        do {
                prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
                set_bit(NFS_IO_INPROGRESS, &c->flags);
                if (atomic_read(&c->io_count) == 0)
                        break;
                ret = nfs_wait_bit_killable(&c->flags);
        } while (atomic_read(&c->io_count) != 0);
        finish_wait(wq, &q.wait);
        return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * Returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once io_count has dropped to 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
        if (atomic_read(&c->io_count) == 0)
                return 0;
        return __nfs_iocounter_wait(c);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page on which the read or write will be performed
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must also ensure that it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_page         *req;
        struct nfs_lock_context *l_ctx;

        if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
                return ERR_PTR(-EBADF);
        /* try to allocate the request struct */
        req = nfs_page_alloc();
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        /* get lock context early so we can deal with alloc failures */
        l_ctx = nfs_get_lock_context(ctx);
        if (IS_ERR(l_ctx)) {
                nfs_page_free(req);
                return ERR_CAST(l_ctx);
        }
        req->wb_lock_context = l_ctx;
        nfs_iocounter_inc(&l_ctx->io_count);

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. */
        req->wb_page    = page;
        req->wb_index   = page_file_index(page);
        page_cache_get(page);
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
        req->wb_context = get_nfs_open_context(ctx);
        kref_init(&req->wb_kref);
        return req;
}

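/*
 * Illustrative sketch (not part of this file): a typical caller in the
 * read or write path creates a request for a locked page and hands it
 * to a pageio descriptor. The surrounding locals (ctx, inode, page,
 * len, pgio) are hypothetical names for the caller's state.
 *
 *	req = nfs_create_request(ctx, inode, page, 0, len);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	if (!nfs_pageio_add_request(pgio, req)) {
 *		nfs_release_request(req);
 *		return pgio->pg_error;
 *	}
 */
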
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to the request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to the request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
        nfs_unlock_request(req);
        nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request whose resources are to be released
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        struct nfs_open_context *ctx = req->wb_context;
        struct nfs_lock_context *l_ctx = req->wb_lock_context;

        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
                nfs_iocounter_dec(&l_ctx->io_count);
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
        if (ctx != NULL) {
                put_nfs_open_context(ctx);
                req->wb_context = NULL;
        }
}


/*
 * nfs_free_request - kref release callback for an nfs_page
 * @kref: kref embedded in the request
 *
 * Drops the page, lock context and open context references, then frees
 * the request itself. Called when the last reference is put.
 */
static void nfs_free_request(struct kref *kref)
{
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

        /* Release struct file and open context */
        nfs_clear_request(req);
        nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
        kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
        io_schedule();
        return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        return wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_uninterruptible,
                        TASK_UNINTERRUPTIBLE);
}

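/*
 * nfs_generic_pg_test - default coalescing test
 * @desc: pageio descriptor
 * @prev: previous request on the list (unused here)
 * @req: request being considered
 *
 * Allows @req to be coalesced as long as the accumulated byte count
 * stays within the descriptor's block size. Refuses to coalesce at all
 * when the block size is smaller than a page (see the FIXME below).
 */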
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
        /*
         * FIXME: ideally we should be able to coalesce all requests
         * that are not block boundary aligned, but currently this
         * is problematic for the case of bsize < PAGE_CACHE_SIZE,
         * since nfs_flush_multi and nfs_pagein_multi assume you
         * can have only one struct nfs_page.
         */
        if (desc->pg_bsize < PAGE_SIZE)
                return false;

        return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     const struct nfs_pageio_ops *pg_ops,
                     const struct nfs_pgio_completion_ops *compl_ops,
                     size_t bsize,
                     int io_flags)
{
        INIT_LIST_HEAD(&desc->pg_list);
        desc->pg_bytes_written = 0;
        desc->pg_count = 0;
        desc->pg_bsize = bsize;
        desc->pg_base = 0;
        desc->pg_moreio = 0;
        desc->pg_recoalesce = 0;
        desc->pg_inode = inode;
        desc->pg_ops = pg_ops;
        desc->pg_completion_ops = compl_ops;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
        desc->pg_dreq = NULL;
        desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

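/*
 * Illustrative sketch (not part of this file): the usual lifetime of a
 * descriptor is init, add requests one by one, then complete.
 * nfs_pageio_init_read() and next_request() are assumed/hypothetical
 * helper names standing in for the real caller's setup and request
 * source; they are not defined here.
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
 *	while ((req = next_request()) != NULL)
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;
 *	nfs_pageio_complete(&pgio);
 */
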
/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to the pageio descriptor whose pg_test hook is consulted
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                      struct nfs_page *req,
                                      struct nfs_pageio_descriptor *pgio)
{
        if (req->wb_context->cred != prev->wb_context->cred)
                return false;
        if (req->wb_lock_context->lockowner.l_owner != prev->wb_lock_context->lockowner.l_owner)
                return false;
        if (req->wb_lock_context->lockowner.l_pid != prev->wb_lock_context->lockowner.l_pid)
                return false;
        if (req->wb_context->state != prev->wb_context->state)
                return false;
        if (req->wb_pgbase != 0)
                return false;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return false;
        if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
                return false;
        return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
{
        if (desc->pg_count != 0) {
                struct nfs_page *prev;

                prev = nfs_list_entry(desc->pg_list.prev);
                if (!nfs_can_coalesce_requests(prev, req, desc))
                        return 0;
        } else {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                desc->pg_base = req->wb_pgbase;
        }
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count += req->wb_bytes;
        return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
        if (!list_empty(&desc->pg_list)) {
                int error = desc->pg_ops->pg_doio(desc);
                if (error < 0)
                        desc->pg_error = error;
                else
                        desc->pg_bytes_written += desc->pg_count;
        }
        if (list_empty(&desc->pg_list)) {
                desc->pg_count = 0;
                desc->pg_base = 0;
        }
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request cannot be coalesced, the existing list is flushed and
 * another attempt is made. Returns true once 'req' has been added to
 * 'desc', or false on error or when the descriptor needs recoalescing.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
{
        while (!nfs_pageio_do_add_request(desc, req)) {
                desc->pg_moreio = 1;
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0)
                        return 0;
                desc->pg_moreio = 0;
                if (desc->pg_recoalesce)
                        return 0;
        }
        return 1;
}

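/*
 * nfs_do_recoalesce - rebuild the descriptor's page list
 * @desc: descriptor to recoalesce
 *
 * Splices the current pg_list onto a private list, resets the byte and
 * base counters, and re-adds each request through
 * __nfs_pageio_add_request(). Repeats for as long as pg_recoalesce is
 * raised again while doing so. Returns 0 if an error was hit, 1
 * otherwise.
 */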
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
        LIST_HEAD(head);

        do {
                list_splice_init(&desc->pg_list, &head);
                desc->pg_bytes_written -= desc->pg_count;
                desc->pg_count = 0;
                desc->pg_base = 0;
                desc->pg_recoalesce = 0;

                while (!list_empty(&head)) {
                        struct nfs_page *req;

                        req = list_first_entry(&head, struct nfs_page, wb_list);
                        nfs_list_remove_request(req);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
                        if (desc->pg_error < 0)
                                return 0;
                        break;
                }
        } while (desc->pg_recoalesce);
        return 1;
}

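/**
 * nfs_pageio_add_request - add a request to the descriptor, recoalescing if needed
 * @desc: destination io descriptor
 * @req: request
 *
 * Public wrapper around __nfs_pageio_add_request() that keeps retrying
 * through nfs_do_recoalesce() until the request is accepted or an
 * error is recorded in desc->pg_error. Returns 1 on success, 0 on
 * failure.
 */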
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                struct nfs_page *req)
{
        int ret;

        do {
                ret = __nfs_pageio_add_request(desc, req);
                if (ret)
                        break;
                if (desc->pg_error < 0)
                        break;
                ret = nfs_do_recoalesce(desc);
        } while (ret);
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
        for (;;) {
                nfs_pageio_doio(desc);
                if (!desc->pg_recoalesce)
                        break;
                if (!nfs_do_recoalesce(desc))
                        break;
        }
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
        if (!list_empty(&desc->pg_list)) {
                struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
                if (index != prev->wb_index + 1)
                        nfs_pageio_complete(desc);
        }
}

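/*
 * Module init/exit helpers: create and destroy the slab cache backing
 * struct nfs_page allocations.
 */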
int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}

