
TOMOYO Linux Cross Reference
Linux/net/sunrpc/cache.c


  1 /*
  2  * net/sunrpc/cache.c
  3  *
  4  * Generic code for various authentication-related caches
  5  * used by sunrpc clients and servers.
  6  *
  7  * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
  8  *
  9  * Released under terms in GPL version 2.  See COPYING.
 10  *
 11  */
 12 
 13 #include <linux/types.h>
 14 #include <linux/fs.h>
 15 #include <linux/file.h>
 16 #include <linux/slab.h>
 17 #include <linux/signal.h>
 18 #include <linux/sched.h>
 19 #include <linux/kmod.h>
 20 #include <linux/list.h>
 21 #include <linux/module.h>
 22 #include <linux/ctype.h>
 23 #include <asm/uaccess.h>
 24 #include <linux/poll.h>
 25 #include <linux/seq_file.h>
 26 #include <linux/proc_fs.h>
 27 #include <linux/net.h>
 28 #include <linux/workqueue.h>
 29 #include <linux/mutex.h>
 30 #include <linux/pagemap.h>
 31 #include <asm/ioctls.h>
 32 #include <linux/sunrpc/types.h>
 33 #include <linux/sunrpc/cache.h>
 34 #include <linux/sunrpc/stats.h>
 35 #include <linux/sunrpc/rpc_pipe_fs.h>
 36 #include "netns.h"
 37 
 38 #define  RPCDBG_FACILITY RPCDBG_CACHE
 39 
 40 static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
 41 static void cache_revisit_request(struct cache_head *item);
 42 
 43 static void cache_init(struct cache_head *h)
 44 {
 45         time_t now = seconds_since_boot();
 46         h->next = NULL;
 47         h->flags = 0;
 48         kref_init(&h->ref);
 49         h->expiry_time = now + CACHE_NEW_EXPIRY;
 50         h->last_refresh = now;
 51 }
 52 
 53 static inline int cache_is_valid(struct cache_head *h);
 54 static void cache_fresh_locked(struct cache_head *head, time_t expiry);
 55 static void cache_fresh_unlocked(struct cache_head *head,
 56                                 struct cache_detail *detail);
 57 
 58 struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 59                                        struct cache_head *key, int hash)
 60 {
 61         struct cache_head **head,  **hp;
 62         struct cache_head *new = NULL, *freeme = NULL;
 63 
 64         head = &detail->hash_table[hash];
 65 
 66         read_lock(&detail->hash_lock);
 67 
 68         for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
 69                 struct cache_head *tmp = *hp;
 70                 if (detail->match(tmp, key)) {
 71                         if (cache_is_expired(detail, tmp))
 72                                 /* This entry is expired, we will discard it. */
 73                                 break;
 74                         cache_get(tmp);
 75                         read_unlock(&detail->hash_lock);
 76                         return tmp;
 77                 }
 78         }
 79         read_unlock(&detail->hash_lock);
 80         /* Didn't find anything, insert an empty entry */
 81 
 82         new = detail->alloc();
 83         if (!new)
 84                 return NULL;
 85         /* must fully initialise 'new', else
 86          * things could go wrong if we need to
 87          * cache_put it soon.
 88          */
 89         cache_init(new);
 90         detail->init(new, key);
 91 
 92         write_lock(&detail->hash_lock);
 93 
 94         /* check if entry appeared while we slept */
 95         for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
 96                 struct cache_head *tmp = *hp;
 97                 if (detail->match(tmp, key)) {
 98                         if (cache_is_expired(detail, tmp)) {
 99                                 *hp = tmp->next;
100                                 tmp->next = NULL;
101                                 detail->entries --;
102                                 if (cache_is_valid(tmp) == -EAGAIN)
103                                         set_bit(CACHE_NEGATIVE, &tmp->flags);
104                                 cache_fresh_locked(tmp, 0);
105                                 freeme = tmp;
106                                 break;
107                         }
108                         cache_get(tmp);
109                         write_unlock(&detail->hash_lock);
110                         cache_put(new, detail);
111                         return tmp;
112                 }
113         }
114         new->next = *head;
115         *head = new;
116         detail->entries++;
117         cache_get(new);
118         write_unlock(&detail->hash_lock);
119 
120         if (freeme) {
121                 cache_fresh_unlocked(freeme, detail);
122                 cache_put(freeme, detail);
123         }
124         return new;
125 }
126 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
127 
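/*
 * Editor's illustrative sketch (not part of cache.c): roughly how a cache
 * built on this API wraps sunrpc_cache_lookup().  All names here (demo_ent,
 * DEMO_HASHSIZE, demo_*) are hypothetical; real users such as the export and
 * unix-auth caches follow the same alloc/match/init pattern.
 */
#define DEMO_HASHSIZE 16                        /* hypothetical bucket count */

struct demo_ent {
        struct cache_head h;                    /* must be embedded first */
        int key;                                /* lookup key */
        int value;                              /* content filled in by a downcall */
};

static struct cache_head *demo_alloc(void)
{
        struct demo_ent *e = kzalloc(sizeof(*e), GFP_KERNEL);

        return e ? &e->h : NULL;
}

static int demo_match(struct cache_head *a, struct cache_head *b)
{
        return container_of(a, struct demo_ent, h)->key ==
               container_of(b, struct demo_ent, h)->key;
}

static void demo_init(struct cache_head *new, struct cache_head *key)
{
        container_of(new, struct demo_ent, h)->key =
                container_of(key, struct demo_ent, h)->key;
}

/* Find the entry for 'key', inserting a not-yet-valid placeholder if absent. */
static struct demo_ent *demo_lookup(struct cache_detail *cd, int key)
{
        struct demo_ent tmp = { .key = key };
        struct cache_head *ch;

        ch = sunrpc_cache_lookup(cd, &tmp.h, key & (DEMO_HASHSIZE - 1));
        return ch ? container_of(ch, struct demo_ent, h) : NULL;
}
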
128 
129 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
130 
131 static void cache_fresh_locked(struct cache_head *head, time_t expiry)
132 {
133         head->expiry_time = expiry;
134         head->last_refresh = seconds_since_boot();
135         smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
136         set_bit(CACHE_VALID, &head->flags);
137 }
138 
139 static void cache_fresh_unlocked(struct cache_head *head,
140                                  struct cache_detail *detail)
141 {
142         if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
143                 cache_revisit_request(head);
144                 cache_dequeue(detail, head);
145         }
146 }
147 
148 struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
149                                        struct cache_head *new, struct cache_head *old, int hash)
150 {
151         /* The 'old' entry is to be replaced by 'new'.
152          * If 'old' is not VALID, we update it directly,
153          * otherwise we need to replace it
154          */
155         struct cache_head **head;
156         struct cache_head *tmp;
157 
158         if (!test_bit(CACHE_VALID, &old->flags)) {
159                 write_lock(&detail->hash_lock);
160                 if (!test_bit(CACHE_VALID, &old->flags)) {
161                         if (test_bit(CACHE_NEGATIVE, &new->flags))
162                                 set_bit(CACHE_NEGATIVE, &old->flags);
163                         else
164                                 detail->update(old, new);
165                         cache_fresh_locked(old, new->expiry_time);
166                         write_unlock(&detail->hash_lock);
167                         cache_fresh_unlocked(old, detail);
168                         return old;
169                 }
170                 write_unlock(&detail->hash_lock);
171         }
172         /* We need to insert a new entry */
173         tmp = detail->alloc();
174         if (!tmp) {
175                 cache_put(old, detail);
176                 return NULL;
177         }
178         cache_init(tmp);
179         detail->init(tmp, old);
180         head = &detail->hash_table[hash];
181 
182         write_lock(&detail->hash_lock);
183         if (test_bit(CACHE_NEGATIVE, &new->flags))
184                 set_bit(CACHE_NEGATIVE, &tmp->flags);
185         else
186                 detail->update(tmp, new);
187         tmp->next = *head;
188         *head = tmp;
189         detail->entries++;
190         cache_get(tmp);
191         cache_fresh_locked(tmp, new->expiry_time);
192         cache_fresh_locked(old, 0);
193         write_unlock(&detail->hash_lock);
194         cache_fresh_unlocked(tmp, detail);
195         cache_fresh_unlocked(old, detail);
196         cache_put(old, detail);
197         return tmp;
198 }
199 EXPORT_SYMBOL_GPL(sunrpc_cache_update);
200 
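/*
 * Editor's illustrative sketch (not part of cache.c): the usual downcall
 * pattern for installing fresh content with sunrpc_cache_update().  The
 * cache's ->update callback copies the content fields; demo_ent,
 * DEMO_HASHSIZE and demo_lookup() are the hypothetical names from the
 * sketch after sunrpc_cache_lookup() above.
 */
static void demo_update_cb(struct cache_head *dst, struct cache_head *src)
{
        container_of(dst, struct demo_ent, h)->value =
                container_of(src, struct demo_ent, h)->value;
}

static int demo_install(struct cache_detail *cd, struct demo_ent *old,
                         int value, time_t expiry)
{
        struct demo_ent n;              /* short-lived holder for the new content */
        struct cache_head *ch;

        memset(&n, 0, sizeof(n));
        n.key = old->key;
        n.value = value;
        n.h.expiry_time = expiry;
        /* set_bit(CACHE_NEGATIVE, &n.h.flags) here for a negative entry */

        /* consumes the caller's reference on 'old', returns a referenced entry */
        ch = sunrpc_cache_update(cd, &n.h, &old->h, old->key & (DEMO_HASHSIZE - 1));
        if (!ch)
                return -ENOMEM;
        cache_put(ch, cd);
        return 0;
}
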
201 static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
202 {
203         if (cd->cache_upcall)
204                 return cd->cache_upcall(cd, h);
205         return sunrpc_cache_pipe_upcall(cd, h);
206 }
207 
208 static inline int cache_is_valid(struct cache_head *h)
209 {
210         if (!test_bit(CACHE_VALID, &h->flags))
211                 return -EAGAIN;
212         else {
213                 /* entry is valid */
214                 if (test_bit(CACHE_NEGATIVE, &h->flags))
215                         return -ENOENT;
216                 else {
217                         /*
218                          * In combination with write barrier in
219                          * sunrpc_cache_update, ensures that anyone
220                          * using the cache entry after this sees the
221                          * updated contents:
222                          */
223                         smp_rmb();
224                         return 0;
225                 }
226         }
227 }
228 
229 static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
230 {
231         int rv;
232 
233         write_lock(&detail->hash_lock);
234         rv = cache_is_valid(h);
235         if (rv == -EAGAIN) {
236                 set_bit(CACHE_NEGATIVE, &h->flags);
237                 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
238                 rv = -ENOENT;
239         }
240         write_unlock(&detail->hash_lock);
241         cache_fresh_unlocked(h, detail);
242         return rv;
243 }
244 
245 /*
246  * This is the generic cache management routine for all
247  * the authentication caches.
248  * It checks the currency of a cache item and will (later)
249  * initiate an upcall to fill it if needed.
250  *
251  *
252  * Returns 0 if the cache_head can be used, or cache_puts it and returns
253  * -EAGAIN if upcall is pending and request has been queued
254  * -ETIMEDOUT if upcall failed or request could not be queued or
255  *           upcall completed but item is still invalid (implying that
256  *           the cache item has been replaced with a newer one).
257  * -ENOENT if cache entry was negative
258  */
259 int cache_check(struct cache_detail *detail,
260                     struct cache_head *h, struct cache_req *rqstp)
261 {
262         int rv;
263         long refresh_age, age;
264 
265         /* First decide return status as best we can */
266         rv = cache_is_valid(h);
267 
268         /* now see if we want to start an upcall */
269         refresh_age = (h->expiry_time - h->last_refresh);
270         age = seconds_since_boot() - h->last_refresh;
271 
272         if (rqstp == NULL) {
273                 if (rv == -EAGAIN)
274                         rv = -ENOENT;
275         } else if (rv == -EAGAIN ||
276                    (h->expiry_time != 0 && age > refresh_age/2)) {
277                 dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
278                                 refresh_age, age);
279                 if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
280                         switch (cache_make_upcall(detail, h)) {
281                         case -EINVAL:
282                                 rv = try_to_negate_entry(detail, h);
283                                 break;
284                         case -EAGAIN:
285                                 cache_fresh_unlocked(h, detail);
286                                 break;
287                         }
288                 }
289         }
290 
291         if (rv == -EAGAIN) {
292                 if (!cache_defer_req(rqstp, h)) {
293                         /*
294                          * Request was not deferred; handle it as best
295                          * we can ourselves:
296                          */
297                         rv = cache_is_valid(h);
298                         if (rv == -EAGAIN)
299                                 rv = -ETIMEDOUT;
300                 }
301         }
302         if (rv)
303                 cache_put(h, detail);
304         return rv;
305 }
306 EXPORT_SYMBOL_GPL(cache_check);
307 
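/*
 * Editor's illustrative sketch (not part of cache.c): consulting the cache
 * from a request handler.  demo_lookup() and struct demo_ent are the
 * hypothetical helpers sketched earlier; 'rqstp' would normally be the
 * cache_req embedded in the request being served (it supplies ->defer()).
 */
static int demo_get_value(struct cache_detail *cd, struct cache_req *rqstp,
                          int key, int *value)
{
        struct demo_ent *e = demo_lookup(cd, key);
        int err;

        if (!e)
                return -ENOMEM;
        err = cache_check(cd, &e->h, rqstp);
        if (err)        /* -EAGAIN/-ETIMEDOUT/-ENOENT: reference already dropped */
                return err;
        *value = e->value;
        cache_put(&e->h, cd);
        return 0;
}
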
308 /*
309  * caches need to be periodically cleaned.
310  * For this we maintain a list of cache_detail and
311  * a current pointer into that list and into the table
312  * for that entry.
313  *
314  * Each time cache_clean is called it finds the next non-empty entry
315  * in the current table and walks the list in that entry
316  * looking for entries that can be removed.
317  *
318  * An entry gets removed if:
319  * - The expiry is before current time
320  * - The last_refresh time is before the flush_time for that cache
321  *
322  * later we might drop old entries with non-NEVER expiry if that table
323  * is getting 'full' for some definition of 'full'
324  *
325  * The question of "how often to scan a table" is an interesting one
326  * and is answered in part by the use of the "nextcheck" field in the
327  * cache_detail.
328  * When a scan of a table begins, the nextcheck field is set to a time
329  * that is well into the future.
330  * While scanning, if an expiry time is found that is earlier than the
331  * current nextcheck time, nextcheck is set to that expiry time.
332  * If the flush_time is ever set to a time earlier than the nextcheck
333  * time, the nextcheck time is then set to that flush_time.
334  *
335  * A table is then only scanned if the current time is at least
336  * the nextcheck time.
337  *
338  */
339 
340 static LIST_HEAD(cache_list);
341 static DEFINE_SPINLOCK(cache_list_lock);
342 static struct cache_detail *current_detail;
343 static int current_index;
344 
345 static void do_cache_clean(struct work_struct *work);
346 static struct delayed_work cache_cleaner;
347 
348 void sunrpc_init_cache_detail(struct cache_detail *cd)
349 {
350         rwlock_init(&cd->hash_lock);
351         INIT_LIST_HEAD(&cd->queue);
352         spin_lock(&cache_list_lock);
353         cd->nextcheck = 0;
354         cd->entries = 0;
355         atomic_set(&cd->readers, 0);
356         cd->last_close = 0;
357         cd->last_warn = -1;
358         list_add(&cd->others, &cache_list);
359         spin_unlock(&cache_list_lock);
360 
361         /* start the cleaning process */
362         schedule_delayed_work(&cache_cleaner, 0);
363 }
364 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
365 
366 void sunrpc_destroy_cache_detail(struct cache_detail *cd)
367 {
368         cache_purge(cd);
369         spin_lock(&cache_list_lock);
370         write_lock(&cd->hash_lock);
371         if (cd->entries || atomic_read(&cd->inuse)) {
372                 write_unlock(&cd->hash_lock);
373                 spin_unlock(&cache_list_lock);
374                 goto out;
375         }
376         if (current_detail == cd)
377                 current_detail = NULL;
378         list_del_init(&cd->others);
379         write_unlock(&cd->hash_lock);
380         spin_unlock(&cache_list_lock);
381         if (list_empty(&cache_list)) {
 382                 /* module must be being unloaded so it's safe to kill the worker */
383                 cancel_delayed_work_sync(&cache_cleaner);
384         }
385         return;
386 out:
387         printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name);
388 }
389 EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
390 
391 /* clean cache tries to find something to clean
392  * and cleans it.
393  * It returns 1 if it cleaned something,
394  *            0 if it didn't find anything this time
395  *           -1 if it fell off the end of the list.
396  */
397 static int cache_clean(void)
398 {
399         int rv = 0;
400         struct list_head *next;
401 
402         spin_lock(&cache_list_lock);
403 
404         /* find a suitable table if we don't already have one */
405         while (current_detail == NULL ||
406             current_index >= current_detail->hash_size) {
407                 if (current_detail)
408                         next = current_detail->others.next;
409                 else
410                         next = cache_list.next;
411                 if (next == &cache_list) {
412                         current_detail = NULL;
413                         spin_unlock(&cache_list_lock);
414                         return -1;
415                 }
416                 current_detail = list_entry(next, struct cache_detail, others);
417                 if (current_detail->nextcheck > seconds_since_boot())
418                         current_index = current_detail->hash_size;
419                 else {
420                         current_index = 0;
421                         current_detail->nextcheck = seconds_since_boot()+30*60;
422                 }
423         }
424 
425         /* find a non-empty bucket in the table */
426         while (current_detail &&
427                current_index < current_detail->hash_size &&
428                current_detail->hash_table[current_index] == NULL)
429                 current_index++;
430 
431         /* find a cleanable entry in the bucket and clean it, or set to next bucket */
432 
433         if (current_detail && current_index < current_detail->hash_size) {
434                 struct cache_head *ch, **cp;
435                 struct cache_detail *d;
436 
437                 write_lock(&current_detail->hash_lock);
438 
439                 /* Ok, now to clean this strand */
440 
441                 cp = & current_detail->hash_table[current_index];
442                 for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) {
443                         if (current_detail->nextcheck > ch->expiry_time)
444                                 current_detail->nextcheck = ch->expiry_time+1;
445                         if (!cache_is_expired(current_detail, ch))
446                                 continue;
447 
448                         *cp = ch->next;
449                         ch->next = NULL;
450                         current_detail->entries--;
451                         rv = 1;
452                         break;
453                 }
454 
455                 write_unlock(&current_detail->hash_lock);
456                 d = current_detail;
457                 if (!ch)
458                         current_index ++;
459                 spin_unlock(&cache_list_lock);
460                 if (ch) {
461                         set_bit(CACHE_CLEANED, &ch->flags);
462                         cache_fresh_unlocked(ch, d);
463                         cache_put(ch, d);
464                 }
465         } else
466                 spin_unlock(&cache_list_lock);
467 
468         return rv;
469 }
470 
471 /*
472  * We want to regularly clean the cache, so we need to schedule some work ...
473  */
474 static void do_cache_clean(struct work_struct *work)
475 {
476         int delay = 5;
477         if (cache_clean() == -1)
478                 delay = round_jiffies_relative(30*HZ);
479 
480         if (list_empty(&cache_list))
481                 delay = 0;
482 
483         if (delay)
484                 schedule_delayed_work(&cache_cleaner, delay);
485 }
486 
487 
488 /*
489  * Clean all caches promptly.  This just calls cache_clean
490  * repeatedly until we are sure that every cache has had a chance to
491  * be fully cleaned
492  */
493 void cache_flush(void)
494 {
495         while (cache_clean() != -1)
496                 cond_resched();
497         while (cache_clean() != -1)
498                 cond_resched();
499 }
500 EXPORT_SYMBOL_GPL(cache_flush);
501 
502 void cache_purge(struct cache_detail *detail)
503 {
504         detail->flush_time = LONG_MAX;
505         detail->nextcheck = seconds_since_boot();
506         cache_flush();
507         detail->flush_time = 1;
508 }
509 EXPORT_SYMBOL_GPL(cache_purge);
510 
511 
512 /*
513  * Deferral and Revisiting of Requests.
514  *
515  * If a cache lookup finds a pending entry, we
516  * need to defer the request and revisit it later.
517  * All deferred requests are stored in a hash table,
518  * indexed by "struct cache_head *".
519  * As it may be wasteful to store a whole request
520  * structure, we allow the request to provide a
521  * deferred form, which must contain a
522  * 'struct cache_deferred_req'.
523  * This cache_deferred_req contains a method to allow
524  * it to be revisited when cache info is available
525  */
526 
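/*
 * Editor's illustrative sketch (not part of cache.c): how a request type can
 * support deferral by embedding a cache_deferred_req in its deferred form,
 * in the same style as the svc_rqst deferral code.  demo_req/demo_deferred
 * and the requeue step are hypothetical placeholders.
 */
struct demo_deferred {
        struct cache_deferred_req handle;       /* what the cache layer queues/hashes */
        /* ... saved copy of the original request ... */
};

static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct demo_deferred *dd =
                container_of(dreq, struct demo_deferred, handle);

        if (too_many) {         /* dropped because DFR_MAX was exceeded */
                kfree(dd);
                return;
        }
        /* requeue the saved request for processing, freeing dd afterwards */
}

static struct cache_deferred_req *demo_defer(struct cache_req *req)
{
        struct demo_deferred *dd = kmalloc(sizeof(*dd), GFP_KERNEL);

        if (!dd)
                return NULL;            /* cache_defer_req() then reports -ETIMEDOUT */
        dd->handle.owner = NULL;        /* normally the owning service/module */
        dd->handle.revisit = demo_revisit;
        /* snapshot enough of 'req' to replay it later */
        return &dd->handle;
}
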
527 #define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
528 #define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
529 
530 #define DFR_MAX 300     /* ??? */
531 
532 static DEFINE_SPINLOCK(cache_defer_lock);
533 static LIST_HEAD(cache_defer_list);
534 static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
535 static int cache_defer_cnt;
536 
537 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
538 {
539         hlist_del_init(&dreq->hash);
540         if (!list_empty(&dreq->recent)) {
541                 list_del_init(&dreq->recent);
542                 cache_defer_cnt--;
543         }
544 }
545 
546 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
547 {
548         int hash = DFR_HASH(item);
549 
550         INIT_LIST_HEAD(&dreq->recent);
551         hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
552 }
553 
554 static void setup_deferral(struct cache_deferred_req *dreq,
555                            struct cache_head *item,
556                            int count_me)
557 {
558 
559         dreq->item = item;
560 
561         spin_lock(&cache_defer_lock);
562 
563         __hash_deferred_req(dreq, item);
564 
565         if (count_me) {
566                 cache_defer_cnt++;
567                 list_add(&dreq->recent, &cache_defer_list);
568         }
569 
570         spin_unlock(&cache_defer_lock);
571 
572 }
573 
574 struct thread_deferred_req {
575         struct cache_deferred_req handle;
576         struct completion completion;
577 };
578 
579 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
580 {
581         struct thread_deferred_req *dr =
582                 container_of(dreq, struct thread_deferred_req, handle);
583         complete(&dr->completion);
584 }
585 
586 static void cache_wait_req(struct cache_req *req, struct cache_head *item)
587 {
588         struct thread_deferred_req sleeper;
589         struct cache_deferred_req *dreq = &sleeper.handle;
590 
591         sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
592         dreq->revisit = cache_restart_thread;
593 
594         setup_deferral(dreq, item, 0);
595 
596         if (!test_bit(CACHE_PENDING, &item->flags) ||
597             wait_for_completion_interruptible_timeout(
598                     &sleeper.completion, req->thread_wait) <= 0) {
599                 /* The completion wasn't completed, so we need
600                  * to clean up
601                  */
602                 spin_lock(&cache_defer_lock);
603                 if (!hlist_unhashed(&sleeper.handle.hash)) {
604                         __unhash_deferred_req(&sleeper.handle);
605                         spin_unlock(&cache_defer_lock);
606                 } else {
607                         /* cache_revisit_request already removed
608                          * this from the hash table, but hasn't
609                          * called ->revisit yet.  It will very soon
610                          * and we need to wait for it.
611                          */
612                         spin_unlock(&cache_defer_lock);
613                         wait_for_completion(&sleeper.completion);
614                 }
615         }
616 }
617 
618 static void cache_limit_defers(void)
619 {
620         /* Make sure we haven't exceeded the limit of allowed deferred
621          * requests.
622          */
623         struct cache_deferred_req *discard = NULL;
624 
625         if (cache_defer_cnt <= DFR_MAX)
626                 return;
627 
628         spin_lock(&cache_defer_lock);
629 
630         /* Consider removing either the first or the last */
631         if (cache_defer_cnt > DFR_MAX) {
632                 if (prandom_u32() & 1)
633                         discard = list_entry(cache_defer_list.next,
634                                              struct cache_deferred_req, recent);
635                 else
636                         discard = list_entry(cache_defer_list.prev,
637                                              struct cache_deferred_req, recent);
638                 __unhash_deferred_req(discard);
639         }
640         spin_unlock(&cache_defer_lock);
641         if (discard)
642                 discard->revisit(discard, 1);
643 }
644 
645 /* Return true if and only if a deferred request is queued. */
646 static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
647 {
648         struct cache_deferred_req *dreq;
649 
650         if (req->thread_wait) {
651                 cache_wait_req(req, item);
652                 if (!test_bit(CACHE_PENDING, &item->flags))
653                         return false;
654         }
655         dreq = req->defer(req);
656         if (dreq == NULL)
657                 return false;
658         setup_deferral(dreq, item, 1);
659         if (!test_bit(CACHE_PENDING, &item->flags))
660                 /* Bit could have been cleared before we managed to
661                  * set up the deferral, so need to revisit just in case
662                  */
663                 cache_revisit_request(item);
664 
665         cache_limit_defers();
666         return true;
667 }
668 
669 static void cache_revisit_request(struct cache_head *item)
670 {
671         struct cache_deferred_req *dreq;
672         struct list_head pending;
673         struct hlist_node *tmp;
674         int hash = DFR_HASH(item);
675 
676         INIT_LIST_HEAD(&pending);
677         spin_lock(&cache_defer_lock);
678 
679         hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
680                 if (dreq->item == item) {
681                         __unhash_deferred_req(dreq);
682                         list_add(&dreq->recent, &pending);
683                 }
684 
685         spin_unlock(&cache_defer_lock);
686 
687         while (!list_empty(&pending)) {
688                 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
689                 list_del_init(&dreq->recent);
690                 dreq->revisit(dreq, 0);
691         }
692 }
693 
694 void cache_clean_deferred(void *owner)
695 {
696         struct cache_deferred_req *dreq, *tmp;
697         struct list_head pending;
698 
699 
700         INIT_LIST_HEAD(&pending);
701         spin_lock(&cache_defer_lock);
702 
703         list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
704                 if (dreq->owner == owner) {
705                         __unhash_deferred_req(dreq);
706                         list_add(&dreq->recent, &pending);
707                 }
708         }
709         spin_unlock(&cache_defer_lock);
710 
711         while (!list_empty(&pending)) {
712                 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
713                 list_del_init(&dreq->recent);
714                 dreq->revisit(dreq, 1);
715         }
716 }
717 
718 /*
719  * communicate with user-space
720  *
721  * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
722  * On read, you get a full request, or block.
723  * On write, an update request is processed.
724  * Poll works if anything to read, and always allows write.
725  *
726  * Implemented by linked list of requests.  Each open file has
727  * a ->private that also exists in this list.  New requests are added
728  * to the end and may wake up any preceding readers.
729  * New readers are added to the head.  If, on read, an item is found with
730  * CACHE_UPCALLING clear, we free it from the list.
731  *
732  */
733 
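/*
 * Editor's illustrative sketch (not part of cache.c): the shape of a minimal
 * user-space daemon for this channel, in the spirit of rpc.mountd/svcgssd.
 * The cache name "demo" and the reply line are placeholders - every cache
 * defines its own request/reply format - and error handling is trimmed.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int demo_channel_daemon(void)
{
        char buf[8192];
        int fd = open("/proc/net/rpc/demo/channel", O_RDWR);
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        if (fd < 0)
                return 1;
        for (;;) {
                ssize_t n;

                if (poll(&pfd, 1, -1) < 0)              /* wait for a queued request */
                        break;
                n = read(fd, buf, sizeof(buf) - 1);     /* one whole upcall per read */
                if (n <= 0)
                        continue;
                buf[n] = '\0';
                /* parse buf, resolve the answer, then write one reply line back */
                snprintf(buf, sizeof(buf), "<key> <expiry> <content>\n");
                if (write(fd, buf, strlen(buf)) < 0)
                        perror("demo downcall");
        }
        close(fd);
        return 0;
}
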
734 static DEFINE_SPINLOCK(queue_lock);
735 static DEFINE_MUTEX(queue_io_mutex);
736 
737 struct cache_queue {
738         struct list_head        list;
739         int                     reader; /* if 0, then request */
740 };
741 struct cache_request {
742         struct cache_queue      q;
743         struct cache_head       *item;
744         char                    * buf;
745         int                     len;
746         int                     readers;
747 };
748 struct cache_reader {
749         struct cache_queue      q;
750         int                     offset; /* if non-0, we have a refcnt on next request */
751 };
752 
753 static int cache_request(struct cache_detail *detail,
754                                struct cache_request *crq)
755 {
756         char *bp = crq->buf;
757         int len = PAGE_SIZE;
758 
759         detail->cache_request(detail, crq->item, &bp, &len);
760         if (len < 0)
761                 return -EAGAIN;
762         return PAGE_SIZE - len;
763 }
764 
765 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
766                           loff_t *ppos, struct cache_detail *cd)
767 {
768         struct cache_reader *rp = filp->private_data;
769         struct cache_request *rq;
770         struct inode *inode = file_inode(filp);
771         int err;
772 
773         if (count == 0)
774                 return 0;
775 
776         mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
777                               * readers on this file */
778  again:
779         spin_lock(&queue_lock);
780         /* need to find next request */
781         while (rp->q.list.next != &cd->queue &&
782                list_entry(rp->q.list.next, struct cache_queue, list)
783                ->reader) {
784                 struct list_head *next = rp->q.list.next;
785                 list_move(&rp->q.list, next);
786         }
787         if (rp->q.list.next == &cd->queue) {
788                 spin_unlock(&queue_lock);
789                 mutex_unlock(&inode->i_mutex);
790                 WARN_ON_ONCE(rp->offset);
791                 return 0;
792         }
793         rq = container_of(rp->q.list.next, struct cache_request, q.list);
794         WARN_ON_ONCE(rq->q.reader);
795         if (rp->offset == 0)
796                 rq->readers++;
797         spin_unlock(&queue_lock);
798 
799         if (rq->len == 0) {
800                 err = cache_request(cd, rq);
801                 if (err < 0)
802                         goto out;
803                 rq->len = err;
804         }
805 
806         if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
807                 err = -EAGAIN;
808                 spin_lock(&queue_lock);
809                 list_move(&rp->q.list, &rq->q.list);
810                 spin_unlock(&queue_lock);
811         } else {
812                 if (rp->offset + count > rq->len)
813                         count = rq->len - rp->offset;
814                 err = -EFAULT;
815                 if (copy_to_user(buf, rq->buf + rp->offset, count))
816                         goto out;
817                 rp->offset += count;
818                 if (rp->offset >= rq->len) {
819                         rp->offset = 0;
820                         spin_lock(&queue_lock);
821                         list_move(&rp->q.list, &rq->q.list);
822                         spin_unlock(&queue_lock);
823                 }
824                 err = 0;
825         }
826  out:
827         if (rp->offset == 0) {
828                 /* need to release rq */
829                 spin_lock(&queue_lock);
830                 rq->readers--;
831                 if (rq->readers == 0 &&
832                     !test_bit(CACHE_PENDING, &rq->item->flags)) {
833                         list_del(&rq->q.list);
834                         spin_unlock(&queue_lock);
835                         cache_put(rq->item, cd);
836                         kfree(rq->buf);
837                         kfree(rq);
838                 } else
839                         spin_unlock(&queue_lock);
840         }
841         if (err == -EAGAIN)
842                 goto again;
843         mutex_unlock(&inode->i_mutex);
844         return err ? err :  count;
845 }
846 
847 static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
848                                  size_t count, struct cache_detail *cd)
849 {
850         ssize_t ret;
851 
852         if (count == 0)
853                 return -EINVAL;
854         if (copy_from_user(kaddr, buf, count))
855                 return -EFAULT;
856         kaddr[count] = '\0';
857         ret = cd->cache_parse(cd, kaddr, count);
858         if (!ret)
859                 ret = count;
860         return ret;
861 }
862 
863 static ssize_t cache_slow_downcall(const char __user *buf,
864                                    size_t count, struct cache_detail *cd)
865 {
866         static char write_buf[8192]; /* protected by queue_io_mutex */
867         ssize_t ret = -EINVAL;
868 
869         if (count >= sizeof(write_buf))
870                 goto out;
871         mutex_lock(&queue_io_mutex);
872         ret = cache_do_downcall(write_buf, buf, count, cd);
873         mutex_unlock(&queue_io_mutex);
874 out:
875         return ret;
876 }
877 
878 static ssize_t cache_downcall(struct address_space *mapping,
879                               const char __user *buf,
880                               size_t count, struct cache_detail *cd)
881 {
882         struct page *page;
883         char *kaddr;
884         ssize_t ret = -ENOMEM;
885 
886         if (count >= PAGE_CACHE_SIZE)
887                 goto out_slow;
888 
889         page = find_or_create_page(mapping, 0, GFP_KERNEL);
890         if (!page)
891                 goto out_slow;
892 
893         kaddr = kmap(page);
894         ret = cache_do_downcall(kaddr, buf, count, cd);
895         kunmap(page);
896         unlock_page(page);
897         page_cache_release(page);
898         return ret;
899 out_slow:
900         return cache_slow_downcall(buf, count, cd);
901 }
902 
903 static ssize_t cache_write(struct file *filp, const char __user *buf,
904                            size_t count, loff_t *ppos,
905                            struct cache_detail *cd)
906 {
907         struct address_space *mapping = filp->f_mapping;
908         struct inode *inode = file_inode(filp);
909         ssize_t ret = -EINVAL;
910 
911         if (!cd->cache_parse)
912                 goto out;
913 
914         mutex_lock(&inode->i_mutex);
915         ret = cache_downcall(mapping, buf, count, cd);
916         mutex_unlock(&inode->i_mutex);
917 out:
918         return ret;
919 }
920 
921 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
922 
923 static unsigned int cache_poll(struct file *filp, poll_table *wait,
924                                struct cache_detail *cd)
925 {
926         unsigned int mask;
927         struct cache_reader *rp = filp->private_data;
928         struct cache_queue *cq;
929 
930         poll_wait(filp, &queue_wait, wait);
931 
932         /* always allow write */
933         mask = POLLOUT | POLLWRNORM;
934 
935         if (!rp)
936                 return mask;
937 
938         spin_lock(&queue_lock);
939 
940         for (cq= &rp->q; &cq->list != &cd->queue;
941              cq = list_entry(cq->list.next, struct cache_queue, list))
942                 if (!cq->reader) {
943                         mask |= POLLIN | POLLRDNORM;
944                         break;
945                 }
946         spin_unlock(&queue_lock);
947         return mask;
948 }
949 
950 static int cache_ioctl(struct inode *ino, struct file *filp,
951                        unsigned int cmd, unsigned long arg,
952                        struct cache_detail *cd)
953 {
954         int len = 0;
955         struct cache_reader *rp = filp->private_data;
956         struct cache_queue *cq;
957 
958         if (cmd != FIONREAD || !rp)
959                 return -EINVAL;
960 
961         spin_lock(&queue_lock);
962 
963         /* only find the length remaining in current request,
964          * or the length of the next request
965          */
966         for (cq= &rp->q; &cq->list != &cd->queue;
967              cq = list_entry(cq->list.next, struct cache_queue, list))
968                 if (!cq->reader) {
969                         struct cache_request *cr =
970                                 container_of(cq, struct cache_request, q);
971                         len = cr->len - rp->offset;
972                         break;
973                 }
974         spin_unlock(&queue_lock);
975 
976         return put_user(len, (int __user *)arg);
977 }
978 
979 static int cache_open(struct inode *inode, struct file *filp,
980                       struct cache_detail *cd)
981 {
982         struct cache_reader *rp = NULL;
983 
984         if (!cd || !try_module_get(cd->owner))
985                 return -EACCES;
986         nonseekable_open(inode, filp);
987         if (filp->f_mode & FMODE_READ) {
988                 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
989                 if (!rp) {
990                         module_put(cd->owner);
991                         return -ENOMEM;
992                 }
993                 rp->offset = 0;
994                 rp->q.reader = 1;
995                 atomic_inc(&cd->readers);
996                 spin_lock(&queue_lock);
997                 list_add(&rp->q.list, &cd->queue);
998                 spin_unlock(&queue_lock);
999         }
1000         filp->private_data = rp;
1001         return 0;
1002 }
1003 
1004 static int cache_release(struct inode *inode, struct file *filp,
1005                          struct cache_detail *cd)
1006 {
1007         struct cache_reader *rp = filp->private_data;
1008 
1009         if (rp) {
1010                 spin_lock(&queue_lock);
1011                 if (rp->offset) {
1012                         struct cache_queue *cq;
1013                         for (cq= &rp->q; &cq->list != &cd->queue;
1014                              cq = list_entry(cq->list.next, struct cache_queue, list))
1015                                 if (!cq->reader) {
1016                                         container_of(cq, struct cache_request, q)
1017                                                 ->readers--;
1018                                         break;
1019                                 }
1020                         rp->offset = 0;
1021                 }
1022                 list_del(&rp->q.list);
1023                 spin_unlock(&queue_lock);
1024 
1025                 filp->private_data = NULL;
1026                 kfree(rp);
1027 
1028                 cd->last_close = seconds_since_boot();
1029                 atomic_dec(&cd->readers);
1030         }
1031         module_put(cd->owner);
1032         return 0;
1033 }
1034 
1035 
1036 
1037 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1038 {
1039         struct cache_queue *cq, *tmp;
1040         struct cache_request *cr;
1041         struct list_head dequeued;
1042 
1043         INIT_LIST_HEAD(&dequeued);
1044         spin_lock(&queue_lock);
1045         list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1046                 if (!cq->reader) {
1047                         cr = container_of(cq, struct cache_request, q);
1048                         if (cr->item != ch)
1049                                 continue;
1050                         if (test_bit(CACHE_PENDING, &ch->flags))
1051                                 /* Lost a race and it is pending again */
1052                                 break;
1053                         if (cr->readers != 0)
1054                                 continue;
1055                         list_move(&cr->q.list, &dequeued);
1056                 }
1057         spin_unlock(&queue_lock);
1058         while (!list_empty(&dequeued)) {
1059                 cr = list_entry(dequeued.next, struct cache_request, q.list);
1060                 list_del(&cr->q.list);
1061                 cache_put(cr->item, detail);
1062                 kfree(cr->buf);
1063                 kfree(cr);
1064         }
1065 }
1066 
1067 /*
1068  * Support routines for text-based upcalls.
1069  * Fields are separated by spaces.
1070  * Fields are either mangled to quote space, tab, newline and slosh with slosh,
1071  * or hexified with a leading \x
1072  * Record is terminated with newline.
1073  *
1074  */
1075 
1076 void qword_add(char **bpp, int *lp, char *str)
1077 {
1078         char *bp = *bpp;
1079         int len = *lp;
1080         char c;
1081 
1082         if (len < 0) return;
1083 
1084         while ((c=*str++) && len)
1085                 switch(c) {
1086                 case ' ':
1087                 case '\t':
1088                 case '\n':
1089                 case '\\':
1090                         if (len >= 4) {
1091                                 *bp++ = '\\';
1092                                 *bp++ = '0' + ((c & 0300)>>6);
1093                                 *bp++ = '0' + ((c & 0070)>>3);
1094                                 *bp++ = '0' + ((c & 0007)>>0);
1095                         }
1096                         len -= 4;
1097                         break;
1098                 default:
1099                         *bp++ = c;
1100                         len--;
1101                 }
1102         if (c || len <1) len = -1;
1103         else {
1104                 *bp++ = ' ';
1105                 len--;
1106         }
1107         *bpp = bp;
1108         *lp = len;
1109 }
1110 EXPORT_SYMBOL_GPL(qword_add);
1111 
1112 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1113 {
1114         char *bp = *bpp;
1115         int len = *lp;
1116 
1117         if (len < 0) return;
1118 
1119         if (len > 2) {
1120                 *bp++ = '\\';
1121                 *bp++ = 'x';
1122                 len -= 2;
1123                 while (blen && len >= 2) {
1124                         bp = hex_byte_pack(bp, *buf++);
1125                         len -= 2;
1126                         blen--;
1127                 }
1128         }
1129         if (blen || len<1) len = -1;
1130         else {
1131                 *bp++ = ' ';
1132                 len--;
1133         }
1134         *bpp = bp;
1135         *lp = len;
1136 }
1137 EXPORT_SYMBOL_GPL(qword_addhex);
1138 
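/*
 * Editor's illustrative sketch (not part of cache.c): a ->cache_request
 * callback composing one upcall record with qword_add()/qword_addhex().
 * struct demo_ent is the hypothetical entry type from the earlier sketches.
 */
static void demo_request(struct cache_detail *cd, struct cache_head *h,
                         char **bpp, int *blen)
{
        struct demo_ent *e = container_of(h, struct demo_ent, h);
        char keybuf[16];

        snprintf(keybuf, sizeof(keybuf), "%d", e->key);
        qword_add(bpp, blen, keybuf);           /* plain field, escaped as needed */
        qword_addhex(bpp, blen, (char *)&e->key, sizeof(e->key)); /* \x-encoded field */
        (*bpp)[-1] = '\n';                      /* replace trailing space, end the record */
}
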
1139 static void warn_no_listener(struct cache_detail *detail)
1140 {
1141         if (detail->last_warn != detail->last_close) {
1142                 detail->last_warn = detail->last_close;
1143                 if (detail->warn_no_listener)
1144                         detail->warn_no_listener(detail, detail->last_close != 0);
1145         }
1146 }
1147 
1148 static bool cache_listeners_exist(struct cache_detail *detail)
1149 {
1150         if (atomic_read(&detail->readers))
1151                 return true;
1152         if (detail->last_close == 0)
1153                 /* This cache was never opened */
1154                 return false;
1155         if (detail->last_close < seconds_since_boot() - 30)
1156                 /*
1157                  * We allow for the possibility that someone might
1158                  * restart a userspace daemon without restarting the
1159                  * server; but after 30 seconds, we give up.
1160                  */
1161                  return false;
1162         return true;
1163 }
1164 
1165 /*
1166  * register an upcall request to user-space and queue it up for read() by the
1167  * upcall daemon.
1168  *
1169  * Each request is at most one page long.
1170  */
1171 int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1172 {
1173 
1174         char *buf;
1175         struct cache_request *crq;
1176         int ret = 0;
1177 
1178         if (!detail->cache_request)
1179                 return -EINVAL;
1180 
1181         if (!cache_listeners_exist(detail)) {
1182                 warn_no_listener(detail);
1183                 return -EINVAL;
1184         }
1185         if (test_bit(CACHE_CLEANED, &h->flags))
1186                 /* Too late to make an upcall */
1187                 return -EAGAIN;
1188 
1189         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1190         if (!buf)
1191                 return -EAGAIN;
1192 
1193         crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1194         if (!crq) {
1195                 kfree(buf);
1196                 return -EAGAIN;
1197         }
1198 
1199         crq->q.reader = 0;
1200         crq->buf = buf;
1201         crq->len = 0;
1202         crq->readers = 0;
1203         spin_lock(&queue_lock);
1204         if (test_bit(CACHE_PENDING, &h->flags)) {
1205                 crq->item = cache_get(h);
1206                 list_add_tail(&crq->q.list, &detail->queue);
1207         } else
1208                 /* Lost a race, no longer PENDING, so don't enqueue */
1209                 ret = -EAGAIN;
1210         spin_unlock(&queue_lock);
1211         wake_up(&queue_wait);
1212         if (ret == -EAGAIN) {
1213                 kfree(buf);
1214                 kfree(crq);
1215         }
1216         return ret;
1217 }
1218 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1219 
1220 /*
1221  * parse a message from user-space and pass it
1222  * to an appropriate cache
1223  * Messages are, like requests, separated into fields by
1224  * spaces, and dequoted from \xHEXSTRING or embedded \nnn octal
1225  *
1226  * Message is
1227  *   reply cachename expiry key ... content....
1228  *
1229  * key and content are both parsed by cache
1230  */
1231 
1232 int qword_get(char **bpp, char *dest, int bufsize)
1233 {
1234         /* return bytes copied, or -1 on error */
1235         char *bp = *bpp;
1236         int len = 0;
1237 
1238         while (*bp == ' ') bp++;
1239 
1240         if (bp[0] == '\\' && bp[1] == 'x') {
1241                 /* HEX STRING */
1242                 bp += 2;
1243                 while (len < bufsize - 1) {
1244                         int h, l;
1245 
1246                         h = hex_to_bin(bp[0]);
1247                         if (h < 0)
1248                                 break;
1249 
1250                         l = hex_to_bin(bp[1]);
1251                         if (l < 0)
1252                                 break;
1253 
1254                         *dest++ = (h << 4) | l;
1255                         bp += 2;
1256                         len++;
1257                 }
1258         } else {
1259                 /* text with \nnn octal quoting */
1260                 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1261                         if (*bp == '\\' &&
1262                             isodigit(bp[1]) && (bp[1] <= '3') &&
1263                             isodigit(bp[2]) &&
1264                             isodigit(bp[3])) {
1265                                 int byte = (*++bp -'0');
1266                                 bp++;
1267                                 byte = (byte << 3) | (*bp++ - '0');
1268                                 byte = (byte << 3) | (*bp++ - '0');
1269                                 *dest++ = byte;
1270                                 len++;
1271                         } else {
1272                                 *dest++ = *bp++;
1273                                 len++;
1274                         }
1275                 }
1276         }
1277 
1278         if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1279                 return -1;
1280         while (*bp == ' ') bp++;
1281         *bpp = bp;
1282         *dest = '\0';
1283         return len;
1284 }
1285 EXPORT_SYMBOL_GPL(qword_get);
1286 
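/*
 * Editor's illustrative sketch (not part of cache.c): a ->cache_parse
 * callback pulling fields back out of a downcall line with qword_get()
 * and get_expiry() (from linux/sunrpc/cache.h), then installing the
 * result.  demo_lookup()/demo_install() are the hypothetical helpers
 * from the earlier sketches.
 */
static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char tok[64];
        int key, value;
        time_t expiry;
        struct demo_ent *e;

        if (mlen <= 0 || mesg[mlen - 1] != '\n')
                return -EINVAL;
        mesg[mlen - 1] = '\0';

        if (qword_get(&mesg, tok, sizeof(tok)) <= 0 || kstrtoint(tok, 10, &key))
                return -EINVAL;
        expiry = get_expiry(&mesg);
        if (!expiry)
                return -EINVAL;
        if (qword_get(&mesg, tok, sizeof(tok)) <= 0 || kstrtoint(tok, 10, &value))
                return -EINVAL;

        e = demo_lookup(cd, key);
        if (!e)
                return -ENOMEM;
        return demo_install(cd, e, value, expiry);      /* consumes the ref on 'e' */
}
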
1287 
1288 /*
1289  * support /proc/sunrpc/cache/$CACHENAME/content
1290  * as a seqfile.
1291  * We call ->cache_show passing NULL for the item to
1292  * get a header, then pass each real item in the cache
1293  */
1294 
1295 struct handle {
1296         struct cache_detail *cd;
1297 };
1298 
1299 static void *c_start(struct seq_file *m, loff_t *pos)
1300         __acquires(cd->hash_lock)
1301 {
1302         loff_t n = *pos;
1303         unsigned int hash, entry;
1304         struct cache_head *ch;
1305         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1306 
1307 
1308         read_lock(&cd->hash_lock);
1309         if (!n--)
1310                 return SEQ_START_TOKEN;
1311         hash = n >> 32;
1312         entry = n & ((1LL<<32) - 1);
1313 
1314         for (ch=cd->hash_table[hash]; ch; ch=ch->next)
1315                 if (!entry--)
1316                         return ch;
1317         n &= ~((1LL<<32) - 1);
1318         do {
1319                 hash++;
1320                 n += 1LL<<32;
1321         } while(hash < cd->hash_size &&
1322                 cd->hash_table[hash]==NULL);
1323         if (hash >= cd->hash_size)
1324                 return NULL;
1325         *pos = n+1;
1326         return cd->hash_table[hash];
1327 }
1328 
1329 static void *c_next(struct seq_file *m, void *p, loff_t *pos)
1330 {
1331         struct cache_head *ch = p;
1332         int hash = (*pos >> 32);
1333         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1334 
1335         if (p == SEQ_START_TOKEN)
1336                 hash = 0;
1337         else if (ch->next == NULL) {
1338                 hash++;
1339                 *pos += 1LL<<32;
1340         } else {
1341                 ++*pos;
1342                 return ch->next;
1343         }
1344         *pos &= ~((1LL<<32) - 1);
1345         while (hash < cd->hash_size &&
1346                cd->hash_table[hash] == NULL) {
1347                 hash++;
1348                 *pos += 1LL<<32;
1349         }
1350         if (hash >= cd->hash_size)
1351                 return NULL;
1352         ++*pos;
1353         return cd->hash_table[hash];
1354 }
1355 
1356 static void c_stop(struct seq_file *m, void *p)
1357         __releases(cd->hash_lock)
1358 {
1359         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1360         read_unlock(&cd->hash_lock);
1361 }
1362 
1363 static int c_show(struct seq_file *m, void *p)
1364 {
1365         struct cache_head *cp = p;
1366         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1367 
1368         if (p == SEQ_START_TOKEN)
1369                 return cd->cache_show(m, cd, NULL);
1370 
1371         ifdebug(CACHE)
1372                 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1373                            convert_to_wallclock(cp->expiry_time),
1374                            atomic_read(&cp->ref.refcount), cp->flags);
1375         cache_get(cp);
1376         if (cache_check(cd, cp, NULL))
1377                 /* cache_check does a cache_put on failure */
1378                 seq_printf(m, "# ");
1379         else {
1380                 if (cache_is_expired(cd, cp))
1381                         seq_printf(m, "# ");
1382                 cache_put(cp, cd);
1383         }
1384 
1385         return cd->cache_show(m, cd, cp);
1386 }
1387 
1388 static const struct seq_operations cache_content_op = {
1389         .start  = c_start,
1390         .next   = c_next,
1391         .stop   = c_stop,
1392         .show   = c_show,
1393 };
1394 
1395 static int content_open(struct inode *inode, struct file *file,
1396                         struct cache_detail *cd)
1397 {
1398         struct handle *han;
1399 
1400         if (!cd || !try_module_get(cd->owner))
1401                 return -EACCES;
1402         han = __seq_open_private(file, &cache_content_op, sizeof(*han));
1403         if (han == NULL) {
1404                 module_put(cd->owner);
1405                 return -ENOMEM;
1406         }
1407 
1408         han->cd = cd;
1409         return 0;
1410 }
1411 
1412 static int content_release(struct inode *inode, struct file *file,
1413                 struct cache_detail *cd)
1414 {
1415         int ret = seq_release_private(inode, file);
1416         module_put(cd->owner);
1417         return ret;
1418 }
1419 
1420 static int open_flush(struct inode *inode, struct file *file,
1421                         struct cache_detail *cd)
1422 {
1423         if (!cd || !try_module_get(cd->owner))
1424                 return -EACCES;
1425         return nonseekable_open(inode, file);
1426 }
1427 
1428 static int release_flush(struct inode *inode, struct file *file,
1429                         struct cache_detail *cd)
1430 {
1431         module_put(cd->owner);
1432         return 0;
1433 }
1434 
1435 static ssize_t read_flush(struct file *file, char __user *buf,
1436                           size_t count, loff_t *ppos,
1437                           struct cache_detail *cd)
1438 {
1439         char tbuf[22];
1440         unsigned long p = *ppos;
1441         size_t len;
1442 
1443         snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
1444         len = strlen(tbuf);
1445         if (p >= len)
1446                 return 0;
1447         len -= p;
1448         if (len > count)
1449                 len = count;
1450         if (copy_to_user(buf, (void*)(tbuf+p), len))
1451                 return -EFAULT;
1452         *ppos += len;
1453         return len;
1454 }
1455 
1456 static ssize_t write_flush(struct file *file, const char __user *buf,
1457                            size_t count, loff_t *ppos,
1458                            struct cache_detail *cd)
1459 {
1460         char tbuf[20];
1461         char *bp, *ep;
1462 
1463         if (*ppos || count > sizeof(tbuf)-1)
1464                 return -EINVAL;
1465         if (copy_from_user(tbuf, buf, count))
1466                 return -EFAULT;
1467         tbuf[count] = 0;
1468         simple_strtoul(tbuf, &ep, 0);
1469         if (*ep && *ep != '\n')
1470                 return -EINVAL;
1471 
1472         bp = tbuf;
1473         cd->flush_time = get_expiry(&bp);
1474         cd->nextcheck = seconds_since_boot();
1475         cache_flush();
1476 
1477         *ppos += count;
1478         return count;
1479 }
1480 
1481 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1482                                  size_t count, loff_t *ppos)
1483 {
1484         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1485 
1486         return cache_read(filp, buf, count, ppos, cd);
1487 }
1488 
1489 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1490                                   size_t count, loff_t *ppos)
1491 {
1492         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1493 
1494         return cache_write(filp, buf, count, ppos, cd);
1495 }
1496 
1497 static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
1498 {
1499         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1500 
1501         return cache_poll(filp, wait, cd);
1502 }
1503 
1504 static long cache_ioctl_procfs(struct file *filp,
1505                                unsigned int cmd, unsigned long arg)
1506 {
1507         struct inode *inode = file_inode(filp);
1508         struct cache_detail *cd = PDE_DATA(inode);
1509 
1510         return cache_ioctl(inode, filp, cmd, arg, cd);
1511 }
1512 
1513 static int cache_open_procfs(struct inode *inode, struct file *filp)
1514 {
1515         struct cache_detail *cd = PDE_DATA(inode);
1516 
1517         return cache_open(inode, filp, cd);
1518 }
1519 
1520 static int cache_release_procfs(struct inode *inode, struct file *filp)
1521 {
1522         struct cache_detail *cd = PDE_DATA(inode);
1523 
1524         return cache_release(inode, filp, cd);
1525 }
1526 
1527 static const struct file_operations cache_file_operations_procfs = {
1528         .owner          = THIS_MODULE,
1529         .llseek         = no_llseek,
1530         .read           = cache_read_procfs,
1531         .write          = cache_write_procfs,
1532         .poll           = cache_poll_procfs,
1533         .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
1534         .open           = cache_open_procfs,
1535         .release        = cache_release_procfs,
1536 };
1537 
1538 static int content_open_procfs(struct inode *inode, struct file *filp)
1539 {
1540         struct cache_detail *cd = PDE_DATA(inode);
1541 
1542         return content_open(inode, filp, cd);
1543 }
1544 
1545 static int content_release_procfs(struct inode *inode, struct file *filp)
1546 {
1547         struct cache_detail *cd = PDE_DATA(inode);
1548 
1549         return content_release(inode, filp, cd);
1550 }
1551 
1552 static const struct file_operations content_file_operations_procfs = {
1553         .open           = content_open_procfs,
1554         .read           = seq_read,
1555         .llseek         = seq_lseek,
1556         .release        = content_release_procfs,
1557 };
1558 
1559 static int open_flush_procfs(struct inode *inode, struct file *filp)
1560 {
1561         struct cache_detail *cd = PDE_DATA(inode);
1562 
1563         return open_flush(inode, filp, cd);
1564 }
1565 
1566 static int release_flush_procfs(struct inode *inode, struct file *filp)
1567 {
1568         struct cache_detail *cd = PDE_DATA(inode);
1569 
1570         return release_flush(inode, filp, cd);
1571 }
1572 
1573 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1574                             size_t count, loff_t *ppos)
1575 {
1576         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1577 
1578         return read_flush(filp, buf, count, ppos, cd);
1579 }
1580 
1581 static ssize_t write_flush_procfs(struct file *filp,
1582                                   const char __user *buf,
1583                                   size_t count, loff_t *ppos)
1584 {
1585         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1586 
1587         return write_flush(filp, buf, count, ppos, cd);
1588 }
1589 
1590 static const struct file_operations cache_flush_operations_procfs = {
1591         .open           = open_flush_procfs,
1592         .read           = read_flush_procfs,
1593         .write          = write_flush_procfs,
1594         .release        = release_flush_procfs,
1595         .llseek         = no_llseek,
1596 };
1597 
1598 static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
1599 {
1600         struct sunrpc_net *sn;
1601 
1602         if (cd->u.procfs.proc_ent == NULL)
1603                 return;
1604         if (cd->u.procfs.flush_ent)
1605                 remove_proc_entry("flush", cd->u.procfs.proc_ent);
1606         if (cd->u.procfs.channel_ent)
1607                 remove_proc_entry("channel", cd->u.procfs.proc_ent);
1608         if (cd->u.procfs.content_ent)
1609                 remove_proc_entry("content", cd->u.procfs.proc_ent);
1610         cd->u.procfs.proc_ent = NULL;
1611         sn = net_generic(net, sunrpc_net_id);
1612         remove_proc_entry(cd->name, sn->proc_net_rpc);
1613 }
1614 
1615 #ifdef CONFIG_PROC_FS
1616 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1617 {
1618         struct proc_dir_entry *p;
1619         struct sunrpc_net *sn;
1620 
1621         sn = net_generic(net, sunrpc_net_id);
1622         cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
1623         if (cd->u.procfs.proc_ent == NULL)
1624                 goto out_nomem;
1625         cd->u.procfs.channel_ent = NULL;
1626         cd->u.procfs.content_ent = NULL;
1627 
1628         p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
1629                              cd->u.procfs.proc_ent,
1630                              &cache_flush_operations_procfs, cd);
1631         cd->u.procfs.flush_ent = p;
1632         if (p == NULL)
1633                 goto out_nomem;
1634 
1635         if (cd->cache_request || cd->cache_parse) {
1636                 p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
1637                                      cd->u.procfs.proc_ent,
1638                                      &cache_file_operations_procfs, cd);
1639                 cd->u.procfs.channel_ent = p;
1640                 if (p == NULL)
1641                         goto out_nomem;
1642         }
1643         if (cd->cache_show) {
1644                 p = proc_create_data("content", S_IFREG|S_IRUSR,
1645                                 cd->u.procfs.proc_ent,
1646                                 &content_file_operations_procfs, cd);
1647                 cd->u.procfs.content_ent = p;
1648                 if (p == NULL)
1649                         goto out_nomem;
1650         }
1651         return 0;
1652 out_nomem:
1653         remove_cache_proc_entries(cd, net);
1654         return -ENOMEM;
1655 }
1656 #else /* CONFIG_PROC_FS */
1657 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1658 {
1659         return 0;
1660 }
1661 #endif
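/*
 * With CONFIG_PROC_FS enabled, each registered cache_detail appears as
 * /proc/net/rpc/<name>/ containing up to three files: "flush" (read or
 * write the flush time, via the *_flush_procfs handlers above), "channel"
 * (the upcall/downcall request channel, created only if the cache defines
 * cache_request or cache_parse) and "content" (a dump of the current
 * entries, created only if cache_show is defined).  Without CONFIG_PROC_FS
 * the stub above succeeds silently and no files are created.
 */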
1662 
1663 void __init cache_initialize(void)
1664 {
1665         INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1666 }
1667 
1668 int cache_register_net(struct cache_detail *cd, struct net *net)
1669 {
1670         int ret;
1671 
1672         sunrpc_init_cache_detail(cd);
1673         ret = create_cache_proc_entries(cd, net);
1674         if (ret)
1675                 sunrpc_destroy_cache_detail(cd);
1676         return ret;
1677 }
1678 EXPORT_SYMBOL_GPL(cache_register_net);
1679 
1680 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1681 {
1682         remove_cache_proc_entries(cd, net);
1683         sunrpc_destroy_cache_detail(cd);
1684 }
1685 EXPORT_SYMBOL_GPL(cache_unregister_net);
1686 
1687 struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
1688 {
1689         struct cache_detail *cd;
1690 
1691         cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1692         if (cd == NULL)
1693                 return ERR_PTR(-ENOMEM);
1694 
1695         cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
1696                                  GFP_KERNEL);
1697         if (cd->hash_table == NULL) {
1698                 kfree(cd);
1699                 return ERR_PTR(-ENOMEM);
1700         }
1701         cd->net = net;
1702         return cd;
1703 }
1704 EXPORT_SYMBOL_GPL(cache_create_net);
1705 
1706 void cache_destroy_net(struct cache_detail *cd, struct net *net)
1707 {
1708         kfree(cd->hash_table);
1709         kfree(cd);
1710 }
1711 EXPORT_SYMBOL_GPL(cache_destroy_net);
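/*
 * A hedged sketch of how a caller typically combines the helpers above for
 * a per-network-namespace cache: cache_create_net()/cache_register_net()
 * on namespace init, cache_unregister_net()/cache_destroy_net() on exit.
 * The template name, hash size and callbacks are assumptions for
 * illustration only:
 *
 *	static struct cache_detail my_cache_template = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= 64,
 *		.name		= "my_cache",
 *		// plus .cache_request/.cache_parse/.cache_show as needed
 *	};
 *
 *	static int my_cache_net_init(struct net *net)
 *	{
 *		struct cache_detail *cd;
 *		int err;
 *
 *		cd = cache_create_net(&my_cache_template, net);
 *		if (IS_ERR(cd))
 *			return PTR_ERR(cd);
 *		err = cache_register_net(cd, net);
 *		if (err)
 *			cache_destroy_net(cd, net);
 *		return err;
 *	}
 *
 *	static void my_cache_net_exit(struct net *net, struct cache_detail *cd)
 *	{
 *		cache_unregister_net(cd, net);
 *		cache_destroy_net(cd, net);
 *	}
 */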
1712 
1713 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1714                                  size_t count, loff_t *ppos)
1715 {
1716         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1717 
1718         return cache_read(filp, buf, count, ppos, cd);
1719 }
1720 
1721 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1722                                   size_t count, loff_t *ppos)
1723 {
1724         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1725 
1726         return cache_write(filp, buf, count, ppos, cd);
1727 }
1728 
1729 static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
1730 {
1731         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1732 
1733         return cache_poll(filp, wait, cd);
1734 }
1735 
1736 static long cache_ioctl_pipefs(struct file *filp,
1737                               unsigned int cmd, unsigned long arg)
1738 {
1739         struct inode *inode = file_inode(filp);
1740         struct cache_detail *cd = RPC_I(inode)->private;
1741 
1742         return cache_ioctl(inode, filp, cmd, arg, cd);
1743 }
1744 
1745 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1746 {
1747         struct cache_detail *cd = RPC_I(inode)->private;
1748 
1749         return cache_open(inode, filp, cd);
1750 }
1751 
1752 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1753 {
1754         struct cache_detail *cd = RPC_I(inode)->private;
1755 
1756         return cache_release(inode, filp, cd);
1757 }
1758 
1759 const struct file_operations cache_file_operations_pipefs = {
1760         .owner          = THIS_MODULE,
1761         .llseek         = no_llseek,
1762         .read           = cache_read_pipefs,
1763         .write          = cache_write_pipefs,
1764         .poll           = cache_poll_pipefs,
1765         .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1766         .open           = cache_open_pipefs,
1767         .release        = cache_release_pipefs,
1768 };
1769 
1770 static int content_open_pipefs(struct inode *inode, struct file *filp)
1771 {
1772         struct cache_detail *cd = RPC_I(inode)->private;
1773 
1774         return content_open(inode, filp, cd);
1775 }
1776 
1777 static int content_release_pipefs(struct inode *inode, struct file *filp)
1778 {
1779         struct cache_detail *cd = RPC_I(inode)->private;
1780 
1781         return content_release(inode, filp, cd);
1782 }
1783 
1784 const struct file_operations content_file_operations_pipefs = {
1785         .open           = content_open_pipefs,
1786         .read           = seq_read,
1787         .llseek         = seq_lseek,
1788         .release        = content_release_pipefs,
1789 };
1790 
1791 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1792 {
1793         struct cache_detail *cd = RPC_I(inode)->private;
1794 
1795         return open_flush(inode, filp, cd);
1796 }
1797 
1798 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1799 {
1800         struct cache_detail *cd = RPC_I(inode)->private;
1801 
1802         return release_flush(inode, filp, cd);
1803 }
1804 
1805 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1806                             size_t count, loff_t *ppos)
1807 {
1808         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1809 
1810         return read_flush(filp, buf, count, ppos, cd);
1811 }
1812 
1813 static ssize_t write_flush_pipefs(struct file *filp,
1814                                   const char __user *buf,
1815                                   size_t count, loff_t *ppos)
1816 {
1817         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1818 
1819         return write_flush(filp, buf, count, ppos, cd);
1820 }
1821 
1822 const struct file_operations cache_flush_operations_pipefs = {
1823         .open           = open_flush_pipefs,
1824         .read           = read_flush_pipefs,
1825         .write          = write_flush_pipefs,
1826         .release        = release_flush_pipefs,
1827         .llseek         = no_llseek,
1828 };
1829 
1830 int sunrpc_cache_register_pipefs(struct dentry *parent,
1831                                  const char *name, umode_t umode,
1832                                  struct cache_detail *cd)
1833 {
1834         struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1835         if (IS_ERR(dir))
1836                 return PTR_ERR(dir);
1837         cd->u.pipefs.dir = dir;
1838         return 0;
1839 }
1840 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1841 
1842 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1843 {
1844         rpc_remove_cache_dir(cd->u.pipefs.dir);
1845         cd->u.pipefs.dir = NULL;
1846 }
1847 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
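/*
 * The *_pipefs handlers above mirror the procfs ones but hang the
 * channel/content/flush files off a directory in rpc_pipefs rather than
 * /proc/net/rpc.  A hedged sketch of the registration step; "parent" is
 * assumed to be an existing rpc_pipefs dentry and the directory name is
 * for illustration only:
 *
 *	err = sunrpc_cache_register_pipefs(parent, "my_cache",
 *					   S_IRUSR | S_IWUSR, cd);
 *	if (err)
 *		goto out_err;
 *	...
 *	sunrpc_cache_unregister_pipefs(cd);
 */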
1848 
1849 
