TOMOYO Linux Cross Reference
Linux/fs/mbcache.c


/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
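
/*
 * Typical call sequence (an illustrative sketch, not part of the original
 * file; cache, bdev, block and keys stand for the caller's own variables):
 *
 *      cache = mb_cache_create(...);                   (once, at module init)
 *
 *      ce = mb_cache_entry_alloc(cache);               (a new block is seen)
 *      mb_cache_entry_insert(ce, bdev, block, keys);
 *      mb_cache_entry_release(ce);                     (entry stays cached)
 *
 *      ce = mb_cache_entry_get(cache, bdev, block);    (look it up again later,
 *      mb_cache_entry_release(ce);                      or use _find_first() /
 *                                                       _find_next() by key)
 *
 *      mb_cache_shrink(cache, bdev);                   (the device goes away)
 *      mb_cache_destroy(cache);                        (module unload)
 */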

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
                printk(KERN_DEBUG f); \
                printk("\n"); \
        } while (0)
#define mb_assert(c) do { if (!(c)) \
                printk(KERN_ERR "assertion " #c " failed\n"); \
        } while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
                printk(KERN_ERR f); \
                printk("\n"); \
        } while(0)

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_takeout);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_dup);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif


/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
static struct shrinker *mb_shrinker;

static inline int
mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
        return MB_CACHE_INDEXES_COUNT;
#else
        return cache->c_indexes_count;
#endif
}

/*
 * What the mbcache registers as to get shrunk dynamically.
 */

static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);


static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
        return !list_empty(&ce->e_block_list);
}


static inline void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
        int n;

        if (__mb_cache_entry_is_hashed(ce)) {
                list_del_init(&ce->e_block_list);
                for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
                        list_del(&ce->e_indexes[n].o_list);
        }
}


static inline void
__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
{
        struct mb_cache *cache = ce->e_cache;

        mb_assert(atomic_read(&ce->e_used) == 0);
        if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
                /* free failed -- put back on the lru list
                   for freeing later. */
                spin_lock(&mb_cache_spinlock);
                list_add(&ce->e_lru_list, &mb_cache_lru_list);
                spin_unlock(&mb_cache_spinlock);
        } else {
                kmem_cache_free(cache->c_entry_cache, ce);
                atomic_dec(&cache->c_entry_count);
        }
}


static inline void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
{
        if (atomic_dec_and_test(&ce->e_used)) {
                if (!__mb_cache_entry_is_hashed(ce))
                        goto forget;
                list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
        }
        spin_unlock(&mb_cache_spinlock);
        return;
forget:
        spin_unlock(&mb_cache_spinlock);
        __mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
static int
mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
{
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;
        int count = 0;

        spin_lock(&mb_cache_spinlock);
        list_for_each(l, &mb_cache_list) {
                struct mb_cache *cache =
                        list_entry(l, struct mb_cache, c_cache_list);
                mb_debug("cache %s (%d)", cache->c_name,
                          atomic_read(&cache->c_entry_count));
                count += atomic_read(&cache->c_entry_count);
        }
        mb_debug("trying to free %d entries", nr_to_scan);
        if (nr_to_scan == 0) {
                spin_unlock(&mb_cache_spinlock);
                goto out;
        }
        while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
                struct mb_cache_entry *ce =
                        list_entry(mb_cache_lru_list.next,
                                   struct mb_cache_entry, e_lru_list);
                list_move_tail(&ce->e_lru_list, &free_list);
                __mb_cache_entry_unhash(ce);
        }
        spin_unlock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &free_list) {
                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
                                                   e_lru_list), gfp_mask);
        }
out:
        return count;
}


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @cache_op: contains the callback called when freeing a cache entry
 * @entry_size: The size of a cache entry, including
 *              struct mb_cache_entry
 * @indexes_count: number of additional indexes in the cache. Must equal
 *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
 *                 hardwired.
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
                size_t entry_size, int indexes_count, int bucket_bits)
{
        int m=0, n, bucket_count = 1 << bucket_bits;
        struct mb_cache *cache = NULL;

        if(entry_size < sizeof(struct mb_cache_entry) +
           indexes_count * sizeof(struct mb_cache_entry_index))
                return NULL;

        cache = kmalloc(sizeof(struct mb_cache) +
                        indexes_count * sizeof(struct list_head), GFP_KERNEL);
        if (!cache)
                goto fail;
        cache->c_name = name;
        cache->c_op.free = NULL;
        if (cache_op)
                cache->c_op.free = cache_op->free;
        atomic_set(&cache->c_entry_count, 0);
        cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
        mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
#else
        cache->c_indexes_count = indexes_count;
#endif
        cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!cache->c_block_hash)
                goto fail;
        for (n=0; n<bucket_count; n++)
                INIT_LIST_HEAD(&cache->c_block_hash[n]);
        for (m=0; m<indexes_count; m++) {
                cache->c_indexes_hash[m] = kmalloc(bucket_count *
                                                 sizeof(struct list_head),
                                                 GFP_KERNEL);
                if (!cache->c_indexes_hash[m])
                        goto fail;
                for (n=0; n<bucket_count; n++)
                        INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
        }
        cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
                SLAB_RECLAIM_ACCOUNT, NULL, NULL);
        if (!cache->c_entry_cache)
                goto fail;

        spin_lock(&mb_cache_spinlock);
        list_add(&cache->c_cache_list, &mb_cache_list);
        spin_unlock(&mb_cache_spinlock);
        return cache;

fail:
        if (cache) {
                while (--m >= 0)
                        kfree(cache->c_indexes_hash[m]);
                if (cache->c_block_hash)
                        kfree(cache->c_block_hash);
                kfree(cache);
        }
        return NULL;
}
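
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * filesystem sets up such a cache, in the style of the ext2/ext3 extended
 * attribute code, with one additional index and 2^6 hash buckets. The
 * name "example_xattr" and the variable example_cache are hypothetical.
 *
 *      static struct mb_cache *example_cache;
 *
 *      example_cache = mb_cache_create("example_xattr", NULL,
 *                                      sizeof(struct mb_cache_entry) +
 *                                      sizeof(struct mb_cache_entry_index),
 *                                      1, 6);
 *      if (!example_cache)
 *              return -ENOMEM;
 *
 * The matching call on unload is mb_cache_destroy(example_cache).
 */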


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @cache: which cache to shrink
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
{
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;

        spin_lock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_lru_list);
                if (ce->e_bdev == bdev) {
                        list_move_tail(&ce->e_lru_list, &free_list);
                        __mb_cache_entry_unhash(ce);
                }
        }
        spin_unlock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &free_list) {
                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
                                                   e_lru_list), GFP_KERNEL);
        }
}
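
/*
 * Illustrative sketch (not part of the original file): a filesystem would
 * typically call this from its put_super path so that entries for the
 * departing device do not linger in the cache. example_cache is the
 * hypothetical cache from the sketch above, sb the super_block being
 * unmounted.
 *
 *      mb_cache_shrink(example_cache, sb->s_bdev);
 */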


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;
        int n;

        spin_lock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_lru_list);
                if (ce->e_cache == cache) {
                        list_move_tail(&ce->e_lru_list, &free_list);
                        __mb_cache_entry_unhash(ce);
                }
        }
        list_del(&cache->c_cache_list);
        spin_unlock(&mb_cache_spinlock);

        list_for_each_safe(l, ltmp, &free_list) {
                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
                                                   e_lru_list), GFP_KERNEL);
        }

        if (atomic_read(&cache->c_entry_count) > 0) {
                mb_error("cache %s: %d orphaned entries",
                          cache->c_name,
                          atomic_read(&cache->c_entry_count));
        }

        kmem_cache_destroy(cache->c_entry_cache);

        for (n=0; n < mb_cache_indexes(cache); n++)
                kfree(cache->c_indexes_hash[n]);
        kfree(cache->c_block_hash);
        kfree(cache);
}


/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache)
{
        struct mb_cache_entry *ce;

        atomic_inc(&cache->c_entry_count);
        ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
        if (ce) {
                INIT_LIST_HEAD(&ce->e_lru_list);
                INIT_LIST_HEAD(&ce->e_block_list);
                ce->e_cache = cache;
                atomic_set(&ce->e_used, 1);
        }
        return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, if another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @keys: array of additional keys. There must be indexes_count entries
 *        in the array (as specified when creating the cache).
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
                      sector_t block, unsigned int keys[])
{
        struct mb_cache *cache = ce->e_cache;
        unsigned int bucket;
        struct list_head *l;
        int error = -EBUSY, n;

        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
        spin_lock(&mb_cache_spinlock);
        list_for_each_prev(l, &cache->c_block_hash[bucket]) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_block_list);
                if (ce->e_bdev == bdev && ce->e_block == block)
                        goto out;
        }
        __mb_cache_entry_unhash(ce);
        ce->e_bdev = bdev;
        ce->e_block = block;
        list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
        for (n=0; n<mb_cache_indexes(cache); n++) {
                ce->e_indexes[n].o_key = keys[n];
                bucket = hash_long(keys[n], cache->c_bucket_bits);
                list_add(&ce->e_indexes[n].o_list,
                         &cache->c_indexes_hash[n][bucket]);
        }
        error = 0;
out:
        spin_unlock(&mb_cache_spinlock);
        return error;
}
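
/*
 * Illustrative sketch (not part of the original file): caching a newly
 * seen block, including the -EBUSY race described above. example_cache is
 * hypothetical; bh is a buffer_head for the block, key its additional
 * index key.
 *
 *      struct mb_cache_entry *ce;
 *      int error;
 *
 *      ce = mb_cache_entry_alloc(example_cache);
 *      if (!ce)
 *              return -ENOMEM;
 *      error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &key);
 *      if (error)
 *              mb_cache_entry_free(ce);        (lost the race; drop the entry)
 *      else
 *              mb_cache_entry_release(ce);     (entry is now valid and cached)
 */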


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released, it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
        spin_lock(&mb_cache_spinlock);
        __mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_takeout()
 *
 * Take a cache entry out of the cache, making it invalid. The entry can later
 * be re-inserted using mb_cache_entry_insert(), or released using
 * mb_cache_entry_release().
 */
void
mb_cache_entry_takeout(struct mb_cache_entry *ce)
{
        spin_lock(&mb_cache_spinlock);
        mb_assert(list_empty(&ce->e_lru_list));
        __mb_cache_entry_unhash(ce);
        spin_unlock(&mb_cache_spinlock);
}


/*
 * mb_cache_entry_free()
 *
 * This is equivalent to the sequence mb_cache_entry_takeout() --
 * mb_cache_entry_release().
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
        spin_lock(&mb_cache_spinlock);
        mb_assert(list_empty(&ce->e_lru_list));
        __mb_cache_entry_unhash(ce);
        __mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_dup()
 *
 * Duplicate a handle to a cache entry (does not duplicate the cache entry
 * itself). After the call, both the old and the new handle must be released.
 */
struct mb_cache_entry *
mb_cache_entry_dup(struct mb_cache_entry *ce)
{
        atomic_inc(&ce->e_used);
        return ce;
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists.
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
                   sector_t block)
{
        unsigned int bucket;
        struct list_head *l;
        struct mb_cache_entry *ce;

        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
        spin_lock(&mb_cache_spinlock);
        list_for_each(l, &cache->c_block_hash[bucket]) {
                ce = list_entry(l, struct mb_cache_entry, e_block_list);
                if (ce->e_bdev == bdev && ce->e_block == block) {
                        if (!list_empty(&ce->e_lru_list))
                                list_del_init(&ce->e_lru_list);
                        atomic_inc(&ce->e_used);
                        goto cleanup;
                }
        }
        ce = NULL;

cleanup:
        spin_unlock(&mb_cache_spinlock);
        return ce;
}

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
                      int index, struct block_device *bdev, unsigned int key)
{
        while (l != head) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry,
                                   e_indexes[index].o_list);
                if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
                        if (!list_empty(&ce->e_lru_list))
                                list_del_init(&ce->e_lru_list);
                        atomic_inc(&ce->e_used);
                        return ce;
                }
                l = l->next;
        }
        return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found.
 *
 * @cache: the cache to search
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, int index,
                          struct block_device *bdev, unsigned int key)
{
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
        struct list_head *l;
        struct mb_cache_entry *ce;

        mb_assert(index < mb_cache_indexes(cache));
        spin_lock(&mb_cache_spinlock);
        l = cache->c_indexes_hash[index][bucket].next;
        ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
                                   index, bdev, key);
        spin_unlock(&mb_cache_spinlock);
        return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *      ...
 *      entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
                         struct block_device *bdev, unsigned int key)
{
        struct mb_cache *cache = prev->e_cache;
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
        struct list_head *l;
        struct mb_cache_entry *ce;

        mb_assert(index < mb_cache_indexes(cache));
        spin_lock(&mb_cache_spinlock);
        l = prev->e_indexes[index].o_list.next;
        ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
                                   index, bdev, key);
        __mb_cache_entry_release_unlock(prev);
        return ce;
}
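
/*
 * Illustrative sketch (not part of the original file): searching by an
 * additional key as in the loop shown above, but keeping the matching
 * entry. A handle still held when the loop is left early must be released
 * (or freed) explicitly; on normal loop exit ce is NULL. example_cache and
 * entry_is_wanted() are hypothetical.
 *
 *      struct mb_cache_entry *ce;
 *
 *      ce = mb_cache_entry_find_first(example_cache, 0, bdev, key);
 *      while (ce) {
 *              if (entry_is_wanted(ce))
 *                      break;
 *              ce = mb_cache_entry_find_next(ce, 0, bdev, key);
 *      }
 *      if (ce) {
 *              ... use the entry ...
 *              mb_cache_entry_release(ce);
 *      }
 */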

#endif  /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
        mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
        return 0;
}

static void __exit exit_mbcache(void)
{
        remove_shrinker(mb_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)

