Linux/mm/zswap.c

  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*
  3  * zswap.c - zswap driver file
  4  *
  5  * zswap is a backend for frontswap that takes pages that are in the process
  6  * of being swapped out and attempts to compress and store them in a
  7  * RAM-based memory pool.  This can result in a significant I/O reduction on
  8  * the swap device and, in the case where decompressing from RAM is faster
  9  * than reading from the swap device, can also improve workload performance.
 10  *
 11  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 12  */
 13 
 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 15 
 16 #include <linux/module.h>
 17 #include <linux/cpu.h>
 18 #include <linux/highmem.h>
 19 #include <linux/slab.h>
 20 #include <linux/spinlock.h>
 21 #include <linux/types.h>
 22 #include <linux/atomic.h>
 23 #include <linux/frontswap.h>
 24 #include <linux/rbtree.h>
 25 #include <linux/swap.h>
 26 #include <linux/crypto.h>
 27 #include <linux/mempool.h>
 28 #include <linux/zpool.h>
 29 
 30 #include <linux/mm_types.h>
 31 #include <linux/page-flags.h>
 32 #include <linux/swapops.h>
 33 #include <linux/writeback.h>
 34 #include <linux/pagemap.h>
 35 
 36 /*********************************
 37 * statistics
 38 **********************************/
 39 /* Total bytes used by the compressed storage */
 40 static u64 zswap_pool_total_size;
 41 /* The number of compressed pages currently stored in zswap */
 42 static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 43 /* The number of same-value filled pages currently stored in zswap */
 44 static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
 45 
 46 /*
 47  * The statistics below are not protected from concurrent access for
 48  * performance reasons, so they may not be 100% accurate.  However,
 49  * they do provide useful information on roughly how many times a
 50  * certain event is occurring.
 51  */
 52 
 53 /* Pool limit was hit (see zswap_max_pool_percent) */
 54 static u64 zswap_pool_limit_hit;
 55 /* Pages written back when pool limit was reached */
 56 static u64 zswap_written_back_pages;
 57 /* Store failed due to a reclaim failure after pool limit was reached */
 58 static u64 zswap_reject_reclaim_fail;
 59 /* Compressed page was too big for the allocator to (optimally) store */
 60 static u64 zswap_reject_compress_poor;
 61 /* Store failed because underlying allocator could not get memory */
 62 static u64 zswap_reject_alloc_fail;
 63 /* Store failed because the entry metadata could not be allocated (rare) */
 64 static u64 zswap_reject_kmemcache_fail;
 65 /* Duplicate store was encountered (rare) */
 66 static u64 zswap_duplicate_entry;
 67 
 68 /*********************************
 69 * tunables
 70 **********************************/
 71 
 72 #define ZSWAP_PARAM_UNSET ""
 73 
 74 /* Enable/disable zswap (disabled by default) */
 75 static bool zswap_enabled;
 76 static int zswap_enabled_param_set(const char *,
 77                                    const struct kernel_param *);
 78 static const struct kernel_param_ops zswap_enabled_param_ops = {
 79         .set =          zswap_enabled_param_set,
 80         .get =          param_get_bool,
 81 };
 82 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
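
/*
 * Editor's note (usage example, not part of the original source):
 * module_param_cb() exposes this knob under
 * /sys/module/zswap/parameters/, so zswap can be toggled at runtime:
 *
 *   echo 1 > /sys/module/zswap/parameters/enabled
 *
 * or enabled at boot with "zswap.enabled=1" on the kernel command line.
 */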
 83 
 84 /* Crypto compressor to use */
 85 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
 86 static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
 87 static int zswap_compressor_param_set(const char *,
 88                                       const struct kernel_param *);
 89 static const struct kernel_param_ops zswap_compressor_param_ops = {
 90         .set =          zswap_compressor_param_set,
 91         .get =          param_get_charp,
 92         .free =         param_free_charp,
 93 };
 94 module_param_cb(compressor, &zswap_compressor_param_ops,
 95                 &zswap_compressor, 0644);
 96 
 97 /* Compressed storage zpool to use */
 98 #define ZSWAP_ZPOOL_DEFAULT "zbud"
 99 static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
100 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
101 static const struct kernel_param_ops zswap_zpool_param_ops = {
102         .set =          zswap_zpool_param_set,
103         .get =          param_get_charp,
104         .free =         param_free_charp,
105 };
106 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
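
/*
 * Editor's note (illustrative, not part of the original source): the
 * compressor and zpool are selected the same way, e.g.
 * "zswap.compressor=lz4 zswap.zpool=z3fold" on the kernel command line
 * (assuming those algorithms are built in), or at runtime via
 * /sys/module/zswap/parameters/.  A runtime change creates a new pool
 * for new stores; entries in the old pool remain there until they are
 * loaded, invalidated, or written back.
 */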
107 
108 /* The maximum percentage of memory that the compressed pool can occupy */
109 static unsigned int zswap_max_pool_percent = 20;
110 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
111 
112 /* Enable/disable handling same-value filled pages (enabled by default) */
113 static bool zswap_same_filled_pages_enabled = true;
114 module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
115                    bool, 0644);
116 
117 /*********************************
118 * data structures
119 **********************************/
120 
121 struct zswap_pool {
122         struct zpool *zpool;
123         struct crypto_comp * __percpu *tfm;
124         struct kref kref;
125         struct list_head list;
126         struct work_struct work;
127         struct hlist_node node;
128         char tfm_name[CRYPTO_MAX_ALG_NAME];
129 };
130 
131 /*
132  * struct zswap_entry
133  *
134  * This structure contains the metadata for tracking a single compressed
135  * page within zswap.
136  *
137  * rbnode - links the entry into red-black tree for the appropriate swap type
138  * offset - the swap offset for the entry.  Index into the red-black tree.
139  * refcount - the number of outstanding references to the entry. This is
140  *            needed to protect against premature freeing of the entry by
141  *            concurrent calls to load, invalidate, and writeback.  The lock
142  *            for the zswap_tree structure that contains the entry must
143  *            be held while changing the refcount.  Since the lock must
144  *            be held, there is no reason to also make refcount atomic.
145  * length - the length in bytes of the compressed page data.  Needed during
146  *          decompression.  For a same-value filled page, length is 0.
147  * pool - the zswap_pool the entry's data is in
148  * handle - zpool allocation handle that stores the compressed page data
149  * value - the single repeated value stored for a same-value filled page
150  */
151 struct zswap_entry {
152         struct rb_node rbnode;
153         pgoff_t offset;
154         int refcount;
155         unsigned int length;
156         struct zswap_pool *pool;
157         union {
158                 unsigned long handle;
159                 unsigned long value;
160         };
161 };
162 
163 struct zswap_header {
164         swp_entry_t swpentry;
165 };
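
/*
 * Editor's note: when the zpool is evictable, this header is written in
 * front of the compressed data so that zswap_writeback_entry(), which is
 * handed only a zpool handle, can recover the swp_entry_t and from it
 * the owning zswap_tree and rbtree offset.
 */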
166 
167 /*
168  * The tree lock in the zswap_tree struct protects a few things:
169  * - the rbtree
170  * - the refcount field of each entry in the tree
171  */
172 struct zswap_tree {
173         struct rb_root rbroot;
174         spinlock_t lock;
175 };
176 
177 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
178 
179 /* RCU-protected iteration */
180 static LIST_HEAD(zswap_pools);
181 /* protects zswap_pools list modification */
182 static DEFINE_SPINLOCK(zswap_pools_lock);
183 /* pool counter to provide unique names to zpool */
184 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
185 
186 /* used by param callback function */
187 static bool zswap_init_started;
188 
189 /* fatal error during init */
190 static bool zswap_init_failed;
191 
192 /* init completed, but couldn't create the initial pool */
193 static bool zswap_has_pool;
194 
195 /*********************************
196 * helpers and fwd declarations
197 **********************************/
198 
199 #define zswap_pool_debug(msg, p)                                \
200         pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
201                  zpool_get_type((p)->zpool))
202 
203 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
204 static int zswap_pool_get(struct zswap_pool *pool);
205 static void zswap_pool_put(struct zswap_pool *pool);
206 
207 static const struct zpool_ops zswap_zpool_ops = {
208         .evict = zswap_writeback_entry
209 };
210 
211 static bool zswap_is_full(void)
212 {
213         return totalram_pages() * zswap_max_pool_percent / 100 <
214                         DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
215 }
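
/*
 * Editor's note, worked example: with 4 GiB of RAM and 4 KiB pages,
 * totalram_pages() is 1048576, so at the default zswap_max_pool_percent
 * of 20 the pool counts as full once the compressed data occupies more
 * than 209715 pages (about 819 MiB).
 */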
216 
217 static void zswap_update_total_size(void)
218 {
219         struct zswap_pool *pool;
220         u64 total = 0;
221 
222         rcu_read_lock();
223 
224         list_for_each_entry_rcu(pool, &zswap_pools, list)
225                 total += zpool_get_total_size(pool->zpool);
226 
227         rcu_read_unlock();
228 
229         zswap_pool_total_size = total;
230 }
231 
232 /*********************************
233 * zswap entry functions
234 **********************************/
235 static struct kmem_cache *zswap_entry_cache;
236 
237 static int __init zswap_entry_cache_create(void)
238 {
239         zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
240         return zswap_entry_cache == NULL;
241 }
242 
243 static void __init zswap_entry_cache_destroy(void)
244 {
245         kmem_cache_destroy(zswap_entry_cache);
246 }
247 
248 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
249 {
250         struct zswap_entry *entry;
251         entry = kmem_cache_alloc(zswap_entry_cache, gfp);
252         if (!entry)
253                 return NULL;
254         entry->refcount = 1;
255         RB_CLEAR_NODE(&entry->rbnode);
256         return entry;
257 }
258 
259 static void zswap_entry_cache_free(struct zswap_entry *entry)
260 {
261         kmem_cache_free(zswap_entry_cache, entry);
262 }
263 
264 /*********************************
265 * rbtree functions
266 **********************************/
267 static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
268 {
269         struct rb_node *node = root->rb_node;
270         struct zswap_entry *entry;
271 
272         while (node) {
273                 entry = rb_entry(node, struct zswap_entry, rbnode);
274                 if (entry->offset > offset)
275                         node = node->rb_left;
276                 else if (entry->offset < offset)
277                         node = node->rb_right;
278                 else
279                         return entry;
280         }
281         return NULL;
282 }
283 
284 /*
285  * In the case that an entry with the same offset is found, a pointer to
286  * the existing entry is stored in dupentry and the function returns -EEXIST
287  */
288 static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
289                         struct zswap_entry **dupentry)
290 {
291         struct rb_node **link = &root->rb_node, *parent = NULL;
292         struct zswap_entry *myentry;
293 
294         while (*link) {
295                 parent = *link;
296                 myentry = rb_entry(parent, struct zswap_entry, rbnode);
297                 if (myentry->offset > entry->offset)
298                         link = &(*link)->rb_left;
299                 else if (myentry->offset < entry->offset)
300                         link = &(*link)->rb_right;
301                 else {
302                         *dupentry = myentry;
303                         return -EEXIST;
304                 }
305         }
306         rb_link_node(&entry->rbnode, parent, link);
307         rb_insert_color(&entry->rbnode, root);
308         return 0;
309 }
310 
311 static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
312 {
313         if (!RB_EMPTY_NODE(&entry->rbnode)) {
314                 rb_erase(&entry->rbnode, root);
315                 RB_CLEAR_NODE(&entry->rbnode);
316         }
317 }
318 
319 /*
320  * Carries out the common pattern of freeing an entry's zpool allocation,
321  * freeing the entry itself, and decrementing the number of stored pages.
322  */
323 static void zswap_free_entry(struct zswap_entry *entry)
324 {
325         if (!entry->length)
326                 atomic_dec(&zswap_same_filled_pages);
327         else {
328                 zpool_free(entry->pool->zpool, entry->handle);
329                 zswap_pool_put(entry->pool);
330         }
331         zswap_entry_cache_free(entry);
332         atomic_dec(&zswap_stored_pages);
333         zswap_update_total_size();
334 }
335 
336 /* caller must hold the tree lock */
337 static void zswap_entry_get(struct zswap_entry *entry)
338 {
339         entry->refcount++;
340 }
341 
342 /* caller must hold the tree lock
343  * remove from the tree and free it, if nobody references the entry
344  */
345 static void zswap_entry_put(struct zswap_tree *tree,
346                         struct zswap_entry *entry)
347 {
348         int refcount = --entry->refcount;
349 
350         BUG_ON(refcount < 0);
351         if (refcount == 0) {
352                 zswap_rb_erase(&tree->rbroot, entry);
353                 zswap_free_entry(entry);
354         }
355 }
356 
357 /* caller must hold the tree lock */
358 static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
359                                 pgoff_t offset)
360 {
361         struct zswap_entry *entry;
362 
363         entry = zswap_rb_search(root, offset);
364         if (entry)
365                 zswap_entry_get(entry);
366 
367         return entry;
368 }
369 
370 /*********************************
371 * per-cpu code
372 **********************************/
373 static DEFINE_PER_CPU(u8 *, zswap_dstmem);
374 
375 static int zswap_dstmem_prepare(unsigned int cpu)
376 {
377         u8 *dst;
378 
379         dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
380         if (!dst)
381                 return -ENOMEM;
382 
383         per_cpu(zswap_dstmem, cpu) = dst;
384         return 0;
385 }
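
/*
 * Editor's note: the scratch buffer is 2 * PAGE_SIZE because some
 * compressor implementations (LZO, for instance) may write past the
 * requested output length on incompressible data before reporting
 * failure; the extra page keeps that worst case from overrunning the
 * buffer.
 */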
386 
387 static int zswap_dstmem_dead(unsigned int cpu)
388 {
389         u8 *dst;
390 
391         dst = per_cpu(zswap_dstmem, cpu);
392         kfree(dst);
393         per_cpu(zswap_dstmem, cpu) = NULL;
394 
395         return 0;
396 }
397 
398 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
399 {
400         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
401         struct crypto_comp *tfm;
402 
403         if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
404                 return 0;
405 
406         tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
407         if (IS_ERR_OR_NULL(tfm)) {
408                 pr_err("could not alloc crypto comp %s : %ld\n",
409                        pool->tfm_name, PTR_ERR(tfm));
410                 return -ENOMEM;
411         }
412         *per_cpu_ptr(pool->tfm, cpu) = tfm;
413         return 0;
414 }
415 
416 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
417 {
418         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
419         struct crypto_comp *tfm;
420 
421         tfm = *per_cpu_ptr(pool->tfm, cpu);
422         if (!IS_ERR_OR_NULL(tfm))
423                 crypto_free_comp(tfm);
424         *per_cpu_ptr(pool->tfm, cpu) = NULL;
425         return 0;
426 }
427 
428 /*********************************
429 * pool functions
430 **********************************/
431 
432 static struct zswap_pool *__zswap_pool_current(void)
433 {
434         struct zswap_pool *pool;
435 
436         pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
437         WARN_ONCE(!pool && zswap_has_pool,
438                   "%s: no page storage pool!\n", __func__);
439 
440         return pool;
441 }
442 
443 static struct zswap_pool *zswap_pool_current(void)
444 {
445         assert_spin_locked(&zswap_pools_lock);
446 
447         return __zswap_pool_current();
448 }
449 
450 static struct zswap_pool *zswap_pool_current_get(void)
451 {
452         struct zswap_pool *pool;
453 
454         rcu_read_lock();
455 
456         pool = __zswap_pool_current();
457         if (!zswap_pool_get(pool))
458                 pool = NULL;
459 
460         rcu_read_unlock();
461 
462         return pool;
463 }
464 
465 static struct zswap_pool *zswap_pool_last_get(void)
466 {
467         struct zswap_pool *pool, *last = NULL;
468 
469         rcu_read_lock();
470 
471         list_for_each_entry_rcu(pool, &zswap_pools, list)
472                 last = pool;
473         WARN_ONCE(!last && zswap_has_pool,
474                   "%s: no page storage pool!\n", __func__);
475         if (!zswap_pool_get(last))
476                 last = NULL;
477 
478         rcu_read_unlock();
479 
480         return last;
481 }
482 
483 /* type and compressor must be null-terminated */
484 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
485 {
486         struct zswap_pool *pool;
487 
488         assert_spin_locked(&zswap_pools_lock);
489 
490         list_for_each_entry_rcu(pool, &zswap_pools, list) {
491                 if (strcmp(pool->tfm_name, compressor))
492                         continue;
493                 if (strcmp(zpool_get_type(pool->zpool), type))
494                         continue;
495                 /* if we can't get it, it's about to be destroyed */
496                 if (!zswap_pool_get(pool))
497                         continue;
498                 return pool;
499         }
500 
501         return NULL;
502 }
503 
504 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
505 {
506         struct zswap_pool *pool;
507         char name[38]; /* 'zswap' + 32 char (max) num + \0 */
508         gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
509         int ret;
510 
511         if (!zswap_has_pool) {
512                 /* if either are unset, pool initialization failed, and we
513                  * need both params to be set correctly before trying to
514                  * create a pool.
515                  */
516                 if (!strcmp(type, ZSWAP_PARAM_UNSET))
517                         return NULL;
518                 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
519                         return NULL;
520         }
521 
522         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
523         if (!pool)
524                 return NULL;
525 
526         /* unique name for each pool specifically required by zsmalloc */
527         snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
528 
529         pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
530         if (!pool->zpool) {
531                 pr_err("%s zpool not available\n", type);
532                 goto error;
533         }
534         pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
535 
536         strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
537         pool->tfm = alloc_percpu(struct crypto_comp *);
538         if (!pool->tfm) {
539                 pr_err("percpu alloc failed\n");
540                 goto error;
541         }
542 
543         ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
544                                        &pool->node);
545         if (ret)
546                 goto error;
547         pr_debug("using %s compressor\n", pool->tfm_name);
548 
549         /* being the current pool takes 1 ref; this func expects the
550          * caller to always add the new pool as the current pool
551          */
552         kref_init(&pool->kref);
553         INIT_LIST_HEAD(&pool->list);
554 
555         zswap_pool_debug("created", pool);
556 
557         return pool;
558 
559 error:
560         free_percpu(pool->tfm);
561         if (pool->zpool)
562                 zpool_destroy_pool(pool->zpool);
563         kfree(pool);
564         return NULL;
565 }
566 
567 static __init struct zswap_pool *__zswap_pool_create_fallback(void)
568 {
569         bool has_comp, has_zpool;
570 
571         has_comp = crypto_has_comp(zswap_compressor, 0, 0);
572         if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
573                 pr_err("compressor %s not available, using default %s\n",
574                        zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
575                 param_free_charp(&zswap_compressor);
576                 zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
577                 has_comp = crypto_has_comp(zswap_compressor, 0, 0);
578         }
579         if (!has_comp) {
580                 pr_err("default compressor %s not available\n",
581                        zswap_compressor);
582                 param_free_charp(&zswap_compressor);
583                 zswap_compressor = ZSWAP_PARAM_UNSET;
584         }
585 
586         has_zpool = zpool_has_pool(zswap_zpool_type);
587         if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
588                 pr_err("zpool %s not available, using default %s\n",
589                        zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
590                 param_free_charp(&zswap_zpool_type);
591                 zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
592                 has_zpool = zpool_has_pool(zswap_zpool_type);
593         }
594         if (!has_zpool) {
595                 pr_err("default zpool %s not available\n",
596                        zswap_zpool_type);
597                 param_free_charp(&zswap_zpool_type);
598                 zswap_zpool_type = ZSWAP_PARAM_UNSET;
599         }
600 
601         if (!has_comp || !has_zpool)
602                 return NULL;
603 
604         return zswap_pool_create(zswap_zpool_type, zswap_compressor);
605 }
606 
607 static void zswap_pool_destroy(struct zswap_pool *pool)
608 {
609         zswap_pool_debug("destroying", pool);
610 
611         cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
612         free_percpu(pool->tfm);
613         zpool_destroy_pool(pool->zpool);
614         kfree(pool);
615 }
616 
617 static int __must_check zswap_pool_get(struct zswap_pool *pool)
618 {
619         if (!pool)
620                 return 0;
621 
622         return kref_get_unless_zero(&pool->kref);
623 }
624 
625 static void __zswap_pool_release(struct work_struct *work)
626 {
627         struct zswap_pool *pool = container_of(work, typeof(*pool), work);
628 
629         synchronize_rcu();
630 
631         /* nobody should have been able to get a kref... */
632         WARN_ON(kref_get_unless_zero(&pool->kref));
633 
634         /* pool is now off zswap_pools list and has no references. */
635         zswap_pool_destroy(pool);
636 }
637 
638 static void __zswap_pool_empty(struct kref *kref)
639 {
640         struct zswap_pool *pool;
641 
642         pool = container_of(kref, typeof(*pool), kref);
643 
644         spin_lock(&zswap_pools_lock);
645 
646         WARN_ON(pool == zswap_pool_current());
647 
648         list_del_rcu(&pool->list);
649 
650         INIT_WORK(&pool->work, __zswap_pool_release);
651         schedule_work(&pool->work);
652 
653         spin_unlock(&zswap_pools_lock);
654 }
655 
656 static void zswap_pool_put(struct zswap_pool *pool)
657 {
658         kref_put(&pool->kref, __zswap_pool_empty);
659 }
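
/*
 * Editor's note on the teardown above: the final kref_put() may run in
 * atomic context, so __zswap_pool_empty() only unlinks the pool from
 * zswap_pools under the spinlock and defers destruction to a work item.
 * __zswap_pool_release() can then sleep in synchronize_rcu(), ensuring
 * every RCU list walker that might still see the pool has finished
 * before zswap_pool_destroy() frees it.
 */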
660 
661 /*********************************
662 * param callbacks
663 **********************************/
664 
665 /* val must be a null-terminated string */
666 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
667                              char *type, char *compressor)
668 {
669         struct zswap_pool *pool, *put_pool = NULL;
670         char *s = strstrip((char *)val);
671         int ret;
672 
673         if (zswap_init_failed) {
674                 pr_err("can't set param, initialization failed\n");
675                 return -ENODEV;
676         }
677 
678         /* no change required */
679         if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
680                 return 0;
681 
682         /* if this is load-time (pre-init) param setting,
683          * don't create a pool; that's done during init.
684          */
685         if (!zswap_init_started)
686                 return param_set_charp(s, kp);
687 
688         if (!type) {
689                 if (!zpool_has_pool(s)) {
690                         pr_err("zpool %s not available\n", s);
691                         return -ENOENT;
692                 }
693                 type = s;
694         } else if (!compressor) {
695                 if (!crypto_has_comp(s, 0, 0)) {
696                         pr_err("compressor %s not available\n", s);
697                         return -ENOENT;
698                 }
699                 compressor = s;
700         } else {
701                 WARN_ON(1);
702                 return -EINVAL;
703         }
704 
705         spin_lock(&zswap_pools_lock);
706 
707         pool = zswap_pool_find_get(type, compressor);
708         if (pool) {
709                 zswap_pool_debug("using existing", pool);
710                 WARN_ON(pool == zswap_pool_current());
711                 list_del_rcu(&pool->list);
712         }
713 
714         spin_unlock(&zswap_pools_lock);
715 
716         if (!pool)
717                 pool = zswap_pool_create(type, compressor);
718 
719         if (pool)
720                 ret = param_set_charp(s, kp);
721         else
722                 ret = -EINVAL;
723 
724         spin_lock(&zswap_pools_lock);
725 
726         if (!ret) {
727                 put_pool = zswap_pool_current();
728                 list_add_rcu(&pool->list, &zswap_pools);
729                 zswap_has_pool = true;
730         } else if (pool) {
731                 /* add the possibly pre-existing pool to the end of the pools
732                  * list; if it's new (and empty) then it'll be removed and
733                  * destroyed by the put after we drop the lock
734                  */
735                 list_add_tail_rcu(&pool->list, &zswap_pools);
736                 put_pool = pool;
737         }
738 
739         spin_unlock(&zswap_pools_lock);
740 
741         if (!zswap_has_pool && !pool) {
742                 /* if initial pool creation failed, and this pool creation also
743                  * failed, maybe both compressor and zpool params were bad.
744                  * Allow changing this param, so pool creation will succeed
745                  * when the other param is changed. We already verified this
746                  * param is ok in the zpool_has_pool() or crypto_has_comp()
747                  * checks above.
748                  */
749                 ret = param_set_charp(s, kp);
750         }
751 
752         /* drop the ref from either the old current pool,
753          * or the new pool we failed to add
754          */
755         if (put_pool)
756                 zswap_pool_put(put_pool);
757 
758         return ret;
759 }
760 
761 static int zswap_compressor_param_set(const char *val,
762                                       const struct kernel_param *kp)
763 {
764         return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
765 }
766 
767 static int zswap_zpool_param_set(const char *val,
768                                  const struct kernel_param *kp)
769 {
770         return __zswap_param_set(val, kp, NULL, zswap_compressor);
771 }
772 
773 static int zswap_enabled_param_set(const char *val,
774                                    const struct kernel_param *kp)
775 {
776         if (zswap_init_failed) {
777                 pr_err("can't enable, initialization failed\n");
778                 return -ENODEV;
779         }
780         if (!zswap_has_pool && zswap_init_started) {
781                 pr_err("can't enable, no pool configured\n");
782                 return -ENODEV;
783         }
784 
785         return param_set_bool(val, kp);
786 }
787 
788 /*********************************
789 * writeback code
790 **********************************/
791 /* return enum for zswap_get_swap_cache_page */
792 enum zswap_get_swap_ret {
793         ZSWAP_SWAPCACHE_NEW,
794         ZSWAP_SWAPCACHE_EXIST,
795         ZSWAP_SWAPCACHE_FAIL,
796 };
797 
798 /*
799  * zswap_get_swap_cache_page
800  *
801  * This is an adaptation of read_swap_cache_async()
802  *
803  * This function tries to find a page with the given swap entry
804  * in the swapper_space address space (the swap cache).  If the page
805  * is found, it is returned in retpage.  Otherwise, a page is allocated,
806  * added to the swap cache, and returned in retpage.
807  *
808  * On success, the swap cache page is returned in retpage.
809  * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
810  * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
811  *     the new page is added to swapcache and locked
812  * Returns ZSWAP_SWAPCACHE_FAIL on error
813  */
814 static int zswap_get_swap_cache_page(swp_entry_t entry,
815                                 struct page **retpage)
816 {
817         bool page_was_allocated;
818 
819         *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
820                         NULL, 0, &page_was_allocated);
821         if (page_was_allocated)
822                 return ZSWAP_SWAPCACHE_NEW;
823         if (!*retpage)
824                 return ZSWAP_SWAPCACHE_FAIL;
825         return ZSWAP_SWAPCACHE_EXIST;
826 }
827 
828 /*
829  * Attempts to free an entry by adding a page to the swap cache,
830  * decompressing the entry data into the page, and issuing a
831  * bio write to write the page back to the swap device.
832  *
833  * This can be thought of as a "resumed writeback" of the page
834  * to the swap device.  We are basically resuming the same swap
835  * writeback path that was intercepted with the frontswap_store()
836  * in the first place.  After the page has been decompressed into
837  * the swap cache, the compressed version stored by zswap can be
838  * freed.
839  */
840 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
841 {
842         struct zswap_header *zhdr;
843         swp_entry_t swpentry;
844         struct zswap_tree *tree;
845         pgoff_t offset;
846         struct zswap_entry *entry;
847         struct page *page;
848         struct crypto_comp *tfm;
849         u8 *src, *dst;
850         unsigned int dlen;
851         int ret;
852         struct writeback_control wbc = {
853                 .sync_mode = WB_SYNC_NONE,
854         };
855 
856         /* extract swpentry from data */
857         zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
858         swpentry = zhdr->swpentry; /* here */
859         zpool_unmap_handle(pool, handle);
860         tree = zswap_trees[swp_type(swpentry)];
861         offset = swp_offset(swpentry);
862 
863         /* find and ref zswap entry */
864         spin_lock(&tree->lock);
865         entry = zswap_entry_find_get(&tree->rbroot, offset);
866         if (!entry) {
867                 /* entry was invalidated */
868                 spin_unlock(&tree->lock);
869                 return 0;
870         }
871         spin_unlock(&tree->lock);
872         BUG_ON(offset != entry->offset);
873 
874         /* try to allocate swap cache page */
875         switch (zswap_get_swap_cache_page(swpentry, &page)) {
876         case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
877                 ret = -ENOMEM;
878                 goto fail;
879 
880         case ZSWAP_SWAPCACHE_EXIST:
881                 /* page is already in the swap cache, ignore for now */
882                 put_page(page);
883                 ret = -EEXIST;
884                 goto fail;
885 
886         case ZSWAP_SWAPCACHE_NEW: /* page is locked */
887                 /* decompress */
888                 dlen = PAGE_SIZE;
889                 src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
890                                 ZPOOL_MM_RO) + sizeof(struct zswap_header);
891                 dst = kmap_atomic(page);
892                 tfm = *get_cpu_ptr(entry->pool->tfm);
893                 ret = crypto_comp_decompress(tfm, src, entry->length,
894                                              dst, &dlen);
895                 put_cpu_ptr(entry->pool->tfm);
896                 kunmap_atomic(dst);
897                 zpool_unmap_handle(entry->pool->zpool, entry->handle);
898                 BUG_ON(ret);
899                 BUG_ON(dlen != PAGE_SIZE);
900 
901                 /* page is up to date */
902                 SetPageUptodate(page);
903         }
904 
905         /* move it to the tail of the inactive list after end_writeback */
906         SetPageReclaim(page);
907 
908         /* start writeback */
909         __swap_writepage(page, &wbc, end_swap_bio_write);
910         put_page(page);
911         zswap_written_back_pages++;
912 
913         spin_lock(&tree->lock);
914         /* drop local reference */
915         zswap_entry_put(tree, entry);
916 
917         /*
918          * There are two possible situations for the entry here:
919          * (1) refcount is 1 (normal case): the entry is valid and on the tree
920          * (2) refcount is 0: the entry was freed and removed from the tree
921          *     because an invalidate happened during writeback;
922          *     search the tree and free the entry if it is found
923          */
924         if (entry == zswap_rb_search(&tree->rbroot, offset))
925                 zswap_entry_put(tree, entry);
926         spin_unlock(&tree->lock);
927 
928         goto end;
929 
930         /*
931          * If we get here due to ZSWAP_SWAPCACHE_EXIST,
932          * a load may be happening concurrently.
933          * It is safe and okay to not free the entry here,
934          * and it is also okay to return !0 even if the
935          * entry ends up being freed by the following put.
936          */
937 fail:
938         spin_lock(&tree->lock);
939         zswap_entry_put(tree, entry);
940         spin_unlock(&tree->lock);
941 
942 end:
943         return ret;
944 }
945 
946 static int zswap_shrink(void)
947 {
948         struct zswap_pool *pool;
949         int ret;
950 
951         pool = zswap_pool_last_get();
952         if (!pool)
953                 return -ENOENT;
954 
955         ret = zpool_shrink(pool->zpool, 1, NULL);
956 
957         zswap_pool_put(pool);
958 
959         return ret;
960 }
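
/*
 * Editor's note: new pools are added to the head of zswap_pools, so
 * zswap_pool_last_get() returns the oldest one.  Shrinking therefore
 * drains old pools first; after a runtime compressor/zpool change this
 * lets the previous pool empty out until its refcount drops and it can
 * be destroyed.
 */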
961 
962 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
963 {
964         unsigned int pos;
965         unsigned long *page;
966 
967         page = (unsigned long *)ptr;
968         for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
969                 if (page[pos] != page[0])
970                         return 0;
971         }
972         *value = page[0];
973         return 1;
974 }
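
/*
 * Editor's note, example: a zeroed page, or one filled with a repeating
 * machine word such as 0xffffffffffffffff, passes this test; only the
 * single unsigned long value is kept in the entry, so no zpool space is
 * consumed.  A pattern that repeats at a granularity other than
 * sizeof(unsigned long) is not detected.
 */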
975 
976 static void zswap_fill_page(void *ptr, unsigned long value)
977 {
978         unsigned long *page;
979 
980         page = (unsigned long *)ptr;
981         memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
982 }
983 
984 /*********************************
985 * frontswap hooks
986 **********************************/
987 /* attempts to compress and store a single page */
988 static int zswap_frontswap_store(unsigned type, pgoff_t offset,
989                                 struct page *page)
990 {
991         struct zswap_tree *tree = zswap_trees[type];
992         struct zswap_entry *entry, *dupentry;
993         struct crypto_comp *tfm;
994         int ret;
995         unsigned int hlen, dlen = PAGE_SIZE;
996         unsigned long handle, value;
997         char *buf;
998         u8 *src, *dst;
999         struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
1000 
1001         /* THP isn't supported */
1002         if (PageTransHuge(page)) {
1003                 ret = -EINVAL;
1004                 goto reject;
1005         }
1006 
1007         if (!zswap_enabled || !tree) {
1008                 ret = -ENODEV;
1009                 goto reject;
1010         }
1011 
1012         /* reclaim space if needed */
1013         if (zswap_is_full()) {
1014                 zswap_pool_limit_hit++;
1015                 if (zswap_shrink()) {
1016                         zswap_reject_reclaim_fail++;
1017                         ret = -ENOMEM;
1018                         goto reject;
1019                 }
1020 
1021                 /* A second zswap_is_full() check after
1022                  * zswap_shrink() to make sure the pool is
1023                  * now under the max_pool_percent limit
1024                  */
1025                 if (zswap_is_full()) {
1026                         ret = -ENOMEM;
1027                         goto reject;
1028                 }
1029         }
1030 
1031         /* allocate entry */
1032         entry = zswap_entry_cache_alloc(GFP_KERNEL);
1033         if (!entry) {
1034                 zswap_reject_kmemcache_fail++;
1035                 ret = -ENOMEM;
1036                 goto reject;
1037         }
1038 
1039         if (zswap_same_filled_pages_enabled) {
1040                 src = kmap_atomic(page);
1041                 if (zswap_is_page_same_filled(src, &value)) {
1042                         kunmap_atomic(src);
1043                         entry->offset = offset;
1044                         entry->length = 0;
1045                         entry->value = value;
1046                         atomic_inc(&zswap_same_filled_pages);
1047                         goto insert_entry;
1048                 }
1049                 kunmap_atomic(src);
1050         }
1051 
1052         /* if entry is successfully added, it keeps the reference */
1053         entry->pool = zswap_pool_current_get();
1054         if (!entry->pool) {
1055                 ret = -EINVAL;
1056                 goto freepage;
1057         }
1058 
1059         /* compress */
1060         dst = get_cpu_var(zswap_dstmem);
1061         tfm = *get_cpu_ptr(entry->pool->tfm);
1062         src = kmap_atomic(page);
1063         ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
1064         kunmap_atomic(src);
1065         put_cpu_ptr(entry->pool->tfm);
1066         if (ret) {
1067                 ret = -EINVAL;
1068                 goto put_dstmem;
1069         }
1070 
1071         /* store */
1072         hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
1073         ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
1074                            __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
1075                            &handle);
1076         if (ret == -ENOSPC) {
1077                 zswap_reject_compress_poor++;
1078                 goto put_dstmem;
1079         }
1080         if (ret) {
1081                 zswap_reject_alloc_fail++;
1082                 goto put_dstmem;
1083         }
1084         buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
1085         memcpy(buf, &zhdr, hlen);
1086         memcpy(buf + hlen, dst, dlen);
1087         zpool_unmap_handle(entry->pool->zpool, handle);
1088         put_cpu_var(zswap_dstmem);
1089 
1090         /* populate entry */
1091         entry->offset = offset;
1092         entry->handle = handle;
1093         entry->length = dlen;
1094 
1095 insert_entry:
1096         /* map */
1097         spin_lock(&tree->lock);
1098         do {
1099                 ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
1100                 if (ret == -EEXIST) {
1101                         zswap_duplicate_entry++;
1102                         /* remove from rbtree */
1103                         zswap_rb_erase(&tree->rbroot, dupentry);
1104                         zswap_entry_put(tree, dupentry);
1105                 }
1106         } while (ret == -EEXIST);
1107         spin_unlock(&tree->lock);
1108 
1109         /* update stats */
1110         atomic_inc(&zswap_stored_pages);
1111         zswap_update_total_size();
1112 
1113         return 0;
1114 
1115 put_dstmem:
1116         put_cpu_var(zswap_dstmem);
1117         zswap_pool_put(entry->pool);
1118 freepage:
1119         zswap_entry_cache_free(entry);
1120 reject:
1121         return ret;
1122 }
1123 
1124 /*
1125  * returns 0 if the page was successfully decompressed
1126  * returns -1 if the entry was not found or on error
1127  */
1128 static int zswap_frontswap_load(unsigned type, pgoff_t offset,
1129                                 struct page *page)
1130 {
1131         struct zswap_tree *tree = zswap_trees[type];
1132         struct zswap_entry *entry;
1133         struct crypto_comp *tfm;
1134         u8 *src, *dst;
1135         unsigned int dlen;
1136         int ret;
1137 
1138         /* find */
1139         spin_lock(&tree->lock);
1140         entry = zswap_entry_find_get(&tree->rbroot, offset);
1141         if (!entry) {
1142                 /* entry was written back */
1143                 spin_unlock(&tree->lock);
1144                 return -1;
1145         }
1146         spin_unlock(&tree->lock);
1147 
1148         if (!entry->length) {
1149                 dst = kmap_atomic(page);
1150                 zswap_fill_page(dst, entry->value);
1151                 kunmap_atomic(dst);
1152                 goto freeentry;
1153         }
1154 
1155         /* decompress */
1156         dlen = PAGE_SIZE;
1157         src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
1158         if (zpool_evictable(entry->pool->zpool))
1159                 src += sizeof(struct zswap_header);
1160         dst = kmap_atomic(page);
1161         tfm = *get_cpu_ptr(entry->pool->tfm);
1162         ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
1163         put_cpu_ptr(entry->pool->tfm);
1164         kunmap_atomic(dst);
1165         zpool_unmap_handle(entry->pool->zpool, entry->handle);
1166         BUG_ON(ret);
1167 
1168 freeentry:
1169         spin_lock(&tree->lock);
1170         zswap_entry_put(tree, entry);
1171         spin_unlock(&tree->lock);
1172 
1173         return 0;
1174 }
1175 
1176 /* frees an entry in zswap */
1177 static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
1178 {
1179         struct zswap_tree *tree = zswap_trees[type];
1180         struct zswap_entry *entry;
1181 
1182         /* find */
1183         spin_lock(&tree->lock);
1184         entry = zswap_rb_search(&tree->rbroot, offset);
1185         if (!entry) {
1186                 /* entry was written back */
1187                 spin_unlock(&tree->lock);
1188                 return;
1189         }
1190 
1191         /* remove from rbtree */
1192         zswap_rb_erase(&tree->rbroot, entry);
1193 
1194         /* drop the initial reference from entry creation */
1195         zswap_entry_put(tree, entry);
1196 
1197         spin_unlock(&tree->lock);
1198 }
1199 
1200 /* frees all zswap entries for the given swap type */
1201 static void zswap_frontswap_invalidate_area(unsigned type)
1202 {
1203         struct zswap_tree *tree = zswap_trees[type];
1204         struct zswap_entry *entry, *n;
1205 
1206         if (!tree)
1207                 return;
1208 
1209         /* walk the tree and free everything */
1210         spin_lock(&tree->lock);
1211         rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1212                 zswap_free_entry(entry);
1213         tree->rbroot = RB_ROOT;
1214         spin_unlock(&tree->lock);
1215         kfree(tree);
1216         zswap_trees[type] = NULL;
1217 }
1218 
1219 static void zswap_frontswap_init(unsigned type)
1220 {
1221         struct zswap_tree *tree;
1222 
1223         tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1224         if (!tree) {
1225                 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1226                 return;
1227         }
1228 
1229         tree->rbroot = RB_ROOT;
1230         spin_lock_init(&tree->lock);
1231         zswap_trees[type] = tree;
1232 }
1233 
1234 static struct frontswap_ops zswap_frontswap_ops = {
1235         .store = zswap_frontswap_store,
1236         .load = zswap_frontswap_load,
1237         .invalidate_page = zswap_frontswap_invalidate_page,
1238         .invalidate_area = zswap_frontswap_invalidate_area,
1239         .init = zswap_frontswap_init
1240 };
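
/*
 * Editor's note: once registered via frontswap_register_ops() in
 * init_zswap() below, these callbacks are driven from the core swap
 * path: .store from swap_writepage() as a page heads to the swap
 * device, and .load from swap_readpage() when it is faulted back in.
 */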
1241 
1242 /*********************************
1243 * debugfs functions
1244 **********************************/
1245 #ifdef CONFIG_DEBUG_FS
1246 #include <linux/debugfs.h>
1247 
1248 static struct dentry *zswap_debugfs_root;
1249 
1250 static int __init zswap_debugfs_init(void)
1251 {
1252         if (!debugfs_initialized())
1253                 return -ENODEV;
1254 
1255         zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1256 
1257         debugfs_create_u64("pool_limit_hit", 0444,
1258                            zswap_debugfs_root, &zswap_pool_limit_hit);
1259         debugfs_create_u64("reject_reclaim_fail", 0444,
1260                            zswap_debugfs_root, &zswap_reject_reclaim_fail);
1261         debugfs_create_u64("reject_alloc_fail", 0444,
1262                            zswap_debugfs_root, &zswap_reject_alloc_fail);
1263         debugfs_create_u64("reject_kmemcache_fail", 0444,
1264                            zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1265         debugfs_create_u64("reject_compress_poor", 0444,
1266                            zswap_debugfs_root, &zswap_reject_compress_poor);
1267         debugfs_create_u64("written_back_pages", 0444,
1268                            zswap_debugfs_root, &zswap_written_back_pages);
1269         debugfs_create_u64("duplicate_entry", 0444,
1270                            zswap_debugfs_root, &zswap_duplicate_entry);
1271         debugfs_create_u64("pool_total_size", 0444,
1272                            zswap_debugfs_root, &zswap_pool_total_size);
1273         debugfs_create_atomic_t("stored_pages", 0444,
1274                                 zswap_debugfs_root, &zswap_stored_pages);
1275         debugfs_create_atomic_t("same_filled_pages", 0444,
1276                                 zswap_debugfs_root, &zswap_same_filled_pages);
1277 
1278         return 0;
1279 }
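
/*
 * Editor's note, example: with CONFIG_DEBUG_FS enabled and debugfs
 * mounted, the counters can be read at runtime:
 *
 *   grep -r . /sys/kernel/debug/zswap/
 */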
1280 
1281 static void __exit zswap_debugfs_exit(void)
1282 {
1283         debugfs_remove_recursive(zswap_debugfs_root);
1284 }
1285 #else
1286 static int __init zswap_debugfs_init(void)
1287 {
1288         return 0;
1289 }
1290 
1291 static void __exit zswap_debugfs_exit(void) { }
1292 #endif
1293 
1294 /*********************************
1295 * module init and exit
1296 **********************************/
1297 static int __init init_zswap(void)
1298 {
1299         struct zswap_pool *pool;
1300         int ret;
1301 
1302         zswap_init_started = true;
1303 
1304         if (zswap_entry_cache_create()) {
1305                 pr_err("entry cache creation failed\n");
1306                 goto cache_fail;
1307         }
1308 
1309         ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
1310                                 zswap_dstmem_prepare, zswap_dstmem_dead);
1311         if (ret) {
1312                 pr_err("dstmem alloc failed\n");
1313                 goto dstmem_fail;
1314         }
1315 
1316         ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1317                                       "mm/zswap_pool:prepare",
1318                                       zswap_cpu_comp_prepare,
1319                                       zswap_cpu_comp_dead);
1320         if (ret)
1321                 goto hp_fail;
1322 
1323         pool = __zswap_pool_create_fallback();
1324         if (pool) {
1325                 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1326                         zpool_get_type(pool->zpool));
1327                 list_add(&pool->list, &zswap_pools);
1328                 zswap_has_pool = true;
1329         } else {
1330                 pr_err("pool creation failed\n");
1331                 zswap_enabled = false;
1332         }
1333 
1334         frontswap_register_ops(&zswap_frontswap_ops);
1335         if (zswap_debugfs_init())
1336                 pr_warn("debugfs initialization failed\n");
1337         return 0;
1338 
1339 hp_fail:
1340         cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
1341 dstmem_fail:
1342         zswap_entry_cache_destroy();
1343 cache_fail:
1344         /* if built-in, we aren't unloaded on failure; don't allow use */
1345         zswap_init_failed = true;
1346         zswap_enabled = false;
1347         return -ENOMEM;
1348 }
1349 /* must be late so crypto has time to come up */
1350 late_initcall(init_zswap);
1351 
1352 MODULE_LICENSE("GPL");
1353 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1354 MODULE_DESCRIPTION("Compressed cache for swap pages");
1355 
