TOMOYO Linux Cross Reference
Linux/mm/workingset.c

/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *              Double CLOCK lists
 *
 * Per zone, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *              Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory, there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more frequently, but hopefully
 * are used less frequently than the refaulting pages:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory, tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
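 * A worked example with made-up numbers: say NR_inactive = 100 and
 * NR_active = 60.  A page is evicted when the eviction/activation
 * counter reads E = 1000, and it refaults when the counter reads
 * R = 1050.  Its refault distance is R - E = 50; since 50 <= 60,
 * the page would have survived given 50 more inactive slots, so it
 * is activated on refault.  Had it refaulted at R = 1080 instead,
 * the distance of 80 would exceed NR_active and the page would go
 * back to the head of the inactive list.
 *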
 *
 *              Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *              Implementation
 *
 * For each zone's file LRU lists, a counter for inactive evictions
 * and activations is maintained (zone->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the zone) is stored in the now empty page cache radix tree
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

static void *pack_shadow(unsigned long eviction, struct zone *zone)
{
        eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
        eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
        eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

        return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

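/*
 * Layout of the shadow entry built above, as a sketch (field widths
 * depend on the kernel configuration; the low bits carry the radix
 * tree exceptional-entry tag):
 *
 *   MSB                                                      LSB
 *   +----------------------+-------------+------------+---------+
 *   |   eviction counter   |   node id   | zone index | exc tag |
 *   +----------------------+-------------+------------+---------+
 *     remaining bits         NODES_SHIFT   ZONES_SHIFT  EXCEPTIONAL
 *                                                       _SHIFT bits
 *
 * unpack_shadow() below reverses these shifts to recover the zone
 * and the eviction snapshot.
 */
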
static void unpack_shadow(void *shadow,
                          struct zone **zone,
                          unsigned long *distance)
{
        unsigned long entry = (unsigned long)shadow;
        unsigned long eviction;
        unsigned long refault;
        unsigned long mask;
        int zid, nid;

        entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
        zid = entry & ((1UL << ZONES_SHIFT) - 1);
        entry >>= ZONES_SHIFT;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
        eviction = entry;

        *zone = NODE_DATA(nid)->node_zones + zid;

        refault = atomic_long_read(&(*zone)->inactive_age);
        mask = ~0UL >> (NODES_SHIFT + ZONES_SHIFT +
                        RADIX_TREE_EXCEPTIONAL_SHIFT);
        /*
         * The unsigned subtraction here gives an accurate distance
         * across inactive_age overflows in most cases.
         *
         * There is a special case: usually, shadow entries have a
         * short lifetime and are either refaulted or reclaimed along
         * with the inode before they get too old.  But it is not
         * impossible for the inactive_age to lap a shadow entry in
         * the field, which can then result in a false small refault
         * distance, leading to a false activation should this old
         * entry actually refault again.  However, earlier kernels
         * used to deactivate unconditionally with *every* reclaim
         * invocation for the longest time, so the occasional
         * inappropriate activation leading to pressure on the active
         * list is not a problem.
         */
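        /*
         * A sketch with a hypothetical 8-bit counter field (mask ==
         * 0xff) to illustrate the wraparound behaviour: a snapshot
         * taken at eviction == 250 followed by ten more events wraps
         * the counter to refault == 4, and (4 - 250) & 0xff == 10
         * still yields the correct distance.
         */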
        *distance = (refault - eviction) & mask;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
        struct zone *zone = page_zone(page);
        unsigned long eviction;

        eviction = atomic_long_inc_return(&zone->inactive_age);
        return pack_shadow(eviction, zone);
}

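/*
 * A simplified sketch of the caller side (the real call site lives in
 * the reclaim path; the snippet below is illustrative, not verbatim,
 * and assumes the page cache API of this kernel generation):
 *
 *      if (reclaimed && page_is_file_cache(page))
 *              shadow = workingset_eviction(mapping, page);
 *      __delete_from_page_cache(page, shadow);
 */
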
/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the zone it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
        unsigned long refault_distance;
        struct zone *zone;

        unpack_shadow(shadow, &zone, &refault_distance);
        inc_zone_state(zone, WORKINGSET_REFAULT);

        if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {
                inc_zone_state(zone, WORKINGSET_ACTIVATE);
                return true;
        }
        return false;
}

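/*
 * Sketch of how a page cache insertion path might consume the shadow
 * entry it found in the old radix tree slot (illustrative, modeled on
 * the add_to_page_cache_lru() pattern of this era):
 *
 *      if (shadow && workingset_refault(shadow)) {
 *              SetPageActive(page);
 *              workingset_activation(page);
 *      }
 */
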
/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
        atomic_long_inc(&page_zone(page)->inactive_age);
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru workingset_shadow_nodes;

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
                                        struct shrink_control *sc)
{
        unsigned long shadow_nodes;
        unsigned long max_nodes;
        unsigned long pages;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
        local_irq_enable();

        pages = node_present_pages(sc->nid);
        /*
         * Active cache pages are limited to 50% of memory, and shadow
         * entries that represent a refault distance bigger than that
         * do not have any effect.  Limit the number of shadow nodes
         * such that shadow entries do not exceed the number of active
         * cache pages, assuming a worst-case node population density
         * of 1/8th on average.
         *
         * On 64-bit with 7 radix_tree_nodes per page and 64 slots
         * each, this will reclaim shadow entries when they consume
         * ~2% of available memory:
         *
         * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
         */
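        /*
         * Worked numbers as a sketch, assuming RADIX_TREE_MAP_SHIFT
         * is 6 (64-slot nodes): the shift below is 1 + 6 - 3 = 4,
         * i.e. one shadow node is allowed per 16 pages.  At the
         * worst-case density of 1/8th - 8 entries per node - that
         * caps shadow entries at pages / 2, matching the 50% active
         * cache limit described above.
         */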
        max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);

        if (shadow_nodes <= max_nodes)
                return 0;

        return shadow_nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
                                          struct list_lru_one *lru,
                                          spinlock_t *lru_lock,
                                          void *arg)
{
        struct address_space *mapping;
        struct radix_tree_node *node;
        unsigned int i;
        int ret;

        /*
         * Page cache insertions and deletions synchronously maintain
         * the shadow node LRU under the mapping->tree_lock and the
         * lru_lock.  Because the page cache tree is emptied before
         * the inode can be destroyed, holding the lru_lock pins any
         * address_space that has radix tree nodes on the LRU.
         *
         * We can then safely transition to the mapping->tree_lock to
         * pin only the address_space of the particular node we want
         * to reclaim, take the node off-LRU, and drop the lru_lock.
         */

        node = container_of(item, struct radix_tree_node, private_list);
        mapping = node->private_data;

        /* Coming from the list, invert the lock order */
        if (!spin_trylock(&mapping->tree_lock)) {
                spin_unlock(lru_lock);
                ret = LRU_RETRY;
                goto out;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lru_lock);

        /*
         * The nodes should only contain one or more shadow entries,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */

        BUG_ON(!node->count);
        BUG_ON(node->count & RADIX_TREE_COUNT_MASK);

        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[i]) {
                        BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
                        node->slots[i] = NULL;
                        BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
                        node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
                        BUG_ON(!mapping->nrshadows);
                        mapping->nrshadows--;
                }
        }
        BUG_ON(node->count);
        inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
        if (!__radix_tree_delete_node(&mapping->page_tree, node))
                BUG();

        spin_unlock(&mapping->tree_lock);
        ret = LRU_REMOVED_RETRY;
out:
        local_irq_enable();
        cond_resched();
        local_irq_disable();
        spin_lock(lru_lock);
        return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
                                       struct shrink_control *sc)
{
        unsigned long ret;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
                                   shadow_lru_isolate, NULL);
        local_irq_enable();
        return ret;
}

static struct shrinker workingset_shadow_shrinker = {
        .count_objects = count_shadow_nodes,
        .scan_objects = scan_shadow_nodes,
        .seeks = DEFAULT_SEEKS,
        .flags = SHRINKER_NUMA_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
        int ret;

        ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
        if (ret)
                goto err;
        ret = register_shrinker(&workingset_shadow_shrinker);
        if (ret)
                goto err_list_lru;
        return 0;
err_list_lru:
        list_lru_destroy(&workingset_shadow_nodes);
err:
        return ret;
}
module_init(workingset_init);

