TOMOYO Linux Cross Reference
Linux/include/linux/memcontrol.h


  1 /* memcontrol.h - Memory Controller
  2  *
  3  * Copyright IBM Corporation, 2007
  4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
  5  *
  6  * Copyright 2007 OpenVZ SWsoft Inc
  7  * Author: Pavel Emelianov <xemul@openvz.org>
  8  *
  9  * This program is free software; you can redistribute it and/or modify
 10  * it under the terms of the GNU General Public License as published by
 11  * the Free Software Foundation; either version 2 of the License, or
 12  * (at your option) any later version.
 13  *
 14  * This program is distributed in the hope that it will be useful,
 15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 17  * GNU General Public License for more details.
 18  */
 19 
 20 #ifndef _LINUX_MEMCONTROL_H
 21 #define _LINUX_MEMCONTROL_H
 22 #include <linux/cgroup.h>
 23 #include <linux/vm_event_item.h>
 24 #include <linux/hardirq.h>
 25 #include <linux/jump_label.h>
 26 #include <linux/page_counter.h>
 27 #include <linux/vmpressure.h>
 28 #include <linux/eventfd.h>
 29 #include <linux/mm.h>
 30 #include <linux/vmstat.h>
 31 #include <linux/writeback.h>
 32 #include <linux/page-flags.h>
 33 
 34 struct mem_cgroup;
 35 struct page;
 36 struct mm_struct;
 37 struct kmem_cache;
 38 
 39 /* Cgroup-specific page state, on top of universal node page state */
 40 enum memcg_stat_item {
 41         MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
 42         MEMCG_RSS,
 43         MEMCG_RSS_HUGE,
 44         MEMCG_SWAP,
 45         MEMCG_SOCK,
 46         /* XXX: why are these zone and not node counters? */
 47         MEMCG_KERNEL_STACK_KB,
 48         MEMCG_NR_STAT,
 49 };
 50 
 51 enum memcg_memory_event {
 52         MEMCG_LOW,
 53         MEMCG_HIGH,
 54         MEMCG_MAX,
 55         MEMCG_OOM,
 56         MEMCG_OOM_KILL,
 57         MEMCG_SWAP_MAX,
 58         MEMCG_SWAP_FAIL,
 59         MEMCG_NR_MEMORY_EVENTS,
 60 };
 61 
 62 enum mem_cgroup_protection {
 63         MEMCG_PROT_NONE,
 64         MEMCG_PROT_LOW,
 65         MEMCG_PROT_MIN,
 66 };
 67 
 68 struct mem_cgroup_reclaim_cookie {
 69         pg_data_t *pgdat;
 70         int priority;
 71         unsigned int generation;
 72 };
 73 
 74 #ifdef CONFIG_MEMCG
 75 
 76 #define MEM_CGROUP_ID_SHIFT     16
 77 #define MEM_CGROUP_ID_MAX       USHRT_MAX
 78 
 79 struct mem_cgroup_id {
 80         int id;
 81         refcount_t ref;
 82 };
 83 
 84 /*
 85  * Per-memcg event counter is incremented at every pagein/pageout. With THP,
 86  * it will be incremented by the number of pages. This counter is used
 87  * to trigger some periodic events. This is straightforward and better
 88  * than using jiffies etc. to handle periodic memcg events.
 89  */
 90 enum mem_cgroup_events_target {
 91         MEM_CGROUP_TARGET_THRESH,
 92         MEM_CGROUP_TARGET_SOFTLIMIT,
 93         MEM_CGROUP_TARGET_NUMAINFO,
 94         MEM_CGROUP_NTARGETS,
 95 };
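/*
 * Illustrative sketch only: roughly how these targets are consumed to
 * rate-limit periodic work (simplified from mem_cgroup_event_ratelimit()
 * in mm/memcontrol.c; the *_EVENTS_TARGET step constants live there, not
 * in this header):
 *
 *	val  = __this_cpu_read(memcg->stat_cpu->nr_page_events);
 *	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
 *	if ((long)(next - val) < 0) {
 *		__this_cpu_write(memcg->stat_cpu->targets[target],
 *				 val + THRESHOLDS_EVENTS_TARGET);
 *		... run the periodic work (thresholds, soft limit tree, ...) ...
 *	}
 */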
 96 
 97 struct mem_cgroup_stat_cpu {
 98         long count[MEMCG_NR_STAT];
 99         unsigned long events[NR_VM_EVENT_ITEMS];
100         unsigned long nr_page_events;
101         unsigned long targets[MEM_CGROUP_NTARGETS];
102 };
103 
104 struct mem_cgroup_reclaim_iter {
105         struct mem_cgroup *position;
106         /* scan generation, increased every round-trip */
107         unsigned int generation;
108 };
109 
110 struct lruvec_stat {
111         long count[NR_VM_NODE_STAT_ITEMS];
112 };
113 
114 /*
115  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
116  * which have elements charged to this memcg.
117  */
118 struct memcg_shrinker_map {
119         struct rcu_head rcu;
120         unsigned long map[0];
121 };
122 
123 /*
124  * per-zone information in memory controller.
125  */
126 struct mem_cgroup_per_node {
127         struct lruvec           lruvec;
128 
129         struct lruvec_stat __percpu *lruvec_stat_cpu;
130         atomic_long_t           lruvec_stat[NR_VM_NODE_STAT_ITEMS];
131 
132         unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
133 
134         struct mem_cgroup_reclaim_iter  iter[DEF_PRIORITY + 1];
135 
136 #ifdef CONFIG_MEMCG_KMEM
137         struct memcg_shrinker_map __rcu *shrinker_map;
138 #endif
139         struct rb_node          tree_node;      /* RB tree node */
140         unsigned long           usage_in_excess;/* Set to the value by which */
 141                                                 /* the soft limit is exceeded */
142         bool                    on_tree;
143         bool                    congested;      /* memcg has many dirty pages */
144                                                 /* backed by a congested BDI */
145 
146         struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
147                                                 /* use container_of        */
148 };
149 
150 struct mem_cgroup_threshold {
151         struct eventfd_ctx *eventfd;
152         unsigned long threshold;
153 };
154 
155 /* For threshold */
156 struct mem_cgroup_threshold_ary {
 157         /* Index in entries[] of the threshold just below or equal to usage. */
158         int current_threshold;
159         /* Size of entries[] */
160         unsigned int size;
161         /* Array of thresholds */
162         struct mem_cgroup_threshold entries[0];
163 };
164 
165 struct mem_cgroup_thresholds {
166         /* Primary thresholds array */
167         struct mem_cgroup_threshold_ary *primary;
168         /*
169          * Spare threshold array.
170          * This is needed to make mem_cgroup_unregister_event() "never fail".
171          * It must be able to store at least primary->size - 1 entries.
172          */
173         struct mem_cgroup_threshold_ary *spare;
174 };
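/*
 * Userspace side, for reference (hedged sketch based on the cgroup v1
 * memory controller documentation, "Memory thresholds"; error handling
 * omitted): a threshold is armed by writing "<event_fd> <usage_fd>
 * <threshold>" to cgroup.event_control, after which the eventfd fires
 * whenever usage crosses the threshold in either direction:
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("cgroup.event_control", O_WRONLY);
 *	dprintf(cfd, "%d %d %llu", efd, ufd, threshold_in_bytes);
 *	read(efd, &counter, sizeof(counter));	// blocks until crossed
 */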
175 
176 enum memcg_kmem_state {
177         KMEM_NONE,
178         KMEM_ALLOCATED,
179         KMEM_ONLINE,
180 };
181 
182 #if defined(CONFIG_SMP)
183 struct memcg_padding {
184         char x[0];
185 } ____cacheline_internodealigned_in_smp;
186 #define MEMCG_PADDING(name)      struct memcg_padding name;
187 #else
188 #define MEMCG_PADDING(name)
189 #endif
190 
191 /*
192  * The memory controller data structure. The memory controller controls both
193  * page cache and RSS per cgroup. We would eventually like to provide
194  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
195  * to help the administrator determine what knobs to tune.
196  */
197 struct mem_cgroup {
198         struct cgroup_subsys_state css;
199 
200         /* Private memcg ID. Used to ID objects that outlive the cgroup */
201         struct mem_cgroup_id id;
202 
203         /* Accounted resources */
204         struct page_counter memory;
205         struct page_counter swap;
206 
207         /* Legacy consumer-oriented counters */
208         struct page_counter memsw;
209         struct page_counter kmem;
210         struct page_counter tcpmem;
211 
212         /* Upper bound of normal memory consumption range */
213         unsigned long high;
214 
215         /* Range enforcement for interrupt charges */
216         struct work_struct high_work;
217 
218         unsigned long soft_limit;
219 
220         /* vmpressure notifications */
221         struct vmpressure vmpressure;
222 
223         /*
224          * Should the accounting and control be hierarchical, per subtree?
225          */
226         bool use_hierarchy;
227 
228         /*
 229          * Should the OOM killer kill all tasks in this cgroup if it has to kill one?
230          */
231         bool oom_group;
232 
233         /* protected by memcg_oom_lock */
234         bool            oom_lock;
235         int             under_oom;
236 
237         int     swappiness;
238         /* OOM-Killer disable */
239         int             oom_kill_disable;
240 
241         /* memory.events */
242         struct cgroup_file events_file;
243 
244         /* handle for "memory.swap.events" */
245         struct cgroup_file swap_events_file;
246 
247         /* protect arrays of thresholds */
248         struct mutex thresholds_lock;
249 
250         /* thresholds for memory usage. RCU-protected */
251         struct mem_cgroup_thresholds thresholds;
252 
253         /* thresholds for mem+swap usage. RCU-protected */
254         struct mem_cgroup_thresholds memsw_thresholds;
255 
256         /* For oom notifier event fd */
257         struct list_head oom_notify;
258 
259         /*
260          * Should we move charges of a task when a task is moved into this
 261          * mem_cgroup? And what type of charges should we move?
262          */
263         unsigned long move_charge_at_immigrate;
264         /* taken only while moving_account > 0 */
265         spinlock_t              move_lock;
266         unsigned long           move_lock_flags;
267 
268         MEMCG_PADDING(_pad1_);
269 
270         /*
271          * set > 0 if pages under this cgroup are moving to other cgroup.
272          */
273         atomic_t                moving_account;
274         struct task_struct      *move_lock_task;
275 
276         /* memory.stat */
277         struct mem_cgroup_stat_cpu __percpu *stat_cpu;
278 
279         MEMCG_PADDING(_pad2_);
280 
281         atomic_long_t           stat[MEMCG_NR_STAT];
282         atomic_long_t           events[NR_VM_EVENT_ITEMS];
283         atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
284 
285         unsigned long           socket_pressure;
286 
287         /* Legacy tcp memory accounting */
288         bool                    tcpmem_active;
289         int                     tcpmem_pressure;
290 
291 #ifdef CONFIG_MEMCG_KMEM
292         /* Index in the kmem_cache->memcg_params.memcg_caches array */
293         int kmemcg_id;
294         enum memcg_kmem_state kmem_state;
295         struct list_head kmem_caches;
296 #endif
297 
298         int last_scanned_node;
299 #if MAX_NUMNODES > 1
300         nodemask_t      scan_nodes;
301         atomic_t        numainfo_events;
302         atomic_t        numainfo_updating;
303 #endif
304 
305 #ifdef CONFIG_CGROUP_WRITEBACK
306         struct list_head cgwb_list;
307         struct wb_domain cgwb_domain;
308 #endif
309 
 310         /* List of events which userspace wants to receive */
311         struct list_head event_list;
312         spinlock_t event_list_lock;
313 
314         struct mem_cgroup_per_node *nodeinfo[0];
315         /* WARNING: nodeinfo must be the last member here */
316 };
317 
318 /*
 319  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 320  * TODO: larger batches may be necessary on big-iron machines.
321  */
322 #define MEMCG_CHARGE_BATCH 32U
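/*
 * Illustrative sketch only: how the batch is used by the charge path
 * (simplified from try_charge()/consume_stock()/refill_stock() in
 * mm/memcontrol.c). Charges are drawn from a per-CPU "stock" so that
 * most single-page charges never touch the shared page_counter:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;			// served from per-CPU cache
 *	batch = max(MEMCG_CHARGE_BATCH, nr_pages);
 *	if (!page_counter_try_charge(&memcg->memory, batch, &counter))
 *		... enter reclaim, or retry with just nr_pages ...
 *	if (batch > nr_pages)
 *		refill_stock(memcg, batch - nr_pages);
 */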
323 
324 extern struct mem_cgroup *root_mem_cgroup;
325 
326 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
327 {
328         return (memcg == root_mem_cgroup);
329 }
330 
331 static inline bool mem_cgroup_disabled(void)
332 {
333         return !cgroup_subsys_enabled(memory_cgrp_subsys);
334 }
335 
336 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
337                                                 struct mem_cgroup *memcg);
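/*
 * Rough sketch of how reclaim consumes the result, inside its per-memcg
 * loop (simplified from shrink_node() in mm/vmscan.c; not part of this
 * header):
 *
 *	switch (mem_cgroup_protected(root, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;		// hard protection, skip entirely
 *	case MEMCG_PROT_LOW:
 *		...			// best-effort, usually skipped too
 *	case MEMCG_PROT_NONE:
 *		break;			// no protection, reclaim normally
 *	}
 */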
338 
339 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
340                           gfp_t gfp_mask, struct mem_cgroup **memcgp,
341                           bool compound);
342 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
343                           gfp_t gfp_mask, struct mem_cgroup **memcgp,
344                           bool compound);
345 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
346                               bool lrucare, bool compound);
347 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
348                 bool compound);
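/*
 * Typical caller pattern, for reference (hedged sketch, simplified from
 * the page-cache insertion path in mm/filemap.c): try_charge() picks and
 * charges the memcg, and the caller either commits once the page is
 * safely in place or cancels on failure:
 *
 *	error = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
 *	if (error)
 *		return error;
 *	error = ... insert the page into its final place ...;
 *	if (error)
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *	else
 *		mem_cgroup_commit_charge(page, memcg, false, false);
 */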
349 void mem_cgroup_uncharge(struct page *page);
350 void mem_cgroup_uncharge_list(struct list_head *page_list);
351 
352 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
353 
354 static struct mem_cgroup_per_node *
355 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
356 {
357         return memcg->nodeinfo[nid];
358 }
359 
360 /**
 361  * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
 362  * @pgdat: pglist_data of the wanted node
 363  * @memcg: memcg of the wanted lruvec
 364  *
 365  * Returns the lru list vector holding pages for a given @pgdat and
 366  * @memcg. This can be the node lruvec, if the memory controller is
 367  * disabled.
368  */
369 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
370                                 struct mem_cgroup *memcg)
371 {
372         struct mem_cgroup_per_node *mz;
373         struct lruvec *lruvec;
374 
375         if (mem_cgroup_disabled()) {
376                 lruvec = node_lruvec(pgdat);
377                 goto out;
378         }
379 
380         mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
381         lruvec = &mz->lruvec;
382 out:
383         /*
384          * Since a node can be onlined after the mem_cgroup was created,
385          * we have to be prepared to initialize lruvec->pgdat here;
386          * and if offlined then reonlined, we need to reinitialize it.
387          */
388         if (unlikely(lruvec->pgdat != pgdat))
389                 lruvec->pgdat = pgdat;
390         return lruvec;
391 }
392 
393 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
394 
395 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
396 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
397 
398 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
399 
400 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
401 
402 static inline
403 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
404         return css ? container_of(css, struct mem_cgroup, css) : NULL;
405 }
406 
407 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
408 {
409         if (memcg)
410                 css_put(&memcg->css);
411 }
412 
413 #define mem_cgroup_from_counter(counter, member)        \
414         container_of(counter, struct mem_cgroup, member)
415 
416 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
417                                    struct mem_cgroup *,
418                                    struct mem_cgroup_reclaim_cookie *);
419 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
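/*
 * Typical hierarchy walk, for reference (hedged sketch; mm/memcontrol.c
 * wraps the same pattern in its for_each_mem_cgroup_tree() macro): the
 * walk starts with a NULL previous position and either runs to
 * completion or is ended early with mem_cgroup_iter_break(), which drops
 * the reference held on the current position:
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		... work on iter ...
 *	}
 */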
420 int mem_cgroup_scan_tasks(struct mem_cgroup *,
421                           int (*)(struct task_struct *, void *), void *);
422 
423 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
424 {
425         if (mem_cgroup_disabled())
426                 return 0;
427 
428         return memcg->id.id;
429 }
430 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
431 
432 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
433 {
434         return mem_cgroup_from_css(seq_css(m));
435 }
436 
437 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
438 {
439         struct mem_cgroup_per_node *mz;
440 
441         if (mem_cgroup_disabled())
442                 return NULL;
443 
444         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
445         return mz->memcg;
446 }
447 
448 /**
449  * parent_mem_cgroup - find the accounting parent of a memcg
450  * @memcg: memcg whose parent to find
451  *
452  * Returns the parent memcg, or NULL if this is the root or the memory
453  * controller is in legacy no-hierarchy mode.
454  */
455 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
456 {
457         if (!memcg->memory.parent)
458                 return NULL;
459         return mem_cgroup_from_counter(memcg->memory.parent, memory);
460 }
461 
462 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
463                               struct mem_cgroup *root)
464 {
465         if (root == memcg)
466                 return true;
467         if (!root->use_hierarchy)
468                 return false;
469         return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
470 }
471 
472 static inline bool mm_match_cgroup(struct mm_struct *mm,
473                                    struct mem_cgroup *memcg)
474 {
475         struct mem_cgroup *task_memcg;
476         bool match = false;
477 
478         rcu_read_lock();
479         task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
480         if (task_memcg)
481                 match = mem_cgroup_is_descendant(task_memcg, memcg);
482         rcu_read_unlock();
483         return match;
484 }
485 
486 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
487 ino_t page_cgroup_ino(struct page *page);
488 
489 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
490 {
491         if (mem_cgroup_disabled())
492                 return true;
493         return !!(memcg->css.flags & CSS_ONLINE);
494 }
495 
496 /*
497  * For memory reclaim.
498  */
499 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
500 
501 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
502                 int zid, int nr_pages);
503 
504 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
505                                            int nid, unsigned int lru_mask);
506 
507 static inline
508 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
509 {
510         struct mem_cgroup_per_node *mz;
511         unsigned long nr_pages = 0;
512         int zid;
513 
514         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
515         for (zid = 0; zid < MAX_NR_ZONES; zid++)
516                 nr_pages += mz->lru_zone_size[zid][lru];
517         return nr_pages;
518 }
519 
520 static inline
521 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
522                 enum lru_list lru, int zone_idx)
523 {
524         struct mem_cgroup_per_node *mz;
525 
526         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
527         return mz->lru_zone_size[zone_idx][lru];
528 }
529 
530 void mem_cgroup_handle_over_high(void);
531 
532 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
533 
534 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
535                                 struct task_struct *p);
536 
537 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
538 
539 static inline void mem_cgroup_enter_user_fault(void)
540 {
541         WARN_ON(current->in_user_fault);
542         current->in_user_fault = 1;
543 }
544 
545 static inline void mem_cgroup_exit_user_fault(void)
546 {
547         WARN_ON(!current->in_user_fault);
548         current->in_user_fault = 0;
549 }
550 
551 static inline bool task_in_memcg_oom(struct task_struct *p)
552 {
553         return p->memcg_in_oom;
554 }
555 
556 bool mem_cgroup_oom_synchronize(bool wait);
557 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
558                                             struct mem_cgroup *oom_domain);
559 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
560 
561 #ifdef CONFIG_MEMCG_SWAP
562 extern int do_swap_account;
563 #endif
564 
565 struct mem_cgroup *lock_page_memcg(struct page *page);
566 void __unlock_page_memcg(struct mem_cgroup *memcg);
567 void unlock_page_memcg(struct page *page);
568 
569 /*
570  * idx can be of type enum memcg_stat_item or node_stat_item.
571  * Keep in sync with memcg_exact_page_state().
572  */
573 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
574                                              int idx)
575 {
576         long x = atomic_long_read(&memcg->stat[idx]);
577 #ifdef CONFIG_SMP
578         if (x < 0)
579                 x = 0;
580 #endif
581         return x;
582 }
583 
584 /* idx can be of type enum memcg_stat_item or node_stat_item */
585 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
586                                      int idx, int val)
587 {
588         long x;
589 
590         if (mem_cgroup_disabled())
591                 return;
592 
593         x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
594         if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
595                 atomic_long_add(x, &memcg->stat[idx]);
596                 x = 0;
597         }
598         __this_cpu_write(memcg->stat_cpu->count[idx], x);
599 }
600 
601 /* idx can be of type enum memcg_stat_item or node_stat_item */
602 static inline void mod_memcg_state(struct mem_cgroup *memcg,
603                                    int idx, int val)
604 {
605         unsigned long flags;
606 
607         local_irq_save(flags);
608         __mod_memcg_state(memcg, idx, val);
609         local_irq_restore(flags);
610 }
611 
612 /**
613  * mod_memcg_page_state - update page state statistics
614  * @page: the page
615  * @idx: page state item to account
616  * @val: number of pages (positive or negative)
617  *
618  * The @page must be locked or the caller must use lock_page_memcg()
619  * to prevent double accounting when the page is concurrently being
620  * moved to another memcg:
621  *
622  *   lock_page(page) or lock_page_memcg(page)
623  *   if (TestClearPageState(page))
624  *     mod_memcg_page_state(page, state, -1);
625  *   unlock_page(page) or unlock_page_memcg(page)
626  *
627  * Kernel pages are an exception to this, since they'll never move.
628  */
629 static inline void __mod_memcg_page_state(struct page *page,
630                                           int idx, int val)
631 {
632         if (page->mem_cgroup)
633                 __mod_memcg_state(page->mem_cgroup, idx, val);
634 }
635 
636 static inline void mod_memcg_page_state(struct page *page,
637                                         int idx, int val)
638 {
639         if (page->mem_cgroup)
640                 mod_memcg_state(page->mem_cgroup, idx, val);
641 }
642 
643 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
644                                               enum node_stat_item idx)
645 {
646         struct mem_cgroup_per_node *pn;
647         long x;
648 
649         if (mem_cgroup_disabled())
650                 return node_page_state(lruvec_pgdat(lruvec), idx);
651 
652         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
653         x = atomic_long_read(&pn->lruvec_stat[idx]);
654 #ifdef CONFIG_SMP
655         if (x < 0)
656                 x = 0;
657 #endif
658         return x;
659 }
660 
661 static inline void __mod_lruvec_state(struct lruvec *lruvec,
662                                       enum node_stat_item idx, int val)
663 {
664         struct mem_cgroup_per_node *pn;
665         long x;
666 
667         /* Update node */
668         __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
669 
670         if (mem_cgroup_disabled())
671                 return;
672 
673         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
674 
675         /* Update memcg */
676         __mod_memcg_state(pn->memcg, idx, val);
677 
678         /* Update lruvec */
679         x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
680         if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
681                 atomic_long_add(x, &pn->lruvec_stat[idx]);
682                 x = 0;
683         }
684         __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
685 }
686 
687 static inline void mod_lruvec_state(struct lruvec *lruvec,
688                                     enum node_stat_item idx, int val)
689 {
690         unsigned long flags;
691 
692         local_irq_save(flags);
693         __mod_lruvec_state(lruvec, idx, val);
694         local_irq_restore(flags);
695 }
696 
697 static inline void __mod_lruvec_page_state(struct page *page,
698                                            enum node_stat_item idx, int val)
699 {
700         pg_data_t *pgdat = page_pgdat(page);
701         struct lruvec *lruvec;
702 
703         /* Untracked pages have no memcg, no lruvec. Update only the node */
704         if (!page->mem_cgroup) {
705                 __mod_node_page_state(pgdat, idx, val);
706                 return;
707         }
708 
709         lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
710         __mod_lruvec_state(lruvec, idx, val);
711 }
712 
713 static inline void mod_lruvec_page_state(struct page *page,
714                                          enum node_stat_item idx, int val)
715 {
716         unsigned long flags;
717 
718         local_irq_save(flags);
719         __mod_lruvec_page_state(page, idx, val);
720         local_irq_restore(flags);
721 }
722 
723 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
724                                                 gfp_t gfp_mask,
725                                                 unsigned long *total_scanned);
726 
727 static inline void __count_memcg_events(struct mem_cgroup *memcg,
728                                         enum vm_event_item idx,
729                                         unsigned long count)
730 {
731         unsigned long x;
732 
733         if (mem_cgroup_disabled())
734                 return;
735 
736         x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
737         if (unlikely(x > MEMCG_CHARGE_BATCH)) {
738                 atomic_long_add(x, &memcg->events[idx]);
739                 x = 0;
740         }
741         __this_cpu_write(memcg->stat_cpu->events[idx], x);
742 }
743 
744 static inline void count_memcg_events(struct mem_cgroup *memcg,
745                                       enum vm_event_item idx,
746                                       unsigned long count)
747 {
748         unsigned long flags;
749 
750         local_irq_save(flags);
751         __count_memcg_events(memcg, idx, count);
752         local_irq_restore(flags);
753 }
754 
755 static inline void count_memcg_page_event(struct page *page,
756                                           enum vm_event_item idx)
757 {
758         if (page->mem_cgroup)
759                 count_memcg_events(page->mem_cgroup, idx, 1);
760 }
761 
762 static inline void count_memcg_event_mm(struct mm_struct *mm,
763                                         enum vm_event_item idx)
764 {
765         struct mem_cgroup *memcg;
766 
767         if (mem_cgroup_disabled())
768                 return;
769 
770         rcu_read_lock();
771         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
772         if (likely(memcg))
773                 count_memcg_events(memcg, idx, 1);
774         rcu_read_unlock();
775 }
776 
777 static inline void memcg_memory_event(struct mem_cgroup *memcg,
778                                       enum memcg_memory_event event)
779 {
780         do {
781                 atomic_long_inc(&memcg->memory_events[event]);
782                 cgroup_file_notify(&memcg->events_file);
783 
784                 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
785                         break;
786         } while ((memcg = parent_mem_cgroup(memcg)) &&
787                  !mem_cgroup_is_root(memcg));
788 }
789 
790 static inline void memcg_memory_event_mm(struct mm_struct *mm,
791                                          enum memcg_memory_event event)
792 {
793         struct mem_cgroup *memcg;
794 
795         if (mem_cgroup_disabled())
796                 return;
797 
798         rcu_read_lock();
799         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
800         if (likely(memcg))
801                 memcg_memory_event(memcg, event);
802         rcu_read_unlock();
803 }
804 
805 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
806 void mem_cgroup_split_huge_fixup(struct page *head);
807 #endif
808 
809 #else /* CONFIG_MEMCG */
810 
811 #define MEM_CGROUP_ID_SHIFT     0
812 #define MEM_CGROUP_ID_MAX       0
813 
814 struct mem_cgroup;
815 
816 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
817 {
818         return true;
819 }
820 
821 static inline bool mem_cgroup_disabled(void)
822 {
823         return true;
824 }
825 
826 static inline void memcg_memory_event(struct mem_cgroup *memcg,
827                                       enum memcg_memory_event event)
828 {
829 }
830 
831 static inline void memcg_memory_event_mm(struct mm_struct *mm,
832                                          enum memcg_memory_event event)
833 {
834 }
835 
836 static inline enum mem_cgroup_protection mem_cgroup_protected(
837         struct mem_cgroup *root, struct mem_cgroup *memcg)
838 {
839         return MEMCG_PROT_NONE;
840 }
841 
842 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
843                                         gfp_t gfp_mask,
844                                         struct mem_cgroup **memcgp,
845                                         bool compound)
846 {
847         *memcgp = NULL;
848         return 0;
849 }
850 
851 static inline int mem_cgroup_try_charge_delay(struct page *page,
852                                               struct mm_struct *mm,
853                                               gfp_t gfp_mask,
854                                               struct mem_cgroup **memcgp,
855                                               bool compound)
856 {
857         *memcgp = NULL;
858         return 0;
859 }
860 
861 static inline void mem_cgroup_commit_charge(struct page *page,
862                                             struct mem_cgroup *memcg,
863                                             bool lrucare, bool compound)
864 {
865 }
866 
867 static inline void mem_cgroup_cancel_charge(struct page *page,
868                                             struct mem_cgroup *memcg,
869                                             bool compound)
870 {
871 }
872 
873 static inline void mem_cgroup_uncharge(struct page *page)
874 {
875 }
876 
877 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
878 {
879 }
880 
881 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
882 {
883 }
884 
885 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
886                                 struct mem_cgroup *memcg)
887 {
888         return node_lruvec(pgdat);
889 }
890 
891 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
892                                                     struct pglist_data *pgdat)
893 {
894         return &pgdat->lruvec;
895 }
896 
897 static inline bool mm_match_cgroup(struct mm_struct *mm,
898                 struct mem_cgroup *memcg)
899 {
900         return true;
901 }
902 
903 static inline bool task_in_mem_cgroup(struct task_struct *task,
904                                       const struct mem_cgroup *memcg)
905 {
906         return true;
907 }
908 
909 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
910 {
911         return NULL;
912 }
913 
914 static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
915 {
916         return NULL;
917 }
918 
919 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
920 {
921 }
922 
923 static inline struct mem_cgroup *
924 mem_cgroup_iter(struct mem_cgroup *root,
925                 struct mem_cgroup *prev,
926                 struct mem_cgroup_reclaim_cookie *reclaim)
927 {
928         return NULL;
929 }
930 
931 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
932                                          struct mem_cgroup *prev)
933 {
934 }
935 
936 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
937                 int (*fn)(struct task_struct *, void *), void *arg)
938 {
939         return 0;
940 }
941 
942 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
943 {
944         return 0;
945 }
946 
947 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
948 {
949         WARN_ON_ONCE(id);
950         /* XXX: This should always return root_mem_cgroup */
951         return NULL;
952 }
953 
954 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
955 {
956         return NULL;
957 }
958 
959 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
960 {
961         return NULL;
962 }
963 
964 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
965 {
966         return true;
967 }
968 
969 static inline unsigned long
970 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
971 {
972         return 0;
973 }
974 static inline
975 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
976                 enum lru_list lru, int zone_idx)
977 {
978         return 0;
979 }
980 
981 static inline unsigned long
982 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
983                              int nid, unsigned int lru_mask)
984 {
985         return 0;
986 }
987 
988 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
989 {
990         return 0;
991 }
992 
993 static inline void
994 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
995 {
996 }
997 
998 static inline void
999 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1000 {
1001 }
1002 
1003 static inline struct mem_cgroup *lock_page_memcg(struct page *page)
1004 {
1005         return NULL;
1006 }
1007 
1008 static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
1009 {
1010 }
1011 
1012 static inline void unlock_page_memcg(struct page *page)
1013 {
1014 }
1015 
1016 static inline void mem_cgroup_handle_over_high(void)
1017 {
1018 }
1019 
1020 static inline void mem_cgroup_enter_user_fault(void)
1021 {
1022 }
1023 
1024 static inline void mem_cgroup_exit_user_fault(void)
1025 {
1026 }
1027 
1028 static inline bool task_in_memcg_oom(struct task_struct *p)
1029 {
1030         return false;
1031 }
1032 
1033 static inline bool mem_cgroup_oom_synchronize(bool wait)
1034 {
1035         return false;
1036 }
1037 
1038 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1039         struct task_struct *victim, struct mem_cgroup *oom_domain)
1040 {
1041         return NULL;
1042 }
1043 
1044 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1045 {
1046 }
1047 
1048 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
1049                                              int idx)
1050 {
1051         return 0;
1052 }
1053 
1054 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1055                                      int idx,
1056                                      int nr)
1057 {
1058 }
1059 
1060 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1061                                    int idx,
1062                                    int nr)
1063 {
1064 }
1065 
1066 static inline void __mod_memcg_page_state(struct page *page,
1067                                           int idx,
1068                                           int nr)
1069 {
1070 }
1071 
1072 static inline void mod_memcg_page_state(struct page *page,
1073                                         int idx,
1074                                         int nr)
1075 {
1076 }
1077 
1078 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1079                                               enum node_stat_item idx)
1080 {
1081         return node_page_state(lruvec_pgdat(lruvec), idx);
1082 }
1083 
1084 static inline void __mod_lruvec_state(struct lruvec *lruvec,
1085                                       enum node_stat_item idx, int val)
1086 {
1087         __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1088 }
1089 
1090 static inline void mod_lruvec_state(struct lruvec *lruvec,
1091                                     enum node_stat_item idx, int val)
1092 {
1093         mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1094 }
1095 
1096 static inline void __mod_lruvec_page_state(struct page *page,
1097                                            enum node_stat_item idx, int val)
1098 {
1099         __mod_node_page_state(page_pgdat(page), idx, val);
1100 }
1101 
1102 static inline void mod_lruvec_page_state(struct page *page,
1103                                          enum node_stat_item idx, int val)
1104 {
1105         mod_node_page_state(page_pgdat(page), idx, val);
1106 }
1107 
1108 static inline
1109 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1110                                             gfp_t gfp_mask,
1111                                             unsigned long *total_scanned)
1112 {
1113         return 0;
1114 }
1115 
1116 static inline void mem_cgroup_split_huge_fixup(struct page *head)
1117 {
1118 }
1119 
1120 static inline void count_memcg_events(struct mem_cgroup *memcg,
1121                                       enum vm_event_item idx,
1122                                       unsigned long count)
1123 {
1124 }
1125 
1126 static inline void count_memcg_page_event(struct page *page,
1127                                           int idx)
1128 {
1129 }
1130 
1131 static inline
1132 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1133 {
1134 }
1135 #endif /* CONFIG_MEMCG */
1136 
1137 /* idx can be of type enum memcg_stat_item or node_stat_item */
1138 static inline void __inc_memcg_state(struct mem_cgroup *memcg,
1139                                      int idx)
1140 {
1141         __mod_memcg_state(memcg, idx, 1);
1142 }
1143 
1144 /* idx can be of type enum memcg_stat_item or node_stat_item */
1145 static inline void __dec_memcg_state(struct mem_cgroup *memcg,
1146                                      int idx)
1147 {
1148         __mod_memcg_state(memcg, idx, -1);
1149 }
1150 
1151 /* idx can be of type enum memcg_stat_item or node_stat_item */
1152 static inline void __inc_memcg_page_state(struct page *page,
1153                                           int idx)
1154 {
1155         __mod_memcg_page_state(page, idx, 1);
1156 }
1157 
1158 /* idx can be of type enum memcg_stat_item or node_stat_item */
1159 static inline void __dec_memcg_page_state(struct page *page,
1160                                           int idx)
1161 {
1162         __mod_memcg_page_state(page, idx, -1);
1163 }
1164 
1165 static inline void __inc_lruvec_state(struct lruvec *lruvec,
1166                                       enum node_stat_item idx)
1167 {
1168         __mod_lruvec_state(lruvec, idx, 1);
1169 }
1170 
1171 static inline void __dec_lruvec_state(struct lruvec *lruvec,
1172                                       enum node_stat_item idx)
1173 {
1174         __mod_lruvec_state(lruvec, idx, -1);
1175 }
1176 
1177 static inline void __inc_lruvec_page_state(struct page *page,
1178                                            enum node_stat_item idx)
1179 {
1180         __mod_lruvec_page_state(page, idx, 1);
1181 }
1182 
1183 static inline void __dec_lruvec_page_state(struct page *page,
1184                                            enum node_stat_item idx)
1185 {
1186         __mod_lruvec_page_state(page, idx, -1);
1187 }
1188 
1189 /* idx can be of type enum memcg_stat_item or node_stat_item */
1190 static inline void inc_memcg_state(struct mem_cgroup *memcg,
1191                                    int idx)
1192 {
1193         mod_memcg_state(memcg, idx, 1);
1194 }
1195 
1196 /* idx can be of type enum memcg_stat_item or node_stat_item */
1197 static inline void dec_memcg_state(struct mem_cgroup *memcg,
1198                                    int idx)
1199 {
1200         mod_memcg_state(memcg, idx, -1);
1201 }
1202 
1203 /* idx can be of type enum memcg_stat_item or node_stat_item */
1204 static inline void inc_memcg_page_state(struct page *page,
1205                                         int idx)
1206 {
1207         mod_memcg_page_state(page, idx, 1);
1208 }
1209 
1210 /* idx can be of type enum memcg_stat_item or node_stat_item */
1211 static inline void dec_memcg_page_state(struct page *page,
1212                                         int idx)
1213 {
1214         mod_memcg_page_state(page, idx, -1);
1215 }
1216 
1217 static inline void inc_lruvec_state(struct lruvec *lruvec,
1218                                     enum node_stat_item idx)
1219 {
1220         mod_lruvec_state(lruvec, idx, 1);
1221 }
1222 
1223 static inline void dec_lruvec_state(struct lruvec *lruvec,
1224                                     enum node_stat_item idx)
1225 {
1226         mod_lruvec_state(lruvec, idx, -1);
1227 }
1228 
1229 static inline void inc_lruvec_page_state(struct page *page,
1230                                          enum node_stat_item idx)
1231 {
1232         mod_lruvec_page_state(page, idx, 1);
1233 }
1234 
1235 static inline void dec_lruvec_page_state(struct page *page,
1236                                          enum node_stat_item idx)
1237 {
1238         mod_lruvec_page_state(page, idx, -1);
1239 }
1240 
1241 #ifdef CONFIG_CGROUP_WRITEBACK
1242 
1243 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1244 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1245                          unsigned long *pheadroom, unsigned long *pdirty,
1246                          unsigned long *pwriteback);
1247 
1248 #else   /* CONFIG_CGROUP_WRITEBACK */
1249 
1250 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1251 {
1252         return NULL;
1253 }
1254 
1255 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1256                                        unsigned long *pfilepages,
1257                                        unsigned long *pheadroom,
1258                                        unsigned long *pdirty,
1259                                        unsigned long *pwriteback)
1260 {
1261 }
1262 
1263 #endif  /* CONFIG_CGROUP_WRITEBACK */
1264 
1265 struct sock;
1266 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1267 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1268 #ifdef CONFIG_MEMCG
1269 extern struct static_key_false memcg_sockets_enabled_key;
1270 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1271 void mem_cgroup_sk_alloc(struct sock *sk);
1272 void mem_cgroup_sk_free(struct sock *sk);
1273 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1274 {
1275         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1276                 return true;
1277         do {
1278                 if (time_before(jiffies, memcg->socket_pressure))
1279                         return true;
1280         } while ((memcg = parent_mem_cgroup(memcg)));
1281         return false;
1282 }
1283 #else
1284 #define mem_cgroup_sockets_enabled 0
1285 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
1286 static inline void mem_cgroup_sk_free(struct sock *sk) { };
1287 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1288 {
1289         return false;
1290 }
1291 #endif
1292 
1293 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
1294 void memcg_kmem_put_cache(struct kmem_cache *cachep);
1295 
1296 #ifdef CONFIG_MEMCG_KMEM
1297 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
1298 void __memcg_kmem_uncharge(struct page *page, int order);
1299 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
1300                               struct mem_cgroup *memcg);
1301 
1302 extern struct static_key_false memcg_kmem_enabled_key;
1303 extern struct workqueue_struct *memcg_kmem_cache_wq;
1304 
1305 extern int memcg_nr_cache_ids;
1306 void memcg_get_cache_ids(void);
1307 void memcg_put_cache_ids(void);
1308 
1309 /*
1310  * Helper macro to loop through all memcg-specific caches. Callers must still
1311  * check if the cache is valid (it is either valid or NULL).
1312  * The slab_mutex must be held when looping through those caches.
1313  */
1314 #define for_each_memcg_cache_index(_idx)        \
1315         for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
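/*
 * Rough usage sketch of the typical caller pattern (hedged;
 * cache_from_memcg_idx() is a helper in mm/slab.h, not in this header):
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(root_cache, i);
 *		if (!c)
 *			continue;
 *		... inspect the per-memcg child cache c ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */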
1316 
1317 static inline bool memcg_kmem_enabled(void)
1318 {
1319         return static_branch_unlikely(&memcg_kmem_enabled_key);
1320 }
1321 
1322 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1323 {
1324         if (memcg_kmem_enabled())
1325                 return __memcg_kmem_charge(page, gfp, order);
1326         return 0;
1327 }
1328 
1329 static inline void memcg_kmem_uncharge(struct page *page, int order)
1330 {
1331         if (memcg_kmem_enabled())
1332                 __memcg_kmem_uncharge(page, order);
1333 }
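/*
 * Caller side, for reference (hedged sketch, simplified from the page
 * allocator in mm/page_alloc.c): kernel allocations opt into accounting
 * with __GFP_ACCOUNT, and accounted pages are uncharged again on the
 * free path:
 *
 *	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
 *	    unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */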
1334 
1335 static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
1336                                           int order, struct mem_cgroup *memcg)
1337 {
1338         if (memcg_kmem_enabled())
1339                 return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
1340         return 0;
1341 }
1342 /*
1343  * Helper for accessing a memcg's index. It will be used as an index in the
1344  * child cache array in kmem_cache, and also to derive its name. This function
1345  * will return -1 when this is not a kmem-limited memcg.
1346  */
1347 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1348 {
1349         return memcg ? memcg->kmemcg_id : -1;
1350 }
1351 
1352 extern int memcg_expand_shrinker_maps(int new_id);
1353 
1354 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1355                                    int nid, int shrinker_id);
1356 #else
1357 
1358 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1359 {
1360         return 0;
1361 }
1362 
1363 static inline void memcg_kmem_uncharge(struct page *page, int order)
1364 {
1365 }
1366 
1367 static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1368 {
1369         return 0;
1370 }
1371 
1372 static inline void __memcg_kmem_uncharge(struct page *page, int order)
1373 {
1374 }
1375 
1376 #define for_each_memcg_cache_index(_idx)        \
1377         for (; NULL; )
1378 
1379 static inline bool memcg_kmem_enabled(void)
1380 {
1381         return false;
1382 }
1383 
1384 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1385 {
1386         return -1;
1387 }
1388 
1389 static inline void memcg_get_cache_ids(void)
1390 {
1391 }
1392 
1393 static inline void memcg_put_cache_ids(void)
1394 {
1395 }
1396 
1397 static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1398                                           int nid, int shrinker_id) { }
1399 #endif /* CONFIG_MEMCG_KMEM */
1400 
1401 #endif /* _LINUX_MEMCONTROL_H */
1402 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp