TOMOYO Linux Cross Reference
Linux/include/linux/memcontrol.h

/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names array is defined in mm/memcontrol.c.
 * These two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_RSS_HUGE,       /* # of pages charged as anon huge */
        MEM_CGROUP_STAT_FILE_MAPPED,    /* # of pages charged as file rss */
        MEM_CGROUP_STAT_WRITEBACK,      /* # of pages under writeback */
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field could therefore be used, but having a rule is better: a charge
 * function's gfp_mask should be set to GFP_KERNEL or to
 * gfp_mask & GFP_RECLAIM_MASK to avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */

extern int mem_cgroup_charge_anon(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);
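
/*
 * Illustrative sketch (added for this cross-reference, not part of the
 * upstream header): the swap-in charge is a two-phase operation, so a
 * swap-in path first tries the charge, then either commits it once the page
 * is mapped or cancels it on failure.  The surrounding control flow is
 * hypothetical.
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_fail;
 *	if (map_the_page(page) != 0) {		(hypothetical mapping step)
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *		goto out_fail;
 *	}
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 */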

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

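/*
 * Illustrative batching sketch (added for this cross-reference, not part of
 * the upstream header): callers that free many pages in a row can bracket
 * the loop so the per-page uncharges are coalesced.  "pages_to_free" is a
 * hypothetical list of pages.
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */
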
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
                                  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
                        const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

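/*
 * Illustrative iteration sketch (added for this cross-reference, not part of
 * the upstream header): mem_cgroup_iter() walks the hierarchy below @root,
 * and mem_cgroup_iter_break() must be used when leaving the walk early so
 * the reference on the last returned memcg is dropped.  "should_stop" is a
 * hypothetical predicate.
 *
 *	struct mem_cgroup *iter;
 *
 *	iter = mem_cgroup_iter(root, NULL, NULL);
 *	while (iter) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 */
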
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);

static inline void mem_cgroup_oom_enable(void)
{
        WARN_ON(current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
        WARN_ON(!current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);

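/*
 * Illustrative sketch (added for this cross-reference, not part of the
 * upstream header): user-space fault paths bracket the fault with the OOM
 * enable/disable helpers; if the task entered a memcg OOM situation but the
 * fault itself completed without VM_FAULT_OOM, the stale OOM state is
 * cleaned up via mem_cgroup_oom_synchronize().  "ret" and "do_fault" are
 * hypothetical.
 *
 *	mem_cgroup_oom_enable();
 *	ret = do_fault(mm, vma, address, flags);
 *	mem_cgroup_oom_disable();
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 */
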
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (memory_cgrp_subsys.disabled)
                return true;
        return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
                __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
                                unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        if (*locked)
                __mem_cgroup_end_update_page_stat(page, flags);
        rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_stat_index idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}

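/*
 * Illustrative sketch (added for this cross-reference, not part of the
 * upstream header): per-page statistic updates are bracketed by the
 * begin/end helpers so they are safe against concurrent memcg moves, e.g.
 * when a page becomes file-mapped:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */
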
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                                gfp_t gfp_mask,
                                                unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_charge_anon(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_charge_file(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                          struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
{
        return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
                                      const struct mem_cgroup *memcg)
{
        return true;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                              int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

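/*
 * Illustrative sketch (added for this cross-reference, not part of the
 * upstream header): with slab_mutex held, a root cache's per-memcg children
 * can be walked while skipping unused slots.  cache_from_memcg_idx() is the
 * lookup helper used internally by the slab allocators and is assumed here
 * for illustration only.
 *
 *	int i;
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(root_cache, i);
 *		if (!c)
 *			continue;
 *		... operate on the per-memcg child cache ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */
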
static inline bool memcg_kmem_enabled(void)
{
        return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power not to incur any overhead
 * in the kmem functions for non-memcg users. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly.  If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
                                        int order);
void __memcg_kmem_commit_charge(struct page *page,
                                       struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
                             struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);

int __memcg_cleanup_cache_params(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        if (!memcg_kmem_enabled())
                return true;

        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and have this allocation
         * unaccounted. We could in theory charge it with
         * res_counter_charge_nofail, but we hope those allocations are rare,
         * and won't be worth the trouble.
         */
        if (gfp & __GFP_NOFAIL)
                return true;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return true;

        /* If the task is dying, just let it go. */
        if (unlikely(fatal_signal_pending(current)))
                return true;

        return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
        if (memcg_kmem_enabled() && memcg)
                __memcg_kmem_commit_charge(page, memcg, order);
}

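/*
 * Illustrative sketch (added for this cross-reference, not part of the
 * upstream header): the three helpers above work together when allocating
 * accounted kernel pages.  The allocation is attempted only if the charge
 * succeeds, and the charge is then committed (or reverted, when @page came
 * back NULL):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp_mask, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	return page;
 *
 * and on the free side:
 *
 *	memcg_kmem_uncharge_pages(page, order);
 *	__free_pages(page, order);
 */
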
/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        if (!memcg_kmem_enabled())
                return cachep;
        if (gfp & __GFP_NOFAIL)
                return cachep;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return cachep;
        if (unlikely(fatal_signal_pending(current)))
                return cachep;

        return __memcg_kmem_get_cache(cachep, gfp);
}
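
/*
 * Illustrative sketch (added for this cross-reference, not part of the
 * upstream header): the slab allocators call this at the start of an object
 * allocation so that, when kmem accounting is active, the object comes from
 * (and is accounted to) the current task's per-memcg copy of the cache:
 *
 *	cachep = memcg_kmem_get_cache(cachep, gfpflags);
 *	... allocate the object from the returned cachep ...
 */
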
#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
                struct kmem_cache *s, struct kmem_cache *root_cache)
{
        return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        return cachep;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
