
TOMOYO Linux Cross Reference
Linux/include/linux/memcontrol.h


  1 /* memcontrol.h - Memory Controller
  2  *
  3  * Copyright IBM Corporation, 2007
  4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
  5  *
  6  * Copyright 2007 OpenVZ SWsoft Inc
  7  * Author: Pavel Emelianov <xemul@openvz.org>
  8  *
  9  * This program is free software; you can redistribute it and/or modify
 10  * it under the terms of the GNU General Public License as published by
 11  * the Free Software Foundation; either version 2 of the License, or
 12  * (at your option) any later version.
 13  *
 14  * This program is distributed in the hope that it will be useful,
 15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 17  * GNU General Public License for more details.
 18  */
 19 
 20 #ifndef _LINUX_MEMCONTROL_H
 21 #define _LINUX_MEMCONTROL_H
 22 #include <linux/cgroup.h>
 23 #include <linux/vm_event_item.h>
 24 #include <linux/hardirq.h>
 25 #include <linux/jump_label.h>
 26 
 27 struct mem_cgroup;
 28 struct page;
 29 struct mm_struct;
 30 struct kmem_cache;
 31 
 32 /*
 33  * The corresponding mem_cgroup_stat_names array is defined in mm/memcontrol.c;
 34  * the two lists must be kept in sync with each other.
 35  */
 36 enum mem_cgroup_stat_index {
 37         /*
 38          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
 39          */
 40         MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
 41         MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
 42         MEM_CGROUP_STAT_RSS_HUGE,       /* # of pages charged as anon huge */
 43         MEM_CGROUP_STAT_FILE_MAPPED,    /* # of pages charged as file rss */
 44         MEM_CGROUP_STAT_DIRTY,          /* # of dirty pages in page cache */
 45         MEM_CGROUP_STAT_WRITEBACK,      /* # of pages under writeback */
 46         MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
 47         MEM_CGROUP_STAT_NSTATS,
 48 };
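/*
 * Illustrative sketch only: the warning above means mm/memcontrol.c keeps a
 * string table, mem_cgroup_stat_names[], with exactly one entry per index in
 * this enum, in the same order. Roughly like the following (the exact strings
 * live in mm/memcontrol.c; the ones shown here are assumptions):
 *
 *	static const char * const mem_cgroup_stat_names[] = {
 *		"cache",
 *		"rss",
 *		"rss_huge",
 *		"mapped_file",
 *		"dirty",
 *		"writeback",
 *		"swap",
 *	};
 *
 * Adding an index to one list but not the other would desynchronize the
 * reported per-memcg statistics.
 */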
 49 
 50 struct mem_cgroup_reclaim_cookie {
 51         struct zone *zone;
 52         int priority;
 53         unsigned int generation;
 54 };
 55 
 56 enum mem_cgroup_events_index {
 57         MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
 58         MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
 59         MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
 60         MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
 61         MEM_CGROUP_EVENTS_NSTATS,
 62         /* default hierarchy events */
 63         MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
 64         MEMCG_HIGH,
 65         MEMCG_MAX,
 66         MEMCG_OOM,
 67         MEMCG_NR_EVENTS,
 68 };
 69 
 70 #ifdef CONFIG_MEMCG
 71 extern struct cgroup_subsys_state *mem_cgroup_root_css;
 72 
 73 void mem_cgroup_events(struct mem_cgroup *memcg,
 74                        enum mem_cgroup_events_index idx,
 75                        unsigned int nr);
 76 
 77 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
 78 
 79 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 80                           gfp_t gfp_mask, struct mem_cgroup **memcgp);
 81 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 82                               bool lrucare);
 83 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
 84 void mem_cgroup_uncharge(struct page *page);
 85 void mem_cgroup_uncharge_list(struct list_head *page_list);
 86 
 87 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 88                         bool lrucare);
 89 
 90 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
 91 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 92 
 93 bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
 94                               struct mem_cgroup *root);
 95 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 96 
 97 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 98 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 99 
100 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
101 extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
102 
103 static inline bool mm_match_cgroup(struct mm_struct *mm,
104                                    struct mem_cgroup *memcg)
105 {
106         struct mem_cgroup *task_memcg;
107         bool match = false;
108 
109         rcu_read_lock();
110         task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
111         if (task_memcg)
112                 match = mem_cgroup_is_descendant(task_memcg, memcg);
113         rcu_read_unlock();
114         return match;
115 }
116 
117 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
118 extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
119 
120 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
121                                    struct mem_cgroup *,
122                                    struct mem_cgroup_reclaim_cookie *);
123 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
124 
125 /*
126  * For memory reclaim.
127  */
128 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
129 bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
130 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
131 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
132 void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
133 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
134                                         struct task_struct *p);
135 
136 static inline void mem_cgroup_oom_enable(void)
137 {
138         WARN_ON(current->memcg_oom.may_oom);
139         current->memcg_oom.may_oom = 1;
140 }
141 
142 static inline void mem_cgroup_oom_disable(void)
143 {
144         WARN_ON(!current->memcg_oom.may_oom);
145         current->memcg_oom.may_oom = 0;
146 }
147 
148 static inline bool task_in_memcg_oom(struct task_struct *p)
149 {
150         return p->memcg_oom.memcg;
151 }
152 
153 bool mem_cgroup_oom_synchronize(bool wait);
154 
155 #ifdef CONFIG_MEMCG_SWAP
156 extern int do_swap_account;
157 #endif
158 
159 static inline bool mem_cgroup_disabled(void)
160 {
161         if (memory_cgrp_subsys.disabled)
162                 return true;
163         return false;
164 }
165 
166 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
167 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
168                                  enum mem_cgroup_stat_index idx, int val);
169 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
170 
171 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
172                                             enum mem_cgroup_stat_index idx)
173 {
174         mem_cgroup_update_page_stat(memcg, idx, 1);
175 }
176 
177 static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
178                                             enum mem_cgroup_stat_index idx)
179 {
180         mem_cgroup_update_page_stat(memcg, idx, -1);
181 }
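/*
 * Sketch of the intended calling pattern for the page-stat helpers above
 * (hypothetical caller; the page pointer and the condition are placeholders).
 * The counter update is expected to sit inside the section bracketed by
 * mem_cgroup_begin_page_stat()/mem_cgroup_end_page_stat(), together with the
 * page state change it accounts for:
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_begin_page_stat(page);
 *	if (page_became_file_mapped)
 *		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_page_stat(memcg);
 */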
182 
183 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
184                                                 gfp_t gfp_mask,
185                                                 unsigned long *total_scanned);
186 
187 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
188 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
189                                              enum vm_event_item idx)
190 {
191         if (mem_cgroup_disabled())
192                 return;
193         __mem_cgroup_count_vm_event(mm, idx);
194 }
195 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
196 void mem_cgroup_split_huge_fixup(struct page *head);
197 #endif
198 
199 #else /* CONFIG_MEMCG */
200 struct mem_cgroup;
201 
202 #define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
203 
204 static inline void mem_cgroup_events(struct mem_cgroup *memcg,
205                                      enum mem_cgroup_events_index idx,
206                                      unsigned int nr)
207 {
208 }
209 
210 static inline bool mem_cgroup_low(struct mem_cgroup *root,
211                                   struct mem_cgroup *memcg)
212 {
213         return false;
214 }
215 
216 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
217                                         gfp_t gfp_mask,
218                                         struct mem_cgroup **memcgp)
219 {
220         *memcgp = NULL;
221         return 0;
222 }
223 
224 static inline void mem_cgroup_commit_charge(struct page *page,
225                                             struct mem_cgroup *memcg,
226                                             bool lrucare)
227 {
228 }
229 
230 static inline void mem_cgroup_cancel_charge(struct page *page,
231                                             struct mem_cgroup *memcg)
232 {
233 }
234 
235 static inline void mem_cgroup_uncharge(struct page *page)
236 {
237 }
238 
239 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
240 {
241 }
242 
243 static inline void mem_cgroup_migrate(struct page *oldpage,
244                                       struct page *newpage,
245                                       bool lrucare)
246 {
247 }
248 
249 static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
250                                                     struct mem_cgroup *memcg)
251 {
252         return &zone->lruvec;
253 }
254 
255 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
256                                                     struct zone *zone)
257 {
258         return &zone->lruvec;
259 }
260 
261 static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
262 {
263         return NULL;
264 }
265 
266 static inline bool mm_match_cgroup(struct mm_struct *mm,
267                 struct mem_cgroup *memcg)
268 {
269         return true;
270 }
271 
272 static inline bool task_in_mem_cgroup(struct task_struct *task,
273                                       const struct mem_cgroup *memcg)
274 {
275         return true;
276 }
277 
278 static inline struct cgroup_subsys_state
279                 *mem_cgroup_css(struct mem_cgroup *memcg)
280 {
281         return NULL;
282 }
283 
284 static inline struct mem_cgroup *
285 mem_cgroup_iter(struct mem_cgroup *root,
286                 struct mem_cgroup *prev,
287                 struct mem_cgroup_reclaim_cookie *reclaim)
288 {
289         return NULL;
290 }
291 
292 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
293                                          struct mem_cgroup *prev)
294 {
295 }
296 
297 static inline bool mem_cgroup_disabled(void)
298 {
299         return true;
300 }
301 
302 static inline int
303 mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
304 {
305         return 1;
306 }
307 
308 static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
309 {
310         return true;
311 }
312 
313 static inline unsigned long
314 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
315 {
316         return 0;
317 }
318 
319 static inline void
320 mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
321                               int increment)
322 {
323 }
324 
325 static inline void
326 mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
327 {
328 }
329 
330 static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
331 {
332         return NULL;
333 }
334 
335 static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
336 {
337 }
338 
339 static inline void mem_cgroup_oom_enable(void)
340 {
341 }
342 
343 static inline void mem_cgroup_oom_disable(void)
344 {
345 }
346 
347 static inline bool task_in_memcg_oom(struct task_struct *p)
348 {
349         return false;
350 }
351 
352 static inline bool mem_cgroup_oom_synchronize(bool wait)
353 {
354         return false;
355 }
356 
357 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
358                                             enum mem_cgroup_stat_index idx)
359 {
360 }
361 
362 static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
363                                             enum mem_cgroup_stat_index idx)
364 {
365 }
366 
367 static inline
368 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
369                                             gfp_t gfp_mask,
370                                             unsigned long *total_scanned)
371 {
372         return 0;
373 }
374 
375 static inline void mem_cgroup_split_huge_fixup(struct page *head)
376 {
377 }
378 
379 static inline
380 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
381 {
382 }
383 #endif /* CONFIG_MEMCG */
384 
385 enum {
386         UNDER_LIMIT,
387         SOFT_LIMIT,
388         OVER_LIMIT,
389 };
390 
391 #ifdef CONFIG_CGROUP_WRITEBACK
392 
393 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
394 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
395 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
396                          unsigned long *pdirty, unsigned long *pwriteback);
397 
398 #else   /* CONFIG_CGROUP_WRITEBACK */
399 
400 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
401 {
402         return NULL;
403 }
404 
405 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
406                                        unsigned long *pavail,
407                                        unsigned long *pdirty,
408                                        unsigned long *pwriteback)
409 {
410 }
411 
412 #endif  /* CONFIG_CGROUP_WRITEBACK */
413 
414 struct sock;
415 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
416 void sock_update_memcg(struct sock *sk);
417 void sock_release_memcg(struct sock *sk);
418 #else
419 static inline void sock_update_memcg(struct sock *sk)
420 {
421 }
422 static inline void sock_release_memcg(struct sock *sk)
423 {
424 }
425 #endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
426 
427 #ifdef CONFIG_MEMCG_KMEM
428 extern struct static_key memcg_kmem_enabled_key;
429 
430 extern int memcg_nr_cache_ids;
431 extern void memcg_get_cache_ids(void);
432 extern void memcg_put_cache_ids(void);
433 
434 /*
435  * Helper macro to loop through all memcg-specific caches. Callers must still
436  * check if the cache is valid (it is either valid or NULL).
437  * The slab_mutex must be held while looping through those caches.
438  */
439 #define for_each_memcg_cache_index(_idx)        \
440         for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
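/*
 * Sketch of the usage pattern the comment above asks for: take slab_mutex,
 * walk the index space, and skip NULL slots. This is a hypothetical caller;
 * cache_from_memcg_idx() is assumed to be the slab-internal helper that maps
 * a root cache plus index to the per-memcg cache:
 *
 *	int i;
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(root_cache, i);
 *		if (!c)
 *			continue;
 *		... shrink, drain or otherwise operate on c ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */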
441 
442 static inline bool memcg_kmem_enabled(void)
443 {
444         return static_key_false(&memcg_kmem_enabled_key);
445 }
446 
447 bool memcg_kmem_is_active(struct mem_cgroup *memcg);
448 
449 /*
450  * In general, we'll do everything in our power to avoid incurring any
451  * overhead in the kmem functions for non-memcg users. Not even a function
452  * call, if we can avoid it.
453  *
454  * Therefore, we'll inline all those functions so that in the best case, we'll
455  * see that kmemcg is off for everybody and proceed quickly.  If it is on,
456  * we'll still do most of the flag checking inline. We check a lot of
457  * conditions, but because they are pretty simple, they are expected to be
458  * fast.
459  */
460 bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
461                                         int order);
462 void __memcg_kmem_commit_charge(struct page *page,
463                                        struct mem_cgroup *memcg, int order);
464 void __memcg_kmem_uncharge_pages(struct page *page, int order);
465 
466 int memcg_cache_id(struct mem_cgroup *memcg);
467 
468 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
469 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
470 
471 struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
472 
473 int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
474                       unsigned long nr_pages);
475 void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
476 
477 /**
478  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
479  * @gfp: the gfp allocation flags.
480  * @memcg: a pointer to the memcg this was charged against.
481  * @order: allocation order.
482  *
483  * Returns true if the memcg to which the current task belongs can hold
484  * this allocation.
485  *
486  * We return true automatically if this allocation is not to be accounted to
487  * any memcg.
488  */
489 static inline bool
490 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
491 {
492         if (!memcg_kmem_enabled())
493                 return true;
494 
495         if (gfp & __GFP_NOACCOUNT)
496                 return true;
497         /*
498          * __GFP_NOFAIL allocations will move on even if charging is not
499          * possible. Therefore we don't even try, and have this allocation
500          * unaccounted. We could in theory charge it forcibly, but we hope
501          * those allocations are rare, and won't be worth the trouble.
502          */
503         if (gfp & __GFP_NOFAIL)
504                 return true;
505         if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
506                 return true;
507 
508         /* If the task is dying, just let it go. */
509         if (unlikely(fatal_signal_pending(current)))
510                 return true;
511 
512         return __memcg_kmem_newpage_charge(gfp, memcg, order);
513 }
514 
515 /**
516  * memcg_kmem_uncharge_pages: uncharge pages from memcg
517  * @page: pointer to struct page being freed
518  * @order: allocation order.
519  */
520 static inline void
521 memcg_kmem_uncharge_pages(struct page *page, int order)
522 {
523         if (memcg_kmem_enabled())
524                 __memcg_kmem_uncharge_pages(page, order);
525 }
526 
527 /**
528  * memcg_kmem_commit_charge: embeds correct memcg in a page
529  * @page: pointer to struct page recently allocated
530  * @memcg: the memcg structure we charged against
531  * @order: allocation order.
532  *
533  * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
534  * failure of the allocation. If @page is NULL, this function will revert the
535  * charges. Otherwise, it will commit @page to @memcg.
536  */
537 static inline void
538 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
539 {
540         if (memcg_kmem_enabled() && memcg)
541                 __memcg_kmem_commit_charge(page, memcg, order);
542 }
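/*
 * Putting the calls above together, the charge protocol for a kmem page
 * allocation looks like this (hypothetical caller, error handling trimmed):
 *
 *	struct mem_cgroup *memcg;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	if (!page)
 *		return NULL;
 *
 * If alloc_pages() fails, passing the NULL page to memcg_kmem_commit_charge()
 * reverts the charge, as documented above. The matching call at free time is
 * memcg_kmem_uncharge_pages(page, order).
 */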
543 
544 /**
545  * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
546  * @cachep: the original global kmem cache
547  * @gfp: allocation flags.
548  *
549  * All memory allocated from a per-memcg cache is charged to the owner memcg.
550  */
551 static __always_inline struct kmem_cache *
552 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
553 {
554         if (!memcg_kmem_enabled())
555                 return cachep;
556         if (gfp & __GFP_NOACCOUNT)
557                 return cachep;
558         if (gfp & __GFP_NOFAIL)
559                 return cachep;
560         if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
561                 return cachep;
562         if (unlikely(fatal_signal_pending(current)))
563                 return cachep;
564 
565         return __memcg_kmem_get_cache(cachep);
566 }
567 
568 static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
569 {
570         if (memcg_kmem_enabled())
571                 __memcg_kmem_put_cache(cachep);
572 }
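/*
 * Sketch of how the cache-selection pair above is meant to be used from the
 * slab side (hypothetical caller; the actual object allocation from the
 * returned cache is elided because it happens via slab-internal paths):
 *
 *	struct kmem_cache *s;
 *
 *	s = memcg_kmem_get_cache(cachep, gfpflags);
 *	... allocate the object from s; the charge lands on the owning memcg ...
 *	memcg_kmem_put_cache(s);
 *
 * When kmem accounting is disabled or the context is not accountable (see the
 * checks above), the root cache @cachep is returned unchanged.
 */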
573 
574 static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
575 {
576         if (!memcg_kmem_enabled())
577                 return NULL;
578         return __mem_cgroup_from_kmem(ptr);
579 }
580 #else
581 #define for_each_memcg_cache_index(_idx)        \
582         for (; NULL; )
583 
584 static inline bool memcg_kmem_enabled(void)
585 {
586         return false;
587 }
588 
589 static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
590 {
591         return false;
592 }
593 
594 static inline bool
595 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
596 {
597         return true;
598 }
599 
600 static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
601 {
602 }
603 
604 static inline void
605 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
606 {
607 }
608 
609 static inline int memcg_cache_id(struct mem_cgroup *memcg)
610 {
611         return -1;
612 }
613 
614 static inline void memcg_get_cache_ids(void)
615 {
616 }
617 
618 static inline void memcg_put_cache_ids(void)
619 {
620 }
621 
622 static inline struct kmem_cache *
623 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
624 {
625         return cachep;
626 }
627 
628 static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
629 {
630 }
631 
632 static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
633 {
634         return NULL;
635 }
636 #endif /* CONFIG_MEMCG_KMEM */
637 #endif /* _LINUX_MEMCONTROL_H */
638 
639 
