
TOMOYO Linux Cross Reference
Linux/include/linux/vmstat.h


#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the
 * generated code is simply an increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
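
/*
 * Editor's illustrative sketch, not part of the original header: a
 * typical caller just names an event item. PGFAULT and PGMAJFAULT are
 * real vm_event_item values; the wrapper function is hypothetical.
 */
static inline void vmstat_example_count_fault(bool major)
{
	count_vm_event(PGFAULT);	/* irq- and preemption-safe */
	if (major)
		count_vm_event(PGMAJFAULT);
	/*
	 * The __count_vm_event() variant uses raw_cpu_inc(): cheaper,
	 * but an increment can be lost under CPU migration, which is
	 * acceptable for these statistics-only counters.
	 */
}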

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)      count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
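
/*
 * Editor's note, not part of the original header: the token pasting in
 * __count_zone_vm_events() turns a per-zone event family into an index.
 * mm/page_alloc.c, for instance, counts allocations with
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * which expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone),
 *			  1 << order);
 *
 * selecting PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_MOVABLE, ... according
 * to the zone being allocated from.
 */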

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
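
/*
 * Editor's note, not part of the original header: under SMP the atomics
 * lag behind by the deltas still sitting in each CPU's vm_stat_diff[],
 * so a reader can transiently observe a negative sum; clamping to zero
 * is cheaper than folding the per-cpu diffs. A hypothetical reader:
 *
 *	unsigned long zone_free = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long total_free = global_page_state(NR_FREE_PAGES);
 */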

/*
 * A more accurate version that also considers the currently pending
 * deltas. For that we need to loop over all CPUs to find the current
 * deltas. There is no synchronization, so the result still cannot be
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
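
/*
 * Editor's note, not part of the original header: the snapshot walk is
 * O(nr_cpus), so it is reserved for the few decisions where the clamped
 * read above is too stale (mm/page_alloc.c, for example, reads
 * zone_page_state_snapshot(z, NR_FREE_PAGES) in zone_watermark_ok_safe()
 * once free pages drop near the per-cpu drift mark). Routine statistics
 * should keep using zone_page_state().
 */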

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
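
/*
 * Editor's illustrative sketch, not part of the original header: a
 * per-node total, e.g. the number of file pages on a given node. The
 * helper name is hypothetical; NR_FILE_PAGES is a real zone_stat_item.
 */
static inline unsigned long vmstat_example_node_file_pages(int node)
{
	return node_page_state(node, NR_FILE_PAGES);
}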

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
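
/*
 * Editor's illustrative sketch, not part of the original header:
 * returning 2^order pages to a zone's free lists while keeping the CMA
 * free count coherent. The helper is hypothetical; on SMP the __
 * accessors it relies on expect interrupts disabled, typically under
 * zone->lock.
 */
static inline void vmstat_example_return_pages(struct zone *zone,
				unsigned int order, int migratetype)
{
	__mod_zone_freepage_state(zone, 1 << order, migratetype);
	/* an allocation would pass a negative delta instead */
}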

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */
