#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * VM counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);
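
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the this_cpu variants are safe from any context, while the raw
 * variants skip the preemption safety and rely on the counters being
 * allowed to be racy. PGFAULT is a real vm_event_item; the surrounding
 * function is hypothetical.
 *
 *	static void hypothetical_fault_path(void)
 *	{
 *		count_vm_event(PGFAULT);
 *	}
 *
 * Totals are read back with all_vm_events(), which folds the per cpu
 * arrays into a caller-provided buffer of NR_VM_EVENT_ITEMS entries.
 */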

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif /* CONFIG_DEBUG_VM_VMACACHE */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
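
/*
 * Expansion sketch (illustrative): the per-zone event items of a class
 * are laid out in zone order, so token pasting the _NORMAL item and
 * adding the zone index selects the item for the zone passed in. For
 * example,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1)
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL +
 * zone_idx(zone), 1), i.e. the PGALLOC event of that zone.
 */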

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
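
/*
 * Usage sketch (hypothetical caller): prefer zone_page_state() on hot
 * paths and reserve the snapshot for slow paths where the pending per
 * cpu deltas matter, e.g. deciding whether a small zone is truly short
 * on free pages:
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < watermark)
 *		take_slow_path();
 *
 * watermark and take_slow_path() are placeholders; note that the loop
 * over online cpus makes each snapshot O(nr_cpus).
 */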

#ifdef CONFIG_NUMA

extern unsigned long node_page_state(int node, enum zone_stat_item item);
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP

void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
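
/*
 * Usage sketch (illustration only, mirroring how reclaim is expected
 * to drive these hooks): tighten the per cpu stat thresholds while a
 * node is under memory pressure and restore them afterwards:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim runs with more accurate counters ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */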

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
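
/*
 * Example (sketch): a free path that just returned a block of
 * 1 << order pages to a zone would account it as
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *
 * and pass a negative nr_pages when the pages are taken back out; the
 * CMA counter is only touched for CMA pageblocks.
 */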

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */