Linux/include/linux/memblock.h

#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS   128
#define INIT_PHYSMEM_REGIONS    4

/* Definition of memblock flags. */
enum {
        MEMBLOCK_NONE           = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
};

struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        int nid;
#endif
};

struct memblock_type {
        unsigned long cnt;      /* number of regions */
        unsigned long max;      /* size of the allocated array */
        phys_addr_t total_size; /* size of all regions */
        struct memblock_region *regions;
        char *name;
};

struct memblock {
        bool bottom_up;  /* is allocation direction bottom-up? */
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...)                                          \
        do {                                                            \
                if (memblock_debug)                                     \
                        printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__);   \
        } while (0)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
                                        phys_addr_t start, phys_addr_t end,
                                        int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
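
/*
 * Example usage (illustrative sketch, not part of this header): a typical
 * early-boot sequence in arch setup code registers RAM with memblock_add(),
 * carves out firmware areas with memblock_reserve(), and only then
 * allocates. The addresses and sizes below are hypothetical.
 *
 *      memblock_add(0x80000000, SZ_512M);    // report 512M of RAM
 *      memblock_reserve(0x80000000, SZ_1M);  // keep firmware data out
 *      phys_addr_t pa = memblock_alloc(SZ_64K, SZ_4K);
 *      if (!pa)
 *              panic("early allocation failed");
 */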

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
                       phys_addr_t base, phys_addr_t size,
                       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
                                phys_addr_t *out_end);

void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or just type_a if type_b is %NULL
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,               \
                           p_start, p_end, p_nid)                       \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,           \
                              p_start, p_end, p_nid))
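
/*
 * Example usage (illustrative sketch): walk all memory that is not
 * reserved, across every node. This is what the for_each_free_mem_range()
 * wrapper further down expands to.
 *
 *      u64 i;
 *      phys_addr_t start, end;
 *
 *      for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *                         NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *              pr_info("free range: %pa..%pa\n", &start, &end);
 */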

/**
 * for_each_mem_range_rev - reverse-iterate through memblock areas from
 * type_a that are not included in type_b, or just type_a if type_b is %NULL
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,           \
                               p_start, p_end, p_nid)                   \
        for (i = (u64)ULLONG_MAX,                                       \
                     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
                                          p_start, p_end, p_nid);       \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)                 \
        for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);   \
             i != (u64)ULLONG_MAX;                                      \
             __next_reserved_mem_region(&i, p_start, p_end))
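
/*
 * Example usage (illustrative sketch): report every reserved range, e.g.
 * to account for memory the page allocator will never see.
 *
 *      u64 i;
 *      phys_addr_t start, end;
 *
 *      for_each_reserved_mem_region(i, &start, &end)
 *              pr_info("reserved: %pa..%pa\n", &start, &end);
 */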

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}
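
/*
 * Example usage (illustrative sketch): the memblock_mark_*() calls above
 * set a flag on a physical range, and these predicates test it per region;
 * base and size are hypothetical, and for_each_memblock() is defined below.
 *
 *      struct memblock_region *reg;
 *
 *      memblock_mark_hotplug(base, size);
 *      ...
 *      for_each_memblock(memory, reg)
 *              if (memblock_is_hotpluggable(reg))
 *                      pr_info("hotpluggable region at %pa\n", &reg->base);
 */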

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
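
/*
 * Example usage (illustrative sketch): count the pages configured on node
 * 0, similar to how early zone sizing walks pfn ranges. Note that @i is an
 * int here, unlike the u64 index used by the range iterators above.
 *
 *      unsigned long start_pfn, end_pfn, pages = 0;
 *      int i, nid;
 *
 *      for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, &nid)
 *              pages += end_pfn - start_pfn;
 */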
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        for_each_mem_range(i, &memblock.memory, &memblock.reserved,     \
                           nid, flags, p_start, p_end, p_nid)
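
/*
 * Example usage (illustrative sketch): total up the early memory that is
 * still free (memory && !reserved) across all nodes.
 *
 *      u64 i;
 *      phys_addr_t start, end, free_bytes = 0;
 *
 *      for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *                              &start, &end, NULL)
 *              free_bytes += end - start;
 */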

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                               nid, flags, p_start, p_end, p_nid)

/**
 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
 * Available as soon as memblock is initialized.
 * Note: because this memory does not belong to any physical node, flags and
 * nid arguments do not make sense and thus are not exposed as arguments.
 */
#define for_each_resv_unavail_range(i, p_start, p_end)                  \
        for_each_mem_range(i, &memblock.reserved, &memblock.memory,     \
                           NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)

static inline void memblock_set_region_flags(struct memblock_region *r,
                                             unsigned long flags)
{
        r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
                                               unsigned long flags)
{
        r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);

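/*
 * Example usage (illustrative sketch): NUMA-aware boot allocation, where
 * size and nid are hypothetical caller-supplied values. The _try_nid
 * variant is the forgiving form: if node-local memory cannot satisfy the
 * request, it falls back to any accessible memory rather than failing.
 *
 *      phys_addr_t pa = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, nid);
 */
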
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock allocates memory from low addresses upwards; otherwise it
 * allocates top-down.
 */
static inline bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}
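
/*
 * Example usage (illustrative sketch): movable-node configurations switch
 * to bottom-up early in boot so that allocations stay near the kernel
 * image in low memory, leaving higher memory free for hot-removal.
 *
 *      memblock_set_bottom_up(true);
 *      ...
 *      if (memblock_bottom_up())
 *              pr_info("memblock: allocating bottom-up\n");
 */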

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE       0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
                                        phys_addr_t start, phys_addr_t end,
                                        ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
                                phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
                                  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
        if (memblock_debug)
                __memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)                                        \
        for (region = memblock.memblock_type.regions;                                   \
             region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);    \
             region++)
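
/*
 * Example usage (illustrative sketch): walk every registered memory region
 * and derive its page-frame span with the pfn accessors above.
 *
 *      struct memblock_region *reg;
 *
 *      for_each_memblock(memory, reg) {
 *              unsigned long spfn = memblock_region_memory_base_pfn(reg);
 *              unsigned long epfn = memblock_region_memory_end_pfn(reg);
 *
 *              pr_info("memory: pfn %lu..%lu\n", spfn, epfn);
 *      }
 */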

#define for_each_memblock_type(i, memblock_type, rgn)                   \
        for (i = 0, rgn = &memblock_type->regions[0];                   \
             i < memblock_type->cnt;                                    \
             i++, rgn = &memblock_type->regions[i])

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
                phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return 0;
}

static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
                phys_addr_t end_addr)
{
        return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */