
TOMOYO Linux Cross Reference
Linux/arch/arm/mm/mmap.c


/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
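
COLOUR_ALIGN() rounds addr up to the next SHMLBA boundary and then adds the cache-colour offset implied by pgoff, so page 0 of the object always lands on an SHMLBA multiple. A minimal standalone sketch of the arithmetic, assuming 4 KiB pages and the usual ARM SHMLBA of four pages (16 KiB); the input values are illustrative only:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4UL << PAGE_SHIFT)   /* 16 KiB: four page colours */

#define COLOUR_ALIGN(addr, pgoff)                  \
        ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
         (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
        unsigned long addr = COLOUR_ALIGN(0x12345UL, 3UL);

        /* 0x12345 rounds up to 0x14000; page offset 3 adds 0x3000 */
        printf("0x%lx\n", addr);                 /* prints 0x17000 */

        /* page 0 of the object (addr - 3 pages) sits on an SHMLBA boundary */
        assert(((addr - (3UL << PAGE_SHIFT)) & (SHMLBA - 1)) == 0);
        return 0;
}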

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
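
mmap_base() places the top-down mmap region one clamped stack gap below the top of user space. A worked standalone sketch, assuming a TASK_SIZE of 0xbf000000 (configuration-dependent; illustrative only) and taking the stack limit as a parameter:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE     0xbf000000UL       /* assumed; config-dependent */
#define MIN_GAP       (128 * 1024 * 1024UL)
#define MAX_GAP       ((TASK_SIZE) / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, unsigned long stack_rlimit)
{
        unsigned long gap = stack_rlimit;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;
        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
        /* a default 8 MiB stack limit is below MIN_GAP, so gap = 128 MiB */
        printf("0x%lx\n", mmap_base(0, 8 * 1024 * 1024UL)); /* 0xb7000000 */
        return 0;
}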

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally; in the VIVT case,
 * however, the alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}
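
From userspace the contract is visible in two ways: addresses the kernel picks for shared file mappings satisfy the colour constraint, and MAP_FIXED | MAP_SHARED requests whose address disagrees with the colour implied by pgoff fail with EINVAL on aliasing VIPT hardware. A hedged sketch (the fixed address, the file path, and an SHMLBA of 16 KiB are assumptions; on non-aliasing caches the second mmap() simply succeeds):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/colour-test", O_RDWR | O_CREAT, 0600);

        if (fd < 0 || ftruncate(fd, 1 << 20) < 0)
                return 1;

        /* kernel-chosen address: align_offset/align_mask guarantee that
         * (addr - pgoff * PAGE_SIZE) is a multiple of SHMLBA */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, 3 * 4096);
        printf("kernel chose %p\n", p);

        /* 0x10000000 is colour 0, but pgoff 3 demands colour 3: on an
         * aliasing VIPT cache this fails with EINVAL */
        void *q = mmap((void *)0x10000000, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_FIXED, fd, 3 * 4096);
        if (q == MAP_FAILED)
                perror("mmap MAP_FIXED");

        munmap(p, 4096);
        close(fd);
        return 0;
}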

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = FIRST_USER_ADDRESS;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = mm->mmap_base;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
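
The `addr & ~PAGE_MASK` test relies on vm_unmapped_area() returning either a page-aligned address or a negative errno, whose low bits are necessarily set. A tiny standalone illustration of that convention (the successful address is illustrative):

#include <errno.h>
#include <stdio.h>

#define PAGE_MASK (~4095UL)

int main(void)
{
        unsigned long ok  = 0xb6f00000UL;           /* page-aligned result  */
        unsigned long err = (unsigned long)-ENOMEM; /* 0xfffffff4 on ARM32  */

        printf("ok  is error? %d\n", (ok  & ~PAGE_MASK) != 0);  /* 0 */
        printf("err is error? %d\n", (err & ~PAGE_MASK) != 0);  /* 1 */
        return 0;
}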

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
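
The random factor is one of 256 page-sized steps, so with 4 KiB pages the randomness occupies address bits 12-19 and the base is jittered across a 1 MiB (2^20-byte) window, which is what the "8 bits of randomness in 20 address space bits" comment refers to. Checking the arithmetic standalone:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* 8 random bits shifted into page-aligned offsets */
        unsigned long max_rnd = ((1UL << 8) - 1) << PAGE_SHIFT;

        printf("window: 0 .. 0x%lx (%lu KiB)\n", max_rnd,
               (max_rnd + (1UL << PAGE_SHIFT)) >> 10); /* 0xff000, 1024 KiB */
        return 0;
}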

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}
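
valid_phys_addr_range() accepts only ranges that lie entirely within directly-mapped RAM, from PHYS_OFFSET up to the physical address of high_memory; writing the bound as __pa(high_memory - 1) + 1 avoids applying __pa() to high_memory itself, which points one byte past the direct map. A standalone sketch with an assumed layout (PHYS_OFFSET and the lowmem size are illustrative):

#include <assert.h>

#define PHYS_OFFSET 0x80000000UL                    /* assumed start of RAM   */
#define LOWMEM_END  (PHYS_OFFSET + (512UL << 20))   /* assumed 512 MiB lowmem */

static int valid_range(unsigned long addr, unsigned long size)
{
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > LOWMEM_END)
                return 0;
        return 1;
}

int main(void)
{
        assert(valid_range(0x80000000UL, 4096));  /* inside lowmem      */
        assert(!valid_range(0x7ffff000UL, 4096)); /* below PHYS_OFFSET  */
        assert(!valid_range(0x9ffff000UL, 8192)); /* crosses lowmem end */
        return 0;
}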

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
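
The bound is expressed in page frames: pfn plus the mapping's page count must not exceed the number of addressable frames, 1 + (PHYS_MASK >> PAGE_SHIFT). For instance, assuming a 40-bit physical mask as with LPAE (illustrative; PHYS_MASK is configuration-dependent):

#include <assert.h>

#define PAGE_SHIFT 12
#define PHYS_MASK  ((1ULL << 40) - 1)   /* assumed: 40-bit physical (LPAE) */

static int valid_mmap_range(unsigned long long pfn, unsigned long long size)
{
        return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

int main(void)
{
        unsigned long long max_pfn = 1ULL << (40 - PAGE_SHIFT); /* 2^28 frames */

        assert(valid_mmap_range(max_pfn - 1, 1UL << PAGE_SHIFT)); /* last frame */
        assert(!valid_mmap_range(max_pfn, 1UL << PAGE_SHIFT));    /* one past   */
        return 0;
}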

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a given
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}

#endif
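
With CONFIG_STRICT_DEVMEM enabled, attempts to read system RAM through /dev/mem fail even as root; only non-exclusive MMIO pages remain accessible. A userspace sketch of the observable effect (run as root; the target address is an assumption and must fall in RAM for the rejection to trigger):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[16];
        int fd = open("/dev/mem", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/mem");
                return 1;
        }
        /* 0x80000000 is assumed to be RAM here: with strict devmem the
         * read is rejected (typically EPERM) instead of returning data */
        if (pread(fd, buf, sizeof(buf), 0x80000000L) < 0)
                perror("pread");
        close(fd);
        return 0;
}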
