/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);
/*
 * One bit per slice. The lower slices cover 256MB segments up to the
 * 4GB boundary, which gives us 16 low slices. Above that, slices are
 * tracked in 1TB units.
 */
struct slice_mask {
        u64 low_slices;
        DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
        if (!_slice_debug)
                return;
        pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
        pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif
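
/*
 * For a concrete picture of the geometry, assuming the usual book3s64
 * values of SLICE_LOW_SHIFT == 28 (256MB) and SLICE_HIGH_SHIFT == 40
 * (1TB):
 *
 *   GET_LOW_SLICE_INDEX(0x30000000) == 0x30000000 >> 28 == 3
 *       an address at 768MB lives in low slice 3, so bit 3 of
 *       low_slices stands for the range [768MB, 1GB).
 *
 *   GET_HIGH_SLICE_INDEX(0x20000000000) == 2TB >> 40 == 2
 *       bit 2 of high_slices stands for the range [2TB, 3TB).
 *
 * High slice 0 nominally starts at address 0 and would overlap all of
 * the low slices; slice_high_has_vma() below clamps its start to the
 * 4GB boundary so that each address is owned by exactly one bitmap.
 */
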
static void slice_range_to_mask(unsigned long start, unsigned long len,
                                struct slice_mask *ret)
{
        unsigned long end = start + len - 1;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        if (start < SLICE_LOW_TOP) {
                unsigned long mend = min(end,
                                         (unsigned long)(SLICE_LOW_TOP - 1));

                ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(start));
        }

        if ((start + len) > SLICE_LOW_TOP) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

                bitmap_set(ret->high_slices, start_index, count);
        }
}

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm->context.slb_addr_limit - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

#ifdef CONFIG_PPC64
        /* Hack: so that each address is controlled by exactly one
         * of the high or low area bitmaps, the first high area starts
         * at 4GB, not 0 */
        if (start == 0)
                start = SLICE_LOW_TOP;
#endif

        return !slice_area_is_free(mm, start, end - start);
}

static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
                                unsigned long high_limit)
{
        unsigned long i;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;

        if (high_limit <= SLICE_LOW_TOP)
                return;

        for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
}

static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
                                unsigned long high_limit)
{
        unsigned char *hpsizes;
        int index, mask_index;
        unsigned long i;
        u64 lpsizes;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == psize)
                        ret->low_slices |= 1u << i;

        if (high_limit <= SLICE_LOW_TOP)
                return;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
                        __set_bit(i, ret->high_slices);
        }
}
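
/*
 * For illustration of the encoding read above: the per-slice page size
 * is a 4-bit MMU_PAGE_* index per slice. low_slices_psize is a u64
 * holding 16 nibbles, one per 256MB low slice; high_slices_psize is a
 * byte array holding two slices per byte. For high slice i the field
 * lives in hpsizes[i >> 1], in the low nibble when i is even and the
 * high nibble when i is odd.
 *
 * So if, say, the value 2 denotes 64K pages (the typical MMU_PAGE_64K
 * index) and 0 denotes 4K, then lpsizes == 0x220 means low slices 1
 * and 2 use 64K pages and the rest of the low range uses 4K, since
 * (lpsizes >> (1 * 4)) & 0xf == 2 and (lpsizes >> (2 * 4)) & 0xf == 2.
 */
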
static int slice_check_fit(struct mm_struct *mm,
                           struct slice_mask mask, struct slice_mask available)
{
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);
        /*
         * Only compare bits up to the address limit, not the full
         * bitmap size.
         */
        unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);

        if (!SLICE_NUM_HIGH)
                return (mask.low_slices & available.low_slices) ==
                       mask.low_slices;

        bitmap_and(result, mask.high_slices,
                   available.high_slices, slice_count);

        return (mask.low_slices & available.low_slices) == mask.low_slices &&
                bitmap_equal(result, mask.high_slices, slice_count);
}

static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        copy_mm_to_paca(current->active_mm);

        local_irq_save(flags);
        slb_flush_and_rebolt();
        local_irq_restore(flags);
#endif
}

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
        int index, mask_index;
        /* Write the new slice psize bits */
        unsigned char *hpsizes;
        u64 lpsizes;
        unsigned long i, flags;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (mask.low_slices & (1u << i))
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (test_bit(i, mask.high_slices))
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm->context.low_slices_psize,
                  (unsigned long)mm->context.high_slices_psize);

        spin_unlock_irqrestore(&slice_convert_lock, flags);

        copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
                                 struct slice_mask available,
                                 int end,
                                 unsigned long *boundary_addr)
{
        unsigned long slice;
        if (addr < SLICE_LOW_TOP) {
                slice = GET_LOW_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
                return !!(available.low_slices & (1u << slice));
        } else {
                slice = GET_HIGH_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) ?
                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
                return !!test_bit(slice, available.high_slices);
        }
}
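
/*
 * For illustration of the 'end' convention above: with end == 0 the
 * function reports the start boundary of the slice containing addr,
 * with end == 1 its end boundary. E.g. for addr == 0x18000000 (384MB,
 * low slice 1, assuming 256MB low slices):
 *
 *   end == 0: *boundary_addr = 1 << 28 == 0x10000000 (slice start)
 *   end == 1: *boundary_addr = 2 << 28 == 0x20000000 (slice end)
 *
 * The bottom-up and top-down scanners below use this to hop from one
 * slice boundary to the next in their respective directions.
 */
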
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long len,
                                              struct slice_mask available,
                                              int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, next_end;
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = TASK_UNMAPPED_BASE;
        /*
         * Scan up to the maximum address allowed for this mmap request.
         */
        while (addr < high_limit) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
                        continue;

 next_slice:
                /*
                 * At this point [info.low_limit; addr) covers
                 * available slices only and ends at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the next available slice.
                 */
                if (addr >= high_limit)
                        addr = high_limit;
                else if (slice_scan_available(addr, available, 1, &next_end)) {
                        addr = next_end;
                        goto next_slice;
                }
                info.high_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long len,
                                             struct slice_mask available,
                                             int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, prev;
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = mm->mmap_base;
        /*
         * If we are trying to allocate above DEFAULT_MAP_WINDOW,
         * add the difference to mmap_base. Apply this only to
         * requests whose high_limit is above DEFAULT_MAP_WINDOW.
         */
        if (high_limit > DEFAULT_MAP_WINDOW)
                addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

        while (addr > PAGE_SIZE) {
                info.high_limit = addr;
                if (!slice_scan_available(addr - 1, available, 0, &addr))
                        continue;

 prev_slice:
                /*
                 * At this point [addr; info.high_limit) covers
                 * available slices only and starts at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the previous available slice.
                 */
                if (addr < PAGE_SIZE)
                        addr = PAGE_SIZE;
                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
                        addr = prev;
                        goto prev_slice;
                }
                info.low_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}
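
/*
 * For illustration of the align_mask computation used by both scanners
 * above: with 64K pages (pshift == 16) and a 4K base page size
 * (PAGE_SHIFT == 12, PAGE_MASK == ~0xfffUL):
 *
 *   (1ul << 16) - 1    == 0xffff
 *   PAGE_MASK & 0xffff == 0xf000
 *
 * so vm_unmapped_area() is asked to return an address with bits 12-15
 * clear, i.e. 64K aligned; the bits below PAGE_SHIFT need no masking
 * because returned addresses are always page aligned anyway.
 */
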
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     struct slice_mask mask, int psize,
                                     int topdown, unsigned long high_limit)
{
        if (topdown)
                return slice_find_area_topdown(mm, len, mask, psize, high_limit);
        else
                return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
        dst->low_slices |= src->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
                  SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
        dst->low_slices &= ~src->low_slices;

        if (!SLICE_NUM_HIGH)
                return;
        bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
                      SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE   MMU_PAGE_64K
#else
#define MMU_PAGE_BASE   MMU_PAGE_4K
#endif
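
/*
 * For illustration of how the mask helpers above are combined by
 * slice_get_unmapped_area() below (low_slices shown as 16-bit values):
 *
 *   good      = 0x000f        slices already at the requested size
 *   free      = 0x0ff0        slices with no VMAs at all
 *   potential = good | free == 0x0fff   (slice_or_mask)
 *
 * If the final placement covers slices 0, 1, 4 and 5 (mask == 0x0033),
 * then slice_andnot_mask(&mask, &good_mask) strips the slices that
 * were already the right size, leaving 0x0030: only slices 4 and 5
 * actually need slice_convert().
 */
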
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown)
{
        struct slice_mask mask;
        struct slice_mask good_mask;
        struct slice_mask potential_mask;
        struct slice_mask compat_mask;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long page_size = 1UL << pshift;
        struct mm_struct *mm = current->mm;
        unsigned long newaddr;
        unsigned long high_limit;

        high_limit = DEFAULT_MAP_WINDOW;
        if (addr >= high_limit || (fixed && (addr + len > high_limit)))
                high_limit = TASK_SIZE;

        if (len > high_limit)
                return -ENOMEM;
        if (len & (page_size - 1))
                return -EINVAL;
        if (fixed) {
                if (addr & (page_size - 1))
                        return -EINVAL;
                if (addr > high_limit - len)
                        return -ENOMEM;
        }

        if (high_limit > mm->context.slb_addr_limit) {
                mm->context.slb_addr_limit = high_limit;
                on_each_cpu(slice_flush_segments, mm, 1);
        }

        /*
         * Initialize the various masks.
         */
        mask.low_slices = 0;

        /* silence stupid warning */
        potential_mask.low_slices = 0;

        compat_mask.low_slices = 0;

        if (SLICE_NUM_HIGH) {
                bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
                bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
                bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
        }

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
        BUG_ON(mm->context.slb_addr_limit == 0);
        VM_BUG_ON(radix_enabled());

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
                  addr, len, flags, topdown);

        /* If we got a hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, page_size);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore the hint if it's too large or overlaps a VMA */
                if (addr > high_limit - len ||
                    !slice_area_is_free(mm, addr, len))
                        addr = 0;
        }

        /* First make up a "good" mask of slices that have the right size
         * already
         */
        slice_mask_for_size(mm, psize, &good_mask, high_limit);
        slice_print_mask(" good_mask", good_mask);

        /*
         * Here "good" means slices that are already at the right page size,
         * "compat" means slices that have a compatible page size (i.e.
         * 4k in a 64k pagesize kernel), and "free" means slices without
         * any VMAs.
         *
         * If MAP_FIXED:
         *      check if fits in good | compat => OK
         *      check if fits in good | compat | free => convert free
         *      else bad
         * If have hint:
         *      check if hint fits in good => OK
         *      check if hint fits in good | free => convert free
         * Otherwise:
         *      search in good, found => OK
         *      search in good | free, found => convert free
         *      search in good | compat | free, found => convert free
         */

#ifdef CONFIG_PPC_64K_PAGES
        /* If we support combo pages, we can allow 64k pages in 4k slices */
        if (psize == MMU_PAGE_64K) {
                slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
                if (fixed)
                        slice_or_mask(&good_mask, &compat_mask);
        }
#endif

        /* First check the hint if it's valid or if we have MAP_FIXED */
        if (addr != 0 || fixed) {
                /* Build a mask for the requested range */
                slice_range_to_mask(addr, len, &mask);
                slice_print_mask(" mask", mask);

                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_fit(mm, mask, good_mask)) {
                        slice_dbg(" fits good !\n");
                        return addr;
                }
        } else {
                /* Now let's see if we can find something in the existing
                 * slices for that size
                 */
                newaddr = slice_find_area(mm, len, good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        /* Found within the good mask, we don't have to setup,
                         * we thus return directly
                         */
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        return newaddr;
                }
        }
        /*
         * We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
        slice_mask_for_free(mm, &potential_mask, high_limit);
        slice_or_mask(&potential_mask, &good_mask);
        slice_print_mask(" potential", potential_mask);

        if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
                slice_dbg(" fits potential !\n");
                goto convert;
        }

        /* If we have MAP_FIXED and failed the above steps, then error out */
        if (fixed)
                return -EBUSY;
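
        /*
         * Note the error distinction above: a MAP_FIXED request whose
         * range covers slices that are neither the right size nor
         * convertible (not free) fails with -EBUSY, since it is that
         * specific range which is unusable. The searches below instead
         * fail with -ENOMEM when no suitable range exists at all.
         */
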
        slice_dbg(" search...\n");

        /* If we had a hint that didn't work out, see if we can fit
         * anywhere in the good area.
         */
        if (addr) {
                addr = slice_find_area(mm, len, good_mask,
                                       psize, topdown, high_limit);
                if (addr != -ENOMEM) {
                        slice_dbg(" found area at 0x%lx\n", addr);
                        return addr;
                }
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        addr = slice_find_area(mm, len, potential_mask,
                               psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
        if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
                /* retry the search with 4k-page slices included */
                slice_or_mask(&potential_mask, &compat_mask);
                addr = slice_find_area(mm, len, potential_mask,
                                       psize, topdown, high_limit);
        }
#endif

        if (addr == -ENOMEM)
                return -ENOMEM;

        slice_range_to_mask(addr, len, &mask);
        slice_dbg(" found potential area at 0x%lx\n", addr);
        slice_print_mask(" mask", mask);

 convert:
        slice_andnot_mask(&mask, &good_mask);
        slice_andnot_mask(&mask, &compat_mask);
        if (mask.low_slices ||
            (SLICE_NUM_HIGH &&
             !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
                slice_convert(mm, mask, psize);
                if (psize > MMU_PAGE_BASE)
                        on_each_cpu(slice_flush_segments, mm, 1);
        }
        return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        return slice_get_unmapped_area(addr, len, flags,
                                       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        return slice_get_unmapped_area(addr0, len, flags,
                                       current->mm->context.user_psize, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        unsigned char *hpsizes;
        int index, mask_index;

        /*
         * Radix doesn't use slices, but can get enabled along with MMU_SLICE.
         */
        if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
                return MMU_PAGE_64K;
#else
                return MMU_PAGE_4K;
#endif
        }
        if (addr < SLICE_LOW_TOP) {
                u64 lpsizes;
                lpsizes = mm->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xf;
        }
        hpsizes = mm->context.high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
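
/*
 * For illustration of the high-slice lookup above, take
 * addr == 0x10020000000 (1TB + 512MB, assuming 1TB high slices):
 *
 *   index      = addr >> SLICE_HIGH_SHIFT == 1
 *   mask_index = index & 0x1              == 1
 *   byte       = hpsizes[index >> 1]      == hpsizes[0]
 *
 * so the result is the high nibble of hpsizes[0], i.e.
 * (hpsizes[0] >> 4) & 0xf.
 */
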
/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
        int index, mask_index;
        unsigned char *hpsizes;
        unsigned long flags, lpsizes;
        unsigned int old_psize;
        int i;

        slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

        VM_BUG_ON(radix_enabled());
        spin_lock_irqsave(&slice_convert_lock, flags);

        old_psize = mm->context.user_psize;
        slice_dbg(" old_psize=%d\n", old_psize);
        if (old_psize == psize)
                goto bail;

        mm->context.user_psize = psize;
        wmb();

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));
        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm->context.low_slices_psize,
                  (unsigned long)mm->context.high_slices_psize);

 bail:
        spin_unlock_irqrestore(&slice_convert_lock, flags);
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize)
{
        struct slice_mask mask;

        VM_BUG_ON(radix_enabled());

        slice_range_to_mask(start, len, &mask);
        slice_convert(mm, mask, psize);
}
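
/*
 * For illustration, a hypothetical caller that wanted to mark one
 * 256MB low slice as using 16M pages could do:
 *
 *      slice_set_range_psize(mm, 0x10000000, 0x10000000, MMU_PAGE_16M);
 *
 * which builds a slice_mask with only low slice 1 set (see
 * slice_range_to_mask()) and hands it to slice_convert().
 */
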
#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        struct slice_mask mask, available;
        unsigned int psize = mm->context.user_psize;
        unsigned long high_limit = mm->context.slb_addr_limit;

        if (radix_enabled())
                return 0;

        slice_range_to_mask(addr, len, &mask);
        slice_mask_for_size(mm, psize, &available, high_limit);
#ifdef CONFIG_PPC_64K_PAGES
        /* We need to account for 4k slices too */
        if (psize == MMU_PAGE_64K) {
                struct slice_mask compat_mask;
                slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
                slice_or_mask(&available, &compat_mask);
        }
#endif

#if 0 /* too verbose */
        slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
                  mm, addr, len);
        slice_print_mask(" mask", mask);
        slice_print_mask(" available", available);
#endif
        return !slice_check_fit(mm, mask, available);
}
#endif