
TOMOYO Linux Cross Reference
Linux/arch/x86/mm/pat.c


  1 /*
  2  * Handle caching attributes in page tables (PAT)
  3  *
  4  * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  5  *          Suresh B Siddha <suresh.b.siddha@intel.com>
  6  *
  7  * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
  8  */
  9 
 10 #include <linux/seq_file.h>
 11 #include <linux/bootmem.h>
 12 #include <linux/debugfs.h>
 13 #include <linux/kernel.h>
 14 #include <linux/pfn_t.h>
 15 #include <linux/slab.h>
 16 #include <linux/mm.h>
 17 #include <linux/fs.h>
 18 #include <linux/rbtree.h>
 19 
 20 #include <asm/cacheflush.h>
 21 #include <asm/processor.h>
 22 #include <asm/tlbflush.h>
 23 #include <asm/x86_init.h>
 24 #include <asm/pgtable.h>
 25 #include <asm/fcntl.h>
 26 #include <asm/e820.h>
 27 #include <asm/mtrr.h>
 28 #include <asm/page.h>
 29 #include <asm/msr.h>
 30 #include <asm/pat.h>
 31 #include <asm/io.h>
 32 
 33 #include "pat_internal.h"
 34 #include "mm_internal.h"
 35 
 36 #undef pr_fmt
 37 #define pr_fmt(fmt) "" fmt
 38 
 39 static bool boot_cpu_done;
 40 
 41 static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
 42 static void init_cache_modes(void);
 43 
 44 void pat_disable(const char *reason)
 45 {
 46         if (!__pat_enabled)
 47                 return;
 48 
 49         if (boot_cpu_done) {
 50                 WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
 51                 return;
 52         }
 53 
 54         __pat_enabled = 0;
 55         pr_info("x86/PAT: %s\n", reason);
 56 
 57         init_cache_modes();
 58 }
 59 
 60 static int __init nopat(char *str)
 61 {
 62         pat_disable("PAT support disabled.");
 63         return 0;
 64 }
 65 early_param("nopat", nopat);
 66 
 67 bool pat_enabled(void)
 68 {
 69         return !!__pat_enabled;
 70 }
 71 EXPORT_SYMBOL_GPL(pat_enabled);
 72 
 73 int pat_debug_enable;
 74 
 75 static int __init pat_debug_setup(char *str)
 76 {
 77         pat_debug_enable = 1;
 78         return 0;
 79 }
 80 __setup("debugpat", pat_debug_setup);
 81 
 82 #ifdef CONFIG_X86_PAT
 83 /*
 84  * X86 PAT uses the page flags arch_1 and uncached together to keep track of
 85  * the memory type of pages that have a backing struct page.
 86  *
 87  * X86 PAT supports 4 different memory types:
 88  *  - _PAGE_CACHE_MODE_WB
 89  *  - _PAGE_CACHE_MODE_WC
 90  *  - _PAGE_CACHE_MODE_UC_MINUS
 91  *  - _PAGE_CACHE_MODE_WT
 92  *
 93  * _PAGE_CACHE_MODE_WB is the default type.
 94  */
 95 
 96 #define _PGMT_WB                0
 97 #define _PGMT_WC                (1UL << PG_arch_1)
 98 #define _PGMT_UC_MINUS          (1UL << PG_uncached)
 99 #define _PGMT_WT                (1UL << PG_uncached | 1UL << PG_arch_1)
100 #define _PGMT_MASK              (1UL << PG_uncached | 1UL << PG_arch_1)
101 #define _PGMT_CLEAR_MASK        (~_PGMT_MASK)
102 
103 static inline enum page_cache_mode get_page_memtype(struct page *pg)
104 {
105         unsigned long pg_flags = pg->flags & _PGMT_MASK;
106 
107         if (pg_flags == _PGMT_WB)
108                 return _PAGE_CACHE_MODE_WB;
109         else if (pg_flags == _PGMT_WC)
110                 return _PAGE_CACHE_MODE_WC;
111         else if (pg_flags == _PGMT_UC_MINUS)
112                 return _PAGE_CACHE_MODE_UC_MINUS;
113         else
114                 return _PAGE_CACHE_MODE_WT;
115 }
116 
117 static inline void set_page_memtype(struct page *pg,
118                                     enum page_cache_mode memtype)
119 {
120         unsigned long memtype_flags;
121         unsigned long old_flags;
122         unsigned long new_flags;
123 
124         switch (memtype) {
125         case _PAGE_CACHE_MODE_WC:
126                 memtype_flags = _PGMT_WC;
127                 break;
128         case _PAGE_CACHE_MODE_UC_MINUS:
129                 memtype_flags = _PGMT_UC_MINUS;
130                 break;
131         case _PAGE_CACHE_MODE_WT:
132                 memtype_flags = _PGMT_WT;
133                 break;
134         case _PAGE_CACHE_MODE_WB:
135         default:
136                 memtype_flags = _PGMT_WB;
137                 break;
138         }
139 
140         do {
141                 old_flags = pg->flags;
142                 new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
143         } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
144 }
145 #else
146 static inline enum page_cache_mode get_page_memtype(struct page *pg)
147 {
148         return -1;
149 }
150 static inline void set_page_memtype(struct page *pg,
151                                     enum page_cache_mode memtype)
152 {
153 }
154 #endif
155 
156 enum {
157         PAT_UC = 0,             /* uncached */
158         PAT_WC = 1,             /* Write combining */
159         PAT_WT = 4,             /* Write Through */
160         PAT_WP = 5,             /* Write Protected */
161         PAT_WB = 6,             /* Write Back (default) */
162         PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
163 };
164 
165 #define CM(c) (_PAGE_CACHE_MODE_ ## c)
166 
167 static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
168 {
169         enum page_cache_mode cache;
170         char *cache_mode;
171 
172         switch (pat_val) {
173         case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
174         case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
175         case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
176         case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
177         case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
178         case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
179         default:           cache = CM(WB);       cache_mode = "WB  "; break;
180         }
181 
182         memcpy(msg, cache_mode, 4);
183 
184         return cache;
185 }
186 
187 #undef CM
188 
189 /*
190  * Update the cache-mode-to-pgprot translation tables according to the PAT
191  * configuration.
192  * Using lower indices is preferred, so we start with the highest index.
193  */
194 static void __init_cache_modes(u64 pat)
195 {
196         enum page_cache_mode cache;
197         char pat_msg[33];
198         int i;
199 
200         pat_msg[32] = 0;
201         for (i = 7; i >= 0; i--) {
202                 cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
203                                            pat_msg + 4 * i);
204                 update_cache_mode_entry(i, cache);
205         }
206         pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
207 }
208 
209 #define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))
210 
211 static void pat_bsp_init(u64 pat)
212 {
213         u64 tmp_pat;
214 
215         if (!boot_cpu_has(X86_FEATURE_PAT)) {
216                 pat_disable("PAT not supported by CPU.");
217                 return;
218         }
219 
220         rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
221         if (!tmp_pat) {
222                 pat_disable("PAT MSR is 0, disabled.");
223                 return;
224         }
225 
226         wrmsrl(MSR_IA32_CR_PAT, pat);
227 
228         __init_cache_modes(pat);
229 }
230 
231 static void pat_ap_init(u64 pat)
232 {
233         if (!boot_cpu_has(X86_FEATURE_PAT)) {
234                 /*
235                  * If this happens we are on a secondary CPU, but switched to
236                  * PAT on the boot CPU. We have no way to undo PAT.
237                  */
238                 panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
239         }
240 
241         wrmsrl(MSR_IA32_CR_PAT, pat);
242 }
243 
244 static void init_cache_modes(void)
245 {
246         u64 pat = 0;
247         static int init_cm_done;
248 
249         if (init_cm_done)
250                 return;
251 
252         if (boot_cpu_has(X86_FEATURE_PAT)) {
253                 /*
254                  * CPU supports PAT. Set PAT table to be consistent with
255                  * PAT MSR. This case supports "nopat" boot option, and
256                  * MTRRs. In particular, Xen has a unique setup for the PAT MSR.
257                  *
258                  * If the PAT MSR returns 0, it is considered invalid and we
259                  * emulate "no PAT".
260                  * as No PAT.
261                  */
262                 rdmsrl(MSR_IA32_CR_PAT, pat);
263         }
264 
265         if (!pat) {
266                 /*
267                  * No PAT. Emulate the PAT table that corresponds to the two
268                  * cache bits, PWT (Write Through) and PCD (Cache Disable).
269                  * This setup is also the same as the BIOS default setup.
270                  *
271                  * PTE encoding:
272                  *
273                  *       PCD
274                  *       |PWT  PAT
275                  *       ||    slot
276                  *       00    0    WB : _PAGE_CACHE_MODE_WB
277                  *       01    1    WT : _PAGE_CACHE_MODE_WT
278                  *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
279                  *       11    3    UC : _PAGE_CACHE_MODE_UC
280                  *
281                  * NOTE: When WC or WP is used, it is redirected to UC- per
282                  * the default setup in __cachemode2pte_tbl[].
283                  */
284                 pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
285                       PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
286         }
287 
288         __init_cache_modes(pat);
289 
290         init_cm_done = 1;
291 }
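/*
 * For illustration, with the PAT(x, y) encoding defined above, the "no PAT"
 * emulation table built in init_cache_modes() composes to the power-on
 * default MSR value documented in the Intel SDM:
 *
 *   PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
 *   PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC)
 *     == 0x0007040600070406ULL
 */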
292 
293 /**
294  * pat_init - Initialize PAT MSR and PAT table
295  *
296  * This function initializes PAT MSR and PAT table with an OS-defined value
297  * to enable additional cache attributes, WC and WT.
298  *
299  * This function must be called on all CPUs using the specific sequence of
300  * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides this
301  * procedure for PAT.
302  */
303 void pat_init(void)
304 {
305         u64 pat;
306         struct cpuinfo_x86 *c = &boot_cpu_data;
307 
308         if (!pat_enabled()) {
309                 init_cache_modes();
310                 return;
311         }
312 
313         if ((c->x86_vendor == X86_VENDOR_INTEL) &&
314             (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
315              ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
316                 /*
317                  * PAT support with the lower four entries. Intel Pentium 2,
318                  * 3, M, and 4 are affected by PAT errata, which makes the
319                  * upper four entries unusable. To be on the safe side, we don't
320                  * use those.
321                  *
322                  *  PTE encoding:
323                  *      PAT
324                  *      |PCD
325                  *      ||PWT  PAT
326                  *      |||    slot
327                  *      000    0    WB : _PAGE_CACHE_MODE_WB
328                  *      001    1    WC : _PAGE_CACHE_MODE_WC
329                  *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
330                  *      011    3    UC : _PAGE_CACHE_MODE_UC
331                  * PAT bit unused
332                  *
333                  * NOTE: When WT or WP is used, it is redirected to UC- per
334                  * the default setup in __cachemode2pte_tbl[].
335                  */
336                 pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
337                       PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
338         } else {
339                 /*
340                  * Full PAT support.  We put WT in slot 7 to improve
341                  * robustness in the presence of errata that might cause
342                  * the high PAT bit to be ignored.  This way, a buggy slot 7
343                  * access will hit slot 3, and slot 3 is UC, so at worst
344                  * we lose performance without causing a correctness issue.
345                  * Pentium 4 erratum N46 is an example of such an erratum,
346                  * although we try not to use PAT at all on affected CPUs.
347                  *
348                  *  PTE encoding:
349                  *      PAT
350                  *      |PCD
351                  *      ||PWT  PAT
352                  *      |||    slot
353                  *      000    0    WB : _PAGE_CACHE_MODE_WB
354                  *      001    1    WC : _PAGE_CACHE_MODE_WC
355                  *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
356                  *      011    3    UC : _PAGE_CACHE_MODE_UC
357                  *      100    4    WB : Reserved
358                  *      101    5    WC : Reserved
359                  *      110    6    UC-: Reserved
360                  *      111    7    WT : _PAGE_CACHE_MODE_WT
361                  *
362                  * The reserved slots are unused, but mapped to their
363                  * corresponding types in the presence of PAT errata.
364                  */
365                 pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
366                       PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, WT);
367         }
368 
369         if (!boot_cpu_done) {
370                 pat_bsp_init(pat);
371                 boot_cpu_done = true;
372         } else {
373                 pat_ap_init(pat);
374         }
375 }
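/*
 * For illustration, the full-PAT table built above composes to
 *
 *   PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
 *   PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, WT)
 *     == 0x0407010600070106ULL
 *
 * which __init_cache_modes() reports in the boot log along the lines of
 * "x86/PAT: Configuration [0-7]: WB  WC  UC- UC  WB  WC  UC- WT".
 */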
376 
377 #undef PAT
378 
379 static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype accesses */
380 
381 /*
382  * Intersects the PAT memory type with the MTRR memory type and returns
383  * the resulting memory type as PAT understands it.
384  * (The type encodings used by PAT and MTRR are not the same.)
385  * The intersection is based on the "Effective Memory Type" tables in the
386  * IA-32 SDM vol 3a.
387  */
388 static unsigned long pat_x_mtrr_type(u64 start, u64 end,
389                                      enum page_cache_mode req_type)
390 {
391         /*
392          * Look for MTRR hint to get the effective type in case where PAT
393          * request is for WB.
394          */
395         if (req_type == _PAGE_CACHE_MODE_WB) {
396                 u8 mtrr_type, uniform;
397 
398                 mtrr_type = mtrr_type_lookup(start, end, &uniform);
399                 if (mtrr_type != MTRR_TYPE_WRBACK)
400                         return _PAGE_CACHE_MODE_UC_MINUS;
401 
402                 return _PAGE_CACHE_MODE_WB;
403         }
404 
405         return req_type;
406 }
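/*
 * Worked example of the intersection above: a WB request over a range that
 * the MTRRs cover as UC or WC comes back as UC-, so the weaker MTRR type
 * still wins; any non-WB request is passed through unchanged.
 */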
407 
408 struct pagerange_state {
409         unsigned long           cur_pfn;
410         int                     ram;
411         int                     not_ram;
412 };
413 
414 static int
415 pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
416 {
417         struct pagerange_state *state = arg;
418 
419         state->not_ram  |= initial_pfn > state->cur_pfn;
420         state->ram      |= total_nr_pages > 0;
421         state->cur_pfn   = initial_pfn + total_nr_pages;
422 
423         return state->ram && state->not_ram;
424 }
425 
426 static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
427 {
428         int ret = 0;
429         unsigned long start_pfn = start >> PAGE_SHIFT;
430         unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
431         struct pagerange_state state = {start_pfn, 0, 0};
432 
433         /*
434          * For legacy reasons, physical address ranges in the legacy ISA
435          * region are tracked as non-RAM. This allows users of /dev/mem
436          * to map portions of the legacy ISA region, even when some of
437          * those portions are listed (or not even listed) with different
438          * e820 types (RAM/reserved/...).
439          */
440         if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
441                 start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
442 
443         if (start_pfn < end_pfn) {
444                 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
445                                 &state, pagerange_is_ram_callback);
446         }
447 
448         return (ret > 0) ? -1 : (state.ram ? 1 : 0);
449 }
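/*
 * Reading the return value above: 1 means the range is tracked as RAM,
 * 0 means none of it is RAM, and -1 flags a mixed RAM/non-RAM range (the
 * callback bails out as soon as it has seen both), which callers treat as
 * an error.
 */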
450 
451 /*
452  * For RAM pages, we use page flags to mark the pages with appropriate type.
453  * The page flags are limited to four types, WB (default), WC, WT and UC-.
454  * WP request fails with -EINVAL, and UC gets redirected to UC-.  Setting
455  * a new memory type is only allowed for a page mapped with the default WB
456  * type.
457  *
458  * Here we do two passes:
459  * - Find the memtype of all the pages in the range, look for any conflicts.
460  * - In case of no conflicts, set the new memtype for pages in the range.
461  */
462 static int reserve_ram_pages_type(u64 start, u64 end,
463                                   enum page_cache_mode req_type,
464                                   enum page_cache_mode *new_type)
465 {
466         struct page *page;
467         u64 pfn;
468 
469         if (req_type == _PAGE_CACHE_MODE_WP) {
470                 if (new_type)
471                         *new_type = _PAGE_CACHE_MODE_UC_MINUS;
472                 return -EINVAL;
473         }
474 
475         if (req_type == _PAGE_CACHE_MODE_UC) {
476                 /* We do not support strong UC */
477                 WARN_ON_ONCE(1);
478                 req_type = _PAGE_CACHE_MODE_UC_MINUS;
479         }
480 
481         for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
482                 enum page_cache_mode type;
483 
484                 page = pfn_to_page(pfn);
485                 type = get_page_memtype(page);
486                 if (type != _PAGE_CACHE_MODE_WB) {
487                         pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
488                                 start, end - 1, type, req_type);
489                         if (new_type)
490                                 *new_type = type;
491 
492                         return -EBUSY;
493                 }
494         }
495 
496         if (new_type)
497                 *new_type = req_type;
498 
499         for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
500                 page = pfn_to_page(pfn);
501                 set_page_memtype(page, req_type);
502         }
503         return 0;
504 }
505 
506 static int free_ram_pages_type(u64 start, u64 end)
507 {
508         struct page *page;
509         u64 pfn;
510 
511         for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
512                 page = pfn_to_page(pfn);
513                 set_page_memtype(page, _PAGE_CACHE_MODE_WB);
514         }
515         return 0;
516 }
517 
518 /*
519  * req_type typically has one of the following:
520  * - _PAGE_CACHE_MODE_WB
521  * - _PAGE_CACHE_MODE_WC
522  * - _PAGE_CACHE_MODE_UC_MINUS
523  * - _PAGE_CACHE_MODE_UC
524  * - _PAGE_CACHE_MODE_WT
525  *
526  * If new_type is NULL, the function returns an error if it cannot reserve the
527  * region with req_type. If new_type is non-NULL, the function returns the
528  * available type in *new_type on success. On any error it returns a
529  * negative value.
530  */
531 int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
532                     enum page_cache_mode *new_type)
533 {
534         struct memtype *new;
535         enum page_cache_mode actual_type;
536         int is_range_ram;
537         int err = 0;
538 
539         BUG_ON(start >= end); /* end is exclusive */
540 
541         if (!pat_enabled()) {
542                 /* This is identical to page table setting without PAT */
543                 if (new_type)
544                         *new_type = req_type;
545                 return 0;
546         }
547 
548         /* Low ISA region is always mapped WB in page table. No need to track */
549         if (x86_platform.is_untracked_pat_range(start, end)) {
550                 if (new_type)
551                         *new_type = _PAGE_CACHE_MODE_WB;
552                 return 0;
553         }
554 
555         /*
556          * Call mtrr_lookup to get the type hint. This is an
557          * optimization for /dev/mem mmap'ers into WB memory (BIOS
558          * tools and ACPI tools). Use WB request for WB memory and use
559          * UC_MINUS otherwise.
560          */
561         actual_type = pat_x_mtrr_type(start, end, req_type);
562 
563         if (new_type)
564                 *new_type = actual_type;
565 
566         is_range_ram = pat_pagerange_is_ram(start, end);
567         if (is_range_ram == 1) {
568 
569                 err = reserve_ram_pages_type(start, end, req_type, new_type);
570 
571                 return err;
572         } else if (is_range_ram < 0) {
573                 return -EINVAL;
574         }
575 
576         new  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
577         if (!new)
578                 return -ENOMEM;
579 
580         new->start      = start;
581         new->end        = end;
582         new->type       = actual_type;
583 
584         spin_lock(&memtype_lock);
585 
586         err = rbt_memtype_check_insert(new, new_type);
587         if (err) {
588                 pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
589                         start, end - 1,
590                         cattr_name(new->type), cattr_name(req_type));
591                 kfree(new);
592                 spin_unlock(&memtype_lock);
593 
594                 return err;
595         }
596 
597         spin_unlock(&memtype_lock);
598 
599         dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
600                 start, end - 1, cattr_name(new->type), cattr_name(req_type),
601                 new_type ? cattr_name(*new_type) : "-");
602 
603         return err;
604 }
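/*
 * Minimal usage sketch; example_claim_wc() below is hypothetical and only
 * shows the intended calling pattern.  A user that wants write-combining for
 * a non-RAM window reserves the range, accepts whatever compatible type comes
 * back, and drops the reservation when it unmaps.  The real callers are the
 * ioremap_*() and remap_pfn_range() paths.
 */
static int __maybe_unused example_claim_wc(u64 start, u64 end)
{
        enum page_cache_mode new_type;
        int ret;

        ret = reserve_memtype(start, end, _PAGE_CACHE_MODE_WC, &new_type);
        if (ret)
                return ret;     /* a conflicting reservation already exists */

        /* ... map the range using cachemode2protval(new_type) ... */

        free_memtype(start, end);
        return 0;
}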
605 
606 int free_memtype(u64 start, u64 end)
607 {
608         int err = -EINVAL;
609         int is_range_ram;
610         struct memtype *entry;
611 
612         if (!pat_enabled())
613                 return 0;
614 
615         /* Low ISA region is always mapped WB. No need to track */
616         if (x86_platform.is_untracked_pat_range(start, end))
617                 return 0;
618 
619         is_range_ram = pat_pagerange_is_ram(start, end);
620         if (is_range_ram == 1) {
621 
622                 err = free_ram_pages_type(start, end);
623 
624                 return err;
625         } else if (is_range_ram < 0) {
626                 return -EINVAL;
627         }
628 
629         spin_lock(&memtype_lock);
630         entry = rbt_memtype_erase(start, end);
631         spin_unlock(&memtype_lock);
632 
633         if (IS_ERR(entry)) {
634                 pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
635                         current->comm, current->pid, start, end - 1);
636                 return -EINVAL;
637         }
638 
639         kfree(entry);
640 
641         dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
642 
643         return 0;
644 }
645 
646 
647 /**
648  * lookup_memtype - Looks up the memory type for a physical address
649  * @paddr: physical address whose memory type needs to be looked up
650  *
651  * Only to be called when PAT is enabled
652  *
653  * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
654  * or _PAGE_CACHE_MODE_WT.
655  */
656 static enum page_cache_mode lookup_memtype(u64 paddr)
657 {
658         enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
659         struct memtype *entry;
660 
661         if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
662                 return rettype;
663 
664         if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
665                 struct page *page;
666 
667                 page = pfn_to_page(paddr >> PAGE_SHIFT);
668                 return get_page_memtype(page);
669         }
670 
671         spin_lock(&memtype_lock);
672 
673         entry = rbt_memtype_lookup(paddr);
674         if (entry != NULL)
675                 rettype = entry->type;
676         else
677                 rettype = _PAGE_CACHE_MODE_UC_MINUS;
678 
679         spin_unlock(&memtype_lock);
680         return rettype;
681 }
682 
683 /**
684  * io_reserve_memtype - Request a memory type mapping for a region of memory
685  * @start: start (physical address) of the region
686  * @end: end (physical address) of the region
687  * @type: A pointer to the requested memtype. On success, the requested type
688  * or another compatible type that was available for the region is returned.
689  *
690  * On success, returns 0
691  * On failure, returns non-zero
692  */
693 int io_reserve_memtype(resource_size_t start, resource_size_t end,
694                         enum page_cache_mode *type)
695 {
696         resource_size_t size = end - start;
697         enum page_cache_mode req_type = *type;
698         enum page_cache_mode new_type;
699         int ret;
700 
701         WARN_ON_ONCE(iomem_map_sanity_check(start, size));
702 
703         ret = reserve_memtype(start, end, req_type, &new_type);
704         if (ret)
705                 goto out_err;
706 
707         if (!is_new_memtype_allowed(start, size, req_type, new_type))
708                 goto out_free;
709 
710         if (kernel_map_sync_memtype(start, size, new_type) < 0)
711                 goto out_free;
712 
713         *type = new_type;
714         return 0;
715 
716 out_free:
717         free_memtype(start, end);
718         ret = -EBUSY;
719 out_err:
720         return ret;
721 }
722 
723 /**
724  * io_free_memtype - Release a memory type mapping for a region of memory
725  * @start: start (physical address) of the region
726  * @end: end (physical address) of the region
727  */
728 void io_free_memtype(resource_size_t start, resource_size_t end)
729 {
730         free_memtype(start, end);
731 }
732 
733 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
734                                 unsigned long size, pgprot_t vma_prot)
735 {
736         return vma_prot;
737 }
738 
739 #ifdef CONFIG_STRICT_DEVMEM
740 /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
741 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
742 {
743         return 1;
744 }
745 #else
746 /* This check is needed to avoid cache aliasing when PAT is enabled */
747 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
748 {
749         u64 from = ((u64)pfn) << PAGE_SHIFT;
750         u64 to = from + size;
751         u64 cursor = from;
752 
753         if (!pat_enabled())
754                 return 1;
755 
756         while (cursor < to) {
757                 if (!devmem_is_allowed(pfn))
758                         return 0;
759                 cursor += PAGE_SIZE;
760                 pfn++;
761         }
762         return 1;
763 }
764 #endif /* CONFIG_STRICT_DEVMEM */
765 
766 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
767                                 unsigned long size, pgprot_t *vma_prot)
768 {
769         enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
770 
771         if (!range_is_allowed(pfn, size))
772                 return 0;
773 
774         if (file->f_flags & O_DSYNC)
775                 pcm = _PAGE_CACHE_MODE_UC_MINUS;
776 
777         *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
778                              cachemode2protval(pcm));
779         return 1;
780 }
781 
782 /*
783  * Change the memory type for the physical address range in kernel identity
784  * mapping space if that range is part of the identity map.
785  */
786 int kernel_map_sync_memtype(u64 base, unsigned long size,
787                             enum page_cache_mode pcm)
788 {
789         unsigned long id_sz;
790 
791         if (base > __pa(high_memory-1))
792                 return 0;
793 
794         /*
795          * some areas in the middle of the kernel identity range
796          * are not mapped, like the PCI space.
797          */
798         if (!page_is_ram(base >> PAGE_SHIFT))
799                 return 0;
800 
801         id_sz = (__pa(high_memory-1) <= base + size) ?
802                                 __pa(high_memory) - base :
803                                 size;
804 
805         if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
806                 pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
807                         current->comm, current->pid,
808                         cattr_name(pcm),
809                         base, (unsigned long long)(base + size-1));
810                 return -EINVAL;
811         }
812         return 0;
813 }
814 
815 /*
816  * Internal interface to reserve a range of physical memory with prot.
817  * Reserves non-RAM regions only; after a successful reserve_memtype(),
818  * this function also keeps the identity mapping (if any) in sync with the new prot.
819  */
820 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
821                                 int strict_prot)
822 {
823         int is_ram = 0;
824         int ret;
825         enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
826         enum page_cache_mode pcm = want_pcm;
827 
828         is_ram = pat_pagerange_is_ram(paddr, paddr + size);
829 
830         /*
831          * reserve_pfn_range() for RAM pages. We do not refcount to keep
832          * track of the number of mappings of RAM pages. We can assert that
833          * the type requested matches the type of the first page in the range.
834          */
835         if (is_ram) {
836                 if (!pat_enabled())
837                         return 0;
838 
839                 pcm = lookup_memtype(paddr);
840                 if (want_pcm != pcm) {
841                         pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
842                                 current->comm, current->pid,
843                                 cattr_name(want_pcm),
844                                 (unsigned long long)paddr,
845                                 (unsigned long long)(paddr + size - 1),
846                                 cattr_name(pcm));
847                         *vma_prot = __pgprot((pgprot_val(*vma_prot) &
848                                              (~_PAGE_CACHE_MASK)) |
849                                              cachemode2protval(pcm));
850                 }
851                 return 0;
852         }
853 
854         ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
855         if (ret)
856                 return ret;
857 
858         if (pcm != want_pcm) {
859                 if (strict_prot ||
860                     !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
861                         free_memtype(paddr, paddr + size);
862                         pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
863                                current->comm, current->pid,
864                                cattr_name(want_pcm),
865                                (unsigned long long)paddr,
866                                (unsigned long long)(paddr + size - 1),
867                                cattr_name(pcm));
868                         return -EINVAL;
869                 }
870                 /*
871                  * In the non-strict case we allow returning a different type
872                  * than the one requested.
873                  */
874                 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
875                                       (~_PAGE_CACHE_MASK)) |
876                                      cachemode2protval(pcm));
877         }
878 
879         if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
880                 free_memtype(paddr, paddr + size);
881                 return -EINVAL;
882         }
883         return 0;
884 }
885 
886 /*
887  * Internal interface to free a range of physical memory.
888  * Frees non-RAM regions only.
889  */
890 static void free_pfn_range(u64 paddr, unsigned long size)
891 {
892         int is_ram;
893 
894         is_ram = pat_pagerange_is_ram(paddr, paddr + size);
895         if (is_ram == 0)
896                 free_memtype(paddr, paddr + size);
897 }
898 
899 /*
900  * track_pfn_copy is called when a vma covering the pfnmap gets
901  * copied through copy_page_range().
902  *
903  * If the vma has a linear pfn mapping for the entire range, we get the prot from
904  * the pte and reserve the entire vma range with a single reserve_pfn_range() call.
905  */
906 int track_pfn_copy(struct vm_area_struct *vma)
907 {
908         resource_size_t paddr;
909         unsigned long prot;
910         unsigned long vma_size = vma->vm_end - vma->vm_start;
911         pgprot_t pgprot;
912 
913         if (vma->vm_flags & VM_PAT) {
914                 /*
915                  * reserve the whole chunk covered by vma. We need the
916                  * starting address and protection from pte.
917                  */
918                 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
919                         WARN_ON_ONCE(1);
920                         return -EINVAL;
921                 }
922                 pgprot = __pgprot(prot);
923                 return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
924         }
925 
926         return 0;
927 }
928 
929 /*
930  * prot is passed in as a parameter for the new mapping. If the vma has
931  * a linear pfn mapping for the entire range, or no vma is provided,
932  * reserve the entire pfn + size range with a single reserve_pfn_range()
933  * call.
934  */
935 int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
936                     unsigned long pfn, unsigned long addr, unsigned long size)
937 {
938         resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
939         enum page_cache_mode pcm;
940 
941         /* reserve the whole chunk starting from paddr */
942         if (!vma || (addr == vma->vm_start
943                                 && size == (vma->vm_end - vma->vm_start))) {
944                 int ret;
945 
946                 ret = reserve_pfn_range(paddr, size, prot, 0);
947                 if (ret == 0 && vma)
948                         vma->vm_flags |= VM_PAT;
949                 return ret;
950         }
951 
952         if (!pat_enabled())
953                 return 0;
954 
955         /*
956          * For anything smaller than the vma size we set prot based on the
957          * lookup.
958          */
959         pcm = lookup_memtype(paddr);
960 
961         /* Check memtype for the remaining pages */
962         while (size > PAGE_SIZE) {
963                 size -= PAGE_SIZE;
964                 paddr += PAGE_SIZE;
965                 if (pcm != lookup_memtype(paddr))
966                         return -EINVAL;
967         }
968 
969         *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
970                          cachemode2protval(pcm));
971 
972         return 0;
973 }
974 
975 int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
976                      pfn_t pfn)
977 {
978         enum page_cache_mode pcm;
979 
980         if (!pat_enabled())
981                 return 0;
982 
983         /* Set prot based on lookup */
984         pcm = lookup_memtype(pfn_t_to_phys(pfn));
985         *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
986                          cachemode2protval(pcm));
987 
988         return 0;
989 }
990 
991 /*
992  * untrack_pfn is called while unmapping a pfnmap for a region.
993  * untrack can be called for a specific region indicated by pfn and size, or
994  * for the entire vma (in which case pfn and size are zero).
995  */
996 void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
997                  unsigned long size)
998 {
999         resource_size_t paddr;
1000         unsigned long prot;
1001 
1002         if (vma && !(vma->vm_flags & VM_PAT))
1003                 return;
1004 
1005         /* free the chunk starting from pfn or the whole chunk */
1006         paddr = (resource_size_t)pfn << PAGE_SHIFT;
1007         if (!paddr && !size) {
1008                 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
1009                         WARN_ON_ONCE(1);
1010                         return;
1011                 }
1012 
1013                 size = vma->vm_end - vma->vm_start;
1014         }
1015         free_pfn_range(paddr, size);
1016         if (vma)
1017                 vma->vm_flags &= ~VM_PAT;
1018 }
1019 
1020 /*
1021  * untrack_pfn_moved is called while mremapping a pfnmap for a new region,
1022  * with the old vma after its pfnmap page table has been removed.  The new
1023  * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
1024  */
1025 void untrack_pfn_moved(struct vm_area_struct *vma)
1026 {
1027         vma->vm_flags &= ~VM_PAT;
1028 }
1029 
1030 pgprot_t pgprot_writecombine(pgprot_t prot)
1031 {
1032         return __pgprot(pgprot_val(prot) |
1033                                 cachemode2protval(_PAGE_CACHE_MODE_WC));
1034 }
1035 EXPORT_SYMBOL_GPL(pgprot_writecombine);
1036 
1037 pgprot_t pgprot_writethrough(pgprot_t prot)
1038 {
1039         return __pgprot(pgprot_val(prot) |
1040                                 cachemode2protval(_PAGE_CACHE_MODE_WT));
1041 }
1042 EXPORT_SYMBOL_GPL(pgprot_writethrough);
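/*
 * Usage sketch; example_mmap_wc() is hypothetical driver code, not part of
 * pat.c.  A device mmap handler that wants write-combining marks the vma's
 * protection with pgprot_writecombine() before calling remap_pfn_range(),
 * which then goes through track_pfn_remap() above for PAT tracking.
 */
static int __maybe_unused example_mmap_wc(struct vm_area_struct *vma,
                                          unsigned long pfn)
{
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
}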
1043 
1044 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
1045 
1046 static struct memtype *memtype_get_idx(loff_t pos)
1047 {
1048         struct memtype *print_entry;
1049         int ret;
1050 
1051         print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
1052         if (!print_entry)
1053                 return NULL;
1054 
1055         spin_lock(&memtype_lock);
1056         ret = rbt_memtype_copy_nth_element(print_entry, pos);
1057         spin_unlock(&memtype_lock);
1058 
1059         if (!ret) {
1060                 return print_entry;
1061         } else {
1062                 kfree(print_entry);
1063                 return NULL;
1064         }
1065 }
1066 
1067 static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
1068 {
1069         if (*pos == 0) {
1070                 ++*pos;
1071                 seq_puts(seq, "PAT memtype list:\n");
1072         }
1073 
1074         return memtype_get_idx(*pos);
1075 }
1076 
1077 static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1078 {
1079         ++*pos;
1080         return memtype_get_idx(*pos);
1081 }
1082 
1083 static void memtype_seq_stop(struct seq_file *seq, void *v)
1084 {
1085 }
1086 
1087 static int memtype_seq_show(struct seq_file *seq, void *v)
1088 {
1089         struct memtype *print_entry = (struct memtype *)v;
1090 
1091         seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
1092                         print_entry->start, print_entry->end);
1093         kfree(print_entry);
1094 
1095         return 0;
1096 }
1097 
1098 static const struct seq_operations memtype_seq_ops = {
1099         .start = memtype_seq_start,
1100         .next  = memtype_seq_next,
1101         .stop  = memtype_seq_stop,
1102         .show  = memtype_seq_show,
1103 };
1104 
1105 static int memtype_seq_open(struct inode *inode, struct file *file)
1106 {
1107         return seq_open(file, &memtype_seq_ops);
1108 }
1109 
1110 static const struct file_operations memtype_fops = {
1111         .open    = memtype_seq_open,
1112         .read    = seq_read,
1113         .llseek  = seq_lseek,
1114         .release = seq_release,
1115 };
1116 
1117 static int __init pat_memtype_list_init(void)
1118 {
1119         if (pat_enabled()) {
1120                 debugfs_create_file("pat_memtype_list", S_IRUSR,
1121                                     arch_debugfs_dir, NULL, &memtype_fops);
1122         }
1123         return 0;
1124 }
1125 
1126 late_initcall(pat_memtype_list_init);
1127 
1128 #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
1129 
