
TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/cpu/resctrl/pseudo_lock.c

Version: linux-5.1-rc5

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Resource Director Technology (RDT)
  4  *
  5  * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
  6  *
  7  * Copyright (C) 2018 Intel Corporation
  8  *
  9  * Author: Reinette Chatre <reinette.chatre@intel.com>
 10  */
 11 
 12 #define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
 13 
 14 #include <linux/cacheinfo.h>
 15 #include <linux/cpu.h>
 16 #include <linux/cpumask.h>
 17 #include <linux/debugfs.h>
 18 #include <linux/kthread.h>
 19 #include <linux/mman.h>
 20 #include <linux/perf_event.h>
 21 #include <linux/pm_qos.h>
 22 #include <linux/slab.h>
 23 #include <linux/uaccess.h>
 24 
 25 #include <asm/cacheflush.h>
 26 #include <asm/intel-family.h>
 27 #include <asm/resctrl_sched.h>
 28 #include <asm/perf_event.h>
 29 
 30 #include "../../events/perf_event.h" /* For X86_CONFIG() */
 31 #include "internal.h"
 32 
 33 #define CREATE_TRACE_POINTS
 34 #include "pseudo_lock_event.h"
 35 
 36 /*
 37  * The bits needed to disable hardware prefetching vary based on the
 38  * platform. During initialization we discover which bits to use.
 39  */
 40 static u64 prefetch_disable_bits;
 41 
 42 /*
 43  * Major number assigned to and shared by all devices exposing
 44  * pseudo-locked regions.
 45  */
 46 static unsigned int pseudo_lock_major;
 47 static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
 48 static struct class *pseudo_lock_class;
 49 
 50 /**
 51  * get_prefetch_disable_bits - prefetch disable bits of supported platforms
 52  *
 53  * Capture the list of platforms that have been validated to support
 54  * pseudo-locking. This includes testing to ensure that pseudo-locked regions
 55  * with low cache miss rates can be created under a variety of load conditions,
 56  * and that these pseudo-locked regions can maintain their low cache
 57  * miss rates under a variety of load conditions for significant lengths of time.
 58  *
 59  * After a platform has been validated to support pseudo-locking its
 60  * hardware prefetch disable bits are included here as they are documented
 61  * in the SDM.
 62  *
 63  * When adding a platform here, also add support for its cache events to
 64  * measure_l2_residency() or measure_l3_residency() below.
 65  *
 66  * Return:
 67  * If the platform is supported, the bits to disable hardware prefetchers;
 68  * 0 if the platform is not supported.
 69  */
 70 static u64 get_prefetch_disable_bits(void)
 71 {
 72         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 73             boot_cpu_data.x86 != 6)
 74                 return 0;
 75 
 76         switch (boot_cpu_data.x86_model) {
 77         case INTEL_FAM6_BROADWELL_X:
 78                 /*
 79                  * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
 80                  * as:
 81                  * 0    L2 Hardware Prefetcher Disable (R/W)
 82                  * 1    L2 Adjacent Cache Line Prefetcher Disable (R/W)
 83                  * 2    DCU Hardware Prefetcher Disable (R/W)
 84                  * 3    DCU IP Prefetcher Disable (R/W)
 85                  * 63:4 Reserved
 86                  */
 87                 return 0xF;
 88         case INTEL_FAM6_ATOM_GOLDMONT:
 89         case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
 90                 /*
 91                  * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
 92                  * as:
 93                  * 0     L2 Hardware Prefetcher Disable (R/W)
 94                  * 1     Reserved
 95                  * 2     DCU Hardware Prefetcher Disable (R/W)
 96                  * 63:3  Reserved
 97                  */
 98                 return 0x5;
 99         }
100 
101         return 0;
102 }
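
/*
 * Editor's note: a minimal sketch, derived from this file, of how the
 * mask returned by get_prefetch_disable_bits() is consumed (see
 * pseudo_lock_fn() and measure_cycles_lat_fn() below). The prefetchers
 * are disabled around the critical loops and re-enabled afterwards:
 *
 *	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
 *	... access the memory being pseudo-locked or measured ...
 *	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
 */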
103 
104 /**
105  * pseudo_lock_minor_get - Obtain available minor number
106  * @minor: Pointer to where new minor number will be stored
107  *
108  * A bitmask is used to track available minor numbers. Here the next free
109  * minor number is marked as unavailable and returned.
110  *
111  * Return: 0 on success, <0 on failure.
112  */
113 static int pseudo_lock_minor_get(unsigned int *minor)
114 {
115         unsigned long first_bit;
116 
117         first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
118 
119         if (first_bit == MINORBITS)
120                 return -ENOSPC;
121 
122         __clear_bit(first_bit, &pseudo_lock_minor_avail);
123         *minor = first_bit;
124 
125         return 0;
126 }
127 
128 /**
129  * pseudo_lock_minor_release - Return minor number to available
130  * @minor: The minor number made available
131  */
132 static void pseudo_lock_minor_release(unsigned int minor)
133 {
134         __set_bit(minor, &pseudo_lock_minor_avail);
135 }
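
/*
 * Editor's note: a minimal usage sketch of the bitmap-based minor
 * number allocator above, mirroring its use in
 * rdtgroup_pseudo_lock_create() and rdtgroup_pseudo_lock_remove():
 *
 *	unsigned int minor;
 *
 *	if (pseudo_lock_minor_get(&minor) < 0)
 *		return -ENOSPC;	// all MINORBITS minors are in use
 *	device_create(pseudo_lock_class, NULL,
 *		      MKDEV(pseudo_lock_major, minor), ...);
 *	...
 *	pseudo_lock_minor_release(minor);
 */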
136 
137 /**
138  * region_find_by_minor - Locate a pseudo-lock region by inode minor number
139  * @minor: The minor number of the device representing pseudo-locked region
140  *
141  * When the character device is accessed we need to determine which
142  * pseudo-locked region it belongs to. This is done by matching the
143  * device's minor number to the pseudo-locked region that owns it.
144  *
145  * Minor numbers are assigned at the time a pseudo-locked region is associated
146  * with a cache instance.
147  *
148  * Return: On success return pointer to resource group owning the pseudo-locked
149  *         region, NULL on failure.
150  */
151 static struct rdtgroup *region_find_by_minor(unsigned int minor)
152 {
153         struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
154 
155         list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
156                 if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
157                         rdtgrp_match = rdtgrp;
158                         break;
159                 }
160         }
161         return rdtgrp_match;
162 }
163 
164 /**
165  * pseudo_lock_pm_req - A power management QoS request list entry
166  * @list:       Entry within the @pm_reqs list for a pseudo-locked region
167  * @req:        PM QoS request
168  */
169 struct pseudo_lock_pm_req {
170         struct list_head list;
171         struct dev_pm_qos_request req;
172 };
173 
174 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
175 {
176         struct pseudo_lock_pm_req *pm_req, *next;
177 
178         list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
179                 dev_pm_qos_remove_request(&pm_req->req);
180                 list_del(&pm_req->list);
181                 kfree(pm_req);
182         }
183 }
184 
185 /**
186  * pseudo_lock_cstates_constrain - Restrict cores from entering C6
187  *
188  * To prevent the cache from being affected by power management, entering
189  * C6 has to be avoided. This is accomplished by requesting a latency
190  * requirement lower than the lowest C6 exit latency of all supported
191  * platforms, as found in the cpuidle state tables of the intel_idle driver.
192  * At this time it is possible to do so with a single latency requirement
193  * for all supported platforms.
194  *
195  * Since Goldmont, which is affected by X86_BUG_MONITOR, is supported,
196  * the ACPI latencies need to be considered while keeping in mind that C2
197  * may be set to map to deeper sleep states. In this case the latency
198  * requirement needs to prevent entering C2 also.
199  */
200 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
201 {
202         struct pseudo_lock_pm_req *pm_req;
203         int cpu;
204         int ret;
205 
206         for_each_cpu(cpu, &plr->d->cpu_mask) {
207                 pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
208                 if (!pm_req) {
209                         rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
210                         ret = -ENOMEM;
211                         goto out_err;
212                 }
213                 ret = dev_pm_qos_add_request(get_cpu_device(cpu),
214                                              &pm_req->req,
215                                              DEV_PM_QOS_RESUME_LATENCY,
216                                              30);
217                 if (ret < 0) {
218                         rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
219                                             cpu);
220                         kfree(pm_req);
221                         ret = -1;
222                         goto out_err;
223                 }
224                 list_add(&pm_req->list, &plr->pm_reqs);
225         }
226 
227         return 0;
228 
229 out_err:
230         pseudo_lock_cstates_relax(plr);
231         return ret;
232 }
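
/*
 * Editor's note: the constraint above takes the form of one
 * DEV_PM_QOS_RESUME_LATENCY request of 30 usec for every CPU in the
 * cache domain's cpu_mask; pseudo_lock_cstates_relax() walks @pm_reqs
 * and removes each request again. The 30 usec value is the single
 * latency requirement referred to in the kernel-doc above.
 */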
233 
234 /**
235  * pseudo_lock_region_clear - Reset pseudo-lock region data
236  * @plr: pseudo-lock region
237  *
238  * All content of the pseudo-locked region is reset - any memory
239  * allocated is freed.
240  *
241  * Return: void
242  */
243 static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
244 {
245         plr->size = 0;
246         plr->line_size = 0;
247         kfree(plr->kmem);
248         plr->kmem = NULL;
249         plr->r = NULL;
250         if (plr->d)
251                 plr->d->plr = NULL;
252         plr->d = NULL;
253         plr->cbm = 0;
254         plr->debugfs_dir = NULL;
255 }
256 
257 /**
258  * pseudo_lock_region_init - Initialize pseudo-lock region information
259  * @plr: pseudo-lock region
260  *
261  * Called after the user provided a schemata to be pseudo-locked. On entry
262  * the &struct pseudo_lock_region has already been initialized from the
263  * schemata with the resource, domain, and capacity bitmask. Here the information
264  * required for pseudo-locking is deduced from this data and &struct
265  * pseudo_lock_region initialized further. This information includes:
266  * - size in bytes of the region to be pseudo-locked
267  * - cache line size to know the stride with which data needs to be accessed
268  *   to be pseudo-locked
269  * - a cpu associated with the cache instance on which the pseudo-locking
270  *   flow can be executed
271  *
272  * Return: 0 on success, <0 on failure. Descriptive error will be written
273  * to last_cmd_status buffer.
274  */
275 static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
276 {
277         struct cpu_cacheinfo *ci;
278         int ret;
279         int i;
280 
281         /* Pick the first cpu we find that is associated with the cache. */
282         plr->cpu = cpumask_first(&plr->d->cpu_mask);
283 
284         if (!cpu_online(plr->cpu)) {
285                 rdt_last_cmd_printf("CPU %u associated with cache not online\n",
286                                     plr->cpu);
287                 ret = -ENODEV;
288                 goto out_region;
289         }
290 
291         ci = get_cpu_cacheinfo(plr->cpu);
292 
293         plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
294 
295         for (i = 0; i < ci->num_leaves; i++) {
296                 if (ci->info_list[i].level == plr->r->cache_level) {
297                         plr->line_size = ci->info_list[i].coherency_line_size;
298                         return 0;
299                 }
300         }
301 
302         ret = -1;
303         rdt_last_cmd_puts("Unable to determine cache line size\n");
304 out_region:
305         pseudo_lock_region_clear(plr);
306         return ret;
307 }
308 
309 /**
310  * pseudo_lock_init - Initialize a pseudo-lock region
311  * @rdtgrp: resource group to which new pseudo-locked region will belong
312  *
313  * A pseudo-locked region is associated with a resource group. When this
314  * association is created the pseudo-locked region is initialized. The
315  * details of the pseudo-locked region are not known at this time so only
316  * allocation is done and association established.
317  *
318  * Return: 0 on success, <0 on failure
319  */
320 static int pseudo_lock_init(struct rdtgroup *rdtgrp)
321 {
322         struct pseudo_lock_region *plr;
323 
324         plr = kzalloc(sizeof(*plr), GFP_KERNEL);
325         if (!plr)
326                 return -ENOMEM;
327 
328         init_waitqueue_head(&plr->lock_thread_wq);
329         INIT_LIST_HEAD(&plr->pm_reqs);
330         rdtgrp->plr = plr;
331         return 0;
332 }
333 
334 /**
335  * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
336  * @plr: pseudo-lock region
337  *
338  * Initialize the details required to set up the pseudo-locked region and
339  * allocate the contiguous memory that will be pseudo-locked to the cache.
340  *
341  * Return: 0 on success, <0 on failure.  Descriptive error will be written
342  * to last_cmd_status buffer.
343  */
344 static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
345 {
346         int ret;
347 
348         ret = pseudo_lock_region_init(plr);
349         if (ret < 0)
350                 return ret;
351 
352         /*
353          * We do not yet support contiguous regions larger than
354          * KMALLOC_MAX_SIZE.
355          */
356         if (plr->size > KMALLOC_MAX_SIZE) {
357                 rdt_last_cmd_puts("Requested region exceeds maximum size\n");
358                 ret = -E2BIG;
359                 goto out_region;
360         }
361 
362         plr->kmem = kzalloc(plr->size, GFP_KERNEL);
363         if (!plr->kmem) {
364                 rdt_last_cmd_puts("Unable to allocate memory\n");
365                 ret = -ENOMEM;
366                 goto out_region;
367         }
368 
369         ret = 0;
370         goto out;
371 out_region:
372         pseudo_lock_region_clear(plr);
373 out:
374         return ret;
375 }
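
/*
 * Editor's note: kzalloc() returns physically contiguous memory. That
 * contiguity is what allows pseudo_lock_dev_mmap() below to map the
 * region with a single remap_pfn_range() starting at __pa(plr->kmem),
 * and it is also why regions larger than KMALLOC_MAX_SIZE are rejected
 * above.
 */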
376 
377 /**
378  * pseudo_lock_free - Free a pseudo-locked region
379  * @rdtgrp: resource group to which pseudo-locked region belonged
380  *
381  * At this point the pseudo-locked region's resources have either already
382  * been released or were never created. Now the region can be freed and
383  * disassociated from the resource group.
384  *
385  * Return: void
386  */
387 static void pseudo_lock_free(struct rdtgroup *rdtgrp)
388 {
389         pseudo_lock_region_clear(rdtgrp->plr);
390         kfree(rdtgrp->plr);
391         rdtgrp->plr = NULL;
392 }
393 
394 /**
395  * pseudo_lock_fn - Load kernel memory into cache
396  * @_rdtgrp: resource group to which pseudo-lock region belongs
397  *
398  * This is the core pseudo-locking flow.
399  *
400  * First we ensure that the kernel memory cannot be found in the cache.
401  * Then, while taking care that there will be as little interference as
402  * possible, the memory to be loaded is accessed while the core is running
403  * with its class of service set to the bitmask of the pseudo-locked region.
404  * After this is complete no future CAT allocations will be allowed to
405  * overlap with this bitmask.
406  *
407  * Local register variables are utilized to ensure that the memory region
408  * to be locked is the only memory access made during the critical locking
409  * loop.
410  *
411  * Return: 0. Waiter on waitqueue will be woken on completion.
412  */
413 static int pseudo_lock_fn(void *_rdtgrp)
414 {
415         struct rdtgroup *rdtgrp = _rdtgrp;
416         struct pseudo_lock_region *plr = rdtgrp->plr;
417         u32 rmid_p, closid_p;
418         unsigned long i;
419 #ifdef CONFIG_KASAN
420         /*
421          * The registers used for local register variables are also used
422          * when KASAN is active. When KASAN is active we use a regular
423          * variable to ensure we always use a valid pointer, but the cost
424          * is that this variable will enter the cache, evicting some of the
425          * memory we are trying to lock into the cache. Thus expect a lower
426          * pseudo-locking success rate when KASAN is active.
427          */
428         unsigned int line_size;
429         unsigned int size;
430         void *mem_r;
431 #else
432         register unsigned int line_size asm("esi");
433         register unsigned int size asm("edi");
434 #ifdef CONFIG_X86_64
435         register void *mem_r asm("rbx");
436 #else
437         register void *mem_r asm("ebx");
438 #endif /* CONFIG_X86_64 */
439 #endif /* CONFIG_KASAN */
440 
441         /*
442          * Make sure none of the allocated memory is cached. If it is we
443          * will get a cache hit in the loop below from outside of the
444          * pseudo-locked region.
445          * wbinvd (as opposed to clflush/clflushopt) is required to
446          * increase the likelihood that the allocated cache portion will
447          * be filled with the associated memory.
448          */
449         native_wbinvd();
450 
451         /*
452          * Always called with interrupts enabled. By disabling interrupts we
453          * ensure that we will not be preempted during this critical section.
454          */
455         local_irq_disable();
456 
457         /*
458          * Call wrmsr and rdmsr as directly as possible to avoid tracing
459          * clobbering local register variables or affecting cache accesses.
460          *
461          * Disable the hardware prefetcher so that when the end of the memory
462          * being pseudo-locked is reached the hardware will not read beyond
463          * the buffer and evict pseudo-locked memory read earlier from the
464          * cache.
465          */
466         __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
467         closid_p = this_cpu_read(pqr_state.cur_closid);
468         rmid_p = this_cpu_read(pqr_state.cur_rmid);
469         mem_r = plr->kmem;
470         size = plr->size;
471         line_size = plr->line_size;
472         /*
473          * Critical section begin: start by writing the closid associated
474          * with the capacity bitmask of the cache region being
475          * pseudo-locked followed by reading of kernel memory to load it
476          * into the cache.
477          */
478         __wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
479         /*
480          * Cache was flushed earlier. Now access kernel memory to read it
481          * into the cache region associated with the just-activated closid.
482          * Loop over data twice:
483          * - In first loop the cache region is shared with the page walker
484          *   as it populates the paging structure caches (including TLB).
485          * - In the second loop the paging structure caches are used and
486          *   cache region is populated with the memory being referenced.
487          */
488         for (i = 0; i < size; i += PAGE_SIZE) {
489                 /*
490                  * Add a barrier to prevent speculative execution of this
491                  * loop reading beyond the end of the buffer.
492                  */
493                 rmb();
494                 asm volatile("mov (%0,%1,1), %%eax\n\t"
495                         :
496                         : "r" (mem_r), "r" (i)
497                         : "%eax", "memory");
498         }
499         for (i = 0; i < size; i += line_size) {
500                 /*
501                  * Add a barrier to prevent speculative execution of this
502                  * loop reading beyond the end of the buffer.
503                  */
504                 rmb();
505                 asm volatile("mov (%0,%1,1), %%eax\n\t"
506                         :
507                         : "r" (mem_r), "r" (i)
508                         : "%eax", "memory");
509         }
510         /*
511          * Critical section end: restore the closid with a capacity bitmask
512          * that does not overlap with the pseudo-locked region.
513          */
514         __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
515 
516         /* Re-enable the hardware prefetcher(s) */
517         wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
518         local_irq_enable();
519 
520         plr->thread_done = 1;
521         wake_up_interruptible(&plr->lock_thread_wq);
522         return 0;
523 }
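
/*
 * Editor's note: summary of the sequence implemented above:
 * (1) wbinvd flushes the caches so no part of the buffer is resident,
 * (2) interrupts are disabled and the hardware prefetchers turned off,
 * (3) IA32_PQR_ASSOC is switched to the closid of the pseudo-locked
 *     region,
 * (4) the buffer is read twice - once at PAGE_SIZE stride to populate
 *     the paging structure caches, once at cache line stride to
 *     populate the cache itself - and
 * (5) the previous closid/rmid and the prefetcher state are restored.
 */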
524 
525 /**
526  * rdtgroup_monitor_in_progress - Test if monitoring in progress
527  * @rdtgrp: resource group being queried
528  *
529  * Return: 1 if monitor groups have been created for this resource
530  * group, 0 otherwise.
531  */
532 static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
533 {
534         return !list_empty(&rdtgrp->mon.crdtgrp_list);
535 }
536 
537 /**
538  * rdtgroup_locksetup_user_restrict - Restrict user access to group
539  * @rdtgrp: resource group needing access restricted
540  *
541  * A resource group used for cache pseudo-locking cannot have cpus or tasks
542  * assigned to it. This is communicated to the user by restricting access
543  * to all the files that can be used to make such changes.
544  *
545  * Permissions are restored with rdtgroup_locksetup_user_restore().
546  *
547  * Return: 0 on success, <0 on failure. If a failure occurs during the
548  * restriction of access an attempt will be made to restore permissions,
549  * but the resulting mode of these files will be uncertain after such
550  * a failure.
551  */
552 static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
553 {
554         int ret;
555 
556         ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
557         if (ret)
558                 return ret;
559 
560         ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
561         if (ret)
562                 goto err_tasks;
563 
564         ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
565         if (ret)
566                 goto err_cpus;
567 
568         if (rdt_mon_capable) {
569                 ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
570                 if (ret)
571                         goto err_cpus_list;
572         }
573 
574         ret = 0;
575         goto out;
576 
577 err_cpus_list:
578         rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
579 err_cpus:
580         rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
581 err_tasks:
582         rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
583 out:
584         return ret;
585 }
586 
587 /**
588  * rdtgroup_locksetup_user_restore - Restore user access to group
589  * @rdtgrp: resource group needing access restored
590  *
591  * Restore all file access previously removed using
592  * rdtgroup_locksetup_user_restrict()
593  *
594  * Return: 0 on success, <0 on failure. If a failure occurs during the
595  * restoration of access an attempt will be made to restrict permissions
596  * again, but the resulting mode of these files will be uncertain after
597  * such a failure.
598  */
599 static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
600 {
601         int ret;
602 
603         ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
604         if (ret)
605                 return ret;
606 
607         ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
608         if (ret)
609                 goto err_tasks;
610 
611         ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
612         if (ret)
613                 goto err_cpus;
614 
615         if (rdt_mon_capable) {
616                 ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
617                 if (ret)
618                         goto err_cpus_list;
619         }
620 
621         ret = 0;
622         goto out;
623 
624 err_cpus_list:
625         rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
626 err_cpus:
627         rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
628 err_tasks:
629         rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
630 out:
631         return ret;
632 }
633 
634 /**
635  * rdtgroup_locksetup_enter - Resource group enters locksetup mode
636  * @rdtgrp: resource group requested to enter locksetup mode
637  *
638  * A resource group enters locksetup mode to reflect that it will be used
639  * to represent a pseudo-locked region and is in the process of being set
640  * up to do so. A resource group used for a pseudo-locked region loses
641  * the closid associated with it, so we cannot allow it to have any
642  * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
643  * future. Monitoring of a pseudo-locked region is not allowed either.
644  *
645  * The above and more restrictions on a pseudo-locked region are checked
646  * for and enforced before the resource group enters the locksetup mode.
647  *
648  * Returns: 0 if the resource group successfully entered locksetup mode, <0
649  * on failure. On failure the last_cmd_status buffer is updated with text to
650  * communicate details of failure to the user.
651  */
652 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
653 {
654         int ret;
655 
656         /*
657          * The default resource group can neither be removed nor lose the
658          * default closid associated with it.
659          */
660         if (rdtgrp == &rdtgroup_default) {
661                 rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
662                 return -EINVAL;
663         }
664 
665         /*
666          * Cache Pseudo-locking not supported when CDP is enabled.
667          *
668          * Some things to consider if you would like to enable this
669          * support (using L3 CDP as example):
670          * - When CDP is enabled two separate resources are exposed,
671          *   L3DATA and L3CODE, but they are actually on the same cache.
672          *   The implication for pseudo-locking is that if a
673          *   pseudo-locked region is created on a domain of one
674          *   resource (eg. L3CODE), then a pseudo-locked region cannot
675          *   be created on that same domain of the other resource
676          *   (eg. L3DATA). This is because the creation of a
677          *   pseudo-locked region involves a call to wbinvd that will
678          *   affect all cache allocations on that particular domain.
679          * - Considering the previous, it may be possible to only
680          *   expose one of the CDP resources to pseudo-locking and
681          *   hide the other. For example, we could consider only
682          *   exposing L3DATA; since the L3 cache is unified it is
683          *   still possible to place instructions there and execute them.
684          * - If only one resource is exposed to pseudo-locking we should
685          *   still keep in mind that availability of a portion of cache
686          *   for pseudo-locking should take into account both resources.
687          *   Similarly, if a pseudo-locked region is created in one
688          *   resource, the portion of cache used by it should be made
689          *   unavailable to all future allocations from both resources.
690          */
691         if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
692             rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
693                 rdt_last_cmd_puts("CDP enabled\n");
694                 return -EINVAL;
695         }
696 
697         /*
698          * Not knowing the bits to disable prefetching implies that this
699          * platform does not support Cache Pseudo-Locking.
700          */
701         prefetch_disable_bits = get_prefetch_disable_bits();
702         if (prefetch_disable_bits == 0) {
703                 rdt_last_cmd_puts("Pseudo-locking not supported\n");
704                 return -EINVAL;
705         }
706 
707         if (rdtgroup_monitor_in_progress(rdtgrp)) {
708                 rdt_last_cmd_puts("Monitoring in progress\n");
709                 return -EINVAL;
710         }
711 
712         if (rdtgroup_tasks_assigned(rdtgrp)) {
713                 rdt_last_cmd_puts("Tasks assigned to resource group\n");
714                 return -EINVAL;
715         }
716 
717         if (!cpumask_empty(&rdtgrp->cpu_mask)) {
718                 rdt_last_cmd_puts("CPUs assigned to resource group\n");
719                 return -EINVAL;
720         }
721 
722         if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
723                 rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
724                 return -EIO;
725         }
726 
727         ret = pseudo_lock_init(rdtgrp);
728         if (ret) {
729                 rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
730                 goto out_release;
731         }
732 
733         /*
734          * If this system is capable of monitoring, an rmid would have been
735          * allocated when the control group was created. It is not needed
736          * anymore once this group is used for pseudo-locking, and it is
737          * safe to call free_rmid() on platforms not capable of monitoring.
738          */
739         free_rmid(rdtgrp->mon.rmid);
740 
741         ret = 0;
742         goto out;
743 
744 out_release:
745         rdtgroup_locksetup_user_restore(rdtgrp);
746 out:
747         return ret;
748 }
749 
750 /**
751  * rdtgroup_locksetup_exit - Resource group exits locksetup mode
752  * @rdtgrp: resource group
753  *
754  * When a resource group exits locksetup mode the earlier restrictions are
755  * lifted.
756  *
757  * Return: 0 on success, <0 on failure
758  */
759 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
760 {
761         int ret;
762 
763         if (rdt_mon_capable) {
764                 ret = alloc_rmid();
765                 if (ret < 0) {
766                         rdt_last_cmd_puts("Out of RMIDs\n");
767                         return ret;
768                 }
769                 rdtgrp->mon.rmid = ret;
770         }
771 
772         ret = rdtgroup_locksetup_user_restore(rdtgrp);
773         if (ret) {
774                 free_rmid(rdtgrp->mon.rmid);
775                 return ret;
776         }
777 
778         pseudo_lock_free(rdtgrp);
779         return 0;
780 }
781 
782 /**
783  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
784  * @d: RDT domain
785  * @cbm: CBM to test
786  *
787  * @d represents a cache instance and @cbm a capacity bitmask that is
788  * considered for it. Determine if @cbm overlaps with any existing
789  * pseudo-locked region on @d.
790  *
791  * @cbm is unsigned long, even if only 32 bits are used, to make the
792  * bitmap functions work correctly.
793  *
794  * Return: true if @cbm overlaps with pseudo-locked region on @d, false
795  * otherwise.
796  */
797 bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
798 {
799         unsigned int cbm_len;
800         unsigned long cbm_b;
801 
802         if (d->plr) {
803                 cbm_len = d->plr->r->cache.cbm_len;
804                 cbm_b = d->plr->cbm;
805                 if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
806                         return true;
807         }
808         return false;
809 }
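
/*
 * Editor's note: worked example for the overlap test above. With
 * cbm_len = 8 and a pseudo-locked region occupying cbm_b = 0x0f
 * (bits 0-3), a request of cbm = 0x30 (bits 4-5) does not intersect
 * and is allowed, while cbm = 0x18 (bits 3-4) shares bit 3 with the
 * pseudo-locked region and is reported as an overlap.
 */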
810 
811 /**
812  * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
813  * @d: RDT domain under test
814  *
815  * The setup of a pseudo-locked region affects all cache instances within
816  * the hierarchy of the region. It is thus essential to know if any
817  * pseudo-locked regions exist within a cache hierarchy to prevent any
818  * attempts to create new pseudo-locked regions in the same hierarchy.
819  *
820  * Return: true if a pseudo-locked region exists in the hierarchy of @d or
821  *         if it is not possible to test due to a memory allocation failure,
822  *         false otherwise.
823  */
824 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
825 {
826         cpumask_var_t cpu_with_psl;
827         struct rdt_resource *r;
828         struct rdt_domain *d_i;
829         bool ret = false;
830 
831         if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
832                 return true;
833 
834         /*
835          * First determine which cpus have pseudo-locked regions
836          * associated with them.
837          */
838         for_each_alloc_enabled_rdt_resource(r) {
839                 list_for_each_entry(d_i, &r->domains, list) {
840                         if (d_i->plr)
841                                 cpumask_or(cpu_with_psl, cpu_with_psl,
842                                            &d_i->cpu_mask);
843                 }
844         }
845 
846         /*
847          * Next test if new pseudo-locked region would intersect with
848          * existing region.
849          */
850         if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
851                 ret = true;
852 
853         free_cpumask_var(cpu_with_psl);
854         return ret;
855 }
856 
857 /**
858  * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
859  * @_plr: pseudo-lock region to measure
860  *
861  * There is no deterministic way to test if a memory region is cached. One
862  * way is to measure how long it takes to read the memory; the speed of
863  * access indicates how close to the cpu the data was. Moreover,
864  * if the prefetcher is disabled and the memory is read at a stride
865  * of half the cache line, then a cache miss will be easy to spot since the
866  * read of the first half would be significantly slower than the read of
867  * the second half.
868  *
869  * Return: 0. Waiter on waitqueue will be woken on completion.
870  */
871 static int measure_cycles_lat_fn(void *_plr)
872 {
873         struct pseudo_lock_region *plr = _plr;
874         unsigned long i;
875         u64 start, end;
876         void *mem_r;
877 
878         local_irq_disable();
879         /*
880          * Disable hardware prefetchers.
881          */
882         wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
883         mem_r = READ_ONCE(plr->kmem);
884         /*
885          * Dummy execution of the time measurement to load the needed
886          * instructions into the L1 instruction cache.
887          */
888         start = rdtsc_ordered();
889         for (i = 0; i < plr->size; i += 32) {
890                 start = rdtsc_ordered();
891                 asm volatile("mov (%0,%1,1), %%eax\n\t"
892                              :
893                              : "r" (mem_r), "r" (i)
894                              : "%eax", "memory");
895                 end = rdtsc_ordered();
896                 trace_pseudo_lock_mem_latency((u32)(end - start));
897         }
898         wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
899         local_irq_enable();
900         plr->thread_done = 1;
901         wake_up_interruptible(&plr->lock_thread_wq);
902         return 0;
903 }
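
/*
 * Editor's note: the stride of 32 bytes above is half the 64 byte
 * cache line size of the validated platforms, implementing the
 * half-cache-line read described in the kernel-doc: with prefetchers
 * disabled, a miss shows up as a slow read of the first half of a
 * line followed by a fast read of the second half.
 */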
904 
905 /*
906  * Create a perf_event_attr for the hit and miss perf events that will
907  * be used during the performance measurement. A perf_event maintains
908  * a pointer to its perf_event_attr so a unique attribute structure is
909  * created for each perf_event.
910  *
911  * The actual configuration of the event is set right before use in order
912  * to use the X86_CONFIG macro.
913  */
914 static struct perf_event_attr perf_miss_attr = {
915         .type           = PERF_TYPE_RAW,
916         .size           = sizeof(struct perf_event_attr),
917         .pinned         = 1,
918         .disabled       = 0,
919         .exclude_user   = 1,
920 };
921 
922 static struct perf_event_attr perf_hit_attr = {
923         .type           = PERF_TYPE_RAW,
924         .size           = sizeof(struct perf_event_attr),
925         .pinned         = 1,
926         .disabled       = 0,
927         .exclude_user   = 1,
928 };
929 
930 struct residency_counts {
931         u64 miss_before, hits_before;
932         u64 miss_after,  hits_after;
933 };
934 
935 static int measure_residency_fn(struct perf_event_attr *miss_attr,
936                                 struct perf_event_attr *hit_attr,
937                                 struct pseudo_lock_region *plr,
938                                 struct residency_counts *counts)
939 {
940         u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
941         struct perf_event *miss_event, *hit_event;
942         int hit_pmcnum, miss_pmcnum;
943         unsigned int line_size;
944         unsigned int size;
945         unsigned long i;
946         void *mem_r;
947         u64 tmp;
948 
949         miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
950                                                       NULL, NULL, NULL);
951         if (IS_ERR(miss_event))
952                 goto out;
953 
954         hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
955                                                      NULL, NULL, NULL);
956         if (IS_ERR(hit_event))
957                 goto out_miss;
958 
959         local_irq_disable();
960         /*
961          * Check any possible error state of events used by performing
962          * one local read.
963          */
964         if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
965                 local_irq_enable();
966                 goto out_hit;
967         }
968         if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
969                 local_irq_enable();
970                 goto out_hit;
971         }
972 
973         /*
974          * Disable hardware prefetchers.
975          */
976         wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
977 
978         /* Initialize rest of local variables */
979         /*
980          * Performance event has been validated right before this with
981          * interrupts disabled - it is thus safe to read the counter index.
982          */
983         miss_pmcnum = x86_perf_rdpmc_index(miss_event);
984         hit_pmcnum = x86_perf_rdpmc_index(hit_event);
985         line_size = READ_ONCE(plr->line_size);
986         mem_r = READ_ONCE(plr->kmem);
987         size = READ_ONCE(plr->size);
988 
989         /*
990          * Read counter variables twice - first to load the instructions
991          * used in L1 cache, second to capture accurate value that does not
992          * include cache misses incurred because of instruction loads.
993          */
994         rdpmcl(hit_pmcnum, hits_before);
995         rdpmcl(miss_pmcnum, miss_before);
996         /*
997          * From SDM: back-to-back fast reads are not guaranteed
998          * to be monotonic.
999          * Use LFENCE to ensure all previous instructions are retired
1000          * before proceeding.
1001          */
1002         rmb();
1003         rdpmcl(hit_pmcnum, hits_before);
1004         rdpmcl(miss_pmcnum, miss_before);
1005         /*
1006          * Use LFENCE to ensure all previous instructions are retired
1007          * before proceeding.
1008          */
1009         rmb();
1010         for (i = 0; i < size; i += line_size) {
1011                 /*
1012                  * Add a barrier to prevent speculative execution of this
1013                  * loop reading beyond the end of the buffer.
1014                  */
1015                 rmb();
1016                 asm volatile("mov (%0,%1,1), %%eax\n\t"
1017                              :
1018                              : "r" (mem_r), "r" (i)
1019                              : "%eax", "memory");
1020         }
1021         /*
1022          * Use LFENCE to ensure all previous instructions are retired
1023          * before proceeding.
1024          */
1025         rmb();
1026         rdpmcl(hit_pmcnum, hits_after);
1027         rdpmcl(miss_pmcnum, miss_after);
1028         /*
1029          * Use LFENCE to ensure all previous instructions are retired
1030          * before proceeding.
1031          */
1032         rmb();
1033         /* Re-enable hardware prefetchers */
1034         wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
1035         local_irq_enable();
1036 out_hit:
1037         perf_event_release_kernel(hit_event);
1038 out_miss:
1039         perf_event_release_kernel(miss_event);
1040 out:
1041         /*
1042          * All counts will be zero on failure.
1043          */
1044         counts->miss_before = miss_before;
1045         counts->hits_before = hits_before;
1046         counts->miss_after  = miss_after;
1047         counts->hits_after  = hits_after;
1048         return 0;
1049 }
1050 
1051 static int measure_l2_residency(void *_plr)
1052 {
1053         struct pseudo_lock_region *plr = _plr;
1054         struct residency_counts counts = {0};
1055 
1056         /*
1057          * Non-architectural event for the Goldmont Microarchitecture
1058          * from Intel x86 Architecture Software Developer Manual (SDM):
1059          * MEM_LOAD_UOPS_RETIRED D1H (event number)
1060          * Umask values:
1061          *     L2_HIT   02H
1062          *     L2_MISS  10H
1063          */
1064         switch (boot_cpu_data.x86_model) {
1065         case INTEL_FAM6_ATOM_GOLDMONT:
1066         case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
1067                 perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
1068                                                    .umask = 0x10);
1069                 perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
1070                                                   .umask = 0x2);
1071                 break;
1072         default:
1073                 goto out;
1074         }
1075 
1076         measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
1077         /*
1078          * If a failure prevented the measurements from succeeding,
1079          * tracepoints will still be written and all counts will be zero.
1080          */
1081         trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
1082                              counts.miss_after - counts.miss_before);
1083 out:
1084         plr->thread_done = 1;
1085         wake_up_interruptible(&plr->lock_thread_wq);
1086         return 0;
1087 }
1088 
1089 static int measure_l3_residency(void *_plr)
1090 {
1091         struct pseudo_lock_region *plr = _plr;
1092         struct residency_counts counts = {0};
1093 
1094         /*
1095          * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
1096          * has two "no fix" errata associated with it: BDM35 and BDM100. On
1097          * this platform the following events are used instead:
1098          * LONGEST_LAT_CACHE 2EH (Documented in SDM)
1099          *       REFERENCE 4FH
1100          *       MISS      41H
1101          */
1102 
1103         switch (boot_cpu_data.x86_model) {
1104         case INTEL_FAM6_BROADWELL_X:
1105                 /* On BDW the hit event counts references, not hits */
1106                 perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
1107                                                   .umask = 0x4f);
1108                 perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
1109                                                    .umask = 0x41);
1110                 break;
1111         default:
1112                 goto out;
1113         }
1114 
1115         measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
1116         /*
1117          * If a failure prevented the measurements from succeeding,
1118          * tracepoints will still be written and all counts will be zero.
1119          */
1120 
1121         counts.miss_after -= counts.miss_before;
1122         if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) {
1123                 /*
1124                  * On BDW references and misses are counted, need to adjust.
1125                  * Sometimes the "hits" counter is a bit more than the
1126                  * references, for example, x references but x + 1 hits.
1127                  * To not report invalid hit values in this case we treat
1128                  * that as misses equal to references.
1129                  */
1130                 /* First compute the number of cache references measured */
1131                 counts.hits_after -= counts.hits_before;
1132                 /* Next convert references to cache hits */
1133                 counts.hits_after -= min(counts.miss_after, counts.hits_after);
1134         } else {
1135                 counts.hits_after -= counts.hits_before;
1136         }
1137 
1138         trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
1139 out:
1140         plr->thread_done = 1;
1141         wake_up_interruptible(&plr->lock_thread_wq);
1142         return 0;
1143 }
1144 
1145 /**
1146  * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
1147  *
1148  * The measurement of latency to access a pseudo-locked region should be
1149  * done from a cpu that is associated with that pseudo-locked region.
1150  * Determine which cpu is associated with this region and start a thread on
1151  * that cpu to perform the measurement, then wait for that thread to complete.
1152  *
1153  * Return: 0 on success, <0 on failure
1154  */
1155 static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
1156 {
1157         struct pseudo_lock_region *plr = rdtgrp->plr;
1158         struct task_struct *thread;
1159         unsigned int cpu;
1160         int ret = -1;
1161 
1162         cpus_read_lock();
1163         mutex_lock(&rdtgroup_mutex);
1164 
1165         if (rdtgrp->flags & RDT_DELETED) {
1166                 ret = -ENODEV;
1167                 goto out;
1168         }
1169 
1170         if (!plr->d) {
1171                 ret = -ENODEV;
1172                 goto out;
1173         }
1174 
1175         plr->thread_done = 0;
1176         cpu = cpumask_first(&plr->d->cpu_mask);
1177         if (!cpu_online(cpu)) {
1178                 ret = -ENODEV;
1179                 goto out;
1180         }
1181 
1182         plr->cpu = cpu;
1183 
1184         if (sel == 1)
1185                 thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
1186                                                 cpu_to_node(cpu),
1187                                                 "pseudo_lock_measure/%u",
1188                                                 cpu);
1189         else if (sel == 2)
1190                 thread = kthread_create_on_node(measure_l2_residency, plr,
1191                                                 cpu_to_node(cpu),
1192                                                 "pseudo_lock_measure/%u",
1193                                                 cpu);
1194         else if (sel == 3)
1195                 thread = kthread_create_on_node(measure_l3_residency, plr,
1196                                                 cpu_to_node(cpu),
1197                                                 "pseudo_lock_measure/%u",
1198                                                 cpu);
1199         else
1200                 goto out;
1201 
1202         if (IS_ERR(thread)) {
1203                 ret = PTR_ERR(thread);
1204                 goto out;
1205         }
1206         kthread_bind(thread, cpu);
1207         wake_up_process(thread);
1208 
1209         ret = wait_event_interruptible(plr->lock_thread_wq,
1210                                        plr->thread_done == 1);
1211         if (ret < 0)
1212                 goto out;
1213 
1214         ret = 0;
1215 
1216 out:
1217         mutex_unlock(&rdtgroup_mutex);
1218         cpus_read_unlock();
1219         return ret;
1220 }
1221 
1222 static ssize_t pseudo_lock_measure_trigger(struct file *file,
1223                                            const char __user *user_buf,
1224                                            size_t count, loff_t *ppos)
1225 {
1226         struct rdtgroup *rdtgrp = file->private_data;
1227         size_t buf_size;
1228         char buf[32];
1229         int ret;
1230         int sel;
1231 
1232         buf_size = min(count, (sizeof(buf) - 1));
1233         if (copy_from_user(buf, user_buf, buf_size))
1234                 return -EFAULT;
1235 
1236         buf[buf_size] = '\0';
1237         ret = kstrtoint(buf, 10, &sel);
1238         if (ret == 0) {
1239                 if (sel != 1 && sel != 2 && sel != 3)
1240                         return -EINVAL;
1241                 ret = debugfs_file_get(file->f_path.dentry);
1242                 if (ret)
1243                         return ret;
1244                 ret = pseudo_lock_measure_cycles(rdtgrp, sel);
1245                 if (ret == 0)
1246                         ret = count;
1247                 debugfs_file_put(file->f_path.dentry);
1248         }
1249 
1250         return ret;
1251 }
1252 
1253 static const struct file_operations pseudo_measure_fops = {
1254         .write = pseudo_lock_measure_trigger,
1255         .open = simple_open,
1256         .llseek = default_llseek,
1257 };
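
/*
 * Editor's note: usage sketch for the trigger above. Writing "1", "2"
 * or "3" to the "pseudo_lock_measure" debugfs file of a pseudo-locked
 * region selects the latency, L2 residency or L3 residency measurement
 * respectively; results are emitted via the pseudo_lock_mem_latency,
 * pseudo_lock_l2 and pseudo_lock_l3 tracepoints. Assuming debugfs is
 * mounted at /sys/kernel/debug:
 *
 *	echo 1 > /sys/kernel/debug/resctrl/<group>/pseudo_lock_measure
 */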
1258 
1259 /**
1260  * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
1261  * @rdtgrp: resource group to which pseudo-lock region belongs
1262  *
1263  * Called when a resource group in the pseudo-locksetup mode receives a
1264  * valid schemata that should be pseudo-locked. Since the resource group is
1265  * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
1266  * allocated and initialized with the essential information. If a failure
1267  * occurs the resource group remains in the pseudo-locksetup mode with the
1268  * &struct pseudo_lock_region associated with it, but cleared of all
1269  * information and ready for the user to re-attempt pseudo-locking by
1270  * writing the schemata again.
1271  *
1272  * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
1273  * on failure. Descriptive error will be written to last_cmd_status buffer.
1274  */
1275 int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
1276 {
1277         struct pseudo_lock_region *plr = rdtgrp->plr;
1278         struct task_struct *thread;
1279         unsigned int new_minor;
1280         struct device *dev;
1281         int ret;
1282 
1283         ret = pseudo_lock_region_alloc(plr);
1284         if (ret < 0)
1285                 return ret;
1286 
1287         ret = pseudo_lock_cstates_constrain(plr);
1288         if (ret < 0) {
1289                 ret = -EINVAL;
1290                 goto out_region;
1291         }
1292 
1293         plr->thread_done = 0;
1294 
1295         thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
1296                                         cpu_to_node(plr->cpu),
1297                                         "pseudo_lock/%u", plr->cpu);
1298         if (IS_ERR(thread)) {
1299                 ret = PTR_ERR(thread);
1300                 rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
1301                 goto out_cstates;
1302         }
1303 
1304         kthread_bind(thread, plr->cpu);
1305         wake_up_process(thread);
1306 
1307         ret = wait_event_interruptible(plr->lock_thread_wq,
1308                                        plr->thread_done == 1);
1309         if (ret < 0) {
1310                 /*
1311                  * If the thread does not get on the CPU for whatever
1312                  * reason and the process which sets up the region is
1313                  * interrupted then this will leave the thread in a runnable
1314                  * state and once it gets on the CPU it will dereference
1315                  * the cleared, but not freed, plr struct resulting in an
1316                  * empty pseudo-locking loop.
1317                  */
1318                 rdt_last_cmd_puts("Locking thread interrupted\n");
1319                 goto out_cstates;
1320         }
1321 
1322         ret = pseudo_lock_minor_get(&new_minor);
1323         if (ret < 0) {
1324                 rdt_last_cmd_puts("Unable to obtain a new minor number\n");
1325                 goto out_cstates;
1326         }
1327 
1328         /*
1329          * Unlock access but do not release the reference. The
1330          * pseudo-locked region will still be here on return.
1331          *
1332          * The mutex has to be released temporarily to avoid a potential
1333          * deadlock with the mm->mmap_sem semaphore which is obtained in
1334          * the device_create() and debugfs_create_dir() callpath below
1335          * as well as before the mmap() callback is called.
1336          */
1337         mutex_unlock(&rdtgroup_mutex);
1338 
1339         if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
1340                 plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
1341                                                       debugfs_resctrl);
1342                 if (!IS_ERR_OR_NULL(plr->debugfs_dir))
1343                         debugfs_create_file("pseudo_lock_measure", 0200,
1344                                             plr->debugfs_dir, rdtgrp,
1345                                             &pseudo_measure_fops);
1346         }
1347 
1348         dev = device_create(pseudo_lock_class, NULL,
1349                             MKDEV(pseudo_lock_major, new_minor),
1350                             rdtgrp, "%s", rdtgrp->kn->name);
1351 
1352         mutex_lock(&rdtgroup_mutex);
1353 
1354         if (IS_ERR(dev)) {
1355                 ret = PTR_ERR(dev);
1356                 rdt_last_cmd_printf("Failed to create character device: %d\n",
1357                                     ret);
1358                 goto out_debugfs;
1359         }
1360 
1361         /* We released the mutex - check if group was removed while we did so */
1362         if (rdtgrp->flags & RDT_DELETED) {
1363                 ret = -ENODEV;
1364                 goto out_device;
1365         }
1366 
1367         plr->minor = new_minor;
1368 
1369         rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
1370         closid_free(rdtgrp->closid);
1371         rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
1372         rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
1373 
1374         ret = 0;
1375         goto out;
1376 
1377 out_device:
1378         device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
1379 out_debugfs:
1380         debugfs_remove_recursive(plr->debugfs_dir);
1381         pseudo_lock_minor_release(new_minor);
1382 out_cstates:
1383         pseudo_lock_cstates_relax(plr);
1384 out_region:
1385         pseudo_lock_region_clear(plr);
1386 out:
1387         return ret;
1388 }
1389 
1390 /**
1391  * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
1392  * @rdtgrp: resource group to which the pseudo-locked region belongs
1393  *
1394  * The removal of a pseudo-locked region can be initiated when the resource
1395  * group is removed via a "rmdir" from user space or on the
1396  * unmount of the resctrl filesystem. On removal the resource group does
1397  * not go back to pseudo-locksetup mode before it is removed; instead it is
1398  * removed directly. There is thus asymmetry with the creation where the
1399  * &struct pseudo_lock_region is removed here while it was not created in
1400  * rdtgroup_pseudo_lock_create().
1401  *
1402  * Return: void
1403  */
1404 void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
1405 {
1406         struct pseudo_lock_region *plr = rdtgrp->plr;
1407 
1408         if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1409                 /*
1410                  * The default group cannot be a pseudo-locked region so we
1411                  * can free the closid here.
1412                  */
1413                 closid_free(rdtgrp->closid);
1414                 goto free;
1415         }
1416 
1417         pseudo_lock_cstates_relax(plr);
1418         debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
1419         device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
1420         pseudo_lock_minor_release(plr->minor);
1421 
1422 free:
1423         pseudo_lock_free(rdtgrp);
1424 }
1425 
1426 static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
1427 {
1428         struct rdtgroup *rdtgrp;
1429 
1430         mutex_lock(&rdtgroup_mutex);
1431 
1432         rdtgrp = region_find_by_minor(iminor(inode));
1433         if (!rdtgrp) {
1434                 mutex_unlock(&rdtgroup_mutex);
1435                 return -ENODEV;
1436         }
1437 
1438         filp->private_data = rdtgrp;
1439         atomic_inc(&rdtgrp->waitcount);
1440         /* Perform a non-seekable open - llseek is not supported */
1441         filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1442 
1443         mutex_unlock(&rdtgroup_mutex);
1444 
1445         return 0;
1446 }
1447 
1448 static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
1449 {
1450         struct rdtgroup *rdtgrp;
1451 
1452         mutex_lock(&rdtgroup_mutex);
1453         rdtgrp = filp->private_data;
1454         WARN_ON(!rdtgrp);
1455         if (!rdtgrp) {
1456                 mutex_unlock(&rdtgroup_mutex);
1457                 return -ENODEV;
1458         }
1459         filp->private_data = NULL;
1460         atomic_dec(&rdtgrp->waitcount);
1461         mutex_unlock(&rdtgroup_mutex);
1462         return 0;
1463 }
1464 
1465 static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
1466 {
1467         /* Not supported */
1468         return -EINVAL;
1469 }
1470 
1471 static const struct vm_operations_struct pseudo_mmap_ops = {
1472         .mremap = pseudo_lock_dev_mremap,
1473 };
1474 
1475 static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
1476 {
1477         unsigned long vsize = vma->vm_end - vma->vm_start;
1478         unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
1479         struct pseudo_lock_region *plr;
1480         struct rdtgroup *rdtgrp;
1481         unsigned long physical;
1482         unsigned long psize;
1483 
1484         mutex_lock(&rdtgroup_mutex);
1485 
1486         rdtgrp = filp->private_data;
1487         WARN_ON(!rdtgrp);
1488         if (!rdtgrp) {
1489                 mutex_unlock(&rdtgroup_mutex);
1490                 return -ENODEV;
1491         }
1492 
1493         plr = rdtgrp->plr;
1494 
1495         if (!plr->d) {
1496                 mutex_unlock(&rdtgroup_mutex);
1497                 return -ENODEV;
1498         }
1499 
1500         /*
1501          * The task is required to run with affinity to the cpus associated
1502          * with the pseudo-locked region. If this is not the case the task
1503          * may be scheduled elsewhere and invalidate entries in the
1504          * pseudo-locked region.
1505          */
1506         if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
1507                 mutex_unlock(&rdtgroup_mutex);
1508                 return -EINVAL;
1509         }
1510 
1511         physical = __pa(plr->kmem) >> PAGE_SHIFT;
1512         psize = plr->size - off;
1513 
1514         if (off > plr->size) {
1515                 mutex_unlock(&rdtgroup_mutex);
1516                 return -ENOSPC;
1517         }
1518 
1519         /*
1520          * Ensure changes are carried directly to the memory being mapped,
1521          * do not allow copy-on-write mapping.
1522          */
1523         if (!(vma->vm_flags & VM_SHARED)) {
1524                 mutex_unlock(&rdtgroup_mutex);
1525                 return -EINVAL;
1526         }
1527 
1528         if (vsize > psize) {
1529                 mutex_unlock(&rdtgroup_mutex);
1530                 return -ENOSPC;
1531         }
1532 
1533         memset(plr->kmem + off, 0, vsize);
1534 
1535         if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
1536                             vsize, vma->vm_page_prot)) {
1537                 mutex_unlock(&rdtgroup_mutex);
1538                 return -EAGAIN;
1539         }
1540         vma->vm_ops = &pseudo_mmap_ops;
1541         mutex_unlock(&rdtgroup_mutex);
1542         return 0;
1543 }
1544 
1545 static const struct file_operations pseudo_lock_dev_fops = {
1546         .owner =        THIS_MODULE,
1547         .llseek =       no_llseek,
1548         .read =         NULL,
1549         .write =        NULL,
1550         .open =         pseudo_lock_dev_open,
1551         .release =      pseudo_lock_dev_release,
1552         .mmap =         pseudo_lock_dev_mmap,
1553 };
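
/*
 * Editor's note: a user-space sketch (not part of this file) of how an
 * application consumes a pseudo-locked region through the character
 * device above; "newlock" is a hypothetical resource group name. The
 * task must run affine to the CPUs of the cache instance, which
 * pseudo_lock_dev_mmap() enforces, and the mapping must be MAP_SHARED
 * since copy-on-write mappings are rejected:
 *
 *	int fd = open("/dev/pseudo_lock/newlock", O_RDWR);
 *	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	... low cache miss rate accesses to mem ...
 *	munmap(mem, size);
 *	close(fd);
 */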
1554 
1555 static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
1556 {
1557         struct rdtgroup *rdtgrp;
1558 
1559         rdtgrp = dev_get_drvdata(dev);
1560         if (mode)
1561                 *mode = 0600;
1562         return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
1563 }
1564 
1565 int rdt_pseudo_lock_init(void)
1566 {
1567         int ret;
1568 
1569         ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
1570         if (ret < 0)
1571                 return ret;
1572 
1573         pseudo_lock_major = ret;
1574 
1575         pseudo_lock_class = class_create(THIS_MODULE, "pseudo_lock");
1576         if (IS_ERR(pseudo_lock_class)) {
1577                 ret = PTR_ERR(pseudo_lock_class);
1578                 unregister_chrdev(pseudo_lock_major, "pseudo_lock");
1579                 return ret;
1580         }
1581 
1582         pseudo_lock_class->devnode = pseudo_lock_devnode;
1583         return 0;
1584 }
1585 
1586 void rdt_pseudo_lock_release(void)
1587 {
1588         class_destroy(pseudo_lock_class);
1589         pseudo_lock_class = NULL;
1590         unregister_chrdev(pseudo_lock_major, "pseudo_lock");
1591         pseudo_lock_major = 0;
1592 }
1593 
