TOMOYO Linux Cross Reference
Linux/arch/powerpc/oprofile/cell/spu_task_sync.c


/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* The purpose of this file is to handle SPU event task switching
 * and to record SPU context information into the OProfile
 * event buffer.
 *
 * Additionally, the spu_sync_buffer function is provided as a helper
 * for recording actual SPU program counter samples to the event buffer.
 */
#include <linux/dcookies.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
#include <linux/oprofile.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "pr_util.h"

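/* Sentinel value for release_cached_info(): request that the cached
 * info for every SPU be released rather than a single entry.
 */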
#define RELEASE_ALL 9999

static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
int spu_prof_num_nodes;

struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
struct delayed_work spu_work;
static unsigned max_spu_buff;

static void spu_buff_add(unsigned long int value, int spu)
{
        /* spu buff is a circular buffer.  Add entries to the
         * head.  Head is the index to store the next value.
         * The buffer is treated as full while one free entry
         * still remains, so head and tail are never allowed to
         * become equal; head == tail therefore always means the
         * buffer is empty rather than full.
         *
         *  ASSUMPTION: the buffer_lock is held when this function
         *              is called to lock the buffer, head and tail.
         */
        int full = 1;

        if (spu_buff[spu].head >= spu_buff[spu].tail) {
                if ((spu_buff[spu].head - spu_buff[spu].tail)
                    <  (max_spu_buff - 1))
                        full = 0;

        } else if (spu_buff[spu].tail > spu_buff[spu].head) {
                if ((spu_buff[spu].tail - spu_buff[spu].head)
                    > 1)
                        full = 0;
        }

        if (!full) {
                spu_buff[spu].buff[spu_buff[spu].head] = value;
                spu_buff[spu].head++;

                if (spu_buff[spu].head >= max_spu_buff)
                        spu_buff[spu].head = 0;
        } else {
                /* From the user's perspective make the SPU buffer
                 * size management/overflow look like we are using
                 * per cpu buffers.  The user uses the same
                 * per cpu parameter to adjust the SPU buffer size.
                 * Increment the sample_lost_overflow to inform
                 * the user the buffer size needs to be increased.
                 */
                oprofile_cpu_buffer_inc_smpl_lost();
        }
}
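
/* Worked example of the "full" test in spu_buff_add(): with
 * max_spu_buff = 8, head = 7 and tail = 0 gives head - tail = 7,
 * which equals max_spu_buff - 1, so the buffer is full; likewise
 * head = 3 and tail = 4 gives tail - head = 1, also full.  One slot
 * is always left unused so that head == tail can only mean "empty".
 */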

/* This function copies the per SPU buffers to the
 * OProfile kernel buffer.
 */
void sync_spu_buff(void)
{
        int spu;
        unsigned long flags;
        int curr_head;

        for (spu = 0; spu < num_spu_nodes; spu++) {
                /* In case there was an issue and the buffer didn't
                 * get created, skip it.
                 */
                if (spu_buff[spu].buff == NULL)
                        continue;

                /* Hold the lock to make sure the head/tail
                 * doesn't change while spu_buff_add() is
                 * deciding if the buffer is full or not.
                 * Being a little paranoid.
                 */
                spin_lock_irqsave(&buffer_lock, flags);
                curr_head = spu_buff[spu].head;
                spin_unlock_irqrestore(&buffer_lock, flags);

                /* Transfer the current contents to the kernel buffer.
                 * Data can still be added to the head of the buffer.
                 */
                oprofile_put_buff(spu_buff[spu].buff,
                                  spu_buff[spu].tail,
                                  curr_head, max_spu_buff);

                spin_lock_irqsave(&buffer_lock, flags);
                spu_buff[spu].tail = curr_head;
                spin_unlock_irqrestore(&buffer_lock, flags);
        }

}
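
/* Only the tail moves here; spu_buff_add() only ever moves the head.
 * The head is snapshotted under buffer_lock, so oprofile_put_buff()
 * can drain entries up to that point while new samples continue to
 * arrive at the head.
 */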

static void wq_sync_spu_buff(struct work_struct *work)
{
        /* move data from spu buffers to kernel buffer */
        sync_spu_buff();

        /* only reschedule if profiling is not done */
        if (spu_prof_running)
                schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

/* Container for caching information about an active SPU task. */
struct cached_info {
        struct vma_to_fileoffset_map *map;
        struct spu *the_spu;    /* needed to access pointer to local_store */
        struct kref cache_ref;
};

static struct cached_info *spu_info[MAX_NUMNODES * SPUS_PER_NODE];

static void destroy_cached_info(struct kref *kref)
{
        struct cached_info *info;

        info = container_of(kref, struct cached_info, cache_ref);
        vma_map_free(info->map);
        kfree(info);
        module_put(THIS_MODULE);
}

/* Return the cached_info for the passed SPU number.
 * ATTENTION:  Callers are responsible for obtaining the
 *             cache_lock if needed prior to invoking this function.
 */
static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
{
        struct kref *ref;
        struct cached_info *ret_info;

        if (spu_num >= num_spu_nodes) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: Invalid index %d into spu info cache\n",
                       __func__, __LINE__, spu_num);
                ret_info = NULL;
                goto out;
        }
        if (!spu_info[spu_num] && the_spu) {
                ref = spu_get_profile_private_kref(the_spu->ctx);
                if (ref) {
                        spu_info[spu_num] = container_of(ref,
                                        struct cached_info, cache_ref);
                        kref_get(&spu_info[spu_num]->cache_ref);
                }
        }

        ret_info = spu_info[spu_num];
 out:
        return ret_info;
}


/* Looks for cached info for the passed spu.  If not found, the
 * cached info is created for the passed spu.
 * Returns 0 for success; otherwise, a negative errno for error.
 */
static int
prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
{
        unsigned long flags;
        struct vma_to_fileoffset_map *new_map;
        int retval = 0;
        struct cached_info *info;

        /* We won't bother getting cache_lock here since we
         * don't do anything with the cached_info that's returned.
         */
        info = get_cached_info(spu, spu->number);

        if (info) {
                pr_debug("Found cached SPU info.\n");
                goto out;
        }

        /* Create cached_info and set spu_info[spu->number] to point to it.
         * spu->number is a system-wide value, not a per-node value.
         */
        info = kzalloc(sizeof(struct cached_info), GFP_KERNEL);
        if (!info) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: failed to allocate cached_info\n",
                       __func__, __LINE__);
                retval = -ENOMEM;
                goto err_alloc;
        }
        new_map = create_vma_map(spu, objectId);
        if (!new_map) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: create vma_map failed\n",
                       __func__, __LINE__);
                retval = -ENOMEM;
                goto err_alloc;
        }

        pr_debug("Created vma_map\n");
        info->map = new_map;
        info->the_spu = spu;
        kref_init(&info->cache_ref);
        spin_lock_irqsave(&cache_lock, flags);
        spu_info[spu->number] = info;
        /* Increment count before passing off ref to SPUFS. */
        kref_get(&info->cache_ref);

        /* We increment the module refcount here since SPUFS is
         * responsible for the final destruction of the cached_info,
         * and it must be able to access the destroy_cached_info()
         * function defined in the OProfile module.  We decrement
         * the module refcount in destroy_cached_info.
         */
        try_module_get(THIS_MODULE);
        spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
                                destroy_cached_info);
        spin_unlock_irqrestore(&cache_lock, flags);
        goto out;

err_alloc:
        kfree(info);
out:
        return retval;
}
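
/* On the success path above, the cached_info ends up with two
 * references: the initial one from kref_init(), handed to SPUFS via
 * spu_set_profile_private_kref(), and the kref_get() taken for the
 * spu_info[] array, which release_cached_info() drops later.
 */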

/*
 * NOTE:  The caller is responsible for locking the
 *        cache_lock prior to calling this function.
 */
static int release_cached_info(int spu_index)
{
        int index, end;

        if (spu_index == RELEASE_ALL) {
                end = num_spu_nodes;
                index = 0;
        } else {
                if (spu_index >= num_spu_nodes) {
                        printk(KERN_ERR "SPU_PROF: "
                                "%s, line %d: "
                                "Invalid index %d into spu info cache\n",
                                __func__, __LINE__, spu_index);
                        goto out;
                }
                end = spu_index + 1;
                index = spu_index;
        }
        for (; index < end; index++) {
                if (spu_info[index]) {
                        kref_put(&spu_info[index]->cache_ref,
                                 destroy_cached_info);
                        spu_info[index] = NULL;
                }
        }

out:
        return 0;
}

/* The source code for fast_get_dcookie was "borrowed"
 * from drivers/oprofile/buffer_sync.c.
 */

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer).
 */
static inline unsigned long fast_get_dcookie(struct path *path)
{
        unsigned long cookie;

        if (path->dentry->d_flags & DCACHE_COOKIE)
                return (unsigned long)path->dentry;
        get_dcookie(path, &cookie);
        return cookie;
}
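
/* The cookie returned is an opaque value that identifies the dentry
 * for the duration of the profiling session; the userspace oprofile
 * daemon resolves it back to a pathname later via the dcookie
 * interface, so the sampling path only pays for this cheap lookup.
 */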

/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". Also, determine
 * the offset for the SPU ELF object.  If computed offset is
 * non-zero, it implies an embedded SPU object; otherwise, it's a
 * separate SPU binary, in which case we retrieve its dcookie.
 * For the embedded case, we must determine if SPU ELF is embedded
 * in the executable application or another file (i.e., shared lib).
 * If embedded in a shared lib, we must get the dcookie and return
 * that to the caller.
 */
static unsigned long
get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
                            unsigned long *spu_bin_dcookie,
                            unsigned long spu_ref)
{
        unsigned long app_cookie = 0;
        unsigned int my_offset = 0;
        struct vm_area_struct *vma;
        struct mm_struct *mm = spu->mm;

        if (!mm)
                goto out;

        down_read(&mm->mmap_sem);

        if (mm->exe_file) {
                app_cookie = fast_get_dcookie(&mm->exe_file->f_path);
                pr_debug("got dcookie for %s\n",
                         mm->exe_file->f_dentry->d_name.name);
        }

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
                        continue;
                my_offset = spu_ref - vma->vm_start;
                if (!vma->vm_file)
                        goto fail_no_image_cookie;

                pr_debug("Found spu ELF at %X(object-id:%lx) for file %s\n",
                         my_offset, spu_ref,
                         vma->vm_file->f_dentry->d_name.name);
                *offsetp = my_offset;
                break;
        }

        /* If no vma covers spu_ref, there is no SPU binary to take a
         * dcookie for; bail out rather than dereference a NULL vma
         * below.
         */
        if (!vma)
                goto fail_no_image_cookie;

        *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
        pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name);

        up_read(&mm->mmap_sem);

out:
        return app_cookie;

fail_no_image_cookie:
        up_read(&mm->mmap_sem);

        printk(KERN_ERR "SPU_PROF: "
                "%s, line %d: Cannot find dcookie for SPU binary\n",
                __func__, __LINE__);
        goto out;
}

/* This function finds or creates cached context information for the
 * passed SPU and records SPU context information into the OProfile
 * event buffer.
 */
static int process_context_switch(struct spu *spu, unsigned long objectId)
{
        unsigned long flags;
        int retval;
        unsigned int offset = 0;
        unsigned long spu_cookie = 0, app_dcookie;

        retval = prepare_cached_spu_info(spu, objectId);
        if (retval)
                goto out;

        /* Get dcookie first because a mutex_lock is taken in that
         * code path, so interrupts must not be disabled.
         */
        app_dcookie = get_exec_dcookie_and_offset(spu, &offset,
                                                  &spu_cookie, objectId);
        if (!app_dcookie || !spu_cookie) {
                retval = -ENOENT;
                goto out;
        }

        /* Record context info in event buffer */
        spin_lock_irqsave(&buffer_lock, flags);
        spu_buff_add(ESCAPE_CODE, spu->number);
        spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
        spu_buff_add(spu->number, spu->number);
        spu_buff_add(spu->pid, spu->number);
        spu_buff_add(spu->tgid, spu->number);
        spu_buff_add(app_dcookie, spu->number);
        spu_buff_add(spu_cookie, spu->number);
        spu_buff_add(offset, spu->number);

        /* Set flag to indicate SPU PC data can now be written out.  If
         * the SPU program counter data is seen before an SPU context
         * record is seen, the postprocessing will fail.
         */
        spu_buff[spu->number].ctx_sw_seen = 1;

        spin_unlock_irqrestore(&buffer_lock, flags);
        smp_wmb();      /* ensure spu event buffer updates are written */
                        /* don't want entries intermingled... */
out:
        return retval;
}
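
/* The context-switch record emitted above has the form:
 *
 *   ESCAPE_CODE, SPU_CTX_SWITCH_CODE,
 *   spu number, pid, tgid, application dcookie, SPU binary dcookie,
 *   offset of the SPU image within that binary
 *
 * As the comment in process_context_switch() notes, postprocessing of
 * subsequent PC samples depends on this record appearing first.
 */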

/*
 * This function is invoked on either a bind_context or unbind_context.
 * If called for an unbind_context, the val arg is 0; otherwise,
 * it is the object-id value for the spu context.
 * The data arg is of type 'struct spu *'.
 */
static int spu_active_notify(struct notifier_block *self, unsigned long val,
                                void *data)
{
        int retval;
        unsigned long flags;
        struct spu *the_spu = data;

        pr_debug("SPU event notification arrived\n");
        if (!val) {
                spin_lock_irqsave(&cache_lock, flags);
                retval = release_cached_info(the_spu->number);
                spin_unlock_irqrestore(&cache_lock, flags);
        } else {
                retval = process_context_switch(the_spu, val);
        }
        return retval;
}

static struct notifier_block spu_active = {
        .notifier_call = spu_active_notify,
};

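/* Count the online Cell nodes.  Note this assumes cbe_cpu_to_node()
 * returns node numbers that are dense and start at zero, so the
 * running count converges to (highest node number + 1).
 */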
static int number_of_online_nodes(void)
{
        u32 cpu;
        u32 tmp;
        int nodes = 0;

        for_each_online_cpu(cpu) {
                tmp = cbe_cpu_to_node(cpu) + 1;
                if (tmp > nodes)
                        nodes++;
        }
        return nodes;
}

static int oprofile_spu_buff_create(void)
{
        int spu;

        max_spu_buff = oprofile_get_cpu_buffer_size();

        for (spu = 0; spu < num_spu_nodes; spu++) {
                /* create circular buffers to store the data in.
                 * use locks to manage accessing the buffers
                 */
                spu_buff[spu].head = 0;
                spu_buff[spu].tail = 0;

                /*
                 * Create a buffer for each SPU.  Can't reliably
                 * create a single buffer for all spus due to not
                 * enough contiguous kernel memory.
                 */

                spu_buff[spu].buff = kzalloc((max_spu_buff
                                              * sizeof(unsigned long)),
                                             GFP_KERNEL);

                if (!spu_buff[spu].buff) {
                        printk(KERN_ERR "SPU_PROF: "
                               "%s, line %d: oprofile_spu_buff_create "
                               "failed to allocate spu buffer %d.\n",
                               __func__, __LINE__, spu);

                        /* release the spu buffers that have been allocated */
                        while (spu >= 0) {
                                kfree(spu_buff[spu].buff);
                                spu_buff[spu].buff = NULL;
                                spu--;
                        }
                        return -ENOMEM;
                }
        }
        return 0;
}

/* The main purpose of this function is to synchronize
 * OProfile with SPUFS by registering to be notified of
 * SPU task switches.
 *
 * NOTE: When profiling SPUs, we must ensure that only
 * spu_sync_start is invoked and not the generic sync_start
 * in drivers/oprofile/oprof.c.  A return value of
 * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
 * accomplish this.
 */
int spu_sync_start(void)
{
        int spu;
        int ret = SKIP_GENERIC_SYNC;
        int register_ret;
        unsigned long flags = 0;

        spu_prof_num_nodes = number_of_online_nodes();
        num_spu_nodes = spu_prof_num_nodes * SPUS_PER_NODE;
        INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);

        /* create buffer for storing the SPU data to put in
         * the kernel buffer.
         */
        ret = oprofile_spu_buff_create();
        if (ret)
                goto out;

        spin_lock_irqsave(&buffer_lock, flags);
        for (spu = 0; spu < num_spu_nodes; spu++) {
                spu_buff_add(ESCAPE_CODE, spu);
                spu_buff_add(SPU_PROFILING_CODE, spu);
                spu_buff_add(num_spu_nodes, spu);
        }
        spin_unlock_irqrestore(&buffer_lock, flags);
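
        /* Each per-SPU buffer now begins with the header record
         * ESCAPE_CODE, SPU_PROFILING_CODE, num_spu_nodes, marking the
         * stream as SPU profiling data for the post-processor.
         */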

        for (spu = 0; spu < num_spu_nodes; spu++) {
                spu_buff[spu].ctx_sw_seen = 0;
                spu_buff[spu].last_guard_val = 0;
        }

        /* Register for SPU events */
        register_ret = spu_switch_event_register(&spu_active);
        if (register_ret) {
                ret = SYNC_START_ERROR;
                goto out;
        }

        pr_debug("spu_sync_start -- running.\n");
out:
        return ret;
}

/* Record SPU program counter samples to the oprofile event buffer. */
void spu_sync_buffer(int spu_num, unsigned int *samples,
                     int num_samples)
{
        unsigned long long file_offset;
        unsigned long flags;
        int i;
        struct vma_to_fileoffset_map *map;
        struct spu *the_spu;
        unsigned long long spu_num_ll = spu_num;
        unsigned long long spu_num_shifted = spu_num_ll << 32;
        struct cached_info *c_info;

        /* We need to obtain the cache_lock here because it's
         * possible that after getting the cached_info, the SPU job
         * corresponding to this cached_info may end, thus resulting
         * in the destruction of the cached_info.
         */
        spin_lock_irqsave(&cache_lock, flags);
        c_info = get_cached_info(NULL, spu_num);
        if (!c_info) {
                /* This legitimately happens when the SPU task ends before all
                 * samples are recorded.
                 * No big deal -- so we just drop a few samples.
                 */
                pr_debug("SPU_PROF: No cached SPU context "
                          "for SPU #%d. Dropping samples.\n", spu_num);
                goto out;
        }

        map = c_info->map;
        the_spu = c_info->the_spu;
        spin_lock(&buffer_lock);
        for (i = 0; i < num_samples; i++) {
                unsigned int sample = *(samples + i);
                int grd_val = 0;

                file_offset = 0;
                if (sample == 0)
                        continue;
                file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);

                /* If overlays are used by this SPU application, the guard
                 * value is non-zero, indicating which overlay section is in
                 * use.  We need to discard samples taken during the time
                 * period in which an overlay change occurs (i.e., the guard
                 * value changes).
                 */
                if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
                        spu_buff[spu_num].last_guard_val = grd_val;
                        /* Drop the rest of the samples. */
                        break;
                }

                /* We must ensure that the SPU context switch has been written
                 * out before samples for the SPU.  Otherwise, the SPU context
                 * information is not available and the postprocessing of the
                 * SPU PC will fail with no available anonymous map information.
                 */
                if (spu_buff[spu_num].ctx_sw_seen)
                        spu_buff_add((file_offset | spu_num_shifted),
                                         spu_num);
        }
        spin_unlock(&buffer_lock);
out:
        spin_unlock_irqrestore(&cache_lock, flags);
}
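
/* Each sample recorded above is packed as (spu_num << 32) | file_offset,
 * so a single unsigned long carries both which SPU the sample came from
 * and the program-counter offset within the SPU binary.
 */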

int spu_sync_stop(void)
{
        unsigned long flags = 0;
        int ret;
        int k;

        ret = spu_switch_event_unregister(&spu_active);

        if (ret)
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: spu_switch_event_unregister "
                       "returned %d\n",
                       __func__, __LINE__, ret);

        /* flush any remaining data in the per SPU buffers */
        sync_spu_buff();

        spin_lock_irqsave(&cache_lock, flags);
        ret = release_cached_info(RELEASE_ALL);
        spin_unlock_irqrestore(&cache_lock, flags);

        /* remove scheduled work queue item rather than waiting
         * for every queued entry to execute.  Then flush pending
         * system wide buffer to event buffer.
         */
        cancel_delayed_work(&spu_work);

        for (k = 0; k < num_spu_nodes; k++) {
                spu_buff[k].ctx_sw_seen = 0;

                /*
                 * spu_buff[k].buff will be NULL if there was a problem
                 * allocating the buffer; kfree() handles NULL, so free
                 * and clear it unconditionally.
                 */
                kfree(spu_buff[k].buff);
                spu_buff[k].buff = NULL;
        }
        pr_debug("spu_sync_stop -- done.\n");
        return ret;
}
