TOMOYO Linux Cross Reference
Linux/arch/powerpc/platforms/cell/spufs/run.c


#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
        struct spu_context *ctx = spu->ctx;

        /*
         * It should be impossible to preempt a context while an exception
         * is being processed, since the context switch code is specially
         * coded to deal with interrupts ... But, just in case, sanity check
         * the context pointer.  It is OK to return doing nothing since
         * the exception will be regenerated when the context is resumed.
         */
        if (ctx) {
                /* Copy exception arguments into module specific structure */
                switch(irq) {
                case 0 :
                        ctx->csa.class_0_pending = spu->class_0_pending;
                        ctx->csa.class_0_dar = spu->class_0_dar;
                        break;
                case 1 :
                        ctx->csa.class_1_dsisr = spu->class_1_dsisr;
                        ctx->csa.class_1_dar = spu->class_1_dar;
                        break;
                case 2 :
                        break;
                }

                /* ensure that the exception status has hit memory before a
                 * thread waiting on the context's stop queue is woken */
                smp_wmb();

                wake_up_all(&ctx->stop_wq);
        }
}

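/*
 * Tell whether the controlling thread has something to process: the SPU
 * reports a stop, halt, single-step or invalid-instruction status, a
 * scheduler notification is pending, or a class 1 (translation) fault or
 * class 0 exception was recorded by the stop callback above.
 */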
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
        u64 dsisr;
        u32 stopped;

        stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
                SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
        *stat = ctx->ops->status_read(ctx);
        if (*stat & stopped) {
                /*
                 * If the spu hasn't finished stopping, we need to
                 * re-read the register to get the stopped value.
                 */
                if (*stat & SPU_STATUS_RUNNING)
                        goto top;
                return 1;
        }

        if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
                return 1;

        dsisr = ctx->csa.class_1_dsisr;
        if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
                return 1;

        if (ctx->csa.class_0_pending)
                return 1;

        return 0;
}

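/*
 * Run the isolated-mode loader on the SPU: purge the MFC DMA queue,
 * temporarily switch the SPE into kernel mode so the loader is
 * accessible, pass the loader address through the signal notification
 * registers and start the SPU with the ISOLATE bit set, then wait for
 * the load to complete.
 */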
static int spu_setup_isolated(struct spu_context *ctx)
{
        int ret;
        u64 __iomem *mfc_cntl;
        u64 sr1;
        u32 status;
        unsigned long timeout;
        const u32 status_loading = SPU_STATUS_RUNNING
                | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

        ret = -ENODEV;
        if (!isolated_loader)
                goto out;

        /*
         * We need to exclude userspace access to the context.
         *
         * To protect against memory access we invalidate all ptes
         * and make sure the pagefault handlers block on the mutex.
         */
        spu_unmap_mappings(ctx);

        mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

        /* purge the MFC DMA queue to ensure no spurious accesses before we
         * enter kernel mode */
        timeout = jiffies + HZ;
        out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
        while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
                        != MFC_CNTL_PURGE_DMA_COMPLETE) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
                                        __func__);
                        ret = -EIO;
                        goto out;
                }
                cond_resched();
        }

        /* clear purge status */
        out_be64(mfc_cntl, 0);

        /* put the SPE in kernel mode to allow access to the loader */
        sr1 = spu_mfc_sr1_get(ctx->spu);
        sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

        /* start the loader */
        ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
        ctx->ops->signal2_write(ctx,
                        (unsigned long)isolated_loader & 0xffffffff);

        ctx->ops->runcntl_write(ctx,
                        SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

        ret = 0;
        timeout = jiffies + HZ;
        while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
                                status_loading) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout waiting for loader\n",
                                        __func__);
                        ret = -EIO;
                        goto out_drop_priv;
                }
                cond_resched();
        }

        if (!(status & SPU_STATUS_RUNNING)) {
                /* If isolated LOAD has failed: run SPU, we will get a stop-and
                 * signal later. */
                pr_debug("%s: isolated LOAD failed\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
                ret = -EACCES;
                goto out_drop_priv;
        }

        if (!(status & SPU_STATUS_ISOLATED_STATE)) {
                /* This isn't allowed by the CBEA, but check anyway */
                pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
                ret = -EINVAL;
                goto out_drop_priv;
        }

out_drop_priv:
        /* Finished accessing the loader. Drop kernel mode */
        sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

out:
        return ret;
}

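/*
 * Prepare a context for running: activate it if it is still saved,
 * perform isolated-mode setup when requested, program single-step mode
 * and the NPC for normal contexts, and finally write the run control
 * register to set the SPU running.
 */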
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
        unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
        int ret;

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        /*
         * NOSCHED is synchronous scheduling with respect to the caller.
         * The caller waits for the context to be loaded.
         */
        if (ctx->flags & SPU_CREATE_NOSCHED) {
                if (ctx->state == SPU_STATE_SAVED) {
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
                }
        }

        /*
         * Apply special setup as required.
         */
        if (ctx->flags & SPU_CREATE_ISOLATE) {
                if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
                        ret = spu_setup_isolated(ctx);
                        if (ret)
                                return ret;
                }

                /*
                 * If userspace has set the runcntrl register (eg, to
                 * issue an isolated exit), we need to re-set it here
                 */
                runcntl = ctx->ops->runcntl_read(ctx) &
                        (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                if (runcntl == 0)
                        runcntl = SPU_RUNCNTL_RUNNABLE;
        } else {
                unsigned long privcntl;

                if (test_thread_flag(TIF_SINGLESTEP))
                        privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
                else
                        privcntl = SPU_PRIVCNTL_MODE_NORMAL;

                ctx->ops->privcntl_write(ctx, privcntl);
                ctx->ops->npc_write(ctx, *npc);
        }

        ctx->ops->runcntl_write(ctx, runcntl);

        if (ctx->flags & SPU_CREATE_NOSCHED) {
                spuctx_switch_state(ctx, SPU_UTIL_USER);
        } else {

                if (ctx->state == SPU_STATE_SAVED) {
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
                } else {
                        spuctx_switch_state(ctx, SPU_UTIL_USER);
                }
        }

        set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
        return 0;
}

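/*
 * Tear down after the SPU has stopped: read back status and NPC for the
 * caller, update the utilization state and switch log, and release the
 * context.
 */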
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
                               u32 *status)
{
        int ret = 0;

        spu_del_from_rq(ctx);

        *status = ctx->ops->status_read(ctx);
        *npc = ctx->ops->npc_read(ctx);

        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
        clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
        spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
        spu_release(ctx);

        if (signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs in PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
                          unsigned int *npc)
{
        int ret;

        switch (*spu_ret) {
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
                /*
                 * Enter the regular syscall restarting for
                 * sys_spu_run, then restart the SPU syscall
                 * callback.
                 */
                *npc -= 8;
                ret = -ERESTARTSYS;
                break;
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * Restart block is too hard for now, just return -EINTR
                 * to the SPU.
                 * ERESTARTNOHAND comes from sys_pause, we also return
                 * -EINTR from there.
                 * Assume that we need to be restarted ourselves though.
                 */
                *spu_ret = -EINTR;
                ret = -ERESTARTSYS;
                break;
        default:
                printk(KERN_WARNING "%s: unexpected return code %ld\n",
                        __func__, *spu_ret);
                ret = 0;
        }
        return ret;
}

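/*
 * Handle a callback from the SPU program (stop code 0x2104): fetch the
 * spu_syscall_block it left in local store, run the requested system
 * call on the PowerPC side via spu_sys_callback(), write the result
 * back to local store and restart the SPU after the callback.
 */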
static int spu_process_callback(struct spu_context *ctx)
{
        struct spu_syscall_block s;
        u32 ls_pointer, npc;
        void __iomem *ls;
        long spu_ret;
        int ret;

        /* get syscall block from local store */
        npc = ctx->ops->npc_read(ctx) & ~3;
        ls = (void __iomem *)ctx->ops->get_ls(ctx);
        ls_pointer = in_be32(ls + npc);
        if (ls_pointer > (LS_SIZE - sizeof(s)))
                return -EFAULT;
        memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

        /* do actual syscall without pinning the spu */
        ret = 0;
        spu_ret = -ENOSYS;
        npc += 4;

        if (s.nr_ret < __NR_syscalls) {
                spu_release(ctx);
                /* do actual system call from here */
                spu_ret = spu_sys_callback(&s);
                if (spu_ret <= -ERESTARTSYS) {
                        ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
                }
                mutex_lock(&ctx->state_mutex);
                if (ret == -ERESTARTSYS)
                        return ret;
        }

        /* need to re-get the ls, as it may have changed when we released the
         * spu */
        ls = (void __iomem *)ctx->ops->get_ls(ctx);

        /* write result, jump over indirect pointer */
        memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
        ctx->ops->npc_write(ctx, npc);
        ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
        return ret;
}

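/*
 * Back end of the spu_run system call: start the context and loop,
 * waiting for it to stop and handling syscall callbacks, class 0/1
 * exceptions and signals, until the SPU program stops, halts or
 * single-steps.  Returns the SPU status word or a negative errno.
 */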
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
        int ret;
        struct spu *spu;
        u32 status;

        if (mutex_lock_interruptible(&ctx->run_mutex))
                return -ERESTARTSYS;

        ctx->event_return = 0;

        ret = spu_acquire(ctx);
        if (ret)
                goto out_unlock;

        spu_enable_spu(ctx);

        spu_update_sched_info(ctx);

        ret = spu_run_init(ctx, npc);
        if (ret) {
                spu_release(ctx);
                goto out;
        }

        do {
                ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
                if (unlikely(ret)) {
                        /*
                         * This is nasty: we need the state_mutex for all the
                         * bookkeeping even if the syscall was interrupted by
                         * a signal. ewww.
                         */
                        mutex_lock(&ctx->state_mutex);
                        break;
                }
                spu = ctx->spu;
                if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
                                                &ctx->sched_flags))) {
                        if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
                                spu_switch_notify(spu, ctx);
                                continue;
                        }
                }

                spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

                if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
                    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
                        ret = spu_process_callback(ctx);
                        if (ret)
                                break;
                        status &= ~SPU_STATUS_STOPPED_BY_STOP;
                }
                ret = spufs_handle_class1(ctx);
                if (ret)
                        break;

                ret = spufs_handle_class0(ctx);
                if (ret)
                        break;

                if (signal_pending(current))
                        ret = -ERESTARTSYS;
        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                      SPU_STATUS_STOPPED_BY_HALT |
                                      SPU_STATUS_SINGLE_STEP)));

        spu_disable_spu(ctx);
        ret = spu_run_fini(ctx, npc, &status);
        spu_yield(ctx);

        if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
            (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
                ctx->stats.libassist++;

        if ((ret == 0) ||
            ((ret == -ERESTARTSYS) &&
             ((status & SPU_STATUS_STOPPED_BY_HALT) ||
              (status & SPU_STATUS_SINGLE_STEP) ||
              ((status & SPU_STATUS_STOPPED_BY_STOP) &&
               (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
                ret = status;

        /* Note: we don't need to force_sig SIGTRAP on single-step
         * since we have TIF_SINGLESTEP set, thus the kernel will do
         * it upon return from the syscall anyway.
         */
        if (unlikely(status & SPU_STATUS_SINGLE_STEP))
                ret = -ERESTARTSYS;

        else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
            && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
                force_sig(SIGTRAP, current);
                ret = -ERESTARTSYS;
        }

out:
        *event = ctx->event_return;
out_unlock:
        mutex_unlock(&ctx->run_mutex);
        return ret;
}
