TOMOYO Linux Cross Reference
Linux/arch/sparc/kernel/process_64.c


  1 /*  arch/sparc64/kernel/process.c
  2  *
  3  *  Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
  4  *  Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
  5  *  Copyright (C) 1997, 1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
  6  */
  7 
  8 /*
  9  * This file handles the architecture-dependent parts of process handling.
 10  */
 11 
 12 #include <stdarg.h>
 13 
 14 #include <linux/errno.h>
 15 #include <linux/export.h>
 16 #include <linux/sched.h>
 17 #include <linux/kernel.h>
 18 #include <linux/mm.h>
 19 #include <linux/fs.h>
 20 #include <linux/smp.h>
 21 #include <linux/stddef.h>
 22 #include <linux/ptrace.h>
 23 #include <linux/slab.h>
 24 #include <linux/user.h>
 25 #include <linux/delay.h>
 26 #include <linux/compat.h>
 27 #include <linux/tick.h>
 28 #include <linux/init.h>
 29 #include <linux/cpu.h>
 30 #include <linux/perf_event.h>
 31 #include <linux/elfcore.h>
 32 #include <linux/sysrq.h>
 33 #include <linux/nmi.h>
 34 #include <linux/context_tracking.h>
 35 
 36 #include <asm/uaccess.h>
 37 #include <asm/page.h>
 38 #include <asm/pgalloc.h>
 39 #include <asm/pgtable.h>
 40 #include <asm/processor.h>
 41 #include <asm/pstate.h>
 42 #include <asm/elf.h>
 43 #include <asm/fpumacro.h>
 44 #include <asm/head.h>
 45 #include <asm/cpudata.h>
 46 #include <asm/mmu_context.h>
 47 #include <asm/unistd.h>
 48 #include <asm/hypervisor.h>
 49 #include <asm/syscalls.h>
 50 #include <asm/irq_regs.h>
 51 #include <asm/smp.h>
 52 #include <asm/pcr.h>
 53 
 54 #include "kstack.h"
 55 
 56 /* Idle loop support on sparc64. */
 57 void arch_cpu_idle(void)
 58 {
 59         if (tlb_type != hypervisor) {
 60                 touch_nmi_watchdog();
 61                 local_irq_enable();
 62         } else {
 63                 unsigned long pstate;
 64 
 65                 local_irq_enable();
 66 
 67                 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
 68                  * the cpu sleep hypervisor call.
 69                  */
 70                 __asm__ __volatile__(
 71                         "rdpr %%pstate, %0\n\t"
 72                         "andn %0, %1, %0\n\t"
 73                         "wrpr %0, %%g0, %%pstate"
 74                         : "=&r" (pstate)
 75                         : "i" (PSTATE_IE));
 76 
 77                 if (!need_resched() && !cpu_is_offline(smp_processor_id()))
 78                         sun4v_cpu_yield();
 79 
 80                 /* Re-enable interrupts. */
 81                 __asm__ __volatile__(
 82                         "rdpr %%pstate, %0\n\t"
 83                         "or %0, %1, %0\n\t"
 84                         "wrpr %0, %%g0, %%pstate"
 85                         : "=&r" (pstate)
 86                         : "i" (PSTATE_IE));
 87         }
 88 }
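/*
 * The two inline-asm blocks above are a plain read-modify-write of
 * %pstate, clearing PSTATE.IE across the sun4v_cpu_yield() call as the
 * comment requires and setting it again afterwards.  A minimal sketch
 * of how they could be factored into helpers (hypothetical names, same
 * asm as above; kernel context only, not buildable standalone):
 */
static inline void sun4v_idle_irq_mask(void)
{
        unsigned long pstate;

        __asm__ __volatile__(
                "rdpr %%pstate, %0\n\t"
                "andn %0, %1, %0\n\t"
                "wrpr %0, %%g0, %%pstate"
                : "=&r" (pstate)
                : "i" (PSTATE_IE));
}

static inline void sun4v_idle_irq_unmask(void)
{
        unsigned long pstate;

        __asm__ __volatile__(
                "rdpr %%pstate, %0\n\t"
                "or %0, %1, %0\n\t"
                "wrpr %0, %%g0, %%pstate"
                : "=&r" (pstate)
                : "i" (PSTATE_IE));
}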
 89 
 90 #ifdef CONFIG_HOTPLUG_CPU
 91 void arch_cpu_idle_dead(void)
 92 {
 93         sched_preempt_enable_no_resched();
 94         cpu_play_dead();
 95 }
 96 #endif
 97 
 98 #ifdef CONFIG_COMPAT
 99 static void show_regwindow32(struct pt_regs *regs)
100 {
101         struct reg_window32 __user *rw;
102         struct reg_window32 r_w;
103         mm_segment_t old_fs;
104         
105         __asm__ __volatile__ ("flushw");
106         rw = compat_ptr((unsigned)regs->u_regs[14]);
107         old_fs = get_fs();
108         set_fs (USER_DS);
109         if (copy_from_user (&r_w, rw, sizeof(r_w))) {
110                 set_fs (old_fs);
111                 return;
112         }
113 
114         set_fs (old_fs);                        
115         printk("l0: %08x l1: %08x l2: %08x l3: %08x "
116                "l4: %08x l5: %08x l6: %08x l7: %08x\n",
117                r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
118                r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
119         printk("i0: %08x i1: %08x i2: %08x i3: %08x "
120                "i4: %08x i5: %08x i6: %08x i7: %08x\n",
121                r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
122                r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
123 }
124 #else
125 #define show_regwindow32(regs)  do { } while (0)
126 #endif
127 
128 static void show_regwindow(struct pt_regs *regs)
129 {
130         struct reg_window __user *rw;
131         struct reg_window *rwk;
132         struct reg_window r_w;
133         mm_segment_t old_fs;
134 
135         if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
136                 __asm__ __volatile__ ("flushw");
137                 rw = (struct reg_window __user *)
138                         (regs->u_regs[14] + STACK_BIAS);
139                 rwk = (struct reg_window *)
140                         (regs->u_regs[14] + STACK_BIAS);
141                 if (!(regs->tstate & TSTATE_PRIV)) {
142                         old_fs = get_fs();
143                         set_fs (USER_DS);
144                         if (copy_from_user (&r_w, rw, sizeof(r_w))) {
145                                 set_fs (old_fs);
146                                 return;
147                         }
148                         rwk = &r_w;
149                         set_fs (old_fs);                        
150                 }
151         } else {
152                 show_regwindow32(regs);
153                 return;
154         }
155         printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
156                rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
157         printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
158                rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
159         printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
160                rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
161         printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
162                rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
163         if (regs->tstate & TSTATE_PRIV)
164                 printk("I7: <%pS>\n", (void *) rwk->ins[7]);
165 }
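/*
 * The dumpers above (and thread_saved_pc() further down) rely on the
 * layout of a spilled register window: eight locals followed by eight
 * ins, with ins[6] holding the caller's frame pointer and ins[7] the
 * return address.  Sketch of the structures being read here (they
 * mirror the asm/ptrace.h definitions; 64-bit stack pointers are
 * additionally offset by STACK_BIAS, which is why it is added back
 * before dereferencing):
 */
struct reg_window_sketch {              /* 64-bit frame, cf. struct reg_window */
        unsigned long locals[8];        /* %l0..%l7 */
        unsigned long ins[8];           /* %i0..%i7; [6] = %fp, [7] = return addr */
};

struct reg_window32_sketch {            /* 32-bit frame, cf. struct reg_window32 */
        unsigned int locals[8];
        unsigned int ins[8];
};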
166 
167 void show_regs(struct pt_regs *regs)
168 {
169         show_regs_print_info(KERN_DEFAULT);
170 
171         printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
172                regs->tpc, regs->tnpc, regs->y, print_tainted());
173         printk("TPC: <%pS>\n", (void *) regs->tpc);
174         printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
175                regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
176                regs->u_regs[3]);
177         printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
178                regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
179                regs->u_regs[7]);
180         printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
181                regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
182                regs->u_regs[11]);
183         printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
184                regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
185                regs->u_regs[15]);
186         printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
187         show_regwindow(regs);
188         show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
189 }
190 
191 union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
192 static DEFINE_SPINLOCK(global_cpu_snapshot_lock);
193 
194 static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
195                               int this_cpu)
196 {
197         struct global_reg_snapshot *rp;
198 
199         flushw_all();
200 
201         rp = &global_cpu_snapshot[this_cpu].reg;
202 
203         rp->tstate = regs->tstate;
204         rp->tpc = regs->tpc;
205         rp->tnpc = regs->tnpc;
206         rp->o7 = regs->u_regs[UREG_I7];
207 
208         if (regs->tstate & TSTATE_PRIV) {
209                 struct reg_window *rw;
210 
211                 rw = (struct reg_window *)
212                         (regs->u_regs[UREG_FP] + STACK_BIAS);
213                 if (kstack_valid(tp, (unsigned long) rw)) {
214                         rp->i7 = rw->ins[7];
215                         rw = (struct reg_window *)
216                                 (rw->ins[6] + STACK_BIAS);
217                         if (kstack_valid(tp, (unsigned long) rw))
218                                 rp->rpc = rw->ins[7];
219                 }
220         } else {
221                 rp->i7 = 0;
222                 rp->rpc = 0;
223         }
224         rp->thread = tp;
225 }
226 
227 /* In order to avoid hangs we do not try to synchronize with the
228  * global register dump client cpus.  The last store they make is to
229  * the thread pointer, so do a short poll waiting for that to become
230  * non-NULL.
231  */
232 static void __global_reg_poll(struct global_reg_snapshot *gp)
233 {
234         int limit = 0;
235 
236         while (!gp->thread && ++limit < 100) {
237                 barrier();
238                 udelay(1);
239         }
240 }
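/*
 * The handshake described in the comment above, in miniature: the cpu
 * filling a snapshot slot stores every field first and the thread
 * pointer last (__global_reg_self() above does exactly that), so the
 * reader only needs to poll that one pointer.  A userspace sketch of
 * the same pattern (hypothetical types; real code would want explicit
 * memory barriers rather than relying on plain stores):
 */
struct snap_slot {
        unsigned long tstate, tpc;
        void *thread;                   /* written last: acts as the "done" flag */
};

static void snap_fill(struct snap_slot *s, void *me)
{
        s->tstate = 1;
        s->tpc = 2;
        s->thread = me;                 /* final store */
}

static int snap_poll(struct snap_slot *s, int limit)
{
        while (!s->thread && --limit)
                ;                       /* kernel loop adds barrier() + udelay(1) */
        return s->thread != NULL;
}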
241 
242 void arch_trigger_all_cpu_backtrace(void)
243 {
244         struct thread_info *tp = current_thread_info();
245         struct pt_regs *regs = get_irq_regs();
246         unsigned long flags;
247         int this_cpu, cpu;
248 
249         if (!regs)
250                 regs = tp->kregs;
251 
252         spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
253 
254         memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
255 
256         this_cpu = raw_smp_processor_id();
257 
258         __global_reg_self(tp, regs, this_cpu);
259 
260         smp_fetch_global_regs();
261 
262         for_each_online_cpu(cpu) {
263                 struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
264 
265                 __global_reg_poll(gp);
266 
267                 tp = gp->thread;
268                 printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
269                        (cpu == this_cpu ? '*' : ' '), cpu,
270                        gp->tstate, gp->tpc, gp->tnpc,
271                        ((tp && tp->task) ? tp->task->comm : "NULL"),
272                        ((tp && tp->task) ? tp->task->pid : -1));
273 
274                 if (gp->tstate & TSTATE_PRIV) {
275                         printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
276                                (void *) gp->tpc,
277                                (void *) gp->o7,
278                                (void *) gp->i7,
279                                (void *) gp->rpc);
280                 } else {
281                         printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
282                                gp->tpc, gp->o7, gp->i7, gp->rpc);
283                 }
284 
285                 touch_nmi_watchdog();
286         }
287 
288         memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
289 
290         spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
291 }
292 
293 #ifdef CONFIG_MAGIC_SYSRQ
294 
295 static void sysrq_handle_globreg(int key)
296 {
297         arch_trigger_all_cpu_backtrace();
298 }
299 
300 static struct sysrq_key_op sparc_globalreg_op = {
301         .handler        = sysrq_handle_globreg,
302         .help_msg       = "global-regs(y)",
303         .action_msg     = "Show Global CPU Regs",
304 };
305 
306 static void __global_pmu_self(int this_cpu)
307 {
308         struct global_pmu_snapshot *pp;
309         int i, num;
310 
311         if (!pcr_ops)
312                 return;
313 
314         pp = &global_cpu_snapshot[this_cpu].pmu;
315 
316         num = 1;
317         if (tlb_type == hypervisor &&
318             sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
319                 num = 4;
320 
321         for (i = 0; i < num; i++) {
322                 pp->pcr[i] = pcr_ops->read_pcr(i);
323                 pp->pic[i] = pcr_ops->read_pic(i);
324         }
325 }
326 
327 static void __global_pmu_poll(struct global_pmu_snapshot *pp)
328 {
329         int limit = 0;
330 
331         while (!pp->pcr[0] && ++limit < 100) {
332                 barrier();
333                 udelay(1);
334         }
335 }
336 
337 static void pmu_snapshot_all_cpus(void)
338 {
339         unsigned long flags;
340         int this_cpu, cpu;
341 
342         spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
343 
344         memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
345 
346         this_cpu = raw_smp_processor_id();
347 
348         __global_pmu_self(this_cpu);
349 
350         smp_fetch_global_pmu();
351 
352         for_each_online_cpu(cpu) {
353                 struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
354 
355                 __global_pmu_poll(pp);
356 
357                 printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
358                        (cpu == this_cpu ? '*' : ' '), cpu,
359                        pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
360                        pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
361 
362                 touch_nmi_watchdog();
363         }
364 
365         memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
366 
367         spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
368 }
369 
370 static void sysrq_handle_globpmu(int key)
371 {
372         pmu_snapshot_all_cpus();
373 }
374 
375 static struct sysrq_key_op sparc_globalpmu_op = {
376         .handler        = sysrq_handle_globpmu,
377         .help_msg       = "global-pmu(x)",
378         .action_msg     = "Show Global PMU Regs",
379 };
380 
381 static int __init sparc_sysrq_init(void)
382 {
383         int ret = register_sysrq_key('y', &sparc_globalreg_op);
384 
385         if (!ret)
386                 ret = register_sysrq_key('x', &sparc_globalpmu_op);
387         return ret;
388 }
389 
390 core_initcall(sparc_sysrq_init);
391 
392 #endif
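/*
 * With sparc_sysrq_init() registered, both dumps can be requested from
 * userspace through the magic-sysrq interface (root only, kernel built
 * with CONFIG_MAGIC_SYSRQ); the output lands in the kernel log.  A
 * minimal sketch of a trigger program:
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sysrq-trigger", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, "y", 1) != 1)     /* 'y' = global cpu regs, 'x' = global PMU regs */
                return 1;
        close(fd);
        return 0;
}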
393 
394 unsigned long thread_saved_pc(struct task_struct *tsk)
395 {
396         struct thread_info *ti = task_thread_info(tsk);
397         unsigned long ret = 0xdeadbeefUL;
398         
399         if (ti && ti->ksp) {
400                 unsigned long *sp;
401                 sp = (unsigned long *)(ti->ksp + STACK_BIAS);
402                 if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
403                     sp[14]) {
404                         unsigned long *fp;
405                         fp = (unsigned long *)(sp[14] + STACK_BIAS);
406                         if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
407                                 ret = fp[15];
408                 }
409         }
410         return ret;
411 }
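/*
 * thread_saved_pc() indexes the raw save area directly: slot 14 is
 * ins[6] (the saved frame pointer) and slot 15 is ins[7] (the return
 * address), because the window is laid out as locals[0..7] followed by
 * ins[0..7].  A quick userspace check of that arithmetic, using the
 * sketch layout shown earlier:
 */
#include <assert.h>
#include <stddef.h>

struct rw_sketch { unsigned long locals[8]; unsigned long ins[8]; };

int main(void)
{
        assert(offsetof(struct rw_sketch, ins[6]) == 14 * sizeof(unsigned long));
        assert(offsetof(struct rw_sketch, ins[7]) == 15 * sizeof(unsigned long));
        return 0;
}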
412 
413 /* Free current thread data structures etc. */
414 void exit_thread(void)
415 {
416         struct thread_info *t = current_thread_info();
417 
418         if (t->utraps) {
419                 if (t->utraps[0] < 2)
420                         kfree (t->utraps);
421                 else
422                         t->utraps[0]--;
423         }
424 }
425 
426 void flush_thread(void)
427 {
428         struct thread_info *t = current_thread_info();
429         struct mm_struct *mm;
430 
431         mm = t->task->mm;
432         if (mm)
433                 tsb_context_switch(mm);
434 
435         set_thread_wsaved(0);
436 
437         /* Clear FPU register state. */
438         t->fpsaved[0] = 0;
439 }
440 
441 /* It's a bit more tricky when 64-bit tasks are involved... */
442 static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
443 {
444         bool stack_64bit = test_thread_64bit_stack(psp);
445         unsigned long fp, distance, rval;
446 
447         if (stack_64bit) {
448                 csp += STACK_BIAS;
449                 psp += STACK_BIAS;
450                 __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
451                 fp += STACK_BIAS;
452                 if (test_thread_flag(TIF_32BIT))
453                         fp &= 0xffffffff;
454         } else
455                 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
456 
457         /* Now align the stack as this is mandatory in the Sparc ABI
458          * due to how register windows work.  This hides the
459          * restriction from thread libraries etc.
460          */
461         csp &= ~15UL;
462 
463         distance = fp - psp;
464         rval = (csp - distance);
465         if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
466                 rval = 0;
467         else if (!stack_64bit) {
468                 if (put_user(((u32)csp),
469                              &(((struct reg_window32 __user *)rval)->ins[6])))
470                         rval = 0;
471         } else {
472                 if (put_user(((u64)csp - STACK_BIAS),
473                              &(((struct reg_window __user *)rval)->ins[6])))
474                         rval = 0;
475                 else
476                         rval = rval - STACK_BIAS;
477         }
478 
479         return rval;
480 }
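/*
 * Worked example of the clone_stackframe() arithmetic for a 64-bit
 * stack, with made-up addresses (both incoming pointers are register
 * values, i.e. real address minus STACK_BIAS):
 *
 *   psp + STACK_BIAS = P            parent's frame, real address
 *   fp  + STACK_BIAS = P + 0x1c0    parent's saved %fp, real address
 *   distance         = 0x1c0        bytes copied with copy_in_user()
 *   rval             = csp - distance   (csp = child stack, biased and
 *                                         16-byte aligned via csp &= ~15UL)
 *
 * The copied window's ins[6] slot is rewritten to csp - STACK_BIAS
 * (the register form of the new frame base), and the value returned,
 * rval - STACK_BIAS, is what copy_thread() installs as the child's
 * initial frame pointer in u_regs[UREG_FP].
 */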
481 
482 /* Standard stuff. */
483 static inline void shift_window_buffer(int first_win, int last_win,
484                                        struct thread_info *t)
485 {
486         int i;
487 
488         for (i = first_win; i < last_win; i++) {
489                 t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
490                 memcpy(&t->reg_window[i], &t->reg_window[i+1],
491                        sizeof(struct reg_window));
492         }
493 }
494 
495 void synchronize_user_stack(void)
496 {
497         struct thread_info *t = current_thread_info();
498         unsigned long window;
499 
500         flush_user_windows();
501         if ((window = get_thread_wsaved()) != 0) {
502                 window -= 1;
503                 do {
504                         struct reg_window *rwin = &t->reg_window[window];
505                         int winsize = sizeof(struct reg_window);
506                         unsigned long sp;
507 
508                         sp = t->rwbuf_stkptrs[window];
509 
510                         if (test_thread_64bit_stack(sp))
511                                 sp += STACK_BIAS;
512                         else
513                                 winsize = sizeof(struct reg_window32);
514 
515                         if (!copy_to_user((char __user *)sp, rwin, winsize)) {
516                                 shift_window_buffer(window, get_thread_wsaved() - 1, t);
517                                 set_thread_wsaved(get_thread_wsaved() - 1);
518                         }
519                 } while (window--);
520         }
521 }
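/*
 * The per-thread spill buffer behaves like a small array of pending
 * register-window writes: reg_window[i] holds the window contents and
 * rwbuf_stkptrs[i] the user stack address it belongs at.  When a slot
 * is flushed successfully, shift_window_buffer() closes the gap by
 * shifting the tail down over the two parallel arrays; a userspace
 * sketch of that compaction (hypothetical types, illustrative only):
 */
struct win_sketch { unsigned long regs[16]; };

static void drop_slot(unsigned long *stkptrs, struct win_sketch *wins,
                      int first, int last)
{
        int i;

        for (i = first; i < last; i++) {
                stkptrs[i] = stkptrs[i + 1];
                wins[i] = wins[i + 1];
        }
}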
522 
523 static void stack_unaligned(unsigned long sp)
524 {
525         siginfo_t info;
526 
527         info.si_signo = SIGBUS;
528         info.si_errno = 0;
529         info.si_code = BUS_ADRALN;
530         info.si_addr = (void __user *) sp;
531         info.si_trapno = 0;
532         force_sig_info(SIGBUS, &info, current);
533 }
534 
535 void fault_in_user_windows(void)
536 {
537         struct thread_info *t = current_thread_info();
538         unsigned long window;
539 
540         flush_user_windows();
541         window = get_thread_wsaved();
542 
543         if (likely(window != 0)) {
544                 window -= 1;
545                 do {
546                         struct reg_window *rwin = &t->reg_window[window];
547                         int winsize = sizeof(struct reg_window);
548                         unsigned long sp;
549 
550                         sp = t->rwbuf_stkptrs[window];
551 
552                         if (test_thread_64bit_stack(sp))
553                                 sp += STACK_BIAS;
554                         else
555                                 winsize = sizeof(struct reg_window32);
556 
557                         if (unlikely(sp & 0x7UL))
558                                 stack_unaligned(sp);
559 
560                         if (unlikely(copy_to_user((char __user *)sp,
561                                                   rwin, winsize)))
562                                 goto barf;
563                 } while (window--);
564         }
565         set_thread_wsaved(0);
566         return;
567 
568 barf:
569         set_thread_wsaved(window + 1);
570         user_exit();
571         do_exit(SIGILL);
572 }
573 
574 asmlinkage long sparc_do_fork(unsigned long clone_flags,
575                               unsigned long stack_start,
576                               struct pt_regs *regs,
577                               unsigned long stack_size)
578 {
579         int __user *parent_tid_ptr, *child_tid_ptr;
580         unsigned long orig_i1 = regs->u_regs[UREG_I1];
581         long ret;
582 
583 #ifdef CONFIG_COMPAT
584         if (test_thread_flag(TIF_32BIT)) {
585                 parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
586                 child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
587         } else
588 #endif
589         {
590                 parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
591                 child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
592         }
593 
594         ret = do_fork(clone_flags, stack_start, stack_size,
595                       parent_tid_ptr, child_tid_ptr);
596 
597         /* If we get an error and potentially restart the system
598          * call, we're screwed because copy_thread() clobbered
599          * the parent's %o1.  So detect that case and restore it
600          * here.
601          */
602         if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
603                 regs->u_regs[UREG_I1] = orig_i1;
604 
605         return ret;
606 }
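/*
 * The range check above works because error and restart codes are
 * small negative numbers: cast to unsigned long, anything in
 * [-ERESTART_RESTARTBLOCK, -1] compares >= (unsigned long)
 * -ERESTART_RESTARTBLOCK, while any valid child pid does not.  A quick
 * userspace check of the trick (516 standing in for
 * ERESTART_RESTARTBLOCK):
 */
#include <assert.h>

int main(void)
{
        const unsigned long limit = (unsigned long)-516L;

        assert((unsigned long)-1L >= limit);    /* plain -errno: caught   */
        assert((unsigned long)-516L >= limit);  /* restart code: caught   */
        assert((unsigned long)12345L < limit);  /* valid pid: not caught  */
        return 0;
}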
607 
608 /* Copy a Sparc thread.  The fork() return value conventions
609  * under SunOS are nothing short of bletcherous:
609  * Parent -->  %o0 == child's  pid, %o1 == 0
610  * Child  -->  %o0 == parent's pid, %o1 == 1
612  */
613 int copy_thread(unsigned long clone_flags, unsigned long sp,
614                 unsigned long arg, struct task_struct *p)
615 {
616         struct thread_info *t = task_thread_info(p);
617         struct pt_regs *regs = current_pt_regs();
618         struct sparc_stackf *parent_sf;
619         unsigned long child_stack_sz;
620         char *child_trap_frame;
621 
622         /* Calculate offset to stack_frame & pt_regs */
623         child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
624         child_trap_frame = (task_stack_page(p) +
625                             (THREAD_SIZE - child_stack_sz));
626 
627         t->new_child = 1;
628         t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
629         t->kregs = (struct pt_regs *) (child_trap_frame +
630                                        sizeof(struct sparc_stackf));
631         t->fpsaved[0] = 0;
632 
633         if (unlikely(p->flags & PF_KTHREAD)) {
634                 memset(child_trap_frame, 0, child_stack_sz);
635                 __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = 
636                         (current_pt_regs()->tstate + 1) & TSTATE_CWP;
637                 t->current_ds = ASI_P;
638                 t->kregs->u_regs[UREG_G1] = sp; /* function */
639                 t->kregs->u_regs[UREG_G2] = arg;
640                 return 0;
641         }
642 
643         parent_sf = ((struct sparc_stackf *) regs) - 1;
644         memcpy(child_trap_frame, parent_sf, child_stack_sz);
645         if (t->flags & _TIF_32BIT) {
646                 sp &= 0x00000000ffffffffUL;
647                 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
648         }
649         t->kregs->u_regs[UREG_FP] = sp;
650         __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = 
651                 (regs->tstate + 1) & TSTATE_CWP;
652         t->current_ds = ASI_AIUS;
653         if (sp != regs->u_regs[UREG_FP]) {
654                 unsigned long csp;
655 
656                 csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
657                 if (!csp)
658                         return -EFAULT;
659                 t->kregs->u_regs[UREG_FP] = csp;
660         }
661         if (t->utraps)
662                 t->utraps[0]++;
663 
664         /* Set the return value for the child. */
665         t->kregs->u_regs[UREG_I0] = current->pid;
666         t->kregs->u_regs[UREG_I1] = 1;
667 
668         /* Set the second return value for the parent. */
669         regs->u_regs[UREG_I1] = 0;
670 
671         if (clone_flags & CLONE_SETTLS)
672                 t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
673 
674         return 0;
675 }
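/*
 * Given the return-value convention documented above (parent: %o0 =
 * child's pid, %o1 = 0; child: %o0 = parent's pid, %o1 = 1), a
 * userspace fork wrapper only has to inspect %o1 to produce the usual
 * fork() result.  A C-flavored sketch of that conversion (the real
 * libc wrapper is a few instructions of assembly; the name here is
 * hypothetical):
 */
static long fork_result_from_regs(unsigned long o0, unsigned long o1)
{
        /* child: report 0; parent: report the new child's pid */
        return o1 ? 0 : (long)o0;
}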
676 
677 typedef struct {
678         union {
679                 unsigned int    pr_regs[32];
680                 unsigned long   pr_dregs[16];
681         } pr_fr;
682         unsigned int __unused;
683         unsigned int    pr_fsr;
684         unsigned char   pr_qcnt;
685         unsigned char   pr_q_entrysize;
686         unsigned char   pr_en;
687         unsigned int    pr_q[64];
688 } elf_fpregset_t32;
689 
690 /*
691  * fill in the fpu structure for a core dump.
692  */
693 int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
694 {
695         unsigned long *kfpregs = current_thread_info()->fpregs;
696         unsigned long fprs = current_thread_info()->fpsaved[0];
697 
698         if (test_thread_flag(TIF_32BIT)) {
699                 elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
700 
701                 if (fprs & FPRS_DL)
702                         memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
703                                sizeof(unsigned int) * 32);
704                 else
705                         memset(&fpregs32->pr_fr.pr_regs[0], 0,
706                                sizeof(unsigned int) * 32);
707                 fpregs32->pr_qcnt = 0;
708                 fpregs32->pr_q_entrysize = 8;
709                 memset(&fpregs32->pr_q[0], 0,
710                        (sizeof(unsigned int) * 64));
711                 if (fprs & FPRS_FEF) {
712                         fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
713                         fpregs32->pr_en = 1;
714                 } else {
715                         fpregs32->pr_fsr = 0;
716                         fpregs32->pr_en = 0;
717                 }
718         } else {
719                 if(fprs & FPRS_DL)
720                         memcpy(&fpregs->pr_regs[0], kfpregs,
721                                sizeof(unsigned int) * 32);
722                 else
723                         memset(&fpregs->pr_regs[0], 0,
724                                sizeof(unsigned int) * 32);
725                 if(fprs & FPRS_DU)
726                         memcpy(&fpregs->pr_regs[16], kfpregs+16,
727                                sizeof(unsigned int) * 32);
728                 else
729                         memset(&fpregs->pr_regs[16], 0,
730                                sizeof(unsigned int) * 32);
731                 if(fprs & FPRS_FEF) {
732                         fpregs->pr_fsr = current_thread_info()->xfsr[0];
733                         fpregs->pr_gsr = current_thread_info()->gsr[0];
734                 } else {
735                         fpregs->pr_fsr = fpregs->pr_gsr = 0;
736                 }
737                 fpregs->pr_fprs = fprs;
738         }
739         return 1;
740 }
741 EXPORT_SYMBOL(dump_fpu);
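/*
 * dump_fpu() keys everything off the FPRS bits saved in fpsaved[0]:
 * FPRS_DL means the lower half (%f0..%f31) is dirty and worth dumping,
 * FPRS_DU the upper half (%f32..%f62), and FPRS_FEF that the FPU was
 * enabled so %fsr/%gsr are meaningful.  A small decode helper in the
 * same spirit (illustrative only, not used by the code above):
 */
static const char *fprs_state(unsigned long fprs)
{
        if (!(fprs & FPRS_FEF))
                return "fpu disabled";
        if ((fprs & (FPRS_DL | FPRS_DU)) == (FPRS_DL | FPRS_DU))
                return "both halves dirty";
        if (fprs & FPRS_DU)
                return "upper half dirty";
        return (fprs & FPRS_DL) ? "lower half dirty" : "clean";
}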
742 
743 unsigned long get_wchan(struct task_struct *task)
744 {
745         unsigned long pc, fp, bias = 0;
746         struct thread_info *tp;
747         struct reg_window *rw;
748         unsigned long ret = 0;
749         int count = 0; 
750 
751         if (!task || task == current ||
752             task->state == TASK_RUNNING)
753                 goto out;
754 
755         tp = task_thread_info(task);
756         bias = STACK_BIAS;
757         fp = task_thread_info(task)->ksp + bias;
758 
759         do {
760                 if (!kstack_valid(tp, fp))
761                         break;
762                 rw = (struct reg_window *) fp;
763                 pc = rw->ins[7];
764                 if (!in_sched_functions(pc)) {
765                         ret = pc;
766                         goto out;
767                 }
768                 fp = rw->ins[6] + bias;
769         } while (++count < 16);
770 
771 out:
772         return ret;
773 }
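/*
 * get_wchan() is what backs /proc/<pid>/wchan: the first non-scheduler
 * return address found while walking the sleeping task's saved
 * register windows, shown symbolically by procfs.  Minimal userspace
 * reader (pid 1 chosen arbitrarily; a running task reads back as "0"):
 */
#include <stdio.h>

int main(void)
{
        char buf[128];
        FILE *f = fopen("/proc/1/wchan", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("pid 1 wait channel: %s\n", buf);
        fclose(f);
        return 0;
}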
774 
