
TOMOYO Linux Cross Reference
Linux/arch/powerpc/kernel/ptrace.c


  1 /*
  2  *  PowerPC version
  3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  4  *
  5  *  Derived from "arch/m68k/kernel/ptrace.c"
  6  *  Copyright (C) 1994 by Hamish Macdonald
  7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
  8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
  9  *
 10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 11  * and Paul Mackerras (paulus@samba.org).
 12  *
 13  * This file is subject to the terms and conditions of the GNU General
 14  * Public License.  See the file README.legal in the main directory of
 15  * this archive for more details.
 16  */
 17 
 18 #include <linux/kernel.h>
 19 #include <linux/sched.h>
 20 #include <linux/mm.h>
 21 #include <linux/smp.h>
 22 #include <linux/errno.h>
 23 #include <linux/ptrace.h>
 24 #include <linux/regset.h>
 25 #include <linux/tracehook.h>
 26 #include <linux/elf.h>
 27 #include <linux/user.h>
 28 #include <linux/security.h>
 29 #include <linux/signal.h>
 30 #include <linux/seccomp.h>
 31 #include <linux/audit.h>
 32 #include <trace/syscall.h>
 33 #include <linux/hw_breakpoint.h>
 34 #include <linux/perf_event.h>
 35 #include <linux/context_tracking.h>
 36 
 37 #include <linux/uaccess.h>
 38 #include <linux/pkeys.h>
 39 #include <asm/page.h>
 40 #include <asm/pgtable.h>
 41 #include <asm/switch_to.h>
 42 #include <asm/tm.h>
 43 #include <asm/asm-prototypes.h>
 44 #include <asm/debug.h>
 45 
 46 #define CREATE_TRACE_POINTS
 47 #include <trace/events/syscalls.h>
 48 
 49 /*
  50  * The parameter save area on the stack is used to store arguments being passed
  51  * to the callee function and is located at a fixed offset from the stack pointer.
 52  */
 53 #ifdef CONFIG_PPC32
 54 #define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
 55 #else /* CONFIG_PPC32 */
 56 #define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
 57 #endif
 58 
 59 struct pt_regs_offset {
 60         const char *name;
 61         int offset;
 62 };
 63 
 64 #define STR(s)  #s                      /* convert to string */
 65 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
 66 #define GPR_OFFSET_NAME(num)    \
 67         {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
 68         {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
 69 #define REG_OFFSET_END {.name = NULL, .offset = 0}
 70 
 71 #define TVSO(f) (offsetof(struct thread_vr_state, f))
 72 #define TFSO(f) (offsetof(struct thread_fp_state, f))
 73 #define TSO(f)  (offsetof(struct thread_struct, f))
 74 
 75 static const struct pt_regs_offset regoffset_table[] = {
 76         GPR_OFFSET_NAME(0),
 77         GPR_OFFSET_NAME(1),
 78         GPR_OFFSET_NAME(2),
 79         GPR_OFFSET_NAME(3),
 80         GPR_OFFSET_NAME(4),
 81         GPR_OFFSET_NAME(5),
 82         GPR_OFFSET_NAME(6),
 83         GPR_OFFSET_NAME(7),
 84         GPR_OFFSET_NAME(8),
 85         GPR_OFFSET_NAME(9),
 86         GPR_OFFSET_NAME(10),
 87         GPR_OFFSET_NAME(11),
 88         GPR_OFFSET_NAME(12),
 89         GPR_OFFSET_NAME(13),
 90         GPR_OFFSET_NAME(14),
 91         GPR_OFFSET_NAME(15),
 92         GPR_OFFSET_NAME(16),
 93         GPR_OFFSET_NAME(17),
 94         GPR_OFFSET_NAME(18),
 95         GPR_OFFSET_NAME(19),
 96         GPR_OFFSET_NAME(20),
 97         GPR_OFFSET_NAME(21),
 98         GPR_OFFSET_NAME(22),
 99         GPR_OFFSET_NAME(23),
100         GPR_OFFSET_NAME(24),
101         GPR_OFFSET_NAME(25),
102         GPR_OFFSET_NAME(26),
103         GPR_OFFSET_NAME(27),
104         GPR_OFFSET_NAME(28),
105         GPR_OFFSET_NAME(29),
106         GPR_OFFSET_NAME(30),
107         GPR_OFFSET_NAME(31),
108         REG_OFFSET_NAME(nip),
109         REG_OFFSET_NAME(msr),
110         REG_OFFSET_NAME(ctr),
111         REG_OFFSET_NAME(link),
112         REG_OFFSET_NAME(xer),
113         REG_OFFSET_NAME(ccr),
114 #ifdef CONFIG_PPC64
115         REG_OFFSET_NAME(softe),
116 #else
117         REG_OFFSET_NAME(mq),
118 #endif
119         REG_OFFSET_NAME(trap),
120         REG_OFFSET_NAME(dar),
121         REG_OFFSET_NAME(dsisr),
122         REG_OFFSET_END,
123 };
124 
125 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
126 static void flush_tmregs_to_thread(struct task_struct *tsk)
127 {
128         /*
 129          * If the task is not current, it will already have been flushed to
 130          * its thread_struct during __switch_to().
 131          *
 132          * A reclaim flushes ALL the state; otherwise, when not in a transaction,
 133          * the live TM SPRs are saved into the appropriate thread structures.
134          */
135 
136         if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
137                 return;
138 
139         if (MSR_TM_SUSPENDED(mfmsr())) {
140                 tm_reclaim_current(TM_CAUSE_SIGNAL);
141         } else {
142                 tm_enable();
143                 tm_save_sprs(&(tsk->thread));
144         }
145 }
146 #else
147 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
148 #endif
149 
150 /**
151  * regs_query_register_offset() - query register offset from its name
152  * @name:       the name of a register
153  *
154  * regs_query_register_offset() returns the offset of a register in struct
155  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
156  */
157 int regs_query_register_offset(const char *name)
158 {
159         const struct pt_regs_offset *roff;
160         for (roff = regoffset_table; roff->name != NULL; roff++)
161                 if (!strcmp(roff->name, name))
162                         return roff->offset;
163         return -EINVAL;
164 }
165 
166 /**
167  * regs_query_register_name() - query register name from its offset
168  * @offset:     the offset of a register in struct pt_regs.
169  *
170  * regs_query_register_name() returns the name of a register from its
171  * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
172  */
173 const char *regs_query_register_name(unsigned int offset)
174 {
175         const struct pt_regs_offset *roff;
176         for (roff = regoffset_table; roff->name != NULL; roff++)
177                 if (roff->offset == offset)
178                         return roff->name;
179         return NULL;
180 }
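
/*
 * Illustrative sketch (not part of the original source): the two lookup
 * helpers above back the "fetch by register name" machinery used by kprobe
 * events.  A caller holding a struct pt_regs could, under that assumption,
 * do something like:
 *
 *	int off = regs_query_register_offset("msr");
 *
 *	if (off >= 0)
 *		pr_info("%s = %lx\n", regs_query_register_name(off),
 *			regs_get_register(regs, off));
 *
 * regs_get_register() is the offset-based accessor from <asm/ptrace.h>;
 * its use here is an assumption of this sketch.
 */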
181 
182 /*
 183  * Does not yet catch signals sent when the child dies;
 184  * that belongs in exit.c or in signal.c.
185  */
186 
187 /*
188  * Set of msr bits that gdb can change on behalf of a process.
189  */
190 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
191 #define MSR_DEBUGCHANGE 0
192 #else
193 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
194 #endif
195 
196 /*
197  * Max register writeable via put_reg
198  */
199 #ifdef CONFIG_PPC32
200 #define PT_MAX_PUT_REG  PT_MQ
201 #else
202 #define PT_MAX_PUT_REG  PT_CCR
203 #endif
204 
205 static unsigned long get_user_msr(struct task_struct *task)
206 {
207         return task->thread.regs->msr | task->thread.fpexc_mode;
208 }
209 
210 static int set_user_msr(struct task_struct *task, unsigned long msr)
211 {
212         task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
213         task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
214         return 0;
215 }
216 
217 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
218 static unsigned long get_user_ckpt_msr(struct task_struct *task)
219 {
220         return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
221 }
222 
223 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
224 {
225         task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
226         task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
227         return 0;
228 }
229 
230 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
231 {
232         task->thread.ckpt_regs.trap = trap & 0xfff0;
233         return 0;
234 }
235 #endif
236 
237 #ifdef CONFIG_PPC64
238 static int get_user_dscr(struct task_struct *task, unsigned long *data)
239 {
240         *data = task->thread.dscr;
241         return 0;
242 }
243 
244 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
245 {
246         task->thread.dscr = dscr;
247         task->thread.dscr_inherit = 1;
248         return 0;
249 }
250 #else
251 static int get_user_dscr(struct task_struct *task, unsigned long *data)
252 {
253         return -EIO;
254 }
255 
256 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
257 {
258         return -EIO;
259 }
260 #endif
261 
262 /*
263  * We prevent mucking around with the reserved area of trap
 264  * which is used internally by the kernel.
265  */
266 static int set_user_trap(struct task_struct *task, unsigned long trap)
267 {
268         task->thread.regs->trap = trap & 0xfff0;
269         return 0;
270 }
271 
272 /*
273  * Get contents of register REGNO in task TASK.
274  */
275 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
276 {
277         if ((task->thread.regs == NULL) || !data)
278                 return -EIO;
279 
280         if (regno == PT_MSR) {
281                 *data = get_user_msr(task);
282                 return 0;
283         }
284 
285         if (regno == PT_DSCR)
286                 return get_user_dscr(task, data);
287 
288 #ifdef CONFIG_PPC64
289         /*
 290          * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
 291          * is no longer used as a flag, let's force userspace to always see the
 292          * softe value as 1, which means interrupts are not soft-disabled.
293          */
294         if (regno == PT_SOFTE) {
295                 *data = 1;
296                 return  0;
297         }
298 #endif
299 
300         if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
301                 *data = ((unsigned long *)task->thread.regs)[regno];
302                 return 0;
303         }
304 
305         return -EIO;
306 }
307 
308 /*
309  * Write contents of register REGNO in task TASK.
310  */
311 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
312 {
313         if (task->thread.regs == NULL)
314                 return -EIO;
315 
316         if (regno == PT_MSR)
317                 return set_user_msr(task, data);
318         if (regno == PT_TRAP)
319                 return set_user_trap(task, data);
320         if (regno == PT_DSCR)
321                 return set_user_dscr(task, data);
322 
323         if (regno <= PT_MAX_PUT_REG) {
324                 ((unsigned long *)task->thread.regs)[regno] = data;
325                 return 0;
326         }
327         return -EIO;
328 }
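
/*
 * Illustrative sketch (an assumption, not part of the original file): the two
 * helpers above sit behind the classic PTRACE_PEEKUSER/PTRACE_POKEUSER
 * requests, where userspace addresses a register by its PT_* index scaled to
 * a byte offset, e.g.:
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>		// PT_MSR and friends
 *
 *	errno = 0;
 *	long msr = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)(PT_MSR * sizeof(long)), NULL);
 *	if (msr == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 *
 * A matching PTRACE_POKEUSER at the same offset ends up in ptrace_put_reg(),
 * which only allows writes up to PT_MAX_PUT_REG plus the specially handled
 * MSR, TRAP and DSCR slots.
 */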
329 
330 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
331                    unsigned int pos, unsigned int count,
332                    void *kbuf, void __user *ubuf)
333 {
334         int i, ret;
335 
336         if (target->thread.regs == NULL)
337                 return -EIO;
338 
339         if (!FULL_REGS(target->thread.regs)) {
340                 /* We have a partial register set.  Fill 14-31 with bogus values */
341                 for (i = 14; i < 32; i++)
342                         target->thread.regs->gpr[i] = NV_REG_POISON;
343         }
344 
345         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
346                                   target->thread.regs,
347                                   0, offsetof(struct pt_regs, msr));
348         if (!ret) {
349                 unsigned long msr = get_user_msr(target);
350                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
351                                           offsetof(struct pt_regs, msr),
352                                           offsetof(struct pt_regs, msr) +
353                                           sizeof(msr));
354         }
355 
356         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
357                      offsetof(struct pt_regs, msr) + sizeof(long));
358 
359         if (!ret)
360                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
361                                           &target->thread.regs->orig_gpr3,
362                                           offsetof(struct pt_regs, orig_gpr3),
363                                           sizeof(struct pt_regs));
364         if (!ret)
365                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
366                                                sizeof(struct pt_regs), -1);
367 
368         return ret;
369 }
370 
371 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
372                    unsigned int pos, unsigned int count,
373                    const void *kbuf, const void __user *ubuf)
374 {
375         unsigned long reg;
376         int ret;
377 
378         if (target->thread.regs == NULL)
379                 return -EIO;
380 
381         CHECK_FULL_REGS(target->thread.regs);
382 
383         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
384                                  target->thread.regs,
385                                  0, PT_MSR * sizeof(reg));
386 
387         if (!ret && count > 0) {
388                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
389                                          PT_MSR * sizeof(reg),
390                                          (PT_MSR + 1) * sizeof(reg));
391                 if (!ret)
392                         ret = set_user_msr(target, reg);
393         }
394 
395         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
396                      offsetof(struct pt_regs, msr) + sizeof(long));
397 
398         if (!ret)
399                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
400                                          &target->thread.regs->orig_gpr3,
401                                          PT_ORIG_R3 * sizeof(reg),
402                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
403 
404         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
405                 ret = user_regset_copyin_ignore(
406                         &pos, &count, &kbuf, &ubuf,
407                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
408                         PT_TRAP * sizeof(reg));
409 
410         if (!ret && count > 0) {
411                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
412                                          PT_TRAP * sizeof(reg),
413                                          (PT_TRAP + 1) * sizeof(reg));
414                 if (!ret)
415                         ret = set_user_trap(target, reg);
416         }
417 
418         if (!ret)
419                 ret = user_regset_copyin_ignore(
420                         &pos, &count, &kbuf, &ubuf,
421                         (PT_TRAP + 1) * sizeof(reg), -1);
422 
423         return ret;
424 }
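
/*
 * Illustrative sketch (assumption, not part of the original file): gpr_get()
 * and gpr_set() implement the general-purpose regset that userspace reaches
 * through the generic PTRACE_GETREGSET/PTRACE_SETREGSET requests, e.g.:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>			// NT_PRSTATUS
 *
 *	unsigned long gregs[48];		// ELF_NGREG longs on powerpc
 *	struct iovec iov = { .iov_base = gregs, .iov_len = sizeof(gregs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		printf("nip = %lx\n", gregs[32]);	// PT_NIP
 *
 * The buffer size and the PT_NIP index are assumptions of this sketch taken
 * from the uapi headers, not something this file defines.
 */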
425 
426 /*
427  * Regardless of transactions, 'fp_state' holds the current running
428  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
429  * value of all FPR registers for the current transaction.
430  *
431  * Userspace interface buffer layout:
432  *
433  * struct data {
434  *      u64     fpr[32];
435  *      u64     fpscr;
436  * };
437  */
438 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
439                    unsigned int pos, unsigned int count,
440                    void *kbuf, void __user *ubuf)
441 {
442 #ifdef CONFIG_VSX
443         u64 buf[33];
444         int i;
445 
446         flush_fp_to_thread(target);
447 
448         /* copy to local buffer then write that out */
449         for (i = 0; i < 32 ; i++)
450                 buf[i] = target->thread.TS_FPR(i);
451         buf[32] = target->thread.fp_state.fpscr;
452         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
453 #else
454         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
455                      offsetof(struct thread_fp_state, fpr[32]));
456 
457         flush_fp_to_thread(target);
458 
459         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
460                                    &target->thread.fp_state, 0, -1);
461 #endif
462 }
463 
464 /*
465  * Regardless of transactions, 'fp_state' holds the current running
466  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
467  * value of all FPR registers for the current transaction.
468  *
469  * Userspace interface buffer layout:
470  *
471  * struct data {
472  *      u64     fpr[32];
473  *      u64     fpscr;
474  * };
475  *
476  */
477 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
478                    unsigned int pos, unsigned int count,
479                    const void *kbuf, const void __user *ubuf)
480 {
481 #ifdef CONFIG_VSX
482         u64 buf[33];
483         int i;
484 
485         flush_fp_to_thread(target);
486 
487         for (i = 0; i < 32 ; i++)
488                 buf[i] = target->thread.TS_FPR(i);
489         buf[32] = target->thread.fp_state.fpscr;
490 
491         /* copy to local buffer then write that out */
492         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
493         if (i)
494                 return i;
495 
496         for (i = 0; i < 32 ; i++)
497                 target->thread.TS_FPR(i) = buf[i];
498         target->thread.fp_state.fpscr = buf[32];
499         return 0;
500 #else
501         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
502                      offsetof(struct thread_fp_state, fpr[32]));
503 
504         flush_fp_to_thread(target);
505 
506         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
507                                   &target->thread.fp_state, 0, -1);
508 #endif
509 }
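
/*
 * Illustrative sketch (assumption): the FP regset above is exposed to
 * userspace as NT_PRFPREG, and the 33-doubleword layout documented in the
 * comment maps onto a flat buffer:
 *
 *	uint64_t fpregs[33];		// fpr[0..31] followed by fpscr
 *	struct iovec iov = { .iov_base = fpregs, .iov_len = sizeof(fpregs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) == 0)
 *		printf("fpscr = %llx\n", (unsigned long long)fpregs[32]);
 */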
510 
511 #ifdef CONFIG_ALTIVEC
512 /*
513  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 514  * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
515  * corresponding vector registers.  Quadword 32 contains the vscr as the
516  * last word (offset 12) within that quadword.  Quadword 33 contains the
517  * vrsave as the first word (offset 0) within the quadword.
518  *
519  * This definition of the VMX state is compatible with the current PPC32
520  * ptrace interface.  This allows signal handling and ptrace to use the
521  * same structures.  This also simplifies the implementation of a bi-arch
 522  * (combined 32- and 64-bit) gdb.
523  */
524 
525 static int vr_active(struct task_struct *target,
526                      const struct user_regset *regset)
527 {
528         flush_altivec_to_thread(target);
529         return target->thread.used_vr ? regset->n : 0;
530 }
531 
532 /*
533  * Regardless of transactions, 'vr_state' holds the current running
534  * value of all the VMX registers and 'ckvr_state' holds the last
535  * checkpointed value of all the VMX registers for the current
536  * transaction to fall back on in case it aborts.
537  *
538  * Userspace interface buffer layout:
539  *
540  * struct data {
541  *      vector128       vr[32];
542  *      vector128       vscr;
543  *      vector128       vrsave;
544  * };
545  */
546 static int vr_get(struct task_struct *target, const struct user_regset *regset,
547                   unsigned int pos, unsigned int count,
548                   void *kbuf, void __user *ubuf)
549 {
550         int ret;
551 
552         flush_altivec_to_thread(target);
553 
554         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
555                      offsetof(struct thread_vr_state, vr[32]));
556 
557         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
558                                   &target->thread.vr_state, 0,
559                                   33 * sizeof(vector128));
560         if (!ret) {
561                 /*
562                  * Copy out only the low-order word of vrsave.
563                  */
564                 union {
565                         elf_vrreg_t reg;
566                         u32 word;
567                 } vrsave;
568                 memset(&vrsave, 0, sizeof(vrsave));
569 
570                 vrsave.word = target->thread.vrsave;
571 
572                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
573                                           33 * sizeof(vector128), -1);
574         }
575 
576         return ret;
577 }
578 
579 /*
580  * Regardless of transactions, 'vr_state' holds the current running
581  * value of all the VMX registers and 'ckvr_state' holds the last
582  * checkpointed value of all the VMX registers for the current
583  * transaction to fall back on in case it aborts.
584  *
585  * Userspace interface buffer layout:
586  *
587  * struct data {
588  *      vector128       vr[32];
589  *      vector128       vscr;
590  *      vector128       vrsave;
591  * };
592  */
593 static int vr_set(struct task_struct *target, const struct user_regset *regset,
594                   unsigned int pos, unsigned int count,
595                   const void *kbuf, const void __user *ubuf)
596 {
597         int ret;
598 
599         flush_altivec_to_thread(target);
600 
601         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
602                      offsetof(struct thread_vr_state, vr[32]));
603 
604         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
605                                  &target->thread.vr_state, 0,
606                                  33 * sizeof(vector128));
607         if (!ret && count > 0) {
608                 /*
609                  * We use only the first word of vrsave.
610                  */
611                 union {
612                         elf_vrreg_t reg;
613                         u32 word;
614                 } vrsave;
615                 memset(&vrsave, 0, sizeof(vrsave));
616 
617                 vrsave.word = target->thread.vrsave;
618 
619                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
620                                          33 * sizeof(vector128), -1);
621                 if (!ret)
622                         target->thread.vrsave = vrsave.word;
623         }
624 
625         return ret;
626 }
627 #endif /* CONFIG_ALTIVEC */
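
/*
 * Illustrative sketch (assumption): the AltiVec regset above is exported as
 * NT_PPC_VMX, so a debugger can fetch the 34-quadword layout described in the
 * comment before vr_active() in one request:
 *
 *	unsigned char vmx[34 * 16];	// vr0..vr31, vscr, vrsave quadwords
 *	struct iovec iov = { .iov_base = vmx, .iov_len = sizeof(vmx) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_VMX, &iov);
 *
 * Only the note name is taken from the uapi headers; the buffer layout simply
 * mirrors the kernel-side description above.
 */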
628 
629 #ifdef CONFIG_VSX
630 /*
 631  * Currently, to set and get all the VSX state, you need to call
 632  * the FP and VMX calls as well.  This only gets/sets the lower 32
 633  * 128-bit VSX registers.
634  */
635 
636 static int vsr_active(struct task_struct *target,
637                       const struct user_regset *regset)
638 {
639         flush_vsx_to_thread(target);
640         return target->thread.used_vsr ? regset->n : 0;
641 }
642 
643 /*
644  * Regardless of transactions, 'fp_state' holds the current running
645  * value of all FPR registers and 'ckfp_state' holds the last
646  * checkpointed value of all FPR registers for the current
647  * transaction.
648  *
649  * Userspace interface buffer layout:
650  *
651  * struct data {
652  *      u64     vsx[32];
653  * };
654  */
655 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
656                    unsigned int pos, unsigned int count,
657                    void *kbuf, void __user *ubuf)
658 {
659         u64 buf[32];
660         int ret, i;
661 
662         flush_tmregs_to_thread(target);
663         flush_fp_to_thread(target);
664         flush_altivec_to_thread(target);
665         flush_vsx_to_thread(target);
666 
667         for (i = 0; i < 32 ; i++)
668                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
669 
670         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
671                                   buf, 0, 32 * sizeof(double));
672 
673         return ret;
674 }
675 
676 /*
677  * Regardless of transactions, 'fp_state' holds the current running
678  * value of all FPR registers and 'ckfp_state' holds the last
679  * checkpointed value of all FPR registers for the current
680  * transaction.
681  *
682  * Userspace interface buffer layout:
683  *
684  * struct data {
685  *      u64     vsx[32];
686  * };
687  */
688 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
689                    unsigned int pos, unsigned int count,
690                    const void *kbuf, const void __user *ubuf)
691 {
692         u64 buf[32];
 693         int ret, i;
694 
695         flush_tmregs_to_thread(target);
696         flush_fp_to_thread(target);
697         flush_altivec_to_thread(target);
698         flush_vsx_to_thread(target);
699 
700         for (i = 0; i < 32 ; i++)
701                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
702 
703         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
704                                  buf, 0, 32 * sizeof(double));
705         if (!ret)
706                 for (i = 0; i < 32 ; i++)
707                         target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
708 
709         return ret;
710 }
711 #endif /* CONFIG_VSX */
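
/*
 * Illustrative sketch (assumption): the VSX regset above is exported as
 * NT_PPC_VSX and carries only the 32 low doublewords picked out of fp_state;
 * the rest of the VSX state is reconstructed from the FP and VMX regsets:
 *
 *	uint64_t vsx[32];
 *	struct iovec iov = { .iov_base = vsx, .iov_len = sizeof(vsx) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_VSX, &iov);
 */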
712 
713 #ifdef CONFIG_SPE
714 
715 /*
716  * For get_evrregs/set_evrregs functions 'data' has the following layout:
717  *
718  * struct {
719  *   u32 evr[32];
720  *   u64 acc;
721  *   u32 spefscr;
722  * }
723  */
724 
725 static int evr_active(struct task_struct *target,
726                       const struct user_regset *regset)
727 {
728         flush_spe_to_thread(target);
729         return target->thread.used_spe ? regset->n : 0;
730 }
731 
732 static int evr_get(struct task_struct *target, const struct user_regset *regset,
733                    unsigned int pos, unsigned int count,
734                    void *kbuf, void __user *ubuf)
735 {
736         int ret;
737 
738         flush_spe_to_thread(target);
739 
740         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
741                                   &target->thread.evr,
742                                   0, sizeof(target->thread.evr));
743 
744         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
745                      offsetof(struct thread_struct, spefscr));
746 
747         if (!ret)
748                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
749                                           &target->thread.acc,
750                                           sizeof(target->thread.evr), -1);
751 
752         return ret;
753 }
754 
755 static int evr_set(struct task_struct *target, const struct user_regset *regset,
756                    unsigned int pos, unsigned int count,
757                    const void *kbuf, const void __user *ubuf)
758 {
759         int ret;
760 
761         flush_spe_to_thread(target);
762 
763         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
764                                  &target->thread.evr,
765                                  0, sizeof(target->thread.evr));
766 
767         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
768                      offsetof(struct thread_struct, spefscr));
769 
770         if (!ret)
771                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
772                                          &target->thread.acc,
773                                          sizeof(target->thread.evr), -1);
774 
775         return ret;
776 }
777 #endif /* CONFIG_SPE */
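
/*
 * Illustrative sketch (assumption): on SPE-capable CPUs the regset above is
 * exported as NT_PPC_SPE with the packed layout shown in the comment before
 * evr_active():
 *
 *	struct { uint32_t evr[32]; uint64_t acc; uint32_t spefscr; } spe;
 *	struct iovec iov = { .iov_base = &spe, .iov_len = sizeof(spe) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_SPE, &iov);
 */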
778 
779 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
780 /**
781  * tm_cgpr_active - get active number of registers in CGPR
782  * @target:     The target task.
783  * @regset:     The user regset structure.
784  *
785  * This function checks for the active number of available
 786  * registers in the transaction checkpointed GPR category.
787  */
788 static int tm_cgpr_active(struct task_struct *target,
789                           const struct user_regset *regset)
790 {
791         if (!cpu_has_feature(CPU_FTR_TM))
792                 return -ENODEV;
793 
794         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
795                 return 0;
796 
797         return regset->n;
798 }
799 
800 /**
801  * tm_cgpr_get - get CGPR registers
802  * @target:     The target task.
803  * @regset:     The user regset structure.
804  * @pos:        The buffer position.
805  * @count:      Number of bytes to copy.
806  * @kbuf:       Kernel buffer to copy from.
807  * @ubuf:       User buffer to copy into.
808  *
809  * This function gets transaction checkpointed GPR registers.
810  *
811  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
812  * GPR register values for the current transaction to fall back on if it
813  * aborts in between. This function gets those checkpointed GPR registers.
814  * The userspace interface buffer layout is as follows.
815  *
816  * struct data {
817  *      struct pt_regs ckpt_regs;
818  * };
819  */
820 static int tm_cgpr_get(struct task_struct *target,
821                         const struct user_regset *regset,
822                         unsigned int pos, unsigned int count,
823                         void *kbuf, void __user *ubuf)
824 {
825         int ret;
826 
827         if (!cpu_has_feature(CPU_FTR_TM))
828                 return -ENODEV;
829 
830         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
831                 return -ENODATA;
832 
833         flush_tmregs_to_thread(target);
834         flush_fp_to_thread(target);
835         flush_altivec_to_thread(target);
836 
837         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
838                                   &target->thread.ckpt_regs,
839                                   0, offsetof(struct pt_regs, msr));
840         if (!ret) {
841                 unsigned long msr = get_user_ckpt_msr(target);
842 
843                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
844                                           offsetof(struct pt_regs, msr),
845                                           offsetof(struct pt_regs, msr) +
846                                           sizeof(msr));
847         }
848 
849         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
850                      offsetof(struct pt_regs, msr) + sizeof(long));
851 
852         if (!ret)
853                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
854                                           &target->thread.ckpt_regs.orig_gpr3,
855                                           offsetof(struct pt_regs, orig_gpr3),
856                                           sizeof(struct pt_regs));
857         if (!ret)
858                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
859                                                sizeof(struct pt_regs), -1);
860 
861         return ret;
862 }
863 
 864 /**
865  * tm_cgpr_set - set the CGPR registers
866  * @target:     The target task.
867  * @regset:     The user regset structure.
868  * @pos:        The buffer position.
869  * @count:      Number of bytes to copy.
870  * @kbuf:       Kernel buffer to copy into.
871  * @ubuf:       User buffer to copy from.
872  *
 873  * This function sets the in-transaction checkpointed GPR registers.
874  *
875  * When the transaction is active, 'ckpt_regs' holds the checkpointed
876  * GPR register values for the current transaction to fall back on if it
877  * aborts in between. This function sets those checkpointed GPR registers.
878  * The userspace interface buffer layout is as follows.
879  *
880  * struct data {
881  *      struct pt_regs ckpt_regs;
882  * };
883  */
884 static int tm_cgpr_set(struct task_struct *target,
885                         const struct user_regset *regset,
886                         unsigned int pos, unsigned int count,
887                         const void *kbuf, const void __user *ubuf)
888 {
889         unsigned long reg;
890         int ret;
891 
892         if (!cpu_has_feature(CPU_FTR_TM))
893                 return -ENODEV;
894 
895         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
896                 return -ENODATA;
897 
898         flush_tmregs_to_thread(target);
899         flush_fp_to_thread(target);
900         flush_altivec_to_thread(target);
901 
902         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
903                                  &target->thread.ckpt_regs,
904                                  0, PT_MSR * sizeof(reg));
905 
906         if (!ret && count > 0) {
907                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
908                                          PT_MSR * sizeof(reg),
909                                          (PT_MSR + 1) * sizeof(reg));
910                 if (!ret)
911                         ret = set_user_ckpt_msr(target, reg);
912         }
913 
914         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
915                      offsetof(struct pt_regs, msr) + sizeof(long));
916 
917         if (!ret)
918                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
919                                          &target->thread.ckpt_regs.orig_gpr3,
920                                          PT_ORIG_R3 * sizeof(reg),
921                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
922 
923         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
924                 ret = user_regset_copyin_ignore(
925                         &pos, &count, &kbuf, &ubuf,
926                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
927                         PT_TRAP * sizeof(reg));
928 
929         if (!ret && count > 0) {
930                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
931                                          PT_TRAP * sizeof(reg),
932                                          (PT_TRAP + 1) * sizeof(reg));
933                 if (!ret)
934                         ret = set_user_ckpt_trap(target, reg);
935         }
936 
937         if (!ret)
938                 ret = user_regset_copyin_ignore(
939                         &pos, &count, &kbuf, &ubuf,
940                         (PT_TRAP + 1) * sizeof(reg), -1);
941 
942         return ret;
943 }
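
/*
 * Illustrative sketch (assumption): the checkpointed GPR regset above is
 * exported as NT_PPC_TM_CGPR and, per the -ENODATA checks, is only readable
 * while the target has an active transaction:
 *
 *	struct pt_regs ckpt;		// uapi layout from <asm/ptrace.h>
 *	struct iovec iov = { .iov_base = &ckpt, .iov_len = sizeof(ckpt) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CGPR, &iov) != 0)
 *		perror("no TM support or no active transaction");
 */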
944 
945 /**
946  * tm_cfpr_active - get active number of registers in CFPR
947  * @target:     The target task.
948  * @regset:     The user regset structure.
949  *
950  * This function checks for the active number of available
 951  * registers in the transaction checkpointed FPR category.
952  */
953 static int tm_cfpr_active(struct task_struct *target,
954                                 const struct user_regset *regset)
955 {
956         if (!cpu_has_feature(CPU_FTR_TM))
957                 return -ENODEV;
958 
959         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
960                 return 0;
961 
962         return regset->n;
963 }
964 
965 /**
966  * tm_cfpr_get - get CFPR registers
967  * @target:     The target task.
968  * @regset:     The user regset structure.
969  * @pos:        The buffer position.
970  * @count:      Number of bytes to copy.
971  * @kbuf:       Kernel buffer to copy from.
972  * @ubuf:       User buffer to copy into.
973  *
 974  * This function gets the in-transaction checkpointed FPR registers.
975  *
976  * When the transaction is active 'ckfp_state' holds the checkpointed
977  * values for the current transaction to fall back on if it aborts
978  * in between. This function gets those checkpointed FPR registers.
979  * The userspace interface buffer layout is as follows.
980  *
981  * struct data {
982  *      u64     fpr[32];
983  *      u64     fpscr;
984  *};
985  */
986 static int tm_cfpr_get(struct task_struct *target,
987                         const struct user_regset *regset,
988                         unsigned int pos, unsigned int count,
989                         void *kbuf, void __user *ubuf)
990 {
991         u64 buf[33];
992         int i;
993 
994         if (!cpu_has_feature(CPU_FTR_TM))
995                 return -ENODEV;
996 
997         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
998                 return -ENODATA;
999 
1000         flush_tmregs_to_thread(target);
1001         flush_fp_to_thread(target);
1002         flush_altivec_to_thread(target);
1003 
1004         /* copy to local buffer then write that out */
1005         for (i = 0; i < 32 ; i++)
1006                 buf[i] = target->thread.TS_CKFPR(i);
1007         buf[32] = target->thread.ckfp_state.fpscr;
1008         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1009 }
1010 
1011 /**
1012  * tm_cfpr_set - set CFPR registers
1013  * @target:     The target task.
1014  * @regset:     The user regset structure.
1015  * @pos:        The buffer position.
1016  * @count:      Number of bytes to copy.
1017  * @kbuf:       Kernel buffer to copy into.
1018  * @ubuf:       User buffer to copy from.
1019  *
1020  * This function sets the in-transaction checkpointed FPR registers.
1021  *
1022  * When the transaction is active 'ckfp_state' holds the checkpointed
1023  * FPR register values for the current transaction to fall back on
1024  * if it aborts in between. This function sets these checkpointed
1025  * FPR registers. The userspace interface buffer layout is as follows.
1026  *
1027  * struct data {
1028  *      u64     fpr[32];
1029  *      u64     fpscr;
1030  *};
1031  */
1032 static int tm_cfpr_set(struct task_struct *target,
1033                         const struct user_regset *regset,
1034                         unsigned int pos, unsigned int count,
1035                         const void *kbuf, const void __user *ubuf)
1036 {
1037         u64 buf[33];
1038         int i;
1039 
1040         if (!cpu_has_feature(CPU_FTR_TM))
1041                 return -ENODEV;
1042 
1043         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1044                 return -ENODATA;
1045 
1046         flush_tmregs_to_thread(target);
1047         flush_fp_to_thread(target);
1048         flush_altivec_to_thread(target);
1049 
1050         for (i = 0; i < 32; i++)
1051                 buf[i] = target->thread.TS_CKFPR(i);
1052         buf[32] = target->thread.ckfp_state.fpscr;
1053 
1054         /* copy to local buffer then write that out */
1055         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1056         if (i)
1057                 return i;
1058         for (i = 0; i < 32 ; i++)
1059                 target->thread.TS_CKFPR(i) = buf[i];
1060         target->thread.ckfp_state.fpscr = buf[32];
1061         return 0;
1062 }
1063 
1064 /**
1065  * tm_cvmx_active - get active number of registers in CVMX
1066  * @target:     The target task.
1067  * @regset:     The user regset structure.
1068  *
1069  * This function checks for the active number of available
1070  * registers in the checkpointed VMX category.
1071  */
1072 static int tm_cvmx_active(struct task_struct *target,
1073                                 const struct user_regset *regset)
1074 {
1075         if (!cpu_has_feature(CPU_FTR_TM))
1076                 return -ENODEV;
1077 
1078         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1079                 return 0;
1080 
1081         return regset->n;
1082 }
1083 
1084 /**
1085  * tm_cvmx_get - get CVMX registers
1086  * @target:     The target task.
1087  * @regset:     The user regset structure.
1088  * @pos:        The buffer position.
1089  * @count:      Number of bytes to copy.
1090  * @kbuf:       Kernel buffer to copy from.
1091  * @ubuf:       User buffer to copy into.
1092  *
1093  * This function gets the in-transaction checkpointed VMX registers.
1094  *
1095  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1096  * the checkpointed values for the current transaction to fall
1097  * back on if it aborts in between. The userspace interface buffer
1098  * layout is as follows.
1099  *
1100  * struct data {
1101  *      vector128       vr[32];
1102  *      vector128       vscr;
1103  *      vector128       vrsave;
1104  *};
1105  */
1106 static int tm_cvmx_get(struct task_struct *target,
1107                         const struct user_regset *regset,
1108                         unsigned int pos, unsigned int count,
1109                         void *kbuf, void __user *ubuf)
1110 {
1111         int ret;
1112 
1113         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1114 
1115         if (!cpu_has_feature(CPU_FTR_TM))
1116                 return -ENODEV;
1117 
1118         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1119                 return -ENODATA;
1120 
1121         /* Flush the state */
1122         flush_tmregs_to_thread(target);
1123         flush_fp_to_thread(target);
1124         flush_altivec_to_thread(target);
1125 
1126         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1127                                         &target->thread.ckvr_state, 0,
1128                                         33 * sizeof(vector128));
1129         if (!ret) {
1130                 /*
1131                  * Copy out only the low-order word of vrsave.
1132                  */
1133                 union {
1134                         elf_vrreg_t reg;
1135                         u32 word;
1136                 } vrsave;
1137                 memset(&vrsave, 0, sizeof(vrsave));
1138                 vrsave.word = target->thread.ckvrsave;
1139                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1140                                                 33 * sizeof(vector128), -1);
1141         }
1142 
1143         return ret;
1144 }
1145 
1146 /**
1147  * tm_cvmx_set - set CVMX registers
1148  * @target:     The target task.
1149  * @regset:     The user regset structure.
1150  * @pos:        The buffer position.
1151  * @count:      Number of bytes to copy.
1152  * @kbuf:       Kernel buffer to copy into.
1153  * @ubuf:       User buffer to copy from.
1154  *
1155  * This function sets the in-transaction checkpointed VMX registers.
1156  *
1157  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1158  * the checkpointed values for the current transaction to fall
1159  * back on if it aborts in between. The userspace interface buffer
1160  * layout is as follows.
1161  *
1162  * struct data {
1163  *      vector128       vr[32];
1164  *      vector128       vscr;
1165  *      vector128       vrsave;
1166  *};
1167  */
1168 static int tm_cvmx_set(struct task_struct *target,
1169                         const struct user_regset *regset,
1170                         unsigned int pos, unsigned int count,
1171                         const void *kbuf, const void __user *ubuf)
1172 {
1173         int ret;
1174 
1175         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1176 
1177         if (!cpu_has_feature(CPU_FTR_TM))
1178                 return -ENODEV;
1179 
1180         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1181                 return -ENODATA;
1182 
1183         flush_tmregs_to_thread(target);
1184         flush_fp_to_thread(target);
1185         flush_altivec_to_thread(target);
1186 
1187         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1188                                         &target->thread.ckvr_state, 0,
1189                                         33 * sizeof(vector128));
1190         if (!ret && count > 0) {
1191                 /*
1192                  * We use only the low-order word of vrsave.
1193                  */
1194                 union {
1195                         elf_vrreg_t reg;
1196                         u32 word;
1197                 } vrsave;
1198                 memset(&vrsave, 0, sizeof(vrsave));
1199                 vrsave.word = target->thread.ckvrsave;
1200                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1201                                                 33 * sizeof(vector128), -1);
1202                 if (!ret)
1203                         target->thread.ckvrsave = vrsave.word;
1204         }
1205 
1206         return ret;
1207 }
1208 
1209 /**
1210  * tm_cvsx_active - get active number of registers in CVSX
1211  * @target:     The target task.
1212  * @regset:     The user regset structure.
1213  *
1214  * This function checks for the active number of available
1215  * registers in the transaction checkpointed VSX category.
1216  */
1217 static int tm_cvsx_active(struct task_struct *target,
1218                                 const struct user_regset *regset)
1219 {
1220         if (!cpu_has_feature(CPU_FTR_TM))
1221                 return -ENODEV;
1222 
1223         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1224                 return 0;
1225 
1226         flush_vsx_to_thread(target);
1227         return target->thread.used_vsr ? regset->n : 0;
1228 }
1229 
1230 /**
1231  * tm_cvsx_get - get CVSX registers
1232  * @target:     The target task.
1233  * @regset:     The user regset structure.
1234  * @pos:        The buffer position.
1235  * @count:      Number of bytes to copy.
1236  * @kbuf:       Kernel buffer to copy from.
1237  * @ubuf:       User buffer to copy into.
1238  *
1239  * This function gets the in-transaction checkpointed VSX registers.
1240  *
1241  * When the transaction is active 'ckfp_state' holds the checkpointed
1242  * values for the current transaction to fall back on if it aborts
1243  * in between. This function gets those checkpointed VSX registers.
1244  * The userspace interface buffer layout is as follows.
1245  *
1246  * struct data {
1247  *      u64     vsx[32];
1248  *};
1249  */
1250 static int tm_cvsx_get(struct task_struct *target,
1251                         const struct user_regset *regset,
1252                         unsigned int pos, unsigned int count,
1253                         void *kbuf, void __user *ubuf)
1254 {
1255         u64 buf[32];
1256         int ret, i;
1257 
1258         if (!cpu_has_feature(CPU_FTR_TM))
1259                 return -ENODEV;
1260 
1261         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1262                 return -ENODATA;
1263 
1264         /* Flush the state */
1265         flush_tmregs_to_thread(target);
1266         flush_fp_to_thread(target);
1267         flush_altivec_to_thread(target);
1268         flush_vsx_to_thread(target);
1269 
1270         for (i = 0; i < 32 ; i++)
1271                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1272         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1273                                   buf, 0, 32 * sizeof(double));
1274 
1275         return ret;
1276 }
1277 
1278 /**
1279  * tm_cvsx_set - set CVSX registers
1280  * @target:     The target task.
1281  * @regset:     The user regset structure.
1282  * @pos:        The buffer position.
1283  * @count:      Number of bytes to copy.
1284  * @kbuf:       Kernel buffer to copy into.
1285  * @ubuf:       User buffer to copy from.
1286  *
1287  * This function sets the in-transaction checkpointed VSX registers.
1288  *
1289  * When the transaction is active 'ckfp_state' holds the checkpointed
1290  * VSX register values for the current transaction to fall back on
1291  * if it aborts in between. This function sets these checkpointed
1292  * VSX registers. The userspace interface buffer layout is as follows.
1293  *
1294  * struct data {
1295  *      u64     vsx[32];
1296  *};
1297  */
1298 static int tm_cvsx_set(struct task_struct *target,
1299                         const struct user_regset *regset,
1300                         unsigned int pos, unsigned int count,
1301                         const void *kbuf, const void __user *ubuf)
1302 {
1303         u64 buf[32];
1304         int ret, i;
1305 
1306         if (!cpu_has_feature(CPU_FTR_TM))
1307                 return -ENODEV;
1308 
1309         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1310                 return -ENODATA;
1311 
1312         /* Flush the state */
1313         flush_tmregs_to_thread(target);
1314         flush_fp_to_thread(target);
1315         flush_altivec_to_thread(target);
1316         flush_vsx_to_thread(target);
1317 
1318         for (i = 0; i < 32 ; i++)
1319                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1320 
1321         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1322                                  buf, 0, 32 * sizeof(double));
1323         if (!ret)
1324                 for (i = 0; i < 32 ; i++)
1325                         target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1326 
1327         return ret;
1328 }
1329 
1330 /**
1331  * tm_spr_active - get active number of registers in TM SPR
1332  * @target:     The target task.
1333  * @regset:     The user regset structure.
1334  *
1335  * This function checks the active number of available
1336  * registers in the transactional memory SPR category.
1337  */
1338 static int tm_spr_active(struct task_struct *target,
1339                          const struct user_regset *regset)
1340 {
1341         if (!cpu_has_feature(CPU_FTR_TM))
1342                 return -ENODEV;
1343 
1344         return regset->n;
1345 }
1346 
1347 /**
1348  * tm_spr_get - get the TM related SPR registers
1349  * @target:     The target task.
1350  * @regset:     The user regset structure.
1351  * @pos:        The buffer position.
1352  * @count:      Number of bytes to copy.
1353  * @kbuf:       Kernel buffer to copy from.
1354  * @ubuf:       User buffer to copy into.
1355  *
1356  * This function gets transactional memory related SPR registers.
1357  * The userspace interface buffer layout is as follows.
1358  *
1359  * struct {
1360  *      u64             tm_tfhar;
1361  *      u64             tm_texasr;
1362  *      u64             tm_tfiar;
1363  * };
1364  */
1365 static int tm_spr_get(struct task_struct *target,
1366                       const struct user_regset *regset,
1367                       unsigned int pos, unsigned int count,
1368                       void *kbuf, void __user *ubuf)
1369 {
1370         int ret;
1371 
1372         /* Build tests */
1373         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1374         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1375         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1376 
1377         if (!cpu_has_feature(CPU_FTR_TM))
1378                 return -ENODEV;
1379 
1380         /* Flush the states */
1381         flush_tmregs_to_thread(target);
1382         flush_fp_to_thread(target);
1383         flush_altivec_to_thread(target);
1384 
1385         /* TFHAR register */
1386         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1387                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1388 
1389         /* TEXASR register */
1390         if (!ret)
1391                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1392                                 &target->thread.tm_texasr, sizeof(u64),
1393                                 2 * sizeof(u64));
1394 
1395         /* TFIAR register */
1396         if (!ret)
1397                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1398                                 &target->thread.tm_tfiar,
1399                                 2 * sizeof(u64), 3 * sizeof(u64));
1400         return ret;
1401 }
1402 
1403 /**
1404  * tm_spr_set - set the TM related SPR registers
1405  * @target:     The target task.
1406  * @regset:     The user regset structure.
1407  * @pos:        The buffer position.
1408  * @count:      Number of bytes to copy.
1409  * @kbuf:       Kernel buffer to copy into.
1410  * @ubuf:       User buffer to copy from.
1411  *
1412  * This function sets transactional memory related SPR registers.
1413  * The userspace interface buffer layout is as follows.
1414  *
1415  * struct {
1416  *      u64             tm_tfhar;
1417  *      u64             tm_texasr;
1418  *      u64             tm_tfiar;
1419  * };
1420  */
1421 static int tm_spr_set(struct task_struct *target,
1422                       const struct user_regset *regset,
1423                       unsigned int pos, unsigned int count,
1424                       const void *kbuf, const void __user *ubuf)
1425 {
1426         int ret;
1427 
1428         /* Build tests */
1429         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1430         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1431         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1432 
1433         if (!cpu_has_feature(CPU_FTR_TM))
1434                 return -ENODEV;
1435 
1436         /* Flush the states */
1437         flush_tmregs_to_thread(target);
1438         flush_fp_to_thread(target);
1439         flush_altivec_to_thread(target);
1440 
1441         /* TFHAR register */
1442         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1443                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1444 
1445         /* TEXASR register */
1446         if (!ret)
1447                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1448                                 &target->thread.tm_texasr, sizeof(u64),
1449                                 2 * sizeof(u64));
1450 
1451         /* TFIAR register */
1452         if (!ret)
1453                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1454                                 &target->thread.tm_tfiar,
1455                                  2 * sizeof(u64), 3 * sizeof(u64));
1456         return ret;
1457 }
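
/*
 * Illustrative sketch (assumption): the three TM SPRs handled above are
 * exported together as the NT_PPC_TM_SPR regset, so they can be read as a
 * packed triple:
 *
 *	uint64_t tm_sprs[3];		// tfhar, texasr, tfiar, in that order
 *	struct iovec iov = { .iov_base = tm_sprs, .iov_len = sizeof(tm_sprs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
 */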
1458 
1459 static int tm_tar_active(struct task_struct *target,
1460                          const struct user_regset *regset)
1461 {
1462         if (!cpu_has_feature(CPU_FTR_TM))
1463                 return -ENODEV;
1464 
1465         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1466                 return regset->n;
1467 
1468         return 0;
1469 }
1470 
1471 static int tm_tar_get(struct task_struct *target,
1472                       const struct user_regset *regset,
1473                       unsigned int pos, unsigned int count,
1474                       void *kbuf, void __user *ubuf)
1475 {
1476         int ret;
1477 
1478         if (!cpu_has_feature(CPU_FTR_TM))
1479                 return -ENODEV;
1480 
1481         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1482                 return -ENODATA;
1483 
1484         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1485                                 &target->thread.tm_tar, 0, sizeof(u64));
1486         return ret;
1487 }
1488 
1489 static int tm_tar_set(struct task_struct *target,
1490                       const struct user_regset *regset,
1491                       unsigned int pos, unsigned int count,
1492                       const void *kbuf, const void __user *ubuf)
1493 {
1494         int ret;
1495 
1496         if (!cpu_has_feature(CPU_FTR_TM))
1497                 return -ENODEV;
1498 
1499         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1500                 return -ENODATA;
1501 
1502         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1503                                 &target->thread.tm_tar, 0, sizeof(u64));
1504         return ret;
1505 }
1506 
1507 static int tm_ppr_active(struct task_struct *target,
1508                          const struct user_regset *regset)
1509 {
1510         if (!cpu_has_feature(CPU_FTR_TM))
1511                 return -ENODEV;
1512 
1513         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1514                 return regset->n;
1515 
1516         return 0;
1517 }
1518 
1519 
1520 static int tm_ppr_get(struct task_struct *target,
1521                       const struct user_regset *regset,
1522                       unsigned int pos, unsigned int count,
1523                       void *kbuf, void __user *ubuf)
1524 {
1525         int ret;
1526 
1527         if (!cpu_has_feature(CPU_FTR_TM))
1528                 return -ENODEV;
1529 
1530         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1531                 return -ENODATA;
1532 
1533         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1534                                 &target->thread.tm_ppr, 0, sizeof(u64));
1535         return ret;
1536 }
1537 
1538 static int tm_ppr_set(struct task_struct *target,
1539                       const struct user_regset *regset,
1540                       unsigned int pos, unsigned int count,
1541                       const void *kbuf, const void __user *ubuf)
1542 {
1543         int ret;
1544 
1545         if (!cpu_has_feature(CPU_FTR_TM))
1546                 return -ENODEV;
1547 
1548         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1549                 return -ENODATA;
1550 
1551         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1552                                 &target->thread.tm_ppr, 0, sizeof(u64));
1553         return ret;
1554 }
1555 
1556 static int tm_dscr_active(struct task_struct *target,
1557                          const struct user_regset *regset)
1558 {
1559         if (!cpu_has_feature(CPU_FTR_TM))
1560                 return -ENODEV;
1561 
1562         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1563                 return regset->n;
1564 
1565         return 0;
1566 }
1567 
1568 static int tm_dscr_get(struct task_struct *target,
1569                       const struct user_regset *regset,
1570                       unsigned int pos, unsigned int count,
1571                       void *kbuf, void __user *ubuf)
1572 {
1573         int ret;
1574 
1575         if (!cpu_has_feature(CPU_FTR_TM))
1576                 return -ENODEV;
1577 
1578         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1579                 return -ENODATA;
1580 
1581         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1582                                 &target->thread.tm_dscr, 0, sizeof(u64));
1583         return ret;
1584 }
1585 
1586 static int tm_dscr_set(struct task_struct *target,
1587                       const struct user_regset *regset,
1588                       unsigned int pos, unsigned int count,
1589                       const void *kbuf, const void __user *ubuf)
1590 {
1591         int ret;
1592 
1593         if (!cpu_has_feature(CPU_FTR_TM))
1594                 return -ENODEV;
1595 
1596         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1597                 return -ENODATA;
1598 
1599         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1600                                 &target->thread.tm_dscr, 0, sizeof(u64));
1601         return ret;
1602 }
1603 #endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1604 
1605 #ifdef CONFIG_PPC64
1606 static int ppr_get(struct task_struct *target,
1607                       const struct user_regset *regset,
1608                       unsigned int pos, unsigned int count,
1609                       void *kbuf, void __user *ubuf)
1610 {
1611         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1612                                    &target->thread.ppr, 0, sizeof(u64));
1613 }
1614 
1615 static int ppr_set(struct task_struct *target,
1616                       const struct user_regset *regset,
1617                       unsigned int pos, unsigned int count,
1618                       const void *kbuf, const void __user *ubuf)
1619 {
1620         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1621                                   &target->thread.ppr, 0, sizeof(u64));
1622 }
1623 
1624 static int dscr_get(struct task_struct *target,
1625                       const struct user_regset *regset,
1626                       unsigned int pos, unsigned int count,
1627                       void *kbuf, void __user *ubuf)
1628 {
1629         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1630                                    &target->thread.dscr, 0, sizeof(u64));
1631 }
1632 static int dscr_set(struct task_struct *target,
1633                       const struct user_regset *regset,
1634                       unsigned int pos, unsigned int count,
1635                       const void *kbuf, const void __user *ubuf)
1636 {
1637         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1638                                   &target->thread.dscr, 0, sizeof(u64));
1639 }
1640 #endif
1641 #ifdef CONFIG_PPC_BOOK3S_64
1642 static int tar_get(struct task_struct *target,
1643                       const struct user_regset *regset,
1644                       unsigned int pos, unsigned int count,
1645                       void *kbuf, void __user *ubuf)
1646 {
1647         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1648                                    &target->thread.tar, 0, sizeof(u64));
1649 }
1650 static int tar_set(struct task_struct *target,
1651                       const struct user_regset *regset,
1652                       unsigned int pos, unsigned int count,
1653                       const void *kbuf, const void __user *ubuf)
1654 {
1655         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1656                                   &target->thread.tar, 0, sizeof(u64));
1657 }
1658 
1659 static int ebb_active(struct task_struct *target,
1660                          const struct user_regset *regset)
1661 {
1662         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1663                 return -ENODEV;
1664 
1665         if (target->thread.used_ebb)
1666                 return regset->n;
1667 
1668         return 0;
1669 }
1670 
1671 static int ebb_get(struct task_struct *target,
1672                       const struct user_regset *regset,
1673                       unsigned int pos, unsigned int count,
1674                       void *kbuf, void __user *ubuf)
1675 {
1676         /* Build tests */
1677         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1678         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1679 
1680         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1681                 return -ENODEV;
1682 
1683         if (!target->thread.used_ebb)
1684                 return -ENODATA;
1685 
1686         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1687                         &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1688 }
1689 
1690 static int ebb_set(struct task_struct *target,
1691                       const struct user_regset *regset,
1692                       unsigned int pos, unsigned int count,
1693                       const void *kbuf, const void __user *ubuf)
1694 {
1695         int ret = 0;
1696 
1697         /* Build tests */
1698         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1699         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1700 
1701         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1702                 return -ENODEV;
1703 
1704         if (target->thread.used_ebb)
1705                 return -ENODATA;
1706 
1707         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1708                         &target->thread.ebbrr, 0, sizeof(unsigned long));
1709 
1710         if (!ret)
1711                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1712                         &target->thread.ebbhr, sizeof(unsigned long),
1713                         2 * sizeof(unsigned long));
1714 
1715         if (!ret)
1716                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1717                         &target->thread.bescr,
1718                         2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1719 
1720         return ret;
1721 }
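/*
 * Illustrative sketch, not part of this source file.  The BUILD_BUG_ON()s
 * in ebb_get()/ebb_set() pin the thread_struct layout so that ebbrr,
 * ebbhr and bescr can be streamed as one contiguous block of three longs.
 * The same compile-time check in plain C11, against a hypothetical
 * stand-in structure:
 */
#if 0
#include <assert.h>
#include <stddef.h>

struct demo_ebb_layout {
	unsigned long ebbrr;
	unsigned long ebbhr;
	unsigned long bescr;
};

static_assert(offsetof(struct demo_ebb_layout, ebbrr) + sizeof(unsigned long) ==
	      offsetof(struct demo_ebb_layout, ebbhr), "ebbhr must follow ebbrr");
static_assert(offsetof(struct demo_ebb_layout, ebbhr) + sizeof(unsigned long) ==
	      offsetof(struct demo_ebb_layout, bescr), "bescr must follow ebbhr");
#endif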
1722 static int pmu_active(struct task_struct *target,
1723                          const struct user_regset *regset)
1724 {
1725         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1726                 return -ENODEV;
1727 
1728         return regset->n;
1729 }
1730 
1731 static int pmu_get(struct task_struct *target,
1732                       const struct user_regset *regset,
1733                       unsigned int pos, unsigned int count,
1734                       void *kbuf, void __user *ubuf)
1735 {
1736         /* Build tests */
1737         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1738         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1739         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1740         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1741 
1742         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1743                 return -ENODEV;
1744 
1745         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1746                         &target->thread.siar, 0,
1747                         5 * sizeof(unsigned long));
1748 }
1749 
1750 static int pmu_set(struct task_struct *target,
1751                       const struct user_regset *regset,
1752                       unsigned int pos, unsigned int count,
1753                       const void *kbuf, const void __user *ubuf)
1754 {
1755         int ret = 0;
1756 
1757         /* Build tests */
1758         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1759         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1760         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1761         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1762 
1763         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1764                 return -ENODEV;
1765 
1766         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1767                         &target->thread.siar, 0,
1768                         sizeof(unsigned long));
1769 
1770         if (!ret)
1771                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1772                         &target->thread.sdar, sizeof(unsigned long),
1773                         2 * sizeof(unsigned long));
1774 
1775         if (!ret)
1776                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1777                         &target->thread.sier, 2 * sizeof(unsigned long),
1778                         3 * sizeof(unsigned long));
1779 
1780         if (!ret)
1781                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1782                         &target->thread.mmcr2, 3 * sizeof(unsigned long),
1783                         4 * sizeof(unsigned long));
1784 
1785         if (!ret)
1786                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1787                         &target->thread.mmcr0, 4 * sizeof(unsigned long),
1788                         5 * sizeof(unsigned long));
1789         return ret;
1790 }
1791 #endif
1792 
1793 #ifdef CONFIG_PPC_MEM_KEYS
1794 static int pkey_active(struct task_struct *target,
1795                        const struct user_regset *regset)
1796 {
1797         if (!arch_pkeys_enabled())
1798                 return -ENODEV;
1799 
1800         return regset->n;
1801 }
1802 
1803 static int pkey_get(struct task_struct *target,
1804                     const struct user_regset *regset,
1805                     unsigned int pos, unsigned int count,
1806                     void *kbuf, void __user *ubuf)
1807 {
1808         BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
1809         BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
1810 
1811         if (!arch_pkeys_enabled())
1812                 return -ENODEV;
1813 
1814         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1815                                    &target->thread.amr, 0,
1816                                    ELF_NPKEY * sizeof(unsigned long));
1817 }
1818 
1819 static int pkey_set(struct task_struct *target,
1820                       const struct user_regset *regset,
1821                       unsigned int pos, unsigned int count,
1822                       const void *kbuf, const void __user *ubuf)
1823 {
1824         u64 new_amr;
1825         int ret;
1826 
1827         if (!arch_pkeys_enabled())
1828                 return -ENODEV;
1829 
1830         /* Only the AMR can be set from userspace */
1831         if (pos != 0 || count != sizeof(new_amr))
1832                 return -EINVAL;
1833 
1834         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1835                                  &new_amr, 0, sizeof(new_amr));
1836         if (ret)
1837                 return ret;
1838 
1839         /* UAMOR determines which bits of the AMR can be set from userspace. */
1840         target->thread.amr = (new_amr & target->thread.uamor) |
1841                 (target->thread.amr & ~target->thread.uamor);
1842 
1843         return 0;
1844 }
1845 #endif /* CONFIG_PPC_MEM_KEYS */
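/*
 * Illustrative sketch, not part of this source file.  pkey_set() above
 * only lets a tracer change the AMR bits that UAMOR marks as user-owned;
 * every other bit keeps its current value.  Worked example with made-up
 * register values:
 *
 *	old AMR    = 0xf000000000000000
 *	UAMOR      = 0x0c00000000000000	(only one key is user-owned)
 *	requested  = 0x0500000000000000
 *	new AMR    = (requested & UAMOR) | (old & ~UAMOR)
 *	           = 0x0400000000000000 | 0xf000000000000000
 *	           = 0xf400000000000000
 */
#if 0
static unsigned long apply_uamor_mask(unsigned long old_amr,
				      unsigned long uamor,
				      unsigned long requested_amr)
{
	/* Same expression pkey_set() applies to target->thread.amr */
	return (requested_amr & uamor) | (old_amr & ~uamor);
}
#endif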
1846 
1847 /*
1848  * These are our native regset flavors.
1849  */
1850 enum powerpc_regset {
1851         REGSET_GPR,
1852         REGSET_FPR,
1853 #ifdef CONFIG_ALTIVEC
1854         REGSET_VMX,
1855 #endif
1856 #ifdef CONFIG_VSX
1857         REGSET_VSX,
1858 #endif
1859 #ifdef CONFIG_SPE
1860         REGSET_SPE,
1861 #endif
1862 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1863         REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1864         REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1865         REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1866         REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1867         REGSET_TM_SPR,          /* TM specific SPR registers */
1868         REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1869         REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1870         REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1871 #endif
1872 #ifdef CONFIG_PPC64
1873         REGSET_PPR,             /* PPR register */
1874         REGSET_DSCR,            /* DSCR register */
1875 #endif
1876 #ifdef CONFIG_PPC_BOOK3S_64
1877         REGSET_TAR,             /* TAR register */
1878         REGSET_EBB,             /* EBB registers */
1879         REGSET_PMR,             /* Performance Monitor Registers */
1880 #endif
1881 #ifdef CONFIG_PPC_MEM_KEYS
1882         REGSET_PKEY,            /* AMR register */
1883 #endif
1884 };
1885 
1886 static const struct user_regset native_regsets[] = {
1887         [REGSET_GPR] = {
1888                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1889                 .size = sizeof(long), .align = sizeof(long),
1890                 .get = gpr_get, .set = gpr_set
1891         },
1892         [REGSET_FPR] = {
1893                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1894                 .size = sizeof(double), .align = sizeof(double),
1895                 .get = fpr_get, .set = fpr_set
1896         },
1897 #ifdef CONFIG_ALTIVEC
1898         [REGSET_VMX] = {
1899                 .core_note_type = NT_PPC_VMX, .n = 34,
1900                 .size = sizeof(vector128), .align = sizeof(vector128),
1901                 .active = vr_active, .get = vr_get, .set = vr_set
1902         },
1903 #endif
1904 #ifdef CONFIG_VSX
1905         [REGSET_VSX] = {
1906                 .core_note_type = NT_PPC_VSX, .n = 32,
1907                 .size = sizeof(double), .align = sizeof(double),
1908                 .active = vsr_active, .get = vsr_get, .set = vsr_set
1909         },
1910 #endif
1911 #ifdef CONFIG_SPE
1912         [REGSET_SPE] = {
1913                 .core_note_type = NT_PPC_SPE, .n = 35,
1914                 .size = sizeof(u32), .align = sizeof(u32),
1915                 .active = evr_active, .get = evr_get, .set = evr_set
1916         },
1917 #endif
1918 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1919         [REGSET_TM_CGPR] = {
1920                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1921                 .size = sizeof(long), .align = sizeof(long),
1922                 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1923         },
1924         [REGSET_TM_CFPR] = {
1925                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1926                 .size = sizeof(double), .align = sizeof(double),
1927                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1928         },
1929         [REGSET_TM_CVMX] = {
1930                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1931                 .size = sizeof(vector128), .align = sizeof(vector128),
1932                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1933         },
1934         [REGSET_TM_CVSX] = {
1935                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1936                 .size = sizeof(double), .align = sizeof(double),
1937                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1938         },
1939         [REGSET_TM_SPR] = {
1940                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1941                 .size = sizeof(u64), .align = sizeof(u64),
1942                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1943         },
1944         [REGSET_TM_CTAR] = {
1945                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1946                 .size = sizeof(u64), .align = sizeof(u64),
1947                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1948         },
1949         [REGSET_TM_CPPR] = {
1950                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1951                 .size = sizeof(u64), .align = sizeof(u64),
1952                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1953         },
1954         [REGSET_TM_CDSCR] = {
1955                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1956                 .size = sizeof(u64), .align = sizeof(u64),
1957                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1958         },
1959 #endif
1960 #ifdef CONFIG_PPC64
1961         [REGSET_PPR] = {
1962                 .core_note_type = NT_PPC_PPR, .n = 1,
1963                 .size = sizeof(u64), .align = sizeof(u64),
1964                 .get = ppr_get, .set = ppr_set
1965         },
1966         [REGSET_DSCR] = {
1967                 .core_note_type = NT_PPC_DSCR, .n = 1,
1968                 .size = sizeof(u64), .align = sizeof(u64),
1969                 .get = dscr_get, .set = dscr_set
1970         },
1971 #endif
1972 #ifdef CONFIG_PPC_BOOK3S_64
1973         [REGSET_TAR] = {
1974                 .core_note_type = NT_PPC_TAR, .n = 1,
1975                 .size = sizeof(u64), .align = sizeof(u64),
1976                 .get = tar_get, .set = tar_set
1977         },
1978         [REGSET_EBB] = {
1979                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1980                 .size = sizeof(u64), .align = sizeof(u64),
1981                 .active = ebb_active, .get = ebb_get, .set = ebb_set
1982         },
1983         [REGSET_PMR] = {
1984                 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1985                 .size = sizeof(u64), .align = sizeof(u64),
1986                 .active = pmu_active, .get = pmu_get, .set = pmu_set
1987         },
1988 #endif
1989 #ifdef CONFIG_PPC_MEM_KEYS
1990         [REGSET_PKEY] = {
1991                 .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
1992                 .size = sizeof(u64), .align = sizeof(u64),
1993                 .active = pkey_active, .get = pkey_get, .set = pkey_set
1994         },
1995 #endif
1996 };
1997 
1998 static const struct user_regset_view user_ppc_native_view = {
1999         .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
2000         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2001 };
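/*
 * Illustrative userspace sketch, not part of this source file.  Each
 * entry in native_regsets is reachable from a tracer through
 * PTRACE_GETREGSET / PTRACE_SETREGSET keyed by its core_note_type, and
 * the same note types tag the register sections of an ELF core dump.
 * A minimal fetch of the GPR set (NT_PRSTATUS) could look like this;
 * it assumes the usual glibc headers provide elf_gregset_t for this
 * architecture.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/procfs.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>

static long fetch_gprs(pid_t pid, elf_gregset_t *out)
{
	struct iovec iov = { .iov_base = out, .iov_len = sizeof(*out) };

	/* addr carries the regset note type, data points at the iovec */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}
#endif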
2002 
2003 #ifdef CONFIG_PPC64
2004 #include <linux/compat.h>
2005 
2006 static int gpr32_get_common(struct task_struct *target,
2007                      const struct user_regset *regset,
2008                      unsigned int pos, unsigned int count,
2009                             void *kbuf, void __user *ubuf,
2010                             unsigned long *regs)
2011 {
2012         compat_ulong_t *k = kbuf;
2013         compat_ulong_t __user *u = ubuf;
2014         compat_ulong_t reg;
2015 
2016         pos /= sizeof(reg);
2017         count /= sizeof(reg);
2018 
2019         if (kbuf)
2020                 for (; count > 0 && pos < PT_MSR; --count)
2021                         *k++ = regs[pos++];
2022         else
2023                 for (; count > 0 && pos < PT_MSR; --count)
2024                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2025                                 return -EFAULT;
2026 
2027         if (count > 0 && pos == PT_MSR) {
2028                 reg = get_user_msr(target);
2029                 if (kbuf)
2030                         *k++ = reg;
2031                 else if (__put_user(reg, u++))
2032                         return -EFAULT;
2033                 ++pos;
2034                 --count;
2035         }
2036 
2037         if (kbuf)
2038                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2039                         *k++ = regs[pos++];
2040         else
2041                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2042                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2043                                 return -EFAULT;
2044 
2045         kbuf = k;
2046         ubuf = u;
2047         pos *= sizeof(reg);
2048         count *= sizeof(reg);
2049         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2050                                         PT_REGS_COUNT * sizeof(reg), -1);
2051 }
2052 
2053 static int gpr32_set_common(struct task_struct *target,
2054                      const struct user_regset *regset,
2055                      unsigned int pos, unsigned int count,
2056                      const void *kbuf, const void __user *ubuf,
2057                      unsigned long *regs)
2058 {
2059         const compat_ulong_t *k = kbuf;
2060         const compat_ulong_t __user *u = ubuf;
2061         compat_ulong_t reg;
2062 
2063         pos /= sizeof(reg);
2064         count /= sizeof(reg);
2065 
2066         if (kbuf)
2067                 for (; count > 0 && pos < PT_MSR; --count)
2068                         regs[pos++] = *k++;
2069         else
2070                 for (; count > 0 && pos < PT_MSR; --count) {
2071                         if (__get_user(reg, u++))
2072                                 return -EFAULT;
2073                         regs[pos++] = reg;
2074                 }
2075 
2076 
2077         if (count > 0 && pos == PT_MSR) {
2078                 if (kbuf)
2079                         reg = *k++;
2080                 else if (__get_user(reg, u++))
2081                         return -EFAULT;
2082                 set_user_msr(target, reg);
2083                 ++pos;
2084                 --count;
2085         }
2086 
2087         if (kbuf) {
2088                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2089                         regs[pos++] = *k++;
2090                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2091                         ++k;
2092         } else {
2093                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2094                         if (__get_user(reg, u++))
2095                                 return -EFAULT;
2096                         regs[pos++] = reg;
2097                 }
2098                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2099                         if (__get_user(reg, u++))
2100                                 return -EFAULT;
2101         }
2102 
2103         if (count > 0 && pos == PT_TRAP) {
2104                 if (kbuf)
2105                         reg = *k++;
2106                 else if (__get_user(reg, u++))
2107                         return -EFAULT;
2108                 set_user_trap(target, reg);
2109                 ++pos;
2110                 --count;
2111         }
2112 
2113         kbuf = k;
2114         ubuf = u;
2115         pos *= sizeof(reg);
2116         count *= sizeof(reg);
2117         return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2118                                          (PT_TRAP + 1) * sizeof(reg), -1);
2119 }
2120 
2121 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2122 static int tm_cgpr32_get(struct task_struct *target,
2123                      const struct user_regset *regset,
2124                      unsigned int pos, unsigned int count,
2125                      void *kbuf, void __user *ubuf)
2126 {
2127         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2128                         &target->thread.ckpt_regs.gpr[0]);
2129 }
2130 
2131 static int tm_cgpr32_set(struct task_struct *target,
2132                      const struct user_regset *regset,
2133                      unsigned int pos, unsigned int count,
2134                      const void *kbuf, const void __user *ubuf)
2135 {
2136         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2137                         &target->thread.ckpt_regs.gpr[0]);
2138 }
2139 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2140 
2141 static int gpr32_get(struct task_struct *target,
2142                      const struct user_regset *regset,
2143                      unsigned int pos, unsigned int count,
2144                      void *kbuf, void __user *ubuf)
2145 {
2146         int i;
2147 
2148         if (target->thread.regs == NULL)
2149                 return -EIO;
2150 
2151         if (!FULL_REGS(target->thread.regs)) {
2152                 /*
2153                  * We have a partial register set.
2154                  * Fill 14-31 with bogus values.
2155                  */
2156                 for (i = 14; i < 32; i++)
2157                         target->thread.regs->gpr[i] = NV_REG_POISON;
2158         }
2159         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2160                         &target->thread.regs->gpr[0]);
2161 }
2162 
2163 static int gpr32_set(struct task_struct *target,
2164                      const struct user_regset *regset,
2165                      unsigned int pos, unsigned int count,
2166                      const void *kbuf, const void __user *ubuf)
2167 {
2168         if (target->thread.regs == NULL)
2169                 return -EIO;
2170 
2171         CHECK_FULL_REGS(target->thread.regs);
2172         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2173                         &target->thread.regs->gpr[0]);
2174 }
2175 
2176 /*
2177  * These are the regset flavors matching the CONFIG_PPC32 native set.
2178  */
2179 static const struct user_regset compat_regsets[] = {
2180         [REGSET_GPR] = {
2181                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2182                 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2183                 .get = gpr32_get, .set = gpr32_set
2184         },
2185         [REGSET_FPR] = {
2186                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2187                 .size = sizeof(double), .align = sizeof(double),
2188                 .get = fpr_get, .set = fpr_set
2189         },
2190 #ifdef CONFIG_ALTIVEC
2191         [REGSET_VMX] = {
2192                 .core_note_type = NT_PPC_VMX, .n = 34,
2193                 .size = sizeof(vector128), .align = sizeof(vector128),
2194                 .active = vr_active, .get = vr_get, .set = vr_set
2195         },
2196 #endif
2197 #ifdef CONFIG_SPE
2198         [REGSET_SPE] = {
2199                 .core_note_type = NT_PPC_SPE, .n = 35,
2200                 .size = sizeof(u32), .align = sizeof(u32),
2201                 .active = evr_active, .get = evr_get, .set = evr_set
2202         },
2203 #endif
2204 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2205         [REGSET_TM_CGPR] = {
2206                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2207                 .size = sizeof(long), .align = sizeof(long),
2208                 .active = tm_cgpr_active,
2209                 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2210         },
2211         [REGSET_TM_CFPR] = {
2212                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2213                 .size = sizeof(double), .align = sizeof(double),
2214                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2215         },
2216         [REGSET_TM_CVMX] = {
2217                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2218                 .size = sizeof(vector128), .align = sizeof(vector128),
2219                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2220         },
2221         [REGSET_TM_CVSX] = {
2222                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2223                 .size = sizeof(double), .align = sizeof(double),
2224                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2225         },
2226         [REGSET_TM_SPR] = {
2227                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2228                 .size = sizeof(u64), .align = sizeof(u64),
2229                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2230         },
2231         [REGSET_TM_CTAR] = {
2232                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2233                 .size = sizeof(u64), .align = sizeof(u64),
2234                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2235         },
2236         [REGSET_TM_CPPR] = {
2237                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2238                 .size = sizeof(u64), .align = sizeof(u64),
2239                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2240         },
2241         [REGSET_TM_CDSCR] = {
2242                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2243                 .size = sizeof(u64), .align = sizeof(u64),
2244                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2245         },
2246 #endif
2247 #ifdef CONFIG_PPC64
2248         [REGSET_PPR] = {
2249                 .core_note_type = NT_PPC_PPR, .n = 1,
2250                 .size = sizeof(u64), .align = sizeof(u64),
2251                 .get = ppr_get, .set = ppr_set
2252         },
2253         [REGSET_DSCR] = {
2254                 .core_note_type = NT_PPC_DSCR, .n = 1,
2255                 .size = sizeof(u64), .align = sizeof(u64),
2256                 .get = dscr_get, .set = dscr_set
2257         },
2258 #endif
2259 #ifdef CONFIG_PPC_BOOK3S_64
2260         [REGSET_TAR] = {
2261                 .core_note_type = NT_PPC_TAR, .n = 1,
2262                 .size = sizeof(u64), .align = sizeof(u64),
2263                 .get = tar_get, .set = tar_set
2264         },
2265         [REGSET_EBB] = {
2266                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2267                 .size = sizeof(u64), .align = sizeof(u64),
2268                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2269         },
2270 #endif
2271 };
2272 
2273 static const struct user_regset_view user_ppc_compat_view = {
2274         .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2275         .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2276 };
2277 #endif  /* CONFIG_PPC64 */
2278 
2279 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2280 {
2281 #ifdef CONFIG_PPC64
2282         if (test_tsk_thread_flag(task, TIF_32BIT))
2283                 return &user_ppc_compat_view;
2284 #endif
2285         return &user_ppc_native_view;
2286 }
2287 
2288 
2289 void user_enable_single_step(struct task_struct *task)
2290 {
2291         struct pt_regs *regs = task->thread.regs;
2292 
2293         if (regs != NULL) {
2294 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2295                 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2296                 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2297                 regs->msr |= MSR_DE;
2298 #else
2299                 regs->msr &= ~MSR_BE;
2300                 regs->msr |= MSR_SE;
2301 #endif
2302         }
2303         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2304 }
2305 
2306 void user_enable_block_step(struct task_struct *task)
2307 {
2308         struct pt_regs *regs = task->thread.regs;
2309 
2310         if (regs != NULL) {
2311 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2312                 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2313                 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2314                 regs->msr |= MSR_DE;
2315 #else
2316                 regs->msr &= ~MSR_SE;
2317                 regs->msr |= MSR_BE;
2318 #endif
2319         }
2320         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2321 }
2322 
2323 void user_disable_single_step(struct task_struct *task)
2324 {
2325         struct pt_regs *regs = task->thread.regs;
2326 
2327         if (regs != NULL) {
2328 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2329                 /*
2330                  * The logic to disable single stepping should be as
2331                  * simple as turning off the Instruction Complete flag.
2332                  * And, after doing so, if all debug flags are off, turn
2333                  * off DBCR0(IDM) and MSR(DE).
2334                  */
2335                 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2336                 /*
2337                  * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2338                  */
2339                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2340                                         task->thread.debug.dbcr1)) {
2341                         /*
2342                          * All debug events are now off.
2343                          */
2344                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2345                         regs->msr &= ~MSR_DE;
2346                 }
2347 #else
2348                 regs->msr &= ~(MSR_SE | MSR_BE);
2349 #endif
2350         }
2351         clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2352 }
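/*
 * Illustrative userspace sketch, not part of this source file.  The
 * helpers above back the generic PTRACE_SINGLESTEP request: the enable
 * side sets DBCR0[IC]/MSR[DE] (BookE) or MSR[SE] (server), and the
 * disable side runs when the tracee is resumed without a step request or
 * is detached.  A tracer typically drives it like this (error handling
 * omitted):
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

static void single_step_n(pid_t pid, int n)
{
	int status;

	while (n-- > 0) {
		ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
		waitpid(pid, &status, 0);
		if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP)
			break;	/* tracee exited or stopped for another reason */
	}
}
#endif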
2353 
2354 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2355 void ptrace_triggered(struct perf_event *bp,
2356                       struct perf_sample_data *data, struct pt_regs *regs)
2357 {
2358         struct perf_event_attr attr;
2359 
2360         /*
2361          * Disable the breakpoint request here since ptrace has defined a
2362          * one-shot behaviour for breakpoint exceptions in PPC64.
2363          * The SIGTRAP signal is generated automatically for us in do_dabr().
2364          * We don't have to do anything about that here.
2365          */
2366         attr = bp->attr;
2367         attr.disabled = true;
2368         modify_user_hw_breakpoint(bp, &attr);
2369 }
2370 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2371 
2372 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2373                                unsigned long data)
2374 {
2375 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2376         int ret;
2377         struct thread_struct *thread = &(task->thread);
2378         struct perf_event *bp;
2379         struct perf_event_attr attr;
2380 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2381 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2382         bool set_bp = true;
2383         struct arch_hw_breakpoint hw_brk;
2384 #endif
2385 
2386         /* For ppc64 we support one DABR and no IABRs at the moment.
2387          *  For embedded processors we support one DAC and no IACs at the
2388          *  moment.
2389          */
2390         if (addr > 0)
2391                 return -EINVAL;
2392 
2393         /* The bottom 3 bits in dabr are flags */
2394         if ((data & ~0x7UL) >= TASK_SIZE)
2395                 return -EIO;
2396 
2397 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2398         /* For processors using DABR (e.g. the 970), the bottom 3 bits are flags.
2399          *  It was assumed, on previous implementations, that 3 bits were
2400          *  passed together with the data address, fitting the design of the
2401          *  DABR register, as follows:
2402          *
2403          *  bit 0: Read flag
2404          *  bit 1: Write flag
2405          *  bit 2: Breakpoint translation
2406          *
2407          *  Thus, we interpret them here the same way.
2408          */
2409 
2410         /* Ensure breakpoint translation bit is set */
2411         if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2412                 return -EIO;
2413         hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2414         hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2415         hw_brk.len = 8;
2416         set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
2417 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2418         bp = thread->ptrace_bps[0];
2419         if (!set_bp) {
2420                 if (bp) {
2421                         unregister_hw_breakpoint(bp);
2422                         thread->ptrace_bps[0] = NULL;
2423                 }
2424                 return 0;
2425         }
2426         if (bp) {
2427                 attr = bp->attr;
2428                 attr.bp_addr = hw_brk.address;
2429                 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2430 
2431                 /* Enable breakpoint */
2432                 attr.disabled = false;
2433 
2434                 ret =  modify_user_hw_breakpoint(bp, &attr);
2435                 if (ret) {
2436                         return ret;
2437                 }
2438                 thread->ptrace_bps[0] = bp;
2439                 thread->hw_brk = hw_brk;
2440                 return 0;
2441         }
2442 
2443         /* Create a new breakpoint request if one doesn't exist already */
2444         hw_breakpoint_init(&attr);
2445         attr.bp_addr = hw_brk.address;
2446         attr.bp_len = 8;
2447         arch_bp_generic_fields(hw_brk.type,
2448                                &attr.bp_type);
2449 
2450         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2451                                                ptrace_triggered, NULL, task);
2452         if (IS_ERR(bp)) {
2453                 thread->ptrace_bps[0] = NULL;
2454                 return PTR_ERR(bp);
2455         }
2456 
2457 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
2458         if (set_bp && (!ppc_breakpoint_available()))
2459                 return -ENODEV;
2460 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2461         task->thread.hw_brk = hw_brk;
2462 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2463         /* As described above, it was assumed 3 bits were passed with the data
2464          *  address, but we will assume only the mode bits will be passed
2465          *  so as not to cause alignment restrictions for DAC-based processors.
2466          */
2467 
2468         /* DACs hold the whole address without any mode flags */
2469         task->thread.debug.dac1 = data & ~0x3UL;
2470 
2471         if (task->thread.debug.dac1 == 0) {
2472                 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2473                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2474                                         task->thread.debug.dbcr1)) {
2475                         task->thread.regs->msr &= ~MSR_DE;
2476                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2477                 }
2478                 return 0;
2479         }
2480 
2481         /* Read or Write bits must be set */
2482 
2483         if (!(data & 0x3UL))
2484                 return -EINVAL;
2485 
2486         /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2487            register */
2488         task->thread.debug.dbcr0 |= DBCR0_IDM;
2489 
2490         /* Check for write and read flags and set DBCR0
2491            accordingly */
2492         dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2493         if (data & 0x1UL)
2494                 dbcr_dac(task) |= DBCR_DAC1R;
2495         if (data & 0x2UL)
2496                 dbcr_dac(task) |= DBCR_DAC1W;
2497         task->thread.regs->msr |= MSR_DE;
2498 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2499         return 0;
2500 }
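/*
 * Illustrative userspace sketch, not part of this source file.
 * ptrace_set_debugreg() above services the powerpc-specific
 * PTRACE_SET_DEBUGREG request: addr must be 0 and the low three bits of
 * data carry the read/write/translate flags described in the comment
 * inside the function.  The flag macros below are local stand-ins for
 * that bit layout; PTRACE_SET_DEBUGREG itself is expected to come from
 * the powerpc <asm/ptrace.h> UAPI header.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <stdint.h>
#include <asm/ptrace.h>		/* PTRACE_SET_DEBUGREG */

#define DEMO_DABR_READ		0x1UL	/* bit 0: trap on loads */
#define DEMO_DABR_WRITE		0x2UL	/* bit 1: trap on stores */
#define DEMO_DABR_TRANSLATE	0x4UL	/* bit 2: breakpoint translation */

static long set_write_watchpoint(pid_t pid, uintptr_t addr)
{
	unsigned long dabr = (addr & ~0x7UL) | DEMO_DABR_TRANSLATE | DEMO_DABR_WRITE;

	return ptrace(PTRACE_SET_DEBUGREG, pid, 0, (void *)dabr);
}
#endif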
2501 
2502 /*
2503  * Called by kernel/ptrace.c when detaching..
2504  *
2505  * Make sure single step bits etc are not set.
2506  */
2507 void ptrace_disable(struct task_struct *child)
2508 {
2509         /* make sure the single step bit is not set. */
2510         user_disable_single_step(child);
2511 }
2512 
2513 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2514 static long set_instruction_bp(struct task_struct *child,
2515                               struct ppc_hw_breakpoint *bp_info)
2516 {
2517         int slot;
2518         int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2519         int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2520         int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2521         int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2522 
2523         if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2524                 slot2_in_use = 1;
2525         if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2526                 slot4_in_use = 1;
2527 
2528         if (bp_info->addr >= TASK_SIZE)
2529                 return -EIO;
2530 
2531         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2532 
2533                 /* Make sure range is valid. */
2534                 if (bp_info->addr2 >= TASK_SIZE)
2535                         return -EIO;
2536 
2537                 /* We need a pair of IAC registers */
2538                 if ((!slot1_in_use) && (!slot2_in_use)) {
2539                         slot = 1;
2540                         child->thread.debug.iac1 = bp_info->addr;
2541                         child->thread.debug.iac2 = bp_info->addr2;
2542                         child->thread.debug.dbcr0 |= DBCR0_IAC1;
2543                         if (bp_info->addr_mode ==
2544                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2545                                 dbcr_iac_range(child) |= DBCR_IAC12X;
2546                         else
2547                                 dbcr_iac_range(child) |= DBCR_IAC12I;
2548 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2549                 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2550                         slot = 3;
2551                         child->thread.debug.iac3 = bp_info->addr;
2552                         child->thread.debug.iac4 = bp_info->addr2;
2553                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2554                         if (bp_info->addr_mode ==
2555                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2556                                 dbcr_iac_range(child) |= DBCR_IAC34X;
2557                         else
2558                                 dbcr_iac_range(child) |= DBCR_IAC34I;
2559 #endif
2560                 } else
2561                         return -ENOSPC;
2562         } else {
2563                 /* We only need one.  If possible leave a pair free in
2564                  * case a range is needed later
2565                  */
2566                 if (!slot1_in_use) {
2567                         /*
2568                          * Don't use iac1 if iac1-iac2 are free and either
2569                          * iac3 or iac4 (but not both) are free
2570                          */
2571                         if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2572                                 slot = 1;
2573                                 child->thread.debug.iac1 = bp_info->addr;
2574                                 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2575                                 goto out;
2576                         }
2577                 }
2578                 if (!slot2_in_use) {
2579                         slot = 2;
2580                         child->thread.debug.iac2 = bp_info->addr;
2581                         child->thread.debug.dbcr0 |= DBCR0_IAC2;
2582 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2583                 } else if (!slot3_in_use) {
2584                         slot = 3;
2585                         child->thread.debug.iac3 = bp_info->addr;
2586                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2587                 } else if (!slot4_in_use) {
2588                         slot = 4;
2589                         child->thread.debug.iac4 = bp_info->addr;
2590                         child->thread.debug.dbcr0 |= DBCR0_IAC4;
2591 #endif
2592                 } else
2593                         return -ENOSPC;
2594         }
2595 out:
2596         child->thread.debug.dbcr0 |= DBCR0_IDM;
2597         child->thread.regs->msr |= MSR_DE;
2598 
2599         return slot;
2600 }
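/*
 * Illustrative sketch, not part of this source file.  For a single
 * (non-range) instruction breakpoint, set_instruction_bp() above avoids
 * using iac1 when that would needlessly break the iac1/iac2 pair while
 * iac3/iac4 could still serve a later range request.  The slot choice,
 * extracted as a pure function of the four "in use" flags (0 means no
 * slot is free):
 */
#if 0
static int pick_single_iac_slot(int s1_used, int s2_used, int s3_used, int s4_used)
{
	if (!s1_used && (s2_used || s3_used == s4_used))
		return 1;	/* the 1/2 pair is already broken, or 3/4 give no better option */
	if (!s2_used)
		return 2;
	if (!s3_used)
		return 3;
	if (!s4_used)
		return 4;
	return 0;
}
#endif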
2601 
2602 static int del_instruction_bp(struct task_struct *child, int slot)
2603 {
2604         switch (slot) {
2605         case 1:
2606                 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2607                         return -ENOENT;
2608 
2609                 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2610                         /* address range - clear slots 1 & 2 */
2611                         child->thread.debug.iac2 = 0;
2612                         dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2613                 }
2614                 child->thread.debug.iac1 = 0;
2615                 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2616                 break;
2617         case 2:
2618                 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2619                         return -ENOENT;
2620 
2621                 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2622                         /* used in a range */
2623                         return -EINVAL;
2624                 child->thread.debug.iac2 = 0;
2625                 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2626                 break;
2627 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2628         case 3:
2629                 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2630                         return -ENOENT;
2631 
2632                 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2633                         /* address range - clear slots 3 & 4 */
2634                         child->thread.debug.iac4 = 0;
2635                         dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2636                 }
2637                 child->thread.debug.iac3 = 0;
2638                 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2639                 break;
2640         case 4:
2641                 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2642                         return -ENOENT;
2643 
2644                 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2645                         /* Used in a range */
2646                         return -EINVAL;
2647                 child->thread.debug.iac4 = 0;
2648                 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2649                 break;
2650 #endif
2651         default:
2652                 return -EINVAL;
2653         }
2654         return 0;
2655 }
2656 
2657 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2658 {
2659         int byte_enable =
2660                 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2661                 & 0xf;
2662         int condition_mode =
2663                 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2664         int slot;
2665 
2666         if (byte_enable && (condition_mode == 0))
2667                 return -EINVAL;
2668 
2669         if (bp_info->addr >= TASK_SIZE)
2670                 return -EIO;
2671 
2672         if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2673                 slot = 1;
2674                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2675                         dbcr_dac(child) |= DBCR_DAC1R;
2676                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2677                         dbcr_dac(child) |= DBCR_DAC1W;
2678                 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2679 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2680                 if (byte_enable) {
2681                         child->thread.debug.dvc1 =
2682                                 (unsigned long)bp_info->condition_value;
2683                         child->thread.debug.dbcr2 |=
2684                                 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2685                                  (condition_mode << DBCR2_DVC1M_SHIFT));
2686                 }
2687 #endif
2688 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2689         } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2690                 /* Both dac1 and dac2 are part of a range */
2691                 return -ENOSPC;
2692 #endif
2693         } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2694                 slot = 2;
2695                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2696                         dbcr_dac(child) |= DBCR_DAC2R;
2697                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2698                         dbcr_dac(child) |= DBCR_DAC2W;
2699                 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2700 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2701                 if (byte_enable) {
2702                         child->thread.debug.dvc2 =
2703                                 (unsigned long)bp_info->condition_value;
2704                         child->thread.debug.dbcr2 |=
2705                                 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2706                                  (condition_mode << DBCR2_DVC2M_SHIFT));
2707                 }
2708 #endif
2709         } else
2710                 return -ENOSPC;
2711         child->thread.debug.dbcr0 |= DBCR0_IDM;
2712         child->thread.regs->msr |= MSR_DE;
2713 
2714         return slot + 4;
2715 }
2716 
2717 static int del_dac(struct task_struct *child, int slot)
2718 {
2719         if (slot == 1) {
2720                 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2721                         return -ENOENT;
2722 
2723                 child->thread.debug.dac1 = 0;
2724                 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2725 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2726                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2727                         child->thread.debug.dac2 = 0;
2728                         child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2729                 }
2730                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2731 #endif
2732 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2733                 child->thread.debug.dvc1 = 0;
2734 #endif
2735         } else if (slot == 2) {
2736                 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2737                         return -ENOENT;
2738 
2739 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2740                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2741                         /* Part of a range */
2742                         return -EINVAL;
2743                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2744 #endif
2745 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2746                 child->thread.debug.dvc2 = 0;
2747 #endif
2748                 child->thread.debug.dac2 = 0;
2749                 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2750         } else
2751                 return -EINVAL;
2752 
2753         return 0;
2754 }
2755 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2756 
2757 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2758 static int set_dac_range(struct task_struct *child,
2759                          struct ppc_hw_breakpoint *bp_info)
2760 {
2761         int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2762 
2763         /* We don't allow range watchpoints to be used with DVC */
2764         if (bp_info->condition_mode)
2765                 return -EINVAL;
2766 
2767         /*
2768          * Best effort to verify the address range.  The user/supervisor bits
2769          * prevent trapping in kernel space, but let's fail on an obvious bad
2770          * range.  The simple test on the mask is not fool-proof, and any
2771          * exclusive range will spill over into kernel space.
2772          */
2773         if (bp_info->addr >= TASK_SIZE)
2774                 return -EIO;
2775         if (mode == PPC_BREAKPOINT_MODE_MASK) {
2776                 /*
2777                  * dac2 is a bitmask.  Don't allow a mask that makes a
2778                  * kernel space address from a valid dac1 value
2779                  */
2780                 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2781                         return -EIO;
2782         } else {
2783                 /*
2784                  * For range breakpoints, addr2 must also be a valid address
2785                  */
2786                 if (bp_info->addr2 >= TASK_SIZE)
2787                         return -EIO;
2788         }
2789 
2790         if (child->thread.debug.dbcr0 &
2791             (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2792                 return -ENOSPC;
2793 
2794         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2795                 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2796         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2797                 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2798         child->thread.debug.dac1 = bp_info->addr;
2799         child->thread.debug.dac2 = bp_info->addr2;
2800         if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2801                 child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2802         else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2803                 child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2804         else    /* PPC_BREAKPOINT_MODE_MASK */
2805                 child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2806         child->thread.regs->msr |= MSR_DE;
2807 
2808         return 5;
2809 }
2810 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2811 
2812 static long ppc_set_hwdebug(struct task_struct *child,
2813                      struct ppc_hw_breakpoint *bp_info)
2814 {
2815 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2816         int len = 0;
2817         struct thread_struct *thread = &(child->thread);
2818         struct perf_event *bp;
2819         struct perf_event_attr attr;
2820 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2821 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2822         struct arch_hw_breakpoint brk;
2823 #endif
2824 
2825         if (bp_info->version != 1)
2826                 return -ENOTSUPP;
2827 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2828         /*
2829          * Check for invalid flags and combinations
2830          */
2831         if ((bp_info->trigger_type == 0) ||
2832             (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2833                                        PPC_BREAKPOINT_TRIGGER_RW)) ||
2834             (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2835             (bp_info->condition_mode &
2836              ~(PPC_BREAKPOINT_CONDITION_MODE |
2837                PPC_BREAKPOINT_CONDITION_BE_ALL)))
2838                 return -EINVAL;
2839 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2840         if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2841                 return -EINVAL;
2842 #endif
2843 
2844         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2845                 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2846                     (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2847                         return -EINVAL;
2848                 return set_instruction_bp(child, bp_info);
2849         }
2850         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2851                 return set_dac(child, bp_info);
2852 
2853 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2854         return set_dac_range(child, bp_info);
2855 #else
2856         return -EINVAL;
2857 #endif
2858 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2859         /*
2860          * We only support one data breakpoint
2861          */
2862         if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2863             (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2864             bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2865                 return -EINVAL;
2866 
2867         if ((unsigned long)bp_info->addr >= TASK_SIZE)
2868                 return -EIO;
2869 
2870         brk.address = bp_info->addr & ~7UL;
2871         brk.type = HW_BRK_TYPE_TRANSLATE;
2872         brk.len = 8;
2873         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2874                 brk.type |= HW_BRK_TYPE_READ;
2875         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2876                 brk.type |= HW_BRK_TYPE_WRITE;
2877 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2878         /*
2879          * Check if the request is for 'range' breakpoints. We can
2880          * support it if range < 8 bytes.
2881          */
2882         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2883                 len = bp_info->addr2 - bp_info->addr;
2884         else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2885                 len = 1;
2886         else
2887                 return -EINVAL;
2888         bp = thread->ptrace_bps[0];
2889         if (bp)
2890                 return -ENOSPC;
2891 
2892         /* Create a new breakpoint request if one doesn't exist already */
2893         hw_breakpoint_init(&attr);
2894         attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2895         attr.bp_len = len;
2896         arch_bp_generic_fields(brk.type, &attr.bp_type);
2897 
2898         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2899                                                ptrace_triggered, NULL, child);
2900         if (IS_ERR(bp)) {
2901                 thread->ptrace_bps[0] = NULL;
2902                 return PTR_ERR(bp);
2903         }
2904 
2905         return 1;
2906 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2907 
2908         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2909                 return -EINVAL;
2910 
2911         if (child->thread.hw_brk.address)
2912                 return -ENOSPC;
2913 
2914         if (!ppc_breakpoint_available())
2915                 return -ENODEV;
2916 
2917         child->thread.hw_brk = brk;
2918 
2919         return 1;
2920 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2921 }
2922 
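     /*
      * ppc_del_hwdebug() - remove a hardware debug register request.
      *
      * Backend for PPC_PTRACE_DELHWDEBUG.  @data is the positive handle
      * previously returned by ppc_set_hwdebug().  Returns 0 on success,
      * or a negative errno if no matching breakpoint is installed.
      */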
2923 static long ppc_del_hwdebug(struct task_struct *child, long data)
2924 {
2925 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2926         int ret = 0;
2927         struct thread_struct *thread = &(child->thread);
2928         struct perf_event *bp;
2929 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2930 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2931         int rc;
2932 
2933         if (data <= 4)
2934                 rc = del_instruction_bp(child, (int)data);
2935         else
2936                 rc = del_dac(child, (int)data - 4);
2937 
2938         if (!rc) {
2939                 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2940                                         child->thread.debug.dbcr1)) {
2941                         child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2942                         child->thread.regs->msr &= ~MSR_DE;
2943                 }
2944         }
2945         return rc;
2946 #else
2947         if (data != 1)
2948                 return -EINVAL;
2949 
2950 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2951         bp = thread->ptrace_bps[0];
2952         if (bp) {
2953                 unregister_hw_breakpoint(bp);
2954                 thread->ptrace_bps[0] = NULL;
2955         } else
2956                 ret = -ENOENT;
2957         return ret;
2958 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2959         if (child->thread.hw_brk.address == 0)
2960                 return -ENOENT;
2961 
2962         child->thread.hw_brk.address = 0;
2963         child->thread.hw_brk.type = 0;
2964 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2965 
2966         return 0;
2967 #endif
2968 }
2969 
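     /*
      * arch_ptrace() - handle powerpc-specific ptrace requests.
      *
      * Requests not recognised here fall through to the generic
      * ptrace_request() in the default case.
      */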
2970 long arch_ptrace(struct task_struct *child, long request,
2971                  unsigned long addr, unsigned long data)
2972 {
2973         int ret = -EPERM;
2974         void __user *datavp = (void __user *) data;
2975         unsigned long __user *datalp = datavp;
2976 
2977         switch (request) {
2978         /* read the word at location addr in the USER area. */
2979         case PTRACE_PEEKUSR: {
2980                 unsigned long index, tmp;
2981 
2982                 ret = -EIO;
2983                 /* convert to index and check */
2984 #ifdef CONFIG_PPC32
2985                 index = addr >> 2;
2986                 if ((addr & 3) || (index > PT_FPSCR)
2987                     || (child->thread.regs == NULL))
2988 #else
2989                 index = addr >> 3;
2990                 if ((addr & 7) || (index > PT_FPSCR))
2991 #endif
2992                         break;
2993 
2994                 CHECK_FULL_REGS(child->thread.regs);
2995                 if (index < PT_FPR0) {
2996                         ret = ptrace_get_reg(child, (int) index, &tmp);
2997                         if (ret)
2998                                 break;
2999                 } else {
3000                         unsigned int fpidx = index - PT_FPR0;
3001 
3002                         flush_fp_to_thread(child);
3003                         if (fpidx < (PT_FPSCR - PT_FPR0))
3004                                 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
3005                                        sizeof(long));
3006                         else
3007                                 tmp = child->thread.fp_state.fpscr;
3008                 }
3009                 ret = put_user(tmp, datalp);
3010                 break;
3011         }
3012 
3013         /* write the word at location addr in the USER area */
3014         case PTRACE_POKEUSR: {
3015                 unsigned long index;
3016 
3017                 ret = -EIO;
3018                 /* convert to index and check */
3019 #ifdef CONFIG_PPC32
3020                 index = addr >> 2;
3021                 if ((addr & 3) || (index > PT_FPSCR)
3022                     || (child->thread.regs == NULL))
3023 #else
3024                 index = addr >> 3;
3025                 if ((addr & 7) || (index > PT_FPSCR))
3026 #endif
3027                         break;
3028 
3029                 CHECK_FULL_REGS(child->thread.regs);
3030                 if (index < PT_FPR0) {
3031                         ret = ptrace_put_reg(child, index, data);
3032                 } else {
3033                         unsigned int fpidx = index - PT_FPR0;
3034 
3035                         flush_fp_to_thread(child);
3036                         if (fpidx < (PT_FPSCR - PT_FPR0))
3037                                 memcpy(&child->thread.TS_FPR(fpidx), &data,
3038                                        sizeof(long));
3039                         else
3040                                 child->thread.fp_state.fpscr = data;
3041                         ret = 0;
3042                 }
3043                 break;
3044         }
3045 
3046         case PPC_PTRACE_GETHWDBGINFO: {
3047                 struct ppc_debug_info dbginfo;
3048 
3049                 dbginfo.version = 1;
3050 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3051                 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3052                 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3053                 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3054                 dbginfo.data_bp_alignment = 4;
3055                 dbginfo.sizeof_condition = 4;
3056                 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3057                                    PPC_DEBUG_FEATURE_INSN_BP_MASK;
3058 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3059                 dbginfo.features |=
3060                                    PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3061                                    PPC_DEBUG_FEATURE_DATA_BP_MASK;
3062 #endif
3063 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3064                 dbginfo.num_instruction_bps = 0;
3065                 if (ppc_breakpoint_available())
3066                         dbginfo.num_data_bps = 1;
3067                 else
3068                         dbginfo.num_data_bps = 0;
3069                 dbginfo.num_condition_regs = 0;
3070 #ifdef CONFIG_PPC64
3071                 dbginfo.data_bp_alignment = 8;
3072 #else
3073                 dbginfo.data_bp_alignment = 4;
3074 #endif
3075                 dbginfo.sizeof_condition = 0;
3076 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3077                 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3078                 if (cpu_has_feature(CPU_FTR_DAWR))
3079                         dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3080 #else
3081                 dbginfo.features = 0;
3082 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3083 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3084 
3085                 if (copy_to_user(datavp, &dbginfo,
3086                                  sizeof(struct ppc_debug_info)))
3087                         return -EFAULT;
3088                 return 0;
3089         }
3090 
3091         case PPC_PTRACE_SETHWDEBUG: {
3092                 struct ppc_hw_breakpoint bp_info;
3093 
3094                 if (copy_from_user(&bp_info, datavp,
3095                                    sizeof(struct ppc_hw_breakpoint)))
3096                         return -EFAULT;
3097                 return ppc_set_hwdebug(child, &bp_info);
3098         }
3099 
3100         case PPC_PTRACE_DELHWDEBUG: {
3101                 ret = ppc_del_hwdebug(child, data);
3102                 break;
3103         }
3104 
3105         case PTRACE_GET_DEBUGREG: {
3106 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3107                 unsigned long dabr_fake;
3108 #endif
3109                 ret = -EINVAL;
3110                 /* We only support one DABR and no IABRs at the moment */
3111                 if (addr > 0)
3112                         break;
3113 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3114                 ret = put_user(child->thread.debug.dac1, datalp);
3115 #else
3116                 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3117                              (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3118                 ret = put_user(dabr_fake, datalp);
3119 #endif
3120                 break;
3121         }
3122 
3123         case PTRACE_SET_DEBUGREG:
3124                 ret = ptrace_set_debugreg(child, addr, data);
3125                 break;
3126 
3127 #ifdef CONFIG_PPC64
3128         case PTRACE_GETREGS64:
3129 #endif
3130         case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3131                 return copy_regset_to_user(child, &user_ppc_native_view,
3132                                            REGSET_GPR,
3133                                            0, sizeof(struct pt_regs),
3134                                            datavp);
3135 
3136 #ifdef CONFIG_PPC64
3137         case PTRACE_SETREGS64:
3138 #endif
3139         case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3140                 return copy_regset_from_user(child, &user_ppc_native_view,
3141                                              REGSET_GPR,
3142                                              0, sizeof(struct pt_regs),
3143                                              datavp);
3144 
3145         case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3146                 return copy_regset_to_user(child, &user_ppc_native_view,
3147                                            REGSET_FPR,
3148                                            0, sizeof(elf_fpregset_t),
3149                                            datavp);
3150 
3151         case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3152                 return copy_regset_from_user(child, &user_ppc_native_view,
3153                                              REGSET_FPR,
3154                                              0, sizeof(elf_fpregset_t),
3155                                              datavp);
3156 
3157 #ifdef CONFIG_ALTIVEC
3158         case PTRACE_GETVRREGS:
3159                 return copy_regset_to_user(child, &user_ppc_native_view,
3160                                            REGSET_VMX,
3161                                            0, (33 * sizeof(vector128) +
3162                                                sizeof(u32)),
3163                                            datavp);
3164 
3165         case PTRACE_SETVRREGS:
3166                 return copy_regset_from_user(child, &user_ppc_native_view,
3167                                              REGSET_VMX,
3168                                              0, (33 * sizeof(vector128) +
3169                                                  sizeof(u32)),
3170                                              datavp);
3171 #endif
3172 #ifdef CONFIG_VSX
3173         case PTRACE_GETVSRREGS:
3174                 return copy_regset_to_user(child, &user_ppc_native_view,
3175                                            REGSET_VSX,
3176                                            0, 32 * sizeof(double),
3177                                            datavp);
3178 
3179         case PTRACE_SETVSRREGS:
3180                 return copy_regset_from_user(child, &user_ppc_native_view,
3181                                              REGSET_VSX,
3182                                              0, 32 * sizeof(double),
3183                                              datavp);
3184 #endif
3185 #ifdef CONFIG_SPE
3186         case PTRACE_GETEVRREGS:
3187                 /* Get the child spe register state. */
3188                 return copy_regset_to_user(child, &user_ppc_native_view,
3189                                            REGSET_SPE, 0, 35 * sizeof(u32),
3190                                            datavp);
3191 
3192         case PTRACE_SETEVRREGS:
3193                 /* Set the child spe register state. */
3194                 return copy_regset_from_user(child, &user_ppc_native_view,
3195                                              REGSET_SPE, 0, 35 * sizeof(u32),
3196                                              datavp);
3197 #endif
3198 
3199         default:
3200                 ret = ptrace_request(child, request, addr, data);
3201                 break;
3202         }
3203         return ret;
3204 }
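
     /*
      * Illustrative sketch of how a tracer drives the hardware debug
      * interface handled above; `pid` and `watch_addr` are placeholders
      * for a stopped tracee and the address to watch:
      *
      *   struct ppc_debug_info info;
      *   struct ppc_hw_breakpoint bp = {
      *           .version        = 1,
      *           .trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
      *           .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
      *           .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
      *           .addr           = watch_addr,
      *   };
      *   long handle;
      *
      *   ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &info);
      *   handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
      *   ...
      *   ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, handle);
      */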
3205 
3206 #ifdef CONFIG_SECCOMP
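     /*
      * do_seccomp() - run the seccomp checks for the current syscall.
      *
      * Returns 0 if the syscall may proceed, or -1 if seccomp rejected it,
      * in which case r3 already holds the value to be returned to
      * userspace.
      */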
3207 static int do_seccomp(struct pt_regs *regs)
3208 {
3209         if (!test_thread_flag(TIF_SECCOMP))
3210                 return 0;
3211 
3212         /*
3213          * The ABI we present to seccomp tracers is that r3 contains
3214          * the syscall return value and orig_gpr3 contains the first
3215          * syscall parameter. This is different to the ptrace ABI where
3216          * both r3 and orig_gpr3 contain the first syscall parameter.
3217          */
3218         regs->gpr[3] = -ENOSYS;
3219 
3220         /*
3221          * We use the __ version here because we have already checked
3222          * TIF_SECCOMP. If this fails, there is nothing left to do, we
3223          * have already loaded -ENOSYS into r3, or seccomp has put
3224          * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3225          */
3226         if (__secure_computing(NULL))
3227                 return -1;
3228 
3229         /*
3230          * The syscall was allowed by seccomp, restore the register
3231          * state to what audit expects.
3232          * Note that we use orig_gpr3, which means a seccomp tracer can
3233          * modify the first syscall parameter (in orig_gpr3) and also
3234          * allow the syscall to proceed.
3235          */
3236         regs->gpr[3] = regs->orig_gpr3;
3237 
3238         return 0;
3239 }
3240 #else
3241 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3242 #endif /* CONFIG_SECCOMP */
3243 
3244 /**
3245  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3246  * @regs: the pt_regs of the task to trace (current)
3247  *
3248  * Performs various types of tracing on syscall entry. This includes seccomp,
3249  * ptrace, syscall tracepoints and audit.
3250  *
3251  * The pt_regs are potentially visible to userspace via ptrace, so their
3252  * contents are ABI.
3253  *
3254  * One or more of the tracers may modify the contents of pt_regs, in particular
3255  * to modify arguments or even the syscall number itself.
3256  *
3257  * It's also possible that a tracer can choose to reject the system call. In
3258  * that case this function will return an illegal syscall number, and will put
3259  * an appropriate return value in regs->r3.
3260  *
3261  * Return: the (possibly changed) syscall number.
3262  */
3263 long do_syscall_trace_enter(struct pt_regs *regs)
3264 {
3265         user_exit();
3266 
3267         /*
3268          * The tracer may decide to abort the syscall, if so tracehook
3269          * will return !0. Note that the tracer may also just change
3270          * regs->gpr[0] to an invalid syscall number, that is handled
3271          * below on the exit path.
3272          */
3273         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3274             tracehook_report_syscall_entry(regs))
3275                 goto skip;
3276 
3277         /* Run seccomp after ptrace; allow it to set gpr[3]. */
3278         if (do_seccomp(regs))
3279                 return -1;
3280 
3281         /* Avoid trace and audit when syscall is invalid. */
3282         if (regs->gpr[0] >= NR_syscalls)
3283                 goto skip;
3284 
3285         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3286                 trace_sys_enter(regs, regs->gpr[0]);
3287 
3288 #ifdef CONFIG_PPC64
3289         if (!is_32bit_task())
3290                 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3291                                     regs->gpr[5], regs->gpr[6]);
3292         else
3293 #endif
3294                 audit_syscall_entry(regs->gpr[0],
3295                                     regs->gpr[3] & 0xffffffff,
3296                                     regs->gpr[4] & 0xffffffff,
3297                                     regs->gpr[5] & 0xffffffff,
3298                                     regs->gpr[6] & 0xffffffff);
3299 
3300         /* Return the possibly modified but valid syscall number */
3301         return regs->gpr[0];
3302 
3303 skip:
3304         /*
3305          * If we are aborting explicitly, or if the syscall number is
3306          * now invalid, set the return value to -ENOSYS.
3307          */
3308         regs->gpr[3] = -ENOSYS;
3309         return -1;
3310 }
3311 
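     /**
      * do_syscall_trace_leave() - Do syscall tracing on kernel exit.
      * @regs: the pt_regs of the task to trace (current)
      *
      * Counterpart to do_syscall_trace_enter(): reports the syscall result
      * to audit and the syscall-exit tracepoint, notifies a ptrace tracer
      * (also when single-stepping), and calls user_enter() before the
      * return to userspace.
      */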
3312 void do_syscall_trace_leave(struct pt_regs *regs)
3313 {
3314         int step;
3315 
3316         audit_syscall_exit(regs);
3317 
3318         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3319                 trace_sys_exit(regs, regs->result);
3320 
3321         step = test_thread_flag(TIF_SINGLESTEP);
3322         if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3323                 tracehook_report_syscall_exit(regs, step);
3324 
3325         user_enter();
3326 }
3327 
