TOMOYO Linux Cross Reference
Linux/arch/unicore32/mm/alignment.c


/*
 * linux/arch/unicore32/mm/alignment.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * TODO:
 *  FPU ldm/stm not handled
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/unaligned.h>

#include "mm.h"

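/*
 * Field-extraction helpers for the load/store encodings handled below:
 * addressing-mode bits, register numbers, immediate offsets and the
 * shift fields of the register-offset form.
 */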
#define CODING_BITS(i)  (i & 0xe0000120)

#define LDST_P_BIT(i)   (i & (1 << 28)) /* Preindex             */
#define LDST_U_BIT(i)   (i & (1 << 27)) /* Add offset           */
#define LDST_W_BIT(i)   (i & (1 << 25)) /* Writeback            */
#define LDST_L_BIT(i)   (i & (1 << 24)) /* Load                 */

#define LDST_P_EQ_U(i)  ((((i) ^ ((i) >> 1)) & (1 << 27)) == 0)

#define LDSTH_I_BIT(i)  (i & (1 << 26)) /* half-word immed      */
#define LDM_S_BIT(i)    (i & (1 << 26)) /* write ASR from BSR */
#define LDM_H_BIT(i)    (i & (1 << 6))  /* select r0-r15 or r16-r31 */

#define RN_BITS(i)      ((i >> 19) & 31)        /* Rn                   */
#define RD_BITS(i)      ((i >> 14) & 31)        /* Rd                   */
#define RM_BITS(i)      (i & 31)        /* Rm                   */

#define REGMASK_BITS(i) (((i & 0x7fe00) >> 3) | (i & 0x3f))
#define OFFSET_BITS(i)  (i & 0x03fff)

#define SHIFT_BITS(i)   ((i >> 9) & 0x1f)
#define SHIFT_TYPE(i)   (i & 0xc0)
#define SHIFT_LSL       0x00
#define SHIFT_LSR       0x40
#define SHIFT_ASR       0x80
#define SHIFT_RORRRX    0xc0

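/*
 * The decoded offset lives in a union so that the SHIFT_ASR case below can
 * apply an arithmetic (sign-propagating) shift on the signed view, while
 * every other case works on the unsigned view.
 */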
union offset_union {
        unsigned long un;
        signed long sn;
};

#define TYPE_ERROR      0
#define TYPE_FAULT      1
#define TYPE_LDST       2
#define TYPE_DONE       3
#define TYPE_SWAP       4
#define TYPE_COLS       5       /* Coprocessor load/store */

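/*
 * Unaligned accessors.  Every byte access is covered by an exception-table
 * entry: if the access faults, the fixup code sets "err" to 1 and execution
 * resumes after the access, so the enclosing macro can branch to the
 * caller's local "fault" label instead of oopsing.
 */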
#define get8_unaligned_check(val, addr, err)            \
        __asm__(                                        \
        "1:     ldb.u   %1, [%2], #1\n"                 \
        "2:\n"                                          \
        "       .pushsection .fixup,\"ax\"\n"           \
        "       .align  2\n"                            \
        "3:     mov     %0, #1\n"                       \
        "       b       2b\n"                           \
        "       .popsection\n"                          \
        "       .pushsection __ex_table,\"a\"\n"        \
        "       .align  3\n"                            \
        "       .long   1b, 3b\n"                       \
        "       .popsection\n"                          \
        : "=r" (err), "=&r" (val), "=r" (addr)          \
        : "0" (err), "2" (addr))

#define get8t_unaligned_check(val, addr, err)           \
        __asm__(                                        \
        "1:     ldb.u   %1, [%2], #1\n"                 \
        "2:\n"                                          \
        "       .pushsection .fixup,\"ax\"\n"           \
        "       .align  2\n"                            \
        "3:     mov     %0, #1\n"                       \
        "       b       2b\n"                           \
        "       .popsection\n"                          \
        "       .pushsection __ex_table,\"a\"\n"        \
        "       .align  3\n"                            \
        "       .long   1b, 3b\n"                       \
        "       .popsection\n"                          \
        : "=r" (err), "=&r" (val), "=r" (addr)          \
        : "0" (err), "2" (addr))

#define get16_unaligned_check(val, addr)                        \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
                get8_unaligned_check(val, a, err);              \
                get8_unaligned_check(v, a, err);                \
                val |= v << 8;                                  \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define put16_unaligned_check(val, addr)                        \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
                __asm__(                                        \
                "1:     stb.u   %1, [%2], #1\n"                 \
                "       mov     %1, %1 >> #8\n"                 \
                "2:     stb.u   %1, [%2]\n"                     \
                "3:\n"                                          \
                "       .pushsection .fixup,\"ax\"\n"           \
                "       .align  2\n"                            \
                "4:     mov     %0, #1\n"                       \
                "       b       3b\n"                           \
                "       .popsection\n"                          \
                "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                            \
                "       .long   1b, 4b\n"                       \
                "       .long   2b, 4b\n"                       \
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define __put32_unaligned_check(ins, val, addr)                 \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
                __asm__(                                        \
                "1:     "ins"   %1, [%2], #1\n"                 \
                "       mov     %1, %1 >> #8\n"                 \
                "2:     "ins"   %1, [%2], #1\n"                 \
                "       mov     %1, %1 >> #8\n"                 \
                "3:     "ins"   %1, [%2], #1\n"                 \
                "       mov     %1, %1 >> #8\n"                 \
                "4:     "ins"   %1, [%2]\n"                     \
                "5:\n"                                          \
                "       .pushsection .fixup,\"ax\"\n"           \
                "       .align  2\n"                            \
                "6:     mov     %0, #1\n"                       \
                "       b       5b\n"                           \
                "       .popsection\n"                          \
                "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                            \
                "       .long   1b, 6b\n"                       \
                "       .long   2b, 6b\n"                       \
                "       .long   3b, 6b\n"                       \
                "       .long   4b, 6b\n"                       \
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define get32_unaligned_check(val, addr)                        \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
                get8_unaligned_check(val, a, err);              \
                get8_unaligned_check(v, a, err);                \
                val |= v << 8;                                  \
                get8_unaligned_check(v, a, err);                \
                val |= v << 16;                                 \
                get8_unaligned_check(v, a, err);                \
                val |= v << 24;                                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define put32_unaligned_check(val, addr)                        \
        __put32_unaligned_check("stb.u", val, addr)

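/*
 * The *t ("translated") variants are used on the post-indexed writeback
 * path of do_alignment_ldrstr(), which touches user memory; on this port
 * they expand to exactly the same accessors as the plain versions.
 */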
#define get32t_unaligned_check(val, addr)                       \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
                get8t_unaligned_check(val, a, err);             \
                get8t_unaligned_check(v, a, err);               \
                val |= v << 8;                                  \
                get8t_unaligned_check(v, a, err);               \
                val |= v << 16;                                 \
                get8t_unaligned_check(v, a, err);               \
                val |= v << 24;                                 \
                if (err)                                        \
                        goto fault;                             \
        } while (0)

#define put32t_unaligned_check(val, addr)                       \
        __put32_unaligned_check("stb.u", val, addr)

static void
do_alignment_finish_ldst(unsigned long addr, unsigned long instr,
                         struct pt_regs *regs, union offset_union offset)
{
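        /*
         * Apply the addressing-mode side effects: negate the offset for a
         * "down" transfer, add it for the post-indexed form, and write the
         * final address back to Rn when post-indexing or writeback is used.
         */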
        if (!LDST_U_BIT(instr))
                offset.un = -offset.un;

        if (!LDST_P_BIT(instr))
                addr += offset.un;

        if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
                regs->uregs[RN_BITS(instr)] = addr;
}

static int
do_alignment_ldrhstrh(unsigned long addr, unsigned long instr,
                      struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);

        /* the old value 0x40002120 could not identify swap instructions correctly */
        if ((instr & 0x4b003fe0) == 0x40000120)
                goto swp;

        if (LDST_L_BIT(instr)) {
                unsigned long val;
                get16_unaligned_check(val, addr);

                /* signed half-word? */
                if (instr & 0x80)
                        val = (signed long)((signed short)val);

                regs->uregs[rd] = val;
        } else
                put16_unaligned_check(regs->uregs[rd], addr);

        return TYPE_LDST;

swp:
        /* Only handle swap-word here: a swap-byte access should never
         * raise this alignment exception. */
        get32_unaligned_check(regs->uregs[RD_BITS(instr)], addr);
        put32_unaligned_check(regs->uregs[RM_BITS(instr)], addr);
        return TYPE_SWAP;

fault:
        return TYPE_FAULT;
}

static int
do_alignment_ldrstr(unsigned long addr, unsigned long instr,
                    struct pt_regs *regs)
{
        unsigned int rd = RD_BITS(instr);

        if (!LDST_P_BIT(instr) && LDST_W_BIT(instr))
                goto trans;

        if (LDST_L_BIT(instr))
                get32_unaligned_check(regs->uregs[rd], addr);
        else
                put32_unaligned_check(regs->uregs[rd], addr);
        return TYPE_LDST;

trans:
        if (LDST_L_BIT(instr))
                get32t_unaligned_check(regs->uregs[rd], addr);
        else
                put32t_unaligned_check(regs->uregs[rd], addr);
        return TYPE_LDST;

fault:
        return TYPE_FAULT;
}

/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *              ------ increasing address ----->
 *              |    | r0 | r1 | ... | rx |    |
 * PU = 01             B                    A
 * PU = 11        B                    A
 * PU = 00        A                    B
 * PU = 10             A                    B
 */
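/*
 * For example, with PU = 01, four registers in the mask and a base of
 * 0x1000: eaddr stays 0x1000, the words are transferred at 0x1000, 0x1004,
 * 0x1008 and 0x100c, and writeback (if requested) sets the base to 0x1010.
 */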
static int
do_alignment_ldmstm(unsigned long addr, unsigned long instr,
                    struct pt_regs *regs)
{
        unsigned int rd, rn, pc_correction, reg_correction, nr_regs, regbits;
        unsigned long eaddr, newaddr;

        if (LDM_S_BIT(instr))
                goto bad;

        pc_correction = 4;      /* processor implementation defined */

        /* total size in bytes of the registers in the mask to be transferred */
        nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

        rn = RN_BITS(instr);
        newaddr = eaddr = regs->uregs[rn];

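        /*
         * newaddr is the value written back to Rn; eaddr is the lowest
         * address actually transferred (for a decrementing transfer the
         * lowest address is the new base).
         */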
        if (!LDST_U_BIT(instr))
                nr_regs = -nr_regs;
        newaddr += nr_regs;
        if (!LDST_U_BIT(instr))
                eaddr = newaddr;

        if (LDST_P_EQ_U(instr)) /* U = P */
                eaddr += 4;

        /*
         * This is a "hint" - we already have eaddr worked out by the
         * processor for us.
         */
        if (addr != eaddr) {
                printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
                       "addr = %08lx, eaddr = %08lx\n",
                       instruction_pointer(regs), instr, addr, eaddr);
                show_regs(regs);
        }

        if (LDM_H_BIT(instr))
                reg_correction = 0x10;
        else
                reg_correction = 0x00;

        for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
             regbits >>= 1, rd += 1)
                if (regbits & 1) {
                        if (LDST_L_BIT(instr))
                                get32_unaligned_check(regs->
                                        uregs[rd + reg_correction], eaddr);
                        else
                                put32_unaligned_check(regs->
                                        uregs[rd + reg_correction], eaddr);
                        eaddr += 4;
                }

        if (LDST_W_BIT(instr))
                regs->uregs[rn] = newaddr;
        return TYPE_DONE;

fault:
        regs->UCreg_pc -= pc_correction;
        return TYPE_FAULT;

bad:
        printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
        return TYPE_ERROR;
}

static int
do_alignment(unsigned long addr, unsigned int error_code, struct pt_regs *regs)
{
        union offset_union offset;
        unsigned long instr, instrptr;
        int (*handler) (unsigned long addr, unsigned long instr,
                        struct pt_regs *regs);
        unsigned int type;

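        /*
         * Fetch the instruction that trapped: a kernel-space PC can be
         * dereferenced directly, while a user-space PC is read with ldw.u.
         */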
        instrptr = instruction_pointer(regs);
        if (instrptr >= PAGE_OFFSET)
                instr = *(unsigned long *)instrptr;
        else {
                __asm__ __volatile__(
                                "ldw.u  %0, [%1]\n"
                                : "=&r"(instr)
                                : "r"(instrptr));
        }

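        /*
         * Step the PC past the faulting instruction now; the fault paths
         * below wind it back again if the access cannot be emulated.
         */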
        regs->UCreg_pc += 4;

        switch (CODING_BITS(instr)) {
        case 0x40000120:        /* ldrh or strh */
                if (LDSTH_I_BIT(instr))
                        offset.un = (instr & 0x3e00) >> 4 | (instr & 31);
                else
                        offset.un = regs->uregs[RM_BITS(instr)];
                handler = do_alignment_ldrhstrh;
                break;

        case 0x60000000:        /* ldr or str immediate */
        case 0x60000100:        /* ldr or str immediate */
        case 0x60000020:        /* ldr or str immediate */
        case 0x60000120:        /* ldr or str immediate */
                offset.un = OFFSET_BITS(instr);
                handler = do_alignment_ldrstr;
                break;

        case 0x40000000:        /* ldr or str register */
                offset.un = regs->uregs[RM_BITS(instr)];
                {
                        unsigned int shiftval = SHIFT_BITS(instr);

                        switch (SHIFT_TYPE(instr)) {
                        case SHIFT_LSL:
                                offset.un <<= shiftval;
                                break;

                        case SHIFT_LSR:
                                offset.un >>= shiftval;
                                break;

                        case SHIFT_ASR:
                                offset.sn >>= shiftval;
                                break;

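                        /*
                         * A shift amount of zero with this type encodes
                         * RRX: shift right by one and move the carry flag
                         * into bit 31; otherwise rotate right by shiftval.
                         */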
                        case SHIFT_RORRRX:
                                if (shiftval == 0) {
                                        offset.un >>= 1;
                                        if (regs->UCreg_asr & PSR_C_BIT)
                                                offset.un |= 1 << 31;
                                } else
                                        offset.un = offset.un >> shiftval |
                                            offset.un << (32 - shiftval);
                                break;
                        }
                }
                handler = do_alignment_ldrstr;
                break;

        case 0x80000000:        /* ldm or stm */
        case 0x80000020:        /* ldm or stm */
                handler = do_alignment_ldmstm;
                break;

        default:
                goto bad;
        }

        type = handler(addr, instr, regs);

        if (type == TYPE_ERROR || type == TYPE_FAULT)
                goto bad_or_fault;

        if (type == TYPE_LDST)
                do_alignment_finish_ldst(addr, instr, regs, offset);

        return 0;

bad_or_fault:
        if (type == TYPE_ERROR)
                goto bad;
        regs->UCreg_pc -= 4;
        /*
         * We got a fault - fix it up, or die.
         */
        do_bad_area(addr, error_code, regs);
        return 0;

bad:
        /*
         * Oops, we didn't handle the instruction.
         * However, FPU load/store instructions must be handled first.
         */
#ifdef CONFIG_UNICORE_FPU_F64
        /* handle co.load/store */
#define CODING_COLS                0xc0000000
#define COLS_OFFSET_BITS(i)     (i & 0x1FF)
#define COLS_L_BITS(i)          (i & (1<<24))
#define COLS_FN_BITS(i)         ((i>>14) & 31)
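        /*
         * A coprocessor load fetches the word and moves it into FPU
         * register Fn with MTF; a store reads Fn back with MFF and writes
         * it out.  Fn is only known at run time, so each register gets its
         * own case below.
         */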
        if ((instr & 0xe0000000) == CODING_COLS) {
                unsigned int fn = COLS_FN_BITS(instr);
                unsigned long val = 0;
                if (COLS_L_BITS(instr)) {
                        get32t_unaligned_check(val, addr);
                        switch (fn) {
#define ASM_MTF(n)      case n:                                         \
                        __asm__ __volatile__("MTF %0, F" __stringify(n) \
                                : : "r"(val));                          \
                        break;
                        ASM_MTF(0); ASM_MTF(1); ASM_MTF(2); ASM_MTF(3);
                        ASM_MTF(4); ASM_MTF(5); ASM_MTF(6); ASM_MTF(7);
                        ASM_MTF(8); ASM_MTF(9); ASM_MTF(10); ASM_MTF(11);
                        ASM_MTF(12); ASM_MTF(13); ASM_MTF(14); ASM_MTF(15);
                        ASM_MTF(16); ASM_MTF(17); ASM_MTF(18); ASM_MTF(19);
                        ASM_MTF(20); ASM_MTF(21); ASM_MTF(22); ASM_MTF(23);
                        ASM_MTF(24); ASM_MTF(25); ASM_MTF(26); ASM_MTF(27);
                        ASM_MTF(28); ASM_MTF(29); ASM_MTF(30); ASM_MTF(31);
#undef ASM_MTF
                        }
                } else {
                        switch (fn) {
#define ASM_MFF(n)      case n:                                         \
                        __asm__ __volatile__("MFF %0, F" __stringify(n) \
                                : "=r"(val));                           \
                        break;
                        ASM_MFF(0); ASM_MFF(1); ASM_MFF(2); ASM_MFF(3);
                        ASM_MFF(4); ASM_MFF(5); ASM_MFF(6); ASM_MFF(7);
                        ASM_MFF(8); ASM_MFF(9); ASM_MFF(10); ASM_MFF(11);
                        ASM_MFF(12); ASM_MFF(13); ASM_MFF(14); ASM_MFF(15);
                        ASM_MFF(16); ASM_MFF(17); ASM_MFF(18); ASM_MFF(19);
                        ASM_MFF(20); ASM_MFF(21); ASM_MFF(22); ASM_MFF(23);
                        ASM_MFF(24); ASM_MFF(25); ASM_MFF(26); ASM_MFF(27);
                        ASM_MFF(28); ASM_MFF(29); ASM_MFF(30); ASM_MFF(31);
#undef ASM_MFF
                        }
                        put32t_unaligned_check(val, addr);
                }
                return TYPE_COLS;
        }
fault:
        return TYPE_FAULT;
#endif
        printk(KERN_ERR "Alignment trap: not handling instruction "
               "%08lx at [<%08lx>]\n", instr, instrptr);
        return 1;
}

/*
 * This needs to be done after sysctl_init, otherwise sys/ will be
 * overwritten.  Actually, this shouldn't be in sys/ at all since
 * it isn't a sysctl, and it doesn't contain sysctl information.
 */
static int __init alignment_init(void)
{
        hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
                        "alignment exception");

        return 0;
}

fs_initcall(alignment_init);

