@@ -264,86 +264,14 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
 #define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
 #define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
 
-#ifdef CONFIG_RISCV_M_MODE
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
-	u8 val;
-
-	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
-	*r_val = val;
-
-	return 0;
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
-	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
-
-	return 0;
-}
-
-static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
-{
-	register ulong __mepc asm ("a2") = mepc;
-	ulong val, rvc_mask = 3, tmp;
-
-	asm ("and %[tmp], %[addr], 2\n"
-		"bnez %[tmp], 1f\n"
-#if defined(CONFIG_64BIT)
-		__stringify(LWU) " %[insn], (%[addr])\n"
-#else
-		__stringify(LW) " %[insn], (%[addr])\n"
-#endif
-		"and %[tmp], %[insn], %[rvc_mask]\n"
-		"beq %[tmp], %[rvc_mask], 2f\n"
-		"sll %[insn], %[insn], %[xlen_minus_16]\n"
-		"srl %[insn], %[insn], %[xlen_minus_16]\n"
-		"j 2f\n"
-		"1:\n"
-		"lhu %[insn], (%[addr])\n"
-		"and %[tmp], %[insn], %[rvc_mask]\n"
-		"bne %[tmp], %[rvc_mask], 2f\n"
-		"lhu %[tmp], 2(%[addr])\n"
-		"sll %[tmp], %[tmp], 16\n"
-		"add %[insn], %[insn], %[tmp]\n"
-		"2:"
-	: [insn] "=&r" (val), [tmp] "=&r" (tmp)
-	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
-	  [xlen_minus_16] "i" (XLEN_MINUS_16));
-
-	*r_insn = val;
-
-	return 0;
-}
-#else
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
-	if (user_mode(regs)) {
-		return __get_user(*r_val, (u8 __user *)addr);
-	} else {
-		*r_val = *addr;
-		return 0;
-	}
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
-	if (user_mode(regs)) {
-		return __put_user(val, (u8 __user *)addr);
-	} else {
-		*addr = val;
-		return 0;
-	}
-}
-
-#define __read_insn(regs, insn, insn_addr)				\
+#define __read_insn(regs, insn, insn_addr, type)			\
 ({									\
 	int __ret;							\
 									\
 	if (user_mode(regs)) {						\
-		__ret = __get_user(insn, insn_addr);			\
+		__ret = __get_user(insn, (type __user *) insn_addr);	\
 	} else {							\
-		insn = *(__force u16 *)insn_addr;			\
+		insn = *(type *)insn_addr;				\
 		__ret = 0;						\
 	}								\
 									\
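
(A usage sketch of the reworked macro, not part of the commit itself: the new
type argument picks the access width, so one macro now covers both the 16-bit
parcel reads and the aligned 32-bit read, casting the raw address to a
properly typed __user pointer for __get_user() in user mode and doing a plain
typed dereference in kernel mode. Assuming the regs/epc context of get_insn():)

	ulong insn = 0;
	/* read the low 16-bit parcel of a possibly compressed instruction */
	if (__read_insn(regs, insn, epc, u16))
		return -EFAULT;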
@@ -356,9 +284,8 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
 
 	if (epc & 0x2) {
 		ulong tmp = 0;
-		u16 __user *insn_addr = (u16 __user *)epc;
 
-		if (__read_insn(regs, insn, insn_addr))
+		if (__read_insn(regs, insn, epc, u16))
 			return -EFAULT;
 		/* __get_user() uses regular "lw" which sign extend the loaded
 		 * value make sure to clear higher order bits in case we "or" it
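
(Why the comment above matters, sketched by way of illustration rather than
taken from the commit: a sign-extending load of a halfword whose top bit is
set smears ones through the upper bits, so they must be cleared before the
second 16-bit parcel is or-ed in.)

	unsigned long tmp = 0x1234;                        /* upper 16-bit parcel */
	unsigned long insn = (unsigned long)(short)0x8001; /* sign-extended: 0xffffffffffff8001 */
	insn &= 0xffff;                                    /* clear the smeared high bits */
	insn |= tmp << 16;                                 /* 0x12348001, as intended */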
@@ -369,16 +296,14 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
 			*r_insn = insn;
 			return 0;
 		}
-		insn_addr++;
-		if (__read_insn(regs, tmp, insn_addr))
+		epc += sizeof(u16);
+		if (__read_insn(regs, tmp, epc, u16))
 			return -EFAULT;
 		*r_insn = (tmp << 16) | insn;
 
 		return 0;
 	} else {
-		u32 __user *insn_addr = (u32 __user *)epc;
-
-		if (__read_insn(regs, insn, insn_addr))
+		if (__read_insn(regs, insn, epc, u32))
 			return -EFAULT;
 		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
 			*r_insn = insn;
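
(Background, assuming only the base RISC-V encoding and nothing specific to
this commit: an instruction whose two lowest opcode bits are both 1 is at
least 32 bits wide; anything else is a 16-bit compressed instruction. That is
what the __INSN_LENGTH_MASK / __INSN_LENGTH_32 comparison above tests. A
hypothetical standalone spelling of the same check:)

	#define SKETCH_INSN_IS_32BIT(insn)	(((insn) & 0x3) == 0x3)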
@@ -390,7 +315,6 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
 		return 0;
 	}
 }
-#endif
 
 union reg_data {
 	u8 data_bytes[8];
@@ -409,7 +333,7 @@ int handle_misaligned_load(struct pt_regs *regs)
 	unsigned long epc = regs->epc;
 	unsigned long insn;
 	unsigned long addr = regs->badaddr;
-	int i, fp = 0, shift = 0, len = 0;
+	int fp = 0, shift = 0, len = 0;
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 
@@ -492,9 +416,11 @@ int handle_misaligned_load(struct pt_regs *regs)
 		return -EOPNOTSUPP;
 
 	val.data_u64 = 0;
-	for (i = 0; i < len; i++) {
-		if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+	if (user_mode(regs)) {
+		if (raw_copy_from_user(&val, (u8 __user *)addr, len))
 			return -1;
+	} else {
+		memcpy(&val, (u8 *)addr, len);
 	}
 
 	if (!fp)
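
(Hedged note on the new copy path: raw_copy_from_user() returns the number of
bytes it failed to copy, so any nonzero result here means the user access
faulted. A standalone helper in the same spirit, assuming <linux/uaccess.h>
and <linux/string.h>; the name is hypothetical:)

	static int sketch_copy_mem(struct pt_regs *regs, void *dst,
				   unsigned long src, int len)
	{
		if (user_mode(regs))
			/* nonzero return = bytes left uncopied = fault */
			return raw_copy_from_user(dst, (void __user *)src, len) ? -1 : 0;
		memcpy(dst, (void *)src, len);	/* kernel address, direct copy */
		return 0;
	}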
@@ -515,7 +441,7 @@ int handle_misaligned_store(struct pt_regs *regs)
 	unsigned long epc = regs->epc;
 	unsigned long insn;
 	unsigned long addr = regs->badaddr;
-	int i, len = 0, fp = 0;
+	int len = 0, fp = 0;
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 
@@ -588,9 +514,11 @@ int handle_misaligned_store(struct pt_regs *regs)
 	if (!IS_ENABLED(CONFIG_FPU) && fp)
 		return -EOPNOTSUPP;
 
-	for (i = 0; i < len; i++) {
-		if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+	if (user_mode(regs)) {
+		if (raw_copy_to_user((u8 __user *)addr, &val, len))
 			return -1;
+	} else {
+		memcpy((u8 *)addr, &val, len);
 	}
 
 	regs->epc = epc + INSN_LEN(insn);
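
(Closing sketch, again hedged: INSN_LEN() yields the byte length of the just
emulated instruction, 2 for a compressed one and 4 for a full-size one, so
bumping epc by it resumes execution at the next instruction. A hypothetical
spelling of the same idea, building on the length check sketched earlier:)

	#define SKETCH_INSN_LEN(insn)	((((insn) & 0x3) == 0x3) ? 4 : 2)
	regs->epc = epc + SKETCH_INSN_LEN(insn);	/* skip the emulated insn */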