@@ -409,6 +409,71 @@ asm (
"	blr	;"
);

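+/*
+ * Emit instructions for a BPF atomic load-acquire (BPF_LOAD_ACQ) or
+ * store-release (BPF_STORE_REL): a plain load/store paired with an
+ * lwsync barrier that provides the required memory ordering.
+ */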
+static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
+{
+	u32 code = insn.code;
+	u32 dst_reg = bpf_to_ppc(insn.dst_reg);
+	u32 src_reg = bpf_to_ppc(insn.src_reg);
+	u32 size = BPF_SIZE(code);
+	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+	s16 off = insn.off;
+	s32 imm = insn.imm;
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		switch (size) {
+		case BPF_B:
+			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+			break;
+		case BPF_H:
+			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+			break;
+		case BPF_W:
+			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+			break;
+		case BPF_DW:
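+			/*
+			 * ld is a DS-form instruction: its displacement must
+			 * be a multiple of 4, so fall back to indexed ldx
+			 * with the offset in a temporary register otherwise.
+			 */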
+			if (off % 4) {
+				EMIT(PPC_RAW_LI(tmp1_reg, off));
+				EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
+			} else {
+				EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
+			}
+			break;
+		}
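+		/* lwsync after the load provides the acquire barrier */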
+		EMIT(PPC_RAW_LWSYNC());
+		break;
+	case BPF_STORE_REL:
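+		/* lwsync before the store provides the release barrier */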
+		EMIT(PPC_RAW_LWSYNC());
+		switch (size) {
+		case BPF_B:
+			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
+			break;
+		case BPF_H:
+			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
+			break;
+		case BPF_W:
+			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
+			break;
+		case BPF_DW:
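+			/* std is DS-form like ld: use indexed stdx if off is not a multiple of 4 */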
+			if (off % 4) {
+				EMIT(PPC_RAW_LI(tmp2_reg, off));
+				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
+			} else {
+				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
+			}
+			break;
+		}
+		break;
+	default:
+		pr_err_ratelimited("unexpected atomic load/store op code %02x\n",
+				   imm);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
@@ -898,8 +963,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
+		case BPF_STX | BPF_ATOMIC | BPF_B:
+		case BPF_STX | BPF_ATOMIC | BPF_H:
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
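+			/* Acquire/release load/store, as opposed to an RMW atomic op */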
+			if (bpf_atomic_is_load_store(&insn[i])) {
+				ret = emit_atomic_ld_st(insn[i], ctx, image);
+				if (ret)
+					return ret;
+
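+				/* lbz/lhz/lwz already zero-extend, so the verifier's zext insn can be skipped */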
+				if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
+					addrs[++i] = ctx->idx * 4;
+				break;
+			} else if (size == BPF_B || size == BPF_H) {
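+				/* 8-/16-bit RMW atomic ops are not supported */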
+				pr_err_ratelimited(
+					"eBPF filter atomic op code %02x (@%d) unsupported\n",
+					code, i);
+				return -EOPNOTSUPP;
+			}
+
			save_reg = tmp2_reg;
			ret_reg = src_reg;