@@ -759,38 +759,38 @@ defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
  * extremely late to prevent them from being accidentally reordered in the backend
  * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
  */
-multiclass RELEASE_BINOP_MI<string op> {
+multiclass RELEASE_BINOP_MI<SDNode op> {
     def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
         "#BINOP "#NAME#"8mi PSEUDO!",
-        [(atomic_store_8 addr:$dst, (!cast<PatFrag>(op)
+        [(atomic_store_8 addr:$dst, (op
             (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
     def NAME#8mr : I<0, Pseudo, (outs), (ins i8mem:$dst, GR8:$src),
         "#BINOP "#NAME#"8mr PSEUDO!",
-        [(atomic_store_8 addr:$dst, (!cast<PatFrag>(op)
+        [(atomic_store_8 addr:$dst, (op
             (atomic_load_8 addr:$dst), GR8:$src))]>;
     // NAME#16 is not generated as 16-bit arithmetic instructions are considered
     // costly and avoided as far as possible by this backend anyway
     def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
         "#BINOP "#NAME#"32mi PSEUDO!",
-        [(atomic_store_32 addr:$dst, (!cast<PatFrag>(op)
+        [(atomic_store_32 addr:$dst, (op
             (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
     def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
         "#BINOP "#NAME#"32mr PSEUDO!",
-        [(atomic_store_32 addr:$dst, (!cast<PatFrag>(op)
+        [(atomic_store_32 addr:$dst, (op
             (atomic_load_32 addr:$dst), GR32:$src))]>;
     def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
         "#BINOP "#NAME#"64mi32 PSEUDO!",
-        [(atomic_store_64 addr:$dst, (!cast<PatFrag>(op)
+        [(atomic_store_64 addr:$dst, (op
             (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
     def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
         "#BINOP "#NAME#"64mr PSEUDO!",
-        [(atomic_store_64 addr:$dst, (!cast<PatFrag>(op)
+        [(atomic_store_64 addr:$dst, (op
             (atomic_load_64 addr:$dst), GR64:$src))]>;
 }
-defm RELEASE_ADD : RELEASE_BINOP_MI<"add">;
-defm RELEASE_AND : RELEASE_BINOP_MI<"and">;
-defm RELEASE_OR  : RELEASE_BINOP_MI<"or">;
-defm RELEASE_XOR : RELEASE_BINOP_MI<"xor">;
+defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
+defm RELEASE_AND : RELEASE_BINOP_MI<and>;
+defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
+defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
 // Note: we don't deal with sub, because substractions of constants are
 // optimized into additions before this code can run
 
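To make the note about sub concrete: before these patterns are matched, the DAG combiner has already rewritten a subtraction of a constant into an addition of the negated constant, so the RELEASE_ADD patterns cover that case. A minimal sketch (the constant is illustrative, not from the commit): a release store of x - 4 reaches instruction selection roughly as

    (atomic_store_32 addr:$dst,
        (add (atomic_load_32 addr:$dst), (i32 -4)))

which is matched by the RELEASE_ADD32mi pattern above.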
@@ -799,21 +799,21 @@ defm RELEASE_XOR : RELEASE_BINOP_MI<"xor">;
 // FIXME: Version that doesn't clobber $src, using AVX's VADDSS.
 // FIXME: This could also handle SIMD operations with *ps and *pd instructions.
 let usesCustomInserter = 1 in {
-multiclass RELEASE_FP_BINOP_MI<string op> {
+multiclass RELEASE_FP_BINOP_MI<SDNode op> {
     def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, FR32:$src),
         "#BINOP "#NAME#"32mr PSEUDO!",
         [(atomic_store_32 addr:$dst,
-            (i32 (bitconvert (!cast<PatFrag>(op)
+            (i32 (bitconvert (op
                 (f32 (bitconvert (i32 (atomic_load_32 addr:$dst)))),
                 FR32:$src))))]>, Requires<[HasSSE1]>;
     def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, FR64:$src),
         "#BINOP "#NAME#"64mr PSEUDO!",
         [(atomic_store_64 addr:$dst,
-            (i64 (bitconvert (!cast<PatFrag>(op)
+            (i64 (bitconvert (op
                 (f64 (bitconvert (i64 (atomic_load_64 addr:$dst)))),
                 FR64:$src))))]>, Requires<[HasSSE2]>;
 }
-defm RELEASE_FADD : RELEASE_FP_BINOP_MI<"fadd">;
+defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
 // FIXME: Add fsub, fmul, fdiv, ...
 }
 
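On the FIXME about fsub, fmul, fdiv: now that the multiclass takes an SDNode rather than a string, further FP operations would be one-line instantiations. A hypothetical sketch, not part of this commit (each new pseudo would also need matching expansion in the backend's custom inserter, mirroring what is done for RELEASE_FADD):

    defm RELEASE_FSUB : RELEASE_FP_BINOP_MI<fsub>;
    defm RELEASE_FMUL : RELEASE_FP_BINOP_MI<fmul>;
    defm RELEASE_FDIV : RELEASE_FP_BINOP_MI<fdiv>;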