Conversation

@tgross35 (Contributor) commented Sep 10, 2025

Apply the following changes:

* Ensure all float types are covered (`f16` and `f128` were often missing)
* Switch to more straightforward test names
* Remove some CHECK directives that are outdated (the prefix changed but the directive did not get removed)
* Add common check prefixes to merge similar blocks
* Test a more similar set of platforms
* Add missing `nounwind`
* Test `strictfp` for each libcall where possible (see the sketch below)

This is a pre-test for [1].

[1]: llvm#152684

Apply the following changes:

* Ensure all float types are covered (`f16` and `f128` were often
  missing)
* Switch to more straightforward test names
* Remove some CHECK directives that are outdated (prefix changed but the
  directive did not get removed)
* Add common check prefixes to merge similar blocks
* Test a more similar set of platforms
* Test `strictfp` for each libcall where possible

This is a pre-test for [1].

[1]: llvm#152684
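To make the check-prefix and `strictfp` bullets concrete, here is a minimal sketch of the pattern the updated tests converge on; the function names and intrinsic calls are taken from the llrint test in the diff below, while the RUN lines are only illustrative:

```llvm
; RUN lines share the X86 prefix so identical 32-bit check blocks can merge,
; and each libcall gets both a plain and a strictfp (constrained) variant.
; RUN: llc < %s -mtriple=i686-unknown             | FileCheck %s --check-prefixes=X86,X86-NOSSE
; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown           | FileCheck %s --check-prefixes=X64

define i64 @test_llrint_i64_f32(float %x) nounwind {
entry:
  %0 = tail call i64 @llvm.llrint.i64.f32(float %x)
  ret i64 %0
}

define i64 @test_llrint_i64_f32_strict(float %x) nounwind strictfp {
entry:
  %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret i64 %0
}
```

As the NOTE lines in the diff indicate, the CHECK blocks themselves are regenerated with utils/update_llc_test_checks.py rather than written by hand.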
@llvmbot (Member) commented Sep 10, 2025

@llvm/pr-subscribers-backend-x86

Author: Trevor Gross (tgross35)

Changes

Apply the following changes:

* Ensure all float types are covered (`f16` and `f128` were often missing)
* Switch to more straightforward test names
* Remove some CHECK directives that are outdated (the prefix changed but the directive did not get removed)
* Add common check prefixes to merge similar blocks
* Test a more similar set of platforms
* Test `strictfp` for each libcall where possible

This is a pre-test for llvm#152684.


Patch is 68.72 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/157807.diff

6 Files Affected:

* (modified) llvm/test/CodeGen/X86/llrint-conv.ll (+197-59)
* (modified) llvm/test/CodeGen/X86/llround-conv.ll (+248-156)
* (modified) llvm/test/CodeGen/X86/lrint-conv-i32.ll (+210-49)
* (modified) llvm/test/CodeGen/X86/lrint-conv-i64.ll (+268-49)
* (modified) llvm/test/CodeGen/X86/lround-conv-i32.ll (+128-48)
* (modified) llvm/test/CodeGen/X86/lround-conv-i64.ll (+156-34)
diff --git a/llvm/test/CodeGen/X86/llrint-conv.ll b/llvm/test/CodeGen/X86/llrint-conv.ll
index 7bcf573118538..5f38645f74636 100644
--- a/llvm/test/CodeGen/X86/llrint-conv.ll
+++ b/llvm/test/CodeGen/X86/llrint-conv.ll
@@ -7,47 +7,15 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=X64,X64-AVX
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
 
-define i64 @testmsxh(half %x) nounwind {
-; X86-NOSSE-LABEL: testmsxh:
-; X86-NOSSE:       # %bb.0: # %entry
-; X86-NOSSE-NEXT:    pushl %eax
-; X86-NOSSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    movl %eax, (%esp)
-; X86-NOSSE-NEXT:    calll __extendhfsf2
-; X86-NOSSE-NEXT:    fstps (%esp)
-; X86-NOSSE-NEXT:    calll llrintf
-; X86-NOSSE-NEXT:    popl %ecx
-; X86-NOSSE-NEXT:    retl
-;
-; X86-SSE2-LABEL: testmsxh:
-; X86-SSE2:       # %bb.0: # %entry
-; X86-SSE2-NEXT:    pushl %eax
-; X86-SSE2-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
-; X86-SSE2-NEXT:    pextrw $0, %xmm0, %eax
-; X86-SSE2-NEXT:    movw %ax, (%esp)
-; X86-SSE2-NEXT:    calll __extendhfsf2
-; X86-SSE2-NEXT:    fstps (%esp)
-; X86-SSE2-NEXT:    calll llrintf
-; X86-SSE2-NEXT:    popl %ecx
-; X86-SSE2-NEXT:    retl
-;
-; X64-SSE-LABEL: testmsxh:
-; X64-SSE:       # %bb.0: # %entry
-; X64-SSE-NEXT:    pushq %rax
-; X64-SSE-NEXT:    callq __extendhfsf2@PLT
-; X64-SSE-NEXT:    callq rintf@PLT
-; X64-SSE-NEXT:    callq __truncsfhf2@PLT
-; X64-SSE-NEXT:    callq __extendhfsf2@PLT
-; X64-SSE-NEXT:    cvttss2si %xmm0, %rax
-; X64-SSE-NEXT:    popq %rcx
-; X64-SSE-NEXT:    retq
-entry:
-  %0 = tail call i64 @llvm.llrint.i64.f16(half %x)
-  ret i64 %0
-}
+; FIXME: crash
+; define i64 @test_llrint_i64_f16(half %x) nounwind {
+; entry:
+;   %0 = tail call i64 @llvm.llrint.i64.f16(half %x)
+;   ret i64 %0
+; }
 
-define i64 @testmsxs(float %x) nounwind {
-; X86-NOSSE-LABEL: testmsxs:
+define i64 @test_llrint_i64_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_llrint_i64_f32:
 ; X86-NOSSE:       # %bb.0: # %entry
 ; X86-NOSSE-NEXT:    pushl %ebp
 ; X86-NOSSE-NEXT:    movl %esp, %ebp
@@ -61,7 +29,7 @@ define i64 @testmsxs(float %x) nounwind {
 ; X86-NOSSE-NEXT:    popl %ebp
 ; X86-NOSSE-NEXT:    retl
 ;
-; X86-SSE2-LABEL: testmsxs:
+; X86-SSE2-LABEL: test_llrint_i64_f32:
 ; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    pushl %ebp
 ; X86-SSE2-NEXT:    movl %esp, %ebp
@@ -77,7 +45,7 @@ define i64 @testmsxs(float %x) nounwind {
 ; X86-SSE2-NEXT:    popl %ebp
 ; X86-SSE2-NEXT:    retl
 ;
-; X86-AVX-LABEL: testmsxs:
+; X86-AVX-LABEL: test_llrint_i64_f32:
 ; X86-AVX:       # %bb.0: # %entry
 ; X86-AVX-NEXT:    pushl %ebp
 ; X86-AVX-NEXT:    movl %esp, %ebp
@@ -93,12 +61,12 @@ define i64 @testmsxs(float %x) nounwind {
 ; X86-AVX-NEXT:    popl %ebp
 ; X86-AVX-NEXT:    retl
 ;
-; X64-SSE-LABEL: testmsxs:
+; X64-SSE-LABEL: test_llrint_i64_f32:
 ; X64-SSE:       # %bb.0: # %entry
 ; X64-SSE-NEXT:    cvtss2si %xmm0, %rax
 ; X64-SSE-NEXT:    retq
 ;
-; X64-AVX-LABEL: testmsxs:
+; X64-AVX-LABEL: test_llrint_i64_f32:
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    vcvtss2si %xmm0, %rax
 ; X64-AVX-NEXT:    retq
@@ -107,8 +75,8 @@ entry:
   ret i64 %0
 }
 
-define i64 @testmsxd(double %x) nounwind {
-; X86-NOSSE-LABEL: testmsxd:
+define i64 @test_llrint_i64_f64(double %x) nounwind {
+; X86-NOSSE-LABEL: test_llrint_i64_f64:
 ; X86-NOSSE:       # %bb.0: # %entry
 ; X86-NOSSE-NEXT:    pushl %ebp
 ; X86-NOSSE-NEXT:    movl %esp, %ebp
@@ -122,7 +90,7 @@ define i64 @testmsxd(double %x) nounwind {
 ; X86-NOSSE-NEXT:    popl %ebp
 ; X86-NOSSE-NEXT:    retl
 ;
-; X86-SSE2-LABEL: testmsxd:
+; X86-SSE2-LABEL: test_llrint_i64_f64:
 ; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    pushl %ebp
 ; X86-SSE2-NEXT:    movl %esp, %ebp
@@ -138,7 +106,7 @@ define i64 @testmsxd(double %x) nounwind {
 ; X86-SSE2-NEXT:    popl %ebp
 ; X86-SSE2-NEXT:    retl
 ;
-; X86-AVX-LABEL: testmsxd:
+; X86-AVX-LABEL: test_llrint_i64_f64:
 ; X86-AVX:       # %bb.0: # %entry
 ; X86-AVX-NEXT:    pushl %ebp
 ; X86-AVX-NEXT:    movl %esp, %ebp
@@ -154,12 +122,12 @@ define i64 @testmsxd(double %x) nounwind {
 ; X86-AVX-NEXT:    popl %ebp
 ; X86-AVX-NEXT:    retl
 ;
-; X64-SSE-LABEL: testmsxd:
+; X64-SSE-LABEL: test_llrint_i64_f64:
 ; X64-SSE:       # %bb.0: # %entry
 ; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
 ; X64-SSE-NEXT:    retq
 ;
-; X64-AVX-LABEL: testmsxd:
+; X64-AVX-LABEL: test_llrint_i64_f64:
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    vcvtsd2si %xmm0, %rax
 ; X64-AVX-NEXT:    retq
@@ -168,8 +136,8 @@ entry:
   ret i64 %0
 }
 
-define i64 @testmsll(x86_fp80 %x) nounwind {
-; X86-LABEL: testmsll:
+define i64 @test_llrint_i64_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_llrint_i64_f80:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
@@ -183,7 +151,7 @@ define i64 @testmsll(x86_fp80 %x) nounwind {
 ; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: testmsll:
+; X64-LABEL: test_llrint_i64_f80:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
 ; X64-NEXT:    fistpll -{{[0-9]+}}(%rsp)
@@ -195,8 +163,8 @@ entry:
 }
 
 ; FIXME(#44744): incorrect libcall
-define i64 @testmslq(fp128 %x) nounwind {
-; X86-NOSSE-LABEL: testmslq:
+define i64 @test_llrint_i64_f128(fp128 %x) nounwind {
+; X86-NOSSE-LABEL: test_llrint_i64_f128:
 ; X86-NOSSE:       # %bb.0: # %entry
 ; X86-NOSSE-NEXT:    pushl %ebp
 ; X86-NOSSE-NEXT:    movl %esp, %ebp
@@ -212,7 +180,7 @@ define i64 @testmslq(fp128 %x) nounwind {
 ; X86-NOSSE-NEXT:    popl %ebp
 ; X86-NOSSE-NEXT:    retl
 ;
-; X86-SSE2-LABEL: testmslq:
+; X86-SSE2-LABEL: test_llrint_i64_f128:
 ; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    pushl %ebp
 ; X86-SSE2-NEXT:    movl %esp, %ebp
@@ -228,7 +196,7 @@ define i64 @testmslq(fp128 %x) nounwind {
 ; X86-SSE2-NEXT:    popl %ebp
 ; X86-SSE2-NEXT:    retl
 ;
-; X86-AVX-LABEL: testmslq:
+; X86-AVX-LABEL: test_llrint_i64_f128:
 ; X86-AVX:       # %bb.0: # %entry
 ; X86-AVX-NEXT:    pushl %ebp
 ; X86-AVX-NEXT:    movl %esp, %ebp
@@ -241,11 +209,181 @@ define i64 @testmslq(fp128 %x) nounwind {
 ; X86-AVX-NEXT:    popl %ebp
 ; X86-AVX-NEXT:    retl
 ;
-; X64-LABEL: testmslq:
+; X64-LABEL: test_llrint_i64_f128:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    jmp llrintl@PLT # TAILCALL
 entry:
-  %0 = tail call i64 @llvm.llrint.i64.fp128(fp128 %x)
+  %0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
+  ret i64 %0
+}
+
+; FIXME: crash
+; define i64 @test_llrint_i64_f16_strict(half %x) nounwind strictfp {
+; entry:
+;   %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+;   ret i64 %0
+; }
+
+define i64 @test_llrint_i64_f32_strict(float %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_llrint_i64_f32_strict:
+; X86-NOSSE:       # %bb.0: # %entry
+; X86-NOSSE-NEXT:    pushl %eax
+; X86-NOSSE-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    fstps (%esp)
+; X86-NOSSE-NEXT:    wait
+; X86-NOSSE-NEXT:    calll llrintf
+; X86-NOSSE-NEXT:    popl %ecx
+; X86-NOSSE-NEXT:    retl
+;
+; X86-SSE2-LABEL: test_llrint_i64_f32_strict:
+; X86-SSE2:       # %bb.0: # %entry
+; X86-SSE2-NEXT:    pushl %eax
+; X86-SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT:    movss %xmm0, (%esp)
+; X86-SSE2-NEXT:    calll llrintf
+; X86-SSE2-NEXT:    popl %ecx
+; X86-SSE2-NEXT:    retl
+;
+; X86-AVX-LABEL: test_llrint_i64_f32_strict:
+; X86-AVX:       # %bb.0: # %entry
+; X86-AVX-NEXT:    pushl %eax
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX-NEXT:    calll llrintf
+; X86-AVX-NEXT:    popl %ecx
+; X86-AVX-NEXT:    retl
+;
+; X64-LABEL: test_llrint_i64_f32_strict:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    callq llrintf@PLT
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+  ret i64 %0
+}
+
+define i64 @test_llrint_i64_f64_strict(double %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_llrint_i64_f64_strict:
+; X86-NOSSE:       # %bb.0: # %entry
+; X86-NOSSE-NEXT:    subl $8, %esp
+; X86-NOSSE-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    fstpl (%esp)
+; X86-NOSSE-NEXT:    wait
+; X86-NOSSE-NEXT:    calll llrint
+; X86-NOSSE-NEXT:    addl $8, %esp
+; X86-NOSSE-NEXT:    retl
+;
+; X86-SSE2-LABEL: test_llrint_i64_f64_strict:
+; X86-SSE2:       # %bb.0: # %entry
+; X86-SSE2-NEXT:    subl $8, %esp
+; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT:    movsd %xmm0, (%esp)
+; X86-SSE2-NEXT:    calll llrint
+; X86-SSE2-NEXT:    addl $8, %esp
+; X86-SSE2-NEXT:    retl
+;
+; X86-AVX-LABEL: test_llrint_i64_f64_strict:
+; X86-AVX:       # %bb.0: # %entry
+; X86-AVX-NEXT:    subl $8, %esp
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT:    calll llrint
+; X86-AVX-NEXT:    addl $8, %esp
+; X86-AVX-NEXT:    retl
+;
+; X64-LABEL: test_llrint_i64_f64_strict:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    callq llrint@PLT
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+  ret i64 %0
+}
+
+define i64 @test_llrint_i64_f80_strict(x86_fp80 %x) nounwind strictfp {
+; X86-LABEL: test_llrint_i64_f80_strict:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpt (%esp)
+; X86-NEXT:    wait
+; X86-NEXT:    calll llrintl
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_llrint_i64_f80_strict:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    subq $24, %rsp
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fstpt (%rsp)
+; X64-NEXT:    wait
+; X64-NEXT:    callq llrintl@PLT
+; X64-NEXT:    addq $24, %rsp
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+  ret i64 %0
+}
+
+; FIXME(#44744): incorrect libcall
+define i64 @test_llrint_i64_f128_strict(fp128 %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_llrint_i64_f128_strict:
+; X86-NOSSE:       # %bb.0: # %entry
+; X86-NOSSE-NEXT:    pushl %ebp
+; X86-NOSSE-NEXT:    movl %esp, %ebp
+; X86-NOSSE-NEXT:    andl $-16, %esp
+; X86-NOSSE-NEXT:    subl $16, %esp
+; X86-NOSSE-NEXT:    pushl 20(%ebp)
+; X86-NOSSE-NEXT:    pushl 16(%ebp)
+; X86-NOSSE-NEXT:    pushl 12(%ebp)
+; X86-NOSSE-NEXT:    pushl 8(%ebp)
+; X86-NOSSE-NEXT:    calll llrintl
+; X86-NOSSE-NEXT:    addl $16, %esp
+; X86-NOSSE-NEXT:    movl %ebp, %esp
+; X86-NOSSE-NEXT:    popl %ebp
+; X86-NOSSE-NEXT:    retl
+;
+; X86-SSE2-LABEL: test_llrint_i64_f128_strict:
+; X86-SSE2:       # %bb.0: # %entry
+; X86-SSE2-NEXT:    pushl %ebp
+; X86-SSE2-NEXT:    movl %esp, %ebp
+; X86-SSE2-NEXT:    andl $-16, %esp
+; X86-SSE2-NEXT:    subl $16, %esp
+; X86-SSE2-NEXT:    pushl 20(%ebp)
+; X86-SSE2-NEXT:    pushl 16(%ebp)
+; X86-SSE2-NEXT:    pushl 12(%ebp)
+; X86-SSE2-NEXT:    pushl 8(%ebp)
+; X86-SSE2-NEXT:    calll llrintl
+; X86-SSE2-NEXT:    addl $16, %esp
+; X86-SSE2-NEXT:    movl %ebp, %esp
+; X86-SSE2-NEXT:    popl %ebp
+; X86-SSE2-NEXT:    retl
+;
+; X86-AVX-LABEL: test_llrint_i64_f128_strict:
+; X86-AVX:       # %bb.0: # %entry
+; X86-AVX-NEXT:    pushl %ebp
+; X86-AVX-NEXT:    movl %esp, %ebp
+; X86-AVX-NEXT:    andl $-16, %esp
+; X86-AVX-NEXT:    subl $32, %esp
+; X86-AVX-NEXT:    vmovups 8(%ebp), %xmm0
+; X86-AVX-NEXT:    vmovups %xmm0, (%esp)
+; X86-AVX-NEXT:    calll llrintl
+; X86-AVX-NEXT:    movl %ebp, %esp
+; X86-AVX-NEXT:    popl %ebp
+; X86-AVX-NEXT:    retl
+;
+; X64-LABEL: test_llrint_i64_f128_strict:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    callq llrintl@PLT
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
   ret i64 %0
 }
 
diff --git a/llvm/test/CodeGen/X86/llround-conv.ll b/llvm/test/CodeGen/X86/llround-conv.ll
index 19a980b72809e..ef4df82e9e57e 100644
--- a/llvm/test/CodeGen/X86/llround-conv.ll
+++ b/llvm/test/CodeGen/X86/llround-conv.ll
@@ -1,88 +1,84 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown             | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=i686-unknown              | FileCheck %s --check-prefixes=X86,X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2  | FileCheck %s --check-prefixes=X86,X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown            | FileCheck %s --check-prefixes=X64
 ; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
 ; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
 
-define i64 @testmsxs(float %x) {
-; X86-LABEL: testmsxs:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    pushl %eax
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    flds {{[0-9]+}}(%esp)
-; X86-NEXT:    fstps (%esp)
-; X86-NEXT:    calll llroundf
-; X86-NEXT:    popl %ecx
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
+; FIXME: crash
+; define i64 @test_llround_f16(half %x) nounwind {
+;   %conv = tail call i64 @llvm.llround.f16(half %x)
+;   ret i64 %conv
+; }
+
+define i64 @test_llround_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_llround_f32:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl %eax
+; X86-NOSSE-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    fstps (%esp)
+; X86-NOSSE-NEXT:    calll llroundf
+; X86-NOSSE-NEXT:    popl %ecx
+; X86-NOSSE-NEXT:    retl
 ;
-; SSE2-LABEL: testmsxs:
-; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    pushl %eax
-; SSE2-NEXT:    .cfi_def_cfa_offset 8
-; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT:    movss %xmm0, (%esp)
-; SSE2-NEXT:    calll llroundf
-; SSE2-NEXT:    popl %ecx
-; SSE2-NEXT:    .cfi_def_cfa_offset 4
-; SSE2-NEXT:    retl
+; X86-SSE2-LABEL: test_llround_f32:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    pushl %eax
+; X86-SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT:    movss %xmm0, (%esp)
+; X86-SSE2-NEXT:    calll llroundf
+; X86-SSE2-NEXT:    popl %ecx
+; X86-SSE2-NEXT:    retl
 ;
-; GISEL-X86-LABEL: testmsxs:
-; GISEL-X86:       # %bb.0: # %entry
+; X64-LABEL: test_llround_f32:
+; X64:       # %bb.0:
+; X64-NEXT:    jmp llroundf@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_llround_f32:
+; GISEL-X86:       # %bb.0:
 ; GISEL-X86-NEXT:    subl $12, %esp
-; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
 ; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; GISEL-X86-NEXT:    movl %eax, (%esp)
 ; GISEL-X86-NEXT:    calll llroundf
 ; GISEL-X86-NEXT:    addl $12, %esp
-; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
 ; GISEL-X86-NEXT:    retl
 ;
-; X64-LABEL: testmsxs:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    jmp llroundf@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsxs:
-; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-LABEL: test_llround_f32:
+; GISEL-X64:       # %bb.0:
 ; GISEL-X64-NEXT:    pushq %rax
-; GISEL-X64-NEXT:    .cfi_def_cfa_offset 16
 ; GISEL-X64-NEXT:    callq llroundf
 ; GISEL-X64-NEXT:    popq %rcx
-; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
 ; GISEL-X64-NEXT:    retq
-entry:
-  %0 = tail call i64 @llvm.llround.f32(float %x)
-  ret i64 %0
+  %conv = tail call i64 @llvm.llround.f32(float %x)
+  ret i64 %conv
 }
 
-define i64 @testmsxd(double %x) {
-; X86-LABEL: testmsxd:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:    fldl {{[0-9]+}}(%esp)
-; X86-NEXT:    fstpl (%esp)
-; X86-NEXT:    calll llround
-; X86-NEXT:    addl $8, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
+define i64 @test_llround_f64(double %x) nounwind {
+; X86-NOSSE-LABEL: test_llround_f64:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    subl $8, %esp
+; X86-NOSSE-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    fstpl (%esp)
+; X86-NOSSE-NEXT:    calll llround
+; X86-NOSSE-NEXT:    addl $8, %esp
+; X86-NOSSE-NEXT:    retl
 ;
-; SSE2-LABEL: testmsxd:
-; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    subl $8, %esp
-; SSE2-NEXT:    .cfi_def_cfa_offset 12
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    movsd %xmm0, (%esp)
-; SSE2-NEXT:    calll llround
-; SSE2-NEXT:    addl $8, %esp
-; SSE2-NEXT:    .cfi_def_cfa_offset 4
-; SSE2-NEXT:    retl
+; X86-SSE2-LABEL: test_llround_f64:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    subl $8, %esp
+; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT:    movsd %xmm0, (%esp)
+; X86-SSE2-NEXT:    calll llround
+; X86-SSE2-NEXT:    addl $8, %esp
+; X86-SSE2-NEXT:    retl
 ;
-; GISEL-X86-LABEL: testmsxd:
-; GISEL-X86:       # %bb.0: # %entry
+; X64-LABEL: test_llround_f64:
+; X64:       # %bb.0:
+; X64-NEXT:    jmp llround@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_llround_f64:
+; GISEL-X86:       # %bb.0:
 ; GISEL-X86-NEXT:    subl $12, %esp
-; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
 ; GISEL-X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
 ; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; GISEL-X86-NEXT:    movl 4(%eax), %eax
@@ -92,111 +88,140 @@ define i64 @testmsxd(double %x) {
 ; GISEL-X86-NEXT:    movl %eax, 4(%edx)
 ; GISEL-X86-NEXT:    calll llround
 ; GISEL-X86-NEXT:    addl $12, %esp
-; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
 ; GISEL-X86-NEXT:    retl
 ;
-; X64-LABEL: testmsxd:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    jmp llround@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsxd:
-; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-LABEL: test_llround_f64:
+; GISEL-X64:       # %bb.0:
 ; GISEL-X64-NEXT:    pushq %rax
-; GISEL-X64-NEXT:    .cfi_def_cfa_offset 16
 ; GISEL-X64-NEXT:    callq llround
 ; GISEL-X64-NEXT:    popq %rcx
-; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
 ; GISEL-X64-NEXT:    retq
-entry:
-  %0 = tail call i64 @llvm.llround.f64(double %x)
-  ret i64 %0
+  %conv = tail call i64 @llvm.llround.f64(double %x)
+  ret i64 %conv
 }
 
-define i64 @testmsll(x86_fp80 %x) {
-; X86-LABEL: testmsll:
-; X86:       # %bb.0: # %entry
+define i64 @test_llround_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_llround_f80:
+; X86:       # %bb.0:
 ; X86-NEXT:    subl $12, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 16
 ; X86-NEXT:    fldt {{[0-9]+}}(%esp)
 ; X86-NEXT:    fstpt (%esp)
 ; X86-NEXT:    calll llroundl
 ; X86-NEXT:    addl $12, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
-; SSE2-LABEL: testmsll:
-; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    subl $12, %esp
-; SSE2-NEXT:    .cfi_def_cfa_offset 16
-; SSE2-NEXT:    fldt {{[0-9]+}}(%esp)
-; SSE2-NEXT:    fstpt (%esp)
-; SSE2-NEXT:    calll llroundl
-; SSE2-NEXT:    addl $12, %esp
-; SSE2-NEXT:    .cfi_def_cfa_offset 4
-; SSE2-NEXT:    retl
+; X64-LABEL: test_llround_f80:
+; X64:       # %bb.0:
+; X64-NEXT:    jmp llroundl@PLT # TAILCALL
 ;
-; GISEL-X86-LABEL: testmsll:
-; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-LABEL: test_llround_f80:
+; GISEL-X86:       # %bb.0:
 ; GISEL-X86-NEXT:    subl $12, %esp
-; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
 ; GISEL-X86-NEXT:    fldt {{[0-9]+}}(%esp)
 ; GISEL-X86-NEXT:    fstpt (%esp)
 ; GISEL-X86-NEXT:    calll llroundl
 ; GISEL-X86-NEXT:    addl $12, %esp
-; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
 ; GISEL-X86-NEXT:    retl
 ;
-; X64-LABEL: testmsll:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    jmp llroundl@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsll:
-; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-LABEL: test_llround_f80:
+; GISEL-X64:       # %bb.0:
 ; GISEL-X64-NEXT:    subq $24, %rsp
-; GISEL-X64-NEXT:    .cfi_def_cfa_offset 32
 ; GISEL-X64-NEXT:    fldt {{[0-9]+}}(%rsp)
 ; GISEL-X64-NEXT:    fstpt (%rsp)
 ; GISEL-X64-NEXT:    callq llroundl
 ; GISEL-X64-NEXT:    addq $24, %rsp
-; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
 ; GISEL-X64-NEXT:    retq
-entry:
-  %0 = tail call i64 @llvm.llround.f80(x86_fp80 %x)
-  re...
[truncated]

@tgross35 (Contributor, Author) commented:

@nikic could you review?

@nikic (Contributor) left a comment

LGTM

@nikic nikic enabled auto-merge (squash) September 10, 2025 08:06
@nikic nikic merged commit bc65352 into llvm:main Sep 10, 2025
11 checks passed
@tgross35 tgross35 deleted the lrint-f16-fix-pretest branch September 10, 2025 08:31