rustc -C opt-level=3 generates bad assembly code for Vec by default #44452
This is the asm I am seeing on the playground (link):

.text
.intel_syntax noprefix
.file "playground.cgu-0.rs"
.section ".text.cold._ZN49_$LT$alloc..raw_vec..RawVec$LT$T$C$$u20$A$GT$$GT$6double17h1882b1047d8074ffE","ax",@progbits
.p2align 4, 0x90
.type _ZN49_$LT$alloc..raw_vec..RawVec$LT$T$C$$u20$A$GT$$GT$6double17h1882b1047d8074ffE,@function
_ZN49_$LT$alloc..raw_vec..RawVec$LT$T$C$$u20$A$GT$$GT$6double17h1882b1047d8074ffE:
.cfi_startproc
push r14
.Lcfi0:
.cfi_def_cfa_offset 16
push rbx
.Lcfi1:
.cfi_def_cfa_offset 24
sub rsp, 72
.Lcfi2:
.cfi_def_cfa_offset 96
.Lcfi3:
.cfi_offset rbx, -24
.Lcfi4:
.cfi_offset r14, -16
mov r14, rdi
mov rbx, qword ptr [r14 + 8]
test rbx, rbx
je .LBB0_6
lea rsi, [4*rbx]
lea rcx, [8*rbx]
mov rdi, qword ptr [r14]
lea r9, [rsp + 8]
mov edx, 4
mov r8d, 4
call __rust_realloc@PLT
test rax, rax
je .LBB0_4
add rbx, rbx
jmp .LBB0_3
.LBB0_6:
lea rdx, [rsp + 8]
mov edi, 16
mov esi, 4
call __rust_alloc@PLT
test rax, rax
je .LBB0_8
mov ebx, 4
.LBB0_3:
mov qword ptr [r14], rax
mov qword ptr [r14 + 8], rbx
add rsp, 72
pop rbx
pop r14
ret
.LBB0_4:
mov rax, qword ptr [rsp + 8]
movups xmm0, xmmword ptr [rsp + 16]
movaps xmmword ptr [rsp + 32], xmm0
mov qword ptr [rsp + 8], rax
movaps xmm0, xmmword ptr [rsp + 32]
jmp .LBB0_5
.LBB0_8:
movups xmm0, xmmword ptr [rsp + 16]
movaps xmmword ptr [rsp + 32], xmm0
movaps xmm0, xmmword ptr [rsp + 32]
movaps xmmword ptr [rsp + 48], xmm0
movaps xmm0, xmmword ptr [rsp + 48]
.LBB0_5:
movups xmmword ptr [rsp + 16], xmm0
lea rdi, [rsp + 8]
call _ZN61_$LT$alloc..heap..Heap$u20$as$u20$alloc..allocator..Alloc$GT$3oom17h8b5f820f2dd0e667E
.Lfunc_end0:
.size _ZN49_$LT$alloc..raw_vec..RawVec$LT$T$C$$u20$A$GT$$GT$6double17h1882b1047d8074ffE, .Lfunc_end0-_ZN49_$LT$alloc..raw_vec..RawVec$LT$T$C$$u20$A$GT$$GT$6double17h1882b1047d8074ffE
.cfi_endproc
.section .text._ZN4core3ptr13drop_in_place17h62d05f1818f0f9d8E,"ax",@progbits
.p2align 4, 0x90
.type _ZN4core3ptr13drop_in_place17h62d05f1818f0f9d8E,@function
_ZN4core3ptr13drop_in_place17h62d05f1818f0f9d8E:
.cfi_startproc
mov rsi, qword ptr [rdi + 8]
test rsi, rsi
je .LBB1_1
mov rdi, qword ptr [rdi]
shl rsi, 2
mov edx, 4
jmp __rust_dealloc@PLT
.LBB1_1:
ret
.Lfunc_end1:
.size _ZN4core3ptr13drop_in_place17h62d05f1818f0f9d8E, .Lfunc_end1-_ZN4core3ptr13drop_in_place17h62d05f1818f0f9d8E
.cfi_endproc
.section ".text.cold._ZN61_$LT$alloc..heap..Heap$u20$as$u20$alloc..allocator..Alloc$GT$3oom17h8b5f820f2dd0e667E","ax",@progbits
.p2align 4, 0x90
.type _ZN61_$LT$alloc..heap..Heap$u20$as$u20$alloc..allocator..Alloc$GT$3oom17h8b5f820f2dd0e667E,@function
_ZN61_$LT$alloc..heap..Heap$u20$as$u20$alloc..allocator..Alloc$GT$3oom17h8b5f820f2dd0e667E:
.cfi_startproc
sub rsp, 24
.Lcfi5:
.cfi_def_cfa_offset 32
mov rax, qword ptr [rdi + 16]
mov qword ptr [rsp + 16], rax
movups xmm0, xmmword ptr [rdi]
movaps xmmword ptr [rsp], xmm0
mov rdi, rsp
call __rust_oom@PLT
.Lfunc_end2:
.size _ZN61_$LT$alloc..heap..Heap$u20$as$u20$alloc..allocator..Alloc$GT$3oom17h8b5f820f2dd0e667E, .Lfunc_end2-_ZN61_$LT$alloc..heap..Heap$u20$as$u20$alloc..allocator..Alloc$GT$3oom17h8b5f820f2dd0e667E
.cfi_endproc
.section .text._ZN10playground4main17hf21e17cd5a16d5d9E,"ax",@progbits
.p2align 4, 0x90
.type _ZN10playground4main17hf21e17cd5a16d5d9E,@function
_ZN10playground4main17hf21e17cd5a16d5d9E:
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 155, DW.ref.rust_eh_personality
.cfi_lsda 27, .Lexception0
push rbx
.Lcfi6:
.cfi_def_cfa_offset 16
sub rsp, 32
.Lcfi7:
.cfi_def_cfa_offset 48
.Lcfi8:
.cfi_offset rbx, -16
mov qword ptr [rsp + 8], 4
xorps xmm0, xmm0
movups xmmword ptr [rsp + 16], xmm0
.Ltmp0:
lea rdi, [rsp + 8]
call _ZN49_$LT$alloc..raw_vec..RawVec$LT$T$C$$u20$A$GT$$GT$6double17h1882b1047d8074ffE
.Ltmp1:
mov rdi, qword ptr [rsp + 8]
mov rax, qword ptr [rsp + 24]
mov dword ptr [rdi + 4*rax], 0
inc rax
mov qword ptr [rsp + 24], rax
je .LBB3_2
mov rsi, qword ptr [rsp + 16]
test rsi, rsi
je .LBB3_6
shl rsi, 2
mov edx, 4
call __rust_dealloc@PLT
.LBB3_6:
add rsp, 32
pop rbx
ret
.LBB3_2:
.Ltmp2:
lea rdi, [rip + panic_bounds_check_loc.2]
xor esi, esi
xor edx, edx
call _ZN4core9panicking18panic_bounds_check17h78beadfd8229dc37E@PLT
.Ltmp3:
.LBB3_7:
.Ltmp4:
mov rbx, rax
lea rdi, [rsp + 8]
call _ZN4core3ptr13drop_in_place17h62d05f1818f0f9d8E
mov rdi, rbx
call _Unwind_Resume@PLT
.Lfunc_end3:
.size _ZN10playground4main17hf21e17cd5a16d5d9E, .Lfunc_end3-_ZN10playground4main17hf21e17cd5a16d5d9E
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2
GCC_except_table3:
.Lexception0:
.byte 255
.byte 155
.asciz "\234"
.byte 3
.byte 26
.long .Ltmp0-.Lfunc_begin0
.long .Ltmp3-.Ltmp0
.long .Ltmp4-.Lfunc_begin0
.byte 0
.long .Ltmp3-.Lfunc_begin0
.long .Lfunc_end3-.Ltmp3
.long 0
.byte 0
.p2align 2
.section .text.main,"ax",@progbits
.globl main
.p2align 4, 0x90
.type main,@function
main:
.cfi_startproc
mov rax, rsi
mov rcx, rdi
lea rdi, [rip + _ZN10playground4main17hf21e17cd5a16d5d9E]
mov rsi, rcx
mov rdx, rax
jmp _ZN3std2rt10lang_start17h573cecb903a42a26E@PLT
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
.type str.1,@object
.section .rodata.str.1,"a",@progbits
.p2align 4
str.1:
.ascii "/checkout/src/liballoc/vec.rs"
.size str.1, 29
.type panic_bounds_check_loc.2,@object
.section .data.rel.ro.panic_bounds_check_loc.2,"aw",@progbits
.p2align 3
panic_bounds_check_loc.2:
.quad str.1
.quad 29
.long 1555
.long 10
.size panic_bounds_check_loc.2, 24
.hidden DW.ref.rust_eh_personality
.weak DW.ref.rust_eh_personality
.section .data.DW.ref.rust_eh_personality,"aGw",@progbits,DW.ref.rust_eh_personality,comdat
.p2align 3
.type DW.ref.rust_eh_personality,@object
.size DW.ref.rust_eh_personality, 8
DW.ref.rust_eh_personality:
.quad rust_eh_personality
.section ".note.GNU-stack","",@progbits Not all of it is due to the vector code (it seems the play ground doesn't support creating an object file for code without main). |
One difference between using […]: it looks like in the C++ version, […] manages to optimize out the zeroing of length/capacity. The […] My (possibly incorrect) attempt at labelling what's happening:

...
; Function prologue (save stack pointer and reserve space)
push rbp
mov rbp, rsp
push rbx
sub rsp, 24
; This sets the internal ptr (which will later point to the heap-allocated data) to 4. (The pointer is initially set to the alignment of the type while the vector is empty.)
mov qword ptr [rbp - 32], 4
; And this sets both the length and capacity to 0 using vector instructions.
xorps xmm0, xmm0
movups xmmword ptr [rbp - 24], xmm0
; Load pointer to the RawVec to pass to the double function (I think)
lea rdi, [rbp - 32]
; Call to capacity doubling function.
call <alloc::raw_vec::RawVec<T, A>>::double
; Load ptr to heap memory into %rdi
mov rdi, qword ptr [rbp - 32]
; Load length into %rax
mov rax, qword ptr [rbp - 16]
; Set first element to 0 (I suppose it could be possible to avoid doing the 4*rax bit here since the length should always be 0 at this point.)
mov dword ptr [rdi + 4*rax], 0
; Increment length
inc rax
; Store the length back to the length field in the vector instance
mov qword ptr [rbp - 16], rax
; Jump to the bounds check panic if incrementing the length wraps to zero (I think)
je .LBB3_2
; Load value of the first element into %ebx
mov ebx, dword ptr [rdi]
; Load the capacity into %rsi
mov rsi, qword ptr [rbp - 24]
; Check if capacity == 0
test rsi, rsi
; Jump to end and skip deallocation if the capacity of the vector was 0.
je .LBB3_6
; Set up arguments to rust_dealloc (I think)
shl rsi, 2
mov edx, 4
; Deallocate vector memory
call __rust_dealloc@PLT
.LBB3_6:
; Move the saved element value from %ebx into the return register %eax
mov eax, ebx
; Function epilogue
add rsp, 24
pop rbx
pop rbp
ret
...
|
When using […], this:

example::foo:
push rbp
mov rbp, rsp
mov eax, 4
pop rbp
ret

is way better than the C++ version:

foo(): # @foo()
push rax
mov edi, 4
;; -------------------------------------
;; what's the point of this memory allocation?
call operator new(unsigned long)
mov rdi, rax
call operator delete(void*)
;; -------------------------------------
mov eax, 314
pop rcx
ret

Ideally the Rust versions with and without […] |
By altering Vec::push to:

let len = self.len;
if len == self.buf.cap() {
    self.buf.double();
}
unsafe {
    let end = self.as_mut_ptr().offset(len as isize);
    ptr::write(end, value);
    self.len = len + 1;
}

I got rid of the bounds check. I then tried to remove #[inline(never)] from RawVec::double(), and lo and behold, the example with 0 resolved to this:

xorl %eax, %eax
retq

Using other (non-0) values in push seems to work fine too. I don't know whether that could have some negative effect in other cases though. |
Is […]? |
Yeah, I didn't remove that. |
So... two questions: […]
|
Double is simply doubling. |
See #23670, reducing the size of […]. |
Seems there is a bit of a tradeoff here. |
I don't think the benchmark provided is silly. It shows that something wrong is going on with […]. It would be nice to know what happens in that benchmark w/o […]. EDIT: maybe cold is not enough and we need to annotate the call to double as […]. |
Extend doesn't use push anymore, so that benchmark isn't all that useful for current rust. EDIT: If I use push manually, the non-inlined version is significantly faster:

#[bench]
fn x(b: &mut Bencher) {
    let mut v = Vec::with_capacity(100);
    b.iter(|| {
        for n in 0..100 {
            v.push(n);
        }
        v.truncate(0);
    });
}
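As a side note, the #[bench]/Bencher API is unstable, so actually running the snippet above requires a nightly toolchain and the test crate. A self-contained version under that assumption (file layout and function name are mine) would look roughly like this, run with cargo bench:

#![feature(test)] // nightly-only

extern crate test;

use test::Bencher;

#[bench]
fn push_100(b: &mut Bencher) {
    let mut v = Vec::with_capacity(100);
    b.iter(|| {
        for n in 0..100 {
            v.push(n);
        }
        v.truncate(0);
    });
}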
|
Actually, completely optimizing away the allocation would technically not be correct, since that would strictly speaking be changing the behaviour of the function, as the allocation could theoretically fail and cause a panic/exception. So in other words, the C++ version having a memory allocation call is correct. It might still be possible to improve the Rust implementation to optimize down to something similar to the C++ one though. |
The first change in my previous comment makes this benchmark significantly faster (I got about 120-130 ns, compared to around 200 for the current version). It does increase the code size a small bit, though, so for functions where nothing is known about the length of the vector it might cause a slight performance decrease. I'm tinkering around to see if I can reproduce it just by using the assume intrinsic; I managed to get rid of the bounds check in the original example with that, at least, without any code size increase. |
There is an […]. |
I managed to avoid the code size increase, […]
|
@oyvindln how is getting rid of a potential panic "changing the behavior"? I mean, if the optimized vector doesn't fail with OOM when the unoptimized one would, is that really bad? Surely it doesn't affect algorithmic correctness… Memory allocation, OOM conditions, etc. are mostly implementation details (and consequently very chaotic on modern OSes) anyway. If someone is trying to test OOM by allocating and destroying single-element vectors, s/he is better off calling […]. |
@H2CO3 Ah sorry, I worded myself badly. I don't see a problem with optimizing out the allocations in practice (and normally that's what you would want); I was just pondering why the allocations weren't removed in the C++ version. I tried various combinations of using the […]. This was one compromise I came up with:

pub fn push(&mut self, value: T) {
    unsafe {
        // This will panic or abort if we would allocate > isize::MAX bytes
        // or if the length increment would overflow for zero-sized types.
        if unlikely(self.len == self.buf.cap()) {
            let len = self.len;
            self.buf.double();
            // Let llvm know that the length didn't change
            // so if we push to a new vector of length 0,
            // we don't have to calculate the offset from
            // the start of the array.
            // This might be worth revisiting once #31681 is solved.
            // See also #44452.
            assume(self.len() == len);
        }
        let end = self.as_mut_ptr().offset(self.len as isize);
        // Copy len to a local before writing `value` to the vector. This seems to enable
        // llvm to merge the length and loop counter when writing to a fresh vector.
        let len = self.len;
        ptr::write(end, value);
        // Let llvm know that the length doesn't change in the write call.
        assume(self.len() == len);
        self.len += 1;
        // Let llvm know that len didn't overflow.
        // We can safely assume that there was no overflow,
        // since if len == usize::MAX, growing the vector would fail.
        // We already made the assumption that the invariant len <= capacity
        // holds when we checked if we should grow the vector.
        //
        // Letting llvm know that there was no overflow can help llvm
        // avoid bounds checks after a push in some situations.
        assume(self.len() != 0);
    }
}

It does result in one extra instruction for a lone raw push call though. |
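For readers unfamiliar with the intrinsics used above: assume and unlikely live in std::intrinsics and are nightly-only. A small standalone sketch of the same idea, using my own illustrative function rather than the real liballoc code, and assuming a nightly compiler with the core_intrinsics feature:

#![feature(core_intrinsics)]

use std::intrinsics::{assume, unlikely};

// Illustrative only: hint the growth path as unlikely, then tell LLVM the vector
// is non-empty so the index below can (hopefully) skip its bounds check.
fn push_and_read(v: &mut Vec<u32>, value: u32) -> u32 {
    unsafe {
        if unlikely(v.len() == v.capacity()) {
            v.reserve(1);
        }
        v.push(value);
        // Sound: push() just made len >= 1 (otherwise it would have panicked/aborted).
        assume(!v.is_empty());
    }
    v[0]
}

fn main() {
    let mut v = Vec::new();
    assert_eq!(push_and_read(&mut v, 7), 7);
}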
@oyvindln Oh, that does make sense. Sorry, I thought you were writing in favor of not optimizing away the allocation. |
this is what I had in mind, but I expected the code to look like this:

if unlikely(full()) {
    grow();
}

and […] |
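A stable-Rust sketch of that shape, using #[cold]/#[inline(never)] on the growth path instead of the unlikely intrinsic (full/grow/push_fast are illustrative names of mine, not the real liballoc functions):

// Keep the hot path tiny; push the reallocation into an out-of-line, cold function.
#[cold]
#[inline(never)]
fn grow(v: &mut Vec<u32>) {
    // Delegate the actual doubling/reallocation to the standard library.
    v.reserve(1);
}

fn push_fast(v: &mut Vec<u32>, value: u32) {
    if v.len() == v.capacity() {
        grow(v);
    }
    // Capacity is now sufficient, so this push takes the non-growing path.
    v.push(value);
}

fn main() {
    let mut v = Vec::new();
    for i in 0..10 {
        push_fast(&mut v, i);
    }
    assert_eq!(v.len(), 10);
}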
I know that […]. Would dropping […]? |
@Techcable MIR inlining is not currently enabled by default, and LLVM does inline […]. |
Thanks for the clarification @rkruppe, you'd think the LLVM docs would be right! Do you think using the […]? |
On nightly the original code:

pub fn foo() -> u32 {
    let mut v: Vec<u32> = Vec::new();
    v.push(0);
    v[0]
}

now generates this:

example::foo:
xor eax, eax
ret
|
Nice! A second push still makes it trip: https://godbolt.org/g/TZ1Mbo =/ clang appears to handle a couple of […]. |
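The godbolt snippet isn't preserved here; the two-push case being described is presumably something along these lines:

pub fn foo() -> u32 {
    let mut v: Vec<u32> = Vec::new();
    v.push(0);
    v.push(1);
    v[0]
}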
No idea if it'll help, but this sounds like the optimizer limitation that this C++ lightning talk is about: https://www.youtube.com/watch?v=s4wnuiCwTGU Dunno why Rust would fare worse at this. |
It doesn't anymore; it has regressed from 1.42 to 1.43. Edit: Currently O2 results in the expected assembly, O3 in a large mess. |
It looks like, going from 1.49.0 to 1.15.0, we now generate the expected optimal code for any number of pushes when sufficient capacity is provided using Vec::with_capacity. |
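For concreteness, the with_capacity shape this refers to would be something like the following (my reconstruction, not the commenter's exact snippet):

pub fn foo() -> u32 {
    let mut v: Vec<u32> = Vec::with_capacity(4);
    v.push(1);
    v.push(2);
    v.push(3);
    v.push(4);
    v[0]
}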
Compiling the following C++ snippet with clang++ -O3 and g++ -O3 (see here) generates this assembly on x86_64: […]
(note: clang generates perfect assembly even with multiple push_backs; the only thing that seems to trip it up is a reallocation)
This snippet compiled with rustc -C opt-level=3 (see here) generates the following assembly: […]
I've tried adding -C lto and -C panic=abort to rustc without much luck. I've also tried replacing [0] with unsafe { *v.get_unchecked(0) } without any luck. The only thing that makes it generate good assembly is using Vec::with_capacity(N) (see here), which generates: […]
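The linked snippets and assembly are not preserved in this copy; the get_unchecked variant described above would look roughly like this:

pub fn foo() -> u32 {
    let mut v: Vec<u32> = Vec::new();
    v.push(0);
    // Skip the bounds check explicitly; safe here because the push above guarantees len == 1.
    unsafe { *v.get_unchecked(0) }
}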