From d4051178976aa0527ede20e6c053f72b3d4dc20c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Steinbrink?= Date: Wed, 17 Jul 2013 21:59:58 +0200 Subject: [PATCH 1/4] Avoid creating llenv blocks when there's nothing to load Currently, all closures have an llenv block to load values from the captured environment, but for closure that don't actually capture anything, that block is useless and can be skipped. --- src/librustc/middle/trans/closure.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/librustc/middle/trans/closure.rs b/src/librustc/middle/trans/closure.rs index 5e086f94f2867..7507f19ec2009 100644 --- a/src/librustc/middle/trans/closure.rs +++ b/src/librustc/middle/trans/closure.rs @@ -326,6 +326,11 @@ pub fn load_environment(fcx: fn_ctxt, sigil: ast::Sigil) { let _icx = push_ctxt("closure::load_environment"); + // Don't bother to create the block if there's nothing to load + if cap_vars.len() == 0 && !load_ret_handle { + return; + } + let llloadenv = match fcx.llloadenv { Some(ll) => ll, None => { From 3cccdbd9ccccd8768caea7cdf03e88a4f26ea301 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Steinbrink?= Date: Thu, 18 Jul 2013 20:45:01 +0200 Subject: [PATCH 2/4] Remove an unnecessary block/jump from the drop glue for @-pointer The nested with_cond calls each introduce a "next" block, with the inner one just jumping to the outer one. 
--- src/librustc/middle/trans/glue.rs | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/src/librustc/middle/trans/glue.rs b/src/librustc/middle/trans/glue.rs index ffe6d22d5814f..4a2072364e94f 100644 --- a/src/librustc/middle/trans/glue.rs +++ b/src/librustc/middle/trans/glue.rs @@ -546,18 +546,23 @@ pub fn decr_refcnt_maybe_free(bcx: block, box_ptr: ValueRef, let _icx = push_ctxt("decr_refcnt_maybe_free"); let ccx = bcx.ccx(); - do with_cond(bcx, IsNotNull(bcx, box_ptr)) |bcx| { - let rc_ptr = GEPi(bcx, box_ptr, [0u, abi::box_field_refcnt]); - let rc = Sub(bcx, Load(bcx, rc_ptr), C_int(ccx, 1)); - Store(bcx, rc, rc_ptr); - let zero_test = ICmp(bcx, lib::llvm::IntEQ, C_int(ccx, 0), rc); - do with_cond(bcx, zero_test) |bcx| { - match box_ptr_ptr { - Some(p) => free_ty(bcx, p, t), - None => free_ty_immediate(bcx, box_ptr, t) - } - } - } + let decr_bcx = sub_block(bcx, "decr"); + let free_bcx = sub_block(decr_bcx, "free"); + let next_bcx = sub_block(bcx, "next"); + CondBr(bcx, IsNotNull(bcx, box_ptr), decr_bcx.llbb, next_bcx.llbb); + + let rc_ptr = GEPi(decr_bcx, box_ptr, [0u, abi::box_field_refcnt]); + let rc = Sub(decr_bcx, Load(decr_bcx, rc_ptr), C_int(ccx, 1)); + Store(decr_bcx, rc, rc_ptr); + CondBr(decr_bcx, IsNull(decr_bcx, rc), free_bcx.llbb, next_bcx.llbb); + + let free_bcx = match box_ptr_ptr { + Some(p) => free_ty(free_bcx, p, t), + None => free_ty_immediate(free_bcx, box_ptr, t) + }; + Br(free_bcx, next_bcx.llbb); + + next_bcx } From 565a9bf20b726c5182a6c4c816c1d9a4108c778a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Steinbrink?= Date: Sun, 21 Jul 2013 15:33:40 +0200 Subject: [PATCH 3/4] Provide lower level access to the LLVM IR builder Currently, the helper functions in the "build" module can only append at the end of a block. For certain things we'll want to be able to insert code at arbitrary locations inside a block though. 
Although can we do that by directly calling the LLVM functions, that is rather ugly and means that somethings need to be implemented twice. Once in terms of the helper functions and once in terms of low level LLVM functions. Instead of doing that, we should provide a Builder type that provides low level access to the builder, and which can be used by both, the helper functions in the "build" module, as well larger units of abstractions that combine several LLVM instructions. --- src/librustc/middle/trans/base.rs | 1 + src/librustc/middle/trans/build.rs | 759 +++++--------------- src/librustc/middle/trans/builder.rs | 947 +++++++++++++++++++++++++ src/librustc/middle/trans/context.rs | 5 + src/librustc/middle/trans/debuginfo.rs | 4 +- src/librustc/middle/trans/mod.rs | 1 + 6 files changed, 1149 insertions(+), 568 deletions(-) create mode 100644 src/librustc/middle/trans/builder.rs diff --git a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs index 45a44e7a8a7ef..5536fa6daa73a 100644 --- a/src/librustc/middle/trans/base.rs +++ b/src/librustc/middle/trans/base.rs @@ -41,6 +41,7 @@ use middle::trans::_match; use middle::trans::adt; use middle::trans::base; use middle::trans::build::*; +use middle::trans::builder::noname; use middle::trans::callee; use middle::trans::common::*; use middle::trans::consts; diff --git a/src/librustc/middle/trans/build.rs b/src/librustc/middle/trans/build.rs index 26fce42f8e35e..7861f658f53e8 100644 --- a/src/librustc/middle/trans/build.rs +++ b/src/librustc/middle/trans/build.rs @@ -11,21 +11,17 @@ use lib::llvm::llvm; use lib::llvm::{CallConv, AtomicBinOp, AtomicOrdering, AsmDialect}; -use lib::llvm::{Opcode, IntPredicate, RealPredicate, False}; -use lib::llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; +use lib::llvm::{Opcode, IntPredicate, RealPredicate}; +use lib::llvm::{ValueRef, BasicBlockRef}; use lib; use middle::trans::common::*; -use middle::trans::machine::llalign_of_min; use syntax::codemap::span; 
-use middle::trans::base; +use middle::trans::builder::Builder; use middle::trans::type_::Type; use std::cast; use std::libc::{c_uint, c_ulonglong, c_char}; -use std::hashmap::HashMap; -use std::str; -use std::vec; pub fn terminate(cx: block, _: &str) { cx.terminated = true; @@ -37,56 +33,12 @@ pub fn check_not_terminated(cx: block) { } } -pub fn B(cx: block) -> BuilderRef { - unsafe { - let b = cx.fcx.ccx.builder.B; - llvm::LLVMPositionBuilderAtEnd(b, cx.llbb); - return b; - } -} - -pub fn count_insn(cx: block, category: &str) { - if cx.ccx().sess.trans_stats() { - cx.ccx().stats.n_llvm_insns += 1; - } - do base::with_insn_ctxt |v| { - let h = &mut cx.ccx().stats.llvm_insns; - - // Build version of path with cycles removed. - - // Pass 1: scan table mapping str -> rightmost pos. - let mut mm = HashMap::new(); - let len = v.len(); - let mut i = 0u; - while i < len { - mm.insert(v[i], i); - i += 1u; - } - - // Pass 2: concat strings for each elt, skipping - // forwards over any cycles by advancing to rightmost - // occurrence of each element in path. - let mut s = ~"."; - i = 0u; - while i < len { - i = *mm.get(&v[i]); - s.push_char('/'); - s.push_str(v[i]); - i += 1u; - } - - s.push_char('/'); - s.push_str(category); - - let n = match h.find(&s) { - Some(&n) => n, - _ => 0u - }; - h.insert(s, n+1u); - } +pub fn B(cx: block) -> Builder { + let b = cx.fcx.ccx.builder(); + b.position_at_end(cx.llbb); + b } - // The difference between a block being unreachable and being terminated is // somewhat obscure, and has to do with error checking. When a block is // terminated, we're saying that trying to add any further statements in the @@ -96,64 +48,47 @@ pub fn count_insn(cx: block, category: &str) { // further instructions to the block should simply be ignored. 
pub fn RetVoid(cx: block) { - unsafe { - if cx.unreachable { return; } - check_not_terminated(cx); - terminate(cx, "RetVoid"); - count_insn(cx, "retvoid"); - llvm::LLVMBuildRetVoid(B(cx)); - } + if cx.unreachable { return; } + check_not_terminated(cx); + terminate(cx, "RetVoid"); + B(cx).ret_void(); } pub fn Ret(cx: block, V: ValueRef) { - unsafe { - if cx.unreachable { return; } - check_not_terminated(cx); - terminate(cx, "Ret"); - count_insn(cx, "ret"); - llvm::LLVMBuildRet(B(cx), V); - } + if cx.unreachable { return; } + check_not_terminated(cx); + terminate(cx, "Ret"); + B(cx).ret(V); } pub fn AggregateRet(cx: block, RetVals: &[ValueRef]) { if cx.unreachable { return; } check_not_terminated(cx); terminate(cx, "AggregateRet"); - unsafe { - llvm::LLVMBuildAggregateRet(B(cx), vec::raw::to_ptr(RetVals), - RetVals.len() as c_uint); - } + B(cx).aggregate_ret(RetVals); } pub fn Br(cx: block, Dest: BasicBlockRef) { - unsafe { - if cx.unreachable { return; } - check_not_terminated(cx); - terminate(cx, "Br"); - count_insn(cx, "br"); - llvm::LLVMBuildBr(B(cx), Dest); - } + if cx.unreachable { return; } + check_not_terminated(cx); + terminate(cx, "Br"); + B(cx).br(Dest); } pub fn CondBr(cx: block, If: ValueRef, Then: BasicBlockRef, Else: BasicBlockRef) { - unsafe { - if cx.unreachable { return; } - check_not_terminated(cx); - terminate(cx, "CondBr"); - count_insn(cx, "condbr"); - llvm::LLVMBuildCondBr(B(cx), If, Then, Else); - } + if cx.unreachable { return; } + check_not_terminated(cx); + terminate(cx, "CondBr"); + B(cx).cond_br(If, Then, Else); } pub fn Switch(cx: block, V: ValueRef, Else: BasicBlockRef, NumCases: uint) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(V); } - check_not_terminated(cx); - terminate(cx, "Switch"); - return llvm::LLVMBuildSwitch(B(cx), V, Else, NumCases as c_uint); - } + if cx.unreachable { return _Undef(V); } + check_not_terminated(cx); + terminate(cx, "Switch"); + B(cx).switch(V, Else, NumCases) } pub fn AddCase(S: ValueRef, 
OnVal: ValueRef, Dest: BasicBlockRef) { @@ -164,22 +99,10 @@ pub fn AddCase(S: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef) { } pub fn IndirectBr(cx: block, Addr: ValueRef, NumDests: uint) { - unsafe { - if cx.unreachable { return; } - check_not_terminated(cx); - terminate(cx, "IndirectBr"); - count_insn(cx, "indirectbr"); - llvm::LLVMBuildIndirectBr(B(cx), Addr, NumDests as c_uint); - } -} - -// This is a really awful way to get a zero-length c-string, but better (and a -// lot more efficient) than doing str::as_c_str("", ...) every time. -pub fn noname() -> *c_char { - unsafe { - static cnull: uint = 0u; - return cast::transmute(&cnull); - } + if cx.unreachable { return; } + check_not_terminated(cx); + terminate(cx, "IndirectBr"); + B(cx).indirect_br(Addr, NumDests); } pub fn Invoke(cx: block, @@ -196,16 +119,7 @@ pub fn Invoke(cx: block, debug!("Invoke(%s with arguments (%s))", cx.val_to_str(Fn), Args.map(|a| cx.val_to_str(*a)).connect(", ")); - unsafe { - count_insn(cx, "invoke"); - llvm::LLVMBuildInvoke(B(cx), - Fn, - vec::raw::to_ptr(Args), - Args.len() as c_uint, - Then, - Catch, - noname()) - } + B(cx).invoke(Fn, Args, Then, Catch) } pub fn FastInvoke(cx: block, Fn: ValueRef, Args: &[ValueRef], @@ -213,23 +127,14 @@ pub fn FastInvoke(cx: block, Fn: ValueRef, Args: &[ValueRef], if cx.unreachable { return; } check_not_terminated(cx); terminate(cx, "FastInvoke"); - unsafe { - count_insn(cx, "fastinvoke"); - let v = llvm::LLVMBuildInvoke(B(cx), Fn, vec::raw::to_ptr(Args), - Args.len() as c_uint, - Then, Catch, noname()); - lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv); - } + B(cx).fast_invoke(Fn, Args, Then, Catch); } pub fn Unreachable(cx: block) { - unsafe { - if cx.unreachable { return; } - cx.unreachable = true; - if !cx.terminated { - count_insn(cx, "unreachable"); - llvm::LLVMBuildUnreachable(B(cx)); - } + if cx.unreachable { return; } + cx.unreachable = true; + if !cx.terminated { + B(cx).unreachable(); } } @@ -241,298 +146,192 @@ pub 
fn _Undef(val: ValueRef) -> ValueRef { /* Arithmetic */ pub fn Add(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "add"); - return llvm::LLVMBuildAdd(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).add(LHS, RHS) } pub fn NSWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "nswadd"); - return llvm::LLVMBuildNSWAdd(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).nswadd(LHS, RHS) } pub fn NUWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "nuwadd"); - return llvm::LLVMBuildNUWAdd(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).nuwadd(LHS, RHS) } pub fn FAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "fadd"); - return llvm::LLVMBuildFAdd(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).fadd(LHS, RHS) } pub fn Sub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "sub"); - return llvm::LLVMBuildSub(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).sub(LHS, RHS) } pub fn NSWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "nwsub"); - return llvm::LLVMBuildNSWSub(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).nswsub(LHS, RHS) } pub fn NUWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "nuwsub"); - return llvm::LLVMBuildNUWSub(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + 
B(cx).nuwsub(LHS, RHS) } pub fn FSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "sub"); - return llvm::LLVMBuildFSub(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).fsub(LHS, RHS) } pub fn Mul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "mul"); - return llvm::LLVMBuildMul(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).mul(LHS, RHS) } pub fn NSWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "nswmul"); - return llvm::LLVMBuildNSWMul(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).nswmul(LHS, RHS) } pub fn NUWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "nuwmul"); - return llvm::LLVMBuildNUWMul(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).nuwmul(LHS, RHS) } pub fn FMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "fmul"); - return llvm::LLVMBuildFMul(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).fmul(LHS, RHS) } pub fn UDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "udiv"); - return llvm::LLVMBuildUDiv(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).udiv(LHS, RHS) } pub fn SDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "sdiv"); - return llvm::LLVMBuildSDiv(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).sdiv(LHS, RHS) } pub fn 
ExactSDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "extractsdiv"); - return llvm::LLVMBuildExactSDiv(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).exactsdiv(LHS, RHS) } pub fn FDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "fdiv"); - return llvm::LLVMBuildFDiv(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).fdiv(LHS, RHS) } pub fn URem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "urem"); - return llvm::LLVMBuildURem(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).urem(LHS, RHS) } pub fn SRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "srem"); - return llvm::LLVMBuildSRem(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).srem(LHS, RHS) } pub fn FRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "frem"); - return llvm::LLVMBuildFRem(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).frem(LHS, RHS) } pub fn Shl(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "shl"); - return llvm::LLVMBuildShl(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).shl(LHS, RHS) } pub fn LShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "lshr"); - return llvm::LLVMBuildLShr(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).lshr(LHS, RHS) } pub fn AShr(cx: block, LHS: ValueRef, RHS: 
ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "ashr"); - return llvm::LLVMBuildAShr(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).ashr(LHS, RHS) } pub fn And(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "and"); - return llvm::LLVMBuildAnd(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).and(LHS, RHS) } pub fn Or(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "or"); - return llvm::LLVMBuildOr(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).or(LHS, RHS) } pub fn Xor(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "xor"); - return llvm::LLVMBuildXor(B(cx), LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).xor(LHS, RHS) } pub fn BinOp(cx: block, Op: Opcode, LHS: ValueRef, RHS: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(LHS); } - count_insn(cx, "binop"); - return llvm::LLVMBuildBinOp(B(cx), Op, LHS, RHS, noname()); - } + if cx.unreachable { return _Undef(LHS); } + B(cx).binop(Op, LHS, RHS) } pub fn Neg(cx: block, V: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(V); } - count_insn(cx, "neg"); - return llvm::LLVMBuildNeg(B(cx), V, noname()); - } + if cx.unreachable { return _Undef(V); } + B(cx).neg(V) } pub fn NSWNeg(cx: block, V: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(V); } - count_insn(cx, "nswneg"); - return llvm::LLVMBuildNSWNeg(B(cx), V, noname()); - } + if cx.unreachable { return _Undef(V); } + B(cx).nswneg(V) } pub fn NUWNeg(cx: block, V: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(V); } - count_insn(cx, "nuwneg"); - return 
llvm::LLVMBuildNUWNeg(B(cx), V, noname()); - } + if cx.unreachable { return _Undef(V); } + B(cx).nuwneg(V) } pub fn FNeg(cx: block, V: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(V); } - count_insn(cx, "fneg"); - return llvm::LLVMBuildFNeg(B(cx), V, noname()); - } + if cx.unreachable { return _Undef(V); } + B(cx).fneg(V) } pub fn Not(cx: block, V: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable { return _Undef(V); } - count_insn(cx, "not"); - return llvm::LLVMBuildNot(B(cx), V, noname()); - } + if cx.unreachable { return _Undef(V); } + B(cx).not(V) } /* Memory */ pub fn Malloc(cx: block, Ty: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i8p().to_ref()); } - count_insn(cx, "malloc"); - return llvm::LLVMBuildMalloc(B(cx), Ty.to_ref(), noname()); + B(cx).malloc(Ty) } } pub fn ArrayMalloc(cx: block, Ty: Type, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i8p().to_ref()); } - count_insn(cx, "arraymalloc"); - return llvm::LLVMBuildArrayMalloc(B(cx), Ty.to_ref(), Val, noname()); + B(cx).array_malloc(Ty, Val) } } pub fn Alloca(cx: block, Ty: Type, name: &str) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty.ptr_to().to_ref()); } - count_insn(cx, "alloca"); - if name.is_empty() { - llvm::LLVMBuildAlloca(B(cx), Ty.to_ref(), noname()) - } else { - str::as_c_str( - name, - |c| llvm::LLVMBuildAlloca(B(cx), Ty.to_ref(), c)) - } + B(cx).alloca(Ty, name) } } pub fn ArrayAlloca(cx: block, Ty: Type, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty.ptr_to().to_ref()); } - count_insn(cx, "arrayalloca"); - return llvm::LLVMBuildArrayAlloca(B(cx), Ty.to_ref(), Val, noname()); + B(cx).array_alloca(Ty, Val) } } pub fn Free(cx: block, PointerVal: ValueRef) { - unsafe { - if cx.unreachable { return; } - count_insn(cx, "free"); - llvm::LLVMBuildFree(B(cx), PointerVal); - } + if cx.unreachable { return; } + 
B(cx).free(PointerVal) } pub fn Load(cx: block, PointerVal: ValueRef) -> ValueRef { @@ -547,8 +346,7 @@ pub fn Load(cx: block, PointerVal: ValueRef) -> ValueRef { }; return llvm::LLVMGetUndef(eltty.to_ref()); } - count_insn(cx, "load"); - return llvm::LLVMBuildLoad(B(cx), PointerVal, noname()); + B(cx).load(PointerVal) } } @@ -558,63 +356,43 @@ pub fn AtomicLoad(cx: block, PointerVal: ValueRef, order: AtomicOrdering) -> Val if cx.unreachable { return llvm::LLVMGetUndef(ccx.int_type.to_ref()); } - count_insn(cx, "load.atomic"); - let align = llalign_of_min(ccx, ccx.int_type); - return llvm::LLVMBuildAtomicLoad(B(cx), PointerVal, noname(), order, align as c_uint); + B(cx).atomic_load(PointerVal, order) } } pub fn LoadRangeAssert(cx: block, PointerVal: ValueRef, lo: c_ulonglong, hi: c_ulonglong, signed: lib::llvm::Bool) -> ValueRef { - let value = Load(cx, PointerVal); - - if !cx.unreachable { + if cx.unreachable { + let ccx = cx.fcx.ccx; + let ty = val_ty(PointerVal); + let eltty = if ty.kind() == lib::llvm::Array { + ty.element_type() + } else { + ccx.int_type + }; unsafe { - let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(PointerVal)); - let min = llvm::LLVMConstInt(t, lo, signed); - let max = llvm::LLVMConstInt(t, hi, signed); - - do [min, max].as_imm_buf |ptr, len| { - llvm::LLVMSetMetadata(value, lib::llvm::MD_range as c_uint, - llvm::LLVMMDNodeInContext(cx.fcx.ccx.llcx, - ptr, len as c_uint)); - } + llvm::LLVMGetUndef(eltty.to_ref()) } + } else { + B(cx).load_range_assert(PointerVal, lo, hi, signed) } - - value } pub fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) { - unsafe { - if cx.unreachable { return; } - debug!("Store %s -> %s", - cx.val_to_str(Val), - cx.val_to_str(Ptr)); - count_insn(cx, "store"); - llvm::LLVMBuildStore(B(cx), Val, Ptr); - } + if cx.unreachable { return; } + B(cx).store(Val, Ptr) } pub fn AtomicStore(cx: block, Val: ValueRef, Ptr: ValueRef, order: AtomicOrdering) { - unsafe { - if cx.unreachable { return; } - debug!("Store %s -> 
%s", - cx.val_to_str(Val), - cx.val_to_str(Ptr)); - count_insn(cx, "store.atomic"); - let align = llalign_of_min(cx.ccx(), cx.ccx().int_type); - llvm::LLVMBuildAtomicStore(B(cx), Val, Ptr, order, align as c_uint); - } + if cx.unreachable { return; } + B(cx).atomic_store(Val, Ptr, order) } pub fn GEP(cx: block, Pointer: ValueRef, Indices: &[ValueRef]) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref()); } - count_insn(cx, "gep"); - return llvm::LLVMBuildGEP(B(cx), Pointer, vec::raw::to_ptr(Indices), - Indices.len() as c_uint, noname()); + B(cx).gep(Pointer, Indices) } } @@ -622,54 +400,37 @@ pub fn GEP(cx: block, Pointer: ValueRef, Indices: &[ValueRef]) -> ValueRef { // in C_i32() #[inline] pub fn GEPi(cx: block, base: ValueRef, ixs: &[uint]) -> ValueRef { - // Small vector optimization. This should catch 100% of the cases that - // we care about. - if ixs.len() < 16 { - let mut small_vec = [ C_i32(0), ..16 ]; - for small_vec.mut_iter().zip(ixs.iter()).advance |(small_vec_e, &ix)| { - *small_vec_e = C_i32(ix as i32); - } - InBoundsGEP(cx, base, small_vec.slice(0, ixs.len())) - } else { - let v = do ixs.iter().transform |i| { C_i32(*i as i32) }.collect::<~[ValueRef]>(); - count_insn(cx, "gepi"); - InBoundsGEP(cx, base, v) + unsafe { + if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref()); } + B(cx).gepi(base, ixs) } } pub fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: &[ValueRef]) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref()); } - count_insn(cx, "inboundsgep"); - return llvm::LLVMBuildInBoundsGEP( - B(cx), Pointer, vec::raw::to_ptr(Indices), Indices.len() as c_uint, noname()); + B(cx).inbounds_gep(Pointer, Indices) } } pub fn StructGEP(cx: block, Pointer: ValueRef, Idx: uint) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref()); } - count_insn(cx, "structgep"); - return 
llvm::LLVMBuildStructGEP(B(cx), - Pointer, - Idx as c_uint, - noname()); + B(cx).struct_gep(Pointer, Idx) } } pub fn GlobalString(cx: block, _Str: *c_char) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i8p().to_ref()); } - count_insn(cx, "globalstring"); - return llvm::LLVMBuildGlobalString(B(cx), _Str, noname()); + B(cx).global_string(_Str) } } pub fn GlobalStringPtr(cx: block, _Str: *c_char) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i8p().to_ref()); } - count_insn(cx, "globalstringptr"); - return llvm::LLVMBuildGlobalStringPtr(B(cx), _Str, noname()); + B(cx).global_string_ptr(_Str) } } @@ -677,153 +438,134 @@ pub fn GlobalStringPtr(cx: block, _Str: *c_char) -> ValueRef { pub fn Trunc(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "trunc"); - return llvm::LLVMBuildTrunc(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).trunc(Val, DestTy) } } pub fn ZExt(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "zext"); - return llvm::LLVMBuildZExt(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).zext(Val, DestTy) } } pub fn SExt(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "sext"); - return llvm::LLVMBuildSExt(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).sext(Val, DestTy) } } pub fn FPToUI(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "fptoui"); - return llvm::LLVMBuildFPToUI(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).fptoui(Val, DestTy) } } pub fn FPToSI(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "fptosi"); - return 
llvm::LLVMBuildFPToSI(B(cx), Val, DestTy.to_ref(),noname()); + B(cx).fptosi(Val, DestTy) } } pub fn UIToFP(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "uitofp"); - return llvm::LLVMBuildUIToFP(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).uitofp(Val, DestTy) } } pub fn SIToFP(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "sitofp"); - return llvm::LLVMBuildSIToFP(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).sitofp(Val, DestTy) } } pub fn FPTrunc(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "fptrunc"); - return llvm::LLVMBuildFPTrunc(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).fptrunc(Val, DestTy) } } pub fn FPExt(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "fpext"); - return llvm::LLVMBuildFPExt(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).fpext(Val, DestTy) } } pub fn PtrToInt(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "ptrtoint"); - return llvm::LLVMBuildPtrToInt(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).ptrtoint(Val, DestTy) } } pub fn IntToPtr(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "inttoptr"); - return llvm::LLVMBuildIntToPtr(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).inttoptr(Val, DestTy) } } pub fn BitCast(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "bitcast"); - return llvm::LLVMBuildBitCast(B(cx), Val, DestTy.to_ref(), noname()); + 
B(cx).bitcast(Val, DestTy) } } pub fn ZExtOrBitCast(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "zextorbitcast"); - return llvm::LLVMBuildZExtOrBitCast(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).zext_or_bitcast(Val, DestTy) } } pub fn SExtOrBitCast(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "sextorbitcast"); - return llvm::LLVMBuildSExtOrBitCast(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).sext_or_bitcast(Val, DestTy) } } pub fn TruncOrBitCast(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "truncorbitcast"); - return llvm::LLVMBuildTruncOrBitCast(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).trunc_or_bitcast(Val, DestTy) } } pub fn Cast(cx: block, Op: Opcode, Val: ValueRef, DestTy: Type, _: *u8) -> ValueRef { unsafe { - count_insn(cx, "cast"); if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - return llvm::LLVMBuildCast(B(cx), Op, Val, DestTy.to_ref(), noname()); + B(cx).cast(Op, Val, DestTy) } } pub fn PointerCast(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "pointercast"); - return llvm::LLVMBuildPointerCast(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).pointercast(Val, DestTy) } } pub fn IntCast(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "intcast"); - return llvm::LLVMBuildIntCast(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).intcast(Val, DestTy) } } pub fn FPCast(cx: block, Val: ValueRef, DestTy: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy.to_ref()); } - count_insn(cx, "fpcast"); - return 
llvm::LLVMBuildFPCast(B(cx), Val, DestTy.to_ref(), noname()); + B(cx).fpcast(Val, DestTy) } } @@ -833,8 +575,7 @@ pub fn ICmp(cx: block, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i1().to_ref()); } - count_insn(cx, "icmp"); - return llvm::LLVMBuildICmp(B(cx), Op as c_uint, LHS, RHS, noname()); + B(cx).icmp(Op, LHS, RHS) } } @@ -842,8 +583,7 @@ pub fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i1().to_ref()); } - count_insn(cx, "fcmp"); - return llvm::LLVMBuildFCmp(B(cx), Op as c_uint, LHS, RHS, noname()); + B(cx).fcmp(Op, LHS, RHS) } } @@ -851,22 +591,14 @@ pub fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef) pub fn EmptyPhi(cx: block, Ty: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty.to_ref()); } - count_insn(cx, "emptyphi"); - return llvm::LLVMBuildPhi(B(cx), Ty.to_ref(), noname()); + B(cx).empty_phi(Ty) } } -pub fn Phi(cx: block, Ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) - -> ValueRef { +pub fn Phi(cx: block, Ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty.to_ref()); } - assert_eq!(vals.len(), bbs.len()); - let phi = EmptyPhi(cx, Ty); - count_insn(cx, "addincoming"); - llvm::LLVMAddIncoming(phi, vec::raw::to_ptr(vals), - vec::raw::to_ptr(bbs), - vals.len() as c_uint); - return phi; + B(cx).phi(Ty, vals, bbs) } } @@ -888,123 +620,58 @@ pub fn _UndefReturn(cx: block, Fn: ValueRef) -> ValueRef { } else { ccx.int_type }; - count_insn(cx, "ret_undef"); - return llvm::LLVMGetUndef(retty.to_ref()); + B(cx).count_insn("ret_undef"); + llvm::LLVMGetUndef(retty.to_ref()) } } -pub fn add_span_comment(bcx: block, sp: span, text: &str) { - let ccx = bcx.ccx(); - if ccx.sess.asm_comments() { - let s = fmt!("%s (%s)", text, ccx.sess.codemap.span_to_str(sp)); - 
debug!("%s", s); - add_comment(bcx, s); - } +pub fn add_span_comment(cx: block, sp: span, text: &str) { + B(cx).add_span_comment(sp, text) } -pub fn add_comment(bcx: block, text: &str) { - unsafe { - let ccx = bcx.ccx(); - if ccx.sess.asm_comments() { - let sanitized = text.replace("$", ""); - let comment_text = ~"# " + - sanitized.replace("\n", "\n\t# "); - count_insn(bcx, "inlineasm"); - let asm = do comment_text.as_c_str |c| { - llvm::LLVMConstInlineAsm(Type::func([], &Type::void()).to_ref(), - c, noname(), False, False) - }; - Call(bcx, asm, []); - } - } +pub fn add_comment(cx: block, text: &str) { + B(cx).add_comment(text) } pub fn InlineAsmCall(cx: block, asm: *c_char, cons: *c_char, inputs: &[ValueRef], output: Type, volatile: bool, alignstack: bool, dia: AsmDialect) -> ValueRef { - unsafe { - count_insn(cx, "inlineasm"); - - let volatile = if volatile { lib::llvm::True } - else { lib::llvm::False }; - let alignstack = if alignstack { lib::llvm::True } - else { lib::llvm::False }; - - let argtys = do inputs.map |v| { - debug!("Asm Input Type: %?", cx.val_to_str(*v)); - val_ty(*v) - }; - - debug!("Asm Output Type: %?", cx.ccx().tn.type_to_str(output)); - let fty = Type::func(argtys, &output); - let v = llvm::LLVMInlineAsm(fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint); - - Call(cx, v, inputs) - } + B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia) } pub fn Call(cx: block, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef { if cx.unreachable { return _UndefReturn(cx, Fn); } - unsafe { - count_insn(cx, "call"); - - debug!("Call(Fn=%s, Args=%?)", - cx.val_to_str(Fn), - Args.map(|arg| cx.val_to_str(*arg))); - - do Args.as_imm_buf |ptr, len| { - llvm::LLVMBuildCall(B(cx), Fn, ptr, len as c_uint, noname()) - } - } + B(cx).call(Fn, Args) } pub fn FastCall(cx: block, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef { if cx.unreachable { return _UndefReturn(cx, Fn); } - unsafe { - count_insn(cx, "fastcall"); - let v = 
llvm::LLVMBuildCall(B(cx), Fn, vec::raw::to_ptr(Args), - Args.len() as c_uint, noname()); - lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv); - return v; - } + B(cx).call(Fn, Args) } pub fn CallWithConv(cx: block, Fn: ValueRef, Args: &[ValueRef], Conv: CallConv) -> ValueRef { if cx.unreachable { return _UndefReturn(cx, Fn); } - unsafe { - count_insn(cx, "callwithconv"); - let v = llvm::LLVMBuildCall(B(cx), Fn, vec::raw::to_ptr(Args), - Args.len() as c_uint, noname()); - lib::llvm::SetInstructionCallConv(v, Conv); - return v; - } + B(cx).call_with_conv(Fn, Args, Conv) } -pub fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) -> - ValueRef { - unsafe { - if cx.unreachable { return _Undef(Then); } - count_insn(cx, "select"); - return llvm::LLVMBuildSelect(B(cx), If, Then, Else, noname()); - } +pub fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) -> ValueRef { + if cx.unreachable { return _Undef(Then); } + B(cx).select(If, Then, Else) } pub fn VAArg(cx: block, list: ValueRef, Ty: Type) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty.to_ref()); } - count_insn(cx, "vaarg"); - return llvm::LLVMBuildVAArg(B(cx), list, Ty.to_ref(), noname()); + B(cx).va_arg(list, Ty) } } -pub fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) -> - ValueRef { +pub fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().to_ref()); } - count_insn(cx, "extractelement"); - return llvm::LLVMBuildExtractElement(B(cx), VecVal, Index, noname()); + B(cx).extract_element(VecVal, Index) } } @@ -1012,8 +679,7 @@ pub fn InsertElement(cx: block, VecVal: ValueRef, EltVal: ValueRef, Index: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().to_ref()); } - count_insn(cx, "insertelement"); - llvm::LLVMBuildInsertElement(B(cx), VecVal, EltVal, Index, noname()) + B(cx).insert_element(VecVal, EltVal, 
Index) } } @@ -1021,52 +687,40 @@ pub fn ShuffleVector(cx: block, V1: ValueRef, V2: ValueRef, Mask: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().to_ref()); } - count_insn(cx, "shufflevector"); - llvm::LLVMBuildShuffleVector(B(cx), V1, V2, Mask, noname()) + B(cx).shuffle_vector(V1, V2, Mask) } } pub fn VectorSplat(cx: block, NumElts: uint, EltVal: ValueRef) -> ValueRef { unsafe { - let elt_ty = val_ty(EltVal); - let Undef = llvm::LLVMGetUndef(Type::vector(&elt_ty, NumElts as u64).to_ref()); - let VecVal = InsertElement(cx, Undef, EltVal, C_i32(0)); - ShuffleVector(cx, VecVal, Undef, C_null(Type::vector(&Type::i32(), NumElts as u64))) + if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().to_ref()); } + B(cx).vector_splat(NumElts, EltVal) } } pub fn ExtractValue(cx: block, AggVal: ValueRef, Index: uint) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::nil().to_ref()); } - count_insn(cx, "extractvalue"); - return llvm::LLVMBuildExtractValue( - B(cx), AggVal, Index as c_uint, noname()); + B(cx).extract_value(AggVal, Index) } } -pub fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef, - Index: uint) { - unsafe { - if cx.unreachable { return; } - count_insn(cx, "insertvalue"); - llvm::LLVMBuildInsertValue(B(cx), AggVal, EltVal, Index as c_uint, - noname()); - } +pub fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef, Index: uint) { + if cx.unreachable { return; } + B(cx).insert_value(AggVal, EltVal, Index) } pub fn IsNull(cx: block, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i1().to_ref()); } - count_insn(cx, "isnull"); - return llvm::LLVMBuildIsNull(B(cx), Val, noname()); + B(cx).is_null(Val) } } pub fn IsNotNull(cx: block, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Type::i1().to_ref()); } - count_insn(cx, "isnotnull"); - return llvm::LLVMBuildIsNotNull(B(cx), Val, noname()); + 
B(cx).is_not_null(Val) } } @@ -1074,67 +728,40 @@ pub fn PtrDiff(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; if cx.unreachable { return llvm::LLVMGetUndef(ccx.int_type.to_ref()); } - count_insn(cx, "ptrdiff"); - return llvm::LLVMBuildPtrDiff(B(cx), LHS, RHS, noname()); + B(cx).ptrdiff(LHS, RHS) } } pub fn Trap(cx: block) { - unsafe { - if cx.unreachable { return; } - let b = B(cx); - let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(b); - let FN: ValueRef = llvm::LLVMGetBasicBlockParent(BB); - let M: ModuleRef = llvm::LLVMGetGlobalParent(FN); - let T: ValueRef = str::as_c_str("llvm.trap", |buf| { - llvm::LLVMGetNamedFunction(M, buf) - }); - assert!((T as int != 0)); - let Args: ~[ValueRef] = ~[]; - count_insn(cx, "trap"); - llvm::LLVMBuildCall(b, T, vec::raw::to_ptr(Args), Args.len() as c_uint, noname()); - } + if cx.unreachable { return; } + B(cx).trap(); } pub fn LandingPad(cx: block, Ty: Type, PersFn: ValueRef, NumClauses: uint) -> ValueRef { - unsafe { - check_not_terminated(cx); - assert!(!cx.unreachable); - count_insn(cx, "landingpad"); - return llvm::LLVMBuildLandingPad( - B(cx), Ty.to_ref(), PersFn, NumClauses as c_uint, noname()); - } + check_not_terminated(cx); + assert!(!cx.unreachable); + B(cx).landing_pad(Ty, PersFn, NumClauses) } pub fn SetCleanup(cx: block, LandingPad: ValueRef) { - unsafe { - count_insn(cx, "setcleanup"); - llvm::LLVMSetCleanup(LandingPad, lib::llvm::True); - } + B(cx).set_cleanup(LandingPad) } pub fn Resume(cx: block, Exn: ValueRef) -> ValueRef { - unsafe { - check_not_terminated(cx); - terminate(cx, "Resume"); - count_insn(cx, "resume"); - return llvm::LLVMBuildResume(B(cx), Exn); - } + check_not_terminated(cx); + terminate(cx, "Resume"); + B(cx).resume(Exn) } // Atomic Operations pub fn AtomicCmpXchg(cx: block, dst: ValueRef, cmp: ValueRef, src: ValueRef, order: AtomicOrdering) -> ValueRef { - unsafe { - llvm::LLVMBuildAtomicCmpXchg(B(cx), dst, cmp, src, order) - } + 
B(cx).atomic_cmpxchg(dst, cmp, src, order) } pub fn AtomicRMW(cx: block, op: AtomicBinOp, dst: ValueRef, src: ValueRef, order: AtomicOrdering) -> ValueRef { - unsafe { - llvm::LLVMBuildAtomicRMW(B(cx), op, dst, src, order) - } + B(cx).atomic_rmw(op, dst, src, order) } diff --git a/src/librustc/middle/trans/builder.rs b/src/librustc/middle/trans/builder.rs new file mode 100644 index 0000000000000..a4a976145b9a5 --- /dev/null +++ b/src/librustc/middle/trans/builder.rs @@ -0,0 +1,947 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use lib; +use lib::llvm::llvm; +use lib::llvm::{CallConv, AtomicBinOp, AtomicOrdering, AsmDialect}; +use lib::llvm::{Opcode, IntPredicate, RealPredicate, False}; +use lib::llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; +use middle::trans::base; +use middle::trans::common::*; +use middle::trans::machine::llalign_of_min; +use middle::trans::type_::Type; +use std::cast; +use std::hashmap::HashMap; +use std::libc::{c_uint, c_ulonglong, c_char}; +use std::str; +use std::vec; +use syntax::codemap::span; + +pub struct Builder { + llbuilder: BuilderRef, + ccx: @mut CrateContext, +} + +// This is a really awful way to get a zero-length c-string, but better (and a +// lot more efficient) than doing str::as_c_str("", ...) every time. 
+pub fn noname() -> *c_char { + unsafe { + static cnull: uint = 0u; + cast::transmute(&cnull) + } +} + +impl Builder { + pub fn new(ccx: @mut CrateContext) -> Builder { + Builder { + llbuilder: ccx.builder.B, + ccx: ccx, + } + } + + pub fn count_insn(&self, category: &str) { + if self.ccx.sess.trans_stats() { + self.ccx.stats.n_llvm_insns += 1; + } + if self.ccx.sess.count_llvm_insns() { + do base::with_insn_ctxt |v| { + let h = &mut self.ccx.stats.llvm_insns; + + // Build version of path with cycles removed. + + // Pass 1: scan table mapping str -> rightmost pos. + let mut mm = HashMap::new(); + let len = v.len(); + let mut i = 0u; + while i < len { + mm.insert(v[i], i); + i += 1u; + } + + // Pass 2: concat strings for each elt, skipping + // forwards over any cycles by advancing to rightmost + // occurrence of each element in path. + let mut s = ~"."; + i = 0u; + while i < len { + i = *mm.get(&v[i]); + s.push_char('/'); + s.push_str(v[i]); + i += 1u; + } + + s.push_char('/'); + s.push_str(category); + + let n = match h.find(&s) { + Some(&n) => n, + _ => 0u + }; + h.insert(s, n+1u); + } + } + } + + pub fn position_before(&self, insn: ValueRef) { + unsafe { + llvm::LLVMPositionBuilderBefore(self.llbuilder, insn); + } + } + + pub fn position_at_end(&self, llbb: BasicBlockRef) { + unsafe { + llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); + } + } + + pub fn ret_void(&self) { + self.count_insn("retvoid"); + unsafe { + llvm::LLVMBuildRetVoid(self.llbuilder); + } + } + + pub fn ret(&self, v: ValueRef) { + self.count_insn("ret"); + unsafe { + llvm::LLVMBuildRet(self.llbuilder, v); + } + } + + pub fn aggregate_ret(&self, ret_vals: &[ValueRef]) { + unsafe { + llvm::LLVMBuildAggregateRet(self.llbuilder, + vec::raw::to_ptr(ret_vals), + ret_vals.len() as c_uint); + } + } + + pub fn br(&self, dest: BasicBlockRef) { + self.count_insn("br"); + unsafe { + llvm::LLVMBuildBr(self.llbuilder, dest); + } + } + + pub fn cond_br(&self, cond: ValueRef, then_llbb: BasicBlockRef, 
else_llbb: BasicBlockRef) { + self.count_insn("condbr"); + unsafe { + llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb); + } + } + + pub fn switch(&self, v: ValueRef, else_llbb: BasicBlockRef, num_cases: uint) -> ValueRef { + unsafe { + llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) + } + } + + pub fn indirect_br(&self, addr: ValueRef, num_dests: uint) { + self.count_insn("indirectbr"); + unsafe { + llvm::LLVMBuildIndirectBr(self.llbuilder, addr, num_dests as c_uint); + } + } + + pub fn invoke(&self, + llfn: ValueRef, + args: &[ValueRef], + then: BasicBlockRef, + catch: BasicBlockRef) + -> ValueRef { + self.count_insn("invoke"); + unsafe { + llvm::LLVMBuildInvoke(self.llbuilder, + llfn, + vec::raw::to_ptr(args), + args.len() as c_uint, + then, + catch, + noname()) + } + } + + pub fn fast_invoke(&self, + llfn: ValueRef, + args: &[ValueRef], + then: BasicBlockRef, + catch: BasicBlockRef) { + self.count_insn("fastinvoke"); + let v = self.invoke(llfn, args, then, catch); + lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv); + } + + pub fn unreachable(&self) { + self.count_insn("unreachable"); + unsafe { + llvm::LLVMBuildUnreachable(self.llbuilder); + } + } + + /* Arithmetic */ + pub fn add(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("add"); + unsafe { + llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn nswadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("nswadd"); + unsafe { + llvm::LLVMBuildNSWAdd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn nuwadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("nuwadd"); + unsafe { + llvm::LLVMBuildNUWAdd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fadd(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("fadd"); + unsafe { + llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn sub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef 
{ + self.count_insn("sub"); + unsafe { + llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn nswsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("nwsub"); + unsafe { + llvm::LLVMBuildNSWSub(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn nuwsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("nuwsub"); + unsafe { + llvm::LLVMBuildNUWSub(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fsub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("sub"); + unsafe { + llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn mul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("mul"); + unsafe { + llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn nswmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("nswmul"); + unsafe { + llvm::LLVMBuildNSWMul(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn nuwmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("nuwmul"); + unsafe { + llvm::LLVMBuildNUWMul(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fmul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("fmul"); + unsafe { + llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn udiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("udiv"); + unsafe { + llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn sdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("sdiv"); + unsafe { + llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn exactsdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("exactsdiv"); + unsafe { + llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fdiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("fdiv"); + unsafe { + llvm::LLVMBuildFDiv(self.llbuilder, lhs, 
rhs, noname()) + } + } + + pub fn urem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("urem"); + unsafe { + llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn srem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("srem"); + unsafe { + llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn frem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("frem"); + unsafe { + llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn shl(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("shl"); + unsafe { + llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn lshr(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("lshr"); + unsafe { + llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn ashr(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("ashr"); + unsafe { + llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn and(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("and"); + unsafe { + llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn or(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("or"); + unsafe { + llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn xor(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("xor"); + unsafe { + llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn binop(&self, op: Opcode, lhs: ValueRef, rhs: ValueRef) + -> ValueRef { + self.count_insn("binop"); + unsafe { + llvm::LLVMBuildBinOp(self.llbuilder, op, lhs, rhs, noname()) + } + } + + pub fn neg(&self, V: ValueRef) -> ValueRef { + self.count_insn("neg"); + unsafe { + llvm::LLVMBuildNeg(self.llbuilder, V, noname()) + } + } + + pub fn nswneg(&self, V: ValueRef) -> ValueRef { + self.count_insn("nswneg"); + unsafe { + 
llvm::LLVMBuildNSWNeg(self.llbuilder, V, noname()) + } + } + + pub fn nuwneg(&self, V: ValueRef) -> ValueRef { + self.count_insn("nuwneg"); + unsafe { + llvm::LLVMBuildNUWNeg(self.llbuilder, V, noname()) + } + } + pub fn fneg(&self, V: ValueRef) -> ValueRef { + self.count_insn("fneg"); + unsafe { + llvm::LLVMBuildFNeg(self.llbuilder, V, noname()) + } + } + + pub fn not(&self, V: ValueRef) -> ValueRef { + self.count_insn("not"); + unsafe { + llvm::LLVMBuildNot(self.llbuilder, V, noname()) + } + } + + /* Memory */ + pub fn malloc(&self, ty: Type) -> ValueRef { + self.count_insn("malloc"); + unsafe { + llvm::LLVMBuildMalloc(self.llbuilder, ty.to_ref(), noname()) + } + } + + pub fn array_malloc(&self, ty: Type, val: ValueRef) -> ValueRef { + self.count_insn("arraymalloc"); + unsafe { + llvm::LLVMBuildArrayMalloc(self.llbuilder, ty.to_ref(), val, noname()) + } + } + + pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { + self.count_insn("alloca"); + unsafe { + if name.is_empty() { + llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), noname()) + } else { + str::as_c_str( + name, + |c| llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), c)) + } + } + } + + pub fn array_alloca(&self, ty: Type, val: ValueRef) -> ValueRef { + self.count_insn("arrayalloca"); + unsafe { + llvm::LLVMBuildArrayAlloca(self.llbuilder, ty.to_ref(), val, noname()) + } + } + + pub fn free(&self, ptr: ValueRef) { + self.count_insn("free"); + unsafe { + llvm::LLVMBuildFree(self.llbuilder, ptr); + } + } + + pub fn load(&self, ptr: ValueRef) -> ValueRef { + self.count_insn("load"); + unsafe { + llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()) + } + } + + pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef { + self.count_insn("load.atomic"); + unsafe { + let align = llalign_of_min(self.ccx, self.ccx.int_type); + llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order, align as c_uint) + } + } + + + pub fn load_range_assert(&self, ptr: ValueRef, lo: c_ulonglong, + hi: 
c_ulonglong, signed: lib::llvm::Bool) -> ValueRef { + let value = self.load(ptr); + + unsafe { + let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr)); + let min = llvm::LLVMConstInt(t, lo, signed); + let max = llvm::LLVMConstInt(t, hi, signed); + + do [min, max].as_imm_buf |ptr, len| { + llvm::LLVMSetMetadata(value, lib::llvm::MD_range as c_uint, + llvm::LLVMMDNodeInContext(self.ccx.llcx, + ptr, len as c_uint)); + } + } + + value + } + + pub fn store(&self, val: ValueRef, ptr: ValueRef) { + debug!("Store %s -> %s", + self.ccx.tn.val_to_str(val), + self.ccx.tn.val_to_str(ptr)); + self.count_insn("store"); + unsafe { + llvm::LLVMBuildStore(self.llbuilder, val, ptr); + } + } + + pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { + debug!("Store %s -> %s", + self.ccx.tn.val_to_str(val), + self.ccx.tn.val_to_str(ptr)); + self.count_insn("store.atomic"); + let align = llalign_of_min(self.ccx, self.ccx.int_type); + unsafe { + llvm::LLVMBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint); + } + } + + pub fn gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef { + self.count_insn("gep"); + unsafe { + llvm::LLVMBuildGEP(self.llbuilder, ptr, vec::raw::to_ptr(indices), + indices.len() as c_uint, noname()) + } + } + + // Simple wrapper around GEP that takes an array of ints and wraps them + // in C_i32() + #[inline] + pub fn gepi(&self, base: ValueRef, ixs: &[uint]) -> ValueRef { + // Small vector optimization. This should catch 100% of the cases that + // we care about. 
+ if ixs.len() < 16 { + let mut small_vec = [ C_i32(0), ..16 ]; + for small_vec.mut_iter().zip(ixs.iter()).advance |(small_vec_e, &ix)| { + *small_vec_e = C_i32(ix as i32); + } + self.inbounds_gep(base, small_vec.slice(0, ixs.len())) + } else { + let v = do ixs.iter().transform |i| { C_i32(*i as i32) }.collect::<~[ValueRef]>(); + self.count_insn("gepi"); + self.inbounds_gep(base, v) + } + } + + pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef { + self.count_insn("inboundsgep"); + unsafe { + llvm::LLVMBuildInBoundsGEP( + self.llbuilder, ptr, vec::raw::to_ptr(indices), indices.len() as c_uint, noname()) + } + } + + pub fn struct_gep(&self, ptr: ValueRef, idx: uint) -> ValueRef { + self.count_insn("structgep"); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } + + pub fn global_string(&self, _Str: *c_char) -> ValueRef { + self.count_insn("globalstring"); + unsafe { + llvm::LLVMBuildGlobalString(self.llbuilder, _Str, noname()) + } + } + + pub fn global_string_ptr(&self, _Str: *c_char) -> ValueRef { + self.count_insn("globalstringptr"); + unsafe { + llvm::LLVMBuildGlobalStringPtr(self.llbuilder, _Str, noname()) + } + } + + /* Casts */ + pub fn trunc(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("trunc"); + unsafe { + llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn zext(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn sext(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("sext"); + unsafe { + llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn fptoui(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("fptoui"); + unsafe { + llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn fptosi(&self, val: ValueRef, 
dest_ty: Type) -> ValueRef { + self.count_insn("fptosi"); + unsafe { + llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty.to_ref(),noname()) + } + } + + pub fn uitofp(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("uitofp"); + unsafe { + llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn sitofp(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("sitofp"); + unsafe { + llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn fptrunc(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("fptrunc"); + unsafe { + llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn fpext(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("fpext"); + unsafe { + llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn ptrtoint(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("ptrtoint"); + unsafe { + llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn inttoptr(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("inttoptr"); + unsafe { + llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("bitcast"); + unsafe { + llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn zext_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("zextorbitcast"); + unsafe { + llvm::LLVMBuildZExtOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn sext_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("sextorbitcast"); + unsafe { + llvm::LLVMBuildSExtOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn trunc_or_bitcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef { 
+ self.count_insn("truncorbitcast"); + unsafe { + llvm::LLVMBuildTruncOrBitCast(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn cast(&self, op: Opcode, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("cast"); + unsafe { + llvm::LLVMBuildCast(self.llbuilder, op, val, dest_ty.to_ref(), noname()) + } + } + + pub fn pointercast(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("pointercast"); + unsafe { + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn intcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("intcast"); + unsafe { + llvm::LLVMBuildIntCast(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + pub fn fpcast(&self, val: ValueRef, dest_ty: Type) -> ValueRef { + self.count_insn("fpcast"); + unsafe { + llvm::LLVMBuildFPCast(self.llbuilder, val, dest_ty.to_ref(), noname()) + } + } + + + /* Comparisons */ + pub fn icmp(&self, op: IntPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("icmp"); + unsafe { + llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) + } + } + + pub fn fcmp(&self, op: RealPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("fcmp"); + unsafe { + llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) + } + } + + /* Miscellaneous instructions */ + pub fn empty_phi(&self, ty: Type) -> ValueRef { + self.count_insn("emptyphi"); + unsafe { + llvm::LLVMBuildPhi(self.llbuilder, ty.to_ref(), noname()) + } + } + + pub fn phi(&self, ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef { + assert_eq!(vals.len(), bbs.len()); + let phi = self.empty_phi(ty); + self.count_insn("addincoming"); + unsafe { + llvm::LLVMAddIncoming(phi, vec::raw::to_ptr(vals), + vec::raw::to_ptr(bbs), + vals.len() as c_uint); + phi + } + } + + pub fn add_span_comment(&self, sp: span, text: &str) { + if self.ccx.sess.asm_comments() { + let s = fmt!("%s (%s)", text, 
self.ccx.sess.codemap.span_to_str(sp)); + debug!("%s", s); + self.add_comment(s); + } + } + + pub fn add_comment(&self, text: &str) { + if self.ccx.sess.asm_comments() { + let sanitized = text.replace("$", ""); + let comment_text = fmt!("# %s", sanitized.replace("\n", "\n\t# ")); + self.count_insn("inlineasm"); + let asm = do comment_text.as_c_str |c| { + unsafe { + llvm::LLVMConstInlineAsm(Type::func([], &Type::void()).to_ref(), + c, noname(), False, False) + } + }; + self.call(asm, []); + } + } + + pub fn inline_asm_call(&self, asm: *c_char, cons: *c_char, + inputs: &[ValueRef], output: Type, + volatile: bool, alignstack: bool, + dia: AsmDialect) -> ValueRef { + self.count_insn("inlineasm"); + + let volatile = if volatile { lib::llvm::True } + else { lib::llvm::False }; + let alignstack = if alignstack { lib::llvm::True } + else { lib::llvm::False }; + + let argtys = do inputs.map |v| { + debug!("Asm Input Type: %?", self.ccx.tn.val_to_str(*v)); + val_ty(*v) + }; + + debug!("Asm Output Type: %?", self.ccx.tn.type_to_str(output)); + let fty = Type::func(argtys, &output); + unsafe { + let v = llvm::LLVMInlineAsm( + fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint); + self.call(v, inputs) + } + } + + pub fn call(&self, llfn: ValueRef, args: &[ValueRef]) -> ValueRef { + self.count_insn("call"); + + debug!("Call(llfn=%s, args=%?)", + self.ccx.tn.val_to_str(llfn), + args.map(|arg| self.ccx.tn.val_to_str(*arg))); + + do args.as_imm_buf |ptr, len| { + unsafe { + llvm::LLVMBuildCall(self.llbuilder, llfn, ptr, len as c_uint, noname()) + } + } + } + + pub fn fastcall(&self, llfn: ValueRef, args: &[ValueRef]) -> ValueRef { + self.count_insn("fastcall"); + unsafe { + let v = llvm::LLVMBuildCall(self.llbuilder, llfn, vec::raw::to_ptr(args), + args.len() as c_uint, noname()); + lib::llvm::SetInstructionCallConv(v, lib::llvm::FastCallConv); + v + } + } + + pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef], + conv: CallConv) -> ValueRef { + 
self.count_insn("callwithconv"); + unsafe { + let v = llvm::LLVMBuildCall(self.llbuilder, llfn, vec::raw::to_ptr(args), + args.len() as c_uint, noname()); + lib::llvm::SetInstructionCallConv(v, conv); + v + } + } + + pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef { + self.count_insn("select"); + unsafe { + llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname()) + } + } + + pub fn va_arg(&self, list: ValueRef, ty: Type) -> ValueRef { + self.count_insn("vaarg"); + unsafe { + llvm::LLVMBuildVAArg(self.llbuilder, list, ty.to_ref(), noname()) + } + } + + pub fn extract_element(&self, vec: ValueRef, idx: ValueRef) -> ValueRef { + self.count_insn("extractelement"); + unsafe { + llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) + } + } + + pub fn insert_element(&self, vec: ValueRef, elt: ValueRef, idx: ValueRef) -> ValueRef { + self.count_insn("insertelement"); + unsafe { + llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) + } + } + + pub fn shuffle_vector(&self, v1: ValueRef, v2: ValueRef, mask: ValueRef) -> ValueRef { + self.count_insn("shufflevector"); + unsafe { + llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) + } + } + + pub fn vector_splat(&self, num_elts: uint, elt: ValueRef) -> ValueRef { + unsafe { + let elt_ty = val_ty(elt); + let Undef = llvm::LLVMGetUndef(Type::vector(&elt_ty, num_elts as u64).to_ref()); + let vec = self.insert_element(Undef, elt, C_i32(0)); + self.shuffle_vector(vec, Undef, C_null(Type::vector(&Type::i32(), num_elts as u64))) + } + } + + pub fn extract_value(&self, agg_val: ValueRef, idx: uint) -> ValueRef { + self.count_insn("extractvalue"); + unsafe { + llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname()) + } + } + + pub fn insert_value(&self, agg_val: ValueRef, elt: ValueRef, + idx: uint) { + self.count_insn("insertvalue"); + unsafe { + llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as 
c_uint, + noname()); + } + } + + pub fn is_null(&self, val: ValueRef) -> ValueRef { + self.count_insn("isnull"); + unsafe { + llvm::LLVMBuildIsNull(self.llbuilder, val, noname()) + } + } + + pub fn is_not_null(&self, val: ValueRef) -> ValueRef { + self.count_insn("isnotnull"); + unsafe { + llvm::LLVMBuildIsNotNull(self.llbuilder, val, noname()) + } + } + + pub fn ptrdiff(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("ptrdiff"); + unsafe { + llvm::LLVMBuildPtrDiff(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn trap(&self) { + unsafe { + let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(self.llbuilder); + let FN: ValueRef = llvm::LLVMGetBasicBlockParent(BB); + let M: ModuleRef = llvm::LLVMGetGlobalParent(FN); + let T: ValueRef = str::as_c_str("llvm.trap", |buf| { + llvm::LLVMGetNamedFunction(M, buf) + }); + assert!((T as int != 0)); + let args: &[ValueRef] = []; + self.count_insn("trap"); + llvm::LLVMBuildCall( + self.llbuilder, T, vec::raw::to_ptr(args), args.len() as c_uint, noname()); + } + } + + pub fn landing_pad(&self, ty: Type, pers_fn: ValueRef, num_clauses: uint) -> ValueRef { + self.count_insn("landingpad"); + unsafe { + llvm::LLVMBuildLandingPad( + self.llbuilder, ty.to_ref(), pers_fn, num_clauses as c_uint, noname()) + } + } + + pub fn set_cleanup(&self, landing_pad: ValueRef) { + self.count_insn("setcleanup"); + unsafe { + llvm::LLVMSetCleanup(landing_pad, lib::llvm::True); + } + } + + pub fn resume(&self, exn: ValueRef) -> ValueRef { + self.count_insn("resume"); + unsafe { + llvm::LLVMBuildResume(self.llbuilder, exn) + } + } + + // Atomic Operations + pub fn atomic_cmpxchg(&self, dst: ValueRef, + cmp: ValueRef, src: ValueRef, + order: AtomicOrdering) -> ValueRef { + unsafe { + llvm::LLVMBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, order) + } + } + pub fn atomic_rmw(&self, op: AtomicBinOp, + dst: ValueRef, src: ValueRef, + order: AtomicOrdering) -> ValueRef { + unsafe { + llvm::LLVMBuildAtomicRMW(self.llbuilder, op, 
dst, src, order) + } + } +} diff --git a/src/librustc/middle/trans/context.rs b/src/librustc/middle/trans/context.rs index ffebb87d5cf04..4990e78d524b7 100644 --- a/src/librustc/middle/trans/context.rs +++ b/src/librustc/middle/trans/context.rs @@ -19,6 +19,7 @@ use middle::astencode; use middle::resolve; use middle::trans::adt; use middle::trans::base; +use middle::trans::builder::Builder; use middle::trans::debuginfo; use middle::trans::type_use; use middle::ty; @@ -227,6 +228,10 @@ impl CrateContext { } } } + + pub fn builder(@mut self) -> Builder { + Builder::new(self) + } } #[unsafe_destructor] diff --git a/src/librustc/middle/trans/debuginfo.rs b/src/librustc/middle/trans/debuginfo.rs index 0e75e4e85c2f2..391a1283878fa 100644 --- a/src/librustc/middle/trans/debuginfo.rs +++ b/src/librustc/middle/trans/debuginfo.rs @@ -188,7 +188,7 @@ pub fn create_local_var_metadata(bcx: block, local: @ast::local) -> DIVariable { set_debug_location(cx, lexical_block_metadata(bcx), loc.line, loc.col.to_uint()); unsafe { let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd(DIB(cx), llptr, var_metadata, bcx.llbb); - llvm::LLVMSetInstDebugLocation(trans::build::B(bcx), instr); + llvm::LLVMSetInstDebugLocation(trans::build::B(bcx).llbuilder, instr); } return var_metadata; @@ -247,7 +247,7 @@ pub fn create_argument_metadata(bcx: block, arg: &ast::arg, span: span) -> Optio unsafe { let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd( DIB(cx), llptr, var_metadata, bcx.llbb); - llvm::LLVMSetInstDebugLocation(trans::build::B(bcx), instr); + llvm::LLVMSetInstDebugLocation(trans::build::B(bcx).llbuilder, instr); } return Some(var_metadata); } diff --git a/src/librustc/middle/trans/mod.rs b/src/librustc/middle/trans/mod.rs index 64d6bbec87c48..d47d9a4ff1628 100644 --- a/src/librustc/middle/trans/mod.rs +++ b/src/librustc/middle/trans/mod.rs @@ -22,6 +22,7 @@ pub mod context; pub mod consts; pub mod type_of; pub mod build; +pub mod builder; pub mod base; pub mod _match; pub mod uniq; From 
205baa6ca2272e21032f8fb5477edefe4120bcbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Steinbrink?= Date: Sun, 21 Jul 2013 16:19:34 +0200 Subject: [PATCH 4/4] Avoid blocks for static allocas and loading the closure environment These blocks were required because previously we could only insert instructions at the end of blocks, but we wanted to have all allocas in one place, so they can be collapsed. But now we have "direct" access to the LLVM IR builder and can position it freely. This allows us to use the same trick that clang uses, which means that we insert a dummy "marker" instruction to identify the spot at which we want to insert allocas. We can then later position the IR builder at that spot and insert the alloca instruction, without any dedicated block. The block for loading the closure environment can now also go away, because the function context now provides the toplevel block, and the translation of the loading happens first, so that's good enough. Makes the LLVM IR a bit more readable, saving a bunch of branches in the unoptimized code, which benefits unoptimized builds. 
--- src/librustc/lib/llvm.rs | 2 + src/librustc/middle/trans/base.rs | 91 ++++++++++++---------------- src/librustc/middle/trans/build.rs | 8 ++- src/librustc/middle/trans/closure.rs | 21 +------ src/librustc/middle/trans/common.rs | 23 +++---- src/librustc/middle/trans/foreign.rs | 34 ++++------- src/librustc/middle/trans/glue.rs | 7 +-- src/librustc/middle/trans/reflect.rs | 4 +- src/rustllvm/rustllvm.def.in | 1 + 9 files changed, 79 insertions(+), 112 deletions(-) diff --git a/src/librustc/lib/llvm.rs b/src/librustc/lib/llvm.rs index 05992c431dac8..d1a9e387d00ed 100644 --- a/src/librustc/lib/llvm.rs +++ b/src/librustc/lib/llvm.rs @@ -984,6 +984,8 @@ pub mod llvm { pub unsafe fn LLVMGetNextInstruction(Inst: ValueRef) -> ValueRef; #[fast_ffi] pub unsafe fn LLVMGetPreviousInstruction(Inst: ValueRef) -> ValueRef; + #[fast_ffi] + pub unsafe fn LLVMInstructionEraseFromParent(Inst: ValueRef); /* Operations on call sites */ #[fast_ffi] diff --git a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs index 5536fa6daa73a..2512b6e3ece89 100644 --- a/src/librustc/middle/trans/base.rs +++ b/src/librustc/middle/trans/base.rs @@ -41,7 +41,7 @@ use middle::trans::_match; use middle::trans::adt; use middle::trans::base; use middle::trans::build::*; -use middle::trans::builder::noname; +use middle::trans::builder::{Builder, noname}; use middle::trans::callee; use middle::trans::common::*; use middle::trans::consts; @@ -1503,11 +1503,12 @@ pub fn memcpy_ty(bcx: block, dst: ValueRef, src: ValueRef, t: ty::t) { } pub fn zero_mem(cx: block, llptr: ValueRef, t: ty::t) { + if cx.unreachable { return; } let _icx = push_ctxt("zero_mem"); let bcx = cx; let ccx = cx.ccx(); let llty = type_of::type_of(ccx, t); - memzero(bcx, llptr, llty); + memzero(&B(bcx), llptr, llty); } // Always use this function instead of storing a zero constant to the memory @@ -1515,9 +1516,9 @@ pub fn zero_mem(cx: block, llptr: ValueRef, t: ty::t) { // allocation for large data structures, and the 
generated code will be // awful. (A telltale sign of this is large quantities of // `mov [byte ptr foo],0` in the generated code.) -pub fn memzero(cx: block, llptr: ValueRef, ty: Type) { +pub fn memzero(b: &Builder, llptr: ValueRef, ty: Type) { let _icx = push_ctxt("memzero"); - let ccx = cx.ccx(); + let ccx = b.ccx; let intrinsic_key = match ccx.sess.targ_cfg.arch { X86 | Arm | Mips => "llvm.memset.p0i8.i32", @@ -1525,12 +1526,12 @@ pub fn memzero(cx: block, llptr: ValueRef, ty: Type) { }; let llintrinsicfn = ccx.intrinsics.get_copy(&intrinsic_key); - let llptr = PointerCast(cx, llptr, Type::i8().ptr_to()); + let llptr = b.pointercast(llptr, Type::i8().ptr_to()); let llzeroval = C_u8(0); - let size = IntCast(cx, machine::llsize_of(ccx, ty), ccx.int_type); + let size = machine::llsize_of(ccx, ty); let align = C_i32(llalign_of_min(ccx, ty) as i32); let volatile = C_i1(false); - Call(cx, llintrinsicfn, [llptr, llzeroval, size, align, volatile]); + b.call(llintrinsicfn, [llptr, llzeroval, size, align, volatile]); } pub fn alloc_ty(bcx: block, t: ty::t, name: &str) -> ValueRef { @@ -1553,9 +1554,12 @@ pub fn alloca_maybe_zeroed(cx: block, ty: Type, name: &str, zero: bool) -> Value return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } } - let initcx = base::raw_block(cx.fcx, false, cx.fcx.get_llstaticallocas()); - let p = Alloca(initcx, ty, name); - if zero { memzero(initcx, p, ty); } + let p = Alloca(cx, ty, name); + if zero { + let b = cx.fcx.ccx.builder(); + b.position_before(cx.fcx.alloca_insert_pt.get()); + memzero(&b, p, ty); + } p } @@ -1566,7 +1570,7 @@ pub fn arrayalloca(cx: block, ty: Type, v: ValueRef) -> ValueRef { return llvm::LLVMGetUndef(ty.to_ref()); } } - return ArrayAlloca(base::raw_block(cx.fcx, false, cx.fcx.get_llstaticallocas()), ty, v); + return ArrayAlloca(cx, ty, v); } pub struct BasicBlocks { @@ -1597,8 +1601,8 @@ pub fn make_return_pointer(fcx: fn_ctxt, output_type: ty::t) -> ValueRef { llvm::LLVMGetParam(fcx.llfn, 0) } else { let lloutputtype = 
type_of::type_of(fcx.ccx, output_type); - alloca(raw_block(fcx, false, fcx.get_llstaticallocas()), lloutputtype, - "__make_return_pointer") + let bcx = fcx.entry_bcx.get(); + Alloca(bcx, lloutputtype, "__make_return_pointer") } } } @@ -1616,6 +1620,7 @@ pub fn new_fn_ctxt_w_id(ccx: @mut CrateContext, output_type: ty::t, skip_retptr: bool, param_substs: Option<@param_substs>, + opt_node_info: Option, sp: Option) -> fn_ctxt { for param_substs.iter().advance |p| { p.validate(); } @@ -1639,8 +1644,8 @@ pub fn new_fn_ctxt_w_id(ccx: @mut CrateContext, llvm::LLVMGetUndef(Type::i8p().to_ref()) }, llretptr: None, - llstaticallocas: None, - llloadenv: None, + entry_bcx: None, + alloca_insert_pt: None, llreturn: None, llself: None, personality: None, @@ -1658,6 +1663,15 @@ pub fn new_fn_ctxt_w_id(ccx: @mut CrateContext, fcx.llenv = unsafe { llvm::LLVMGetParam(llfndecl, fcx.env_arg_pos() as c_uint) }; + + unsafe { + let entry_bcx = top_scope_block(fcx, opt_node_info); + Load(entry_bcx, C_null(Type::i8p())); + + fcx.entry_bcx = Some(entry_bcx); + fcx.alloca_insert_pt = Some(llvm::LLVMGetFirstInstruction(entry_bcx.llbb)); + } + if !ty::type_is_nil(substd_output_type) && !(is_immediate && skip_retptr) { fcx.llretptr = Some(make_return_pointer(fcx, substd_output_type)); } @@ -1670,7 +1684,7 @@ pub fn new_fn_ctxt(ccx: @mut CrateContext, output_type: ty::t, sp: Option) -> fn_ctxt { - new_fn_ctxt_w_id(ccx, path, llfndecl, -1, output_type, false, None, sp) + new_fn_ctxt_w_id(ccx, path, llfndecl, -1, output_type, false, None, None, sp) } // NB: must keep 4 fns in sync: @@ -1785,9 +1799,8 @@ pub fn copy_args_to_allocas(fcx: fn_ctxt, // Ties up the llstaticallocas -> llloadenv -> lltop edges, // and builds the return block. 
-pub fn finish_fn(fcx: fn_ctxt, lltop: BasicBlockRef, last_bcx: block) { +pub fn finish_fn(fcx: fn_ctxt, last_bcx: block) { let _icx = push_ctxt("finish_fn"); - tie_up_header_blocks(fcx, lltop); let ret_cx = match fcx.llreturn { Some(llreturn) => { @@ -1799,6 +1812,7 @@ pub fn finish_fn(fcx: fn_ctxt, lltop: BasicBlockRef, last_bcx: block) { None => last_bcx }; build_return_block(fcx, ret_cx); + fcx.cleanup(); } // Builds the return block for a function. @@ -1811,29 +1825,6 @@ pub fn build_return_block(fcx: fn_ctxt, ret_cx: block) { } } -pub fn tie_up_header_blocks(fcx: fn_ctxt, lltop: BasicBlockRef) { - let _icx = push_ctxt("tie_up_header_blocks"); - let llnext = match fcx.llloadenv { - Some(ll) => { - unsafe { - llvm::LLVMMoveBasicBlockBefore(ll, lltop); - } - Br(raw_block(fcx, false, ll), lltop); - ll - } - None => lltop - }; - match fcx.llstaticallocas { - Some(ll) => { - unsafe { - llvm::LLVMMoveBasicBlockBefore(ll, llnext); - } - Br(raw_block(fcx, false, ll), llnext); - } - None => () - } -} - pub enum self_arg { impl_self(ty::t, ty::SelfMode), no_self, } // trans_closure: Builds an LLVM function out of a source function. @@ -1866,6 +1857,7 @@ pub fn trans_closure(ccx: @mut CrateContext, output_type, false, param_substs, + body.info(), Some(body.span)); let raw_llargs = create_llargs_for_fn_args(fcx, self_arg, decl.inputs); @@ -1877,9 +1869,8 @@ pub fn trans_closure(ccx: @mut CrateContext, // Create the first basic block in the function and keep a handle on it to // pass to finish_fn later. - let bcx_top = top_scope_block(fcx, body.info()); + let bcx_top = fcx.entry_bcx.get(); let mut bcx = bcx_top; - let lltop = bcx.llbb; let block_ty = node_id_type(bcx, body.id); let arg_tys = ty::ty_fn_args(node_id_type(bcx, id)); @@ -1915,7 +1906,7 @@ pub fn trans_closure(ccx: @mut CrateContext, } // Insert the mandatory first few basic blocks before lltop. 
- finish_fn(fcx, lltop, bcx); + finish_fn(fcx, bcx); } // trans_fn: creates an LLVM function corresponding to a source language @@ -2085,12 +2076,12 @@ pub fn trans_enum_variant_or_tuple_like_struct( result_ty, false, param_substs, + None, None); let raw_llargs = create_llargs_for_fn_args(fcx, no_self, fn_args); - let bcx = top_scope_block(fcx, None); - let lltop = bcx.llbb; + let bcx = fcx.entry_bcx.get(); let arg_tys = ty::ty_fn_args(ctor_ty); insert_synthetic_type_entries(bcx, fn_args, arg_tys); @@ -2108,7 +2099,7 @@ pub fn trans_enum_variant_or_tuple_like_struct( let arg_ty = arg_tys[i]; memcpy_ty(bcx, lldestptr, llarg, arg_ty); } - finish_fn(fcx, lltop, bcx); + finish_fn(fcx, bcx); } pub fn trans_enum_def(ccx: @mut CrateContext, enum_definition: &ast::enum_def, @@ -2336,9 +2327,7 @@ pub fn create_entry_wrapper(ccx: @mut CrateContext, // be updated if this assertion starts to fail. assert!(fcx.has_immediate_return_value); - let bcx = top_scope_block(fcx, None); - let lltop = bcx.llbb; - + let bcx = fcx.entry_bcx.get(); // Call main. 
let llenvarg = unsafe { let env_arg = fcx.env_arg_pos(); @@ -2347,7 +2336,7 @@ pub fn create_entry_wrapper(ccx: @mut CrateContext, let args = ~[llenvarg]; Call(bcx, main_llfn, args); - finish_fn(fcx, lltop, bcx); + finish_fn(fcx, bcx); return llfdecl; } diff --git a/src/librustc/middle/trans/build.rs b/src/librustc/middle/trans/build.rs index 7861f658f53e8..a8c7efb2ad43d 100644 --- a/src/librustc/middle/trans/build.rs +++ b/src/librustc/middle/trans/build.rs @@ -318,14 +318,18 @@ pub fn ArrayMalloc(cx: block, Ty: Type, Val: ValueRef) -> ValueRef { pub fn Alloca(cx: block, Ty: Type, name: &str) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty.ptr_to().to_ref()); } - B(cx).alloca(Ty, name) + let b = cx.fcx.ccx.builder(); + b.position_before(cx.fcx.alloca_insert_pt.get()); + b.alloca(Ty, name) } } pub fn ArrayAlloca(cx: block, Ty: Type, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty.ptr_to().to_ref()); } - B(cx).array_alloca(Ty, Val) + let b = cx.fcx.ccx.builder(); + b.position_before(cx.fcx.alloca_insert_pt.get()); + b.array_alloca(Ty, Val) } } diff --git a/src/librustc/middle/trans/closure.rs b/src/librustc/middle/trans/closure.rs index 7507f19ec2009..cdde96393a14f 100644 --- a/src/librustc/middle/trans/closure.rs +++ b/src/librustc/middle/trans/closure.rs @@ -11,7 +11,7 @@ use back::abi; use back::link::{mangle_internal_name_by_path_and_seq}; -use lib::llvm::{llvm, ValueRef}; +use lib::llvm::ValueRef; use middle::moves; use middle::trans::base::*; use middle::trans::build::*; @@ -25,7 +25,6 @@ use util::ppaux::ty_to_str; use middle::trans::type_::Type; -use std::str; use std::vec; use syntax::ast; use syntax::ast_map::path_name; @@ -331,23 +330,7 @@ pub fn load_environment(fcx: fn_ctxt, return; } - let llloadenv = match fcx.llloadenv { - Some(ll) => ll, - None => { - let ll = - str::as_c_str("load_env", - |buf| - unsafe { - llvm::LLVMAppendBasicBlockInContext(fcx.ccx.llcx, - fcx.llfn, - buf) - }); - 
fcx.llloadenv = Some(ll); - ll - } - }; - - let bcx = raw_block(fcx, false, llloadenv); + let bcx = fcx.entry_bcx.get(); // Load a pointer to the closure data, skipping over the box header: let llcdata = opaque_box_body(bcx, cdata_ty, fcx.llenv); diff --git a/src/librustc/middle/trans/common.rs b/src/librustc/middle/trans/common.rs index d90614ebc021c..f53f15a83d71e 100644 --- a/src/librustc/middle/trans/common.rs +++ b/src/librustc/middle/trans/common.rs @@ -174,17 +174,14 @@ pub struct fn_ctxt_ { // always be Some. llretptr: Option, + entry_bcx: Option, + // These elements: "hoisted basic blocks" containing // administrative activities that have to happen in only one place in // the function, due to LLVM's quirks. - // A block for all the function's static allocas, so that LLVM - // will coalesce them into a single alloca call. - llstaticallocas: Option, - // A block containing code that copies incoming arguments to space - // already allocated by code in one of the llallocas blocks. - // (LLVM requires that arguments be copied to local allocas before - // allowing most any operation to be performed on them.) - llloadenv: Option, + // A marker for the place where we want to insert the function's static + // allocas, so that LLVM will coalesce them into a single alloca call. + alloca_insert_pt: Option, llreturn: Option, // The 'self' value currently in use in this function, if there // is one. 
@@ -252,12 +249,12 @@ impl fn_ctxt_ { } } - pub fn get_llstaticallocas(&mut self) -> BasicBlockRef { - if self.llstaticallocas.is_none() { - self.llstaticallocas = Some(base::mk_staticallocas_basic_block(self.llfn)); + pub fn cleanup(&mut self) { + unsafe { + llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.get()); } - - self.llstaticallocas.get() + // Remove the cycle between fcx and bcx, so memory can be freed + self.entry_bcx = None; } pub fn get_llreturn(&mut self) -> BasicBlockRef { diff --git a/src/librustc/middle/trans/foreign.rs b/src/librustc/middle/trans/foreign.rs index 355e2f57b2c3f..08fbfdee9eac0 100644 --- a/src/librustc/middle/trans/foreign.rs +++ b/src/librustc/middle/trans/foreign.rs @@ -149,8 +149,7 @@ fn build_shim_fn_(ccx: @mut CrateContext, // Declare the body of the shim function: let fcx = new_fn_ctxt(ccx, ~[], llshimfn, tys.fn_sig.output, None); - let bcx = top_scope_block(fcx, None); - let lltop = bcx.llbb; + let bcx = fcx.entry_bcx.get(); let llargbundle = get_param(llshimfn, 0u); let llargvals = arg_builder(bcx, tys, llargbundle); @@ -162,13 +161,12 @@ fn build_shim_fn_(ccx: @mut CrateContext, // Don't finish up the function in the usual way, because this doesn't // follow the normal Rust calling conventions. - tie_up_header_blocks(fcx, lltop); - let ret_cx = match fcx.llreturn { Some(llreturn) => raw_block(fcx, false, llreturn), None => bcx }; RetVoid(ret_cx); + fcx.cleanup(); return llshimfn; } @@ -192,19 +190,15 @@ fn build_wrap_fn_(ccx: @mut CrateContext, ret_builder: wrap_ret_builder) { let _icx = push_ctxt("foreign::build_wrap_fn_"); let fcx = new_fn_ctxt(ccx, ~[], llwrapfn, tys.fn_sig.output, None); + let bcx = fcx.entry_bcx.get(); // Patch up the return type if it's not immediate and we're returning via // the C ABI. 
if needs_c_return && !ty::type_is_immediate(ccx.tcx, tys.fn_sig.output) { let lloutputtype = type_of::type_of(fcx.ccx, tys.fn_sig.output); - fcx.llretptr = Some(alloca(raw_block(fcx, false, fcx.get_llstaticallocas()), - lloutputtype, - "")); + fcx.llretptr = Some(alloca(bcx, lloutputtype, "")); } - let bcx = top_scope_block(fcx, None); - let lltop = bcx.llbb; - // Allocate the struct and write the arguments into it. let llargbundle = alloca(bcx, tys.bundle_ty, "__llargbundle"); arg_builder(bcx, tys, llwrapfn, llargbundle); @@ -215,10 +209,6 @@ fn build_wrap_fn_(ccx: @mut CrateContext, Call(bcx, shim_upcall, [llrawargbundle, llshimfnptr]); ret_builder(bcx, tys, llargbundle); - // Perform a custom version of `finish_fn`. First, tie up the header - // blocks. - tie_up_header_blocks(fcx, lltop); - // Then return according to the C ABI. let return_context = match fcx.llreturn { Some(llreturn) => raw_block(fcx, false, llreturn), @@ -239,6 +229,7 @@ fn build_wrap_fn_(ccx: @mut CrateContext, let llretptr = BitCast(return_context, fcx.llretptr.get(), return_type.ptr_to()); Ret(return_context, Load(return_context, llretptr)); } + fcx.cleanup(); } // For each foreign function F, we generate a wrapper function W and a shim @@ -430,8 +421,7 @@ pub fn trans_foreign_mod(ccx: @mut CrateContext, debug!("build_direct_fn(%s)", link_name(ccx, item)); let fcx = new_fn_ctxt(ccx, ~[], decl, tys.fn_sig.output, None); - let bcx = top_scope_block(fcx, None); - let lltop = bcx.llbb; + let bcx = fcx.entry_bcx.get(); let llbasefn = base_fn(ccx, link_name(ccx, item), tys, cc); let ty = ty::lookup_item_type(ccx.tcx, ast_util::local_def(item.id)).ty; @@ -443,7 +433,7 @@ pub fn trans_foreign_mod(ccx: @mut CrateContext, if !ty::type_is_nil(ret_ty) && !ty::type_is_bot(ret_ty) { Store(bcx, retval, fcx.llretptr.get()); } - finish_fn(fcx, lltop, bcx); + finish_fn(fcx, bcx); } // FIXME (#2535): this is very shaky and probably gets ABIs wrong all @@ -456,8 +446,7 @@ pub fn trans_foreign_mod(ccx: @mut 
CrateContext, debug!("build_fast_ffi_fn(%s)", link_name(ccx, item)); let fcx = new_fn_ctxt(ccx, ~[], decl, tys.fn_sig.output, None); - let bcx = top_scope_block(fcx, None); - let lltop = bcx.llbb; + let bcx = fcx.entry_bcx.get(); let llbasefn = base_fn(ccx, link_name(ccx, item), tys, cc); set_no_inline(fcx.llfn); set_fixed_stack_segment(fcx.llfn); @@ -471,7 +460,7 @@ pub fn trans_foreign_mod(ccx: @mut CrateContext, if !ty::type_is_nil(ret_ty) && !ty::type_is_bot(ret_ty) { Store(bcx, retval, fcx.llretptr.get()); } - finish_fn(fcx, lltop, bcx); + finish_fn(fcx, bcx); } fn build_wrap_fn(ccx: @mut CrateContext, @@ -619,6 +608,7 @@ pub fn trans_intrinsic(ccx: @mut CrateContext, output_type, true, Some(substs), + None, Some(item.span)); set_always_inline(fcx.llfn); @@ -628,7 +618,7 @@ pub fn trans_intrinsic(ccx: @mut CrateContext, set_fixed_stack_segment(fcx.llfn); } - let mut bcx = top_scope_block(fcx, None); + let mut bcx = fcx.entry_bcx.get(); let first_real_arg = fcx.arg_pos(0u); let nm = ccx.sess.str_of(item.ident); @@ -694,6 +684,7 @@ pub fn trans_intrinsic(ccx: @mut CrateContext, } } + fcx.cleanup(); return; } @@ -942,6 +933,7 @@ pub fn trans_intrinsic(ccx: @mut CrateContext, ccx.sess.span_bug(item.span, "unknown intrinsic"); } } + fcx.cleanup(); } /** diff --git a/src/librustc/middle/trans/glue.rs b/src/librustc/middle/trans/glue.rs index 4a2072364e94f..d3f5b9844c930 100644 --- a/src/librustc/middle/trans/glue.rs +++ b/src/librustc/middle/trans/glue.rs @@ -615,7 +615,7 @@ pub fn make_take_glue(bcx: block, v: ValueRef, t: ty::t) -> block { // Zero out the struct unsafe { let ty = Type::from_ref(llvm::LLVMTypeOf(v)); - memzero(bcx, v, ty); + memzero(&B(bcx), v, ty); } } @@ -707,13 +707,12 @@ pub fn make_generic_glue_inner(ccx: @mut CrateContext, // llfn is expected be declared to take a parameter of the appropriate // type, so we don't need to explicitly cast the function parameter. 
- let bcx = top_scope_block(fcx, None); - let lltop = bcx.llbb; + let bcx = fcx.entry_bcx.get(); let rawptr0_arg = fcx.arg_pos(0u); let llrawptr0 = unsafe { llvm::LLVMGetParam(llfn, rawptr0_arg as c_uint) }; let bcx = helper(bcx, llrawptr0, t); - finish_fn(fcx, lltop, bcx); + finish_fn(fcx, bcx); return llfn; } diff --git a/src/librustc/middle/trans/reflect.rs b/src/librustc/middle/trans/reflect.rs index 49f9f4481b98c..6df1df454ff46 100644 --- a/src/librustc/middle/trans/reflect.rs +++ b/src/librustc/middle/trans/reflect.rs @@ -303,7 +303,7 @@ impl Reflector { // llvm::LLVMGetParam(llfdecl, fcx.arg_pos(0u) as c_uint) }; - let mut bcx = top_scope_block(fcx, None); + let mut bcx = fcx.entry_bcx.get(); let arg = BitCast(bcx, arg, llptrty); let ret = adt::trans_get_discr(bcx, repr, arg); Store(bcx, ret, fcx.llretptr.get()); @@ -311,7 +311,7 @@ impl Reflector { Some(llreturn) => cleanup_and_Br(bcx, bcx, llreturn), None => bcx = cleanup_block(bcx, Some(bcx.llbb)) }; - finish_fn(fcx, bcx.llbb, bcx); + finish_fn(fcx, bcx); llfdecl }; diff --git a/src/rustllvm/rustllvm.def.in b/src/rustllvm/rustllvm.def.in index 5b6c3ed2f52f9..48888760fc665 100644 --- a/src/rustllvm/rustllvm.def.in +++ b/src/rustllvm/rustllvm.def.in @@ -409,6 +409,7 @@ LLVMInsertBasicBlock LLVMInsertBasicBlockInContext LLVMInsertIntoBuilder LLVMInsertIntoBuilderWithName +LLVMInstructionEraseFromParent LLVMInt16Type LLVMInt16TypeInContext LLVMInt1Type