// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Set and unset common attributes on LLVM values.
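//!
//! Each function here takes a raw `ValueRef` naming an LLVM function and
//! toggles a single attribute through the FFI bindings in `llvm`, pairing a
//! setter with the matching remover so callers can both apply and clear an
//! attribute from one entry point.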

use libc::{c_uint, c_ulonglong};
use llvm::{self, ValueRef, AttrHelper};
use middle::ty::{self, ClosureTyper};
use syntax::abi;
use syntax::ast;
pub use syntax::attr::InlineAttr;
use trans::base;
use trans::common;
use trans::context::CrateContext;
use trans::machine;
use trans::type_of;

/// Mark an LLVM function to use the split-stack convention.
#[inline]
pub fn split_stack(val: ValueRef, set: bool) {
    unsafe {
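        // "split-stack" is a string attribute, so it goes through the
        // string-based FFI entry points rather than the enum-based
        // `SetFunctionAttribute` used elsewhere in this module.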
        let attr = "split-stack\0".as_ptr() as *const _;
        if set {
            llvm::LLVMAddFunctionAttrString(val, llvm::FunctionIndex as c_uint, attr);
        } else {
            llvm::LLVMRemoveFunctionAttrString(val, llvm::FunctionIndex as c_uint, attr);
        }
    }
}

/// Mark an LLVM function with the provided inline heuristic: `Hint` maps to
/// LLVM's `inlinehint`, `Always` to `alwaysinline`, and `Never` to `noinline`.
#[inline]
pub fn inline(val: ValueRef, inline: InlineAttr) {
    use self::InlineAttr::*;
    match inline {
        Hint => llvm::SetFunctionAttribute(val, llvm::InlineHintAttribute),
        Always => llvm::SetFunctionAttribute(val, llvm::AlwaysInlineAttribute),
        Never => llvm::SetFunctionAttribute(val, llvm::NoInlineAttribute),
        None => {
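            // `None` means "no explicit request", so clear all three inline
            // attributes at once; the removal FFI call takes a raw bit mask.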
            let attr = llvm::InlineHintAttribute |
                       llvm::AlwaysInlineAttribute |
                       llvm::NoInlineAttribute;
            unsafe {
                llvm::LLVMRemoveFunctionAttr(val, attr.bits() as c_ulonglong)
            }
        },
    };
}

/// Tell LLVM whether to emit the information necessary to unwind the stack
/// for the function.
#[inline]
pub fn emit_uwtable(val: ValueRef, emit: bool) {
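    // `uwtable` forces an unwind table entry to be emitted even for functions
    // that provably cannot unwind, which backtrace generation relies on.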
    if emit {
        llvm::SetFunctionAttribute(val, llvm::UWTableAttribute);
    } else {
        unsafe {
            llvm::LLVMRemoveFunctionAttr(val, llvm::UWTableAttribute.bits() as c_ulonglong);
        }
    }
}

/// Tell LLVM whether the function can or cannot unwind.
#[inline]
#[allow(dead_code)] // possibly useful function
pub fn unwind(val: ValueRef, can_unwind: bool) {
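    // The LLVM attribute is `nounwind`, so the flag here is inverted:
    // removing the attribute is what allows unwinding.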
    if can_unwind {
        unsafe {
            llvm::LLVMRemoveFunctionAttr(val, llvm::NoUnwindAttribute.bits() as c_ulonglong);
        }
    } else {
        llvm::SetFunctionAttribute(val, llvm::NoUnwindAttribute);
    }
}

/// Tell LLVM whether it should optimize the function for size.
#[inline]
#[allow(dead_code)] // possibly useful function
pub fn set_optimize_for_size(val: ValueRef, optimize: bool) {
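    // Corresponds to LLVM's `optsize`, which trades runtime performance for
    // smaller generated code.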
    if optimize {
        llvm::SetFunctionAttribute(val, llvm::OptimizeForSizeAttribute);
    } else {
        unsafe {
            llvm::LLVMRemoveFunctionAttr(val, llvm::OptimizeForSizeAttribute.bits() as c_ulonglong);
        }
    }
}

/// Composite function which sets LLVM attributes for a function depending on
/// its AST (`#[attribute]`) attributes.
pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) {
    use syntax::attr::*;
    inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs));

    for attr in attrs {
        if attr.check_name("no_stack_check") {
            split_stack(llfn, false);
        } else if attr.check_name("cold") {
            unsafe {
                llvm::LLVMAddFunctionAttribute(llfn,
                                               llvm::FunctionIndex as c_uint,
                                               llvm::ColdAttribute as u64)
            }
        } else if attr.check_name("allocator") {
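            // An allocator returns a fresh pointer by definition, so its
            // return value can be marked `noalias`.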
            llvm::NoAliasAttribute.apply_llfn(llvm::ReturnIndex as c_uint, llfn);
        }
    }
}

/// Composite function which converts a function type into LLVM attributes for
/// the function.
pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>)
                              -> llvm::AttrBuilder {
    use middle::ty::{BrAnon, ReLateBound};

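    // `function_type` is declared before the match so the closure arm can
    // hand back a reference into it (`&function_type.sig`) that outlives
    // the match expression.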
    let function_type;
    let (fn_sig, abi, env_ty) = match fn_type.sty {
        ty::ty_bare_fn(_, ref f) => (&f.sig, f.abi, None),
        ty::ty_closure(closure_did, substs) => {
            let typer = common::NormalizingClosureTyper::new(ccx.tcx());
            function_type = typer.closure_type(closure_did, substs);
            let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
            (&function_type.sig, abi::RustCall, Some(self_type))
        }
        _ => ccx.sess().bug("expected closure or function.")
    };

    let fn_sig = ty::erase_late_bound_regions(ccx.tcx(), fn_sig);

    let mut attrs = llvm::AttrBuilder::new();
    let ret_ty = fn_sig.output;

    // These have an odd calling convention, so we need to manually
    // unpack the input types.
    let input_tys = match fn_type.sty {
        ty::ty_closure(..) => {
            assert!(abi == abi::RustCall);

            match fn_sig.inputs[0].sty {
                ty::ty_tup(ref inputs) => {
                    let mut full_inputs = vec![env_ty.expect("Missing closure environment")];
                    full_inputs.push_all(inputs);
                    full_inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        },
        ty::ty_bare_fn(..) if abi == abi::RustCall => {
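            // Under the `RustCall` ABI the second input is a tuple holding
            // the remaining arguments; flatten it into the argument list.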
            let mut inputs = vec![fn_sig.inputs[0]];

            match fn_sig.inputs[1].sty {
                ty::ty_tup(ref t_in) => {
                    inputs.push_all(&t_in[..]);
                    inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        }
        _ => fn_sig.inputs.clone()
    };

    // Index 0 is the return value of the llvm func, so we start at 1
    let mut first_arg_offset = 1;
    if let ty::FnConverging(ret_ty) = ret_ty {
        // A function pointer is called without the declaration
        // available, so we have to apply any attributes with ABI
        // implications directly to the call instruction. Right now,
        // the only attribute we need to worry about is `sret`.
        if type_of::return_uses_outptr(ccx, ret_ty) {
            let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));

            // The outptr can be noalias and nocapture because it's entirely
            // invisible to the program. We also know it's nonnull as well
            // as how many bytes we can dereference
            attrs.arg(1, llvm::StructRetAttribute)
                 .arg(1, llvm::NoAliasAttribute)
                 .arg(1, llvm::NoCaptureAttribute)
                 .arg(1, llvm::DereferenceableAttribute(llret_sz));

            // Add one more since there's an outptr
            first_arg_offset += 1;
        } else {
            // The `noalias` attribute on the return value is useful to a
            // function ptr caller.
            match ret_ty.sty {
                // `~` pointer return values never alias because ownership
                // is transferred
                ty::ty_uniq(it) if common::type_is_sized(ccx.tcx(), it) => {
                    attrs.ret(llvm::NoAliasAttribute);
                }
                _ => {}
            }

            // We can also mark the return value as `dereferenceable` in certain cases
            match ret_ty.sty {
                // Fat pointers are (pointer, len) pairs rather than plain
                // pointers, so only sized pointees qualify here
                ty::ty_rptr(_, ty::mt { ty: inner, .. })
                | ty::ty_uniq(inner) if common::type_is_sized(ccx.tcx(), inner) => {
                    let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
                    attrs.ret(llvm::DereferenceableAttribute(llret_sz));
                }
                _ => {}
            }

            // LLVM represents `bool` as `i1`, which is zero-extended at the
            // ABI boundary
            if let ty::ty_bool = ret_ty.sty {
                attrs.ret(llvm::ZExtAttribute);
            }
        }
    }

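    // Per-argument attributes. `idx` is the LLVM argument index: slot 0 is
    // the return value, so real arguments start at 1, shifted one further
    // when an sret out-pointer occupies slot 1.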
    for (idx, &t) in input_tys.iter().enumerate().map(|(i, v)| (i + first_arg_offset, v)) {
        match t.sty {
            // this needs to be first to prevent fat pointers from falling through
            _ if !common::type_is_immediate(ccx, t) => {
                let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t));

                // For non-immediate arguments the callee gets its own copy of
                // the value on the stack, so there are no aliases. It's also
                // program-invisible so can't possibly capture
                attrs.arg(idx, llvm::NoAliasAttribute)
                     .arg(idx, llvm::NoCaptureAttribute)
                     .arg(idx, llvm::DereferenceableAttribute(llarg_sz));
            }

            ty::ty_bool => {
                attrs.arg(idx, llvm::ZExtAttribute);
            }

            // `~` pointer parameters never alias because ownership is transferred
            ty::ty_uniq(inner) => {
                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));

                attrs.arg(idx, llvm::NoAliasAttribute)
                     .arg(idx, llvm::DereferenceableAttribute(llsz));
            }

            // `&mut` pointer parameters never alias other parameters, or mutable global data
            //
            // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as both
            // `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on
            // memory dependencies rather than pointer equality
            ty::ty_rptr(b, mt) if mt.mutbl == ast::MutMutable ||
                                  !ty::type_contents(ccx.tcx(), mt.ty).interior_unsafe() => {

                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                attrs.arg(idx, llvm::NoAliasAttribute)
                     .arg(idx, llvm::DereferenceableAttribute(llsz));

                if mt.mutbl == ast::MutImmutable {
                    attrs.arg(idx, llvm::ReadOnlyAttribute);
                }

                if let ReLateBound(_, BrAnon(_)) = *b {
                    attrs.arg(idx, llvm::NoCaptureAttribute);
                }
            }

            // When a reference in an argument has no named lifetime, it's impossible for that
            // reference to escape this function (returned or stored beyond the call by a closure).
            ty::ty_rptr(&ReLateBound(_, BrAnon(_)), mt) => {
                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                attrs.arg(idx, llvm::NoCaptureAttribute)
                     .arg(idx, llvm::DereferenceableAttribute(llsz));
            }

            // & pointer parameters are also never null and we know exactly how
            // many bytes we can dereference
            ty::ty_rptr(_, mt) => {
                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
            }
            _ => ()
        }
    }

    attrs
}
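
// A minimal usage sketch (hypothetical caller; `ccx`, `attrs`, `fn_ty`, and
// `llfn` are assumed to be in scope during function translation):
//
//     from_fn_type(ccx, fn_ty).apply_llfn(llfn);
//     from_fn_attrs(ccx, attrs, llfn);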