diff --git a/crates/rustc_codegen_nvvm/src/abi.rs b/crates/rustc_codegen_nvvm/src/abi.rs
index 60147222..21c5ca33 100644
--- a/crates/rustc_codegen_nvvm/src/abi.rs
+++ b/crates/rustc_codegen_nvvm/src/abi.rs
@@ -4,7 +4,7 @@ use libc::c_uint;
 use rustc_abi::BackendRepr::Scalar;
 use rustc_abi::CanonAbi;
 use rustc_abi::Size;
-use rustc_abi::{HasDataLayout, Primitive, Reg, RegKind};
+use rustc_abi::{Primitive, Reg, RegKind};
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::mir::operand::OperandValue;
 use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
@@ -305,7 +305,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Cast { cast, .. } => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
                 idx += 1;
-                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                llargument_tys.push(cx.type_i8p());
                 cx.type_void()
             }
         };
@@ -353,7 +353,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 attrs: _,
                 meta_attrs: None,
                 on_stack: _,
-            } => cx.type_ptr_to(arg.memory_ty(cx)),
+            } => cx.type_i8p(),
         };
         let (new, changed) = get_transformed_type(cx, llarg_ty);
         if changed {
@@ -378,12 +378,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
-        unsafe {
-            llvm::LLVMPointerType(
-                self.llvm_type(cx),
-                cx.data_layout().instruction_address_space.0 as c_uint,
-            )
-        }
+        cx.type_i8p()
     }
 
     fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
@@ -569,7 +564,6 @@ impl<'tcx> AbiBuilderMethods for Builder<'_, '_, 'tcx> {
 }
 
 pub(crate) trait ArgAbiExt<'ll, 'tcx> {
-    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
     fn store(
         &self,
         bx: &mut Builder<'_, 'll, 'tcx>,
@@ -585,12 +579,6 @@ pub(crate) trait ArgAbiExt<'ll, 'tcx> {
 }
 
 impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
-    /// Gets the LLVM type for a place of the original Rust type of
-    /// this argument/return, i.e., the result of `type_of::type_of`.
-    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
-        self.layout.llvm_type(cx)
-    }
-
     /// Stores a direct/indirect value described by this ArgAbi into a
     /// place for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
diff --git a/crates/rustc_codegen_nvvm/src/allocator.rs b/crates/rustc_codegen_nvvm/src/allocator.rs
index 1403fde3..0798f1fb 100644
--- a/crates/rustc_codegen_nvvm/src/allocator.rs
+++ b/crates/rustc_codegen_nvvm/src/allocator.rs
@@ -140,14 +140,12 @@ pub(crate) unsafe fn codegen(
     unsafe { llvm::LLVMBuildRetVoid(llbuilder) };
     unsafe { llvm::LLVMDisposeBuilder(llbuilder) };
 
-    let ptr_ty = unsafe { llvm::LLVMPointerType(llvm::LLVMInt8TypeInContext(llcx), 0) };
-
     for used in &mut used {
-        *used = unsafe { llvm::LLVMConstBitCast(used, ptr_ty) };
+        *used = unsafe { llvm::LLVMConstBitCast(used, i8p) };
     }
 
     let section = c"llvm.metadata";
-    let array = unsafe { llvm::LLVMConstArray(ptr_ty, used.as_ptr(), used.len() as u32) };
+    let array = unsafe { llvm::LLVMConstArray(i8p, used.as_ptr(), used.len() as u32) };
     let g = unsafe {
         llvm::LLVMAddGlobal(llmod, llvm::LLVMTypeOf(array), c"llvm.used".as_ptr().cast())
     };
diff --git a/crates/rustc_codegen_nvvm/src/builder.rs b/crates/rustc_codegen_nvvm/src/builder.rs
index 08eee9ab..a0940704 100644
--- a/crates/rustc_codegen_nvvm/src/builder.rs
+++ b/crates/rustc_codegen_nvvm/src/builder.rs
@@ -4,7 +4,7 @@ use std::ptr;
 
 use libc::{c_char, c_uint};
 use rustc_abi as abi;
-use rustc_abi::{AddressSpace, Align, HasDataLayout, Size, TargetDataLayout, WrappingRange};
+use rustc_abi::{Align, HasDataLayout, Size, TargetDataLayout, WrappingRange};
 use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, TypeKind};
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
@@ -481,6 +481,10 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         // Cast to default addrspace if necessary
         let alloca_ty = llvm::LLVMTypeOf(alloca);
         let alloca_addrspace = llvm::LLVMGetPointerAddressSpace(alloca_ty);
+        let alloca = self.pointercast(
+            alloca,
+            self.type_i8p_ext(rustc_abi::AddressSpace(alloca_addrspace)),
+        );
         let dest_ty = self.cx().type_ptr();
         let dest_addrspace = llvm::LLVMGetPointerAddressSpace(dest_ty);
         if alloca_addrspace != dest_addrspace {
@@ -493,7 +497,9 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
         trace!("Load {ty:?} {:?}", ptr);
-        let ptr = self.pointercast(ptr, self.cx.type_ptr_to(ty));
+        let ptr = self.pointercast(ptr, unsafe {
+            llvm::LLVMPointerType(ty, llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)))
+        });
         unsafe {
             let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
             llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
@@ -503,7 +509,9 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
         trace!("Volatile load `{:?}`", ptr);
-        let ptr = self.pointercast(ptr, self.cx.type_ptr_to(ty));
+        let ptr = self.pointercast(ptr, unsafe {
+            llvm::LLVMPointerType(ty, llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)))
+        });
         unsafe {
             let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
             llvm::LLVMSetVolatile(load, llvm::True);
@@ -711,14 +719,21 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         align: Align,
         flags: MemFlags,
     ) -> &'ll Value {
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
+
+        let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
+        let store_pointer_ty = unsafe {
+            llvm::LLVMPointerType(self.val_ty(val), address_space)
+        };
+
+        let ptr = unsafe {
+            llvm::LLVMBuildBitCast(self.llbuilder, ptr, store_pointer_ty, c"NAME".as_ptr())
+        };
         trace!(
             "store_with_flags: {:?} into {:?} with align {:?}",
             val,
             ptr,
             align.bytes()
         );
-        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
-        let ptr = self.check_store(val, ptr);
+
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
             let align = if flags.contains(MemFlags::UNALIGNED) {
@@ -757,15 +772,20 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
         trace!("gep: {ty:?} {:?} with indices {:?}", ptr, indices);
-        let ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty));
+        let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
+        let ptr = self.pointercast(ptr, unsafe { llvm::LLVMPointerType(ty, address_space) });
         unsafe {
-            llvm::LLVMBuildGEP2(
+            let res = llvm::LLVMBuildGEP2(
                 self.llbuilder,
                 ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
                 UNNAMED,
+            );
+            self.pointercast(
+                res,
+                self.type_i8p_ext(rustc_abi::AddressSpace(address_space)),
             )
         }
     }
@@ -777,15 +797,20 @@
         &mut self,
         ty: &'ll Type,
         ptr: &'ll Value,
         indices: &[&'ll Value],
     ) -> &'ll Value {
         trace!("gep inbounds: {ty:?} {:?} with indices {:?}", ptr, indices);
-        let ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty));
+        let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
+        let ptr = self.pointercast(ptr, unsafe { llvm::LLVMPointerType(ty, address_space) });
         unsafe {
-            llvm::LLVMBuildInBoundsGEP2(
+            let res = llvm::LLVMBuildInBoundsGEP2(
                 self.llbuilder,
                 ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
                 UNNAMED,
+            );
+            self.pointercast(
+                res,
+                self.type_i8p_ext(rustc_abi::AddressSpace(address_space)),
             )
         }
     }
@@ -892,20 +917,20 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn bitcast(&mut self, mut val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         trace!("Bitcast `{:?}` to ty `{:?}`", val, dest_ty);
-        unsafe {
-            let ty = self.val_ty(val);
-            let kind = llvm::LLVMRustGetTypeKind(ty);
-            if kind == llvm::TypeKind::Pointer {
-                let element = self.element_type(ty);
-                let addrspace = llvm::LLVMGetPointerAddressSpace(ty);
-                let new_ty = self.type_ptr_to_ext(element, AddressSpace::ZERO);
-                if addrspace != 0 {
-                    trace!("injecting addrspace cast for `{:?}` to `{:?}`", ty, new_ty);
-                    val = llvm::LLVMBuildAddrSpaceCast(self.llbuilder, val, new_ty, UNNAMED);
-                }
+
+        let ty = self.val_ty(val);
+        let kind = unsafe { llvm::LLVMRustGetTypeKind(ty) };
+
+        if kind == llvm::TypeKind::Pointer {
+            let element = self.element_type(ty);
+            let addrspace = unsafe { llvm::LLVMGetPointerAddressSpace(ty) };
+            let new_ty = unsafe { llvm::LLVMPointerType(element, 0) };
+            if addrspace != 0 {
+                trace!("injecting addrspace cast for `{:?}` to `{:?}`", ty, new_ty);
+                val = unsafe { llvm::LLVMBuildAddrSpaceCast(self.llbuilder, val, new_ty, UNNAMED) };
             }
-            llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED)
         }
+        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
     }
 
     fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
@@ -1066,6 +1091,7 @@
 
     fn insert_value(&mut self, agg_val: &'ll Value, mut elt: &'ll Value, idx: u64) -> &'ll Value {
         trace!("insert value {:?}, {:?}, {:?}", agg_val, elt, idx);
+
         assert_eq!(idx as c_uint as u64, idx);
 
         let elt_ty = self.cx.val_ty(elt);
@@ -1168,9 +1194,15 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 );
             }
         };
+        let tuple = self.type_struct(&[self.val_ty(src), self.type_i1()], false);
         let res = self.atomic_op(
             dst,
-            |builder, dst| {
+            tuple,
+            |builder, dst, _| {
+                let address_space =
+                    unsafe { llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)) };
+                let dst_ty = unsafe { llvm::LLVMPointerType(builder.val_ty(cmp), address_space) };
+                let dst = builder.pointercast(dst, dst_ty);
                 // We are in a supported address space - just use ordinary atomics
                 unsafe {
                     llvm::LLVMRustBuildAtomicCmpXchg(
@@ -1184,7 +1216,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     )
                 }
             },
-            |builder, dst| {
+            |builder, dst, _| {
+                let dst = builder.pointercast(dst, unsafe {
+                    llvm::LLVMPointerType(
+                        builder.val_ty(cmp),
+                        llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)),
+                    )
+                });
                 // Local space is only accessible to the current thread.
                 // So, there are no synchronization issues, and we can emulate it using a simple load / compare / store.
                 let load: &'ll Value =
@@ -1221,8 +1259,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
         self.atomic_op(
             dst,
-            |builder, dst| {
+            self.val_ty(src),
+            |builder, dst, ty| {
                 // We are in a supported address space - just use ordinary atomics
+                let address_space =
+                    unsafe { llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)) };
+                let dst_ty = unsafe { llvm::LLVMPointerType(ty, address_space) };
+                let dst = builder.pointercast(dst, dst_ty);
                 unsafe {
                     llvm::LLVMBuildAtomicRMW(
                         builder.llbuilder,
@@ -1234,9 +1277,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     )
                 }
             },
-            |builder, dst| {
+            |builder, dst, ty| {
                 // Local space is only accessible to the current thread.
                 // So, there are no synchronization issues, and we can emulate it using a simple load / compare / store.
+                let dst = builder.pointercast(dst, unsafe {
+                    llvm::LLVMPointerType(ty, llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)))
+                });
+
                 let load: &'ll Value =
                     unsafe { llvm::LLVMBuildLoad(builder.llbuilder, dst, UNNAMED) };
                 let next_val = match op {
@@ -1314,6 +1361,15 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("call", llty, llfn, args);
 
         let mut call = unsafe {
+            let llfn = if self.cx.type_kind(llty) == TypeKind::Pointer {
+                self.pointercast(llfn, llty)
+            } else if self.cx.type_kind(self.val_ty(llfn)) == TypeKind::Pointer {
+                let target_fnptr = llvm::LLVMPointerType(llty, 0);
+                self.pointercast(llfn, target_fnptr)
+            } else {
+                llfn
+            };
+
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
                 llfn,
@@ -1642,20 +1698,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
     fn noundef_metadata(&mut self, _load: &'ll Value) {}
 
-    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
-        let dest_ptr_ty = self.cx.val_ty(ptr);
-        let stored_ty = self.cx.val_ty(val);
-        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-
-        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
-        if dest_ptr_ty == stored_ptr_ty {
-            ptr
-        } else {
-            self.bitcast(ptr, stored_ptr_ty)
-        }
-    }
-
     fn check_call<'b>(
         &mut self,
         typ: &str,
@@ -1719,7 +1761,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         if !self.cx().sess().emit_lifetime_markers() {
             return;
         }
-
         self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
     }
 
@@ -1750,8 +1791,9 @@ impl<'ll, 'tcx, 'a> Builder<'a, 'll, 'tcx> {
     fn atomic_op(
         &mut self,
        dst: &'ll Value,
-        atomic_supported: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value) -> &'ll Value,
-        emulate_local: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value) -> &'ll Value,
+        ty: &'ll Type,
+        atomic_supported: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value, &'ll Type) -> &'ll Value,
+        emulate_local: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value, &'ll Type) -> &'ll Value,
     ) -> &'ll Value {
         // (FractalFir) Atomics in CUDA have some limitations, and we have to work around them.
         // For example, they are restricted in what address space they operate on.
@@ -1803,7 +1845,7 @@
         let merge_bb = self.append_sibling_block("atomic_op_done");
         // Execute atomic op if supported, then jump to merge
         self.switch_to_block(supported_bb);
-        let supported_res = atomic_supported(self, dst);
+        let supported_res = atomic_supported(self, dst, ty);
         self.br(merge_bb);
         // Check if the pointer is in the thread space. If so, we can emulate it.
         self.switch_to_block(unsupported_bb);
@@ -1822,7 +1864,7 @@
         self.cond_br(isspacep_local, local_bb, atomic_ub_bb);
         // The pointer is in the thread(local) space.
         self.switch_to_block(local_bb);
-        let local_res = emulate_local(self, dst);
+        let local_res = emulate_local(self, dst, ty);
         self.br(merge_bb);
         // The pointer is neither in the supported address space, nor the local space.
         // This is very likely UB. So, we trap here.
@@ -1830,7 +1872,7 @@
         self.switch_to_block(atomic_ub_bb);
         self.abort();
         self.unreachable();
-        // Atomic is impl has finished, and we can now switch to the merge_bb
+        // Atomic impl has finished, and we can now switch to the merge_bb
         self.switch_to_block(merge_bb);
         self.phi(
             self.val_ty(local_res),
diff --git a/crates/rustc_codegen_nvvm/src/const_ty.rs b/crates/rustc_codegen_nvvm/src/const_ty.rs
index 10b28191..92734105 100644
--- a/crates/rustc_codegen_nvvm/src/const_ty.rs
+++ b/crates/rustc_codegen_nvvm/src/const_ty.rs
@@ -1,5 +1,5 @@
 use crate::llvm::{self, Bool, False, True, Type, Value};
-use crate::{consts::const_alloc_to_llvm, context::CodegenCx, ty::LayoutLlvmExt};
+use crate::{consts::const_alloc_to_llvm, context::CodegenCx};
 use libc::c_uint;
 use rustc_abi as abi;
 use rustc_abi::Primitive::Pointer;
@@ -11,7 +11,6 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_hashes::Hash128;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::LayoutOf;
 use tracing::trace;
 
 impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
@@ -99,7 +98,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
                 g
             });
             let len = s.len();
-            let ty = self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self));
+            let ty = self.type_i8p();
             let cs = unsafe { llvm::LLVMConstPointerCast(val, ty) };
             (cs, self.const_usize(len as u64))
         }
diff --git a/crates/rustc_codegen_nvvm/src/ty.rs b/crates/rustc_codegen_nvvm/src/ty.rs
index b88a2dea..8a31c60f 100644
--- a/crates/rustc_codegen_nvvm/src/ty.rs
+++ b/crates/rustc_codegen_nvvm/src/ty.rs
@@ -55,9 +55,7 @@ impl Type {
 
 impl<'ll> CodegenCx<'ll, '_> {
     pub(crate) fn voidp(&self) -> &'ll Type {
-        // llvm uses i8* for void ptrs, void* is invalid
-        let i8_ty = self.type_i8();
-        self.type_ptr_to_ext(i8_ty, AddressSpace::ZERO)
+        self.type_i8p()
     }
 
     pub(crate) fn type_named_struct(&self, name: &str) -> &'ll Type {
@@ -108,7 +106,7 @@ impl<'ll> CodegenCx<'ll, '_> {
     pub(crate) fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
         unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
     }
-
+    #[track_caller]
     pub(crate) fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
         assert_ne!(
             self.type_kind(ty),
@@ -116,11 +114,16 @@
             "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
         );
 
-        unsafe { llvm::LLVMPointerType(ty, AddressSpace::ZERO.0) }
+        self.type_ptr_to_ext(ty, AddressSpace::ZERO)
     }
-
+    #[track_caller]
     pub(crate) fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
-        unsafe { llvm::LLVMPointerType(ty, address_space.0) }
+        assert_eq!(
+            ty,
+            self.type_ix(8),
+            "rustc_codegen_nvvm uses opaque pointers - specifying pointer type other than `i8` is not valid!"
+        );
+        unsafe { llvm::LLVMPointerType(self.type_ix(8), address_space.0) }
     }
 
     pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
@@ -133,11 +136,6 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
     }
 
-    pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
-        let ity = Integer::approximate_align(self, align);
-        self.type_from_integer(ity)
-    }
-
     /// Return a LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
     pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
@@ -219,7 +217,11 @@ impl<'ll, 'tcx> BaseTypeCodegenMethods for CodegenCx<'ll, 'tcx> {
     }
 
     fn element_type(&self, ty: &'ll Type) -> &'ll Type {
-        unsafe { llvm::LLVMGetElementType(ty) }
+        let res = unsafe { llvm::LLVMGetElementType(ty) };
+        if self.type_kind(ty) == TypeKind::Pointer {
+            //assert_eq!(self.type_kind(res),TypeKind::Function, "{ty:?} is a pointer, and getting its pointee is a nonsense operation.");
+        }
+        res
     }
 
     fn vector_length(&self, ty: &'ll Type) -> usize {
@@ -340,12 +342,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         }
 
         let llty = match *self.ty.kind() {
-            ty::Ref(_, ty, _) | ty::RawPtr(ty, _) => {
-                cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
-            }
-            ty::Adt(def, _) if def.is_box() => {
-                cx.type_ptr_to(cx.layout_of(self.ty.expect_boxed_ty()).llvm_type(cx))
-            }
+            ty::Ref(_, _, _) | ty::RawPtr(_, _) => cx.type_i8p(),
+            ty::Adt(def, _) if def.is_box() => cx.type_i8p(),
             ty::FnPtr(sig, hdr) => {
                 cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig.with(hdr), ty::List::empty()))
             }
@@ -426,17 +424,14 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             Float(f) => cx.type_from_float(f),
             Pointer(address_space) => {
                 // If we know the alignment, pick something better than i8.
-                let (pointee, address_space) = if let Some(PointeeInfo {
-                    safe: Some(_),
-                    align,
-                    ..
-                }) = self.pointee_info_at(cx, Size::ZERO)
+                let address_space = if let Some(PointeeInfo { safe: Some(_), .. }) =
+                    self.pointee_info_at(cx, Size::ZERO)
                 {
-                    (cx.type_pointee_for_align(align), address_space)
+                    address_space
                 } else {
-                    (cx.type_i8(), AddressSpace::ZERO)
+                    AddressSpace::ZERO
                 };
-                cx.type_ptr_to_ext(pointee, address_space)
+                cx.type_i8p_ext(address_space)
             }
         }
     }
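
Reviewer note (not part of the patch): every hunk above follows the same discipline. The backend emulates LLVM's opaque pointers on NVVM's typed-pointer IR by carrying every pointer around as `i8*` in its original address space, and bitcasting to a correctly typed pointer immediately before each typed operation (load, store, GEP, atomic cmpxchg/RMW). A minimal sketch of that use-site cast, written against the same crate-internal `llvm` bindings the patch itself calls; the helper name `cast_to_typed` is hypothetical and does not exist in the patch:

    /// Bitcast an `i8*`-typed `ptr` to `ty*` in the SAME address space, so a
    /// typed instruction sees the pointee type it expects while the rest of
    /// the backend only ever passes `i8*` around. The address space must be
    /// preserved: an addrspace(5) alloca stays addrspace(5); only the
    /// pointee type changes.
    fn cast_to_typed<'ll>(
        bx: &mut Builder<'_, 'll, '_>,
        ptr: &'ll Value,
        ty: &'ll Type,
    ) -> &'ll Value {
        // Read the address space off the incoming pointer value...
        let addrspace = unsafe { llvm::LLVMGetPointerAddressSpace(bx.val_ty(ptr)) };
        // ...and build `ty*` in that same address space before casting.
        let typed_ptr_ty = unsafe { llvm::LLVMPointerType(ty, addrspace) };
        bx.pointercast(ptr, typed_ptr_ty)
    }

This is also why `type_ptr_to_ext` now asserts that its argument is `i8`: any other pointee type would reintroduce typed pointers into the parts of the backend that are supposed to see only `i8*`.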