use rustc crates instead of copy paste
hkalbasi 2022-12-07
parent f2c9502 · commit 05906da
-rw-r--r--  Cargo.lock                            24
-rw-r--r--  crates/hir-def/Cargo.toml              2
-rw-r--r--  crates/hir-def/src/adt.rs             26
-rw-r--r--  crates/hir-def/src/layout.rs        1137
-rw-r--r--  crates/hir-ty/Cargo.toml               1
-rw-r--r--  crates/hir-ty/src/infer.rs            25
-rw-r--r--  crates/hir-ty/src/layout.rs           89
-rw-r--r--  crates/hir-ty/src/layout/adt.rs      961
-rw-r--r--  crates/hir-ty/src/layout/target.rs     2
-rw-r--r--  crates/hir-ty/src/layout/tests.rs     68
-rw-r--r--  crates/hir/src/lib.rs                 26
-rw-r--r--  crates/ide/src/hover/render.rs         5
-rw-r--r--  crates/ide/src/hover/tests.rs          2
13 files changed, 288 insertions, 2080 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 84dda206db..589e932227 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -510,6 +510,8 @@ dependencies = [
"fst",
"hashbrown",
"hir-expand",
+ "hkalbasi-rustc-ap-rustc_abi",
+ "hkalbasi-rustc-ap-rustc_index",
"indexmap",
"itertools",
"la-arena",
@@ -564,6 +566,7 @@ dependencies = [
"expect-test",
"hir-def",
"hir-expand",
+ "hkalbasi-rustc-ap-rustc_index",
"itertools",
"la-arena",
"limit",
@@ -582,6 +585,27 @@ dependencies = [
]
[[package]]
+name = "hkalbasi-rustc-ap-rustc_abi"
+version = "0.0.20221125"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29c8368a30e518c0102d670d8515f7d424d875ee615ec7a7b6d29217b57a0371"
+dependencies = [
+ "bitflags",
+ "hkalbasi-rustc-ap-rustc_index",
+ "tracing",
+]
+
+[[package]]
+name = "hkalbasi-rustc-ap-rustc_index"
+version = "0.0.20221125"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c07bba80d7f6a8e1efb0f3e2115ef1eecbf97292dc8cad84e4982226b9aa12e2"
+dependencies = [
+ "arrayvec",
+ "smallvec",
+]
+
+[[package]]
name = "home"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/crates/hir-def/Cargo.toml b/crates/hir-def/Cargo.toml
index 22f98ea7cd..9ecce46601 100644
--- a/crates/hir-def/Cargo.toml
+++ b/crates/hir-def/Cargo.toml
@@ -33,6 +33,8 @@ base-db = { path = "../base-db", version = "0.0.0" }
syntax = { path = "../syntax", version = "0.0.0" }
profile = { path = "../profile", version = "0.0.0" }
hir-expand = { path = "../hir-expand", version = "0.0.0" }
+rustc_abi = { version = "0.0.20221125", package = "hkalbasi-rustc-ap-rustc_abi", default-features = false }
+rustc_index = { version = "0.0.20221125", package = "hkalbasi-rustc-ap-rustc_index", default-features = false }
mbe = { path = "../mbe", version = "0.0.0" }
cfg = { path = "../cfg", version = "0.0.0" }
tt = { path = "../tt", version = "0.0.0" }
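The `package` key above is Cargo's dependency-renaming mechanism: the published crate is `hkalbasi-rustc-ap-rustc_abi`, but it is exposed to this crate under the short name `rustc_abi`. A minimal sketch of what that enables in this crate's source:

```rust
// Because Cargo.toml renames the dependency, source files import the short
// name rather than the published package name:
use rustc_abi::{Integer, IntegerType};
```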
diff --git a/crates/hir-def/src/adt.rs b/crates/hir-def/src/adt.rs
index 62efc40986..feed432148 100644
--- a/crates/hir-def/src/adt.rs
+++ b/crates/hir-def/src/adt.rs
@@ -9,6 +9,7 @@ use hir_expand::{
HirFileId, InFile,
};
use la_arena::{Arena, ArenaMap};
+use rustc_abi::{Integer, IntegerType};
use syntax::ast::{self, HasName, HasVisibility};
use tt::{Delimiter, DelimiterKind, Leaf, Subtree, TokenTree};
@@ -127,7 +128,24 @@ fn parse_repr_tt(tt: &Subtree) -> Option<ReprOptions> {
.map(Either::Left)
.or_else(|| BuiltinUint::from_suffix(repr).map(Either::Right))
{
- int = Some(builtin);
+ int = Some(match builtin {
+ Either::Left(bi) => match bi {
+ BuiltinInt::Isize => IntegerType::Pointer(true),
+ BuiltinInt::I8 => IntegerType::Fixed(Integer::I8, true),
+ BuiltinInt::I16 => IntegerType::Fixed(Integer::I16, true),
+ BuiltinInt::I32 => IntegerType::Fixed(Integer::I32, true),
+ BuiltinInt::I64 => IntegerType::Fixed(Integer::I64, true),
+ BuiltinInt::I128 => IntegerType::Fixed(Integer::I128, true),
+ },
+ Either::Right(bu) => match bu {
+ BuiltinUint::Usize => IntegerType::Pointer(false),
+ BuiltinUint::U8 => IntegerType::Fixed(Integer::I8, false),
+ BuiltinUint::U16 => IntegerType::Fixed(Integer::I16, false),
+ BuiltinUint::U32 => IntegerType::Fixed(Integer::I32, false),
+ BuiltinUint::U64 => IntegerType::Fixed(Integer::I64, false),
+ BuiltinUint::U128 => IntegerType::Fixed(Integer::I128, false),
+ },
+ });
}
ReprFlags::empty()
}
@@ -135,7 +153,7 @@ fn parse_repr_tt(tt: &Subtree) -> Option<ReprOptions> {
}
}
- Some(ReprOptions { int, align: max_align, pack: min_pack, flags })
+ Some(ReprOptions { int, align: max_align, pack: min_pack, flags, field_shuffle_seed: 0 })
}
impl StructData {
@@ -276,10 +294,10 @@ impl EnumData {
Some(id)
}
- pub fn variant_body_type(&self) -> Either<BuiltinInt, BuiltinUint> {
+ pub fn variant_body_type(&self) -> IntegerType {
match self.repr {
Some(ReprOptions { int: Some(builtin), .. }) => builtin,
- _ => Either::Left(BuiltinInt::Isize),
+ _ => IntegerType::Pointer(true),
}
}
}
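For orientation, a sketch of the mapping this hunk introduces: `#[repr]` integer suffixes now parse into `rustc_abi::IntegerType` instead of the old `Either<BuiltinInt, BuiltinUint>`, with `isize`/`usize` becoming the pointer-sized case and all other suffixes a fixed-width case.

```rust
use rustc_abi::{Integer, IntegerType};

// Illustrative values, not part of the patch: what a few repr suffixes map to.
fn examples() -> [IntegerType; 3] {
    [
        IntegerType::Fixed(Integer::I8, false), // #[repr(u8)]
        IntegerType::Fixed(Integer::I64, true), // #[repr(i64)]
        IntegerType::Pointer(true),             // no repr: variant_body_type() falls back to isize
    ]
}
```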
diff --git a/crates/hir-def/src/layout.rs b/crates/hir-def/src/layout.rs
index cc8177376f..a427c464bc 100644
--- a/crates/hir-def/src/layout.rs
+++ b/crates/hir-def/src/layout.rs
@@ -1,419 +1,48 @@
-//! Definitions related to binary representations of types
+//! Definitions needed for computing data layout of types.
-use bitflags::bitflags;
-use either::Either;
-use std::{
- cmp, fmt,
- num::NonZeroUsize,
- ops::{Add, AddAssign, Mul, Sub},
-};
+use std::cmp;
-use crate::{
- builtin_type::{BuiltinInt, BuiltinUint},
- LocalEnumVariantId,
+use la_arena::{Idx, RawIdx};
+pub use rustc_abi::{
+ Abi, AbiAndPrefAlign, AddressSpace, Align, Endian, FieldsShape, Integer, IntegerType,
+ LayoutCalculator, Niche, Primitive, ReprFlags, ReprOptions, Scalar, Size, StructKind,
+ TargetDataLayout, WrappingRange,
};
-use la_arena::ArenaMap;
-
-/// Size of a type in bytes.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct Size {
- raw: u64,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Size {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "Size({} bytes)", self.raw)
- }
-}
-
-// Panicking addition, subtraction and multiplication for convenience.
-// Avoid during layout computation, return `LayoutError` instead.
-
-impl Add for Size {
- type Output = Size;
- #[inline]
- fn add(self, other: Size) -> Size {
- Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
- panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
- }))
- }
-}
-
-impl Sub for Size {
- type Output = Size;
- #[inline]
- fn sub(self, other: Size) -> Size {
- Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
- panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
- }))
- }
-}
-
-impl Mul<Size> for u64 {
- type Output = Size;
- #[inline]
- fn mul(self, size: Size) -> Size {
- size * self
- }
-}
-
-impl Mul<u64> for Size {
- type Output = Size;
- #[inline]
- fn mul(self, count: u64) -> Size {
- match self.bytes().checked_mul(count) {
- Some(bytes) => Size::from_bytes(bytes),
- None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
- }
- }
-}
-
-impl AddAssign for Size {
- #[inline]
- fn add_assign(&mut self, other: Size) {
- *self = *self + other;
- }
-}
-
-impl Size {
- pub const ZERO: Size = Size { raw: 0 };
-
- /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
- /// not a multiple of 8.
- pub fn from_bits(bits: impl TryInto<u64>) -> Size {
- let bits = bits.try_into().ok().unwrap();
- // Avoid potential overflow from `bits + 7`.
- Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
- }
-
- #[inline]
- pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
- let bytes: u64 = bytes.try_into().ok().unwrap();
- Size { raw: bytes }
- }
-
- #[inline]
- pub fn bytes(self) -> u64 {
- self.raw
- }
-
- #[inline]
- pub fn bytes_usize(self) -> usize {
- self.bytes().try_into().unwrap()
- }
-
- #[inline]
- pub fn bits(self) -> u64 {
- #[cold]
- fn overflow(bytes: u64) -> ! {
- panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
- }
-
- self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
- }
-
- #[inline]
- pub fn bits_usize(self) -> usize {
- self.bits().try_into().unwrap()
- }
-
- #[inline]
- pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
- let bytes = self.bytes().checked_add(offset.bytes())?;
- if bytes < dl.obj_size_bound() {
- Some(Size::from_bytes(bytes))
- } else {
- None
- }
- }
-
- #[inline]
- pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
- let bytes = self.bytes().checked_mul(count)?;
- if bytes < dl.obj_size_bound() {
- Some(Size::from_bytes(bytes))
- } else {
- None
- }
- }
-
- #[inline]
- pub fn align_to(self, align: Align) -> Size {
- let mask = align.bytes() - 1;
- Size::from_bytes((self.bytes() + mask) & !mask)
- }
-
- #[inline]
- pub fn is_aligned(self, align: Align) -> bool {
- let mask = align.bytes() - 1;
- self.bytes() & mask == 0
- }
-
- /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
- /// (i.e., if it is negative, fill with 1's on the left).
- #[inline]
- pub fn sign_extend(self, value: u128) -> u128 {
- let size = self.bits();
- if size == 0 {
- // Truncated until nothing is left.
- return 0;
- }
- // Sign-extend it.
- let shift = 128 - size;
- // Shift the unsigned value to the left, then shift back to the right as signed
- // (essentially fills with sign bit on the left).
- (((value << shift) as i128) >> shift) as u128
- }
-
- /// Truncates `value` to `self` bits.
- #[inline]
- pub fn truncate(self, value: u128) -> u128 {
- let size = self.bits();
- if size == 0 {
- // Truncated until nothing is left.
- return 0;
- }
- let shift = 128 - size;
- // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
- (value << shift) >> shift
- }
+use crate::LocalEnumVariantId;
- #[inline]
- pub fn signed_int_min(&self) -> i128 {
- self.sign_extend(1_u128 << (self.bits() - 1)) as i128
- }
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct RustcEnumVariantIdx(pub LocalEnumVariantId);
- #[inline]
- pub fn signed_int_max(&self) -> i128 {
- i128::MAX >> (128 - self.bits())
+impl rustc_index::vec::Idx for RustcEnumVariantIdx {
+ fn new(idx: usize) -> Self {
+ RustcEnumVariantIdx(Idx::from_raw(RawIdx::from(idx as u32)))
}
- #[inline]
- pub fn unsigned_int_max(&self) -> u128 {
- u128::MAX >> (128 - self.bits())
+ fn index(self) -> usize {
+ u32::from(self.0.into_raw()) as usize
}
}
-#[derive(Copy, Clone, Debug)]
-pub enum StructKind {
- /// A tuple, closure, or univariant which cannot be coerced to unsized.
- AlwaysSized,
- /// A univariant, the last field of which may be coerced to unsized.
- MaybeUnsized,
- /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
- Prefixed(Size, Align),
-}
-
-/// Describes how the fields of a type are located in memory.
-#[derive(PartialEq, Eq, Hash, Debug, Clone)]
-pub enum FieldsShape {
- /// Scalar primitives and `!`, which never have fields.
- Primitive,
-
- /// All fields start at no offset. The `usize` is the field count.
- Union(NonZeroUsize),
-
- /// Array/vector-like placement, with all fields of identical types.
- Array { stride: Size, count: u64 },
-
- /// Struct-like placement, with precomputed offsets.
- ///
- /// Fields are guaranteed to not overlap, but note that gaps
- /// before, between and after all the fields are NOT always
- /// padding, and as such their contents may not be discarded.
- /// For example, enum variants leave a gap at the start,
- /// where the discriminant field in the enum layout goes.
- Arbitrary {
- /// Offsets for the first byte of each field,
- /// ordered to match the source definition order.
- /// This vector does not go in increasing order.
- // FIXME(eddyb) use small vector optimization for the common case.
- offsets: Vec<Size>,
-
- /// Maps source order field indices to memory order indices,
- /// depending on how the fields were reordered (if at all).
- /// This is a permutation, with both the source order and the
- /// memory order using the same (0..n) index ranges.
- ///
- /// Note that during computation of `memory_index`, sometimes
- /// it is easier to operate on the inverse mapping (that is,
- /// from memory order to source order), and that is usually
- /// named `inverse_memory_index`.
- ///
- // FIXME(eddyb) build a better abstraction for permutations, if possible.
- // FIXME(camlorn) also consider small vector optimization here.
- memory_index: Vec<u32>,
- },
-}
-
-impl FieldsShape {
- #[inline]
- pub fn count(&self) -> usize {
- match *self {
- FieldsShape::Primitive => 0,
- FieldsShape::Union(count) => count.get(),
- FieldsShape::Array { count, .. } => count.try_into().unwrap(),
- FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
- }
- }
-
- #[inline]
- pub fn offset(&self, i: usize, dl: &TargetDataLayout) -> Size {
- match *self {
- FieldsShape::Primitive => {
- unreachable!("FieldsShape::offset: `Primitive`s have no fields")
- }
- FieldsShape::Union(count) => {
- assert!(
- i < count.get(),
- "tried to access field {} of union with {} fields",
- i,
- count
- );
- Size::ZERO
- }
- FieldsShape::Array { stride, count } => {
- let i = u64::try_from(i).unwrap();
- assert!(i < count);
- stride.checked_mul(i, dl).unwrap()
- }
- FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
- }
- }
-
- #[inline]
- pub fn memory_index(&self, i: usize) -> usize {
- match *self {
- FieldsShape::Primitive => {
- unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
- }
- FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
- FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
- }
- }
-
- /// Gets source indices of the fields by increasing offsets.
- #[inline]
- pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
- let mut inverse_small = [0u8; 64];
- let mut inverse_big = vec![];
- let use_small = self.count() <= inverse_small.len();
+pub type Layout = rustc_abi::LayoutS<RustcEnumVariantIdx>;
+pub type TagEncoding = rustc_abi::TagEncoding<RustcEnumVariantIdx>;
+pub type Variants = rustc_abi::Variants<RustcEnumVariantIdx>;
- // We have to write this logic twice in order to keep the array small.
- if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
- if use_small {
- for i in 0..self.count() {
- inverse_small[memory_index[i] as usize] = i as u8;
- }
- } else {
- inverse_big = vec![0; self.count()];
- for i in 0..self.count() {
- inverse_big[memory_index[i] as usize] = i as u32;
- }
- }
- }
-
- (0..self.count()).map(move |i| match *self {
- FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
- FieldsShape::Arbitrary { .. } => {
- if use_small {
- inverse_small[i] as usize
- } else {
- inverse_big[i] as usize
- }
- }
- })
- }
-}
-
-/// Integers, also used for enum discriminants.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-pub enum Integer {
- I8,
- I16,
- I32,
- I64,
- I128,
+pub trait IntegerExt {
+ fn repr_discr(
+ dl: &TargetDataLayout,
+ repr: &ReprOptions,
+ min: i128,
+ max: i128,
+ ) -> Result<(Integer, bool), LayoutError>;
}
-impl Integer {
- #[inline]
- pub fn size(self) -> Size {
- match self {
- Integer::I8 => Size::from_bytes(1),
- Integer::I16 => Size::from_bytes(2),
- Integer::I32 => Size::from_bytes(4),
- Integer::I64 => Size::from_bytes(8),
- Integer::I128 => Size::from_bytes(16),
- }
- }
-
- pub fn align(self, dl: &TargetDataLayout) -> AbiAndPrefAlign {
- match self {
- Integer::I8 => dl.i8_align,
- Integer::I16 => dl.i16_align,
- Integer::I32 => dl.i32_align,
- Integer::I64 => dl.i64_align,
- Integer::I128 => dl.i128_align,
- }
- }
-
- /// Finds the smallest integer with the given alignment.
- pub fn for_align(dl: &TargetDataLayout, wanted: Align) -> Option<Integer> {
- use Integer::*;
- for candidate in [I8, I16, I32, I64, I128] {
- if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
- return Some(candidate);
- }
- }
- None
- }
-
- /// Finds the smallest Integer type which can represent the signed value.
- #[inline]
- pub fn fit_signed(x: i128) -> Integer {
- match x {
- -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => Integer::I8,
- -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => Integer::I16,
- -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => Integer::I32,
- -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => Integer::I64,
- _ => Integer::I128,
- }
- }
-
- /// Finds the smallest Integer type which can represent the unsigned value.
- #[inline]
- pub fn fit_unsigned(x: u128) -> Integer {
- match x {
- 0..=0x0000_0000_0000_00ff => Integer::I8,
- 0..=0x0000_0000_0000_ffff => Integer::I16,
- 0..=0x0000_0000_ffff_ffff => Integer::I32,
- 0..=0xffff_ffff_ffff_ffff => Integer::I64,
- _ => Integer::I128,
- }
- }
-
- /// Gets the Integer type from an attr::IntType.
- pub fn from_attr(dl: &TargetDataLayout, ity: Either<BuiltinInt, BuiltinUint>) -> Integer {
- match ity {
- Either::Left(BuiltinInt::I8) | Either::Right(BuiltinUint::U8) => Integer::I8,
- Either::Left(BuiltinInt::I16) | Either::Right(BuiltinUint::U16) => Integer::I16,
- Either::Left(BuiltinInt::I32) | Either::Right(BuiltinUint::U32) => Integer::I32,
- Either::Left(BuiltinInt::I64) | Either::Right(BuiltinUint::U64) => Integer::I64,
- Either::Left(BuiltinInt::I128) | Either::Right(BuiltinUint::U128) => Integer::I128,
- Either::Left(BuiltinInt::Isize) | Either::Right(BuiltinUint::Usize) => {
- dl.ptr_sized_integer()
- }
- }
- }
-
+impl IntegerExt for Integer {
/// Finds the appropriate Integer type and signedness for the given
/// signed discriminant range and `#[repr]` attribute.
/// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
/// that shouldn't affect anything, other than maybe debuginfo.
- pub fn repr_discr(
+ fn repr_discr(
dl: &TargetDataLayout,
repr: &ReprOptions,
min: i128,
@@ -428,7 +57,7 @@ impl Integer {
if let Some(ity) = repr.int {
let discr = Integer::from_attr(dl, ity);
- let fit = if ity.is_left() { signed_fit } else { unsigned_fit };
+ let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
if discr < fit {
return Err(LayoutError::UserError(
"Integer::repr_discr: `#[repr]` hint too small for \
@@ -436,7 +65,7 @@ impl Integer {
.to_string(),
));
}
- return Ok((discr, ity.is_left()));
+ return Ok((discr, ity.is_signed()));
}
let at_least = if repr.c() {
@@ -457,717 +86,11 @@ impl Integer {
}
}
-/// Endianness of the target, which must match cfg(target-endian).
-#[derive(Copy, Clone, PartialEq, Eq)]
-pub enum Endian {
- Little,
- Big,
-}
-
-impl Endian {
- pub fn as_str(&self) -> &'static str {
- match self {
- Self::Little => "little",
- Self::Big => "big",
- }
- }
-}
-
-impl fmt::Debug for Endian {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str(self.as_str())
- }
-}
-
-/// An identifier that specifies the address space that some operation
-/// should operate on. Special address spaces have an effect on code generation,
-/// depending on the target and the address spaces it implements.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct AddressSpace(pub u32);
-
-/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
-/// for a target, which contains everything needed to compute layouts.
-#[derive(Debug, PartialEq, Eq)]
-pub struct TargetDataLayout {
- pub endian: Endian,
- pub i1_align: AbiAndPrefAlign,
- pub i8_align: AbiAndPrefAlign,
- pub i16_align: AbiAndPrefAlign,
- pub i32_align: AbiAndPrefAlign,
- pub i64_align: AbiAndPrefAlign,
- pub i128_align: AbiAndPrefAlign,
- pub f32_align: AbiAndPrefAlign,
- pub f64_align: AbiAndPrefAlign,
- pub pointer_size: Size,
- pub pointer_align: AbiAndPrefAlign,
- pub aggregate_align: AbiAndPrefAlign,
-
- /// Alignments for vector types.
- pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
-
- pub instruction_address_space: AddressSpace,
-
- /// Minimum size of #[repr(C)] enums (default I32 bits)
- pub c_enum_min_size: Integer,
-}
-
-impl TargetDataLayout {
- /// Returns exclusive upper bound on object size.
- ///
- /// The theoretical maximum object size is defined as the maximum positive `isize` value.
- /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
- /// index every address within an object along with one byte past the end, along with allowing
- /// `isize` to store the difference between any two pointers into an object.
- ///
- /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
- /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
- /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
- /// address space on 64-bit ARMv8 and x86_64.
- #[inline]
- pub fn obj_size_bound(&self) -> u64 {
- match self.pointer_size.bits() {
- 16 => 1 << 15,
- 32 => 1 << 31,
- 64 => 1 << 47,
- bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
- }
- }
-
- #[inline]
- pub fn ptr_sized_integer(&self) -> Integer {
- match self.pointer_size.bits() {
- 16 => Integer::I16,
- 32 => Integer::I32,
- 64 => Integer::I64,
- bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
- }
- }
-}
-
-/// Fundamental unit of memory access and layout.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum Primitive {
- /// The `bool` is the signedness of the `Integer` type.
- ///
- /// One would think we would not care about such details this low down,
- /// but some ABIs are described in terms of C types and ISAs where the
- /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
- /// a negative integer passed by zero-extension will appear positive in
- /// the callee, and most operations on it will produce the wrong values.
- Int(Integer, bool),
- F32,
- F64,
- Pointer,
-}
-
-impl Primitive {
- pub fn size(self, dl: &TargetDataLayout) -> Size {
- match self {
- Primitive::Int(i, _) => i.size(),
- Primitive::F32 => Size::from_bits(32),
- Primitive::F64 => Size::from_bits(64),
- Primitive::Pointer => dl.pointer_size,
- }
- }
-
- pub fn align(self, dl: &TargetDataLayout) -> AbiAndPrefAlign {
- match self {
- Primitive::Int(i, _) => i.align(dl),
- Primitive::F32 => dl.f32_align,
- Primitive::F64 => dl.f64_align,
- Primitive::Pointer => dl.pointer_align,
- }
- }
-}
-
-/// Inclusive wrap-around range of valid values, that is, if
-/// start > end, it represents `start..=MAX`,
-/// followed by `0..=end`.
-///
-/// That is, for an i8 primitive, a range of `254..=2` means following
-/// sequence:
-///
-/// 254 (-2), 255 (-1), 0, 1, 2
-///
-/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub struct WrappingRange {
- pub start: u128,
- pub end: u128,
-}
-
-impl WrappingRange {
- pub fn full(size: Size) -> Self {
- Self { start: 0, end: size.unsigned_int_max() }
- }
-
- /// Returns `true` if `v` is contained in the range.
- #[inline(always)]
- pub fn contains(&self, v: u128) -> bool {
- if self.start <= self.end {
- self.start <= v && v <= self.end
- } else {
- self.start <= v || v <= self.end
- }
- }
-
- /// Returns `self` with replaced `start`
- #[inline(always)]
- pub fn with_start(mut self, start: u128) -> Self {
- self.start = start;
- self
- }
-
- /// Returns `self` with replaced `end`
- #[inline(always)]
- pub fn with_end(mut self, end: u128) -> Self {
- self.end = end;
- self
- }
-
- /// Returns `true` if `size` completely fills the range.
- #[inline]
- pub fn is_full_for(&self, size: Size) -> bool {
- let max_value = size.unsigned_int_max();
- debug_assert!(self.start <= max_value && self.end <= max_value);
- self.start == (self.end.wrapping_add(1) & max_value)
- }
-}
-
-impl fmt::Debug for WrappingRange {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- if self.start > self.end {
- write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
- } else {
- write!(fmt, "{}..={}", self.start, self.end)?;
- }
- Ok(())
- }
-}
-
-/// Information about one scalar component of a Rust type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub enum Scalar {
- Initialized {
- value: Primitive,
-
- // FIXME(eddyb) always use the shortest range, e.g., by finding
- // the largest space between two consecutive valid values and
- // taking everything else as the (shortest) valid range.
- valid_range: WrappingRange,
- },
- Union {
- /// Even for unions, we need to use the correct registers for the kind of
- /// values inside the union, so we keep the `Primitive` type around. We
- /// also use it to compute the size of the scalar.
- /// However, unions never have niches and even allow undef,
- /// so there is no `valid_range`.
- value: Primitive,
- },
-}
-
-impl Scalar {
- #[inline]
- pub fn is_bool(&self) -> bool {
- matches!(
- self,
- Scalar::Initialized {
- value: Primitive::Int(Integer::I8, false),
- valid_range: WrappingRange { start: 0, end: 1 }
- }
- )
- }
-
- /// Get the primitive representation of this type, ignoring the valid range and whether the
- /// value is allowed to be undefined (due to being a union).
- pub fn primitive(&self) -> Primitive {
- match *self {
- Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
- }
- }
-
- pub fn align(self, cx: &TargetDataLayout) -> AbiAndPrefAlign {
- self.primitive().align(cx)
- }
-
- pub fn size(self, cx: &TargetDataLayout) -> Size {
- self.primitive().size(cx)
- }
-
- #[inline]
- pub fn to_union(&self) -> Self {
- Self::Union { value: self.primitive() }
- }
-
- #[inline]
- pub fn valid_range(&self, cx: &TargetDataLayout) -> WrappingRange {
- match *self {
- Scalar::Initialized { valid_range, .. } => valid_range,
- Scalar::Union { value } => WrappingRange::full(value.size(cx)),
- }
- }
-
- #[inline]
- /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
- pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
- match self {
- Scalar::Initialized { valid_range, .. } => valid_range,
- Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
- }
- }
-
- /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
- #[inline]
- pub fn is_always_valid(&self, cx: &TargetDataLayout) -> bool {
- match *self {
- Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
- Scalar::Union { .. } => true,
- }
- }
-
- /// Returns `true` if this type can be left uninit.
- #[inline]
- pub fn is_uninit_valid(&self) -> bool {
- match *self {
- Scalar::Initialized { .. } => false,
- Scalar::Union { .. } => true,
- }
- }
-}
-
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub enum Abi {
- Uninhabited,
- Scalar(Scalar),
- ScalarPair(Scalar, Scalar),
- Vector {
- element: Scalar,
- count: u64,
- },
- Aggregate {
- /// If true, the size is exact, otherwise it's only a lower bound.
- sized: bool,
- },
-}
-
-impl Abi {
- /// Returns `true` if the layout corresponds to an unsized type.
- #[inline]
- pub fn is_unsized(&self) -> bool {
- match *self {
- Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
- Abi::Aggregate { sized } => !sized,
- }
- }
-
- /// Returns `true` if this is an uninhabited type
- #[inline]
- pub fn is_uninhabited(&self) -> bool {
- matches!(*self, Abi::Uninhabited)
- }
-
- /// Returns `true` if this is a scalar type
- #[inline]
- pub fn is_scalar(&self) -> bool {
- matches!(*self, Abi::Scalar(_))
- }
-}
-
-/// Alignment of a type in bytes (always a power of two).
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct Align {
- pow2: u8,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Align {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "Align({} bytes)", self.bytes())
- }
-}
-
-impl Align {
- pub const ONE: Align = Align { pow2: 0 };
- pub const MAX: Align = Align { pow2: 29 };
-
- #[inline]
- pub fn from_bytes(align: u64) -> Result<Align, String> {
- // Treat an alignment of 0 bytes like 1-byte alignment.
- if align == 0 {
- return Ok(Align::ONE);
- }
-
- #[cold]
- fn not_power_of_2(align: u64) -> String {
- format!("`{}` is not a power of 2", align)
- }
-
- #[cold]
- fn too_large(align: u64) -> String {
- format!("`{}` is too large", align)
- }
-
- let mut bytes = align;
- let mut pow2: u8 = 0;
- while (bytes & 1) == 0 {
- pow2 += 1;
- bytes >>= 1;
- }
- if bytes != 1 {
- return Err(not_power_of_2(align));
- }
- if pow2 > Self::MAX.pow2 {
- return Err(too_large(align));
- }
-
- Ok(Align { pow2 })
- }
-
- #[inline]
- pub fn bytes(self) -> u64 {
- 1 << self.pow2
- }
-
- #[inline]
- pub fn bits(self) -> u64 {
- self.bytes() * 8
- }
-
- /// Computes the best alignment possible for the given offset
- /// (the largest power of two that the offset is a multiple of).
- ///
- /// N.B., for an offset of `0`, this happens to return `2^64`.
- #[inline]
- pub fn max_for_offset(offset: Size) -> Align {
- Align { pow2: offset.bytes().trailing_zeros() as u8 }
- }
-
- /// Lower the alignment, if necessary, such that the given offset
- /// is aligned to it (the offset is a multiple of the alignment).
- #[inline]
- pub fn restrict_for_offset(self, offset: Size) -> Align {
- self.min(Align::max_for_offset(offset))
- }
-}
-
-/// A pair of alignments, ABI-mandated and preferred.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub struct AbiAndPrefAlign {
- pub abi: Align,
- pub pref: Align,
-}
-
-impl AbiAndPrefAlign {
- #[inline]
- pub fn new(align: Align) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: align, pref: align }
- }
-
- #[inline]
- pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
- }
-
- #[inline]
- pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
- AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
- }
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub struct Niche {
- pub offset: Size,
- pub value: Primitive,
- pub valid_range: WrappingRange,
-}
-
-impl Niche {
- pub fn from_scalar(cx: &TargetDataLayout, offset: Size, scalar: Scalar) -> Option<Self> {
- let (value, valid_range) = match scalar {
- Scalar::Initialized { value, valid_range } => (value, valid_range),
- _ => return None,
- };
- let niche = Niche { offset, value, valid_range };
- if niche.available(cx) > 0 {
- Some(niche)
- } else {
- None
- }
- }
-
- pub fn available(&self, cx: &TargetDataLayout) -> u128 {
- let Self { value, valid_range: v, .. } = *self;
- let size = value.size(cx);
- assert!(size.bits() <= 128);
- let max_value = size.unsigned_int_max();
-
- // Find out how many values are outside the valid range.
- let niche = v.end.wrapping_add(1)..v.start;
- niche.end.wrapping_sub(niche.start) & max_value
- }
-
- pub fn reserve(&self, cx: &TargetDataLayout, count: u128) -> Option<(u128, Scalar)> {
- assert!(count > 0);
-
- let Self { value, valid_range: v, .. } = *self;
- let size = value.size(cx);
- assert!(size.bits() <= 128);
- let max_value = size.unsigned_int_max();
-
- let niche = v.end.wrapping_add(1)..v.start;
- let available = niche.end.wrapping_sub(niche.start) & max_value;
- if count > available {
- return None;
- }
-
- // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
- // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
- // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
- // Having `None` in niche zero can enable some special optimizations.
- //
- // Bound selection criteria:
- // 1. Select closest to zero given wrapping semantics.
- // 2. Avoid moving past zero if possible.
- //
- // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
- // If niche zero is already reserved, the selection of bounds are of little interest.
- let move_start = |v: WrappingRange| {
- let start = v.start.wrapping_sub(count) & max_value;
- Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
- };
- let move_end = |v: WrappingRange| {
- let start = v.end.wrapping_add(1) & max_value;
- let end = v.end.wrapping_add(count) & max_value;
- Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
- };
- let distance_end_zero = max_value - v.end;
- if v.start > v.end {
- // zero is unavailable because wrapping occurs
- move_end(v)
- } else if v.start <= distance_end_zero {
- if count <= v.start {
- move_start(v)
- } else {
- // moved past zero, use other bound
- move_end(v)
- }
- } else {
- let end = v.end.wrapping_add(count) & max_value;
- let overshot_zero = (1..=v.end).contains(&end);
- if overshot_zero {
- // moved past zero, use other bound
- move_start(v)
- } else {
- move_end(v)
- }
- }
- }
-}
-
-#[derive(PartialEq, Eq, Hash, Debug, Clone)]
-pub enum TagEncoding {
- /// The tag directly stores the discriminant, but possibly with a smaller layout
- /// (so converting the tag to the discriminant can require sign extension).
- Direct,
-
- /// Niche (values invalid for a type) encoding the discriminant:
- /// Discriminant and variant index coincide.
- /// The variant `untagged_variant` contains a niche at an arbitrary
- /// offset (field `tag_field` of the enum), which for a variant with
- /// discriminant `d` is set to
- /// `(d - niche_variants.start).wrapping_add(niche_start)`.
- ///
- /// For example, `Option<(usize, &T)>` is represented such that
- /// `None` has a null pointer for the second tuple field, and
- /// `Some` is the identity function (with a non-null reference).
- Niche { untagged_variant: LocalEnumVariantId, niche_start: u128 },
-}
-
-#[derive(PartialEq, Eq, Hash, Debug, Clone)]
-pub enum Variants {
- /// Single enum variants, structs/tuples, unions, and all non-ADTs.
- Single,
-
- /// Enum-likes with more than one inhabited variant: each variant comes with
- /// a *discriminant* (usually the same as the variant index but the user can
- /// assign explicit discriminant values). That discriminant is encoded
- /// as a *tag* on the machine. The layout of each variant is
- /// a struct, and they all have space reserved for the tag.
- /// For enums, the tag is the sole field of the layout.
- Multiple {
- tag: Scalar,
- tag_encoding: TagEncoding,
- tag_field: usize,
- variants: ArenaMap<LocalEnumVariantId, Layout>,
- },
-}
-
-bitflags! {
- #[derive(Default)]
- pub struct ReprFlags: u8 {
- const IS_C = 1 << 0;
- const IS_SIMD = 1 << 1;
- const IS_TRANSPARENT = 1 << 2;
- // Internal only for now. If true, don't reorder fields.
- const IS_LINEAR = 1 << 3;
- // Any of these flags being set prevents field reordering optimisation.
- const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits
- | ReprFlags::IS_SIMD.bits
- | ReprFlags::IS_LINEAR.bits;
- }
-}
-
-/// Represents the repr options provided by the user.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
-pub struct ReprOptions {
- pub int: Option<Either<BuiltinInt, BuiltinUint>>,
- pub align: Option<Align>,
- pub pack: Option<Align>,
- pub flags: ReprFlags,
-}
-
-impl ReprOptions {
- #[inline]
- pub fn simd(&self) -> bool {
- self.flags.contains(ReprFlags::IS_SIMD)
- }
-
- #[inline]
- pub fn c(&self) -> bool {
- self.flags.contains(ReprFlags::IS_C)
- }
-
- #[inline]
- pub fn packed(&self) -> bool {
- self.pack.is_some()
- }
-
- #[inline]
- pub fn transparent(&self) -> bool {
- self.flags.contains(ReprFlags::IS_TRANSPARENT)
- }
-
- #[inline]
- pub fn linear(&self) -> bool {
- self.flags.contains(ReprFlags::IS_LINEAR)
- }
-
- /// Returns the discriminant type, given these `repr` options.
- /// This must only be called on enums!
- pub fn discr_type(&self) -> Either<BuiltinInt, BuiltinUint> {
- self.int.unwrap_or(Either::Left(BuiltinInt::Isize))
- }
-
- /// Returns `true` if this `#[repr()]` should inhibit "smart enum
- /// layout" optimizations, such as representing `Foo<&T>` as a
- /// single pointer.
- pub fn inhibit_enum_layout_opt(&self) -> bool {
- self.c() || self.int.is_some()
- }
-
- /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
- /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
- pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
- if let Some(pack) = self.pack {
- if pack.bytes() == 1 {
- return true;
- }
- }
-
- self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
- }
-
- /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
- pub fn inhibit_union_abi_opt(&self) -> bool {
- self.c()
- }
-}
-
-#[derive(PartialEq, Eq, Hash, Clone)]
-pub struct Layout {
- /// Says where the fields are located within the layout.
- pub fields: FieldsShape,
-
- /// Encodes information about multi-variant layouts.
- /// Even with `Multiple` variants, a layout still has its own fields! Those are then
- /// shared between all variants. One of them will be the discriminant,
- /// but e.g. generators can have more.
- ///
- /// To access all fields of this layout, both `fields` and the fields of the active variant
- /// must be taken into account.
- pub variants: Variants,
-
- /// The `abi` defines how this data is passed between functions, and it defines
- /// value restrictions via `valid_range`.
- ///
- /// Note that this is entirely orthogonal to the recursive structure defined by
- /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
- /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
- /// have to be taken into account to find all fields of this layout.
- pub abi: Abi,
-
- /// The leaf scalar with the largest number of invalid values
- /// (i.e. outside of its `valid_range`), if it exists.
- pub largest_niche: Option<Niche>,
-
- pub align: AbiAndPrefAlign,
- pub size: Size,
-}
-
-impl Layout {
- pub fn scalar(dl: &TargetDataLayout, scalar: Scalar) -> Self {
- let largest_niche = Niche::from_scalar(dl, Size::ZERO, scalar);
- let size = scalar.size(dl);
- let align = scalar.align(dl);
- Layout {
- variants: Variants::Single,
- fields: FieldsShape::Primitive,
- abi: Abi::Scalar(scalar),
- largest_niche,
- size,
- align,
- }
- }
-}
-
-impl fmt::Debug for Layout {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // This is how `Layout` used to print before it became
- // `Interned<LayoutS>`. We print it like this to avoid having to update
- // expected output in a lot of tests.
- let Layout { size, align, abi, fields, largest_niche, variants } = self;
- f.debug_struct("Layout")
- .field("size", size)
- .field("align", align)
- .field("abi", abi)
- .field("fields", fields)
- .field("largest_niche", largest_niche)
- .field("variants", variants)
- .finish()
- }
-}
-
-impl Layout {
- pub fn is_unsized(&self) -> bool {
- self.abi.is_unsized()
- }
-
- /// Returns `true` if the type is a ZST and not unsized.
- pub fn is_zst(&self) -> bool {
- match self.abi {
- Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
- Abi::Uninhabited => self.size.bytes() == 0,
- Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
- }
- }
-}
-
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum LayoutError {
UserError(String),
SizeOverflow,
HasPlaceholder,
NotImplemented,
+ Unknown,
}
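The `RustcEnumVariantIdx` newtype above bridges rust-analyzer's `la-arena` variant ids and the `rustc_index` collections that the extracted layout code expects. A minimal sketch, assuming `IndexVec` mirrors rustc's (built from an iterator, indexed through the `Idx` impl):

```rust
use rustc_index::vec::{Idx, IndexVec};

// Hypothetical helper, not in the patch: variant i of the enum ends up at
// position i, since indices round-trip through LocalEnumVariantId's raw u32.
fn collect_variants(layouts: Vec<Layout>) -> IndexVec<RustcEnumVariantIdx, Layout> {
    assert_eq!(RustcEnumVariantIdx::new(0).index(), 0);
    layouts.into_iter().collect()
}
```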
diff --git a/crates/hir-ty/Cargo.toml b/crates/hir-ty/Cargo.toml
index 802face852..87a206e30f 100644
--- a/crates/hir-ty/Cargo.toml
+++ b/crates/hir-ty/Cargo.toml
@@ -25,6 +25,7 @@ chalk-derive = "0.87.0"
la-arena = { version = "0.3.0", path = "../../lib/la-arena" }
once_cell = "1.15.0"
typed-arena = "2.0.1"
+rustc_index = { version = "0.0.20221125", package = "hkalbasi-rustc-ap-rustc_index", default-features = false }
stdx = { path = "../stdx", version = "0.0.0" }
hir-def = { path = "../hir-def", version = "0.0.0" }
diff --git a/crates/hir-ty/src/infer.rs b/crates/hir-ty/src/infer.rs
index 112eb5bd84..874a54fc3e 100644
--- a/crates/hir-ty/src/infer.rs
+++ b/crates/hir-ty/src/infer.rs
@@ -19,10 +19,11 @@ use std::sync::Arc;
use chalk_ir::{cast::Cast, ConstValue, DebruijnIndex, Mutability, Safety, Scalar, TypeFlags};
use hir_def::{
body::Body,
- builtin_type::BuiltinType,
+ builtin_type::{BuiltinInt, BuiltinType, BuiltinUint},
data::{ConstData, StaticData},
expr::{BindingAnnotation, ExprId, PatId},
lang_item::LangItemTarget,
+ layout::Integer,
path::{path, Path},
resolver::{HasResolver, ResolveValueResult, Resolver, TypeNs, ValueNs},
type_ref::TypeRef,
@@ -70,8 +71,26 @@ pub(crate) fn infer_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<Infer
DefWithBodyId::StaticId(s) => ctx.collect_static(&db.static_data(s)),
DefWithBodyId::VariantId(v) => {
ctx.return_ty = TyBuilder::builtin(match db.enum_data(v.parent).variant_body_type() {
- Either::Left(builtin) => BuiltinType::Int(builtin),
- Either::Right(builtin) => BuiltinType::Uint(builtin),
+ hir_def::layout::IntegerType::Pointer(signed) => match signed {
+ true => BuiltinType::Int(BuiltinInt::Isize),
+ false => BuiltinType::Uint(BuiltinUint::Usize),
+ },
+ hir_def::layout::IntegerType::Fixed(size, signed) => match signed {
+ true => BuiltinType::Int(match size {
+ Integer::I8 => BuiltinInt::I8,
+ Integer::I16 => BuiltinInt::I16,
+ Integer::I32 => BuiltinInt::I32,
+ Integer::I64 => BuiltinInt::I64,
+ Integer::I128 => BuiltinInt::I128,
+ }),
+ false => BuiltinType::Uint(match size {
+ Integer::I8 => BuiltinUint::U8,
+ Integer::I16 => BuiltinUint::U16,
+ Integer::I32 => BuiltinUint::U32,
+ Integer::I64 => BuiltinUint::U64,
+ Integer::I128 => BuiltinUint::U128,
+ }),
+ },
});
}
}
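This hunk is the inverse of the `adt.rs` change: the layout code now speaks `IntegerType`, but `TyBuilder::builtin` still takes hir-def's `BuiltinType`, so the discriminant type is mapped back. A hedged sketch of the same conversion as a hypothetical helper (the patch inlines the match):

```rust
use hir_def::builtin_type::{BuiltinInt, BuiltinType, BuiltinUint};
use hir_def::layout::{Integer, IntegerType};

// Hypothetical helper: IntegerType -> BuiltinType.
fn builtin_of(int: IntegerType) -> BuiltinType {
    match int {
        IntegerType::Pointer(true) => BuiltinType::Int(BuiltinInt::Isize),
        IntegerType::Pointer(false) => BuiltinType::Uint(BuiltinUint::Usize),
        IntegerType::Fixed(Integer::I8, true) => BuiltinType::Int(BuiltinInt::I8),
        IntegerType::Fixed(Integer::I8, false) => BuiltinType::Uint(BuiltinUint::U8),
        // ...I16/I32/I64/I128 follow the same pattern as the hunk above.
        _ => unimplemented!("remaining widths elided"),
    }
}
```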
diff --git a/crates/hir-ty/src/layout.rs b/crates/hir-ty/src/layout.rs
index ca39fde118..3c6489fa97 100644
--- a/crates/hir-ty/src/layout.rs
+++ b/crates/hir-ty/src/layout.rs
@@ -1,12 +1,15 @@
//! Compute the binary representation of a type
+use std::sync::Arc;
+
use chalk_ir::{AdtId, TyKind};
pub(self) use hir_def::layout::*;
use hir_def::LocalFieldId;
+use stdx::never;
use crate::{db::HirDatabase, Interner, Substitution, Ty};
-use self::adt::univariant;
+use self::adt::struct_variant_idx;
pub use self::{
adt::{layout_of_adt_query, layout_of_adt_recover},
target::current_target_data_layout_query,
@@ -21,6 +24,22 @@ macro_rules! user_error {
mod adt;
mod target;
+struct LayoutCx<'a> {
+ db: &'a dyn HirDatabase,
+}
+
+impl LayoutCalculator for LayoutCx<'_> {
+ type TargetDataLayoutRef = Arc<TargetDataLayout>;
+
+ fn delay_bug(&self, txt: &str) {
+ never!("{}", txt);
+ }
+
+ fn current_data_layout(&self) -> Arc<TargetDataLayout> {
+ self.db.current_target_data_layout()
+ }
+}
+
fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar {
Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) }
}
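`LayoutCalculator` is the hook through which the extracted `rustc_abi` code asks its embedder for a target data layout and reports internal errors; implementing it on `LayoutCx` is what makes methods such as `univariant` and `scalar_pair` available in the hunks below. A minimal usage sketch under that assumption:

```rust
// Sketch only: how the later hunks drive the trait.
fn unit_layout(db: &dyn HirDatabase) -> Option<Layout> {
    let cx = LayoutCx { db };
    let dl = cx.current_data_layout();
    // An empty field list with StructKind::AlwaysSized yields the layout of `()`.
    cx.univariant::<RustcEnumVariantIdx, &&Layout>(
        &dl,
        &[],
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )
}
```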
@@ -29,34 +48,9 @@ fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout {
Layout::scalar(dl, scalar_unit(dl, value))
}
-fn scalar_pair(dl: &TargetDataLayout, a: Scalar, b: Scalar) -> Layout {
- let b_align = b.align(dl);
- let align = a.align(dl).max(b_align).max(dl.aggregate_align);
- let b_offset = a.size(dl).align_to(b_align.abi);
- let size = b_offset.checked_add(b.size(dl), dl).unwrap().align_to(align.abi);
-
- // HACK(nox): We iter on `b` and then `a` because `max_by_key`
- // returns the last maximum.
- let largest_niche = Niche::from_scalar(dl, b_offset, b)
- .into_iter()
- .chain(Niche::from_scalar(dl, Size::ZERO, a))
- .max_by_key(|niche| niche.available(dl));
-
- Layout {
- variants: Variants::Single,
- fields: FieldsShape::Arbitrary {
- offsets: vec![Size::ZERO, b_offset],
- memory_index: vec![0, 1],
- },
- abi: Abi::ScalarPair(a, b),
- largest_niche,
- align,
- size,
- }
-}
-
pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError> {
let dl = &*db.current_target_data_layout();
+ let cx = LayoutCx { db };
Ok(match ty.kind(Interner) {
TyKind::Adt(AdtId(def), subst) => db.layout_of_adt(*def, subst.clone())?,
TyKind::Scalar(s) => match s {
@@ -113,14 +107,13 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError
TyKind::Tuple(len, tys) => {
let kind = if *len == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
- univariant(
- dl,
- &tys.iter(Interner)
- .map(|k| layout_of_ty(db, k.assert_ty_ref(Interner)))
- .collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- kind,
- )?
+ let fields = tys
+ .iter(Interner)
+ .map(|k| layout_of_ty(db, k.assert_ty_ref(Interner)))
+ .collect::<Result<Vec<_>, _>>()?;
+ let fields = fields.iter().collect::<Vec<_>>();
+ let fields = fields.iter().collect::<Vec<_>>();
+ cx.univariant(dl, &fields, &ReprOptions::default(), kind).ok_or(LayoutError::Unknown)?
}
TyKind::Array(element, count) => {
let count = match count.data(Interner).value {
@@ -146,7 +139,7 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError
let largest_niche = if count != 0 { element.largest_niche } else { None };
Layout {
- variants: Variants::Single,
+ variants: Variants::Single { index: struct_variant_idx() },
fields: FieldsShape::Array { stride: element.size, count },
abi,
largest_niche,
@@ -157,7 +150,7 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError
TyKind::Slice(element) => {
let element = layout_of_ty(db, element)?;
Layout {
- variants: Variants::Single,
+ variants: Variants::Single { index: struct_variant_idx() },
fields: FieldsShape::Array { stride: element.size, count: 0 },
abi: Abi::Aggregate { sized: false },
largest_niche: None,
@@ -194,13 +187,11 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError
};
// Effectively a (ptr, meta) tuple.
- scalar_pair(dl, data_ptr, metadata)
- }
- TyKind::FnDef(_, _) => {
- univariant(dl, &[], &ReprOptions::default(), StructKind::AlwaysSized)?
+ cx.scalar_pair(data_ptr, metadata)
}
+ TyKind::FnDef(_, _) => layout_of_unit(&cx, dl)?,
TyKind::Str => Layout {
- variants: Variants::Single,
+ variants: Variants::Single { index: struct_variant_idx() },
fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
abi: Abi::Aggregate { sized: false },
largest_niche: None,
@@ -208,7 +199,7 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError
size: Size::ZERO,
},
TyKind::Never => Layout {
- variants: Variants::Single,
+ variants: Variants::Single { index: struct_variant_idx() },
fields: FieldsShape::Primitive,
abi: Abi::Uninhabited,
largest_niche: None,
@@ -216,7 +207,7 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError
size: Size::ZERO,
},
TyKind::Dyn(_) | TyKind::Foreign(_) => {
- let mut unit = univariant(dl, &[], &ReprOptions::default(), StructKind::AlwaysSized)?;
+ let mut unit = layout_of_unit(&cx, dl)?;
match unit.abi {
Abi::Aggregate { ref mut sized } => *sized = false,
_ => user_error!("bug"),
@@ -241,6 +232,16 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError
})
}
+fn layout_of_unit(cx: &LayoutCx<'_>, dl: &TargetDataLayout) -> Result<Layout, LayoutError> {
+ cx.univariant::<RustcEnumVariantIdx, &&Layout>(
+ &dl,
+ &[],
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )
+ .ok_or(LayoutError::Unknown)
+}
+
fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
match pointee.kind(Interner) {
TyKind::Adt(AdtId(adt), subst) => match adt {
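One non-obvious detail in the tuple hunk above: `fields` is collected twice. Assuming the vendored `univariant` mirrors rustc's signature, it takes `&[F]` where `F: Deref<Target = &Layout>`, i.e. a slice of `&&Layout`, so the code climbs from `Vec<Layout>` to `Vec<&Layout>` to `Vec<&&Layout>` before the call:

```rust
// Sketch of the type ladder behind the two `.iter().collect()` calls.
fn field_slices(owned: Vec<Layout>) {
    let refs: Vec<&Layout> = owned.iter().collect();
    let refs2: Vec<&&Layout> = refs.iter().collect();
    // `&refs2` is a `&[&&Layout]`, satisfying `F: Deref<Target = &Layout>`.
    let _ = &refs2;
}
```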
diff --git a/crates/hir-ty/src/layout/adt.rs b/crates/hir-ty/src/layout/adt.rs
index 9244353f3a..d9791a4b63 100644
--- a/crates/hir-ty/src/layout/adt.rs
+++ b/crates/hir-ty/src/layout/adt.rs
@@ -1,628 +1,110 @@
//! Compute the binary representation of structs, unions and enums
-use std::{
- cmp::{self, Ordering},
- iter,
- num::NonZeroUsize,
- ops::Bound,
-};
+use std::ops::Bound;
-use chalk_ir::TyKind;
use hir_def::{
adt::VariantData,
- layout::{
- Abi, AbiAndPrefAlign, Align, FieldsShape, Integer, Layout, LayoutError, Niche, Primitive,
- ReprOptions, Scalar, Size, StructKind, TagEncoding, TargetDataLayout, Variants,
- WrappingRange,
- },
- AdtId, EnumVariantId, LocalEnumVariantId, UnionId, VariantId,
+ layout::{Integer, IntegerExt, Layout, LayoutCalculator, LayoutError, RustcEnumVariantIdx},
+ AdtId, EnumVariantId, LocalEnumVariantId, VariantId,
};
-use la_arena::{ArenaMap, RawIdx};
+use la_arena::RawIdx;
+use rustc_index::vec::IndexVec;
-struct X(Option<NonZeroUsize>);
+use crate::{db::HirDatabase, lang_items::is_unsafe_cell, layout::field_ty, Substitution};
-use crate::{
- db::HirDatabase,
- lang_items::is_unsafe_cell,
- layout::{field_ty, scalar_unit},
- Interner, Substitution,
-};
+use super::{layout_of_ty, LayoutCx};
-use super::layout_of_ty;
+pub(crate) fn struct_variant_idx() -> RustcEnumVariantIdx {
+ RustcEnumVariantIdx(LocalEnumVariantId::from_raw(RawIdx::from(0)))
+}
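`struct_variant_idx` encodes the convention that structs and unions are laid out as a one-variant enum, which is why the earlier `layout.rs` hunks rewrote every `Variants::Single` as `Variants::Single { index: struct_variant_idx() }`. A trivial sketch of the invariant:

```rust
use rustc_index::vec::Idx;

// Sketch: the single synthetic variant of a struct/union is variant 0.
fn check() {
    assert_eq!(struct_variant_idx().index(), 0);
}
```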
pub fn layout_of_adt_query(
db: &dyn HirDatabase,
def: AdtId,
subst: Substitution,
) -> Result<Layout, LayoutError> {
+ let dl = db.current_target_data_layout();
+ let cx = LayoutCx { db };
let handle_variant = |def: VariantId, var: &VariantData| {
var.fields()
.iter()
.map(|(fd, _)| layout_of_ty(db, &field_ty(db, def, fd, &subst)))
.collect::<Result<Vec<_>, _>>()
};
- fn struct_variant_idx() -> LocalEnumVariantId {
- LocalEnumVariantId::from_raw(RawIdx::from(0))
- }
- let (variants, is_enum, repr) = match def {
+ let (variants, is_enum, is_union, repr) = match def {
AdtId::StructId(s) => {
let data = db.struct_data(s);
- let mut r = ArenaMap::new();
- r.insert(struct_variant_idx(), handle_variant(s.into(), &data.variant_data)?);
- (r, false, data.repr.unwrap_or_default())
+ let mut r = IndexVec::new();
+ r.push(handle_variant(s.into(), &data.variant_data)?);
+ (r, false, false, data.repr.unwrap_or_default())
+ }
+ AdtId::UnionId(id) => {
+ let data = db.union_data(id);
+ let mut r = IndexVec::new();
+ r.push(handle_variant(id.into(), &data.variant_data)?);
+ (r, false, true, data.repr.unwrap_or_default())
}
- AdtId::UnionId(id) => return layout_of_union(db, id, &subst),
AdtId::EnumId(e) => {
let data = db.enum_data(e);
let r = data
.variants
.iter()
.map(|(idx, v)| {
- Ok((
- idx,
- handle_variant(
- EnumVariantId { parent: e, local_id: idx }.into(),
- &v.variant_data,
- )?,
- ))
+ handle_variant(
+ EnumVariantId { parent: e, local_id: idx }.into(),
+ &v.variant_data,
+ )
})
- .collect::<Result<_, _>>()?;
- (r, true, data.repr.unwrap_or_default())
- }
- };
-
- // A variant is absent if it's uninhabited and only has ZST fields.
- // Present uninhabited variants only require space for their fields,
- // but *not* an encoding of the discriminant (e.g., a tag value).
- // See issue #49298 for more details on the need to leave space
- // for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &[Layout]| {
- let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
- let is_zst = fields.iter().all(|f| f.is_zst());
- uninhabited && is_zst
- };
- let (present_first, present_second) = {
- let mut present_variants =
- variants.iter().filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
- (present_variants.next(), present_variants.next())
- };
- let present_first = match present_first {
- Some(present_first) => present_first,
- // Uninhabited because it has no variants, or only absent ones.
- None if is_enum => return layout_of_ty(db, &TyKind::Never.intern(Interner)),
- // If it's a struct, still compute a layout so that we can still compute the
- // field offsets.
- None => struct_variant_idx(),
- };
-
- let is_univariant = !is_enum ||
- // Only one variant is present.
- (present_second.is_none() &&
- // Representation optimizations are allowed.
- !repr.inhibit_enum_layout_opt());
- let dl = &*db.current_target_data_layout();
-
- if is_univariant {
- // Struct, or univariant enum equivalent to a struct.
- // (Typechecking will reject discriminant-sizing attrs.)
-
- let v = present_first;
- let kind = if is_enum || variants[v].is_empty() {
- StructKind::AlwaysSized
- } else {
- let always_sized = !variants[v].last().unwrap().is_unsized();
- if !always_sized {
- StructKind::MaybeUnsized
- } else {
- StructKind::AlwaysSized
- }
- };
-
- let mut st = univariant(dl, &variants[v], &repr, kind)?;
- st.variants = Variants::Single;
-
- if is_unsafe_cell(def, db) {
- let hide_niches = |scalar: &mut _| match scalar {
- Scalar::Initialized { value, valid_range } => {
- *valid_range = WrappingRange::full(value.size(dl))
- }
- // Already doesn't have any niches
- Scalar::Union { .. } => {}
- };
- match &mut st.abi {
- Abi::Uninhabited => {}
- Abi::Scalar(scalar) => hide_niches(scalar),
- Abi::ScalarPair(a, b) => {
- hide_niches(a);
- hide_niches(b);
- }
- Abi::Vector { element, count: _ } => hide_niches(element),
- Abi::Aggregate { sized: _ } => {}
- }
- st.largest_niche = None;
- return Ok(st);
- }
-
- let (start, end) = layout_scalar_valid_range(db, def);
- match st.abi {
- Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
- if let Bound::Included(start) = start {
- let valid_range = scalar.valid_range_mut();
- valid_range.start = start;
- }
- if let Bound::Included(end) = end {
- let valid_range = scalar.valid_range_mut();
- valid_range.end = end;
- }
- // Update `largest_niche` if we have introduced a larger niche.
- let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
- if let Some(niche) = niche {
- match st.largest_niche {
- Some(largest_niche) => {
- // Replace the existing niche even if they're equal,
- // because this one is at a lower offset.
- if largest_niche.available(dl) <= niche.available(dl) {
- st.largest_niche = Some(niche);
- }
- }
- None => st.largest_niche = Some(niche),
- }
- }
- }
- _ => user_error!("nonscalar layout for layout_scalar_valid_range"),
- }
-
- return Ok(st);
- }
-
- // Until we've decided whether to use the tagged or
- // niche filling LayoutS, we don't want to intern the
- // variant layouts, so we can't store them in the
- // overall LayoutS. Store the overall LayoutS
- // and the variant LayoutSs here until then.
- struct TmpLayout {
- layout: Layout,
- variants: ArenaMap<LocalEnumVariantId, Layout>,
- }
-
- let calculate_niche_filling_layout = || -> Result<Option<TmpLayout>, LayoutError> {
- // The current code for niche-filling relies on variant indices
- // instead of actual discriminants, so enums with
- // explicit discriminants (RFC #2363) would misbehave.
- if repr.inhibit_enum_layout_opt()
- // FIXME: bring these codes back
- // || def
- // .variants()
- // .iter_enumerated()
- // .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
- {
- return Ok(None);
- }
-
- if variants.iter().count() < 2 {
- return Ok(None);
- }
-
- let mut align = dl.aggregate_align;
- let mut variant_layouts = variants
- .iter()
- .map(|(j, v)| {
- let mut st = univariant(dl, v, &repr, StructKind::AlwaysSized)?;
- st.variants = Variants::Single;
-
- align = align.max(st.align);
-
- Ok((j, st))
- })
- .collect::<Result<ArenaMap<_, _>, _>>()?;
-
- let largest_variant_index = match variant_layouts
- .iter()
- .max_by_key(|(_i, layout)| layout.size.bytes())
- .map(|(i, _layout)| i)
- {
- None => return Ok(None),
- Some(i) => i,
- };
-
- let count = variants
- .iter()
- .map(|(i, _)| i)
- .filter(|x| *x != largest_variant_index && !absent(&variants[*x]))
- .count() as u128;
-
- // Find the field with the largest niche
- let (field_index, niche, (niche_start, niche_scalar)) = match variants
- [largest_variant_index]
- .iter()
- .enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche?)))
- .max_by_key(|(_, niche)| niche.available(dl))
- .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))
- {
- None => return Ok(None),
- Some(x) => x,
- };
-
- let niche_offset =
- niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index, dl);
- let niche_size = niche.value.size(dl);
- let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
-
- let all_variants_fit = variant_layouts.iter_mut().all(|(i, layout)| {
- if i == largest_variant_index {
- return true;
- }
-
- layout.largest_niche = None;
-
- if layout.size <= niche_offset {
- // This variant will fit before the niche.
- return true;
- }
-
- // Determine if it'll fit after the niche.
- let this_align = layout.align.abi;
- let this_offset = (niche_offset + niche_size).align_to(this_align);
-
- if this_offset + layout.size > size {
- return false;
- }
-
- // It'll fit, but we need to make some adjustments.
- match layout.fields {
- FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for (j, offset) in offsets.iter_mut().enumerate() {
- if !variants[i][j].is_zst() {
- *offset += this_offset;
- }
- }
- }
- _ => {
- panic!("Layout of fields should be Arbitrary for variants")
- }
- }
-
- // It can't be a Scalar or ScalarPair because the offset isn't 0.
- if !layout.abi.is_uninhabited() {
- layout.abi = Abi::Aggregate { sized: true };
- }
- layout.size += this_offset;
-
- true
- });
-
- if !all_variants_fit {
- return Ok(None);
- }
-
- let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
-
- let others_zst = variant_layouts
- .iter()
- .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
- let same_size = size == variant_layouts[largest_variant_index].size;
- let same_align = align == variant_layouts[largest_variant_index].align;
-
- let abi = if variant_layouts.iter().all(|(_, v)| v.abi.is_uninhabited()) {
- Abi::Uninhabited
- } else if same_size && same_align && others_zst {
- match variant_layouts[largest_variant_index].abi {
- // When the total alignment and size match, we can use the
- // same ABI as the scalar variant with the reserved niche.
- Abi::Scalar(_) => Abi::Scalar(niche_scalar),
- Abi::ScalarPair(first, second) => {
- // Only the niche is guaranteed to be initialised,
- // so use union layouts for the other primitive.
- if niche_offset == Size::ZERO {
- Abi::ScalarPair(niche_scalar, second.to_union())
- } else {
- Abi::ScalarPair(first.to_union(), niche_scalar)
- }
- }
- _ => Abi::Aggregate { sized: true },
- }
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let layout = Layout {
- variants: Variants::Multiple {
- tag: niche_scalar,
- tag_encoding: TagEncoding::Niche {
- untagged_variant: largest_variant_index,
- niche_start,
- },
- tag_field: 0,
- variants: ArenaMap::new(),
- },
- fields: FieldsShape::Arbitrary { offsets: vec![niche_offset], memory_index: vec![0] },
- abi,
- largest_niche,
- size,
- align,
- };
-
- Ok(Some(TmpLayout { layout, variants: variant_layouts }))
- };
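// A sketch of what niche filling buys, on a typical 64-bit target: `&u8` is
// never null, so `Option<&u8>` can encode `None` in that spare bit pattern
// instead of adding a tag, and niches nest through `Option<bool>` because
// `bool` only uses 2 of its 256 bit patterns.
//
//     use std::mem::size_of;
//     assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>()); // no extra tag
//     assert_eq!(size_of::<Option<Option<bool>>>(), 1);       // niches nest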
-
- let niche_filling_layout = calculate_niche_filling_layout()?;
-
- let (mut min, mut max) = (i128::MAX, i128::MIN);
- // FIXME: bring these back
- // let discr_type = repr.discr_type();
- // let bits = Integer::from_attr(dl, discr_type).size().bits();
- // for (i, discr) in def.discriminants(tcx) {
- // if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
- // continue;
- // }
- // let mut x = discr.val as i128;
- // if discr_type.is_signed() {
- // // sign extend the raw representation to be an i128
- // x = (x << (128 - bits)) >> (128 - bits);
- // }
- // if x < min {
- // min = x;
- // }
- // if x > max {
- // max = x;
- // }
- // }
- // We might have no inhabited variants, so pretend there's at least one.
- if (min, max) == (i128::MAX, i128::MIN) {
- min = 0;
- max = 0;
- }
- assert!(min <= max, "discriminant range is {}...{}", min, max);
- let (min_ity, signed) = Integer::repr_discr(dl, &repr, min, max)?;
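// `repr_discr` picks the smallest integer type whose range covers min..=max.
// For example, with explicit discriminants 1000, 2000 and 3000 the range
// fits in 16 bits, so the tag (and the whole data-less enum) is 2 bytes:
//
//     enum Goal { A = 1000, B = 2000, C = 3000 } // size = 2, align = 2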
-
- let mut align = dl.aggregate_align;
- let mut size = Size::ZERO;
-
- // We're interested in the smallest alignment, so start large.
- let mut start_align = Align::from_bytes(256).unwrap();
- assert_eq!(Integer::for_align(dl, start_align), None);
-
- // repr(C) on an enum tells us to make a (tag, union) layout,
- // so we need to grow the prefix alignment to be at least
- // the alignment of the union. (This value is used both for
- // determining the alignment of the overall enum and for
- // determining the alignment of the payload after the tag.)
- let mut prefix_align = min_ity.align(dl).abi;
- if repr.c() {
- for (_, fields) in variants.iter() {
- for field in fields {
- prefix_align = prefix_align.max(field.align.abi);
- }
+ .collect::<Result<IndexVec<RustcEnumVariantIdx, _>, _>>()?;
+ (r, true, false, data.repr.unwrap_or_default())
}
- }
-
- // Create the set of structs that represent each variant.
- let mut layout_variants = variants
- .iter()
- .map(|(i, field_layouts)| {
- let mut st = univariant(
- dl,
- &field_layouts,
- &repr,
- StructKind::Prefixed(min_ity.size(), prefix_align),
- )?;
- st.variants = Variants::Single;
- // Find the first field we can't move later
- // to make room for a larger discriminant.
- for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
- if !field.is_zst() || field.align.abi.bytes() != 1 {
- start_align = start_align.min(field.align.abi);
- break;
- }
- }
- size = cmp::max(size, st.size);
- align = align.max(st.align);
- Ok((i, st))
- })
- .collect::<Result<ArenaMap<_, _>, _>>()?;
-
- // Align the maximum variant size to the largest alignment.
- size = size.align_to(align.abi);
-
- if size.bytes() >= dl.obj_size_bound() {
- return Err(LayoutError::SizeOverflow);
- }
-
- // Check to see if we should use a different type for the
- // discriminant. We can safely use a type with the same size
- // as the alignment of the first field of each variant.
- // We increase the size of the discriminant to avoid LLVM copying
- // padding when it doesn't need to. This normally causes unaligned
- // load/stores and excessive memcpy/memset operations. By using a
- // bigger integer size, LLVM can be sure about its contents and
- // won't be so conservative.
-
- // Use the initial field alignment
- let mut ity = if repr.c() || repr.int.is_some() {
- min_ity
- } else {
- Integer::for_align(dl, start_align).unwrap_or(min_ity)
- };
-
- // If the alignment is not larger than the chosen discriminant size,
- // don't use the alignment as the final size.
- if ity <= min_ity {
- ity = min_ity;
- } else {
- // Patch up the variants' first few fields.
- let old_ity_size = min_ity.size();
- let new_ity_size = ity.size();
- for (_, variant) in layout_variants.iter_mut() {
- match variant.fields {
- FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for i in offsets {
- if *i <= old_ity_size {
- assert_eq!(*i, old_ity_size);
- *i = new_ity_size;
- }
- }
- // We might be making the struct larger.
- if variant.size <= old_ity_size {
- variant.size = new_ity_size;
- }
- }
- _ => user_error!("bug"),
- }
- }
- }
-
- let tag_mask = ity.size().unsigned_int_max();
- let tag = Scalar::Initialized {
- value: Primitive::Int(ity, signed),
- valid_range: WrappingRange {
- start: (min as u128 & tag_mask),
- end: (max as u128 & tag_mask),
- },
};
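// The masked valid range doubles as a niche. For a data-less three-variant
// enum the tag is an unsigned byte with valid_range 0..=2, so the 253
// invalid tag values can be reused by an enclosing type, e.g.:
//
//     enum E { A, B, C }
//     assert_eq!(std::mem::size_of::<Option<E>>(), 1); // None takes a spare tag value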
- let mut abi = Abi::Aggregate { sized: true };
-
- if layout_variants.iter().all(|(_, v)| v.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- } else if tag.size(dl) == size {
- // Make sure we only use scalar layout when the enum is entirely its
- // own tag (i.e. it has no padding nor any non-ZST variant fields).
- abi = Abi::Scalar(tag);
+ let variants = variants.iter().map(|x| x.iter().collect::<Vec<_>>()).collect::<Vec<_>>();
+ let variants = variants.iter().map(|x| x.iter().collect()).collect();
+ if is_union {
+ cx.layout_of_union(&repr, &variants).ok_or(LayoutError::Unknown)
} else {
- // Try to use a ScalarPair for all tagged enums.
- let mut common_prim = None;
- let mut common_prim_initialized_in_all_variants = true;
- for ((_, field_layouts), (_, layout_variant)) in
- iter::zip(variants.iter(), layout_variants.iter())
- {
- let offsets = match layout_variant.fields {
- FieldsShape::Arbitrary { ref offsets, .. } => offsets,
- _ => user_error!("bug"),
- };
- let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
- let (field, offset) = match (fields.next(), fields.next()) {
- (None, None) => {
- common_prim_initialized_in_all_variants = false;
- continue;
- }
- (Some(pair), None) => pair,
- _ => {
- common_prim = None;
- break;
- }
- };
- let prim = match field.abi {
- Abi::Scalar(scalar) => {
- common_prim_initialized_in_all_variants &=
- matches!(scalar, Scalar::Initialized { .. });
- scalar.primitive()
- }
- _ => {
- common_prim = None;
- break;
- }
- };
- if let Some(pair) = common_prim {
- // This is pretty conservative. We could go fancier
- // by conflating things like i32 and u32, or even
- // realising that (u8, u8) could just cohabit with
- // u16 or even u32.
- if pair != (prim, offset) {
- common_prim = None;
- break;
- }
- } else {
- common_prim = Some((prim, offset));
- }
- }
- if let Some((prim, offset)) = common_prim {
- let prim_scalar = if common_prim_initialized_in_all_variants {
- scalar_unit(dl, prim)
- } else {
- // Common prim might be uninit.
- Scalar::Union { value: prim }
- };
- let pair = scalar_pair(dl, tag, prim_scalar);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => user_error!("bug"),
- };
- if pair_offsets[0] == Size::ZERO
- && pair_offsets[1] == *offset
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- }
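// A sketch of what the ScalarPair search enables: an enum whose variants all
// carry one `u32` can be passed around as two scalars (tag, payload) rather
// than as an opaque aggregate, e.g. on a typical 64-bit target:
//
//     enum E { A(u32), B(u32) } // Abi::ScalarPair(tag, u32), size = 8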
-
- // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
- // variants to ensure they are consistent. This is because a downcast is
- // semantically a NOP, and thus should not affect layout.
- if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
- for (_, variant) in layout_variants.iter_mut() {
- // We only do this for variants with fields; the others are not accessed anyway.
- // Also do not overwrite any already existing "clever" ABIs.
- if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
- variant.abi = abi;
- // Also need to bump up the size and alignment, so that the entire value fits in here.
- variant.size = cmp::max(variant.size, size);
- variant.align.abi = cmp::max(variant.align.abi, align.abi);
- }
- }
+ cx.layout_of_struct_or_enum(
+ &repr,
+ &variants,
+ is_enum,
+ is_unsafe_cell(def, db),
+ layout_scalar_valid_range(db, def),
+ |min, max| Integer::repr_discr(&dl, &repr, min, max).unwrap_or((Integer::I8, false)),
+ variants.iter_enumerated().filter_map(|(id, _)| {
+ let AdtId::EnumId(e) = def else { return None };
+ let d = match db
+ .const_eval_variant(EnumVariantId { parent: e, local_id: id.0 })
+ .ok()?
+ {
+ crate::consteval::ComputedExpr::Literal(l) => match l {
+ hir_def::expr::Literal::Int(i, _) => i,
+ hir_def::expr::Literal::Uint(i, _) => i as i128,
+ _ => return None,
+ },
+ _ => return None,
+ };
+ Some((id, d))
+ }),
+ // FIXME: The current code for niche-filling relies on variant indices
+ // instead of actual discriminants, so enums with
+ // explicit discriminants (RFC #2363) would misbehave, so we disable
+ // niche optimization for them.
+ // The code that does this in rustc:
+ // repr.inhibit_enum_layout_opt() || def
+ // .variants()
+ // .iter_enumerated()
+ // .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
+ repr.inhibit_enum_layout_opt(),
+ !is_enum
+ && variants
+ .iter()
+ .next()
+ .and_then(|x| x.last().map(|x| x.is_unsized()))
+ .unwrap_or(true),
+ )
+ .ok_or(LayoutError::SizeOverflow)
}
-
- let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
- let tagged_layout = Layout {
- variants: Variants::Multiple {
- tag,
- tag_encoding: TagEncoding::Direct,
- tag_field: 0,
- variants: ArenaMap::new(),
- },
- fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
- largest_niche,
- abi,
- align,
- size,
- };
-
- let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
-
- let mut best_layout = match (tagged_layout, niche_filling_layout) {
- (tl, Some(nl)) => {
- // Pick the smaller layout; otherwise,
- // pick the layout with the larger niche; otherwise,
- // pick tagged as it has simpler codegen.
- use Ordering::*;
- let niche_size =
- |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
- match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
- (Greater, _) => nl,
- (Equal, Less) => nl,
- _ => tl,
- }
- }
- (tl, None) => tl,
- };
-
- // Now we can intern the variant layouts and store them in the enum layout.
- best_layout.layout.variants = match best_layout.layout.variants {
- Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
- Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
- }
- _ => user_error!("bug"),
- };
-
- Ok(best_layout.layout)
}
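// `layout_scalar_valid_range` reads the perma-unstable attributes that give
// the standard library's NonZero types their niche. A minimal nightly-only
// sketch of the pattern it detects:
//
//     #[rustc_layout_scalar_valid_range_start(1)]
//     struct NonZeroU8(u8); // valid range 1..=255
//     // `Option<NonZeroU8>` is 1 byte: `None` takes the excluded 0.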
fn layout_scalar_valid_range(db: &dyn HirDatabase, def: AdtId) -> (Bound<u128>, Bound<u128>) {
@@ -649,302 +131,3 @@ pub fn layout_of_adt_recover(
) -> Result<Layout, LayoutError> {
user_error!("infinite sized recursive type");
}
-
-pub(crate) fn univariant(
- dl: &TargetDataLayout,
- fields: &[Layout],
- repr: &ReprOptions,
- kind: StructKind,
-) -> Result<Layout, LayoutError> {
- let pack = repr.pack;
- if pack.is_some() && repr.align.is_some() {
- user_error!("Struct can not be packed and aligned");
- }
-
- let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
-
- let optimize = !repr.inhibit_struct_field_reordering_opt();
- if optimize {
- let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
- let optimizing = &mut inverse_memory_index[..end];
- let field_align = |f: &Layout| {
- if let Some(pack) = pack {
- f.align.abi.min(pack)
- } else {
- f.align.abi
- }
- };
-
- match kind {
- StructKind::AlwaysSized | StructKind::MaybeUnsized => {
- optimizing.sort_by_key(|&x| {
- // Place ZSTs first to avoid "interesting offsets",
- // especially with only one or two non-ZST fields.
- let f = &fields[x as usize];
- (!f.is_zst(), cmp::Reverse(field_align(f)))
- });
- }
-
- StructKind::Prefixed(..) => {
- // Sort in ascending alignment so that the layout stays optimal
- // regardless of the prefix
- optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
- }
- }
- }
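// The effect of this reordering, sketched: without `#[repr(C)]` fields may
// be sorted by decreasing alignment, so a (u8, u32, u8) struct packs into
// 8 bytes instead of the 12 that declaration order would need:
//
//     struct S(u8, u32, u8);            // size = 8 after reordering
//     #[repr(C)] struct C(u8, u32, u8); // size = 12, declaration order kept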
-
- // inverse_memory_index holds field indices by increasing memory offset.
- // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
- // We now write field offsets to the corresponding offset slot;
- // field 5 with offset 0 puts 0 in offsets[5].
- // At the bottom of this function, we invert `inverse_memory_index` to
- // produce `memory_index` (see `invert_mapping`).
-
- let mut sized = true;
- let mut offsets = vec![Size::ZERO; fields.len()];
- let mut offset = Size::ZERO;
- let mut largest_niche = None;
- let mut largest_niche_available = 0;
-
- if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
- let prefix_align =
- if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
- align = align.max(AbiAndPrefAlign::new(prefix_align));
- offset = prefix_size.align_to(prefix_align);
- }
-
- for &i in &inverse_memory_index {
- let field = &fields[i as usize];
- if !sized {
- user_error!("Unsized field is not last field");
- }
-
- if field.is_unsized() {
- sized = false;
- }
-
- // Invariant: offset < dl.obj_size_bound() <= 1<<61
- let field_align = if let Some(pack) = pack {
- field.align.min(AbiAndPrefAlign::new(pack))
- } else {
- field.align
- };
- offset = offset.align_to(field_align.abi);
- align = align.max(field_align);
-
- offsets[i as usize] = offset;
-
- if let Some(mut niche) = field.largest_niche {
- let available = niche.available(dl);
- if available > largest_niche_available {
- largest_niche_available = available;
- niche.offset =
- niche.offset.checked_add(offset, dl).ok_or(LayoutError::SizeOverflow)?;
- largest_niche = Some(niche);
- }
- }
-
- offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow)?;
- }
-
- if let Some(repr_align) = repr.align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- let min_size = offset;
-
- // As stated above, inverse_memory_index holds field indices by increasing offset.
- // This makes it an already-sorted view of the offsets vec.
- // To invert it, consider:
- // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5] should be 0.
- // Field 5 comes first in memory order, so memory_index[5] is 0.
- // Note: if we didn't optimize, it's already right.
-
- let memory_index =
- if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
-
- let size = min_size.align_to(align.abi);
- let mut abi = Abi::Aggregate { sized };
-
- // Unpack newtype ABIs and find scalar pairs.
- if sized && size.bytes() > 0 {
- // All other fields must be ZSTs.
- let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
- match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
- // We have exactly one non-ZST field.
- (Some((i, field)), None, None) => {
- // Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
- match field.abi {
- // For plain scalars, or vectors of them, we can't unpack
- // newtypes for `#[repr(C)]`, as that affects C ABIs.
- Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi;
- }
- // But scalar pairs are Rust-specific and get
- // treated as aggregates by C ABIs anyway.
- Abi::ScalarPair(..) => {
- abi = field.abi;
- }
- _ => {}
- }
- }
- }
-
- // Two non-ZST fields, and they're both scalars.
- (Some((i, a)), Some((j, b)), None) => {
- match (a.abi, b.abi) {
- (Abi::Scalar(a), Abi::Scalar(b)) => {
- // Order by the memory placement, not source order.
- let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
- ((i, a), (j, b))
- } else {
- ((j, b), (i, a))
- };
- let pair = scalar_pair(dl, a, b);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, .. } => offsets,
- _ => unreachable!(),
- };
- if offsets[i] == pair_offsets[0]
- && offsets[j] == pair_offsets[1]
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- _ => {}
- }
- }
-
- _ => {}
- }
- }
-
- if fields.iter().any(|f| f.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- }
-
- Ok(Layout {
- variants: Variants::Single,
- fields: FieldsShape::Arbitrary { offsets, memory_index },
- abi,
- largest_niche,
- align,
- size,
- })
-}
-
-fn layout_of_union(
- db: &dyn HirDatabase,
- id: UnionId,
- subst: &Substitution,
-) -> Result<Layout, LayoutError> {
- let dl = &*db.current_target_data_layout();
-
- let union_data = db.union_data(id);
-
- let repr = union_data.repr.unwrap_or_default();
- let fields = union_data.variant_data.fields();
-
- if repr.pack.is_some() && repr.align.is_some() {
- user_error!("union cannot be packed and aligned");
- }
-
- let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
- if let Some(repr_align) = repr.align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- let optimize = !repr.inhibit_union_abi_opt();
- let mut size = Size::ZERO;
- let mut abi = Abi::Aggregate { sized: true };
- for (fd, _) in fields.iter() {
- let field_ty = field_ty(db, id.into(), fd, subst);
- let field = layout_of_ty(db, &field_ty)?;
- if field.is_unsized() {
- user_error!("unsized union field");
- }
- // If all non-ZST fields have the same ABI, forward this ABI
- if optimize && !field.is_zst() {
- // Discard valid range information and allow undef
- let field_abi = match field.abi {
- Abi::Scalar(x) => Abi::Scalar(x.to_union()),
- Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
- Abi::Vector { element: x, count } => Abi::Vector { element: x.to_union(), count },
- Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
- };
-
- if size == Size::ZERO {
- // first non-ZST field: initialize `abi`
- abi = field_abi;
- } else if abi != field_abi {
- // different fields have different ABI: reset to Aggregate
- abi = Abi::Aggregate { sized: true };
- }
- }
-
- size = cmp::max(size, field.size);
- }
-
- if let Some(pack) = repr.pack {
- align = align.min(AbiAndPrefAlign::new(pack));
- }
-
- Ok(Layout {
- variants: Variants::Single,
- fields: FieldsShape::Union(
- NonZeroUsize::new(fields.len())
- .ok_or(LayoutError::UserError("union with zero fields".to_string()))?,
- ),
- abi,
- largest_niche: None,
- align,
- size: size.align_to(align.abi),
- })
-}
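// Union layout in one line: every field starts at offset 0, so the size is
// just the largest field size rounded up to the union's alignment, e.g.:
//
//     union U { a: u8, b: u32 } // size = 4, align = 4, fields overlap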
-
-// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
-// This is used to go between `memory_index` (source field order to memory order)
-// and `inverse_memory_index` (memory order to source field order).
-// See also `FieldsShape::Arbitrary::memory_index` for more details.
-// FIXME(eddyb) build a better abstraction for permutations, if possible.
-fn invert_mapping(map: &[u32]) -> Vec<u32> {
- let mut inverse = vec![0; map.len()];
- for i in 0..map.len() {
- inverse[map[i] as usize] = i as u32;
- }
- inverse
-}
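// A worked inversion: with map = [2, 0, 1] (source field 0 sits third in
// memory, field 1 first, field 2 second), the inverse permutation is:
//
//     assert_eq!(invert_mapping(&[2, 0, 1]), vec![1, 2, 0]);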
-
-fn scalar_pair(dl: &TargetDataLayout, a: Scalar, b: Scalar) -> Layout {
- let b_align = b.align(dl);
- let align = a.align(dl).max(b_align).max(dl.aggregate_align);
- let b_offset = a.size(dl).align_to(b_align.abi);
- let size = b_offset.checked_add(b.size(dl), dl).unwrap().align_to(align.abi);
-
- // HACK(nox): We iter on `b` and then `a` because `max_by_key`
- // returns the last maximum.
- let largest_niche = Niche::from_scalar(dl, b_offset, b)
- .into_iter()
- .chain(Niche::from_scalar(dl, Size::ZERO, a))
- .max_by_key(|niche| niche.available(dl));
-
- Layout {
- variants: Variants::Single,
- fields: FieldsShape::Arbitrary {
- offsets: vec![Size::ZERO, b_offset],
- memory_index: vec![0, 1],
- },
- abi: Abi::ScalarPair(a, b),
- largest_niche,
- align,
- size,
- }
-}
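// A worked example of `scalar_pair`: pairing a `u8` with a `u32` yields
// b_offset = size(u8).align_to(align(u32)) = 4, so the pair has offsets
// [0, 4], align = 4 and size = (4 + 4).align_to(4) = 8.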
diff --git a/crates/hir-ty/src/layout/target.rs b/crates/hir-ty/src/layout/target.rs
index ba810b12b1..48b1a68d51 100644
--- a/crates/hir-ty/src/layout/target.rs
+++ b/crates/hir-ty/src/layout/target.rs
@@ -35,7 +35,7 @@ pub fn current_target_data_layout_query(db: &dyn HirDatabase) -> Arc<TargetDataL
f32_align: AbiAndPrefAlign::new(Align::from_bytes(4).unwrap()),
f64_align: AbiAndPrefAlign::new(Align::from_bytes(8).unwrap()),
pointer_size,
- pointer_align: AbiAndPrefAlign::new(Align::from_bytes(8).unwrap()),
+ pointer_align: AbiAndPrefAlign::new(Align::from_bytes(pointer_size.bytes()).unwrap()),
aggregate_align: AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()),
vector_align: vec![],
instruction_address_space: AddressSpace(0),
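The `pointer_align` change makes this hard-coded data layout honest on 32-bit targets: there `pointer_size` is 4 bytes, so deriving the alignment from it yields 4 rather than the previously hard-coded 8, while 64-bit targets are unaffected.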
diff --git a/crates/hir-ty/src/layout/tests.rs b/crates/hir-ty/src/layout/tests.rs
index 1cd6d4eae2..5d97a69501 100644
--- a/crates/hir-ty/src/layout/tests.rs
+++ b/crates/hir-ty/src/layout/tests.rs
@@ -49,6 +49,17 @@ fn check_fail(ra_fixture: &str, e: LayoutError) {
}
macro_rules! size_and_align {
+ (minicore: $($x:tt),*;$($t:tt)*) => {
+ {
+ #[allow(dead_code)]
+ $($t)*
+ check_size_and_align(
+ &format!("//- minicore: {}\n{}", stringify!($($x),*), stringify!($($t)*)),
+ ::std::mem::size_of::<Goal>() as u64,
+ ::std::mem::align_of::<Goal>() as u64,
+ );
+ }
+ };
($($t:tt)*) => {
{
#[allow(dead_code)]
@@ -67,7 +78,6 @@ fn hello_world() {
size_and_align! {
struct Goal(i32);
}
- //check_size_and_align(r#"struct Goal(i32)"#, 4, 4);
}
#[test]
@@ -148,33 +158,39 @@ fn tuple() {
#[test]
fn non_zero() {
- check_size_and_align(
- r#"
- //- minicore: non_zero, option
- use core::num::NonZeroU8;
- struct Goal(Option<NonZeroU8>);
- "#,
- 1,
- 1,
- );
+ size_and_align! {
+ minicore: non_zero, option;
+ use core::num::NonZeroU8;
+ struct Goal(Option<NonZeroU8>);
+ }
}
#[test]
fn niche_optimization() {
- check_size_and_align(
- r#"
- //- minicore: option
- struct Goal(Option<&i32>);
- "#,
- 8,
- 8,
- );
- check_size_and_align(
- r#"
- //- minicore: option
- struct Goal(Option<Option<bool>>);
- "#,
- 1,
- 1,
- );
+ size_and_align! {
+ minicore: option;
+ struct Goal(Option<&'static i32>);
+ }
+ size_and_align! {
+ minicore: option;
+ struct Goal(Option<Option<bool>>);
+ }
+}
+
+#[test]
+fn enums_with_discriminants() {
+ size_and_align! {
+ enum Goal {
+ A = 1000,
+ B = 2000,
+ C = 3000,
+ }
+ }
+ size_and_align! {
+ enum Goal {
+ A = 254,
+ B,
+ C, // implicitly becomes 256, so we need two bytes
+ }
+ }
}
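As a check on the second case: `A = 254` makes `B` implicitly 255 and `C` 256, which no 8-bit integer can represent, so `Integer::repr_discr` has to pick a 16-bit tag and `size_of::<Goal>()` comes out as 2.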
diff --git a/crates/hir/src/lib.rs b/crates/hir/src/lib.rs
index 42b7c0781b..1b54091539 100644
--- a/crates/hir/src/lib.rs
+++ b/crates/hir/src/lib.rs
@@ -994,8 +994,30 @@ impl Enum {
Type::new_for_crate(
self.id.lookup(db.upcast()).container.krate(),
TyBuilder::builtin(match db.enum_data(self.id).variant_body_type() {
- Either::Left(builtin) => hir_def::builtin_type::BuiltinType::Int(builtin),
- Either::Right(builtin) => hir_def::builtin_type::BuiltinType::Uint(builtin),
+ hir_def::layout::IntegerType::Pointer(sign) => match sign {
+ true => hir_def::builtin_type::BuiltinType::Int(
+ hir_def::builtin_type::BuiltinInt::Isize,
+ ),
+ false => hir_def::builtin_type::BuiltinType::Uint(
+ hir_def::builtin_type::BuiltinUint::Usize,
+ ),
+ },
+ hir_def::layout::IntegerType::Fixed(i, sign) => match sign {
+ true => hir_def::builtin_type::BuiltinType::Int(match i {
+ hir_def::layout::Integer::I8 => hir_def::builtin_type::BuiltinInt::I8,
+ hir_def::layout::Integer::I16 => hir_def::builtin_type::BuiltinInt::I16,
+ hir_def::layout::Integer::I32 => hir_def::builtin_type::BuiltinInt::I32,
+ hir_def::layout::Integer::I64 => hir_def::builtin_type::BuiltinInt::I64,
+ hir_def::layout::Integer::I128 => hir_def::builtin_type::BuiltinInt::I128,
+ }),
+ false => hir_def::builtin_type::BuiltinType::Uint(match i {
+ hir_def::layout::Integer::I8 => hir_def::builtin_type::BuiltinUint::U8,
+ hir_def::layout::Integer::I16 => hir_def::builtin_type::BuiltinUint::U16,
+ hir_def::layout::Integer::I32 => hir_def::builtin_type::BuiltinUint::U32,
+ hir_def::layout::Integer::I64 => hir_def::builtin_type::BuiltinUint::U64,
+ hir_def::layout::Integer::I128 => hir_def::builtin_type::BuiltinUint::U128,
+ }),
+ },
}),
)
}
diff --git a/crates/ide/src/hover/render.rs b/crates/ide/src/hover/render.rs
index 470c6626f9..f37c9f4a6d 100644
--- a/crates/ide/src/hover/render.rs
+++ b/crates/ide/src/hover/render.rs
@@ -3,8 +3,7 @@ use std::fmt::Display;
use either::Either;
use hir::{
- db::HirDatabase, Adt, AsAssocItem, AttributeTemplate, HasAttrs, HasSource, HirDisplay,
- Semantics, TypeInfo,
+ Adt, AsAssocItem, AttributeTemplate, HasAttrs, HasSource, HirDisplay, Semantics, TypeInfo,
};
use ide_db::{
base_db::SourceDatabase,
@@ -398,7 +397,7 @@ pub(super) fn definition(
let offset = match var_def {
hir::VariantDef::Struct(s) => {
let layout = Adt::from(s).layout(db).ok()?;
- layout.fields.offset(id, &db.current_target_data_layout())
+ layout.fields.offset(id)
}
_ => return None,
};
diff --git a/crates/ide/src/hover/tests.rs b/crates/ide/src/hover/tests.rs
index f630c3b36d..f82fd6d028 100644
--- a/crates/ide/src/hover/tests.rs
+++ b/crates/ide/src/hover/tests.rs
@@ -537,7 +537,7 @@ struct Foo { fiel$0d_a: u8, field_b: i32, field_c: i16 }
```
```rust
- field_a: u8 // size = 1, align = 1, offset = 6
+ field_a: u8 // size = 1, align = 1, offset = 4
```
"#]],
);