diff --git a/rust-toolchain b/rust-toolchain index 3d64e544353..796a468daab 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2026-03-26" +channel = "nightly-2026-04-05" components = ["rust-src", "rustc-dev", "llvm-tools-preview"] diff --git a/src/builder.rs b/src/builder.rs index 6add7f05c2a..3cffd862b9b 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -24,7 +24,8 @@ use rustc_data_structures::fx::FxHashSet; use rustc_middle::bug; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{ - FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, LayoutOfHelpers, + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, + LayoutOfHelpers, TyAndLayout, }; use rustc_middle::ty::{self, AtomicOrdering, Instance, Ty, TyCtxt}; use rustc_span::Span; @@ -943,8 +944,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { .get_address(self.location) } - fn scalable_alloca(&mut self, _elt: u64, _align: Align, _element_ty: Ty<'_>) -> RValue<'gcc> { - todo!() + fn alloca_with_ty(&mut self, ty: TyAndLayout<'tcx>) -> RValue<'gcc> { + self.alloca(ty.layout.size, ty.layout.align.abi) } fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { @@ -1656,10 +1657,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - fn get_funclet_cleanuppad(&self, _funclet: &Funclet) -> RValue<'gcc> { - unimplemented!(); - } - // Atomic Operations fn atomic_cmpxchg( &mut self, @@ -2278,6 +2275,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { }) } + /// Emits a SIMD min/max operation for floats. The semantics for each lane are: if one + /// side is NaN (QNaN or SNaN), the other side is returned. 
fn vector_extremum( &mut self, a: RValue<'gcc>, @@ -2286,8 +2285,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { ) -> RValue<'gcc> { let vector_type = a.get_type(); - // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and - // b get compared & spliced together, we get the numeric values instead of NaNs. + // Mask out the NaNs (both QNaN and SNaN) in b and replace them with the corresponding lane + // in a, so when a and b get compared & spliced together, we get the numeric values instead + // of NaNs. let b_nan_mask = self.context.new_comparison(self.location, ComparisonOp::NotEquals, b, b); let mask_type = b_nan_mask.get_type(); let b_nan_mask_inverted = @@ -2309,7 +2309,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.context.new_bitcast(self.location, res, vector_type) } - pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + pub fn vector_minimum_number_nsz(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { self.vector_extremum(a, b, ExtremumOperation::Min) } @@ -2341,7 +2341,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - pub fn vector_fmax(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + pub fn vector_maximum_number_nsz(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { self.vector_extremum(a, b, ExtremumOperation::Max) } diff --git a/src/common.rs b/src/common.rs index 9e548ac0a8b..dd0064d34bc 100644 --- a/src/common.rs +++ b/src/common.rs @@ -145,6 +145,10 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { self.const_int(self.type_i32(), i as i64) } + fn const_i64(&self, i: i64) -> RValue<'gcc> { + self.const_int(self.type_i64(), i) + } + fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { self.gcc_int(typ, int) } diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 36e5543b0c4..ee17f4e2f55 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -4,9 +4,7 @@ mod simd; 
#[cfg(feature = "master")] use std::iter; -#[cfg(feature = "master")] -use gccjit::Type; -use gccjit::{ComparisonOp, Function, FunctionType, RValue, ToRValue, UnaryOp}; +use gccjit::{ComparisonOp, Function, FunctionType, RValue, ToRValue, Type, UnaryOp}; #[cfg(feature = "master")] use rustc_abi::ExternAbi; use rustc_abi::{BackendRepr, HasDataLayout, WrappingRange}; @@ -23,11 +21,11 @@ use rustc_codegen_ssa::traits::{ IntrinsicCallBuilderMethods, LayoutTypeCodegenMethods, }; use rustc_data_structures::fx::FxHashSet; -use rustc_middle::bug; #[cfg(feature = "master")] use rustc_middle::ty::layout::FnAbiOf; use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Instance, Ty}; +use rustc_middle::{bug, span_bug}; use rustc_span::{Span, Symbol, sym}; use rustc_target::callconv::{ArgAbi, PassMode}; @@ -40,6 +38,22 @@ use crate::context::CodegenCx; use crate::intrinsic::simd::generic_simd_intrinsic; use crate::type_of::LayoutGccExt; +fn float_intrinsic<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + typ: Type<'gcc>, + name: &str, +) -> Option<Function<'gcc>> { + // GCC doesn't have the intrinsic we want so we use the compiler-builtins one + Some(cx.context.new_function( + None, + FunctionType::Extern, + typ, + &[cx.context.new_parameter(None, typ, "a"), cx.context.new_parameter(None, typ, "b")], + name, + false, + )) +} + fn get_simple_intrinsic<'gcc, 'tcx>( cx: &CodegenCx<'gcc, 'tcx>, name: Symbol, @@ -70,14 +84,19 @@ fn get_simple_intrinsic<'gcc, 'tcx>( // FIXME: calling `fma` from libc without FMA target feature uses expensive software emulation sym::fmuladdf32 => "fmaf", // FIXME: use gcc intrinsic analogous to llvm.fmuladd.f32 sym::fmuladdf64 => "fma", // FIXME: use gcc intrinsic analogous to llvm.fmuladd.f64 - sym::fabsf32 => "fabsf", - sym::fabsf64 => "fabs", + sym::minimumf32 => return float_intrinsic(cx, cx.type_f32(), "fminimumf"), + sym::minimumf64 => return float_intrinsic(cx, cx.type_f64(), "fminimum"), + sym::minimumf128 => return float_intrinsic(cx, 
cx.type_f128(), "fminimumf128"), + sym::maximumf32 => return float_intrinsic(cx, cx.type_f32(), "fmaximumf"), + sym::maximumf64 => return float_intrinsic(cx, cx.type_f64(), "fmaximum"), + sym::maximumf128 => return float_intrinsic(cx, cx.type_f128(), "fmaximumf128"), sym::copysignf32 => "copysignf", sym::copysignf64 => "copysign", sym::floorf32 => "floorf", sym::floorf64 => "floor", sym::ceilf32 => "ceilf", sym::ceilf64 => "ceil", + sym::powf128 => return float_intrinsic(cx, cx.type_f128(), "powf128"), sym::truncf32 => "truncf", sym::truncf64 => "trunc", // We match the LLVM backend and lower this to `rint`. @@ -92,13 +111,14 @@ fn get_simple_intrinsic<'gcc, 'tcx>( } fn get_simple_function_f128<'gcc, 'tcx>( + span: Span, cx: &CodegenCx<'gcc, 'tcx>, name: Symbol, ) -> Function<'gcc> { let f128_type = cx.type_f128(); let func_name = match name { sym::ceilf128 => "ceilf128", - sym::fabsf128 => "fabsf128", + sym::fabs => "fabsf128", sym::expf128 => "expf128", sym::exp2f128 => "exp2f128", sym::floorf128 => "floorf128", @@ -109,7 +129,7 @@ fn get_simple_function_f128<'gcc, 'tcx>( sym::roundf128 => "roundf128", sym::round_ties_even_f128 => "roundevenf128", sym::sqrtf128 => "sqrtf128", - _ => unreachable!(), + _ => span_bug!(span, "used get_simple_function_f128 for non-unary f128 intrinsic"), }; cx.context.new_function( None, @@ -121,34 +141,6 @@ fn get_simple_function_f128<'gcc, 'tcx>( ) } -fn get_simple_function_f128_2args<'gcc, 'tcx>( - cx: &CodegenCx<'gcc, 'tcx>, - name: Symbol, -) -> Function<'gcc> { - let f128_type = cx.type_f128(); - let func_name = match name { - // GCC doesn't have the intrinsic we want so we use the compiler-builtins one - // https://docs.rs/compiler_builtins/latest/compiler_builtins/math/full_availability/fn.fmaximumf128.html - // https://docs.rs/compiler_builtins/latest/compiler_builtins/math/full_availability/fn.fminimumf128.html - sym::maximumf128 => "fmaximumf128", - sym::minimumf128 => "fminimumf128", - sym::copysignf128 => "copysignf128", 
- sym::powf128 => "powf128", - _ => unreachable!(), - }; - cx.context.new_function( - None, - FunctionType::Extern, - f128_type, - &[ - cx.context.new_parameter(None, f128_type, "a"), - cx.context.new_parameter(None, f128_type, "b"), - ], - func_name, - false, - ) -} - fn f16_builtin<'gcc, 'tcx>( cx: &CodegenCx<'gcc, 'tcx>, name: Symbol, @@ -158,9 +150,9 @@ let builtin_name = match name { sym::ceilf16 => "__builtin_ceilf", sym::copysignf16 => "__builtin_copysignf", + sym::fabs => "fabsf", sym::expf16 => "expf", sym::exp2f16 => "exp2f", - sym::fabsf16 => "fabsf", sym::floorf16 => "__builtin_floorf", sym::fmaf16 => "fmaf", sym::logf16 => "logf", @@ -236,7 +228,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc | sym::copysignf16 | sym::expf16 | sym::exp2f16 - | sym::fabsf16 + | sym::fabs | sym::floorf16 | sym::fmaf16 | sym::logf16 @@ -248,7 +240,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc | sym::sqrtf16 | sym::truncf16 => f16_builtin(self, name, args), sym::ceilf128 - | sym::fabsf128 | sym::floorf128 | sym::truncf128 | sym::roundf128 @@ -261,18 +252,29 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc | sym::log10f128 if self.cx.supports_f128_type => { + let func = get_simple_function_f128(span, self, name); self.cx.context.new_call( self.location, - get_simple_function_f128(self, name), + func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), ) } - sym::maximumf128 | sym::minimumf128 | sym::copysignf128 | sym::powf128 - if self.cx.supports_f128_type => - { + sym::copysignf128 if self.cx.supports_f128_type => { + let f128_type = self.cx.type_f128(); + let func = self.cx.context.new_function( + None, + FunctionType::Extern, + f128_type, + &[ + self.cx.context.new_parameter(None, f128_type, "a"), + self.cx.context.new_parameter(None, f128_type, "b"), + ], + "copysignf128", + false, + ); self.cx.context.new_call( self.location, 
- get_simple_function_f128_2args(self, name), + func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), ) } @@ -440,6 +442,23 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc } } } + sym::fabs => 'fabs: { + let ty = args[0].layout.ty; + let ty::Float(float_ty) = *ty.kind() else { + span_bug!(span, "expected float type for fabs intrinsic: {:?}", ty); + }; + let func = match float_ty { + ty::FloatTy::F16 => break 'fabs f16_builtin(self, name, args), + ty::FloatTy::F32 => self.context.get_builtin_function("fabsf"), + ty::FloatTy::F64 => self.context.get_builtin_function("fabs"), + ty::FloatTy::F128 => get_simple_function_f128(span, self, name), + }; + self.cx.context.new_call( + self.location, + func, + &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), + ) + } sym::raw_eq => { use rustc_abi::BackendRepr::*; diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index 1263b2285a8..4ca890fee19 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -811,7 +811,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }}; } let ty::Float(ref f) = *in_elem.kind() else { - return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty }); + return_error!(InvalidMonomorphization::BasicFloatType { span, name, ty: in_ty }); }; let elem_ty = bx.cx.type_float_from_ty(*f); let (elem_ty_str, elem_ty, cast_type) = match f.bit_width() { @@ -1222,8 +1222,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( simd_and: Uint, Int => and; simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors. simd_xor: Uint, Int => xor; - simd_fmin: Float => vector_fmin; - simd_fmax: Float => vector_fmax; + simd_minimum_number_nsz: Float => vector_minimum_number_nsz; + simd_maximum_number_nsz: Float => vector_maximum_number_nsz; } macro_rules! 
arith_unary { diff --git a/tests/failing-ui-tests.txt b/tests/failing-ui-tests.txt index 5739b2cbd5d..1257193bf96 100644 --- a/tests/failing-ui-tests.txt +++ b/tests/failing-ui-tests.txt @@ -103,3 +103,5 @@ tests/ui/lto/all-crates.rs tests/ui/consts/const-eval/c-variadic.rs tests/ui/eii/default/call_default_panics.rs tests/ui/explicit-tail-calls/indirect.rs +tests/ui/traits/inheritance/self-in-supertype.rs +tests/ui/fmt/fmt_debug/shallow.rs