Auto merge of #145077 - Zalathar:rollup-0k4194x, r=Zalathar

Rollup of 19 pull requests

Successful merges:

 - rust-lang/rust#144400 (`tests/ui/issues/`: The Issues Strike Back [3/N])
 - rust-lang/rust#144764 ([codegen] assume the tag, not the relative discriminant)
 - rust-lang/rust#144807 (Streamline config in bootstrap)
 - rust-lang/rust#144899 (Print CGU reuse statistics in `-Zprint-mono-items`)
 - rust-lang/rust#144909 (Add new `test::print_merged_doctests_times` used by rustdoc to display more detailed time information)
 - rust-lang/rust#144912 (Resolver: introduce a conditionally mutable Resolver for (non-)speculative resolution.)
 - rust-lang/rust#144914 (Add support for `ty::Instance` path shortening in diagnostics)
 - rust-lang/rust#144931 ([win][arm64ec] Fix msvc-wholearchive for Arm64EC)
 - rust-lang/rust#144999 (coverage: Remove all unstable support for MC/DC instrumentation)
 - rust-lang/rust#145009 (A couple small changes for rust-analyzer next-solver work)
 - rust-lang/rust#145030 (GVN: Do not flatten derefs with ProjectionElem::Index.)
 - rust-lang/rust#145042 (stdarch subtree update)
 - rust-lang/rust#145047 (move `type_check` out of `compute_regions`)
 - rust-lang/rust#145051 (Prevent name collisions with internal implementation details)
 - rust-lang/rust#145053 (Add a lot of NLL `known-bug` tests)
 - rust-lang/rust#145055 (Move metadata symbol export from exported_non_generic_symbols to exported_symbols)
 - rust-lang/rust#145057 (Clean up some resolved test regressions of const trait removals in std)
 - rust-lang/rust#145068 (Readd myself to review queue)
 - rust-lang/rust#145070 (Add minimal `armv7a-vex-v5` tier three target)

r? `@ghost`
`@rustbot` modify labels: rollup
This commit is contained in:
bors 2025-08-08 05:59:00 +00:00
commit 2886b36df4
265 changed files with 8661 additions and 9717 deletions

View file

@ -19,6 +19,7 @@ use std::borrow::Cow;
use std::cell::{OnceCell, RefCell};
use std::marker::PhantomData;
use std::ops::{ControlFlow, Deref};
use std::rc::Rc;
use borrow_set::LocalsStateAtExit;
use root_cx::BorrowCheckRootCtxt;
@ -44,6 +45,7 @@ use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces}
use rustc_mir_dataflow::move_paths::{
InitIndex, InitLocation, LookupResult, MoveData, MovePathIndex,
};
use rustc_mir_dataflow::points::DenseLocationMap;
use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor, visit_results};
use rustc_session::lint::builtin::{TAIL_EXPR_DROP_ORDER, UNUSED_MUT};
use rustc_span::{ErrorGuaranteed, Span, Symbol};
@ -60,11 +62,14 @@ use crate::path_utils::*;
use crate::place_ext::PlaceExt;
use crate::places_conflict::{PlaceConflictBias, places_conflict};
use crate::polonius::PoloniusDiagnosticsContext;
use crate::polonius::legacy::{PoloniusLocationTable, PoloniusOutput};
use crate::polonius::legacy::{
PoloniusFacts, PoloniusFactsExt, PoloniusLocationTable, PoloniusOutput,
};
use crate::prefixes::PrefixSet;
use crate::region_infer::RegionInferenceContext;
use crate::renumber::RegionCtxt;
use crate::session_diagnostics::VarNeedNotMut;
use crate::type_check::MirTypeckResults;
mod borrow_set;
mod borrowck_errors;
@ -321,7 +326,34 @@ fn do_mir_borrowck<'tcx>(
let locals_are_invalidated_at_exit = tcx.hir_body_owner_kind(def).is_fn_or_closure();
let borrow_set = BorrowSet::build(tcx, body, locals_are_invalidated_at_exit, &move_data);
// Compute non-lexical lifetimes.
let location_map = Rc::new(DenseLocationMap::new(body));
let polonius_input = root_cx.consumer.as_ref().map_or(false, |c| c.polonius_input())
|| infcx.tcx.sess.opts.unstable_opts.polonius.is_legacy_enabled();
let mut polonius_facts =
(polonius_input || PoloniusFacts::enabled(infcx.tcx)).then_some(PoloniusFacts::default());
// Run the MIR type-checker.
let MirTypeckResults {
constraints,
universal_region_relations,
opaque_type_values,
polonius_context,
} = type_check::type_check(
root_cx,
&infcx,
body,
&promoted,
universal_regions,
&location_table,
&borrow_set,
&mut polonius_facts,
&move_data,
Rc::clone(&location_map),
);
// Compute non-lexical lifetimes using the constraints computed
// by typechecking the MIR body.
let nll::NllOutput {
regioncx,
polonius_input,
@ -332,14 +364,19 @@ fn do_mir_borrowck<'tcx>(
} = nll::compute_regions(
root_cx,
&infcx,
universal_regions,
body,
&promoted,
&location_table,
&move_data,
&borrow_set,
location_map,
universal_region_relations,
constraints,
polonius_facts,
polonius_context,
);
regioncx.infer_opaque_types(root_cx, &infcx, opaque_type_values);
// Dump MIR results into a file, if that is enabled. This lets us
// write unit-tests, as well as helping with debugging.
nll::dump_nll_mir(&infcx, body, &regioncx, &opt_closure_req, &borrow_set);

View file

@ -5,7 +5,8 @@ use std::path::PathBuf;
use std::rc::Rc;
use std::str::FromStr;
use polonius_engine::{Algorithm, Output};
use polonius_engine::{Algorithm, AllFacts, Output};
use rustc_data_structures::frozen::Frozen;
use rustc_index::IndexSlice;
use rustc_middle::mir::pretty::{PrettyPrintMirOptions, dump_mir_with_options};
use rustc_middle::mir::{Body, PassWhere, Promoted, create_dump_file, dump_enabled, dump_mir};
@ -18,14 +19,16 @@ use rustc_span::sym;
use tracing::{debug, instrument};
use crate::borrow_set::BorrowSet;
use crate::consumers::RustcFacts;
use crate::diagnostics::RegionErrors;
use crate::handle_placeholders::compute_sccs_applying_placeholder_outlives_constraints;
use crate::polonius::PoloniusDiagnosticsContext;
use crate::polonius::legacy::{
PoloniusFacts, PoloniusFactsExt, PoloniusLocationTable, PoloniusOutput,
};
use crate::polonius::{PoloniusContext, PoloniusDiagnosticsContext};
use crate::region_infer::RegionInferenceContext;
use crate::type_check::{self, MirTypeckResults};
use crate::type_check::MirTypeckRegionConstraints;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::universal_regions::UniversalRegions;
use crate::{
BorrowCheckRootCtxt, BorrowckInferCtxt, ClosureOutlivesSubject, ClosureRegionRequirements,
@ -76,41 +79,18 @@ pub(crate) fn replace_regions_in_mir<'tcx>(
pub(crate) fn compute_regions<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
infcx: &BorrowckInferCtxt<'tcx>,
universal_regions: UniversalRegions<'tcx>,
body: &Body<'tcx>,
promoted: &IndexSlice<Promoted, Body<'tcx>>,
location_table: &PoloniusLocationTable,
move_data: &MoveData<'tcx>,
borrow_set: &BorrowSet<'tcx>,
location_map: Rc<DenseLocationMap>,
universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
constraints: MirTypeckRegionConstraints<'tcx>,
mut polonius_facts: Option<AllFacts<RustcFacts>>,
polonius_context: Option<PoloniusContext>,
) -> NllOutput<'tcx> {
let is_polonius_legacy_enabled = infcx.tcx.sess.opts.unstable_opts.polonius.is_legacy_enabled();
let polonius_input = root_cx.consumer.as_ref().map_or(false, |c| c.polonius_input())
|| is_polonius_legacy_enabled;
let polonius_output = root_cx.consumer.as_ref().map_or(false, |c| c.polonius_output())
|| is_polonius_legacy_enabled;
let mut polonius_facts =
(polonius_input || PoloniusFacts::enabled(infcx.tcx)).then_some(PoloniusFacts::default());
let location_map = Rc::new(DenseLocationMap::new(body));
// Run the MIR type-checker.
let MirTypeckResults {
constraints,
universal_region_relations,
opaque_type_values,
polonius_context,
} = type_check::type_check(
root_cx,
infcx,
body,
promoted,
universal_regions,
location_table,
borrow_set,
&mut polonius_facts,
move_data,
Rc::clone(&location_map),
);
|| infcx.tcx.sess.opts.unstable_opts.polonius.is_legacy_enabled();
let lowered_constraints = compute_sccs_applying_placeholder_outlives_constraints(
constraints,
@ -173,8 +153,6 @@ pub(crate) fn compute_regions<'tcx>(
infcx.set_tainted_by_errors(guar);
}
regioncx.infer_opaque_types(root_cx, infcx, opaque_type_values);
NllOutput {
regioncx,
polonius_input: polonius_facts.map(Box::new),

View file

@ -310,7 +310,10 @@ fn data_id_for_static(
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
let ref_name = format!(
"_rust_extern_with_linkage_{:016x}_{symbol_name}",
tcx.stable_crate_id(LOCAL_CRATE)
);
let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
let mut data = DataDescription::new();
data.set_align(align);

View file

@ -6,6 +6,7 @@ use rustc_codegen_ssa::traits::{
BaseTypeCodegenMethods, ConstCodegenMethods, StaticCodegenMethods,
};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint,
@ -384,8 +385,8 @@ fn check_and_apply_linkage<'gcc, 'tcx>(
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(sym);
let real_name =
format!("_rust_extern_with_linkage_{:016x}_{sym}", cx.tcx.stable_crate_id(LOCAL_CRATE));
let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section);
// TODO(antoyo): set linkage.
let value = cx.const_ptrcast(global1.get_address(None), gcc_type);

View file

@ -1886,48 +1886,4 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
) {
self.call_intrinsic("llvm.instrprof.increment", &[], &[fn_name, hash, num_counters, index]);
}
/// Emits a call to `llvm.instrprof.mcdc.parameters`.
///
/// This doesn't produce any code directly, but is used as input by
/// the LLVM pass that handles coverage instrumentation.
///
/// (See clang's [`CodeGenPGO::emitMCDCParameters`] for comparison.)
///
/// [`CodeGenPGO::emitMCDCParameters`]:
/// https://github.com/rust-lang/llvm-project/blob/5399a24/clang/lib/CodeGen/CodeGenPGO.cpp#L1124
#[instrument(level = "debug", skip(self))]
pub(crate) fn mcdc_parameters(
&mut self,
fn_name: &'ll Value,
hash: &'ll Value,
bitmap_bits: &'ll Value,
) {
self.call_intrinsic("llvm.instrprof.mcdc.parameters", &[], &[fn_name, hash, bitmap_bits]);
}
#[instrument(level = "debug", skip(self))]
pub(crate) fn mcdc_tvbitmap_update(
&mut self,
fn_name: &'ll Value,
hash: &'ll Value,
bitmap_index: &'ll Value,
mcdc_temp: &'ll Value,
) {
let args = &[fn_name, hash, bitmap_index, mcdc_temp];
self.call_intrinsic("llvm.instrprof.mcdc.tvbitmap.update", &[], args);
}
#[instrument(level = "debug", skip(self))]
pub(crate) fn mcdc_condbitmap_reset(&mut self, mcdc_temp: &'ll Value) {
self.store(self.const_i32(0), mcdc_temp, self.tcx.data_layout.i32_align.abi);
}
#[instrument(level = "debug", skip(self))]
pub(crate) fn mcdc_condbitmap_update(&mut self, cond_index: &'ll Value, mcdc_temp: &'ll Value) {
let align = self.tcx.data_layout.i32_align.abi;
let current_tv_index = self.load(self.cx.type_i32(), mcdc_temp, align);
let new_tv_index = self.add(current_tv_index, cond_index);
self.store(new_tv_index, mcdc_temp, align);
}
}

View file

@ -5,7 +5,7 @@ use rustc_codegen_ssa::common;
use rustc_codegen_ssa::traits::*;
use rustc_hir::LangItem;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer, Scalar as InterpScalar,
@ -191,8 +191,8 @@ fn check_and_apply_linkage<'ll, 'tcx>(
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(sym);
let real_name =
format!("_rust_extern_with_linkage_{:016x}_{sym}", cx.tcx.stable_crate_id(LOCAL_CRATE));
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
cx.sess().dcx().emit_fatal(SymbolAlreadyDefined {
span: cx.tcx.def_span(def_id),

View file

@ -73,48 +73,6 @@ pub(crate) struct CounterExpression {
pub(crate) rhs: Counter,
}
pub(crate) mod mcdc {
use rustc_middle::mir::coverage::{ConditionId, ConditionInfo, DecisionInfo};
/// Must match the layout of `LLVMRustMCDCDecisionParameters`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
pub(crate) struct DecisionParameters {
bitmap_idx: u32,
num_conditions: u16,
}
type LLVMConditionId = i16;
/// Must match the layout of `LLVMRustMCDCBranchParameters`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
pub(crate) struct BranchParameters {
condition_id: LLVMConditionId,
condition_ids: [LLVMConditionId; 2],
}
impl From<ConditionInfo> for BranchParameters {
fn from(value: ConditionInfo) -> Self {
let to_llvm_cond_id = |cond_id: Option<ConditionId>| {
cond_id.and_then(|id| LLVMConditionId::try_from(id.as_usize()).ok()).unwrap_or(-1)
};
let ConditionInfo { condition_id, true_next_id, false_next_id } = value;
Self {
condition_id: to_llvm_cond_id(Some(condition_id)),
condition_ids: [to_llvm_cond_id(false_next_id), to_llvm_cond_id(true_next_id)],
}
}
}
impl From<DecisionInfo> for DecisionParameters {
fn from(info: DecisionInfo) -> Self {
let DecisionInfo { bitmap_idx, num_conditions } = info;
Self { bitmap_idx, num_conditions }
}
}
}
/// A span of source code coordinates to be embedded in coverage metadata.
///
/// Must match the layout of `LLVMRustCoverageSpan`.
@ -148,26 +106,14 @@ pub(crate) struct Regions {
pub(crate) code_regions: Vec<CodeRegion>,
pub(crate) expansion_regions: Vec<ExpansionRegion>,
pub(crate) branch_regions: Vec<BranchRegion>,
pub(crate) mcdc_branch_regions: Vec<MCDCBranchRegion>,
pub(crate) mcdc_decision_regions: Vec<MCDCDecisionRegion>,
}
impl Regions {
/// Returns true if none of this structure's tables contain any regions.
pub(crate) fn has_no_regions(&self) -> bool {
let Self {
code_regions,
expansion_regions,
branch_regions,
mcdc_branch_regions,
mcdc_decision_regions,
} = self;
let Self { code_regions, expansion_regions, branch_regions } = self;
code_regions.is_empty()
&& expansion_regions.is_empty()
&& branch_regions.is_empty()
&& mcdc_branch_regions.is_empty()
&& mcdc_decision_regions.is_empty()
code_regions.is_empty() && expansion_regions.is_empty() && branch_regions.is_empty()
}
}
@ -195,21 +141,3 @@ pub(crate) struct BranchRegion {
pub(crate) true_counter: Counter,
pub(crate) false_counter: Counter,
}
/// Must match the layout of `LLVMRustCoverageMCDCBranchRegion`.
#[derive(Clone, Debug)]
#[repr(C)]
pub(crate) struct MCDCBranchRegion {
pub(crate) cov_span: CoverageSpan,
pub(crate) true_counter: Counter,
pub(crate) false_counter: Counter,
pub(crate) mcdc_branch_params: mcdc::BranchParameters,
}
/// Must match the layout of `LLVMRustCoverageMCDCDecisionRegion`.
#[derive(Clone, Debug)]
#[repr(C)]
pub(crate) struct MCDCDecisionRegion {
pub(crate) cov_span: CoverageSpan,
pub(crate) mcdc_decision_params: mcdc::DecisionParameters,
}

View file

@ -63,13 +63,7 @@ pub(crate) fn write_function_mappings_to_buffer(
expressions: &[ffi::CounterExpression],
regions: &ffi::Regions,
) -> Vec<u8> {
let ffi::Regions {
code_regions,
expansion_regions,
branch_regions,
mcdc_branch_regions,
mcdc_decision_regions,
} = regions;
let ffi::Regions { code_regions, expansion_regions, branch_regions } = regions;
// SAFETY:
// - All types are FFI-compatible and have matching representations in Rust/C++.
@ -87,10 +81,6 @@ pub(crate) fn write_function_mappings_to_buffer(
expansion_regions.len(),
branch_regions.as_ptr(),
branch_regions.len(),
mcdc_branch_regions.as_ptr(),
mcdc_branch_regions.len(),
mcdc_decision_regions.as_ptr(),
mcdc_decision_regions.len(),
buffer,
)
})

View file

@ -140,8 +140,6 @@ fn fill_region_tables<'tcx>(
code_regions,
expansion_regions: _, // FIXME(Zalathar): Fill out support for expansion regions
branch_regions,
mcdc_branch_regions,
mcdc_decision_regions,
} = &mut covfun.regions;
// For each counter/region pair in this function+file, convert it to a
@ -161,20 +159,6 @@ fn fill_region_tables<'tcx>(
false_counter: counter_for_bcb(false_bcb),
});
}
MappingKind::MCDCBranch { true_bcb, false_bcb, mcdc_params } => {
mcdc_branch_regions.push(ffi::MCDCBranchRegion {
cov_span,
true_counter: counter_for_bcb(true_bcb),
false_counter: counter_for_bcb(false_bcb),
mcdc_branch_params: ffi::mcdc::BranchParameters::from(mcdc_params),
});
}
MappingKind::MCDCDecision(mcdc_decision_params) => {
mcdc_decision_regions.push(ffi::MCDCDecisionRegion {
cov_span,
mcdc_decision_params: ffi::mcdc::DecisionParameters::from(mcdc_decision_params),
});
}
}
}
}

View file

@ -1,11 +1,10 @@
use std::cell::{OnceCell, RefCell};
use std::ffi::{CStr, CString};
use rustc_abi::Size;
use rustc_codegen_ssa::traits::{
BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods,
ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods,
};
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::fx::FxIndexMap;
use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::ty::Instance;
use tracing::{debug, instrument};
@ -28,34 +27,13 @@ pub(crate) struct CguCoverageContext<'ll, 'tcx> {
/// symbol name, and `llvm-cov` will exit fatally if it can't resolve that
/// hash back to an entry in the binary's `__llvm_prf_names` linker section.
pub(crate) pgo_func_name_var_map: RefCell<FxIndexMap<Instance<'tcx>, &'ll llvm::Value>>,
pub(crate) mcdc_condition_bitmap_map: RefCell<FxHashMap<Instance<'tcx>, Vec<&'ll llvm::Value>>>,
covfun_section_name: OnceCell<CString>,
}
impl<'ll, 'tcx> CguCoverageContext<'ll, 'tcx> {
pub(crate) fn new() -> Self {
Self {
pgo_func_name_var_map: Default::default(),
mcdc_condition_bitmap_map: Default::default(),
covfun_section_name: Default::default(),
}
}
/// LLVM use a temp value to record evaluated mcdc test vector of each decision, which is
/// called condition bitmap. In order to handle nested decisions, several condition bitmaps can
/// be allocated for a function body. These values are named `mcdc.addr.{i}` and are a 32-bit
/// integers. They respectively hold the condition bitmaps for decisions with a depth of `i`.
fn try_get_mcdc_condition_bitmap(
&self,
instance: &Instance<'tcx>,
decision_depth: u16,
) -> Option<&'ll llvm::Value> {
self.mcdc_condition_bitmap_map
.borrow()
.get(instance)
.and_then(|bitmap_map| bitmap_map.get(decision_depth as usize))
.copied() // Dereference Option<&&Value> to Option<&Value>
Self { pgo_func_name_var_map: Default::default(), covfun_section_name: Default::default() }
}
/// Returns the list of instances considered "used" in this CGU, as
@ -105,38 +83,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
}
impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
fn init_coverage(&mut self, instance: Instance<'tcx>) {
let Some(function_coverage_info) =
self.tcx.instance_mir(instance.def).function_coverage_info.as_deref()
else {
return;
};
// If there are no MC/DC bitmaps to set up, return immediately.
if function_coverage_info.mcdc_bitmap_bits == 0 {
return;
}
let fn_name = self.ensure_pgo_func_name_var(instance);
let hash = self.const_u64(function_coverage_info.function_source_hash);
let bitmap_bits = self.const_u32(function_coverage_info.mcdc_bitmap_bits as u32);
self.mcdc_parameters(fn_name, hash, bitmap_bits);
// Create pointers named `mcdc.addr.{i}` to stack-allocated condition bitmaps.
let mut cond_bitmaps = vec![];
for i in 0..function_coverage_info.mcdc_num_condition_bitmaps {
// MC/DC intrinsics will perform loads/stores that use the ABI default
// alignment for i32, so our variable declaration should match.
let align = self.tcx.data_layout.i32_align.abi;
let cond_bitmap = self.alloca(Size::from_bytes(4), align);
llvm::set_value_name(cond_bitmap, format!("mcdc.addr.{i}").as_bytes());
self.store(self.const_i32(0), cond_bitmap, align);
cond_bitmaps.push(cond_bitmap);
}
self.coverage_cx().mcdc_condition_bitmap_map.borrow_mut().insert(instance, cond_bitmaps);
}
#[instrument(level = "debug", skip(self))]
fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) {
// Our caller should have already taken care of inlining subtleties,
@ -153,7 +99,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
// When that happens, we currently just discard those statements, so
// the corresponding code will be undercounted.
// FIXME(Zalathar): Find a better solution for mixed-coverage builds.
let Some(coverage_cx) = &bx.cx.coverage_cx else { return };
let Some(_coverage_cx) = &bx.cx.coverage_cx else { return };
let Some(function_coverage_info) =
bx.tcx.instance_mir(instance.def).function_coverage_info.as_deref()
@ -185,30 +131,6 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
}
// If a BCB doesn't have an associated physical counter, there's nothing to codegen.
CoverageKind::VirtualCounter { .. } => {}
CoverageKind::CondBitmapUpdate { index, decision_depth } => {
let cond_bitmap = coverage_cx
.try_get_mcdc_condition_bitmap(&instance, decision_depth)
.expect("mcdc cond bitmap should have been allocated for updating");
let cond_index = bx.const_i32(index as i32);
bx.mcdc_condbitmap_update(cond_index, cond_bitmap);
}
CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth } => {
let cond_bitmap =
coverage_cx.try_get_mcdc_condition_bitmap(&instance, decision_depth).expect(
"mcdc cond bitmap should have been allocated for merging \
into the global bitmap",
);
assert!(
bitmap_idx as usize <= function_coverage_info.mcdc_bitmap_bits,
"bitmap index of the decision out of range"
);
let fn_name = bx.ensure_pgo_func_name_var(instance);
let hash = bx.const_u64(function_coverage_info.function_source_hash);
let bitmap_index = bx.const_u32(bitmap_idx);
bx.mcdc_tvbitmap_update(fn_name, hash, bitmap_index, cond_bitmap);
bx.mcdc_condbitmap_reset(cond_bitmap);
}
}
}
}

View file

@ -2056,10 +2056,6 @@ unsafe extern "C" {
NumExpansionRegions: size_t,
BranchRegions: *const crate::coverageinfo::ffi::BranchRegion,
NumBranchRegions: size_t,
MCDCBranchRegions: *const crate::coverageinfo::ffi::MCDCBranchRegion,
NumMCDCBranchRegions: size_t,
MCDCDecisionRegions: *const crate::coverageinfo::ffi::MCDCDecisionRegion,
NumMCDCDecisionRegions: size_t,
BufferOut: &RustString,
);

View file

@ -69,6 +69,15 @@ pub fn assert_module_sources(tcx: TyCtxt<'_>, set_reuse: &dyn Fn(&mut CguReuseTr
set_reuse(&mut ams.cgu_reuse_tracker);
if tcx.sess.opts.unstable_opts.print_mono_items
&& let Some(data) = &ams.cgu_reuse_tracker.data
{
data.actual_reuse.items().all(|(cgu, reuse)| {
println!("CGU_REUSE {cgu} {reuse}");
true
});
}
ams.cgu_reuse_tracker.check_expected_reuse(tcx.sess);
});
}

View file

@ -1805,11 +1805,18 @@ pub(crate) fn exported_symbols(
.collect();
}
if let CrateType::ProcMacro = crate_type {
let mut symbols = if let CrateType::ProcMacro = crate_type {
exported_symbols_for_proc_macro_crate(tcx)
} else {
exported_symbols_for_non_proc_macro(tcx, crate_type)
};
if crate_type == CrateType::Dylib || crate_type == CrateType::ProcMacro {
let metadata_symbol_name = exported_symbols::metadata_symbol_name(tcx);
symbols.push((metadata_symbol_name, SymbolExportKind::Data));
}
symbols
}
fn exported_symbols_for_non_proc_macro(
@ -1842,12 +1849,8 @@ fn exported_symbols_for_proc_macro_crate(tcx: TyCtxt<'_>) -> Vec<(String, Symbol
let stable_crate_id = tcx.stable_crate_id(LOCAL_CRATE);
let proc_macro_decls_name = tcx.sess.generate_proc_macro_decls_symbol(stable_crate_id);
let metadata_symbol_name = exported_symbols::metadata_symbol_name(tcx);
vec![
(proc_macro_decls_name, SymbolExportKind::Data),
(metadata_symbol_name, SymbolExportKind::Data),
]
vec![(proc_macro_decls_name, SymbolExportKind::Data)]
}
pub(crate) fn linked_symbols(

View file

@ -8,7 +8,7 @@ use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LOCAL_CRATE, LocalDefId};
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::exported_symbols::{
ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel, metadata_symbol_name,
ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel,
};
use rustc_middle::query::LocalCrate;
use rustc_middle::ty::{self, GenericArgKind, GenericArgsRef, Instance, SymbolName, Ty, TyCtxt};
@ -289,23 +289,6 @@ fn exported_non_generic_symbols_provider_local<'tcx>(
}));
}
if tcx.crate_types().contains(&CrateType::Dylib)
|| tcx.crate_types().contains(&CrateType::ProcMacro)
{
let symbol_name = metadata_symbol_name(tcx);
let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
symbols.push((
exported_symbol,
SymbolExportInfo {
level: SymbolExportLevel::C,
kind: SymbolExportKind::Data,
used: true,
rustc_std_internal_symbol: false,
},
));
}
// Sort so we get a stable incr. comp. hash.
symbols.sort_by_cached_key(|s| s.0.symbol_name_for_local_instance(tcx));

View file

@ -296,10 +296,6 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Apply debuginfo to the newly allocated locals.
fx.debug_introduce_locals(&mut start_bx, consts_debug_info.unwrap_or_default());
// If the backend supports coverage, and coverage is enabled for this function,
// do any necessary start-of-function codegen (e.g. locals for MC/DC bitmaps).
start_bx.init_coverage(instance);
// The builders will be created separately for each basic block at `codegen_block`.
// So drop the builder of `start_llbb` to avoid having two at the same time.
drop(start_bx);

View file

@ -498,6 +498,35 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
(is_niche, tagged_discr, 0)
} else {
// Thanks to parameter attributes and load metadata, LLVM already knows
// the general valid range of the tag. It's possible, though, for there
// to be an impossible value *in the middle*, which those ranges don't
// communicate, so it's worth an `assume` to let the optimizer know.
// Most importantly, this means when optimizing a variant test like
// `SELECT(is_niche, complex, CONST) == CONST` it's ok to simplify that
// to `!is_niche` because the `complex` part can't possibly match.
//
// This was previously asserted on `tagged_discr` below, where the
// impossible value is more obvious, but that caused an intermediate
// value to become multi-use and thus not optimize, so instead this
// assumes on the original input which is always multi-use. See
// <https://github.com/llvm/llvm-project/issues/134024#issuecomment-3131782555>
//
// FIXME: If we ever get range assume operand bundles in LLVM (so we
// don't need the `icmp`s in the instruction stream any more), it
// might be worth moving this back to being on the switch argument
// where it's more obviously applicable.
if niche_variants.contains(&untagged_variant)
&& bx.cx().sess().opts.optimize != OptLevel::No
{
let impossible = niche_start
.wrapping_add(u128::from(untagged_variant.as_u32()))
.wrapping_sub(u128::from(niche_variants.start().as_u32()));
let impossible = bx.cx().const_uint_big(tag_llty, impossible);
let ne = bx.icmp(IntPredicate::IntNE, tag, impossible);
bx.assume(ne);
}
// With multiple niched variants we'll have to actually compute
// the variant index from the stored tag.
//
@ -588,20 +617,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let untagged_variant_const =
bx.cx().const_uint(cast_to, u64::from(untagged_variant.as_u32()));
// Thanks to parameter attributes and load metadata, LLVM already knows
// the general valid range of the tag. It's possible, though, for there
// to be an impossible value *in the middle*, which those ranges don't
// communicate, so it's worth an `assume` to let the optimizer know.
// Most importantly, this means when optimizing a variant test like
// `SELECT(is_niche, complex, CONST) == CONST` it's ok to simplify that
// to `!is_niche` because the `complex` part can't possibly match.
if niche_variants.contains(&untagged_variant)
&& bx.cx().sess().opts.optimize != OptLevel::No
{
let ne = bx.icmp(IntPredicate::IntNE, tagged_discr, untagged_variant_const);
bx.assume(ne);
}
let discr = bx.select(is_niche, tagged_discr, untagged_variant_const);
// In principle we could insert assumes on the possible range of `discr`, but

View file

@ -2,11 +2,6 @@ use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::ty::Instance;
pub trait CoverageInfoBuilderMethods<'tcx> {
/// Performs any start-of-function codegen needed for coverage instrumentation.
///
/// Can be a no-op in backends that don't support coverage instrumentation.
fn init_coverage(&mut self, _instance: Instance<'tcx>) {}
/// Handle the MIR coverage info in a backend-specific way.
///
/// This can potentially be a no-op in backends that don't support

View file

@ -777,7 +777,7 @@ fn test_unstable_options_tracking_hash() {
tracked!(
coverage_options,
CoverageOptions {
level: CoverageLevel::Mcdc,
level: CoverageLevel::Branch,
// (don't collapse test-only options onto the same line)
discard_all_spans_in_codegen: true,
}

View file

@ -37,28 +37,6 @@ static coverage::Counter fromRust(LLVMRustCounter Counter) {
report_fatal_error("Bad LLVMRustCounterKind!");
}
struct LLVMRustMCDCDecisionParameters {
uint32_t BitmapIdx;
uint16_t NumConditions;
};
struct LLVMRustMCDCBranchParameters {
int16_t ConditionID;
int16_t ConditionIDs[2];
};
static coverage::mcdc::BranchParameters
fromRust(LLVMRustMCDCBranchParameters Params) {
return coverage::mcdc::BranchParameters(
Params.ConditionID, {Params.ConditionIDs[0], Params.ConditionIDs[1]});
}
static coverage::mcdc::DecisionParameters
fromRust(LLVMRustMCDCDecisionParameters Params) {
return coverage::mcdc::DecisionParameters(Params.BitmapIdx,
Params.NumConditions);
}
// Must match the layout of
// `rustc_codegen_llvm::coverageinfo::ffi::CoverageSpan`.
struct LLVMRustCoverageSpan {
@ -90,22 +68,6 @@ struct LLVMRustCoverageBranchRegion {
LLVMRustCounter FalseCount;
};
// Must match the layout of
// `rustc_codegen_llvm::coverageinfo::ffi::MCDCBranchRegion`.
struct LLVMRustCoverageMCDCBranchRegion {
LLVMRustCoverageSpan Span;
LLVMRustCounter TrueCount;
LLVMRustCounter FalseCount;
LLVMRustMCDCBranchParameters MCDCBranchParams;
};
// Must match the layout of
// `rustc_codegen_llvm::coverageinfo::ffi::MCDCDecisionRegion`.
struct LLVMRustCoverageMCDCDecisionRegion {
LLVMRustCoverageSpan Span;
LLVMRustMCDCDecisionParameters MCDCDecisionParams;
};
// FFI equivalent of enum `llvm::coverage::CounterExpression::ExprKind`
// https://github.com/rust-lang/llvm-project/blob/ea6fa9c2/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L154
enum class LLVMRustCounterExprKind {
@ -159,10 +121,7 @@ extern "C" void LLVMRustCoverageWriteFunctionMappingsToBuffer(
const LLVMRustCoverageExpansionRegion *ExpansionRegions,
size_t NumExpansionRegions,
const LLVMRustCoverageBranchRegion *BranchRegions, size_t NumBranchRegions,
const LLVMRustCoverageMCDCBranchRegion *MCDCBranchRegions,
size_t NumMCDCBranchRegions,
const LLVMRustCoverageMCDCDecisionRegion *MCDCDecisionRegions,
size_t NumMCDCDecisionRegions, RustStringRef BufferOut) {
RustStringRef BufferOut) {
// Convert from FFI representation to LLVM representation.
// Expressions:
@ -176,8 +135,8 @@ extern "C" void LLVMRustCoverageWriteFunctionMappingsToBuffer(
}
std::vector<coverage::CounterMappingRegion> MappingRegions;
MappingRegions.reserve(NumCodeRegions + NumBranchRegions +
NumMCDCBranchRegions + NumMCDCDecisionRegions);
MappingRegions.reserve(NumCodeRegions + NumExpansionRegions +
NumBranchRegions);
// Code regions:
for (const auto &Region : ArrayRef(CodeRegions, NumCodeRegions)) {
@ -201,24 +160,6 @@ extern "C" void LLVMRustCoverageWriteFunctionMappingsToBuffer(
Region.Span.LineEnd, Region.Span.ColumnEnd));
}
// MC/DC branch regions:
for (const auto &Region : ArrayRef(MCDCBranchRegions, NumMCDCBranchRegions)) {
MappingRegions.push_back(coverage::CounterMappingRegion::makeBranchRegion(
fromRust(Region.TrueCount), fromRust(Region.FalseCount),
Region.Span.FileID, Region.Span.LineStart, Region.Span.ColumnStart,
Region.Span.LineEnd, Region.Span.ColumnEnd,
fromRust(Region.MCDCBranchParams)));
}
// MC/DC decision regions:
for (const auto &Region :
ArrayRef(MCDCDecisionRegions, NumMCDCDecisionRegions)) {
MappingRegions.push_back(coverage::CounterMappingRegion::makeDecisionRegion(
fromRust(Region.MCDCDecisionParams), Region.Span.FileID,
Region.Span.LineStart, Region.Span.ColumnStart, Region.Span.LineEnd,
Region.Span.ColumnEnd));
}
// Write the converted expressions and mappings to a byte buffer.
auto CoverageMappingWriter = coverage::CoverageMappingWriter(
ArrayRef<unsigned>(VirtualFileMappingIDs, NumVirtualFileMappingIDs),

View file

@ -19,13 +19,12 @@ use rustc_hir::find_attr;
use rustc_hir_pretty::id_to_string;
use rustc_middle::dep_graph::WorkProductId;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::metadata_symbol_name;
use rustc_middle::mir::interpret;
use rustc_middle::query::Providers;
use rustc_middle::traits::specialization_graph;
use rustc_middle::ty::AssocItemContainer;
use rustc_middle::ty::codec::TyEncoder;
use rustc_middle::ty::fast_reject::{self, TreatParams};
use rustc_middle::ty::{AssocItemContainer, SymbolName};
use rustc_middle::{bug, span_bug};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque};
use rustc_session::config::{CrateType, OptLevel, TargetModifier};
@ -2207,19 +2206,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
exported_symbols: &[(ExportedSymbol<'tcx>, SymbolExportInfo)],
) -> LazyArray<(ExportedSymbol<'static>, SymbolExportInfo)> {
empty_proc_macro!(self);
// The metadata symbol name is special. It should not show up in
// downstream crates.
let metadata_symbol_name = SymbolName::new(self.tcx, &metadata_symbol_name(self.tcx));
self.lazy_array(
exported_symbols
.iter()
.filter(|&(exported_symbol, _)| match *exported_symbol {
ExportedSymbol::NoDefId(symbol_name) => symbol_name != metadata_symbol_name,
_ => true,
})
.cloned(),
)
self.lazy_array(exported_symbols.iter().cloned())
}
fn encode_dylib_dependency_formats(&mut self) -> LazyArray<Option<LinkagePreference>> {

View file

@ -122,8 +122,6 @@ middle_strict_coherence_needs_negative_coherence =
to use `strict_coherence` on this trait, the `with_negative_coherence` feature must be enabled
.label = due to this attribute
middle_type_length_limit = reached the type-length limit while instantiating `{$shrunk}`
middle_type_length_limit = reached the type-length limit while instantiating `{$instance}`
middle_unsupported_union = we don't support unions yet: '{$ty_name}'
middle_written_to_path = the full type name has been written to '{$path}'

View file

@ -1,4 +1,4 @@
use std::path::{Path, PathBuf};
use std::path::Path;
use std::{fmt, io};
use rustc_errors::codes::*;
@ -6,7 +6,7 @@ use rustc_errors::{DiagArgName, DiagArgValue, DiagMessage};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::{Span, Symbol};
use crate::ty::Ty;
use crate::ty::{Instance, Ty};
#[derive(Diagnostic)]
#[diag(middle_drop_check_overflow, code = E0320)]
@ -161,13 +161,10 @@ pub(crate) struct ErroneousConstant {
#[derive(Diagnostic)]
#[diag(middle_type_length_limit)]
#[help(middle_consider_type_length_limit)]
pub(crate) struct TypeLengthLimit {
pub(crate) struct TypeLengthLimit<'tcx> {
#[primary_span]
pub span: Span,
pub shrunk: String,
#[note(middle_written_to_path)]
pub was_written: bool,
pub path: PathBuf,
pub instance: Instance<'tcx>,
pub type_length: usize,
}

View file

@ -50,25 +50,6 @@ rustc_index::newtype_index! {
pub struct ExpressionId {}
}
rustc_index::newtype_index! {
/// ID of a mcdc condition. Used by llvm to check mcdc coverage.
///
/// Note for future: the max limit of 0xFFFF is probably too loose. Actually llvm does not
/// support decisions with too many conditions (7 and more at LLVM 18 while may be hundreds at 19)
/// and represents it with `int16_t`. This max value may be changed once we could
/// figure out an accurate limit.
#[derive(HashStable)]
#[encodable]
#[orderable]
#[max = 0xFFFF]
#[debug_format = "ConditionId({})"]
pub struct ConditionId {}
}
impl ConditionId {
pub const START: Self = Self::from_usize(0);
}
/// Enum that can hold a constant zero value, the ID of an physical coverage
/// counter, or the ID of a coverage-counter expression.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
@ -109,16 +90,6 @@ pub enum CoverageKind {
/// During codegen, this might be lowered to `llvm.instrprof.increment` or
/// to a no-op, depending on the outcome of counter-creation.
VirtualCounter { bcb: BasicCoverageBlock },
/// Marks the point in MIR control flow represented by a evaluated condition.
///
/// This is eventually lowered to instruments updating mcdc temp variables.
CondBitmapUpdate { index: u32, decision_depth: u16 },
/// Marks the point in MIR control flow represented by a evaluated decision.
///
/// This is eventually lowered to `llvm.instrprof.mcdc.tvbitmap.update` in LLVM IR.
TestVectorBitmapUpdate { bitmap_idx: u32, decision_depth: u16 },
}
impl Debug for CoverageKind {
@ -128,12 +99,6 @@ impl Debug for CoverageKind {
SpanMarker => write!(fmt, "SpanMarker"),
BlockMarker { id } => write!(fmt, "BlockMarker({:?})", id.index()),
VirtualCounter { bcb } => write!(fmt, "VirtualCounter({bcb:?})"),
CondBitmapUpdate { index, decision_depth } => {
write!(fmt, "CondBitmapUpdate(index={:?}, depth={:?})", index, decision_depth)
}
TestVectorBitmapUpdate { bitmap_idx, decision_depth } => {
write!(fmt, "TestVectorUpdate({:?}, depth={:?})", bitmap_idx, decision_depth)
}
}
}
}
@ -170,14 +135,6 @@ pub enum MappingKind {
Code { bcb: BasicCoverageBlock },
/// Associates a branch region with separate counters for true and false.
Branch { true_bcb: BasicCoverageBlock, false_bcb: BasicCoverageBlock },
/// Associates a branch region with separate counters for true and false.
MCDCBranch {
true_bcb: BasicCoverageBlock,
false_bcb: BasicCoverageBlock,
mcdc_params: ConditionInfo,
},
/// Associates a decision region with a bitmap and number of conditions.
MCDCDecision(DecisionInfo),
}
#[derive(Clone, Debug)]
@ -201,11 +158,6 @@ pub struct FunctionCoverageInfo {
pub priority_list: Vec<BasicCoverageBlock>,
pub mappings: Vec<Mapping>,
pub mcdc_bitmap_bits: usize,
/// The depth of the deepest decision is used to know how many
/// temp condbitmaps should be allocated for the function.
pub mcdc_num_condition_bitmaps: usize,
}
/// Coverage information for a function, recorded during MIR building and
@ -222,10 +174,6 @@ pub struct CoverageInfoHi {
/// data structures without having to scan the entire body first.
pub num_block_markers: usize,
pub branch_spans: Vec<BranchSpan>,
/// Branch spans generated by mcdc. Because of some limits mcdc builder give up generating
/// decisions including them so that they are handled as normal branch spans.
pub mcdc_degraded_branch_spans: Vec<MCDCBranchSpan>,
pub mcdc_spans: Vec<(MCDCDecisionSpan, Vec<MCDCBranchSpan>)>,
}
#[derive(Clone, Debug)]
@ -236,39 +184,6 @@ pub struct BranchSpan {
pub false_marker: BlockMarkerId,
}
#[derive(Copy, Clone, Debug)]
#[derive(TyEncodable, TyDecodable, Hash, HashStable)]
pub struct ConditionInfo {
pub condition_id: ConditionId,
pub true_next_id: Option<ConditionId>,
pub false_next_id: Option<ConditionId>,
}
#[derive(Clone, Debug)]
#[derive(TyEncodable, TyDecodable, Hash, HashStable)]
pub struct MCDCBranchSpan {
pub span: Span,
pub condition_info: ConditionInfo,
pub true_marker: BlockMarkerId,
pub false_marker: BlockMarkerId,
}
#[derive(Copy, Clone, Debug)]
#[derive(TyEncodable, TyDecodable, Hash, HashStable)]
pub struct DecisionInfo {
pub bitmap_idx: u32,
pub num_conditions: u16,
}
#[derive(Clone, Debug)]
#[derive(TyEncodable, TyDecodable, Hash, HashStable)]
pub struct MCDCDecisionSpan {
pub span: Span,
pub end_markers: Vec<BlockMarkerId>,
pub decision_depth: u16,
pub num_conditions: usize,
}
/// Contains information needed during codegen, obtained by inspecting the
/// function's MIR after MIR optimizations.
///

View file

@ -585,12 +585,7 @@ fn write_coverage_info_hi(
coverage_info_hi: &coverage::CoverageInfoHi,
w: &mut dyn io::Write,
) -> io::Result<()> {
let coverage::CoverageInfoHi {
num_block_markers: _,
branch_spans,
mcdc_degraded_branch_spans,
mcdc_spans,
} = coverage_info_hi;
let coverage::CoverageInfoHi { num_block_markers: _, branch_spans } = coverage_info_hi;
// Only add an extra trailing newline if we printed at least one thing.
let mut did_print = false;
@ -603,38 +598,6 @@ fn write_coverage_info_hi(
did_print = true;
}
for coverage::MCDCBranchSpan { span, true_marker, false_marker, .. } in
mcdc_degraded_branch_spans
{
writeln!(
w,
"{INDENT}coverage branch {{ true: {true_marker:?}, false: {false_marker:?} }} => {span:?}",
)?;
did_print = true;
}
for (
coverage::MCDCDecisionSpan { span, end_markers, decision_depth, num_conditions: _ },
conditions,
) in mcdc_spans
{
let num_conditions = conditions.len();
writeln!(
w,
"{INDENT}coverage mcdc decision {{ num_conditions: {num_conditions:?}, end: {end_markers:?}, depth: {decision_depth:?} }} => {span:?}"
)?;
for coverage::MCDCBranchSpan { span, condition_info, true_marker, false_marker } in
conditions
{
writeln!(
w,
"{INDENT}coverage mcdc branch {{ condition_id: {:?}, true: {true_marker:?}, false: {false_marker:?} }} => {span:?}",
condition_info.condition_id
)?;
}
did_print = true;
}
if did_print {
writeln!(w)?;
}

View file

@ -7,14 +7,14 @@ use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::{
Applicability, Diag, DiagArgValue, IntoDiagArg, into_diag_arg_using_display, listify, pluralize,
};
use rustc_hir::def::DefKind;
use rustc_hir::def::{DefKind, Namespace};
use rustc_hir::def_id::DefId;
use rustc_hir::{self as hir, AmbigArg, LangItem, PredicateOrigin, WherePredicateKind};
use rustc_span::{BytePos, Span};
use rustc_type_ir::TyKind::*;
use crate::ty::{
self, AliasTy, Const, ConstKind, FallibleTypeFolder, InferConst, InferTy, Opaque,
self, AliasTy, Const, ConstKind, FallibleTypeFolder, InferConst, InferTy, Instance, Opaque,
PolyTraitPredicate, Projection, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable,
TypeSuperVisitable, TypeVisitable, TypeVisitor,
};
@ -28,6 +28,15 @@ impl IntoDiagArg for Ty<'_> {
}
}
impl IntoDiagArg for Instance<'_> {
fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue {
ty::tls::with(|tcx| {
let instance = tcx.short_string_namespace(self, path, Namespace::ValueNS);
rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(instance))
})
}
}
into_diag_arg_using_display! {
ty::Region<'_>,
}

View file

@ -160,7 +160,11 @@ impl<'tcx> Ty<'tcx> {
_ => {
let width = tcx.sess.diagnostic_width();
let length_limit = std::cmp::max(width / 4, 40);
format!("`{}`", tcx.string_with_limit(self, length_limit)).into()
format!(
"`{}`",
tcx.string_with_limit(self, length_limit, hir::def::Namespace::TypeNS)
)
.into()
}
}
}
@ -213,12 +217,12 @@ impl<'tcx> Ty<'tcx> {
}
impl<'tcx> TyCtxt<'tcx> {
pub fn string_with_limit<T>(self, t: T, length_limit: usize) -> String
pub fn string_with_limit<T>(self, t: T, length_limit: usize, ns: hir::def::Namespace) -> String
where
T: Copy + for<'a, 'b> Lift<TyCtxt<'b>, Lifted: Print<'b, FmtPrinter<'a, 'b>>>,
{
let mut type_limit = 50;
let regular = FmtPrinter::print_string(self, hir::def::Namespace::TypeNS, |p| {
let regular = FmtPrinter::print_string(self, ns, |p| {
self.lift(t).expect("could not lift for printing").print(p)
})
.expect("could not write to `String`");
@ -229,11 +233,7 @@ impl<'tcx> TyCtxt<'tcx> {
loop {
// Look for the longest properly trimmed path that still fits in length_limit.
short = with_forced_trimmed_paths!({
let mut p = FmtPrinter::new_with_limit(
self,
hir::def::Namespace::TypeNS,
rustc_session::Limit(type_limit),
);
let mut p = FmtPrinter::new_with_limit(self, ns, rustc_session::Limit(type_limit));
self.lift(t)
.expect("could not lift for printing")
.print(&mut p)
@ -251,12 +251,28 @@ impl<'tcx> TyCtxt<'tcx> {
/// When calling this after a `Diag` is constructed, the preferred way of doing so is
/// `tcx.short_string(ty, diag.long_ty_path())`. The diagnostic itself is the one that keeps
/// the existence of a "long type" anywhere in the diagnostic, so the note telling the user
/// where we wrote the file to is only printed once.
/// where we wrote the file to is only printed once. The path will use the type namespace.
pub fn short_string<T>(self, t: T, path: &mut Option<PathBuf>) -> String
where
T: Copy + Hash + for<'a, 'b> Lift<TyCtxt<'b>, Lifted: Print<'b, FmtPrinter<'a, 'b>>>,
{
let regular = FmtPrinter::print_string(self, hir::def::Namespace::TypeNS, |p| {
self.short_string_namespace(t, path, hir::def::Namespace::TypeNS)
}
/// When calling this after a `Diag` is constructed, the preferred way of doing so is
/// `tcx.short_string(ty, diag.long_ty_path())`. The diagnostic itself is the one that keeps
/// the existence of a "long type" anywhere in the diagnostic, so the note telling the user
/// where we wrote the file to is only printed once.
pub fn short_string_namespace<T>(
self,
t: T,
path: &mut Option<PathBuf>,
namespace: hir::def::Namespace,
) -> String
where
T: Copy + Hash + for<'a, 'b> Lift<TyCtxt<'b>, Lifted: Print<'b, FmtPrinter<'a, 'b>>>,
{
let regular = FmtPrinter::print_string(self, namespace, |p| {
self.lift(t).expect("could not lift for printing").print(p)
})
.expect("could not write to `String`");
@ -270,7 +286,7 @@ impl<'tcx> TyCtxt<'tcx> {
if regular.len() <= width * 2 / 3 {
return regular;
}
let short = self.string_with_limit(t, length_limit);
let short = self.string_with_limit(t, length_limit, namespace);
if regular == short {
return regular;
}

View file

@ -1,6 +1,5 @@
use std::assert_matches::assert_matches;
use std::fmt;
use std::path::PathBuf;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::ErrorGuaranteed;
@ -17,7 +16,7 @@ use tracing::{debug, instrument};
use crate::error;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::print::{FmtPrinter, Printer, shrunk_instance_name};
use crate::ty::print::{FmtPrinter, Print};
use crate::ty::{
self, EarlyBinder, GenericArgs, GenericArgsRef, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable,
TypeVisitable, TypeVisitableExt, TypeVisitor,
@ -389,59 +388,15 @@ fn type_length<'tcx>(item: impl TypeVisitable<TyCtxt<'tcx>>) -> usize {
visitor.type_length
}
pub fn fmt_instance(
f: &mut fmt::Formatter<'_>,
instance: Instance<'_>,
type_length: Option<rustc_session::Limit>,
) -> fmt::Result {
ty::tls::with(|tcx| {
let args = tcx.lift(instance.args).expect("could not lift for printing");
let mut p = if let Some(type_length) = type_length {
FmtPrinter::new_with_limit(tcx, Namespace::ValueNS, type_length)
} else {
FmtPrinter::new(tcx, Namespace::ValueNS)
};
p.print_def_path(instance.def_id(), args)?;
let s = p.into_buffer();
f.write_str(&s)
})?;
match instance.def {
InstanceKind::Item(_) => Ok(()),
InstanceKind::VTableShim(_) => write!(f, " - shim(vtable)"),
InstanceKind::ReifyShim(_, None) => write!(f, " - shim(reify)"),
InstanceKind::ReifyShim(_, Some(ReifyReason::FnPtr)) => write!(f, " - shim(reify-fnptr)"),
InstanceKind::ReifyShim(_, Some(ReifyReason::Vtable)) => write!(f, " - shim(reify-vtable)"),
InstanceKind::ThreadLocalShim(_) => write!(f, " - shim(tls)"),
InstanceKind::Intrinsic(_) => write!(f, " - intrinsic"),
InstanceKind::Virtual(_, num) => write!(f, " - virtual#{num}"),
InstanceKind::FnPtrShim(_, ty) => write!(f, " - shim({ty})"),
InstanceKind::ClosureOnceShim { .. } => write!(f, " - shim"),
InstanceKind::ConstructCoroutineInClosureShim { .. } => write!(f, " - shim"),
InstanceKind::DropGlue(_, None) => write!(f, " - shim(None)"),
InstanceKind::DropGlue(_, Some(ty)) => write!(f, " - shim(Some({ty}))"),
InstanceKind::CloneShim(_, ty) => write!(f, " - shim({ty})"),
InstanceKind::FnPtrAddrShim(_, ty) => write!(f, " - shim({ty})"),
InstanceKind::FutureDropPollShim(_, proxy_ty, impl_ty) => {
write!(f, " - dropshim({proxy_ty}-{impl_ty})")
}
InstanceKind::AsyncDropGlue(_, ty) => write!(f, " - shim({ty})"),
InstanceKind::AsyncDropGlueCtorShim(_, ty) => write!(f, " - shim(Some({ty}))"),
}
}
pub struct ShortInstance<'tcx>(pub Instance<'tcx>, pub usize);
impl<'tcx> fmt::Display for ShortInstance<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt_instance(f, self.0, Some(rustc_session::Limit(self.1)))
}
}
impl<'tcx> fmt::Display for Instance<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt_instance(f, *self, None)
ty::tls::with(|tcx| {
let mut p = FmtPrinter::new(tcx, Namespace::ValueNS);
let instance = tcx.lift(*self).expect("could not lift for printing");
instance.print(&mut p)?;
let s = p.into_buffer();
f.write_str(&s)
})
}
}
@ -610,23 +565,12 @@ impl<'tcx> Instance<'tcx> {
Ok(None) => {
let type_length = type_length(args);
if !tcx.type_length_limit().value_within_limit(type_length) {
let (shrunk, written_to_path) =
shrunk_instance_name(tcx, Instance::new_raw(def_id, args));
let mut path = PathBuf::new();
let was_written = if let Some(path2) = written_to_path {
path = path2;
true
} else {
false
};
tcx.dcx().emit_fatal(error::TypeLengthLimit {
// We don't use `def_span(def_id)` so that diagnostics point
// to the crate root during mono instead of to foreign items.
// This is arguably better.
span: span_or_local_def_span(),
shrunk,
was_written,
path,
instance: Instance::new_raw(def_id, args),
type_length,
});
} else {

View file

@ -82,7 +82,7 @@ pub use self::context::{
TyCtxtFeed, tls,
};
pub use self::fold::*;
pub use self::instance::{Instance, InstanceKind, ReifyReason, ShortInstance, UnusedGenericParams};
pub use self::instance::{Instance, InstanceKind, ReifyReason, UnusedGenericParams};
pub use self::list::{List, ListWithCachedTypeInfo};
pub use self::opaque_types::OpaqueTypeKey;
pub use self::pattern::{Pattern, PatternKind};

View file

@ -6,8 +6,7 @@ use rustc_macros::{HashStable, extension};
use rustc_type_ir as ir;
use crate::ty::{
self, DebruijnIndex, EarlyBinder, PredicatePolarity, Ty, TyCtxt, TypeFlags, Upcast, UpcastFrom,
WithCachedTypeInfo,
self, DebruijnIndex, EarlyBinder, Ty, TyCtxt, TypeFlags, Upcast, UpcastFrom, WithCachedTypeInfo,
};
pub type TraitRef<'tcx> = ir::TraitRef<TyCtxt<'tcx>>;
@ -536,15 +535,6 @@ impl<'tcx> UpcastFrom<TyCtxt<'tcx>, ty::Binder<'tcx, TraitRef<'tcx>>> for Clause
}
}
impl<'tcx> UpcastFrom<TyCtxt<'tcx>, ty::Binder<'tcx, TraitRef<'tcx>>> for PolyTraitPredicate<'tcx> {
fn upcast_from(from: ty::Binder<'tcx, TraitRef<'tcx>>, _tcx: TyCtxt<'tcx>) -> Self {
from.map_bound(|trait_ref| TraitPredicate {
trait_ref,
polarity: PredicatePolarity::Positive,
})
}
}
impl<'tcx> UpcastFrom<TyCtxt<'tcx>, TraitPredicate<'tcx>> for Predicate<'tcx> {
fn upcast_from(from: TraitPredicate<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
PredicateKind::Clause(ClauseKind::Trait(from)).upcast(tcx)

View file

@ -1,5 +1,3 @@
use std::path::PathBuf;
use hir::def::Namespace;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sso::SsoHashSet;
@ -8,7 +6,7 @@ use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
use tracing::{debug, instrument, trace};
use crate::ty::{self, GenericArg, ShortInstance, Ty, TyCtxt};
use crate::ty::{self, GenericArg, Ty, TyCtxt};
// `pretty` is a separate module only for organization.
mod pretty;
@ -317,6 +315,43 @@ impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for Ty<'tcx> {
}
}
impl<'tcx, P: Printer<'tcx> + std::fmt::Write> Print<'tcx, P> for ty::Instance<'tcx> {
fn print(&self, cx: &mut P) -> Result<(), PrintError> {
cx.print_def_path(self.def_id(), self.args)?;
match self.def {
ty::InstanceKind::Item(_) => {}
ty::InstanceKind::VTableShim(_) => cx.write_str(" - shim(vtable)")?,
ty::InstanceKind::ReifyShim(_, None) => cx.write_str(" - shim(reify)")?,
ty::InstanceKind::ReifyShim(_, Some(ty::ReifyReason::FnPtr)) => {
cx.write_str(" - shim(reify-fnptr)")?
}
ty::InstanceKind::ReifyShim(_, Some(ty::ReifyReason::Vtable)) => {
cx.write_str(" - shim(reify-vtable)")?
}
ty::InstanceKind::ThreadLocalShim(_) => cx.write_str(" - shim(tls)")?,
ty::InstanceKind::Intrinsic(_) => cx.write_str(" - intrinsic")?,
ty::InstanceKind::Virtual(_, num) => cx.write_str(&format!(" - virtual#{num}"))?,
ty::InstanceKind::FnPtrShim(_, ty) => cx.write_str(&format!(" - shim({ty})"))?,
ty::InstanceKind::ClosureOnceShim { .. } => cx.write_str(" - shim")?,
ty::InstanceKind::ConstructCoroutineInClosureShim { .. } => cx.write_str(" - shim")?,
ty::InstanceKind::DropGlue(_, None) => cx.write_str(" - shim(None)")?,
ty::InstanceKind::DropGlue(_, Some(ty)) => {
cx.write_str(&format!(" - shim(Some({ty}))"))?
}
ty::InstanceKind::CloneShim(_, ty) => cx.write_str(&format!(" - shim({ty})"))?,
ty::InstanceKind::FnPtrAddrShim(_, ty) => cx.write_str(&format!(" - shim({ty})"))?,
ty::InstanceKind::FutureDropPollShim(_, proxy_ty, impl_ty) => {
cx.write_str(&format!(" - dropshim({proxy_ty}-{impl_ty})"))?
}
ty::InstanceKind::AsyncDropGlue(_, ty) => cx.write_str(&format!(" - shim({ty})"))?,
ty::InstanceKind::AsyncDropGlueCtorShim(_, ty) => {
cx.write_str(&format!(" - shim(Some({ty}))"))?
}
};
Ok(())
}
}
impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>> {
fn print(&self, p: &mut P) -> Result<(), PrintError> {
p.print_dyn_existential(self)
@ -356,31 +391,3 @@ where
with_no_trimmed_paths!(Self::print(t, fmt))
}
}
/// Format instance name that is already known to be too long for rustc.
/// Show only the first 2 types if it is longer than 32 characters to avoid blasting
/// the user's terminal with thousands of lines of type-name.
///
/// If the type name is longer than before+after, it will be written to a file.
pub fn shrunk_instance_name<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::Instance<'tcx>,
) -> (String, Option<PathBuf>) {
let s = instance.to_string();
// Only use the shrunk version if it's really shorter.
// This also avoids the case where before and after slices overlap.
if s.chars().nth(33).is_some() {
let shrunk = format!("{}", ShortInstance(instance, 4));
if shrunk == s {
return (s, None);
}
let path = tcx.output_filenames(()).temp_path_for_diagnostic("long-type.txt");
let written_to_path = std::fs::write(&path, s).ok().map(|_| path);
(shrunk, written_to_path)
} else {
(s, None)
}
}

View file

@ -121,8 +121,6 @@ mir_build_deref_raw_pointer_requires_unsafe_unsafe_op_in_unsafe_fn_allowed =
.note = raw pointers may be null, dangling or unaligned; they can violate aliasing rules and cause data races: all of these are undefined behavior
.label = dereference of raw pointer
mir_build_exceeds_mcdc_condition_limit = number of conditions in decision ({$num_conditions}) exceeds limit ({$max_conditions}), so MC/DC analysis will not count this expression
mir_build_extern_static_requires_unsafe =
use of extern static is unsafe and requires unsafe block
.note = extern statics are not controlled by the Rust type system: invalid data, aliasing violations or data races will cause undefined behavior

View file

@ -8,11 +8,8 @@ use rustc_middle::thir::{ExprId, ExprKind, Pat, Thir};
use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::LocalDefId;
use crate::builder::coverageinfo::mcdc::MCDCInfoBuilder;
use crate::builder::{Builder, CFG};
mod mcdc;
/// Collects coverage-related information during MIR building, to eventually be
/// turned into a function's [`CoverageInfoHi`] when MIR building is complete.
pub(crate) struct CoverageInfoBuilder {
@ -23,8 +20,6 @@ pub(crate) struct CoverageInfoBuilder {
/// Present if branch coverage is enabled.
branch_info: Option<BranchInfo>,
/// Present if MC/DC coverage is enabled.
mcdc_info: Option<MCDCInfoBuilder>,
}
#[derive(Default)]
@ -83,7 +78,6 @@ impl CoverageInfoBuilder {
nots: FxHashMap::default(),
markers: BlockMarkerGen::default(),
branch_info: tcx.sess.instrument_coverage_branch().then(BranchInfo::default),
mcdc_info: tcx.sess.instrument_coverage_mcdc().then(MCDCInfoBuilder::new),
})
}
@ -135,26 +129,11 @@ impl CoverageInfoBuilder {
fn register_two_way_branch<'tcx>(
&mut self,
tcx: TyCtxt<'tcx>,
cfg: &mut CFG<'tcx>,
source_info: SourceInfo,
true_block: BasicBlock,
false_block: BasicBlock,
) {
// Separate path for handling branches when MC/DC is enabled.
if let Some(mcdc_info) = self.mcdc_info.as_mut() {
let inject_block_marker =
|source_info, block| self.markers.inject_block_marker(cfg, source_info, block);
mcdc_info.visit_evaluated_condition(
tcx,
source_info,
true_block,
false_block,
inject_block_marker,
);
return;
}
// Bail out if branch coverage is not enabled.
let Some(branch_info) = self.branch_info.as_mut() else { return };
@ -169,23 +148,14 @@ impl CoverageInfoBuilder {
}
pub(crate) fn into_done(self) -> Box<CoverageInfoHi> {
let Self { nots: _, markers: BlockMarkerGen { num_block_markers }, branch_info, mcdc_info } =
self;
let Self { nots: _, markers: BlockMarkerGen { num_block_markers }, branch_info } = self;
let branch_spans =
branch_info.map(|branch_info| branch_info.branch_spans).unwrap_or_default();
let (mcdc_spans, mcdc_degraded_branch_spans) =
mcdc_info.map(MCDCInfoBuilder::into_done).unwrap_or_default();
// For simplicity, always return an info struct (without Option), even
// if there's nothing interesting in it.
Box::new(CoverageInfoHi {
num_block_markers,
branch_spans,
mcdc_degraded_branch_spans,
mcdc_spans,
})
Box::new(CoverageInfoHi { num_block_markers, branch_spans })
}
}
@ -238,14 +208,7 @@ impl<'tcx> Builder<'_, 'tcx> {
mir::TerminatorKind::if_(mir::Operand::Copy(place), true_block, false_block),
);
// Separate path for handling branches when MC/DC is enabled.
coverage_info.register_two_way_branch(
self.tcx,
&mut self.cfg,
source_info,
true_block,
false_block,
);
coverage_info.register_two_way_branch(&mut self.cfg, source_info, true_block, false_block);
let join_block = self.cfg.start_new_block();
self.cfg.goto(true_block, source_info, join_block);
@ -276,13 +239,7 @@ impl<'tcx> Builder<'_, 'tcx> {
let source_info = SourceInfo { span: self.thir[expr_id].span, scope: self.source_scope };
coverage_info.register_two_way_branch(
self.tcx,
&mut self.cfg,
source_info,
then_block,
else_block,
);
coverage_info.register_two_way_branch(&mut self.cfg, source_info, then_block, else_block);
}
/// If branch coverage is enabled, inject marker statements into `true_block`
@ -299,12 +256,6 @@ impl<'tcx> Builder<'_, 'tcx> {
let Some(coverage_info) = self.coverage_info.as_mut() else { return };
let source_info = SourceInfo { span: pattern.span, scope: self.source_scope };
coverage_info.register_two_way_branch(
self.tcx,
&mut self.cfg,
source_info,
true_block,
false_block,
);
coverage_info.register_two_way_branch(&mut self.cfg, source_info, true_block, false_block);
}
}

View file

@ -1,295 +0,0 @@
use std::collections::VecDeque;
use rustc_middle::bug;
use rustc_middle::mir::coverage::{
BlockMarkerId, ConditionId, ConditionInfo, MCDCBranchSpan, MCDCDecisionSpan,
};
use rustc_middle::mir::{BasicBlock, SourceInfo};
use rustc_middle::thir::LogicalOp;
use rustc_middle::ty::TyCtxt;
use rustc_span::Span;
use crate::builder::Builder;
use crate::errors::MCDCExceedsConditionLimit;
/// LLVM uses `i16` to represent condition id. Hence `i16::MAX` is the hard limit for number of
/// conditions in a decision.
const MAX_CONDITIONS_IN_DECISION: usize = i16::MAX as usize;
#[derive(Default)]
struct MCDCDecisionCtx {
/// To construct condition evaluation tree.
decision_stack: VecDeque<ConditionInfo>,
processing_decision: Option<MCDCDecisionSpan>,
conditions: Vec<MCDCBranchSpan>,
}
struct MCDCState {
decision_ctx_stack: Vec<MCDCDecisionCtx>,
}
impl MCDCState {
fn new() -> Self {
Self { decision_ctx_stack: vec![MCDCDecisionCtx::default()] }
}
/// Decision depth is given as a u16 to reduce the size of the `CoverageKind`,
/// as it is very unlikely that the depth ever reaches 2^16.
#[inline]
fn decision_depth(&self) -> u16 {
match u16::try_from(self.decision_ctx_stack.len())
.expect(
"decision depth did not fit in u16, this is likely to be an instrumentation error",
)
.checked_sub(1)
{
Some(d) => d,
None => bug!("Unexpected empty decision stack"),
}
}
// At first we assign ConditionIds for each sub expression.
// If the sub expression is composite, re-assign its ConditionId to its LHS and generate a new ConditionId for its RHS.
//
// Example: "x = (A && B) || (C && D) || (D && F)"
//
// Visit Depth1:
// (A && B) || (C && D) || (D && F)
// ^-------LHS--------^ ^-RHS--^
// ID=1 ID=2
//
// Visit LHS-Depth2:
// (A && B) || (C && D)
// ^-LHS--^ ^-RHS--^
// ID=1 ID=3
//
// Visit LHS-Depth3:
// (A && B)
// LHS RHS
// ID=1 ID=4
//
// Visit RHS-Depth3:
// (C && D)
// LHS RHS
// ID=3 ID=5
//
// Visit RHS-Depth2: (D && F)
// LHS RHS
// ID=2 ID=6
//
// Visit Depth1:
// (A && B) || (C && D) || (D && F)
// ID=1 ID=4 ID=3 ID=5 ID=2 ID=6
//
// A node ID of '0' always means MC/DC isn't being tracked.
//
// If a "next" node ID is '0', it means it's the end of the test vector.
//
// As the compiler tracks expression in pre-order, we can ensure that condition info of parents are always properly assigned when their children are visited.
// - If the op is AND, the "false_next" of LHS and RHS should be the parent's "false_next". While "true_next" of the LHS is the RHS, the "true next" of RHS is the parent's "true_next".
// - If the op is OR, the "true_next" of LHS and RHS should be the parent's "true_next". While "false_next" of the LHS is the RHS, the "false next" of RHS is the parent's "false_next".
fn record_conditions(&mut self, op: LogicalOp, span: Span) {
let decision_depth = self.decision_depth();
let Some(decision_ctx) = self.decision_ctx_stack.last_mut() else {
bug!("Unexpected empty decision_ctx_stack")
};
let decision = match decision_ctx.processing_decision.as_mut() {
Some(decision) => {
decision.span = decision.span.to(span);
decision
}
None => decision_ctx.processing_decision.insert(MCDCDecisionSpan {
span,
num_conditions: 0,
end_markers: vec![],
decision_depth,
}),
};
let parent_condition = decision_ctx.decision_stack.pop_back().unwrap_or_else(|| {
assert_eq!(
decision.num_conditions, 0,
"decision stack must be empty only for empty decision"
);
decision.num_conditions += 1;
ConditionInfo {
condition_id: ConditionId::START,
true_next_id: None,
false_next_id: None,
}
});
let lhs_id = parent_condition.condition_id;
let rhs_condition_id = ConditionId::from(decision.num_conditions);
decision.num_conditions += 1;
let (lhs, rhs) = match op {
LogicalOp::And => {
let lhs = ConditionInfo {
condition_id: lhs_id,
true_next_id: Some(rhs_condition_id),
false_next_id: parent_condition.false_next_id,
};
let rhs = ConditionInfo {
condition_id: rhs_condition_id,
true_next_id: parent_condition.true_next_id,
false_next_id: parent_condition.false_next_id,
};
(lhs, rhs)
}
LogicalOp::Or => {
let lhs = ConditionInfo {
condition_id: lhs_id,
true_next_id: parent_condition.true_next_id,
false_next_id: Some(rhs_condition_id),
};
let rhs = ConditionInfo {
condition_id: rhs_condition_id,
true_next_id: parent_condition.true_next_id,
false_next_id: parent_condition.false_next_id,
};
(lhs, rhs)
}
};
// We visit expressions tree in pre-order, so place the left-hand side on the top.
decision_ctx.decision_stack.push_back(rhs);
decision_ctx.decision_stack.push_back(lhs);
}
fn try_finish_decision(
&mut self,
span: Span,
true_marker: BlockMarkerId,
false_marker: BlockMarkerId,
degraded_branches: &mut Vec<MCDCBranchSpan>,
) -> Option<(MCDCDecisionSpan, Vec<MCDCBranchSpan>)> {
let Some(decision_ctx) = self.decision_ctx_stack.last_mut() else {
bug!("Unexpected empty decision_ctx_stack")
};
let Some(condition_info) = decision_ctx.decision_stack.pop_back() else {
let branch = MCDCBranchSpan {
span,
condition_info: ConditionInfo {
condition_id: ConditionId::START,
true_next_id: None,
false_next_id: None,
},
true_marker,
false_marker,
};
degraded_branches.push(branch);
return None;
};
let Some(decision) = decision_ctx.processing_decision.as_mut() else {
bug!("Processing decision should have been created before any conditions are taken");
};
if condition_info.true_next_id.is_none() {
decision.end_markers.push(true_marker);
}
if condition_info.false_next_id.is_none() {
decision.end_markers.push(false_marker);
}
decision_ctx.conditions.push(MCDCBranchSpan {
span,
condition_info,
true_marker,
false_marker,
});
if decision_ctx.decision_stack.is_empty() {
let conditions = std::mem::take(&mut decision_ctx.conditions);
decision_ctx.processing_decision.take().map(|decision| (decision, conditions))
} else {
None
}
}
}
/// Accumulates MC/DC mapping information while THIR is lowered to MIR.
pub(crate) struct MCDCInfoBuilder {
    // Branches that could not participate in a full decision (no decision in
    // progress, or the decision exceeded the condition limit).
    degraded_spans: Vec<MCDCBranchSpan>,
    // Completed decisions, each paired with the branch spans of its conditions.
    mcdc_spans: Vec<(MCDCDecisionSpan, Vec<MCDCBranchSpan>)>,
    // Tracks the stack of decisions currently being built.
    state: MCDCState,
}
impl MCDCInfoBuilder {
    /// Creates an empty builder with no recorded decisions or branches.
    pub(crate) fn new() -> Self {
        Self { degraded_spans: vec![], mcdc_spans: vec![], state: MCDCState::new() }
    }

    /// Records one evaluated condition: injects block markers for its
    /// true/false successor blocks and, if this condition completes the
    /// innermost decision, files the finished decision either as a proper
    /// MC/DC mapping or — when it has too many conditions — as degraded
    /// plain-branch mappings (with a warning).
    pub(crate) fn visit_evaluated_condition(
        &mut self,
        tcx: TyCtxt<'_>,
        source_info: SourceInfo,
        true_block: BasicBlock,
        false_block: BasicBlock,
        mut inject_block_marker: impl FnMut(SourceInfo, BasicBlock) -> BlockMarkerId,
    ) {
        let true_marker = inject_block_marker(source_info, true_block);
        let false_marker = inject_block_marker(source_info, false_block);

        // `try_finish_decision` returns `Some` only when the decision stack is
        // empty, i.e. when all conditions of the decision were instrumented
        // and the decision is "complete".
        let finished = self.state.try_finish_decision(
            source_info.span,
            true_marker,
            false_marker,
            &mut self.degraded_spans,
        );
        let Some((decision, conditions)) = finished else { return };

        let num_conditions = conditions.len();
        assert_eq!(
            num_conditions, decision.num_conditions,
            "final number of conditions is not correct"
        );
        if num_conditions == 0 {
            unreachable!("Decision with no condition is not expected");
        } else if num_conditions <= MAX_CONDITIONS_IN_DECISION {
            self.mcdc_spans.push((decision, conditions));
        } else {
            // Too many conditions for MC/DC instrumentation; degrade them to
            // ordinary branch mappings and warn the user.
            self.degraded_spans.extend(conditions);
            tcx.dcx().emit_warn(MCDCExceedsConditionLimit {
                span: decision.span,
                num_conditions,
                max_conditions: MAX_CONDITIONS_IN_DECISION,
            });
        }
    }

    /// Consumes the builder, yielding the completed decisions (with their
    /// conditions) and the degraded standalone branches.
    pub(crate) fn into_done(
        self,
    ) -> (Vec<(MCDCDecisionSpan, Vec<MCDCBranchSpan>)>, Vec<MCDCBranchSpan>) {
        (self.mcdc_spans, self.degraded_spans)
    }
}
impl Builder<'_, '_> {
    /// Forwards a lazy-boolean operation (`&&`/`||`) to the MC/DC state
    /// tracker, if MC/DC instrumentation is active for this body.
    pub(crate) fn visit_coverage_branch_operation(&mut self, logical_op: LogicalOp, span: Span) {
        let mcdc_info =
            self.coverage_info.as_mut().and_then(|coverage_info| coverage_info.mcdc_info.as_mut());
        if let Some(mcdc_info) = mcdc_info {
            mcdc_info.state.record_conditions(logical_op, span);
        }
    }

    /// Pushes a fresh decision context so that nested boolean expressions are
    /// tracked at their own decision depth. No-op when MC/DC is not enabled.
    pub(crate) fn mcdc_increment_depth_if_enabled(&mut self) {
        let mcdc_info =
            self.coverage_info.as_mut().and_then(|coverage_info| coverage_info.mcdc_info.as_mut());
        if let Some(mcdc_info) = mcdc_info {
            mcdc_info.state.decision_ctx_stack.push(MCDCDecisionCtx::default());
        }
    }

    /// Pops the decision context pushed by
    /// [`Self::mcdc_increment_depth_if_enabled`]. No-op when MC/DC is not
    /// enabled; ICEs if the context stack is unexpectedly empty.
    pub(crate) fn mcdc_decrement_depth_if_enabled(&mut self) {
        let mcdc_info =
            self.coverage_info.as_mut().and_then(|coverage_info| coverage_info.mcdc_info.as_mut());
        if let Some(mcdc_info) = mcdc_info {
            if mcdc_info.state.decision_ctx_stack.pop().is_none() {
                bug!("Unexpected empty decision stack");
            }
        }
    }
}

View file

@ -159,8 +159,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let condition_scope = this.local_scope();
let source_info = this.source_info(expr.span);
this.visit_coverage_branch_operation(op, expr.span);
// We first evaluate the left-hand side of the predicate ...
let (then_block, else_block) =
this.in_if_then_scope(condition_scope, expr.span, |this| {

View file

@ -113,15 +113,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let expr_span = expr.span;
match expr.kind {
ExprKind::LogicalOp { op: op @ LogicalOp::And, lhs, rhs } => {
this.visit_coverage_branch_operation(op, expr_span);
ExprKind::LogicalOp { op: LogicalOp::And, lhs, rhs } => {
let lhs_then_block = this.then_else_break_inner(block, lhs, args).into_block();
let rhs_then_block =
this.then_else_break_inner(lhs_then_block, rhs, args).into_block();
rhs_then_block.unit()
}
ExprKind::LogicalOp { op: op @ LogicalOp::Or, lhs, rhs } => {
this.visit_coverage_branch_operation(op, expr_span);
ExprKind::LogicalOp { op: LogicalOp::Or, lhs, rhs } => {
let local_scope = this.local_scope();
let (lhs_success_block, failure_block) =
this.in_if_then_scope(local_scope, expr_span, |this| {
@ -201,9 +199,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let temp_scope = args.temp_scope_override.unwrap_or_else(|| this.local_scope());
let mutability = Mutability::Mut;
// Increment the decision depth, in case we encounter boolean expressions
// further down.
this.mcdc_increment_depth_if_enabled();
let place = unpack!(
block = this.as_temp(
block,
@ -215,7 +210,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
mutability
)
);
this.mcdc_decrement_depth_if_enabled();
let operand = Operand::Move(Place::from(place));

View file

@ -975,15 +975,6 @@ pub(crate) struct NonEmptyNeverPattern<'tcx> {
pub(crate) ty: Ty<'tcx>,
}
/// Warning emitted when a decision has more conditions than MC/DC
/// instrumentation supports; its branches are degraded to plain branch
/// coverage instead.
#[derive(Diagnostic)]
#[diag(mir_build_exceeds_mcdc_condition_limit)]
pub(crate) struct MCDCExceedsConditionLimit {
    /// Span of the offending decision.
    #[primary_span]
    pub(crate) span: Span,
    /// How many conditions the decision actually contains.
    pub(crate) num_conditions: usize,
    /// The maximum number of conditions supported per decision.
    pub(crate) max_conditions: usize,
}
#[derive(Diagnostic)]
#[diag(mir_build_pattern_not_covered, code = E0005)]
pub(crate) struct PatternNotCovered<'s, 'tcx> {

View file

@ -9,8 +9,6 @@ mir_transform_const_mut_borrow = taking a mutable reference to a `const` item
.note2 = the mutable reference will refer to this temporary, not the original `const` item
.note3 = mutable reference created due to call to this method
mir_transform_exceeds_mcdc_test_vector_limit = number of total test vectors in one function will exceed limit ({$max_num_test_vectors}) if this decision is instrumented, so MC/DC analysis ignores it
mir_transform_ffi_unwind_call = call to {$foreign ->
[true] foreign function
*[false] function pointer

View file

@ -45,12 +45,6 @@ pub(super) fn is_inline_valid_on_fn<'tcx>(
return Err("#[rustc_no_mir_inline]");
}
// FIXME(#127234): Coverage instrumentation currently doesn't handle inlined
// MIR correctly when Modified Condition/Decision Coverage is enabled.
if tcx.sess.instrument_coverage_mcdc() {
return Err("incompatible with MC/DC coverage");
}
let ty = tcx.type_of(def_id);
if match ty.instantiate_identity().kind() {
ty::FnDef(..) => tcx.fn_sig(def_id).instantiate_identity().c_variadic(),

View file

@ -1,10 +1,5 @@
use std::collections::BTreeSet;
use rustc_data_structures::fx::FxIndexMap;
use rustc_index::IndexVec;
use rustc_middle::mir::coverage::{
BlockMarkerId, BranchSpan, ConditionId, ConditionInfo, CoverageInfoHi, CoverageKind,
};
use rustc_middle::mir::coverage::{BlockMarkerId, BranchSpan, CoverageInfoHi, CoverageKind};
use rustc_middle::mir::{self, BasicBlock, StatementKind};
use rustc_middle::ty::TyCtxt;
use rustc_span::Span;
@ -13,7 +8,6 @@ use crate::coverage::ExtractedHirInfo;
use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
use crate::coverage::spans::extract_refined_covspans;
use crate::coverage::unexpand::unexpand_into_body_span;
use crate::errors::MCDCExceedsTestVectorLimit;
/// Associates an ordinary executable code span with its corresponding BCB.
#[derive(Debug)]
@ -22,9 +16,6 @@ pub(super) struct CodeMapping {
pub(super) bcb: BasicCoverageBlock,
}
/// This is separate from [`MCDCBranch`] to help prepare for larger changes
/// that will be needed for improved branch coverage in the future.
/// (See <https://github.com/rust-lang/rust/pull/124217>.)
#[derive(Debug)]
pub(super) struct BranchPair {
pub(super) span: Span,
@ -32,40 +23,10 @@ pub(super) struct BranchPair {
pub(super) false_bcb: BasicCoverageBlock,
}
/// Associates an MC/DC branch span with condition info besides fields for normal branch.
#[derive(Debug)]
pub(super) struct MCDCBranch {
    pub(super) span: Span,
    pub(super) true_bcb: BasicCoverageBlock,
    pub(super) false_bcb: BasicCoverageBlock,
    // Condition id plus its true/false successor conditions within the decision.
    pub(super) condition_info: ConditionInfo,
    // Offset added to test vector idx if this branch is evaluated to true.
    // Initialized to `usize::MAX` and assigned in `calc_test_vectors_index`.
    pub(super) true_index: usize,
    // Offset added to test vector idx if this branch is evaluated to false.
    // Initialized to `usize::MAX` and assigned in `calc_test_vectors_index`.
    pub(super) false_index: usize,
}
/// Associates an MC/DC decision with its join BCBs.
#[derive(Debug)]
pub(super) struct MCDCDecision {
    pub(super) span: Span,
    // BCBs that control flow reaches after leaving the decision.
    pub(super) end_bcbs: BTreeSet<BasicCoverageBlock>,
    // Starting offset of this decision's slice of the test-vector bitmap.
    pub(super) bitmap_idx: usize,
    // Number of possible test vectors, i.e. the number of bitmap bits used.
    pub(super) num_test_vectors: usize,
    // Nesting depth of the decision; used to pick the condition bitmap.
    pub(super) decision_depth: u16,
}
// LLVM uses `i32` to index the bitmap. Thus `i32::MAX` is the hard limit for number of all test vectors
// in a function.
const MCDC_MAX_BITMAP_SIZE: usize = i32::MAX as usize;

/// All coverage mappings extracted from one MIR body, grouped by kind.
#[derive(Default)]
pub(super) struct ExtractedMappings {
    pub(super) code_mappings: Vec<CodeMapping>,
    pub(super) branch_pairs: Vec<BranchPair>,
    // Total number of test-vector bitmap bits needed by all decisions.
    pub(super) mcdc_bitmap_bits: usize,
    // MC/DC branches that were degraded to plain branch mappings.
    pub(super) mcdc_degraded_branches: Vec<MCDCBranch>,
    // Each decision paired with the branches of its conditions.
    pub(super) mcdc_mappings: Vec<(MCDCDecision, Vec<MCDCBranch>)>,
}
/// Extracts coverage-relevant spans from MIR, and associates them with
@ -78,32 +39,13 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
) -> ExtractedMappings {
let mut code_mappings = vec![];
let mut branch_pairs = vec![];
let mut mcdc_bitmap_bits = 0;
let mut mcdc_degraded_branches = vec![];
let mut mcdc_mappings = vec![];
// Extract ordinary code mappings from MIR statement/terminator spans.
extract_refined_covspans(tcx, mir_body, hir_info, graph, &mut code_mappings);
branch_pairs.extend(extract_branch_pairs(mir_body, hir_info, graph));
extract_mcdc_mappings(
mir_body,
tcx,
hir_info.body_span,
graph,
&mut mcdc_bitmap_bits,
&mut mcdc_degraded_branches,
&mut mcdc_mappings,
);
ExtractedMappings {
code_mappings,
branch_pairs,
mcdc_bitmap_bits,
mcdc_degraded_branches,
mcdc_mappings,
}
ExtractedMappings { code_mappings, branch_pairs }
}
fn resolve_block_markers(
@ -127,12 +69,6 @@ fn resolve_block_markers(
block_markers
}
// FIXME: There is currently a lot of redundancy between
// `extract_branch_pairs` and `extract_mcdc_mappings`. This is needed so
// that they can each be modified without interfering with the other, but in
// the long term we should try to bring them together again when branch coverage
// and MC/DC coverage support are more mature.
pub(super) fn extract_branch_pairs(
mir_body: &mir::Body<'_>,
hir_info: &ExtractedHirInfo,
@ -162,175 +98,3 @@ pub(super) fn extract_branch_pairs(
})
.collect::<Vec<_>>()
}
/// Extracts MC/DC decision and branch mappings from the body's high-level
/// coverage info, resolving block markers to coverage-graph BCBs.
///
/// Fully-resolvable decisions (with all of their conditions) are appended to
/// `mcdc_mappings`; branches that cannot take part in a complete decision are
/// appended to `mcdc_degraded_branches`. `mcdc_bitmap_bits` is the running
/// total of test-vector bitmap bits allocated across all decisions so far.
pub(super) fn extract_mcdc_mappings(
    mir_body: &mir::Body<'_>,
    tcx: TyCtxt<'_>,
    body_span: Span,
    graph: &CoverageGraph,
    mcdc_bitmap_bits: &mut usize,
    mcdc_degraded_branches: &mut impl Extend<MCDCBranch>,
    mcdc_mappings: &mut impl Extend<(MCDCDecision, Vec<MCDCBranch>)>,
) {
    // Nothing to do if branch coverage info was not collected for this body.
    let Some(coverage_info_hi) = mir_body.coverage_info_hi.as_deref() else { return };

    let block_markers = resolve_block_markers(coverage_info_hi, mir_body);

    // Markers resolve to `None` if the marked block was removed by MIR opts.
    let bcb_from_marker = |marker: BlockMarkerId| graph.bcb_from_bb(block_markers[marker]?);

    let check_branch_bcb =
        |raw_span: Span, true_marker: BlockMarkerId, false_marker: BlockMarkerId| {
            // For now, ignore any branch span that was introduced by
            // expansion. This makes things like assert macros less noisy.
            if !raw_span.ctxt().outer_expn_data().is_root() {
                return None;
            }
            let span = unexpand_into_body_span(raw_span, body_span)?;

            let true_bcb = bcb_from_marker(true_marker)?;
            let false_bcb = bcb_from_marker(false_marker)?;
            Some((span, true_bcb, false_bcb))
        };

    // Converts a marker-based branch span into a BCB-based `MCDCBranch`;
    // `true_index`/`false_index` start as `usize::MAX` placeholders and are
    // assigned later by `calc_test_vectors_index`.
    let to_mcdc_branch = |&mir::coverage::MCDCBranchSpan {
                              span: raw_span,
                              condition_info,
                              true_marker,
                              false_marker,
                          }| {
        let (span, true_bcb, false_bcb) = check_branch_bcb(raw_span, true_marker, false_marker)?;
        Some(MCDCBranch {
            span,
            true_bcb,
            false_bcb,
            condition_info,
            true_index: usize::MAX,
            false_index: usize::MAX,
        })
    };

    // Reserves `num_test_vectors` bits in the shared bitmap, returning the
    // starting offset, or `None` if that would exceed LLVM's bitmap limit.
    let mut get_bitmap_idx = |num_test_vectors: usize| -> Option<usize> {
        let bitmap_idx = *mcdc_bitmap_bits;
        let next_bitmap_bits = bitmap_idx.saturating_add(num_test_vectors);
        (next_bitmap_bits <= MCDC_MAX_BITMAP_SIZE).then(|| {
            *mcdc_bitmap_bits = next_bitmap_bits;
            bitmap_idx
        })
    };
    mcdc_degraded_branches
        .extend(coverage_info_hi.mcdc_degraded_branch_spans.iter().filter_map(to_mcdc_branch));

    mcdc_mappings.extend(coverage_info_hi.mcdc_spans.iter().filter_map(|(decision, branches)| {
        if branches.len() == 0 {
            return None;
        }
        let decision_span = unexpand_into_body_span(decision.span, body_span)?;

        let end_bcbs = decision
            .end_markers
            .iter()
            .map(|&marker| bcb_from_marker(marker))
            .collect::<Option<_>>()?;
        let mut branch_mappings: Vec<_> = branches.into_iter().filter_map(to_mcdc_branch).collect();
        // If any branch of the decision failed to convert, the decision as a
        // whole cannot be instrumented; degrade the branches that did convert.
        if branch_mappings.len() != branches.len() {
            mcdc_degraded_branches.extend(branch_mappings);
            return None;
        }
        let num_test_vectors = calc_test_vectors_index(&mut branch_mappings);
        let Some(bitmap_idx) = get_bitmap_idx(num_test_vectors) else {
            tcx.dcx().emit_warn(MCDCExceedsTestVectorLimit {
                span: decision_span,
                max_num_test_vectors: MCDC_MAX_BITMAP_SIZE,
            });
            mcdc_degraded_branches.extend(branch_mappings);
            return None;
        };
        // LLVM requires span of the decision contains all spans of its conditions.
        // Usually the decision span meets the requirement well but in cases like macros it may not.
        let span = branch_mappings
            .iter()
            .map(|branch| branch.span)
            .reduce(|lhs, rhs| lhs.to(rhs))
            .map(
                |joint_span| {
                    if decision_span.contains(joint_span) { decision_span } else { joint_span }
                },
            )
            .expect("branch mappings are ensured to be non-empty as checked above");
        Some((
            MCDCDecision {
                span,
                end_bcbs,
                bitmap_idx,
                num_test_vectors,
                decision_depth: decision.decision_depth,
            },
            branch_mappings,
        ))
    }));
}
// LLVM checks the executed test vector by accumulating indices of tested branches.
// We calculate the number of all possible test vectors of the decision and assign indices
// to branches here.
// See [the rfc](https://discourse.llvm.org/t/rfc-coverage-new-algorithm-and-file-format-for-mc-dc/76798/)
// for more details about the algorithm.
// This function is mostly like [`TVIdxBuilder::TvIdxBuilder`](https://github.com/llvm/llvm-project/blob/d594d9f7f4dc6eb748b3261917db689fdc348b96/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp#L226)
fn calc_test_vectors_index(conditions: &mut Vec<MCDCBranch>) -> usize {
    // Remaining in-degree of each condition node in the decision DAG.
    let mut indegree_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len());
    // `num_paths` is `width` described at the llvm rfc, which indicates how many paths reach the condition node.
    let mut num_paths_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len());
    // Map each condition id to its branch, counting in-degrees along the way.
    let mut next_conditions = conditions
        .iter_mut()
        .map(|branch| {
            let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info;
            [true_next_id, false_next_id]
                .into_iter()
                .flatten()
                .for_each(|next_id| indegree_stats[next_id] += 1);
            (condition_id, branch)
        })
        .collect::<FxIndexMap<_, _>>();

    // Process conditions in topological order, starting from the root.
    let mut queue =
        std::collections::VecDeque::from_iter(next_conditions.swap_remove(&ConditionId::START));
    num_paths_stats[ConditionId::START] = 1;
    let mut decision_end_nodes = Vec::new();
    while let Some(branch) = queue.pop_front() {
        let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info;
        let (false_index, true_index) = (&mut branch.false_index, &mut branch.true_index);
        let this_paths_count = num_paths_stats[condition_id];
        // Note. First check the false next to ensure conditions are touched in same order with llvm-cov.
        for (next, index) in [(false_next_id, false_index), (true_next_id, true_index)] {
            if let Some(next_id) = next {
                // Successor exists: this edge's index offset is the number of
                // paths already counted into the successor; then propagate
                // this node's path count and release the successor once all
                // of its predecessors have been visited.
                let next_paths_count = &mut num_paths_stats[next_id];
                *index = *next_paths_count;
                *next_paths_count = next_paths_count.saturating_add(this_paths_count);
                let next_indegree = &mut indegree_stats[next_id];
                *next_indegree -= 1;
                if *next_indegree == 0 {
                    queue.push_back(next_conditions.swap_remove(&next_id).expect(
                        "conditions with non-zero indegree before must be in next_conditions",
                    ));
                }
            } else {
                // This edge leaves the decision; its index is assigned below.
                decision_end_nodes.push((this_paths_count, condition_id, index));
            }
        }
    }
    assert!(next_conditions.is_empty(), "the decision tree has untouched nodes");
    let mut cur_idx = 0;
    // LLVM hopes the end nodes are sorted in descending order by `num_paths` so that it can
    // optimize bitmap size for decisions in tree form such as `a && b && c && d && ...`.
    decision_end_nodes.sort_by_key(|(num_paths, _, _)| usize::MAX - *num_paths);
    for (num_paths, condition_id, index) in decision_end_nodes {
        assert_eq!(
            num_paths, num_paths_stats[condition_id],
            "end nodes should not be updated since they were visited"
        );
        assert_eq!(*index, usize::MAX, "end nodes should not be assigned index before");
        *index = cur_idx;
        cur_idx += num_paths;
    }
    // Total number of test vectors == total bitmap bits for this decision.
    cur_idx
}

View file

@ -10,9 +10,7 @@ mod unexpand;
use rustc_hir as hir;
use rustc_hir::intravisit::{Visitor, walk_expr};
use rustc_middle::hir::nested_filter;
use rustc_middle::mir::coverage::{
CoverageKind, DecisionInfo, FunctionCoverageInfo, Mapping, MappingKind,
};
use rustc_middle::mir::coverage::{CoverageKind, FunctionCoverageInfo, Mapping, MappingKind};
use rustc_middle::mir::{self, BasicBlock, Statement, StatementKind, TerminatorKind};
use rustc_middle::ty::TyCtxt;
use rustc_span::Span;
@ -95,14 +93,6 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
// Inject coverage statements into MIR.
inject_coverage_statements(mir_body, &graph);
inject_mcdc_statements(mir_body, &graph, &extracted_mappings);
let mcdc_num_condition_bitmaps = extracted_mappings
.mcdc_mappings
.iter()
.map(|&(mappings::MCDCDecision { decision_depth, .. }, _)| decision_depth)
.max()
.map_or(0, |max| usize::from(max) + 1);
mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
function_source_hash: hir_info.function_source_hash,
@ -111,9 +101,6 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
priority_list,
mappings,
mcdc_bitmap_bits: extracted_mappings.mcdc_bitmap_bits,
mcdc_num_condition_bitmaps,
}));
}
@ -124,13 +111,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
/// function can potentially be simplified even further.
fn create_mappings(extracted_mappings: &ExtractedMappings) -> Vec<Mapping> {
// Fully destructure the mappings struct to make sure we don't miss any kinds.
let ExtractedMappings {
code_mappings,
branch_pairs,
mcdc_bitmap_bits: _,
mcdc_degraded_branches,
mcdc_mappings,
} = extracted_mappings;
let ExtractedMappings { code_mappings, branch_pairs } = extracted_mappings;
let mut mappings = Vec::new();
mappings.extend(code_mappings.iter().map(
@ -148,57 +129,6 @@ fn create_mappings(extracted_mappings: &ExtractedMappings) -> Vec<Mapping> {
},
));
// MCDC branch mappings are appended with their decisions in case decisions were ignored.
mappings.extend(mcdc_degraded_branches.iter().map(
|&mappings::MCDCBranch {
span,
true_bcb,
false_bcb,
condition_info: _,
true_index: _,
false_index: _,
}| { Mapping { kind: MappingKind::Branch { true_bcb, false_bcb }, span } },
));
for (decision, branches) in mcdc_mappings {
// FIXME(#134497): Previously it was possible for some of these branch
// conversions to fail, in which case the remaining branches in the
// decision would be degraded to plain `MappingKind::Branch`.
// The changes in #134497 made that failure impossible, because the
// fallible step was deferred to codegen. But the corresponding code
// in codegen wasn't updated to detect the need for a degrade step.
let conditions = branches
.into_iter()
.map(
|&mappings::MCDCBranch {
span,
true_bcb,
false_bcb,
condition_info,
true_index: _,
false_index: _,
}| {
Mapping {
kind: MappingKind::MCDCBranch {
true_bcb,
false_bcb,
mcdc_params: condition_info,
},
span,
}
},
)
.collect::<Vec<_>>();
// LLVM requires end index for counter mapping regions.
let kind = MappingKind::MCDCDecision(DecisionInfo {
bitmap_idx: (decision.bitmap_idx + decision.num_test_vectors) as u32,
num_conditions: u16::try_from(conditions.len()).unwrap(),
});
let span = decision.span;
mappings.extend(std::iter::once(Mapping { kind, span }).chain(conditions.into_iter()));
}
mappings
}
@ -210,51 +140,6 @@ fn inject_coverage_statements<'tcx>(mir_body: &mut mir::Body<'tcx>, graph: &Cove
}
}
/// For each condition, injects statements to update its condition bitmap after it has been evaluated.
/// For each decision, injects statements to update its test vector bitmap after it has been evaluated.
fn inject_mcdc_statements<'tcx>(
    mir_body: &mut mir::Body<'tcx>,
    graph: &CoverageGraph,
    extracted_mappings: &ExtractedMappings,
) {
    for (decision, conditions) in &extracted_mappings.mcdc_mappings {
        // Inject test vector update first because `inject_statement` always inserts new statements at the head.
        for &end in &decision.end_bcbs {
            let end_bb = graph[end].leader_bb();
            inject_statement(
                mir_body,
                CoverageKind::TestVectorBitmapUpdate {
                    bitmap_idx: decision.bitmap_idx as u32,
                    decision_depth: decision.decision_depth,
                },
                end_bb,
            );
        }

        for &mappings::MCDCBranch {
            span: _,
            true_bcb,
            false_bcb,
            condition_info: _,
            true_index,
            false_index,
        } in conditions
        {
            // Each branch outcome accumulates its index offset (assigned by
            // `calc_test_vectors_index`) into the condition bitmap at this
            // decision's depth.
            for (index, bcb) in [(false_index, false_bcb), (true_index, true_bcb)] {
                let bb = graph[bcb].leader_bb();
                inject_statement(
                    mir_body,
                    CoverageKind::CondBitmapUpdate {
                        index: index as u32,
                        decision_depth: decision.decision_depth,
                    },
                    bb,
                );
            }
        }
    }
}
fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb: BasicBlock) {
debug!(" injecting statement {counter_kind:?} for {bb:?}");
let data = &mut mir_body[bb];

View file

@ -111,11 +111,6 @@ fn coverage_ids_info<'tcx>(
bcb_needs_counter.insert(true_bcb);
bcb_needs_counter.insert(false_bcb);
}
MappingKind::MCDCBranch { true_bcb, false_bcb, mcdc_params: _ } => {
bcb_needs_counter.insert(true_bcb);
bcb_needs_counter.insert(false_bcb);
}
MappingKind::MCDCDecision(_) => {}
}
}

View file

@ -101,11 +101,7 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
StatementKind::Coverage(CoverageKind::BlockMarker { .. }) => None,
// These coverage statements should not exist prior to coverage instrumentation.
StatementKind::Coverage(
CoverageKind::VirtualCounter { .. }
| CoverageKind::CondBitmapUpdate { .. }
| CoverageKind::TestVectorBitmapUpdate { .. },
) => bug!(
StatementKind::Coverage(CoverageKind::VirtualCounter { .. }) => bug!(
"Unexpected coverage statement found during coverage instrumentation: {statement:?}"
),
}

View file

@ -117,14 +117,6 @@ pub(crate) struct FnItemRef {
pub ident: Ident,
}
/// Warning emitted when instrumenting a decision would push the function's
/// total test-vector count past LLVM's bitmap limit; MC/DC analysis then
/// ignores the decision.
#[derive(Diagnostic)]
#[diag(mir_transform_exceeds_mcdc_test_vector_limit)]
pub(crate) struct MCDCExceedsTestVectorLimit {
    /// Span of the decision that was skipped.
    #[primary_span]
    pub(crate) span: Span,
    /// The maximum number of test vectors allowed per function.
    pub(crate) max_num_test_vectors: usize,
}
pub(crate) struct MustNotSupend<'a, 'tcx> {
pub tcx: TyCtxt<'tcx>,
pub yield_sp: Span,

View file

@ -756,7 +756,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
&& let Some(v) = self.simplify_place_value(&mut pointee, location)
{
value = v;
place_ref = pointee.project_deeper(&place.projection[index..], self.tcx).as_ref();
// `pointee` holds a `Place`, so `ProjectionElem::Index` holds a `Local`.
// That local is SSA, but we otherwise have no guarantee on that local's value at
// the current location compared to its value where `pointee` was borrowed.
if pointee.projection.iter().all(|elem| !matches!(elem, ProjectionElem::Index(_))) {
place_ref =
pointee.project_deeper(&place.projection[index..], self.tcx).as_ref();
}
}
if let Some(local) = self.try_as_local(value, location) {
// Both `local` and `Place { local: place.local, projection: projection[..index] }`
@ -774,7 +780,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
&& let Some(v) = self.simplify_place_value(&mut pointee, location)
{
value = v;
place_ref = pointee.project_deeper(&[], self.tcx).as_ref();
// `pointee` holds a `Place`, so `ProjectionElem::Index` holds a `Local`.
// That local is SSA, but we otherwise have no guarantee on that local's value at
// the current location compared to its value where `pointee` was borrowed.
if pointee.projection.iter().all(|elem| !matches!(elem, ProjectionElem::Index(_))) {
place_ref = pointee.project_deeper(&[], self.tcx).as_ref();
}
}
if let Some(new_local) = self.try_as_local(value, location) {
place_ref = PlaceRef { local: new_local, projection: &[] };

View file

@ -40,7 +40,10 @@ monomorphize_couldnt_dump_mono_stats =
unexpected error occurred while dumping monomorphization stats: {$error}
monomorphize_encountered_error_while_instantiating =
the above error was encountered while instantiating `{$formatted_item}`
the above error was encountered while instantiating `{$kind} {$instance}`
monomorphize_encountered_error_while_instantiating_global_asm =
the above error was encountered while instantiating `global_asm`
monomorphize_large_assignments =
moving {$size} bytes
@ -52,12 +55,10 @@ monomorphize_no_optimized_mir =
.note = missing optimized MIR for this item (was the crate `{$crate_name}` compiled with `--emit=metadata`?)
monomorphize_recursion_limit =
reached the recursion limit while instantiating `{$shrunk}`
reached the recursion limit while instantiating `{$instance}`
.note = `{$def_path_str}` defined here
monomorphize_start_not_found = using `fn main` requires the standard library
.help = use `#![no_main]` to bypass the Rust generated entrypoint and declare a platform specific entrypoint yourself, usually with `#[no_mangle]`
monomorphize_symbol_already_defined = symbol `{$symbol}` is already defined
monomorphize_written_to_path = the full type name has been written to '{$path}'

View file

@ -206,7 +206,6 @@
//! regardless of whether it is actually needed or not.
use std::cell::OnceCell;
use std::path::PathBuf;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::sync::{MTLock, par_for_each_in};
@ -224,7 +223,6 @@ use rustc_middle::mir::{self, Location, MentionedItem, traversal};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCoercion};
use rustc_middle::ty::layout::ValidityRequirement;
use rustc_middle::ty::print::{shrunk_instance_name, with_no_trimmed_paths};
use rustc_middle::ty::{
self, GenericArgs, GenericParamDefKind, Instance, InstanceKind, Ty, TyCtxt, TypeFoldable,
TypeVisitableExt, VtblEntry,
@ -237,7 +235,10 @@ use rustc_span::source_map::{Spanned, dummy_spanned, respan};
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument, trace};
use crate::errors::{self, EncounteredErrorWhileInstantiating, NoOptimizedMir, RecursionLimit};
use crate::errors::{
self, EncounteredErrorWhileInstantiating, EncounteredErrorWhileInstantiatingGlobalAsm,
NoOptimizedMir, RecursionLimit,
};
#[derive(PartialEq)]
pub(crate) enum MonoItemCollectionStrategy {
@ -525,11 +526,23 @@ fn collect_items_rec<'tcx>(
&& starting_item.node.is_generic_fn()
&& starting_item.node.is_user_defined()
{
let formatted_item = with_no_trimmed_paths!(starting_item.node.to_string());
tcx.dcx().emit_note(EncounteredErrorWhileInstantiating {
span: starting_item.span,
formatted_item,
});
match starting_item.node {
MonoItem::Fn(instance) => tcx.dcx().emit_note(EncounteredErrorWhileInstantiating {
span: starting_item.span,
kind: "fn",
instance,
}),
MonoItem::Static(def_id) => tcx.dcx().emit_note(EncounteredErrorWhileInstantiating {
span: starting_item.span,
kind: "static",
instance: Instance::new_raw(def_id, GenericArgs::empty()),
}),
MonoItem::GlobalAsm(_) => {
tcx.dcx().emit_note(EncounteredErrorWhileInstantiatingGlobalAsm {
span: starting_item.span,
})
}
}
}
// Only updating `usage_map` for used items as otherwise we may be inserting the same item
// multiple times (if it is first 'mentioned' and then later actually used), and the usage map
@ -612,22 +625,7 @@ fn check_recursion_limit<'tcx>(
if !recursion_limit.value_within_limit(adjusted_recursion_depth) {
let def_span = tcx.def_span(def_id);
let def_path_str = tcx.def_path_str(def_id);
let (shrunk, written_to_path) = shrunk_instance_name(tcx, instance);
let mut path = PathBuf::new();
let was_written = if let Some(written_to_path) = written_to_path {
path = written_to_path;
true
} else {
false
};
tcx.dcx().emit_fatal(RecursionLimit {
span,
shrunk,
def_span,
def_path_str,
was_written,
path,
});
tcx.dcx().emit_fatal(RecursionLimit { span, instance, def_span, def_path_str });
}
recursion_depths.insert(def_id, recursion_depth + 1);

View file

@ -1,21 +1,16 @@
use std::path::PathBuf;
use rustc_macros::{Diagnostic, LintDiagnostic};
use rustc_middle::ty::Ty;
use rustc_middle::ty::{Instance, Ty};
use rustc_span::{Span, Symbol};
#[derive(Diagnostic)]
#[diag(monomorphize_recursion_limit)]
pub(crate) struct RecursionLimit {
pub(crate) struct RecursionLimit<'tcx> {
#[primary_span]
pub span: Span,
pub shrunk: String,
pub instance: Instance<'tcx>,
#[note]
pub def_span: Span,
pub def_path_str: String,
#[note(monomorphize_written_to_path)]
pub was_written: bool,
pub path: PathBuf,
}
#[derive(Diagnostic)]
@ -53,10 +48,18 @@ pub(crate) struct CouldntDumpMonoStats {
#[derive(Diagnostic)]
#[diag(monomorphize_encountered_error_while_instantiating)]
pub(crate) struct EncounteredErrorWhileInstantiating {
pub(crate) struct EncounteredErrorWhileInstantiating<'tcx> {
#[primary_span]
pub span: Span,
pub kind: &'static str,
pub instance: Instance<'tcx>,
}
#[derive(Diagnostic)]
#[diag(monomorphize_encountered_error_while_instantiating_global_asm)]
pub(crate) struct EncounteredErrorWhileInstantiatingGlobalAsm {
#[primary_span]
pub span: Span,
pub formatted_item: String,
}
#[derive(Diagnostic)]

View file

@ -223,7 +223,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
pub(crate) fn build_reduced_graph_external(&self, module: Module<'ra>) {
for child in self.tcx.module_children(module.def_id()) {
let parent_scope = ParentScope::module(module, self);
let parent_scope = ParentScope::module(module, self.arenas);
self.build_reduced_graph_for_external_crate_res(child, parent_scope)
}
}
@ -373,7 +373,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> {
res,
))
};
match self.r.resolve_path(
match self.r.cm().resolve_path(
&segments,
None,
parent_scope,
@ -1128,7 +1128,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> {
});
} else {
for ident in single_imports.iter().cloned() {
let result = self.r.maybe_resolve_ident_in_module(
let result = self.r.cm().maybe_resolve_ident_in_module(
ModuleOrUniformRoot::Module(module),
ident,
MacroNS,

View file

@ -469,13 +469,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
pub(crate) fn lint_if_path_starts_with_module(
&mut self,
finalize: Option<Finalize>,
finalize: Finalize,
path: &[Segment],
second_binding: Option<NameBinding<'_>>,
) {
let Some(Finalize { node_id, root_span, .. }) = finalize else {
return;
};
let Finalize { node_id, root_span, .. } = finalize;
let first_name = match path.get(0) {
// In the 2018 edition this lint is a hard error, so nothing to do
@ -1029,7 +1027,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
) -> Option<TypoSuggestion> {
let mut suggestions = Vec::new();
let ctxt = ident.span.ctxt();
self.visit_scopes(scope_set, parent_scope, ctxt, |this, scope, use_prelude, _| {
self.cm().visit_scopes(scope_set, parent_scope, ctxt, |this, scope, use_prelude, _| {
match scope {
Scope::DeriveHelpers(expn_id) => {
let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper);
@ -1048,7 +1046,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
if filter_fn(res) {
for derive in parent_scope.derives {
let parent_scope = &ParentScope { derives: &[], ..*parent_scope };
let Ok((Some(ext), _)) = this.resolve_macro_path(
let Ok((Some(ext), _)) = this.reborrow().resolve_macro_path(
derive,
Some(MacroKind::Derive),
parent_scope,
@ -1482,7 +1480,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
) {
// Bring imported but unused `derive` macros into `macro_map` so we ensure they can be used
// for suggestions.
self.visit_scopes(
self.cm().visit_scopes(
ScopeSet::Macro(MacroKind::Derive),
&parent_scope,
ident.span.ctxt(),
@ -1591,7 +1589,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
});
}
for ns in [Namespace::MacroNS, Namespace::TypeNS, Namespace::ValueNS] {
let Ok(binding) = self.early_resolve_ident_in_lexical_scope(
let Ok(binding) = self.cm().early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::All(ns),
parent_scope,
@ -2271,16 +2269,17 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
if ns == TypeNS || ns == ValueNS {
let ns_to_try = if ns == TypeNS { ValueNS } else { TypeNS };
let binding = if let Some(module) = module {
self.resolve_ident_in_module(
module,
ident,
ns_to_try,
parent_scope,
None,
ignore_binding,
ignore_import,
)
.ok()
self.cm()
.resolve_ident_in_module(
module,
ident,
ns_to_try,
parent_scope,
None,
ignore_binding,
ignore_import,
)
.ok()
} else if let Some(ribs) = ribs
&& let Some(TypeNS | ValueNS) = opt_ns
{
@ -2298,16 +2297,17 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
_ => None,
}
} else {
self.early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::All(ns_to_try),
parent_scope,
None,
false,
ignore_binding,
ignore_import,
)
.ok()
self.cm()
.early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::All(ns_to_try),
parent_scope,
None,
false,
ignore_binding,
ignore_import,
)
.ok()
};
if let Some(binding) = binding {
msg = format!(
@ -2401,7 +2401,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
},
)
});
if let Ok(binding) = self.early_resolve_ident_in_lexical_scope(
if let Ok(binding) = self.cm().early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::All(ValueNS),
parent_scope,
@ -2531,7 +2531,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
) -> Option<(Vec<Segment>, Option<String>)> {
// Replace first ident with `self` and check if that is valid.
path[0].ident.name = kw::SelfLower;
let result = self.maybe_resolve_path(&path, None, parent_scope, None);
let result = self.cm().maybe_resolve_path(&path, None, parent_scope, None);
debug!(?path, ?result);
if let PathResult::Module(..) = result { Some((path, None)) } else { None }
}
@ -2551,7 +2551,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
) -> Option<(Vec<Segment>, Option<String>)> {
// Replace first ident with `crate` and check if that is valid.
path[0].ident.name = kw::Crate;
let result = self.maybe_resolve_path(&path, None, parent_scope, None);
let result = self.cm().maybe_resolve_path(&path, None, parent_scope, None);
debug!(?path, ?result);
if let PathResult::Module(..) = result {
Some((
@ -2583,7 +2583,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
) -> Option<(Vec<Segment>, Option<String>)> {
// Replace first ident with `crate` and check if that is valid.
path[0].ident.name = kw::Super;
let result = self.maybe_resolve_path(&path, None, parent_scope, None);
let result = self.cm().maybe_resolve_path(&path, None, parent_scope, None);
debug!(?path, ?result);
if let PathResult::Module(..) = result { Some((path, None)) } else { None }
}
@ -2618,7 +2618,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
for name in extern_crate_names.into_iter() {
// Replace first ident with a crate name and check if that is valid.
path[0].ident.name = name;
let result = self.maybe_resolve_path(&path, None, parent_scope, None);
let result = self.cm().maybe_resolve_path(&path, None, parent_scope, None);
debug!(?path, ?name, ?result);
if let PathResult::Module(..) = result {
return Some((path, None));

View file

@ -16,10 +16,10 @@ use crate::imports::{Import, NameResolution};
use crate::late::{ConstantHasGenerics, NoConstantGenericsReason, PathSource, Rib, RibKind};
use crate::macros::{MacroRulesScope, sub_namespace_match};
use crate::{
AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BindingKey, Determinacy, Finalize,
ImportKind, LexicalScopeBinding, Module, ModuleKind, ModuleOrUniformRoot, NameBinding,
NameBindingKind, ParentScope, PathResult, PrivacyError, Res, ResolutionError, Resolver, Scope,
ScopeSet, Segment, Used, Weak, errors,
AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BindingKey, CmResolver, Determinacy,
Finalize, ImportKind, LexicalScopeBinding, Module, ModuleKind, ModuleOrUniformRoot,
NameBinding, NameBindingKind, ParentScope, PathResult, PrivacyError, Res, ResolutionError,
Resolver, Scope, ScopeSet, Segment, Used, Weak, errors,
};
#[derive(Copy, Clone)]
@ -44,12 +44,17 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
/// A generic scope visitor.
/// Visits scopes in order to resolve some identifier in them or perform other actions.
/// If the callback returns `Some` result, we stop visiting scopes and return it.
pub(crate) fn visit_scopes<T>(
&mut self,
pub(crate) fn visit_scopes<'r, T>(
mut self: CmResolver<'r, 'ra, 'tcx>,
scope_set: ScopeSet<'ra>,
parent_scope: &ParentScope<'ra>,
ctxt: SyntaxContext,
mut visitor: impl FnMut(&mut Self, Scope<'ra>, UsePrelude, SyntaxContext) -> Option<T>,
mut visitor: impl FnMut(
&mut CmResolver<'r, 'ra, 'tcx>,
Scope<'ra>,
UsePrelude,
SyntaxContext,
) -> Option<T>,
) -> Option<T> {
// General principles:
// 1. Not controlled (user-defined) names should have higher priority than controlled names
@ -146,7 +151,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
if visit {
let use_prelude = if use_prelude { UsePrelude::Yes } else { UsePrelude::No };
if let break_result @ Some(..) = visitor(self, scope, use_prelude, ctxt) {
if let break_result @ Some(..) = visitor(&mut self, scope, use_prelude, ctxt) {
return break_result;
}
}
@ -341,7 +346,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
_ => break,
}
let item = self.resolve_ident_in_module_unadjusted(
let item = self.cm().resolve_ident_in_module_unadjusted(
ModuleOrUniformRoot::Module(module),
ident,
ns,
@ -356,17 +361,18 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
return Some(LexicalScopeBinding::Item(binding));
}
}
self.early_resolve_ident_in_lexical_scope(
orig_ident,
ScopeSet::Late(ns, module, finalize.map(|finalize| finalize.node_id)),
parent_scope,
finalize,
finalize.is_some(),
ignore_binding,
None,
)
.ok()
.map(LexicalScopeBinding::Item)
self.cm()
.early_resolve_ident_in_lexical_scope(
orig_ident,
ScopeSet::Late(ns, module, finalize.map(|finalize| finalize.node_id)),
parent_scope,
finalize,
finalize.is_some(),
ignore_binding,
None,
)
.ok()
.map(LexicalScopeBinding::Item)
}
/// Resolve an identifier in lexical scope.
@ -375,8 +381,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
/// The function is used for resolving initial segments of macro paths (e.g., `foo` in
/// `foo::bar!();` or `foo!();`) and also for import paths on 2018 edition.
#[instrument(level = "debug", skip(self))]
pub(crate) fn early_resolve_ident_in_lexical_scope(
&mut self,
pub(crate) fn early_resolve_ident_in_lexical_scope<'r>(
self: CmResolver<'r, 'ra, 'tcx>,
orig_ident: Ident,
scope_set: ScopeSet<'ra>,
parent_scope: &ParentScope<'ra>,
@ -450,7 +456,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let mut result = Err(Determinacy::Determined);
for derive in parent_scope.derives {
let parent_scope = &ParentScope { derives: &[], ..*parent_scope };
match this.resolve_macro_path(
match this.reborrow().resolve_macro_path(
derive,
Some(MacroKind::Derive),
parent_scope,
@ -497,7 +503,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
finalize.map(|f| Finalize { used: Used::Scope, ..f }),
)
};
let binding = this.resolve_ident_in_module_unadjusted(
let binding = this.reborrow().resolve_ident_in_module_unadjusted(
ModuleOrUniformRoot::Module(module),
ident,
ns,
@ -514,7 +520,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
match binding {
Ok(binding) => {
if let Some(lint_id) = derive_fallback_lint_id {
this.lint_buffer.buffer_lint(
this.get_mut().lint_buffer.buffer_lint(
PROC_MACRO_DERIVE_RESOLUTION_FALLBACK,
lint_id,
orig_ident.span,
@ -556,7 +562,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
None => Err(Determinacy::Determined),
},
Scope::ExternPrelude => {
match this.extern_prelude_get(ident, finalize.is_some()) {
match this.reborrow().extern_prelude_get(ident, finalize.is_some()) {
Some(binding) => Ok((binding, Flags::empty())),
None => Err(Determinacy::determined(
this.graph_root.unexpanded_invocations.borrow().is_empty(),
@ -570,7 +576,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
Scope::StdLibPrelude => {
let mut result = Err(Determinacy::Determined);
if let Some(prelude) = this.prelude
&& let Ok(binding) = this.resolve_ident_in_module_unadjusted(
&& let Ok(binding) = this.reborrow().resolve_ident_in_module_unadjusted(
ModuleOrUniformRoot::Module(prelude),
ident,
ns,
@ -687,7 +693,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
AmbiguityErrorMisc::None
}
};
this.ambiguity_errors.push(AmbiguityError {
this.get_mut().ambiguity_errors.push(AmbiguityError {
kind,
ident: orig_ident,
b1: innermost_binding,
@ -725,8 +731,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
#[instrument(level = "debug", skip(self))]
pub(crate) fn maybe_resolve_ident_in_module(
&mut self,
pub(crate) fn maybe_resolve_ident_in_module<'r>(
self: CmResolver<'r, 'ra, 'tcx>,
module: ModuleOrUniformRoot<'ra>,
ident: Ident,
ns: Namespace,
@ -738,8 +744,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
#[instrument(level = "debug", skip(self))]
pub(crate) fn resolve_ident_in_module(
&mut self,
pub(crate) fn resolve_ident_in_module<'r>(
self: CmResolver<'r, 'ra, 'tcx>,
module: ModuleOrUniformRoot<'ra>,
mut ident: Ident,
ns: Namespace,
@ -776,12 +782,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
ignore_import,
)
}
/// Attempts to resolve `ident` in namespaces `ns` of `module`.
/// Invariant: if `finalize` is `Some`, expansion and import resolution must be complete.
#[instrument(level = "debug", skip(self))]
fn resolve_ident_in_module_unadjusted(
&mut self,
fn resolve_ident_in_module_unadjusted<'r>(
mut self: CmResolver<'r, 'ra, 'tcx>,
module: ModuleOrUniformRoot<'ra>,
ident: Ident,
ns: Namespace,
@ -812,7 +817,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
assert_eq!(shadowing, Shadowing::Unrestricted);
return if ns != TypeNS {
Err((Determined, Weak::No))
} else if let Some(binding) = self.extern_prelude_get(ident, finalize.is_some()) {
} else if let Some(binding) =
self.reborrow().extern_prelude_get(ident, finalize.is_some())
{
Ok(binding)
} else if !self.graph_root.unexpanded_invocations.borrow().is_empty() {
// Macro-expanded `extern crate` items can add names to extern prelude.
@ -865,7 +872,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
.find_map(|binding| if binding == ignore_binding { None } else { binding });
if let Some(finalize) = finalize {
return self.finalize_module_binding(
return self.get_mut().finalize_module_binding(
ident,
binding,
if resolution.non_glob_binding.is_some() { resolution.glob_binding } else { None },
@ -875,7 +882,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
);
}
let check_usable = |this: &Self, binding: NameBinding<'ra>| {
let check_usable = |this: CmResolver<'r, 'ra, 'tcx>, binding: NameBinding<'ra>| {
let usable = this.is_accessible_from(binding.vis, parent_scope.module);
if usable { Ok(binding) } else { Err((Determined, Weak::No)) }
};
@ -891,7 +898,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// Check if one of single imports can still define the name,
// if it can then our result is not determined and can be invalidated.
if self.single_import_can_define_name(
if self.reborrow().single_import_can_define_name(
&resolution,
binding,
ns,
@ -962,7 +969,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
Some(None) => {}
None => continue,
};
let result = self.resolve_ident_in_module_unadjusted(
let result = self.reborrow().resolve_ident_in_module_unadjusted(
ModuleOrUniformRoot::Module(module),
ident,
ns,
@ -1049,8 +1056,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// Checks if a single import can define the `Ident` corresponding to `binding`.
// This is used to check whether we can definitively accept a glob as a resolution.
fn single_import_can_define_name(
&mut self,
fn single_import_can_define_name<'r>(
mut self: CmResolver<'r, 'ra, 'tcx>,
resolution: &NameResolution<'ra>,
binding: Option<NameBinding<'ra>>,
ns: Namespace,
@ -1086,7 +1093,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
match self.resolve_ident_in_module(
match self.reborrow().resolve_ident_in_module(
module,
*source,
ns,
@ -1409,8 +1416,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
#[instrument(level = "debug", skip(self))]
pub(crate) fn maybe_resolve_path(
&mut self,
pub(crate) fn maybe_resolve_path<'r>(
self: CmResolver<'r, 'ra, 'tcx>,
path: &[Segment],
opt_ns: Option<Namespace>, // `None` indicates a module path in import
parent_scope: &ParentScope<'ra>,
@ -1418,10 +1425,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
) -> PathResult<'ra> {
self.resolve_path_with_ribs(path, opt_ns, parent_scope, None, None, None, ignore_import)
}
#[instrument(level = "debug", skip(self))]
pub(crate) fn resolve_path(
&mut self,
pub(crate) fn resolve_path<'r>(
self: CmResolver<'r, 'ra, 'tcx>,
path: &[Segment],
opt_ns: Option<Namespace>, // `None` indicates a module path in import
parent_scope: &ParentScope<'ra>,
@ -1440,8 +1446,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
)
}
pub(crate) fn resolve_path_with_ribs(
&mut self,
pub(crate) fn resolve_path_with_ribs<'r>(
mut self: CmResolver<'r, 'ra, 'tcx>,
path: &[Segment],
opt_ns: Option<Namespace>, // `None` indicates a module path in import
parent_scope: &ParentScope<'ra>,
@ -1457,18 +1463,23 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// We'll provide more context to the privacy errors later, up to `len`.
let privacy_errors_len = self.privacy_errors.len();
fn record_segment_res<'r, 'ra, 'tcx>(
mut this: CmResolver<'r, 'ra, 'tcx>,
finalize: Option<Finalize>,
res: Res,
id: Option<NodeId>,
) {
if finalize.is_some()
&& let Some(id) = id
&& !this.partial_res_map.contains_key(&id)
{
assert!(id != ast::DUMMY_NODE_ID, "Trying to resolve dummy id");
this.get_mut().record_partial_res(id, PartialRes::new(res));
}
}
for (segment_idx, &Segment { ident, id, .. }) in path.iter().enumerate() {
debug!("resolve_path ident {} {:?} {:?}", segment_idx, ident, id);
let record_segment_res = |this: &mut Self, res| {
if finalize.is_some()
&& let Some(id) = id
&& !this.partial_res_map.contains_key(&id)
{
assert!(id != ast::DUMMY_NODE_ID, "Trying to resolve dummy id");
this.record_partial_res(id, PartialRes::new(res));
}
};
let is_last = segment_idx + 1 == path.len();
let ns = if is_last { opt_ns.unwrap_or(TypeNS) } else { TypeNS };
@ -1507,7 +1518,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let mut ctxt = ident.span.ctxt().normalize_to_macros_2_0();
let self_mod = self.resolve_self(&mut ctxt, parent_scope.module);
if let Some(res) = self_mod.res() {
record_segment_res(self, res);
record_segment_res(self.reborrow(), finalize, res, id);
}
module = Some(ModuleOrUniformRoot::Module(self_mod));
continue;
@ -1529,7 +1540,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// `::a::b`, `crate::a::b` or `$crate::a::b`
let crate_root = self.resolve_crate_root(ident);
if let Some(res) = crate_root.res() {
record_segment_res(self, res);
record_segment_res(self.reborrow(), finalize, res, id);
}
module = Some(ModuleOrUniformRoot::Module(crate_root));
continue;
@ -1562,21 +1573,22 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
let binding = if let Some(module) = module {
self.resolve_ident_in_module(
module,
ident,
ns,
parent_scope,
finalize,
ignore_binding,
ignore_import,
)
.map_err(|(determinacy, _)| determinacy)
self.reborrow()
.resolve_ident_in_module(
module,
ident,
ns,
parent_scope,
finalize,
ignore_binding,
ignore_import,
)
.map_err(|(determinacy, _)| determinacy)
} else if let Some(ribs) = ribs
&& let Some(TypeNS | ValueNS) = opt_ns
{
assert!(ignore_import.is_none());
match self.resolve_ident_in_lexical_scope(
match self.get_mut().resolve_ident_in_lexical_scope(
ident,
ns,
parent_scope,
@ -1588,7 +1600,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
Some(LexicalScopeBinding::Item(binding)) => Ok(binding),
// we found a local variable or type param
Some(LexicalScopeBinding::Res(res)) => {
record_segment_res(self, res);
record_segment_res(self.reborrow(), finalize, res, id);
return PathResult::NonModule(PartialRes::with_unresolved_segments(
res,
path.len() - 1,
@ -1597,7 +1609,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
_ => Err(Determinacy::determined(finalize.is_some())),
}
} else {
self.early_resolve_ident_in_lexical_scope(
self.reborrow().early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::All(ns),
parent_scope,
@ -1618,8 +1630,10 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// Mark every privacy error in this path with the res to the last element. This allows us
// to detect the item the user cares about and either find an alternative import, or tell
// the user it is not accessible.
for error in &mut self.privacy_errors[privacy_errors_len..] {
error.outermost_res = Some((res, ident));
if finalize.is_some() {
for error in &mut self.get_mut().privacy_errors[privacy_errors_len..] {
error.outermost_res = Some((res, ident));
}
}
let maybe_assoc = opt_ns != Some(MacroNS) && PathSource::Type.is_expected(res);
@ -1628,7 +1642,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
module_had_parse_errors = true;
}
module = Some(ModuleOrUniformRoot::Module(self.expect_module(def_id)));
record_segment_res(self, res);
record_segment_res(self.reborrow(), finalize, res, id);
} else if res == Res::ToolMod && !is_last && opt_ns.is_some() {
if binding.is_import() {
self.dcx().emit_err(errors::ToolModuleImported {
@ -1641,8 +1655,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
} else if res == Res::Err {
return PathResult::NonModule(PartialRes::new(Res::Err));
} else if opt_ns.is_some() && (is_last || maybe_assoc) {
self.lint_if_path_starts_with_module(finalize, path, second_binding);
record_segment_res(self, res);
if let Some(finalize) = finalize {
self.get_mut().lint_if_path_starts_with_module(
finalize,
path,
second_binding,
);
}
record_segment_res(self.reborrow(), finalize, res, id);
return PathResult::NonModule(PartialRes::with_unresolved_segments(
res,
path.len() - segment_idx - 1,
@ -1677,6 +1697,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
));
}
let mut this = self.reborrow();
return PathResult::failed(
ident,
is_last,
@ -1684,7 +1705,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
module_had_parse_errors,
module,
|| {
self.report_path_resolution_error(
this.get_mut().report_path_resolution_error(
path,
opt_ns,
parent_scope,
@ -1701,7 +1722,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
self.lint_if_path_starts_with_module(finalize, path, second_binding);
if let Some(finalize) = finalize {
self.get_mut().lint_if_path_starts_with_module(finalize, path, second_binding);
}
PathResult::Module(match module {
Some(module) => module,

View file

@ -33,9 +33,10 @@ use crate::errors::{
ConsiderAddingMacroExport, ConsiderMarkingAsPub,
};
use crate::{
AmbiguityError, AmbiguityKind, BindingKey, Determinacy, Finalize, ImportSuggestion, Module,
ModuleOrUniformRoot, NameBinding, NameBindingData, NameBindingKind, ParentScope, PathResult,
PerNS, ResolutionError, Resolver, ScopeSet, Segment, Used, module_to_string, names_to_string,
AmbiguityError, AmbiguityKind, BindingKey, CmResolver, Determinacy, Finalize, ImportSuggestion,
Module, ModuleOrUniformRoot, NameBinding, NameBindingData, NameBindingKind, ParentScope,
PathResult, PerNS, ResolutionError, Resolver, ScopeSet, Segment, Used, module_to_string,
names_to_string,
};
type Res = def::Res<NodeId>;
@ -551,13 +552,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
/// Resolves all imports for the crate. This method performs the fixed-
/// point iteration.
pub(crate) fn resolve_imports(&mut self) {
self.assert_speculative = true;
let mut prev_indeterminate_count = usize::MAX;
let mut indeterminate_count = self.indeterminate_imports.len() * 3;
while indeterminate_count < prev_indeterminate_count {
prev_indeterminate_count = indeterminate_count;
indeterminate_count = 0;
for import in mem::take(&mut self.indeterminate_imports) {
let import_indeterminate_count = self.resolve_import(import);
let import_indeterminate_count = self.cm().resolve_import(import);
indeterminate_count += import_indeterminate_count;
match import_indeterminate_count {
0 => self.determined_imports.push(import),
@ -565,6 +567,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
}
self.assert_speculative = false;
}
pub(crate) fn finalize_imports(&mut self) {
@ -837,7 +840,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
///
/// Meanwhile, if resolve successful, the resolved bindings are written
/// into the module.
fn resolve_import(&mut self, import: Import<'ra>) -> usize {
fn resolve_import<'r>(mut self: CmResolver<'r, 'ra, 'tcx>, import: Import<'ra>) -> usize {
debug!(
"(resolving import for module) resolving import `{}::...` in `{}`",
Segment::names_to_string(&import.module_path),
@ -846,7 +849,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let module = if let Some(module) = import.imported_module.get() {
module
} else {
let path_res = self.maybe_resolve_path(
let path_res = self.reborrow().maybe_resolve_path(
&import.module_path,
None,
&import.parent_scope,
@ -866,19 +869,21 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
(source, target, bindings, type_ns_only)
}
ImportKind::Glob { .. } => {
self.resolve_glob_import(import);
// FIXME: Use mutable resolver directly as a hack, this should be an output of
// speculative resolution.
self.get_mut_unchecked().resolve_glob_import(import);
return 0;
}
_ => unreachable!(),
};
let mut indeterminate_count = 0;
self.per_ns(|this, ns| {
self.per_ns_cm(|this, ns| {
if !type_ns_only || ns == TypeNS {
if bindings[ns].get() != PendingBinding::Pending {
return;
};
let binding_result = this.maybe_resolve_ident_in_module(
let binding_result = this.reborrow().maybe_resolve_ident_in_module(
module,
source,
ns,
@ -901,16 +906,30 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
// We need the `target`, `source` can be extracted.
let imported_binding = this.import(binding, import);
this.define_binding_local(parent, target, ns, imported_binding);
// FIXME: Use mutable resolver directly as a hack, this should be an output of
// speculative resolution.
this.get_mut_unchecked().define_binding_local(
parent,
target,
ns,
imported_binding,
);
PendingBinding::Ready(Some(imported_binding))
}
Err(Determinacy::Determined) => {
// Don't remove underscores from `single_imports`, they were never added.
if target.name != kw::Underscore {
let key = BindingKey::new(target, ns);
this.update_local_resolution(parent, key, false, |_, resolution| {
resolution.single_imports.swap_remove(&import);
});
// FIXME: Use mutable resolver directly as a hack, this should be an output of
// speculative resolution.
this.get_mut_unchecked().update_local_resolution(
parent,
key,
false,
|_, resolution| {
resolution.single_imports.swap_remove(&import);
},
);
}
PendingBinding::Ready(None)
}
@ -943,7 +962,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// We'll provide more context to the privacy errors later, up to `len`.
let privacy_errors_len = self.privacy_errors.len();
let path_res = self.resolve_path(
let path_res = self.cm().resolve_path(
&import.module_path,
None,
&import.parent_scope,
@ -1060,7 +1079,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// 2 segments, so the `resolve_path` above won't trigger it.
let mut full_path = import.module_path.clone();
full_path.push(Segment::from_ident(Ident::dummy()));
self.lint_if_path_starts_with_module(Some(finalize), &full_path, None);
self.lint_if_path_starts_with_module(finalize, &full_path, None);
}
if let ModuleOrUniformRoot::Module(module) = module
@ -1103,7 +1122,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// importing it if available.
let mut path = import.module_path.clone();
path.push(Segment::from_ident(ident));
if let PathResult::Module(ModuleOrUniformRoot::Module(module)) = self.resolve_path(
if let PathResult::Module(ModuleOrUniformRoot::Module(module)) = self.cm().resolve_path(
&path,
None,
&import.parent_scope,
@ -1121,7 +1140,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let mut all_ns_err = true;
self.per_ns(|this, ns| {
if !type_ns_only || ns == TypeNS {
let binding = this.resolve_ident_in_module(
let binding = this.cm().resolve_ident_in_module(
module,
ident,
ns,
@ -1184,7 +1203,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let mut all_ns_failed = true;
self.per_ns(|this, ns| {
if !type_ns_only || ns == TypeNS {
let binding = this.resolve_ident_in_module(
let binding = this.cm().resolve_ident_in_module(
module,
ident,
ns,
@ -1373,7 +1392,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
full_path.push(Segment::from_ident(ident));
self.per_ns(|this, ns| {
if let Some(binding) = bindings[ns].get().binding().map(|b| b.import_source()) {
this.lint_if_path_starts_with_module(Some(finalize), &full_path, Some(binding));
this.lint_if_path_starts_with_module(finalize, &full_path, Some(binding));
}
});
}
@ -1426,7 +1445,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
return;
}
match this.early_resolve_ident_in_lexical_scope(
match this.cm().early_resolve_ident_in_lexical_scope(
target,
ScopeSet::All(ns),
&import.parent_scope,

View file

@ -1424,7 +1424,7 @@ impl<'a, 'ast, 'ra, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
// During late resolution we only track the module component of the parent scope,
// although it may be useful to track other components as well for diagnostics.
let graph_root = resolver.graph_root;
let parent_scope = ParentScope::module(graph_root, resolver);
let parent_scope = ParentScope::module(graph_root, resolver.arenas);
let start_rib_kind = RibKind::Module(graph_root);
LateResolutionVisitor {
r: resolver,
@ -1484,7 +1484,7 @@ impl<'a, 'ast, 'ra, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
opt_ns: Option<Namespace>, // `None` indicates a module path in import
finalize: Option<Finalize>,
) -> PathResult<'ra> {
self.r.resolve_path_with_ribs(
self.r.cm().resolve_path_with_ribs(
path,
opt_ns,
&self.parent_scope,
@ -4466,9 +4466,15 @@ impl<'a, 'ast, 'ra, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
if qself.is_none() {
let path_seg = |seg: &Segment| PathSegment::from_ident(seg.ident);
let path = Path { segments: path.iter().map(path_seg).collect(), span, tokens: None };
if let Ok((_, res)) =
self.r.resolve_macro_path(&path, None, &self.parent_scope, false, false, None, None)
{
if let Ok((_, res)) = self.r.cm().resolve_macro_path(
&path,
None,
&self.parent_scope,
false,
false,
None,
None,
) {
return Ok(Some(PartialRes::new(res)));
}
}

View file

@ -2389,7 +2389,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
// Look for associated items in the current trait.
if let Some((module, _)) = self.current_trait_ref
&& let Ok(binding) = self.r.maybe_resolve_ident_in_module(
&& let Ok(binding) = self.r.cm().maybe_resolve_ident_in_module(
ModuleOrUniformRoot::Module(module),
ident,
ns,

View file

@ -12,6 +12,7 @@
#![allow(rustc::untranslatable_diagnostic)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)]
#![feature(arbitrary_self_types)]
#![feature(assert_matches)]
#![feature(box_patterns)]
#![feature(if_let_guard)]
@ -162,11 +163,11 @@ struct ParentScope<'ra> {
impl<'ra> ParentScope<'ra> {
/// Creates a parent scope with the passed argument used as the module scope component,
/// and other scope components set to default empty values.
fn module(module: Module<'ra>, resolver: &Resolver<'ra, '_>) -> ParentScope<'ra> {
fn module(module: Module<'ra>, arenas: &'ra ResolverArenas<'ra>) -> ParentScope<'ra> {
ParentScope {
module,
expansion: LocalExpnId::ROOT,
macro_rules: resolver.arenas.alloc_macro_rules_scope(MacroRulesScope::Empty),
macro_rules: arenas.alloc_macro_rules_scope(MacroRulesScope::Empty),
derives: &[],
}
}
@ -1054,6 +1055,9 @@ pub struct Resolver<'ra, 'tcx> {
graph_root: Module<'ra>,
/// Assert that we are in speculative resolution mode.
assert_speculative: bool,
prelude: Option<Module<'ra>>,
extern_prelude: FxIndexMap<Macros20NormalizedIdent, ExternPreludeEntry<'ra>>,
@ -1156,10 +1160,11 @@ pub struct Resolver<'ra, 'tcx> {
unused_macro_rules: FxIndexMap<NodeId, DenseBitSet<usize>>,
proc_macro_stubs: FxHashSet<LocalDefId>,
/// Traces collected during macro resolution and validated when it's complete.
// FIXME: Remove interior mutability when speculative resolution produces these as outputs.
single_segment_macro_resolutions:
Vec<(Ident, MacroKind, ParentScope<'ra>, Option<NameBinding<'ra>>, Option<Span>)>,
RefCell<Vec<(Ident, MacroKind, ParentScope<'ra>, Option<NameBinding<'ra>>, Option<Span>)>>,
multi_segment_macro_resolutions:
Vec<(Vec<Segment>, Span, MacroKind, ParentScope<'ra>, Option<Res>, Namespace)>,
RefCell<Vec<(Vec<Segment>, Span, MacroKind, ParentScope<'ra>, Option<Res>, Namespace)>>,
builtin_attrs: Vec<(Ident, ParentScope<'ra>)>,
/// `derive(Copy)` marks items they are applied to so they are treated specially later.
/// Derive macros cannot modify the item themselves and have to store the markers in the global
@ -1527,6 +1532,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// The outermost module has def ID 0; this is not reflected in the
// AST.
graph_root,
assert_speculative: false, // Only set/cleared in Resolver::resolve_imports for now
prelude: None,
extern_prelude,
@ -1644,7 +1650,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
impl_trait_names: Default::default(),
};
let root_parent_scope = ParentScope::module(graph_root, &resolver);
let root_parent_scope = ParentScope::module(graph_root, resolver.arenas);
resolver.invocation_parent_scopes.insert(LocalExpnId::ROOT, root_parent_scope);
resolver.feed_visibility(crate_feed, Visibility::Public);
@ -1792,6 +1798,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
/// Returns a conditionally mutable resolver.
///
/// Currently only dependent on `assert_speculative`: if `assert_speculative` is false,
/// the resolver will allow mutation; otherwise, it will be immutable.
fn cm(&mut self) -> CmResolver<'_, 'ra, 'tcx> {
CmResolver::new(self, !self.assert_speculative)
}
/// Runs the function on each namespace.
fn per_ns<F: FnMut(&mut Self, Namespace)>(&mut self, mut f: F) {
f(self, TypeNS);
@ -1799,6 +1813,15 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
f(self, MacroNS);
}
fn per_ns_cm<'r, F: FnMut(&mut CmResolver<'r, 'ra, 'tcx>, Namespace)>(
mut self: CmResolver<'r, 'ra, 'tcx>,
mut f: F,
) {
f(&mut self, TypeNS);
f(&mut self, ValueNS);
f(&mut self, MacroNS);
}
fn is_builtin_macro(&self, res: Res) -> bool {
self.get_macro(res).is_some_and(|macro_data| macro_data.ext.builtin_name.is_some())
}
@ -1852,14 +1875,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
self.visit_scopes(ScopeSet::All(TypeNS), parent_scope, ctxt, |this, scope, _, _| {
self.cm().visit_scopes(ScopeSet::All(TypeNS), parent_scope, ctxt, |this, scope, _, _| {
match scope {
Scope::Module(module, _) => {
this.traits_in_module(module, assoc_item, &mut found_traits);
this.get_mut().traits_in_module(module, assoc_item, &mut found_traits);
}
Scope::StdLibPrelude => {
if let Some(module) = this.prelude {
this.traits_in_module(module, assoc_item, &mut found_traits);
this.get_mut().traits_in_module(module, assoc_item, &mut found_traits);
}
}
Scope::ExternPrelude | Scope::ToolPrelude | Scope::BuiltinTypes => {}
@ -2002,14 +2025,17 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// Do not report the lint if the macro name resolves in stdlib prelude
// even without the problematic `macro_use` import.
let found_in_stdlib_prelude = self.prelude.is_some_and(|prelude| {
self.maybe_resolve_ident_in_module(
ModuleOrUniformRoot::Module(prelude),
ident,
MacroNS,
&ParentScope::module(self.empty_module, self),
None,
)
.is_ok()
let empty_module = self.empty_module;
let arenas = self.arenas;
self.cm()
.maybe_resolve_ident_in_module(
ModuleOrUniformRoot::Module(prelude),
ident,
MacroNS,
&ParentScope::module(empty_module, arenas),
None,
)
.is_ok()
});
if !found_in_stdlib_prelude {
self.lint_buffer().buffer_lint(
@ -2180,7 +2206,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
fn extern_prelude_get(&mut self, ident: Ident, finalize: bool) -> Option<NameBinding<'ra>> {
fn extern_prelude_get<'r>(
mut self: CmResolver<'r, 'ra, 'tcx>,
ident: Ident,
finalize: bool,
) -> Option<NameBinding<'ra>> {
let mut record_use = None;
let entry = self.extern_prelude.get(&Macros20NormalizedIdent::new(ident));
let binding = entry.and_then(|entry| match entry.binding.get() {
@ -2216,7 +2246,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
});
if let Some(binding) = record_use {
self.record_use(ident, binding, Used::Scope);
self.get_mut().record_use(ident, binding, Used::Scope);
}
binding
@ -2251,7 +2281,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
.collect();
let Ok(segments) = segments else { return None };
match self.maybe_resolve_path(&segments, Some(ns), &parent_scope, None) {
match self.cm().maybe_resolve_path(&segments, Some(ns), &parent_scope, None) {
PathResult::Module(ModuleOrUniformRoot::Module(module)) => Some(module.res().unwrap()),
PathResult::NonModule(path_res) => {
path_res.full_res().filter(|res| !matches!(res, Res::Def(DefKind::Ctor(..), _)))
@ -2330,9 +2360,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
fn resolve_main(&mut self) {
let module = self.graph_root;
let ident = Ident::with_dummy_span(sym::main);
let parent_scope = &ParentScope::module(module, self);
let parent_scope = &ParentScope::module(module, self.arenas);
let Ok(name_binding) = self.maybe_resolve_ident_in_module(
let Ok(name_binding) = self.cm().maybe_resolve_ident_in_module(
ModuleOrUniformRoot::Module(module),
ident,
ValueNS,
@ -2426,3 +2456,63 @@ impl Finalize {
pub fn provide(providers: &mut Providers) {
providers.registered_tools = macros::registered_tools;
}
mod ref_mut {
use std::ops::Deref;
/// A wrapper around a mutable reference that conditionally allows mutable access.
pub(crate) struct RefOrMut<'a, T> {
p: &'a mut T,
mutable: bool,
}
impl<'a, T> Deref for RefOrMut<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.p
}
}
impl<'a, T> AsRef<T> for RefOrMut<'a, T> {
fn as_ref(&self) -> &T {
self.p
}
}
impl<'a, T> RefOrMut<'a, T> {
pub(crate) fn new(p: &'a mut T, mutable: bool) -> Self {
RefOrMut { p, mutable }
}
/// This is needed because this wraps a `&mut T` and is therefore not `Copy`.
pub(crate) fn reborrow(&mut self) -> RefOrMut<'_, T> {
RefOrMut { p: self.p, mutable: self.mutable }
}
/// Returns a mutable reference to the inner value if allowed.
///
/// # Panics
/// Panics if the `mutable` flag is false.
#[track_caller]
pub(crate) fn get_mut(&mut self) -> &mut T {
match self.mutable {
false => panic!("Can't mutably borrow speculative resolver"),
true => self.p,
}
}
/// Returns a mutable reference to the inner value without checking if
/// it's in a mutable state.
pub(crate) fn get_mut_unchecked(&mut self) -> &mut T {
self.p
}
}
}
/// A wrapper around `&mut Resolver` that may be mutable or immutable, depending on a conditions.
///
/// `Cm` stands for "conditionally mutable".
///
/// Prefer constructing it through [`Resolver::cm`] to ensure correctness.
type CmResolver<'r, 'ra, 'tcx> = ref_mut::RefOrMut<'r, Resolver<'ra, 'tcx>>;

View file

@ -41,9 +41,9 @@ use crate::errors::{
};
use crate::imports::Import;
use crate::{
BindingKey, DeriveData, Determinacy, Finalize, InvocationParent, MacroData, ModuleKind,
ModuleOrUniformRoot, NameBinding, NameBindingKind, ParentScope, PathResult, ResolutionError,
Resolver, ScopeSet, Segment, Used,
BindingKey, CmResolver, DeriveData, Determinacy, Finalize, InvocationParent, MacroData,
ModuleKind, ModuleOrUniformRoot, NameBinding, NameBindingKind, ParentScope, PathResult,
ResolutionError, Resolver, ScopeSet, Segment, Used,
};
type Res = def::Res<NodeId>;
@ -403,7 +403,7 @@ impl<'ra, 'tcx> ResolverExpand for Resolver<'ra, 'tcx> {
for (i, resolution) in entry.resolutions.iter_mut().enumerate() {
if resolution.exts.is_none() {
resolution.exts = Some(
match self.resolve_macro_path(
match self.cm().resolve_macro_path(
&resolution.path,
Some(MacroKind::Derive),
&parent_scope,
@ -568,7 +568,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
invoc_in_mod_inert_attr: Option<LocalDefId>,
suggestion_span: Option<Span>,
) -> Result<(Arc<SyntaxExtension>, Res), Indeterminate> {
let (ext, res) = match self.resolve_macro_or_delegation_path(
let (ext, res) = match self.cm().resolve_macro_or_delegation_path(
path,
Some(kind),
parent_scope,
@ -713,8 +713,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
Ok((ext, res))
}
pub(crate) fn resolve_macro_path(
&mut self,
pub(crate) fn resolve_macro_path<'r>(
self: CmResolver<'r, 'ra, 'tcx>,
path: &ast::Path,
kind: Option<MacroKind>,
parent_scope: &ParentScope<'ra>,
@ -736,8 +736,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
)
}
fn resolve_macro_or_delegation_path(
&mut self,
fn resolve_macro_or_delegation_path<'r>(
mut self: CmResolver<'r, 'ra, 'tcx>,
ast_path: &ast::Path,
kind: Option<MacroKind>,
parent_scope: &ParentScope<'ra>,
@ -763,7 +763,12 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let res = if deleg_impl.is_some() || path.len() > 1 {
let ns = if deleg_impl.is_some() { TypeNS } else { MacroNS };
let res = match self.maybe_resolve_path(&path, Some(ns), parent_scope, ignore_import) {
let res = match self.reborrow().maybe_resolve_path(
&path,
Some(ns),
parent_scope,
ignore_import,
) {
PathResult::NonModule(path_res) if let Some(res) = path_res.full_res() => Ok(res),
PathResult::Indeterminate if !force => return Err(Determinacy::Undetermined),
PathResult::NonModule(..)
@ -777,7 +782,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
if trace {
let kind = kind.expect("macro kind must be specified if tracing is enabled");
self.multi_segment_macro_resolutions.push((
// FIXME: Should be an output of Speculative Resolution.
self.multi_segment_macro_resolutions.borrow_mut().push((
path,
path_span,
kind,
@ -791,7 +797,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
res
} else {
let scope_set = kind.map_or(ScopeSet::All(MacroNS), ScopeSet::Macro);
let binding = self.early_resolve_ident_in_lexical_scope(
let binding = self.reborrow().early_resolve_ident_in_lexical_scope(
path[0].ident,
scope_set,
parent_scope,
@ -806,7 +812,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
if trace {
let kind = kind.expect("macro kind must be specified if tracing is enabled");
self.single_segment_macro_resolutions.push((
// FIXME: Should be an output of Speculative Resolution.
self.single_segment_macro_resolutions.borrow_mut().push((
path[0].ident,
kind,
*parent_scope,
@ -817,7 +824,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let res = binding.map(|binding| binding.res());
self.prohibit_imported_non_macro_attrs(binding.ok(), res.ok(), path_span);
self.report_out_of_scope_macro_calls(
self.reborrow().report_out_of_scope_macro_calls(
ast_path,
parent_scope,
invoc_in_mod_inert_attr,
@ -872,13 +879,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
};
let macro_resolutions = mem::take(&mut self.multi_segment_macro_resolutions);
// FIXME: Should be an output of Speculative Resolution.
let macro_resolutions = self.multi_segment_macro_resolutions.take();
for (mut path, path_span, kind, parent_scope, initial_res, ns) in macro_resolutions {
// FIXME: Path resolution will ICE if segment IDs present.
for seg in &mut path {
seg.id = None;
}
match self.resolve_path(
match self.cm().resolve_path(
&path,
Some(ns),
&parent_scope,
@ -905,8 +913,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
path_res
{
// try to suggest if it's not a macro, maybe a function
if let PathResult::NonModule(partial_res) =
self.maybe_resolve_path(&path, Some(ValueNS), &parent_scope, None)
if let PathResult::NonModule(partial_res) = self
.cm()
.maybe_resolve_path(&path, Some(ValueNS), &parent_scope, None)
&& partial_res.unresolved_segments() == 0
{
let sm = self.tcx.sess.source_map();
@ -948,9 +957,10 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
let macro_resolutions = mem::take(&mut self.single_segment_macro_resolutions);
// FIXME: Should be an output of Speculative Resolution.
let macro_resolutions = self.single_segment_macro_resolutions.take();
for (ident, kind, parent_scope, initial_binding, sugg_span) in macro_resolutions {
match self.early_resolve_ident_in_lexical_scope(
match self.cm().early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::Macro(kind),
&parent_scope,
@ -1005,7 +1015,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let builtin_attrs = mem::take(&mut self.builtin_attrs);
for (ident, parent_scope) in builtin_attrs {
let _ = self.early_resolve_ident_in_lexical_scope(
let _ = self.cm().early_resolve_ident_in_lexical_scope(
ident,
ScopeSet::Macro(MacroKind::Attr),
&parent_scope,
@ -1090,8 +1100,8 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
fn report_out_of_scope_macro_calls(
&mut self,
fn report_out_of_scope_macro_calls<'r>(
mut self: CmResolver<'r, 'ra, 'tcx>,
path: &ast::Path,
parent_scope: &ParentScope<'ra>,
invoc_in_mod_inert_attr: Option<(LocalDefId, NodeId)>,
@ -1110,7 +1120,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// If such resolution is successful and gives the same result
// (e.g. if the macro is re-imported), then silence the lint.
let no_macro_rules = self.arenas.alloc_macro_rules_scope(MacroRulesScope::Empty);
let fallback_binding = self.early_resolve_ident_in_lexical_scope(
let fallback_binding = self.reborrow().early_resolve_ident_in_lexical_scope(
path.segments[0].ident,
ScopeSet::Macro(MacroKind::Bang),
&ParentScope { macro_rules: no_macro_rules, ..*parent_scope },
@ -1206,7 +1216,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let mut indeterminate = false;
for ns in namespaces {
match self.maybe_resolve_path(path, Some(*ns), &parent_scope, None) {
match self.cm().maybe_resolve_path(path, Some(*ns), &parent_scope, None) {
PathResult::Module(ModuleOrUniformRoot::Module(_)) => return Ok(true),
PathResult::NonModule(partial_res) if partial_res.unresolved_segments() == 0 => {
return Ok(true);

View file

@ -190,7 +190,7 @@ pub struct CoverageOptions {
pub discard_all_spans_in_codegen: bool,
}
/// Controls whether branch coverage or MC/DC coverage is enabled.
/// Controls whether branch coverage is enabled.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub enum CoverageLevel {
/// Instrument for coverage at the MIR block level.
@ -214,9 +214,6 @@ pub enum CoverageLevel {
/// instrumentation, so it might be removed in the future when MC/DC is
/// sufficiently complete, or if it is making MC/DC changes difficult.
Condition,
/// Instrument for MC/DC. Mostly a superset of condition coverage, but might
/// differ in some corner cases.
Mcdc,
}
// The different settings that the `-Z offload` flag can have.

View file

@ -755,7 +755,7 @@ mod desc {
pub(crate) const parse_linker_flavor: &str = ::rustc_target::spec::LinkerFlavorCli::one_of();
pub(crate) const parse_dump_mono_stats: &str = "`markdown` (default) or `json`";
pub(crate) const parse_instrument_coverage: &str = parse_bool;
pub(crate) const parse_coverage_options: &str = "`block` | `branch` | `condition` | `mcdc`";
pub(crate) const parse_coverage_options: &str = "`block` | `branch` | `condition`";
pub(crate) const parse_instrument_xray: &str = "either a boolean (`yes`, `no`, `on`, `off`, etc), or a comma separated list of settings: `always` or `never` (mutually exclusive), `ignore-loops`, `instruction-threshold=N`, `skip-entry`, `skip-exit`";
pub(crate) const parse_unpretty: &str = "`string` or `string=string`";
pub(crate) const parse_treat_err_as_bug: &str = "either no value or a non-negative number";
@ -1458,7 +1458,6 @@ pub mod parse {
"block" => slot.level = CoverageLevel::Block,
"branch" => slot.level = CoverageLevel::Branch,
"condition" => slot.level = CoverageLevel::Condition,
"mcdc" => slot.level = CoverageLevel::Mcdc,
"discard-all-spans-in-codegen" => slot.discard_all_spans_in_codegen = true,
_ => return false,
}

View file

@ -354,11 +354,6 @@ impl Session {
&& self.opts.unstable_opts.coverage_options.level >= CoverageLevel::Condition
}
pub fn instrument_coverage_mcdc(&self) -> bool {
self.instrument_coverage()
&& self.opts.unstable_opts.coverage_options.level >= CoverageLevel::Mcdc
}
/// Provides direct access to the `CoverageOptions` struct, so that
/// individual flags for debugging/testing coverage instrumetation don't
/// need separate accessors.

View file

@ -60,7 +60,15 @@ impl AbiMap {
"x86_64" => Arch::X86_64,
_ => Arch::Other,
};
let os = if target.is_like_windows { OsKind::Windows } else { OsKind::Other };
let os = if target.is_like_windows {
OsKind::Windows
} else if target.is_like_vexos {
OsKind::VEXos
} else {
OsKind::Other
};
AbiMap { arch, os }
}
@ -82,6 +90,10 @@ impl AbiMap {
(ExternAbi::System { .. }, Arch::X86) if os == OsKind::Windows && !has_c_varargs => {
CanonAbi::X86(X86Call::Stdcall)
}
(ExternAbi::System { .. }, Arch::Arm(..)) if self.os == OsKind::VEXos => {
// Calls to VEXos APIs do not use VFP registers.
CanonAbi::Arm(ArmCall::Aapcs)
}
(ExternAbi::System { .. }, _) => CanonAbi::C,
// fallible lowerings
@ -191,6 +203,7 @@ enum Arch {
#[derive(Debug, PartialEq, Copy, Clone)]
enum OsKind {
Windows,
VEXos,
Other,
}

View file

@ -153,6 +153,7 @@ impl Target {
forward!(is_like_msvc);
forward!(is_like_wasm);
forward!(is_like_android);
forward!(is_like_vexos);
forward!(binary_format);
forward!(default_dwarf_version);
forward!(allows_weak_linkage);
@ -345,6 +346,7 @@ impl ToJson for Target {
target_option_val!(is_like_msvc);
target_option_val!(is_like_wasm);
target_option_val!(is_like_android);
target_option_val!(is_like_vexos);
target_option_val!(binary_format);
target_option_val!(default_dwarf_version);
target_option_val!(allows_weak_linkage);
@ -538,6 +540,7 @@ struct TargetSpecJson {
is_like_msvc: Option<bool>,
is_like_wasm: Option<bool>,
is_like_android: Option<bool>,
is_like_vexos: Option<bool>,
binary_format: Option<BinaryFormat>,
default_dwarf_version: Option<u32>,
allows_weak_linkage: Option<bool>,

View file

@ -2101,6 +2101,7 @@ supported_targets! {
("armv7a-none-eabihf", armv7a_none_eabihf),
("armv7a-nuttx-eabi", armv7a_nuttx_eabi),
("armv7a-nuttx-eabihf", armv7a_nuttx_eabihf),
("armv7a-vex-v5", armv7a_vex_v5),
("msp430-none-elf", msp430_none_elf),
@ -2571,6 +2572,8 @@ pub struct TargetOptions {
pub is_like_wasm: bool,
/// Whether a target toolchain is like Android, implying a Linux kernel and a Bionic libc
pub is_like_android: bool,
/// Whether a target toolchain is like VEXos, the operating system used by the VEX Robotics V5 Brain.
pub is_like_vexos: bool,
/// Target's binary file format. Defaults to BinaryFormat::Elf
pub binary_format: BinaryFormat,
/// Default supported version of DWARF on this platform.
@ -2953,6 +2956,7 @@ impl Default for TargetOptions {
is_like_msvc: false,
is_like_wasm: false,
is_like_android: false,
is_like_vexos: false,
binary_format: BinaryFormat::Elf,
default_dwarf_version: 4,
allows_weak_linkage: true,

View file

@ -0,0 +1,44 @@
use crate::spec::{
Cc, FloatAbi, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetMetadata,
TargetOptions,
};
const LINKER_SCRIPT: &str = include_str!("./armv7a_vex_v5_linker_script.ld");
pub(crate) fn target() -> Target {
let opts = TargetOptions {
vendor: "vex".into(),
env: "v5".into(),
os: "vexos".into(),
cpu: "cortex-a9".into(),
abi: "eabihf".into(),
is_like_vexos: true,
llvm_floatabi: Some(FloatAbi::Hard),
linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
features: "+v7,+neon,+vfp3d16,+thumb2".into(),
relocation_model: RelocModel::Static,
disable_redzone: true,
max_atomic_width: Some(64),
panic_strategy: PanicStrategy::Abort,
emit_debug_gdb_scripts: false,
c_enum_min_bits: Some(8),
default_uwtable: true,
has_thumb_interworking: true,
link_script: Some(LINKER_SCRIPT.into()),
..Default::default()
};
Target {
llvm_target: "armv7a-none-eabihf".into(),
metadata: TargetMetadata {
description: Some("ARMv7-A Cortex-A9 VEX V5 Brain".into()),
tier: Some(3),
host_tools: Some(false),
std: Some(false),
},
pointer_width: 32,
data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
arch: "arm".into(),
options: opts,
}
}

View file

@ -0,0 +1,144 @@
OUTPUT_FORMAT("elf32-littlearm")
ENTRY(_boot)
/*
* PROVIDE() is used here so that users can override default values.
* This is intended to give developers the option to use this Rust
* target even if the default values in this linker script aren't
* suitable for their needs.
*
* For example: `-C link-arg=--defsym=__stack_length=8M` could
* be used to increase the stack size above the value set in this
* file.
*/
PROVIDE(__vcodesig_magic = 0x35585658); /* XVX5 */
PROVIDE(__vcodesig_type = 0); /* V5_SIG_TYPE_USER */
PROVIDE(__vcodesig_owner = 2); /* V5_SIG_OWNER_PARTNER */
PROVIDE(__vcodesig_options = 0); /* none (0) */
PROVIDE(__user_ram_start = 0x03800000);
PROVIDE(__user_ram_length = 48M);
PROVIDE(__user_ram_end = __user_ram_start + __user_ram_length); /* 0x8000000 */
PROVIDE(__code_signature_length = 0x20);
PROVIDE(__stack_length = 4M);
PROVIDE(__heap_end = __user_ram_end - __stack_length);
PROVIDE(__user_length = __heap_start - __user_ram_start);
MEMORY {
USER_RAM (RWX) : ORIGIN = __user_ram_start, LENGTH = __user_ram_length
}
SECTIONS {
/*
* VEXos expects program binaries to have a 32-byte header called a "code signature"
* at their start which tells the OS that we are a valid program and configures some
* miscellaneous startup behavior.
*/
.code_signature : {
LONG(__vcodesig_magic)
LONG(__vcodesig_type)
LONG(__vcodesig_owner)
LONG(__vcodesig_options)
FILL(0)
. = __user_ram_start + __code_signature_length;
} > USER_RAM
/*
* Executable program instructions.
*/
.text : {
/* _boot routine (entry point from VEXos, must be at 0x03800020) */
*(.boot)
/* The rest of the program. */
*(.text .text.*)
} > USER_RAM
/*
* Global/uninitialized/static/constant data sections.
*/
.rodata : {
*(.rodata .rodata1 .rodata.*)
*(.srodata .srodata.*)
} > USER_RAM
/*
* ARM Stack Unwinding Sections
*
* These sections are added by the compiler in some cases to facilitate stack unwinding.
* __eh_frame_start and similar symbols are used by libunwind.
*/
.except_ordered : {
PROVIDE(__extab_start = .);
*(.gcc_except_table *.gcc_except_table.*)
*(.ARM.extab*)
PROVIDE(__extab_end = .);
} > USER_RAM
.eh_frame_hdr : {
/* see https://github.com/llvm/llvm-project/blob/main/libunwind/src/AddressSpace.hpp#L78 */
PROVIDE(__eh_frame_hdr_start = .);
KEEP(*(.eh_frame_hdr))
PROVIDE(__eh_frame_hdr_end = .);
} > USER_RAM
.eh_frame : {
PROVIDE(__eh_frame_start = .);
KEEP(*(.eh_frame))
PROVIDE(__eh_frame_end = .);
} > USER_RAM
.except_unordered : {
PROVIDE(__exidx_start = .);
*(.ARM.exidx*)
PROVIDE(__exidx_end = .);
} > USER_RAM
/* -- Data intended to be mutable at runtime begins here. -- */
.data : {
*(.data .data1 .data.*)
*(.sdata .sdata.* .sdata2.*)
} > USER_RAM
/* -- End of loadable sections - anything beyond this point shouldn't go in the binary uploaded to the device. -- */
.bss (NOLOAD) : {
__bss_start = .;
*(.sbss*)
*(.bss .bss.*)
/* Align the heap */
. = ALIGN(8);
__bss_end = .;
} > USER_RAM
/*
* Active memory sections for the stack/heap.
*
* Because these are (NOLOAD), they will not influence the final size of the binary.
*/
.heap (NOLOAD) : {
__heap_start = .;
. = __heap_end;
} > USER_RAM
.stack (NOLOAD) : ALIGN(8) {
__stack_bottom = .;
. += __stack_length;
__stack_top = .;
} > USER_RAM
/*
* `.ARM.attributes` contains arch metadata for compatibility purposes, but we
* only target one hardware configuration, meaning it'd just take up space.
*/
/DISCARD/ : {
*(.ARM.attributes*)
}
}

View file

@ -92,10 +92,15 @@ rustc_index::newtype_index! {
/// An inference variable for a const, for use in const generics.
#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[derive(TypeVisitable_Generic, TypeFoldable_Generic)]
#[cfg_attr(feature = "nightly", derive(Encodable_NoContext, Decodable_NoContext))]
pub enum InferConst {
/// Infer the value of the const.
Var(ConstVid),
Var(
#[type_foldable(identity)]
#[type_visitable(ignore)]
ConstVid,
),
/// A fresh const variable. See `infer::freshen` for more details.
Fresh(u32),
}

View file

@ -184,6 +184,15 @@ impl<I: Interner> UpcastFrom<I, TraitRef<I>> for TraitPredicate<I> {
}
}
impl<I: Interner> UpcastFrom<I, ty::Binder<I, TraitRef<I>>> for ty::Binder<I, TraitPredicate<I>> {
fn upcast_from(from: ty::Binder<I, TraitRef<I>>, _tcx: I) -> Self {
from.map_bound(|trait_ref| TraitPredicate {
trait_ref,
polarity: PredicatePolarity::Positive,
})
}
}
impl<I: Interner> fmt::Debug for TraitPredicate<I> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "TraitPredicate({:?}, polarity:{:?})", self.trait_ref, self.polarity)

View file

@ -21,7 +21,6 @@ fn test_convert() {
assert!(char::try_from(0xFFFF_FFFF_u32).is_err());
}
/* FIXME(#110395)
#[test]
const fn test_convert_const() {
assert!(u32::from('a') == 0x61);
@ -31,7 +30,6 @@ const fn test_convert_const() {
assert!(char::from(b'a') == 'a');
assert!(char::from(b'\xFF') == '\u{FF}');
}
*/
#[test]
fn test_from_str() {

View file

@ -1,4 +1,3 @@
/* FIXME(#110395)
#[test]
fn convert() {
const fn from(x: i32) -> i32 {
@ -15,4 +14,3 @@ fn convert() {
const BAR: Vec<String> = into(Vec::new());
assert_eq!(BAR, Vec::<String>::new());
}
*/

View file

@ -18,7 +18,9 @@
#![feature(const_deref)]
#![feature(const_destruct)]
#![feature(const_eval_select)]
#![feature(const_from)]
#![feature(const_ops)]
#![feature(const_option_ops)]
#![feature(const_ref_cell)]
#![feature(const_result_trait_fn)]
#![feature(const_trait_impl)]

View file

@ -214,13 +214,11 @@ fn nonzero_const() {
const ONE: Option<NonZero<u8>> = NonZero::new(1);
assert!(ONE.is_some());
/* FIXME(#110395)
const FROM_NONZERO_U8: u8 = u8::from(NONZERO_U8);
assert_eq!(FROM_NONZERO_U8, 5);
const NONZERO_CONVERT: NonZero<u32> = NonZero::<u32>::from(NONZERO_U8);
assert_eq!(NONZERO_CONVERT.get(), 5);
*/
}
#[test]

View file

@ -1,4 +1,3 @@
/* FIXME(#110395)
#[test]
fn from() {
use core::convert::TryFrom;
@ -24,4 +23,3 @@ fn from() {
const I16_FROM_U16: Result<i16, TryFromIntError> = i16::try_from(1u16);
assert_eq!(I16_FROM_U16, Ok(1i16));
}
*/

View file

@ -87,7 +87,6 @@ fn test_and() {
assert_eq!(x.and(Some(2)), None);
assert_eq!(x.and(None::<isize>), None);
/* FIXME(#110395)
const FOO: Option<isize> = Some(1);
const A: Option<isize> = FOO.and(Some(2));
const B: Option<isize> = FOO.and(None);
@ -99,7 +98,6 @@ fn test_and() {
const D: Option<isize> = BAR.and(None);
assert_eq!(C, None);
assert_eq!(D, None);
*/
}
#[test]

View file

@ -12,6 +12,7 @@ jobs:
if: github.repository == 'rust-lang/stdarch'
uses: rust-lang/josh-sync/.github/workflows/rustc-pull.yml@main
with:
github-app-id: ${{ vars.APP_CLIENT_ID }}
# https://rust-lang.zulipchat.com/#narrow/channel/208962-t-libs.2Fstdarch/topic/Subtree.20sync.20automation/with/528461782
zulip-stream-id: 208962
zulip-bot-email: "stdarch-ci-bot@rust-lang.zulipchat.com"
@ -19,4 +20,4 @@ jobs:
branch-name: rustc-pull
secrets:
zulip-api-token: ${{ secrets.ZULIP_API_TOKEN }}
token: ${{ secrets.GITHUB_TOKEN }}
github-app-secret: ${{ secrets.APP_PRIVATE_KEY }}

View file

@ -73,7 +73,7 @@ version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@ -90,9 +90,9 @@ checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "cc"
version = "1.2.30"
version = "1.2.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7"
checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2"
dependencies = [
"shlex",
]
@ -105,9 +105,9 @@ checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
[[package]]
name = "clap"
version = "4.5.41"
version = "4.5.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882"
dependencies = [
"clap_builder",
"clap_derive",
@ -115,14 +115,14 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.41"
version = "4.5.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim 0.11.1",
"strsim",
]
[[package]]
@ -134,7 +134,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@ -182,32 +182,11 @@ version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "csv"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d"
dependencies = [
"memchr",
]
[[package]]
name = "darling"
version = "0.13.4"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core",
"darling_macro",
@ -215,27 +194,27 @@ dependencies = [
[[package]]
name = "darling_core"
version = "0.13.4"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.10.0",
"syn 1.0.109",
"strsim",
"syn",
]
[[package]]
name = "darling_macro"
version = "0.13.4"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core",
"quote",
"syn 1.0.109",
"syn",
]
[[package]]
@ -357,14 +336,11 @@ name = "intrinsic-test"
version = "0.1.0"
dependencies = [
"clap",
"csv",
"diff",
"itertools",
"lazy_static",
"log",
"pretty_env_logger",
"rayon",
"regex",
"serde",
"serde_json",
]
@ -401,12 +377,6 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.174"
@ -576,9 +546,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rustc-demangle"
version = "0.1.25"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f"
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
[[package]]
name = "ryu"
@ -618,14 +588,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
version = "1.0.142"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
dependencies = [
"itoa",
"memchr",
@ -635,24 +605,25 @@ dependencies = [
[[package]]
name = "serde_with"
version = "1.14.0"
version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff"
checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
dependencies = [
"serde",
"serde_derive",
"serde_with_macros",
]
[[package]]
name = "serde_with_macros"
version = "1.5.2"
version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082"
checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
dependencies = [
"darling",
"proc-macro2",
"quote",
"syn 1.0.109",
"syn",
]
[[package]]
@ -679,7 +650,7 @@ version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]
[[package]]
@ -724,7 +695,7 @@ dependencies = [
"quote",
"serde",
"serde_json",
"syn 2.0.104",
"syn",
]
[[package]]
@ -736,29 +707,12 @@ dependencies = [
"rand",
]
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.104"
@ -943,5 +897,5 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn",
]

View file

@ -6,7 +6,7 @@ RUN apt-get update && \
gcc-loongarch64-linux-gnu libc6-dev-loong64-cross
ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER=loongarch64-linux-gnu-gcc-14 \
ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER=loongarch64-linux-gnu-gcc \
CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-loongarch64-static -cpu max -L /usr/loongarch64-linux-gnu" \
OBJDUMP=loongarch64-linux-gnu-objdump \
STDARCH_TEST_SKIP_FEATURE=frecipe

View file

@ -1,33 +1,140 @@
types! {
#![unstable(feature = "stdarch_loongarch", issue = "117427")]
/// LOONGARCH-specific 256-bit wide vector of 32 packed `i8`.
pub struct v32i8(32 x pub(crate) i8);
/// 256-bit wide integer vector type, LoongArch-specific
///
/// This type is the same as the `__m256i` type defined in `lasxintrin.h`,
/// representing a 256-bit SIMD register. Usage of this type typically
/// occurs in conjunction with the `lasx` target features for LoongArch.
///
/// Internally this type may be viewed as:
///
/// * `i8x32` - thirty two `i8` values packed together
/// * `i16x16` - sixteen `i16` values packed together
/// * `i32x8` - eight `i32` values packed together
/// * `i64x4` - four `i64` values packed together
///
/// (as well as unsigned versions). Each intrinsic may interpret the
/// internal bits differently, check the documentation of the intrinsic
/// to see how it's being used.
///
/// The in-memory representation of this type is the same as the one of an
/// equivalent array (i.e. the in-memory order of elements is the same, and
/// there is no padding); however, the alignment is different and equal to
/// the size of the type. Note that the ABI for function calls may *not* be
/// the same.
///
/// Note that this means that an instance of `m256i` typically just means
/// a "bag of bits" which is left up to interpretation at the point of use.
///
/// Most intrinsics using `m256i` are prefixed with `lasx_` and the integer
/// types tend to correspond to suffixes like "b", "h", "w" or "d".
pub struct m256i(4 x i64);
/// LOONGARCH-specific 256-bit wide vector of 16 packed `i16`.
pub struct v16i16(16 x pub(crate) i16);
/// 256-bit wide set of eight `f32` values, LoongArch-specific
///
/// This type is the same as the `__m256` type defined in `lasxintrin.h`,
/// representing a 256-bit SIMD register which internally consists of
/// eight packed `f32` instances. Usage of this type typically occurs in
/// conjunction with the `lasx` target features for LoongArch.
///
/// Note that unlike `m256i`, the integer version of the 256-bit registers,
/// this `m256` type has *one* interpretation. Each instance of `m256`
/// always corresponds to `f32x8`, or eight `f32` values packed together.
///
/// The in-memory representation of this type is the same as the one of an
/// equivalent array (i.e. the in-memory order of elements is the same, and
/// there is no padding between two consecutive elements); however, the
/// alignment is different and equal to the size of the type. Note that the
/// ABI for function calls may *not* be the same.
///
/// Most intrinsics using `m256` are prefixed with `lasx_` and are
/// suffixed with "s".
pub struct m256(8 x f32);
/// LOONGARCH-specific 256-bit wide vector of 8 packed `i32`.
pub struct v8i32(8 x pub(crate) i32);
/// 256-bit wide set of four `f64` values, LoongArch-specific
///
/// This type is the same as the `__m256d` type defined in `lasxintrin.h`,
/// representing a 256-bit SIMD register which internally consists of
/// four packed `f64` instances. Usage of this type typically occurs in
/// conjunction with the `lasx` target features for LoongArch.
///
/// Note that unlike `m256i`, the integer version of the 256-bit registers,
/// this `m256d` type has *one* interpretation. Each instance of `m256d`
/// always corresponds to `f64x4`, or four `f64` values packed together.
///
/// The in-memory representation of this type is the same as the one of an
/// equivalent array (i.e. the in-memory order of elements is the same, and
/// there is no padding); however, the alignment is different and equal to
/// the size of the type. Note that the ABI for function calls may *not* be
/// the same.
///
/// Most intrinsics using `m256d` are prefixed with `lasx_` and are suffixed
/// with "d". Not to be confused with "d" which is used for `m256i`.
pub struct m256d(4 x f64);
/// LOONGARCH-specific 256-bit wide vector of 4 packed `i64`.
pub struct v4i64(4 x pub(crate) i64);
/// LOONGARCH-specific 256-bit wide vector of 32 packed `u8`.
pub struct v32u8(32 x pub(crate) u8);
/// LOONGARCH-specific 256-bit wide vector of 16 packed `u16`.
pub struct v16u16(16 x pub(crate) u16);
/// LOONGARCH-specific 256-bit wide vector of 8 packed `u32`.
pub struct v8u32(8 x pub(crate) u32);
/// LOONGARCH-specific 256-bit wide vector of 4 packed `u64`.
pub struct v4u64(4 x pub(crate) u64);
/// LOONGARCH-specific 128-bit wide vector of 8 packed `f32`.
pub struct v8f32(8 x pub(crate) f32);
/// LOONGARCH-specific 256-bit wide vector of 4 packed `f64`.
pub struct v4f64(4 x pub(crate) f64);
}
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v32i8([i8; 32]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v16i16([i16; 16]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v8i32([i32; 8]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v4i64([i64; 4]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v32u8([u8; 32]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v16u16([u16; 16]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v8u32([u32; 8]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v4u64([u64; 4]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v8f32([f32; 8]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v4f64([f64; 4]);
// These type aliases are provided solely for transitional compatibility.
// They are temporary and will be removed when appropriate.
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v32i8 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v16i16 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v8i32 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v4i64 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v32u8 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v16u16 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v8u32 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v4u64 = m256i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v8f32 = m256;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v4f64 = m256d;

View file

@ -1,33 +1,140 @@
types! {
#![unstable(feature = "stdarch_loongarch", issue = "117427")]
/// LOONGARCH-specific 128-bit wide vector of 16 packed `i8`.
pub struct v16i8(16 x pub(crate) i8);
/// 128-bit wide integer vector type, LoongArch-specific
///
/// This type is the same as the `__m128i` type defined in `lsxintrin.h`,
/// representing a 128-bit SIMD register. Usage of this type typically
/// occurs in conjunction with the `lsx` and higher target features for
/// LoongArch.
///
/// Internally this type may be viewed as:
///
/// * `i8x16` - sixteen `i8` values packed together
/// * `i16x8` - eight `i16` values packed together
/// * `i32x4` - four `i32` values packed together
/// * `i64x2` - two `i64` values packed together
///
/// (as well as unsigned versions). Each intrinsic may interpret the
/// internal bits differently, check the documentation of the intrinsic
/// to see how it's being used.
///
/// The in-memory representation of this type is the same as the one of an
/// equivalent array (i.e. the in-memory order of elements is the same, and
/// there is no padding); however, the alignment is different and equal to
/// the size of the type. Note that the ABI for function calls may *not* be
/// the same.
///
/// Note that this means that an instance of `m128i` typically just means
/// a "bag of bits" which is left up to interpretation at the point of use.
///
/// Most intrinsics using `m128i` are prefixed with `lsx_` and the integer
/// types tend to correspond to suffixes like "b", "h", "w" or "d".
pub struct m128i(2 x i64);
/// LOONGARCH-specific 128-bit wide vector of 8 packed `i16`.
pub struct v8i16(8 x pub(crate) i16);
/// 128-bit wide set of four `f32` values, LoongArch-specific
///
/// This type is the same as the `__m128` type defined in `lsxintrin.h`,
/// representing a 128-bit SIMD register which internally consists of
/// four packed `f32` instances. Usage of this type typically occurs in
/// conjunction with the `lsx` and higher target features for LoongArch.
///
/// Note that unlike `m128i`, the integer version of the 128-bit registers,
/// this `m128` type has *one* interpretation. Each instance of `m128`
/// corresponds to `f32x4`, or four `f32` values packed together.
///
/// The in-memory representation of this type is the same as the one of an
/// equivalent array (i.e. the in-memory order of elements is the same, and
/// there is no padding); however, the alignment is different and equal to
/// the size of the type. Note that the ABI for function calls may *not* be
/// the same.
///
/// Most intrinsics using `m128` are prefixed with `lsx_` and are suffixed
/// with "s".
pub struct m128(4 x f32);
/// LOONGARCH-specific 128-bit wide vector of 4 packed `i32`.
pub struct v4i32(4 x pub(crate) i32);
/// LOONGARCH-specific 128-bit wide vector of 2 packed `i64`.
pub struct v2i64(2 x pub(crate) i64);
/// LOONGARCH-specific 128-bit wide vector of 16 packed `u8`.
pub struct v16u8(16 x pub(crate) u8);
/// LOONGARCH-specific 128-bit wide vector of 8 packed `u16`.
pub struct v8u16(8 x pub(crate) u16);
/// LOONGARCH-specific 128-bit wide vector of 4 packed `u32`.
pub struct v4u32(4 x pub(crate) u32);
/// LOONGARCH-specific 128-bit wide vector of 2 packed `u64`.
pub struct v2u64(2 x pub(crate) u64);
/// LOONGARCH-specific 128-bit wide vector of 4 packed `f32`.
pub struct v4f32(4 x pub(crate) f32);
/// LOONGARCH-specific 128-bit wide vector of 2 packed `f64`.
pub struct v2f64(2 x pub(crate) f64);
/// 128-bit wide set of two `f64` values, LoongArch-specific
///
/// This type is the same as the `__m128d` type defined in `lsxintrin.h`,
/// representing a 128-bit SIMD register which internally consists of
/// two packed `f64` instances. Usage of this type typically occurs in
/// conjunction with the `lsx` and higher target features for LoongArch.
///
/// Note that unlike `m128i`, the integer version of the 128-bit registers,
/// this `m128d` type has *one* interpretation. Each instance of `m128d`
/// always corresponds to `f64x2`, or two `f64` values packed together.
///
/// The in-memory representation of this type is the same as the one of an
/// equivalent array (i.e. the in-memory order of elements is the same, and
/// there is no padding); however, the alignment is different and equal to
/// the size of the type. Note that the ABI for function calls may *not* be
/// the same.
///
/// Most intrinsics using `m128d` are prefixed with `lsx_` and are suffixed
/// with "d". Not to be confused with "d" which is used for `m128i`.
pub struct m128d(2 x f64);
}
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v16i8([i8; 16]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v8i16([i16; 8]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v4i32([i32; 4]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v2i64([i64; 2]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v16u8([u8; 16]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v8u16([u16; 8]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v4u32([u32; 4]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v2u64([u64; 2]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v4f32([f32; 4]);
#[allow(non_camel_case_types)]
#[repr(simd)]
pub(crate) struct __v2f64([f64; 2]);
// These type aliases are provided solely for transitional compatibility.
// They are temporary and will be removed when appropriate.
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v16i8 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v8i16 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v4i32 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v2i64 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v16u8 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v8u16 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v4u32 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v2u64 = m128i;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v4f32 = m128;
#[allow(non_camel_case_types)]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub type v2f64 = m128d;

View file

@ -5831,24 +5831,30 @@ mod tests {
use crate::core_arch::simd::*;
use stdarch_test::simd_test;
impl<const N: usize> ShuffleMask<N> {
fn as_array(&self) -> &[u32; N] {
unsafe { std::mem::transmute(self) }
}
}
#[test]
fn reverse_mask() {
assert_eq!(ShuffleMask::<4>::reverse().0, [3, 2, 1, 0]);
assert_eq!(ShuffleMask::<4>::reverse().as_array(), &[3, 2, 1, 0]);
}
#[test]
fn mergel_mask() {
assert_eq!(ShuffleMask::<4>::merge_low().0, [2, 6, 3, 7]);
assert_eq!(ShuffleMask::<4>::merge_low().as_array(), &[2, 6, 3, 7]);
}
#[test]
fn mergeh_mask() {
assert_eq!(ShuffleMask::<4>::merge_high().0, [0, 4, 1, 5]);
assert_eq!(ShuffleMask::<4>::merge_high().as_array(), &[0, 4, 1, 5]);
}
#[test]
fn pack_mask() {
assert_eq!(ShuffleMask::<4>::pack().0, [1, 3, 5, 7]);
assert_eq!(ShuffleMask::<4>::pack().as_array(), &[1, 3, 5, 7]);
}
#[test]

View file

@ -1272,7 +1272,7 @@ pub unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i {
}
/// Conditionally store 8-bit integer elements from `a` into memory using
/// `mask`.
/// `mask` flagged as non-temporal (unlikely to be used again soon).
///
/// Elements are not stored when the highest bit is not set in the
/// corresponding element.
@ -1281,6 +1281,15 @@ pub unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i {
/// to be aligned on any particular boundary.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmoveu_si128)
///
/// # Safety of non-temporal stores
///
/// After using this intrinsic, but before any other access to the memory that this intrinsic
/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
/// return.
///
/// See [`_mm_sfence`] for details.
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(maskmovdqu))]

View file

@ -11,12 +11,9 @@ license = "MIT OR Apache-2.0"
edition = "2024"
[dependencies]
lazy_static = "1.4.0"
serde = { version = "1", features = ["derive"] }
serde_json = "1.0"
csv = "1.1"
clap = { version = "4.4", features = ["derive"] }
regex = "1.4.2"
log = "0.4.11"
pretty_env_logger = "0.5.0"
rayon = "1.5.0"

View file

@ -0,0 +1,15 @@
use crate::arm::intrinsic::ArmIntrinsicType;
use crate::common::argument::Argument;
// This functionality is present due to the nature
// of how intrinsics are defined in the JSON source
// of ARM intrinsics.
impl Argument<ArmIntrinsicType> {
pub fn type_and_name_from_c(arg: &str) -> (&str, &str) {
let split_index = arg
.rfind([' ', '*'])
.expect("Couldn't split type and argname");
(arg[..split_index + 1].trim_end(), &arg[split_index + 1..])
}
}

View file

@ -6,16 +6,16 @@ pub fn build_cpp_compilation(config: &ProcessedCli) -> Option<CppCompilation> {
// -ffp-contract=off emulates Rust's approach of not fusing separate mul-add operations
let mut command = CompilationCommandBuilder::new()
.add_arch_flags(vec!["armv8.6-a", "crypto", "crc", "dotprod", "fp16"])
.add_arch_flags(["armv8.6-a", "crypto", "crc", "dotprod", "fp16"])
.set_compiler(cpp_compiler)
.set_target(&config.target)
.set_opt_level("2")
.set_cxx_toolchain_dir(config.cxx_toolchain_dir.as_deref())
.set_project_root("c_programs")
.add_extra_flags(vec!["-ffp-contract=off", "-Wno-narrowing"]);
.add_extra_flags(["-ffp-contract=off", "-Wno-narrowing"]);
if !config.target.contains("v7") {
command = command.add_arch_flags(vec!["faminmax", "lut", "sha3"]);
command = command.add_arch_flags(["faminmax", "lut", "sha3"]);
}
if !cpp_compiler.contains("clang") {

View file

@ -5,19 +5,22 @@ use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, S
use std::ops::{Deref, DerefMut};
#[derive(Debug, Clone, PartialEq)]
pub struct ArmIntrinsicType(pub IntrinsicType);
pub struct ArmIntrinsicType {
pub data: IntrinsicType,
pub target: String,
}
impl Deref for ArmIntrinsicType {
type Target = IntrinsicType;
fn deref(&self) -> &Self::Target {
&self.0
&self.data
}
}
impl DerefMut for ArmIntrinsicType {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
&mut self.data
}
}

View file

@ -2,7 +2,7 @@ use super::intrinsic::ArmIntrinsicType;
use crate::common::argument::{Argument, ArgumentList};
use crate::common::constraint::Constraint;
use crate::common::intrinsic::Intrinsic;
use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition};
use crate::common::intrinsic_helpers::IntrinsicType;
use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;
@ -86,18 +86,21 @@ fn json_to_intrinsic(
.into_iter()
.enumerate()
.map(|(i, arg)| {
let arg_name = Argument::<ArmIntrinsicType>::type_and_name_from_c(&arg).1;
let (type_name, arg_name) = Argument::<ArmIntrinsicType>::type_and_name_from_c(&arg);
let metadata = intr.args_prep.as_mut();
let metadata = metadata.and_then(|a| a.remove(arg_name));
let arg_prep: Option<ArgPrep> = metadata.and_then(|a| a.try_into().ok());
let constraint: Option<Constraint> = arg_prep.and_then(|a| a.try_into().ok());
let ty = ArmIntrinsicType::from_c(type_name, target)
.unwrap_or_else(|_| panic!("Failed to parse argument '{arg}'"));
let mut arg = Argument::<ArmIntrinsicType>::from_c(i, &arg, target, constraint);
let mut arg =
Argument::<ArmIntrinsicType>::new(i, String::from(arg_name), ty, constraint);
// The JSON doesn't list immediates as const
let IntrinsicType {
ref mut constant, ..
} = arg.ty.0;
} = arg.ty.data;
if arg.name.starts_with("imm") {
*constant = true
}

View file

@ -1,23 +1,24 @@
mod argument;
mod compile;
mod config;
mod intrinsic;
mod json_parser;
mod types;
use std::fs::File;
use std::fs::{self, File};
use rayon::prelude::*;
use crate::arm::config::POLY128_OSTREAM_DEF;
use crate::common::SupportedArchitectureTest;
use crate::common::cli::ProcessedCli;
use crate::common::compare::compare_outputs;
use crate::common::gen_c::{write_main_cpp, write_mod_cpp};
use crate::common::gen_rust::compile_rust_programs;
use crate::common::intrinsic::{Intrinsic, IntrinsicDefinition};
use crate::common::gen_rust::{
compile_rust_programs, write_bin_cargo_toml, write_lib_cargo_toml, write_lib_rs, write_main_rs,
};
use crate::common::intrinsic::Intrinsic;
use crate::common::intrinsic_helpers::TypeKind;
use crate::common::write_file::write_rust_testfiles;
use config::{AARCH_CONFIGURATIONS, F16_FORMATTING_DEF, build_notices};
use crate::common::{SupportedArchitectureTest, chunk_info};
use config::{AARCH_CONFIGURATIONS, F16_FORMATTING_DEF, POLY128_OSTREAM_DEF, build_notices};
use intrinsic::ArmIntrinsicType;
use json_parser::get_neon_intrinsics;
@ -26,13 +27,6 @@ pub struct ArmArchitectureTest {
cli_options: ProcessedCli,
}
fn chunk_info(intrinsic_count: usize) -> (usize, usize) {
let available_parallelism = std::thread::available_parallelism().unwrap().get();
let chunk_size = intrinsic_count.div_ceil(Ord::min(available_parallelism, intrinsic_count));
(chunk_size, intrinsic_count.div_ceil(chunk_size))
}
impl SupportedArchitectureTest for ArmArchitectureTest {
fn create(cli_options: ProcessedCli) -> Box<Self> {
let a32 = cli_options.target.contains("v7");
@ -68,9 +62,10 @@ impl SupportedArchitectureTest for ArmArchitectureTest {
let (chunk_size, chunk_count) = chunk_info(self.intrinsics.len());
let cpp_compiler = compile::build_cpp_compilation(&self.cli_options).unwrap();
let cpp_compiler_wrapped = compile::build_cpp_compilation(&self.cli_options);
let notice = &build_notices("// ");
fs::create_dir_all("c_programs").unwrap();
self.intrinsics
.par_chunks(chunk_size)
.enumerate()
@ -79,10 +74,15 @@ impl SupportedArchitectureTest for ArmArchitectureTest {
let mut file = File::create(&c_filename).unwrap();
write_mod_cpp(&mut file, notice, c_target, platform_headers, chunk).unwrap();
// compile this cpp file into a .o file
let output = cpp_compiler
.compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o"))?;
assert!(output.status.success(), "{output:?}");
// compile this cpp file into a .o file.
//
// This is done because `cpp_compiler_wrapped` is None when
// the --generate-only flag is passed
if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() {
let output = cpp_compiler
.compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o"))?;
assert!(output.status.success(), "{output:?}");
}
Ok(())
})
@ -98,46 +98,84 @@ impl SupportedArchitectureTest for ArmArchitectureTest {
)
.unwrap();
// compile this cpp file into a .o file
info!("compiling main.cpp");
let output = cpp_compiler
.compile_object_file("main.cpp", "intrinsic-test-programs.o")
.unwrap();
assert!(output.status.success(), "{output:?}");
// This is done because `cpp_compiler_wrapped` is None when
// the --generate-only flag is passed
if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() {
// compile this cpp file into a .o file
info!("compiling main.cpp");
let output = cpp_compiler
.compile_object_file("main.cpp", "intrinsic-test-programs.o")
.unwrap();
assert!(output.status.success(), "{output:?}");
let object_files = (0..chunk_count)
.map(|i| format!("mod_{i}.o"))
.chain(["intrinsic-test-programs.o".to_owned()]);
let object_files = (0..chunk_count)
.map(|i| format!("mod_{i}.o"))
.chain(["intrinsic-test-programs.o".to_owned()]);
let output = cpp_compiler
.link_executable(object_files, "intrinsic-test-programs")
.unwrap();
assert!(output.status.success(), "{output:?}");
let output = cpp_compiler
.link_executable(object_files, "intrinsic-test-programs")
.unwrap();
assert!(output.status.success(), "{output:?}");
}
true
}
fn build_rust_file(&self) -> bool {
let rust_target = if self.cli_options.target.contains("v7") {
std::fs::create_dir_all("rust_programs/src").unwrap();
let architecture = if self.cli_options.target.contains("v7") {
"arm"
} else {
"aarch64"
};
let (chunk_size, chunk_count) = chunk_info(self.intrinsics.len());
let mut cargo = File::create("rust_programs/Cargo.toml").unwrap();
write_bin_cargo_toml(&mut cargo, chunk_count).unwrap();
let mut main_rs = File::create("rust_programs/src/main.rs").unwrap();
write_main_rs(
&mut main_rs,
chunk_count,
AARCH_CONFIGURATIONS,
"",
self.intrinsics.iter().map(|i| i.name.as_str()),
)
.unwrap();
let target = &self.cli_options.target;
let toolchain = self.cli_options.toolchain.as_deref();
let linker = self.cli_options.linker.as_deref();
let intrinsics_name_list = write_rust_testfiles(
self.intrinsics
.iter()
.map(|i| i as &dyn IntrinsicDefinition<_>)
.collect::<Vec<_>>(),
rust_target,
&build_notices("// "),
F16_FORMATTING_DEF,
AARCH_CONFIGURATIONS,
);
compile_rust_programs(intrinsics_name_list, toolchain, target, linker)
let notice = &build_notices("// ");
self.intrinsics
.par_chunks(chunk_size)
.enumerate()
.map(|(i, chunk)| {
std::fs::create_dir_all(format!("rust_programs/mod_{i}/src"))?;
let rust_filename = format!("rust_programs/mod_{i}/src/lib.rs");
trace!("generating `{rust_filename}`");
let mut file = File::create(rust_filename)?;
let cfg = AARCH_CONFIGURATIONS;
let definitions = F16_FORMATTING_DEF;
write_lib_rs(&mut file, architecture, notice, cfg, definitions, chunk)?;
let toml_filename = format!("rust_programs/mod_{i}/Cargo.toml");
trace!("generating `{toml_filename}`");
let mut file = File::create(toml_filename).unwrap();
write_lib_cargo_toml(&mut file, &format!("mod_{i}"))?;
Ok(())
})
.collect::<Result<(), std::io::Error>>()
.unwrap();
compile_rust_programs(toolchain, target, linker)
}
fn compare_outputs(&self) -> bool {

View file

@ -5,12 +5,10 @@ use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, S
impl IntrinsicTypeDefinition for ArmIntrinsicType {
/// Gets a string containing the typename for this type in C format.
fn c_type(&self) -> String {
let prefix = self.0.kind.c_prefix();
let const_prefix = if self.0.constant { "const " } else { "" };
let prefix = self.kind.c_prefix();
let const_prefix = if self.constant { "const " } else { "" };
if let (Some(bit_len), simd_len, vec_len) =
(self.0.bit_len, self.0.simd_len, self.0.vec_len)
{
if let (Some(bit_len), simd_len, vec_len) = (self.bit_len, self.simd_len, self.vec_len) {
match (simd_len, vec_len) {
(None, None) => format!("{const_prefix}{prefix}{bit_len}_t"),
(Some(simd), None) => format!("{prefix}{bit_len}x{simd}_t"),
@ -23,35 +21,16 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
}
fn c_single_vector_type(&self) -> String {
if let (Some(bit_len), Some(simd_len)) = (self.0.bit_len, self.0.simd_len) {
if let (Some(bit_len), Some(simd_len)) = (self.bit_len, self.simd_len) {
format!(
"{prefix}{bit_len}x{simd_len}_t",
prefix = self.0.kind.c_prefix()
prefix = self.kind.c_prefix()
)
} else {
unreachable!("Shouldn't be called on this type")
}
}
fn rust_type(&self) -> String {
let rust_prefix = self.0.kind.rust_prefix();
let c_prefix = self.0.kind.c_prefix();
if self.0.ptr_constant {
self.c_type()
} else if let (Some(bit_len), simd_len, vec_len) =
(self.0.bit_len, self.0.simd_len, self.0.vec_len)
{
match (simd_len, vec_len) {
(None, None) => format!("{rust_prefix}{bit_len}"),
(Some(simd), None) => format!("{c_prefix}{bit_len}x{simd}_t"),
(Some(simd), Some(vec)) => format!("{c_prefix}{bit_len}x{simd}x{vec}_t"),
(None, Some(_)) => todo!("{:#?}", self), // Likely an invalid case
}
} else {
todo!("{:#?}", self)
}
}
/// Determines the load function for this type.
fn get_load_function(&self, language: Language) -> String {
if let IntrinsicType {
@ -59,9 +38,8 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
bit_len: Some(bl),
simd_len,
vec_len,
target,
..
} = &self.0
} = &self.data
{
let quad = if simd_len.unwrap_or(1) * bl > 64 {
"q"
@ -69,7 +47,7 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
""
};
let choose_workaround = language == Language::C && target.contains("v7");
let choose_workaround = language == Language::C && self.target.contains("v7");
format!(
"vld{len}{quad}_{type}{size}",
type = match k {
@ -97,7 +75,7 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
bit_len: Some(bl),
simd_len,
..
} = &self.0
} = &self.data
{
let quad = if (simd_len.unwrap_or(1) * bl) > 64 {
"q"
@ -120,8 +98,10 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
todo!("get_lane_function IntrinsicType: {:#?}", self)
}
}
}
fn from_c(s: &str, target: &str) -> Result<Self, String> {
impl ArmIntrinsicType {
pub fn from_c(s: &str, target: &str) -> Result<Self, String> {
const CONST_STR: &str = "const";
if let Some(s) = s.strip_suffix('*') {
let (s, constant) = match s.trim().strip_suffix(CONST_STR) {
@ -162,32 +142,36 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
),
None => None,
};
Ok(ArmIntrinsicType(IntrinsicType {
ptr: false,
ptr_constant: false,
constant,
kind: arg_kind,
bit_len: Some(bit_len),
simd_len,
vec_len,
Ok(ArmIntrinsicType {
data: IntrinsicType {
ptr: false,
ptr_constant: false,
constant,
kind: arg_kind,
bit_len: Some(bit_len),
simd_len,
vec_len,
},
target: target.to_string(),
}))
})
} else {
let kind = start.parse::<TypeKind>()?;
let bit_len = match kind {
TypeKind::Int(_) => Some(32),
_ => None,
};
Ok(ArmIntrinsicType(IntrinsicType {
ptr: false,
ptr_constant: false,
constant,
kind: start.parse::<TypeKind>()?,
bit_len,
simd_len: None,
vec_len: None,
Ok(ArmIntrinsicType {
data: IntrinsicType {
ptr: false,
ptr_constant: false,
constant,
kind: start.parse::<TypeKind>()?,
bit_len,
simd_len: None,
vec_len: None,
},
target: target.to_string(),
}))
})
}
}
}

View file

@ -20,6 +20,15 @@ impl<T> Argument<T>
where
T: IntrinsicTypeDefinition,
{
pub fn new(pos: usize, name: String, ty: T, constraint: Option<Constraint>) -> Self {
Argument {
pos,
name,
ty,
constraint,
}
}
pub fn to_c_type(&self) -> String {
self.ty.c_type()
}
@ -36,14 +45,6 @@ where
self.constraint.is_some()
}
pub fn type_and_name_from_c(arg: &str) -> (&str, &str) {
let split_index = arg
.rfind([' ', '*'])
.expect("Couldn't split type and argname");
(arg[..split_index + 1].trim_end(), &arg[split_index + 1..])
}
/// The binding keyword (e.g. "const" or "let") for the array of possible test inputs.
fn rust_vals_array_binding(&self) -> impl std::fmt::Display {
if self.ty.is_rust_vals_array_const() {
@ -62,25 +63,6 @@ where
}
}
pub fn from_c(
pos: usize,
arg: &str,
target: &str,
constraint: Option<Constraint>,
) -> Argument<T> {
let (ty, var_name) = Self::type_and_name_from_c(arg);
let ty =
T::from_c(ty, target).unwrap_or_else(|_| panic!("Failed to parse argument '{arg}'"));
Argument {
pos,
name: String::from(var_name),
ty: ty,
constraint,
}
}
fn as_call_param_c(&self) -> String {
self.ty.as_call_param_c(&self.name)
}
@ -114,14 +96,6 @@ where
.join(", ")
}
pub fn as_constraint_parameters_rust(&self) -> String {
self.iter()
.filter(|a| a.has_constraint())
.map(|arg| arg.name.clone())
.collect::<Vec<String>>()
.join(", ")
}
/// Creates a line for each argument that initializes an array for C from which `loads` argument
/// values can be loaded as a sliding window.
/// e.g `const int32x2_t a_vals = {0x3effffff, 0x3effffff, 0x3f7fffff}`, if loads=2.
@ -146,21 +120,25 @@ where
/// Creates a line for each argument that initializes an array for Rust from which `loads` argument
/// values can be loaded as a sliding window, e.g `const A_VALS: [u32; 20] = [...];`
pub fn gen_arglists_rust(&self, indentation: Indentation, loads: u32) -> String {
self.iter()
.filter(|&arg| !arg.has_constraint())
.map(|arg| {
format!(
"{indentation}{bind} {name}: [{ty}; {load_size}] = {values};",
bind = arg.rust_vals_array_binding(),
name = arg.rust_vals_array_name(),
ty = arg.ty.rust_scalar_type(),
load_size = arg.ty.num_lanes() * arg.ty.num_vectors() + loads - 1,
values = arg.ty.populate_random(indentation, loads, &Language::Rust)
)
})
.collect::<Vec<_>>()
.join("\n")
pub fn gen_arglists_rust(
&self,
w: &mut impl std::io::Write,
indentation: Indentation,
loads: u32,
) -> std::io::Result<()> {
for arg in self.iter().filter(|&arg| !arg.has_constraint()) {
writeln!(
w,
"{indentation}{bind} {name}: [{ty}; {load_size}] = {values};",
bind = arg.rust_vals_array_binding(),
name = arg.rust_vals_array_name(),
ty = arg.ty.rust_scalar_type(),
load_size = arg.ty.num_lanes() * arg.ty.num_vectors() + loads - 1,
values = arg.ty.populate_random(indentation, loads, &Language::Rust)
)?
}
Ok(())
}
/// Creates a line for each argument that initializes the argument from an array `[arg]_vals` at

View file

@ -2,25 +2,29 @@ use super::cli::FailureReason;
use rayon::prelude::*;
use std::process::Command;
fn runner_command(runner: &str) -> Command {
let mut it = runner.split_whitespace();
let mut cmd = Command::new(it.next().unwrap());
cmd.args(it);
cmd
}
pub fn compare_outputs(intrinsic_name_list: &Vec<String>, runner: &str, target: &str) -> bool {
fn runner_command(runner: &str) -> Command {
let mut it = runner.split_whitespace();
let mut cmd = Command::new(it.next().unwrap());
cmd.args(it);
cmd
}
let intrinsics = intrinsic_name_list
.par_iter()
.filter_map(|intrinsic_name| {
let c = runner_command(runner)
.arg("./c_programs/intrinsic-test-programs")
.arg("intrinsic-test-programs")
.arg(intrinsic_name)
.current_dir("c_programs")
.output();
let rust = runner_command(runner)
.arg(format!("target/{target}/release/{intrinsic_name}"))
.arg(format!("target/{target}/release/intrinsic-test-programs"))
.arg(intrinsic_name)
.current_dir("rust_programs")
.output();
let (c, rust) = match (c, rust) {
@ -30,7 +34,7 @@ pub fn compare_outputs(intrinsic_name_list: &Vec<String>, runner: &str, target:
if !c.status.success() {
error!(
"Failed to run C program for intrinsic {intrinsic_name}\nstdout: {stdout}\nstderr: {stderr}",
"Failed to run C program for intrinsic `{intrinsic_name}`\nstdout: {stdout}\nstderr: {stderr}",
stdout = std::str::from_utf8(&c.stdout).unwrap_or(""),
stderr = std::str::from_utf8(&c.stderr).unwrap_or(""),
);
@ -39,9 +43,9 @@ pub fn compare_outputs(intrinsic_name_list: &Vec<String>, runner: &str, target:
if !rust.status.success() {
error!(
"Failed to run Rust program for intrinsic {intrinsic_name}\nstdout: {stdout}\nstderr: {stderr}",
stdout = String::from_utf8_lossy(&rust.stdout),
stderr = String::from_utf8_lossy(&rust.stderr),
"Failed to run Rust program for intrinsic `{intrinsic_name}`\nstdout: {stdout}\nstderr: {stderr}",
stdout = std::str::from_utf8(&rust.stdout).unwrap_or(""),
stderr = std::str::from_utf8(&rust.stderr).unwrap_or(""),
);
return Some(FailureReason::RunRust(intrinsic_name.clone()));
}

View file

@ -37,9 +37,9 @@ impl CompilationCommandBuilder {
self
}
pub fn add_arch_flags(mut self, flags: Vec<&str>) -> Self {
let mut new_arch_flags = flags.into_iter().map(|v| v.to_string()).collect();
self.arch_flags.append(&mut new_arch_flags);
pub fn add_arch_flags<'a>(mut self, flags: impl IntoIterator<Item = &'a str>) -> Self {
self.arch_flags
.extend(flags.into_iter().map(|s| s.to_owned()));
self
}
@ -55,14 +55,15 @@ impl CompilationCommandBuilder {
self
}
pub fn add_extra_flags(mut self, flags: Vec<&str>) -> Self {
let mut flags: Vec<String> = flags.into_iter().map(|f| f.to_string()).collect();
self.extra_flags.append(&mut flags);
pub fn add_extra_flags<'a>(mut self, flags: impl IntoIterator<Item = &'a str>) -> Self {
self.extra_flags
.extend(flags.into_iter().map(|s| s.to_owned()));
self
}
pub fn add_extra_flag(self, flag: &str) -> Self {
self.add_extra_flags(vec![flag])
self.add_extra_flags([flag])
}
}

View file

@ -1,17 +1,24 @@
use serde::Deserialize;
use std::ops::Range;
/// Describes the values to test for a const generic parameter.
#[derive(Debug, PartialEq, Clone, Deserialize)]
pub enum Constraint {
/// Test a single value.
Equal(i64),
/// Test a range of values, e.g. `0..16`.
Range(Range<i64>),
/// Test discrete values, e.g. `vec![1, 2, 4, 8]`.
Set(Vec<i64>),
}
impl Constraint {
pub fn to_range(&self) -> Range<i64> {
/// Iterate over the values of this constraint.
pub fn iter<'a>(&'a self) -> impl Iterator<Item = i64> + 'a {
match self {
Constraint::Equal(eq) => *eq..*eq + 1,
Constraint::Range(range) => range.clone(),
Constraint::Equal(i) => std::slice::Iter::default().copied().chain(*i..*i + 1),
Constraint::Range(range) => std::slice::Iter::default().copied().chain(range.clone()),
Constraint::Set(items) => items.iter().copied().chain(std::ops::Range::default()),
}
}
}

View file

@ -40,7 +40,7 @@ pub fn generate_c_constraint_blocks<'a, T: IntrinsicTypeDefinition + 'a>(
};
let body_indentation = indentation.nested();
for i in current.constraint.iter().flat_map(|c| c.to_range()) {
for i in current.constraint.iter().flat_map(|c| c.iter()) {
let ty = current.ty.c_type();
writeln!(w, "{indentation}{{")?;

View file

@ -1,10 +1,6 @@
use itertools::Itertools;
use rayon::prelude::*;
use std::collections::BTreeMap;
use std::fs::File;
use std::process::Command;
use super::argument::Argument;
use super::indentation::Indentation;
use super::intrinsic::{IntrinsicDefinition, format_f16_return_value};
use super::intrinsic_helpers::IntrinsicTypeDefinition;
@ -12,86 +8,144 @@ use super::intrinsic_helpers::IntrinsicTypeDefinition;
// The number of times each intrinsic will be called.
const PASSES: u32 = 20;
pub fn format_rust_main_template(
notices: &str,
definitions: &str,
configurations: &str,
arch_definition: &str,
arglists: &str,
passes: &str,
) -> String {
format!(
r#"{notices}#![feature(simd_ffi)]
#![feature(f16)]
#![allow(unused)]
{configurations}
{definitions}
use core_arch::arch::{arch_definition}::*;
fn main() {{
{arglists}
{passes}
}}
"#,
)
}
fn write_cargo_toml(w: &mut impl std::io::Write, binaries: &[String]) -> std::io::Result<()> {
fn write_cargo_toml_header(w: &mut impl std::io::Write, name: &str) -> std::io::Result<()> {
writeln!(
w,
concat!(
"[package]\n",
"name = \"intrinsic-test-programs\"\n",
"name = \"{name}\"\n",
"version = \"{version}\"\n",
"authors = [{authors}]\n",
"license = \"{license}\"\n",
"edition = \"2018\"\n",
"[workspace]\n",
"[dependencies]\n",
"core_arch = {{ path = \"../crates/core_arch\" }}",
),
name = name,
version = env!("CARGO_PKG_VERSION"),
authors = env!("CARGO_PKG_AUTHORS")
.split(":")
.format_with(", ", |author, fmt| fmt(&format_args!("\"{author}\""))),
license = env!("CARGO_PKG_LICENSE"),
)?;
)
}
for binary in binaries {
writeln!(
w,
concat!(
"[[bin]]\n",
"name = \"{binary}\"\n",
"path = \"{binary}/main.rs\"\n",
),
binary = binary,
)?;
pub fn write_bin_cargo_toml(
w: &mut impl std::io::Write,
module_count: usize,
) -> std::io::Result<()> {
write_cargo_toml_header(w, "intrinsic-test-programs")?;
writeln!(w, "[dependencies]")?;
for i in 0..module_count {
writeln!(w, "mod_{i} = {{ path = \"mod_{i}/\" }}")?;
}
Ok(())
}
pub fn compile_rust_programs(
binaries: Vec<String>,
toolchain: Option<&str>,
target: &str,
linker: Option<&str>,
) -> bool {
let mut cargo = File::create("rust_programs/Cargo.toml").unwrap();
write_cargo_toml(&mut cargo, &binaries).unwrap();
pub fn write_lib_cargo_toml(w: &mut impl std::io::Write, name: &str) -> std::io::Result<()> {
write_cargo_toml_header(w, name)?;
writeln!(w, "[dependencies]")?;
writeln!(w, "core_arch = {{ path = \"../../crates/core_arch\" }}")?;
Ok(())
}
pub fn write_main_rs<'a>(
w: &mut impl std::io::Write,
chunk_count: usize,
cfg: &str,
definitions: &str,
intrinsics: impl Iterator<Item = &'a str> + Clone,
) -> std::io::Result<()> {
writeln!(w, "#![feature(simd_ffi)]")?;
writeln!(w, "#![feature(f16)]")?;
writeln!(w, "#![allow(unused)]")?;
// Cargo will spam the logs if these warnings are not silenced.
writeln!(w, "#![allow(non_upper_case_globals)]")?;
writeln!(w, "#![allow(non_camel_case_types)]")?;
writeln!(w, "#![allow(non_snake_case)]")?;
writeln!(w, "{cfg}")?;
writeln!(w, "{definitions}")?;
for module in 0..chunk_count {
writeln!(w, "use mod_{module}::*;")?;
}
writeln!(w, "fn main() {{")?;
writeln!(w, " match std::env::args().nth(1).unwrap().as_str() {{")?;
for binary in intrinsics {
writeln!(w, " \"{binary}\" => run_{binary}(),")?;
}
writeln!(
w,
" other => panic!(\"unknown intrinsic `{{}}`\", other),"
)?;
writeln!(w, " }}")?;
writeln!(w, "}}")?;
Ok(())
}
pub fn write_lib_rs<T: IntrinsicTypeDefinition>(
w: &mut impl std::io::Write,
architecture: &str,
notice: &str,
cfg: &str,
definitions: &str,
intrinsics: &[impl IntrinsicDefinition<T>],
) -> std::io::Result<()> {
write!(w, "{notice}")?;
writeln!(w, "#![feature(simd_ffi)]")?;
writeln!(w, "#![feature(f16)]")?;
writeln!(w, "#![allow(unused)]")?;
// Cargo will spam the logs if these warnings are not silenced.
writeln!(w, "#![allow(non_upper_case_globals)]")?;
writeln!(w, "#![allow(non_camel_case_types)]")?;
writeln!(w, "#![allow(non_snake_case)]")?;
writeln!(w, "{cfg}")?;
writeln!(w, "use core_arch::arch::{architecture}::*;")?;
writeln!(w, "{definitions}")?;
for intrinsic in intrinsics {
crate::common::gen_rust::create_rust_test_module(w, intrinsic)?;
}
Ok(())
}
pub fn compile_rust_programs(toolchain: Option<&str>, target: &str, linker: Option<&str>) -> bool {
/* If there has been a linker explicitly set from the command line then
* we want to set it via setting it in the RUSTFLAGS*/
// This is done because `toolchain` is None when
// the --generate-only flag is passed
if toolchain.is_none() {
return true;
}
trace!("Building cargo command");
let mut cargo_command = Command::new("cargo");
cargo_command.current_dir("rust_programs");
if let Some(toolchain) = toolchain {
if !toolchain.is_empty() {
cargo_command.arg(toolchain);
}
// Do not use the target directory of the workspace please.
cargo_command.env("CARGO_TARGET_DIR", "target");
if toolchain.is_some_and(|val| !val.is_empty()) {
cargo_command.arg(toolchain.unwrap());
}
cargo_command.args(["build", "--target", target, "--release"]);
@ -105,7 +159,16 @@ pub fn compile_rust_programs(
}
cargo_command.env("RUSTFLAGS", rust_flags);
trace!("running cargo");
if log::log_enabled!(log::Level::Trace) {
cargo_command.stdout(std::process::Stdio::inherit());
cargo_command.stderr(std::process::Stdio::inherit());
}
let output = cargo_command.output();
trace!("cargo is done");
if let Ok(output) = output {
if output.status.success() {
@ -124,119 +187,117 @@ pub fn compile_rust_programs(
}
}
// Creates directory structure and file path mappings
pub fn setup_rust_file_paths(identifiers: &Vec<String>) -> BTreeMap<&String, String> {
identifiers
.par_iter()
.map(|identifier| {
let rust_dir = format!("rust_programs/{identifier}");
let _ = std::fs::create_dir_all(&rust_dir);
let rust_filename = format!("{rust_dir}/main.rs");
(identifier, rust_filename)
})
.collect::<BTreeMap<&String, String>>()
}
pub fn generate_rust_test_loop<T: IntrinsicTypeDefinition>(
w: &mut impl std::io::Write,
intrinsic: &dyn IntrinsicDefinition<T>,
indentation: Indentation,
additional: &str,
specializations: &[Vec<u8>],
passes: u32,
) -> String {
let constraints = intrinsic.arguments().as_constraint_parameters_rust();
let constraints = if !constraints.is_empty() {
format!("::<{constraints}>")
} else {
constraints
};
) -> std::io::Result<()> {
let intrinsic_name = intrinsic.name();
// Each function (and each specialization) has its own type. Erase that type with a cast.
let mut coerce = String::from("unsafe fn(");
for _ in intrinsic.arguments().iter().filter(|a| !a.has_constraint()) {
coerce += "_, ";
}
coerce += ") -> _";
match specializations {
[] => {
writeln!(w, " let specializations = [(\"\", {intrinsic_name})];")?;
}
[const_args] if const_args.is_empty() => {
writeln!(w, " let specializations = [(\"\", {intrinsic_name})];")?;
}
_ => {
writeln!(w, " let specializations = [")?;
for specialization in specializations {
let mut specialization: Vec<_> =
specialization.iter().map(|d| d.to_string()).collect();
let const_args = specialization.join(",");
// The identifier is reversed.
specialization.reverse();
let id = specialization.join("-");
writeln!(
w,
" (\"-{id}\", {intrinsic_name}::<{const_args}> as {coerce}),"
)?;
}
writeln!(w, " ];")?;
}
}
let return_value = format_f16_return_value(intrinsic);
let indentation2 = indentation.nested();
let indentation3 = indentation2.nested();
format!(
"{indentation}for i in 0..{passes} {{\n\
{indentation2}unsafe {{\n\
{loaded_args}\
{indentation3}let __return_value = {intrinsic_call}{const}({args});\n\
{indentation3}println!(\"Result {additional}-{{}}: {{:?}}\", i + 1, {return_value});\n\
{indentation2}}}\n\
{indentation}}}",
writeln!(
w,
"\
for (id, f) in specializations {{\n\
for i in 0..{passes} {{\n\
unsafe {{\n\
{loaded_args}\
let __return_value = f({args});\n\
println!(\"Result {{id}}-{{}}: {{:?}}\", i + 1, {return_value});\n\
}}\n\
}}\n\
}}",
loaded_args = intrinsic.arguments().load_values_rust(indentation3),
intrinsic_call = intrinsic.name(),
const = constraints,
args = intrinsic.arguments().as_call_param_rust(),
)
}
pub fn generate_rust_constraint_blocks<T: IntrinsicTypeDefinition>(
intrinsic: &dyn IntrinsicDefinition<T>,
indentation: Indentation,
constraints: &[&Argument<T>],
name: String,
) -> String {
if let Some((current, constraints)) = constraints.split_last() {
let range = current
.constraint
.iter()
.map(|c| c.to_range())
.flat_map(|r| r.into_iter());
/// Generate the specializations (unique sequences of const-generic arguments) for this intrinsic.
fn generate_rust_specializations<'a>(
constraints: &mut impl Iterator<Item = impl Iterator<Item = i64>>,
) -> Vec<Vec<u8>> {
let mut specializations = vec![vec![]];
let body_indentation = indentation.nested();
range
.map(|i| {
format!(
"{indentation}{{\n\
{body_indentation}const {name}: {ty} = {val};\n\
{pass}\n\
{indentation}}}",
name = current.name,
ty = current.ty.rust_type(),
val = i,
pass = generate_rust_constraint_blocks(
intrinsic,
body_indentation,
constraints,
format!("{name}-{i}")
)
)
for constraint in constraints {
specializations = constraint
.flat_map(|right| {
specializations.iter().map(move |left| {
let mut left = left.clone();
left.push(u8::try_from(right).unwrap());
left
})
})
.join("\n")
} else {
generate_rust_test_loop(intrinsic, indentation, &name, PASSES)
.collect();
}
specializations
}
// Top-level function to create complete test program
pub fn create_rust_test_program<T: IntrinsicTypeDefinition>(
pub fn create_rust_test_module<T: IntrinsicTypeDefinition>(
w: &mut impl std::io::Write,
intrinsic: &dyn IntrinsicDefinition<T>,
target: &str,
notice: &str,
definitions: &str,
cfg: &str,
) -> String {
let arguments = intrinsic.arguments();
let constraints = arguments
.iter()
.filter(|i| i.has_constraint())
.collect_vec();
) -> std::io::Result<()> {
trace!("generating `{}`", intrinsic.name());
let indentation = Indentation::default();
format_rust_main_template(
notice,
definitions,
cfg,
target,
intrinsic
.arguments()
.gen_arglists_rust(indentation.nested(), PASSES)
.as_str(),
generate_rust_constraint_blocks(
intrinsic,
indentation.nested(),
&constraints,
Default::default(),
)
.as_str(),
)
writeln!(w, "pub fn run_{}() {{", intrinsic.name())?;
// Define the arrays of arguments.
let arguments = intrinsic.arguments();
arguments.gen_arglists_rust(w, indentation.nested(), PASSES)?;
// Define any const generics as `const` items, then generate the actual test loop.
let specializations = generate_rust_specializations(
&mut arguments
.iter()
.filter_map(|i| i.constraint.as_ref().map(|v| v.iter())),
);
generate_rust_test_loop(w, intrinsic, indentation, &specializations, PASSES)?;
writeln!(w, "}}")?;
Ok(())
}

View file

@ -120,8 +120,6 @@ pub struct IntrinsicType {
/// rows encoded in the type (e.g. uint8x8_t).
/// A value of `None` can be assumed to be 1 though.
pub vec_len: Option<u32>,
pub target: String,
}
impl IntrinsicType {
@ -321,18 +319,10 @@ pub trait IntrinsicTypeDefinition: Deref<Target = IntrinsicType> {
/// can be implemented in an `impl` block
fn get_lane_function(&self) -> String;
/// can be implemented in an `impl` block
fn from_c(_s: &str, _target: &str) -> Result<Self, String>
where
Self: Sized;
/// Gets a string containing the typename for this type in C format.
/// can be directly defined in `impl` blocks
fn c_type(&self) -> String;
/// can be directly defined in `impl` blocks
fn c_single_vector_type(&self) -> String;
/// can be defined in `impl` blocks
fn rust_type(&self) -> String;
}

View file

@ -11,7 +11,6 @@ pub mod indentation;
pub mod intrinsic;
pub mod intrinsic_helpers;
pub mod values;
pub mod write_file;
/// Architectures must support this trait
/// to be successfully tested.
@ -23,3 +22,10 @@ pub trait SupportedArchitectureTest {
fn build_rust_file(&self) -> bool;
fn compare_outputs(&self) -> bool;
}
pub fn chunk_info(intrinsic_count: usize) -> (usize, usize) {
let available_parallelism = std::thread::available_parallelism().unwrap().get();
let chunk_size = intrinsic_count.div_ceil(Ord::min(available_parallelism, intrinsic_count));
(chunk_size, intrinsic_count.div_ceil(chunk_size))
}

View file

@ -1,33 +0,0 @@
use super::gen_rust::{create_rust_test_program, setup_rust_file_paths};
use super::intrinsic::IntrinsicDefinition;
use super::intrinsic_helpers::IntrinsicTypeDefinition;
use std::fs::File;
use std::io::Write;
pub fn write_file(filename: &String, code: String) {
let mut file = File::create(filename).unwrap();
file.write_all(code.into_bytes().as_slice()).unwrap();
}
pub fn write_rust_testfiles<T: IntrinsicTypeDefinition>(
intrinsics: Vec<&dyn IntrinsicDefinition<T>>,
rust_target: &str,
notice: &str,
definitions: &str,
cfg: &str,
) -> Vec<String> {
let intrinsics_name_list = intrinsics
.iter()
.map(|i| i.name().clone())
.collect::<Vec<_>>();
let filename_mapping = setup_rust_file_paths(&intrinsics_name_list);
intrinsics.iter().for_each(|&i| {
let rust_code = create_rust_test_program(i, rust_target, notice, definitions, cfg);
if let Some(filename) = filename_mapping.get(&i.name()) {
write_file(filename, rust_code)
}
});
intrinsics_name_list
}

View file

@ -17,6 +17,6 @@ proc-macro2 = "1.0"
quote = "1.0"
regex = "1.5"
serde = { version = "1.0", features = ["derive"] }
serde_with = "1.14"
serde_with = { version = "3.2.0", default-features = false, features = ["macros"] }
serde_yaml = "0.8"
walkdir = "2.3.2"

View file

@ -156,6 +156,7 @@ fn gen_bind(in_file: String, ext_name: &str) -> io::Result<()> {
// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- {in_file}
// ```
use crate::mem::transmute;
use super::types::*;
"#
));
@ -239,38 +240,63 @@ fn gen_bind_body(
para_num: i32,
target: TargetFeature,
) -> (String, String) {
let type_to_rst = |t: &str, s: bool| -> &str {
match (t, s) {
("V16QI", _) => "v16i8",
("V32QI", _) => "v32i8",
("V8HI", _) => "v8i16",
("V16HI", _) => "v16i16",
("V4SI", _) => "v4i32",
("V8SI", _) => "v8i32",
("V2DI", _) => "v2i64",
("V4DI", _) => "v4i64",
("UV16QI", _) => "v16u8",
("UV32QI", _) => "v32u8",
("UV8HI", _) => "v8u16",
("UV16HI", _) => "v16u16",
("UV4SI", _) => "v4u32",
("UV8SI", _) => "v8u32",
("UV2DI", _) => "v2u64",
("UV4DI", _) => "v4u64",
("SI", _) => "i32",
("DI", _) => "i64",
("USI", _) => "u32",
("UDI", _) => "u64",
("V4SF", _) => "v4f32",
("V8SF", _) => "v8f32",
("V2DF", _) => "v2f64",
("V4DF", _) => "v4f64",
("UQI", _) => "u32",
("QI", _) => "i32",
("CVPOINTER", false) => "*const i8",
("CVPOINTER", true) => "*mut i8",
("HI", _) => "i32",
(_, _) => panic!("unknown type: {t}"),
enum TypeKind {
Vector,
Intrinsic,
}
use TypeKind::*;
let type_to_rst = |t: &str, s: bool, k: TypeKind| -> &str {
match (t, s, k) {
("V16QI", _, Vector) => "__v16i8",
("V16QI", _, Intrinsic) => "m128i",
("V32QI", _, Vector) => "__v32i8",
("V32QI", _, Intrinsic) => "m256i",
("V8HI", _, Vector) => "__v8i16",
("V8HI", _, Intrinsic) => "m128i",
("V16HI", _, Vector) => "__v16i16",
("V16HI", _, Intrinsic) => "m256i",
("V4SI", _, Vector) => "__v4i32",
("V4SI", _, Intrinsic) => "m128i",
("V8SI", _, Vector) => "__v8i32",
("V8SI", _, Intrinsic) => "m256i",
("V2DI", _, Vector) => "__v2i64",
("V2DI", _, Intrinsic) => "m128i",
("V4DI", _, Vector) => "__v4i64",
("V4DI", _, Intrinsic) => "m256i",
("UV16QI", _, Vector) => "__v16u8",
("UV16QI", _, Intrinsic) => "m128i",
("UV32QI", _, Vector) => "__v32u8",
("UV32QI", _, Intrinsic) => "m256i",
("UV8HI", _, Vector) => "__v8u16",
("UV8HI", _, Intrinsic) => "m128i",
("UV16HI", _, Vector) => "__v16u16",
("UV16HI", _, Intrinsic) => "m256i",
("UV4SI", _, Vector) => "__v4u32",
("UV4SI", _, Intrinsic) => "m128i",
("UV8SI", _, Vector) => "__v8u32",
("UV8SI", _, Intrinsic) => "m256i",
("UV2DI", _, Vector) => "__v2u64",
("UV2DI", _, Intrinsic) => "m128i",
("UV4DI", _, Vector) => "__v4u64",
("UV4DI", _, Intrinsic) => "m256i",
("SI", _, _) => "i32",
("DI", _, _) => "i64",
("USI", _, _) => "u32",
("UDI", _, _) => "u64",
("V4SF", _, Vector) => "__v4f32",
("V4SF", _, Intrinsic) => "m128",
("V8SF", _, Vector) => "__v8f32",
("V8SF", _, Intrinsic) => "m256",
("V2DF", _, Vector) => "__v2f64",
("V2DF", _, Intrinsic) => "m128d",
("V4DF", _, Vector) => "__v4f64",
("V4DF", _, Intrinsic) => "m256d",
("UQI", _, _) => "u32",
("QI", _, _) => "i32",
("CVPOINTER", false, _) => "*const i8",
("CVPOINTER", true, _) => "*mut i8",
("HI", _, _) => "i32",
(_, _, _) => panic!("unknown type: {t}"),
}
};
@ -281,27 +307,27 @@ fn gen_bind_body(
let fn_output = if out_t.to_lowercase() == "void" {
String::new()
} else {
format!(" -> {}", type_to_rst(out_t, is_store))
format!(" -> {}", type_to_rst(out_t, is_store, Vector))
};
let fn_inputs = match para_num {
1 => format!("(a: {})", type_to_rst(in_t[0], is_store)),
1 => format!("(a: {})", type_to_rst(in_t[0], is_store, Vector)),
2 => format!(
"(a: {}, b: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store)
type_to_rst(in_t[0], is_store, Vector),
type_to_rst(in_t[1], is_store, Vector)
),
3 => format!(
"(a: {}, b: {}, c: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store)
type_to_rst(in_t[0], is_store, Vector),
type_to_rst(in_t[1], is_store, Vector),
type_to_rst(in_t[2], is_store, Vector)
),
4 => format!(
"(a: {}, b: {}, c: {}, d: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store),
type_to_rst(in_t[3], is_store)
type_to_rst(in_t[0], is_store, Vector),
type_to_rst(in_t[1], is_store, Vector),
type_to_rst(in_t[2], is_store, Vector),
type_to_rst(in_t[3], is_store, Vector)
),
_ => panic!("unsupported parameter number"),
};
@ -330,34 +356,40 @@ fn gen_bind_body(
let fn_output = if out_t.to_lowercase() == "void" {
String::new()
} else {
format!("-> {} ", type_to_rst(out_t, is_store))
format!("-> {} ", type_to_rst(out_t, is_store, Intrinsic))
};
let mut fn_inputs = match para_num {
1 => format!("(a: {})", type_to_rst(in_t[0], is_store)),
1 => format!("(a: {})", type_to_rst(in_t[0], is_store, Intrinsic)),
2 => format!(
"(a: {}, b: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store)
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic)
),
3 => format!(
"(a: {}, b: {}, c: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store)
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
type_to_rst(in_t[2], is_store, Intrinsic)
),
4 => format!(
"(a: {}, b: {}, c: {}, d: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store),
type_to_rst(in_t[3], is_store)
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
type_to_rst(in_t[2], is_store, Intrinsic),
type_to_rst(in_t[3], is_store, Intrinsic)
),
_ => panic!("unsupported parameter number"),
};
if para_num == 1 && in_t[0] == "HI" {
fn_inputs = match asm_fmts[1].as_str() {
"si13" | "i13" => format!("<const IMM_S13: {}>()", type_to_rst(in_t[0], is_store)),
"si10" => format!("<const IMM_S10: {}>()", type_to_rst(in_t[0], is_store)),
"si13" | "i13" => format!(
"<const IMM_S13: {}>()",
type_to_rst(in_t[0], is_store, Intrinsic)
),
"si10" => format!(
"<const IMM_S10: {}>()",
type_to_rst(in_t[0], is_store, Intrinsic)
),
_ => panic!("unsupported assembly format: {}", asm_fmts[1]),
};
rustc_legacy_const_generics = "rustc_legacy_const_generics(0)";
@ -365,8 +397,8 @@ fn gen_bind_body(
fn_inputs = if asm_fmts[2].starts_with("ui") {
format!(
"<const IMM{2}: {1}>(a: {0})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
asm_fmts[2].get(2..).unwrap()
)
} else {
@ -377,8 +409,8 @@ fn gen_bind_body(
fn_inputs = if asm_fmts[2].starts_with("si") {
format!(
"<const IMM_S{2}: {1}>(a: {0})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
asm_fmts[2].get(2..).unwrap()
)
} else {
@ -389,8 +421,8 @@ fn gen_bind_body(
fn_inputs = if asm_fmts[2].starts_with("si") {
format!(
"<const IMM_S{2}: {1}>(mem_addr: {0})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
asm_fmts[2].get(2..).unwrap()
)
} else {
@ -401,8 +433,8 @@ fn gen_bind_body(
fn_inputs = match asm_fmts[2].as_str() {
"rk" => format!(
"(mem_addr: {}, b: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store)
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic)
),
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
};
@ -410,9 +442,9 @@ fn gen_bind_body(
fn_inputs = if asm_fmts[2].starts_with("ui") {
format!(
"<const IMM{3}: {2}>(a: {0}, b: {1})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store),
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
type_to_rst(in_t[2], is_store, Intrinsic),
asm_fmts[2].get(2..).unwrap()
)
} else {
@ -423,9 +455,9 @@ fn gen_bind_body(
fn_inputs = match asm_fmts[2].as_str() {
"si12" => format!(
"<const IMM_S12: {2}>(a: {0}, mem_addr: {1})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store)
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
type_to_rst(in_t[2], is_store, Intrinsic)
),
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
};
@ -434,9 +466,9 @@ fn gen_bind_body(
fn_inputs = match asm_fmts[2].as_str() {
"rk" => format!(
"(a: {}, mem_addr: {}, b: {})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store)
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
type_to_rst(in_t[2], is_store, Intrinsic)
),
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
};
@ -444,10 +476,10 @@ fn gen_bind_body(
fn_inputs = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) {
("si8", t) => format!(
"<const IMM_S8: {2}, const IMM{4}: {3}>(a: {0}, mem_addr: {1})",
type_to_rst(in_t[0], is_store),
type_to_rst(in_t[1], is_store),
type_to_rst(in_t[2], is_store),
type_to_rst(in_t[3], is_store),
type_to_rst(in_t[0], is_store, Intrinsic),
type_to_rst(in_t[1], is_store, Intrinsic),
type_to_rst(in_t[2], is_store, Intrinsic),
type_to_rst(in_t[3], is_store, Intrinsic),
type_to_imm(t),
),
(_, _) => panic!(
@ -466,10 +498,16 @@ fn gen_bind_body(
let unsafe_end = if !is_mem { " }" } else { "" };
let mut call_params = {
match para_num {
1 => format!("{unsafe_start}__{current_name}(a){unsafe_end}"),
2 => format!("{unsafe_start}__{current_name}(a, b){unsafe_end}"),
3 => format!("{unsafe_start}__{current_name}(a, b, c){unsafe_end}"),
4 => format!("{unsafe_start}__{current_name}(a, b, c, d){unsafe_end}"),
1 => format!("{unsafe_start}transmute(__{current_name}(transmute(a))){unsafe_end}"),
2 => format!(
"{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b))){unsafe_end}"
),
3 => format!(
"{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), transmute(c))){unsafe_end}"
),
4 => format!(
"{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), transmute(c), transmute(d))){unsafe_end}"
),
_ => panic!("unsupported parameter number"),
}
};
@ -477,12 +515,12 @@ fn gen_bind_body(
call_params = match asm_fmts[1].as_str() {
"si10" => {
format!(
"static_assert_simm_bits!(IMM_S10, 10);\n {unsafe_start}__{current_name}(IMM_S10){unsafe_end}"
"static_assert_simm_bits!(IMM_S10, 10);\n {unsafe_start}transmute(__{current_name}(IMM_S10)){unsafe_end}"
)
}
"i13" => {
format!(
"static_assert_simm_bits!(IMM_S13, 13);\n {unsafe_start}__{current_name}(IMM_S13){unsafe_end}"
"static_assert_simm_bits!(IMM_S13, 13);\n {unsafe_start}transmute(__{current_name}(IMM_S13)){unsafe_end}"
)
}
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
@ -490,7 +528,7 @@ fn gen_bind_body(
} else if para_num == 2 && (in_t[1] == "UQI" || in_t[1] == "USI") {
call_params = if asm_fmts[2].starts_with("ui") {
format!(
"static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}__{current_name}(a, IMM{0}){unsafe_end}",
"static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}transmute(__{current_name}(transmute(a), IMM{0})){unsafe_end}",
asm_fmts[2].get(2..).unwrap()
)
} else {
@ -500,7 +538,7 @@ fn gen_bind_body(
call_params = match asm_fmts[2].as_str() {
"si5" => {
format!(
"static_assert_simm_bits!(IMM_S5, 5);\n {unsafe_start}__{current_name}(a, IMM_S5){unsafe_end}"
"static_assert_simm_bits!(IMM_S5, 5);\n {unsafe_start}transmute(__{current_name}(transmute(a), IMM_S5)){unsafe_end}"
)
}
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
@ -508,7 +546,7 @@ fn gen_bind_body(
} else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "SI" {
call_params = if asm_fmts[2].starts_with("si") {
format!(
"static_assert_simm_bits!(IMM_S{0}, {0});\n {unsafe_start}__{current_name}(mem_addr, IMM_S{0}){unsafe_end}",
"static_assert_simm_bits!(IMM_S{0}, {0});\n {unsafe_start}transmute(__{current_name}(mem_addr, IMM_S{0})){unsafe_end}",
asm_fmts[2].get(2..).unwrap()
)
} else {
@ -516,13 +554,15 @@ fn gen_bind_body(
}
} else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "DI" {
call_params = match asm_fmts[2].as_str() {
"rk" => format!("{unsafe_start}__{current_name}(mem_addr, b){unsafe_end}"),
"rk" => format!(
"{unsafe_start}transmute(__{current_name}(mem_addr, transmute(b))){unsafe_end}"
),
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
};
} else if para_num == 3 && (in_t[2] == "USI" || in_t[2] == "UQI") {
call_params = if asm_fmts[2].starts_with("ui") {
format!(
"static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}__{current_name}(a, b, IMM{0}){unsafe_end}",
"static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), IMM{0})){unsafe_end}",
asm_fmts[2].get(2..).unwrap()
)
} else {
@ -531,19 +571,21 @@ fn gen_bind_body(
} else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "SI" {
call_params = match asm_fmts[2].as_str() {
"si12" => format!(
"static_assert_simm_bits!(IMM_S12, 12);\n {unsafe_start}__{current_name}(a, mem_addr, IMM_S12){unsafe_end}"
"static_assert_simm_bits!(IMM_S12, 12);\n {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S12)){unsafe_end}"
),
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
};
} else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "DI" {
call_params = match asm_fmts[2].as_str() {
"rk" => format!("{unsafe_start}__{current_name}(a, mem_addr, b){unsafe_end}"),
"rk" => format!(
"{unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, transmute(b))){unsafe_end}"
),
_ => panic!("unsupported assembly format: {}", asm_fmts[2]),
};
} else if para_num == 4 {
call_params = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) {
("si8", t) => format!(
"static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}__{current_name}(a, mem_addr, IMM_S8, IMM{0}){unsafe_end}",
"static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S8, IMM{0})){unsafe_end}",
type_to_imm(t)
),
(_, _) => panic!(

View file

@ -563,11 +563,7 @@ fn search(pos: &Pos, alpha: i32, beta: i32, depth: i32, _ply: i32) -> i32 {
assert!(bs >= -EVAL_INF && bs <= EVAL_INF);
//best move at the root node, best score elsewhere
if _ply == 0 {
bm
} else {
bs
}
if _ply == 0 { bm } else { bs }
}
/// Evaluation function: give different scores to different patterns after a fixed depth.

View file

@ -1 +1 @@
040e2f8b9ff2d76fbe2146d6003e297ed4532088
32e7a4b92b109c24e9822c862a7c74436b50e564

Some files were not shown because too many files have changed in this diff Show more