Rollup merge of #149209 - lto_refactors8, r=jackh726

Move LTO to OngoingCodegen::join

This will make it easier to move all this code to link_binary in the future.

Follow up to https://github.com/rust-lang/rust/pull/147810
Part of https://github.com/rust-lang/compiler-team/issues/908
This commit is contained in:
Jacob Pratt 2026-01-21 02:04:01 -05:00 committed by GitHub
commit 2206d935f7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 326 additions and 179 deletions

View file

@ -26,11 +26,11 @@ use std::sync::atomic::Ordering;
use gccjit::{Context, OutputKind};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::memmap::Mmap;
use rustc_errors::DiagCtxtHandle;
use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_log::tracing::info;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
@ -112,10 +112,11 @@ fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext<GccCodegenBackend>,
shared_emitter: &SharedEmitter,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<GccCodegenBackend>>,
) -> ModuleCodegen<GccContext> {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
/*let symbols_below_threshold =
@ -283,14 +284,13 @@ impl ModuleBufferMethods for ModuleBuffer {
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext<GccCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
if cgcx.opts.cg.linker_plugin_lto.enabled() {
if cgcx.use_linker_plugin_lto {
unreachable!(
"We should never reach this case if the LTO step \
is deferred to the linker"
@ -522,8 +522,6 @@ pub fn optimize_thin_module(
thin_module: ThinModule<GccCodegenBackend>,
_cgcx: &CodegenContext<GccCodegenBackend>,
) -> ModuleCodegen<GccContext> {
//let dcx = cgcx.create_dcx();
//let module_name = &thin_module.shared.module_names[thin_module.idx];
/*let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;*/

View file

@ -2,8 +2,11 @@ use std::{env, fs};
use gccjit::{Context, OutputKind};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, ModuleConfig, SharedEmitter,
};
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
use rustc_errors::DiagCtxt;
use rustc_fs_util::link_or_copy;
use rustc_log::tracing::debug;
use rustc_session::config::OutputType;
@ -15,10 +18,11 @@ use crate::{GccCodegenBackend, GccContext, LtoMode};
pub(crate) fn codegen(
cgcx: &CodegenContext<GccCodegenBackend>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<GccContext>,
config: &ModuleConfig,
) -> CompiledModule {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);

View file

@ -84,7 +84,7 @@ use gccjit::{TargetInfo, Version};
use rustc_ast::expand::allocator::AllocatorMethod;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::target_features::cfg_target_feature;
@ -435,23 +435,25 @@ impl WriteBackendMethods for GccCodegenBackend {
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
) -> ModuleCodegen<Self::Module> {
back::lto::run_fat(cgcx, each_linked_rlib_for_lto, modules)
back::lto::run_fat(cgcx, shared_emitter, each_linked_rlib_for_lto, modules)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) {
back::lto::run_thin(cgcx, each_linked_rlib_for_lto, modules, cached_modules)
back::lto::run_thin(cgcx, dcx, each_linked_rlib_for_lto, modules, cached_modules)
}
fn print_pass_timings(&self) {
@ -464,7 +466,7 @@ impl WriteBackendMethods for GccCodegenBackend {
fn optimize(
_cgcx: &CodegenContext<Self>,
_dcx: DiagCtxtHandle<'_>,
_shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) {
@ -473,6 +475,7 @@ impl WriteBackendMethods for GccCodegenBackend {
fn optimize_thin(
cgcx: &CodegenContext<Self>,
_shared_emitter: &SharedEmitter,
thin: ThinModule<Self>,
) -> ModuleCodegen<Self::Module> {
back::lto::optimize_thin_module(thin, cgcx)
@ -480,10 +483,11 @@ impl WriteBackendMethods for GccCodegenBackend {
fn codegen(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> CompiledModule {
back::write::codegen(cgcx, module, config)
back::write::codegen(cgcx, shared_emitter, module, config)
}
fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {

View file

@ -9,12 +9,12 @@ use std::{io, iter, slice};
use object::read::archive::ArchiveFile;
use object::{Object, ObjectSection};
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::DiagCtxtHandle;
use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_hir::attrs::SanitizerSet;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
@ -150,17 +150,18 @@ fn get_bitcode_slice_from_object_data<'a>(
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext<LlvmCodegenBackend>,
shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
) -> ModuleCodegen<ModuleLlvm> {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold)
fat_lto(cgcx, dcx, shared_emitter, modules, upstream_modules, &symbols_below_threshold)
}
/// Performs thin LTO by performing necessary global analysis and returning two
@ -168,18 +169,17 @@ pub(crate) fn run_fat(
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
if cgcx.opts.cg.linker_plugin_lto.enabled() {
if cgcx.use_linker_plugin_lto {
unreachable!(
"We should never reach this case if the LTO step \
is deferred to the linker"
@ -197,6 +197,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBu
fn fat_lto(
cgcx: &CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
symbols_below_threshold: &[*const libc::c_char],
@ -265,8 +266,13 @@ fn fat_lto(
// The linking steps below may produce errors and diagnostics within LLVM
// which we'd like to handle and print, so set up our diagnostic handlers
// (which get unregistered when they go out of scope below).
let _handler =
DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::LTO);
let _handler = DiagnosticHandlers::new(
cgcx,
shared_emitter,
llcx,
&module,
CodegenDiagnosticsStage::LTO,
);
// For all other modules we codegened we'll need to link them into our own
// bitcode. All modules were codegened in their own LLVM context, however,
@ -720,10 +726,11 @@ impl Drop for ThinBuffer {
}
pub(crate) fn optimize_thin_module(
thin_module: ThinModule<LlvmCodegenBackend>,
cgcx: &CodegenContext<LlvmCodegenBackend>,
shared_emitter: &SharedEmitter,
thin_module: ThinModule<LlvmCodegenBackend>,
) -> ModuleCodegen<ModuleLlvm> {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let module_name = &thin_module.shared.module_names[thin_module.idx];

View file

@ -9,7 +9,7 @@ use libc::{c_char, c_int, c_void, size_t};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig,
BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig, SharedEmitter,
TargetMachineFactoryConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::wants_wasm_eh;
@ -18,7 +18,7 @@ use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{DiagCtxtHandle, Level};
use rustc_errors::{DiagCtxt, DiagCtxtHandle, Level};
use rustc_fs_util::{link_or_copy, path_to_c_string};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
@ -356,7 +356,7 @@ pub(crate) enum CodegenDiagnosticsStage {
}
pub(crate) struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, DiagCtxtHandle<'a>),
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a SharedEmitter),
llcx: &'a llvm::Context,
old_handler: Option<&'a llvm::DiagnosticHandler>,
}
@ -364,7 +364,7 @@ pub(crate) struct DiagnosticHandlers<'a> {
impl<'a> DiagnosticHandlers<'a> {
pub(crate) fn new(
cgcx: &'a CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'a>,
shared_emitter: &'a SharedEmitter,
llcx: &'a llvm::Context,
module: &ModuleCodegen<ModuleLlvm>,
stage: CodegenDiagnosticsStage,
@ -398,8 +398,8 @@ impl<'a> DiagnosticHandlers<'a> {
})
.and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
let pgo_available = cgcx.opts.cg.profile_use.is_some();
let data = Box::into_raw(Box::new((cgcx, dcx)));
let pgo_available = cgcx.module_config.pgo_use.is_some();
let data = Box::into_raw(Box::new((cgcx, shared_emitter)));
unsafe {
let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
llvm::LLVMRustContextConfigureDiagnosticHandler(
@ -461,12 +461,16 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
if user.is_null() {
return;
}
let (cgcx, dcx) =
unsafe { *(user as *const (&CodegenContext<LlvmCodegenBackend>, DiagCtxtHandle<'_>)) };
let (cgcx, shared_emitter) =
unsafe { *(user as *const (&CodegenContext<LlvmCodegenBackend>, &SharedEmitter)) };
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
match unsafe { llvm::diagnostic::Diagnostic::unpack(info) } {
llvm::diagnostic::InlineAsm(inline) => {
cgcx.diag_emitter.inline_asm_error(report_inline_asm(
// FIXME use dcx
shared_emitter.inline_asm_error(report_inline_asm(
cgcx,
inline.message,
inline.level,
@ -776,7 +780,7 @@ pub(crate) unsafe fn llvm_optimize(
&*module.module_llvm.tm.raw(),
to_pass_builder_opt_level(opt_level),
opt_stage,
cgcx.opts.cg.linker_plugin_lto.enabled(),
cgcx.use_linker_plugin_lto,
config.no_prepopulate_passes,
config.verify_llvm_ir,
config.lint_llvm_ir,
@ -887,14 +891,18 @@ pub(crate) unsafe fn llvm_optimize(
// Unsafe due to LLVM calls.
pub(crate) fn optimize(
cgcx: &CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) {
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let llcx = &*module.module_llvm.llcx;
let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt);
let _handlers =
DiagnosticHandlers::new(cgcx, shared_emitter, llcx, module, CodegenDiagnosticsStage::Opt);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext_for_cgu(
@ -911,7 +919,7 @@ pub(crate) fn optimize(
let opt_stage = match cgcx.lto {
Lto::Fat => llvm::OptStage::PreLinkFatLTO,
Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
_ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
_ if cgcx.use_linker_plugin_lto => llvm::OptStage::PreLinkThinLTO,
_ => llvm::OptStage::PreLinkNoLTO,
};
@ -974,19 +982,26 @@ pub(crate) fn optimize(
pub(crate) fn codegen(
cgcx: &CodegenContext<LlvmCodegenBackend>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> CompiledModule {
let dcx = cgcx.create_dcx();
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let _handlers =
DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::Codegen);
let _handlers = DiagnosticHandlers::new(
cgcx,
shared_emitter,
llcx,
&module,
CodegenDiagnosticsStage::Codegen,
);
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);

View file

@ -30,12 +30,13 @@ use llvm_util::target_config;
use rustc_ast::expand::allocator::AllocatorMethod;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryConfig,
TargetMachineFactoryFn,
};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig};
use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::DiagCtxtHandle;
use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
@ -166,14 +167,20 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
) -> ModuleCodegen<Self::Module> {
let mut module =
back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules);
let mut module = back::lto::run_fat(
cgcx,
shared_emitter,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
modules,
);
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
back::lto::run_pass_manager(cgcx, dcx, &mut module, false);
@ -181,6 +188,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
@ -188,6 +196,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) {
back::lto::run_thin(
cgcx,
dcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
modules,
@ -196,24 +205,26 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn optimize(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) {
back::write::optimize(cgcx, dcx, module, config)
back::write::optimize(cgcx, shared_emitter, module, config)
}
fn optimize_thin(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
thin: ThinModule<Self>,
) -> ModuleCodegen<Self::Module> {
back::lto::optimize_thin_module(thin, cgcx)
back::lto::optimize_thin_module(cgcx, shared_emitter, thin)
}
fn codegen(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> CompiledModule {
back::write::codegen(cgcx, module, config)
back::write::codegen(cgcx, shared_emitter, module, config)
}
fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(module)

View file

@ -2,6 +2,7 @@ use std::ffi::CString;
use std::sync::Arc;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::DiagCtxtHandle;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportLevel};
use rustc_middle::ty::TyCtxt;
@ -124,28 +125,29 @@ pub(super) fn exported_symbols_for_lto(
symbols_below_threshold
}
pub(super) fn check_lto_allowed<B: WriteBackendMethods>(cgcx: &CodegenContext<B>) {
pub(super) fn check_lto_allowed<B: WriteBackendMethods>(
cgcx: &CodegenContext<B>,
dcx: DiagCtxtHandle<'_>,
) {
if cgcx.lto == Lto::ThinLocal {
// Crate local LTO is always allowed
return;
}
let dcx = cgcx.create_dcx();
// Make sure we actually can run LTO
for crate_type in cgcx.crate_types.iter() {
if !crate_type_allows_lto(*crate_type) {
dcx.handle().emit_fatal(LtoDisallowed);
} else if *crate_type == CrateType::Dylib {
if !cgcx.opts.unstable_opts.dylib_lto {
if !cgcx.dylib_lto {
dcx.handle().emit_fatal(LtoDylib);
}
} else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto {
} else if *crate_type == CrateType::ProcMacro && !cgcx.dylib_lto {
dcx.handle().emit_fatal(LtoProcMacro);
}
}
if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
if cgcx.prefer_dynamic && !cgcx.dylib_lto {
dcx.handle().emit_fatal(DynamicLinkingWithLTO);
}
}

View file

@ -15,8 +15,8 @@ use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::translation::Translator;
use rustc_errors::{
Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalError, FatalErrorMarker, Level,
MultiSpan, Style, Suggestions,
Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_incremental::{
@ -326,15 +326,16 @@ pub struct CodegenContext<B: WriteBackendMethods> {
// Resources needed when running LTO
pub prof: SelfProfilerRef,
pub lto: Lto,
pub use_linker_plugin_lto: bool,
pub dylib_lto: bool,
pub prefer_dynamic: bool,
pub save_temps: bool,
pub fewer_names: bool,
pub time_trace: bool,
pub opts: Arc<config::Options>,
pub crate_types: Vec<CrateType>,
pub output_filenames: Arc<OutputFilenames>,
pub invocation_temp: Option<String>,
pub module_config: Arc<ModuleConfig>,
pub allocator_config: Arc<ModuleConfig>,
pub tm_factory: TargetMachineFactoryFn<B>,
pub msvc_imps_needed: bool,
pub is_pe_coff: bool,
@ -347,8 +348,6 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
pub pointer_size: Size,
/// Emitter to use for diagnostics produced during codegen.
pub diag_emitter: SharedEmitter,
/// LLVM optimizations for which we want to print remarks.
pub remark: Passes,
/// Directory into which should the LLVM optimization remarks be written.
@ -363,14 +362,9 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub parallel: bool,
}
impl<B: WriteBackendMethods> CodegenContext<B> {
pub fn create_dcx(&self) -> DiagCtxt {
DiagCtxt::new(Box::new(self.diag_emitter.clone()))
}
}
fn generate_thin_lto_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
@ -380,6 +374,7 @@ fn generate_thin_lto_work<B: ExtraBackendMethods>(
let (lto_modules, copy_jobs) = B::run_thin_lto(
cgcx,
dcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_thin_lto,
@ -408,6 +403,29 @@ struct CompiledModules {
allocator_module: Option<CompiledModule>,
}
enum MaybeLtoModules<B: WriteBackendMethods> {
NoLto {
modules: Vec<CompiledModule>,
allocator_module: Option<CompiledModule>,
},
FatLto {
cgcx: CodegenContext<B>,
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_file_for_lto: Vec<PathBuf>,
needs_fat_lto: Vec<FatLtoInput<B>>,
lto_import_only_modules:
Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
},
ThinLto {
cgcx: CodegenContext<B>,
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_file_for_lto: Vec<PathBuf>,
needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
lto_import_only_modules:
Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
},
}
fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
let sess = tcx.sess;
sess.opts.cg.embed_bitcode
@ -797,20 +815,12 @@ pub(crate) enum ComputedLtoType {
pub(crate) fn compute_per_cgu_lto_type(
sess_lto: &Lto,
opts: &config::Options,
linker_does_lto: bool,
sess_crate_types: &[CrateType],
module_kind: ModuleKind,
) -> ComputedLtoType {
// If the linker does LTO, we don't have to do it. Note that we
// keep doing full LTO, if it is requested, as not to break the
// assumption that the output will be a single module.
let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
// When we're automatically doing ThinLTO for multi-codegen-unit
// builds we don't actually want to LTO the allocator module if
// it shows up. This is due to various linker shenanigans that
// we'll encounter later.
let is_allocator = module_kind == ModuleKind::Allocator;
// We ignore a request for full crate graph LTO if the crate type
// is only an rlib, as there is no full crate graph to process,
@ -823,7 +833,7 @@ pub(crate) fn compute_per_cgu_lto_type(
let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);
match sess_lto {
Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
Lto::Fat if !is_rlib => ComputedLtoType::Fat,
_ => ComputedLtoType::No,
@ -832,30 +842,24 @@ pub(crate) fn compute_per_cgu_lto_type(
fn execute_optimize_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
mut module: ModuleCodegen<B::Module>,
) -> WorkItemResult<B> {
let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let module_config = match module.kind {
ModuleKind::Regular => &cgcx.module_config,
ModuleKind::Allocator => &cgcx.allocator_config,
};
B::optimize(cgcx, dcx, &mut module, module_config);
B::optimize(cgcx, &shared_emitter, &mut module, &cgcx.module_config);
// After we've done the initial round of optimizations we need to
// decide whether to synchronously codegen this module or ship it
// back to the coordinator thread for further LTO processing (which
// has to wait for all the initial modules to be optimized).
let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
let lto_type =
compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);
// If we're doing some form of incremental LTO then we need to be sure to
// save our module to disk first.
let bitcode = if module_config.emit_pre_lto_bc {
let bitcode = if cgcx.module_config.emit_pre_lto_bc {
let filename = pre_lto_bitcode_filename(&module.name);
cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
} else {
@ -864,7 +868,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
match lto_type {
ComputedLtoType::No => {
let module = B::codegen(cgcx, module, module_config);
let module = B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config);
WorkItemResult::Finished(module)
}
ComputedLtoType::Thin => {
@ -894,12 +898,16 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
module: CachedModuleCodegen,
) -> CompiledModule {
let _timer = cgcx
.prof
.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);
let dcx = DiagCtxt::new(Box::new(shared_emitter));
let dcx = dcx.handle();
let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
let mut links_from_incr_cache = Vec::new();
@ -918,11 +926,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
Some(output_path)
}
Err(error) => {
cgcx.create_dcx().handle().emit_err(errors::CopyPathBuf {
source_file,
output_path,
error,
});
dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
None
}
}
@ -965,7 +969,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
if should_emit_obj && object.is_none() {
cgcx.create_dcx().handle().emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
}
CompiledModule {
@ -982,6 +986,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
fn do_fat_lto<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
mut needs_fat_lto: Vec<FatLtoInput<B>>,
@ -989,7 +994,10 @@ fn do_fat_lto<B: ExtraBackendMethods>(
) -> CompiledModule {
let _timer = cgcx.prof.verbose_generic_activity("LLVM_fatlto");
check_lto_allowed(&cgcx);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
check_lto_allowed(&cgcx, dcx);
for (module, wp) in import_only_modules {
needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
@ -997,15 +1005,17 @@ fn do_fat_lto<B: ExtraBackendMethods>(
let module = B::run_and_optimize_fat_lto(
cgcx,
&shared_emitter,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
);
B::codegen(cgcx, module, &cgcx.module_config)
B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
}
fn do_thin_lto<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
shared_emitter: SharedEmitter,
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_for_lto: Vec<PathBuf>,
needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
@ -1016,7 +1026,10 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
) -> Vec<CompiledModule> {
let _timer = cgcx.prof.verbose_generic_activity("LLVM_thinlto");
check_lto_allowed(&cgcx);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
check_lto_allowed(&cgcx, dcx);
let (coordinator_send, coordinator_receive) = channel();
@ -1041,6 +1054,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
// we don't worry about tokens.
for (work, cost) in generate_thin_lto_work(
cgcx,
dcx,
&exported_symbols_for_lto,
&each_linked_rlib_for_lto,
needs_thin_lto,
@ -1082,7 +1096,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
while used_token_count < tokens.len() + 1
&& let Some((item, _)) = work_items.pop()
{
spawn_thin_lto_work(&cgcx, coordinator_send.clone(), item);
spawn_thin_lto_work(&cgcx, shared_emitter.clone(), coordinator_send.clone(), item);
used_token_count += 1;
}
} else {
@ -1106,7 +1120,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
}
Err(e) => {
let msg = &format!("failed to acquire jobserver token: {e}");
cgcx.diag_emitter.fatal(msg);
shared_emitter.fatal(msg);
codegen_aborted = Some(FatalError);
}
},
@ -1144,12 +1158,13 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
fn execute_thin_lto_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
module: lto::ThinModule<B>,
) -> CompiledModule {
let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());
let module = B::optimize_thin(cgcx, module);
B::codegen(cgcx, module, &cgcx.module_config)
let module = B::optimize_thin(cgcx, &shared_emitter, module);
B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
}
/// Messages sent to the coordinator.
@ -1245,9 +1260,9 @@ fn start_executing_work<B: ExtraBackendMethods>(
coordinator_receive: Receiver<Message<B>>,
regular_config: Arc<ModuleConfig>,
allocator_config: Arc<ModuleConfig>,
allocator_module: Option<ModuleCodegen<B::Module>>,
mut allocator_module: Option<ModuleCodegen<B::Module>>,
coordinator_send: Sender<Message<B>>,
) -> thread::JoinHandle<Result<CompiledModules, ()>> {
) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
let sess = tcx.sess;
let mut each_linked_rlib_for_lto = Vec::new();
@ -1292,18 +1307,18 @@ fn start_executing_work<B: ExtraBackendMethods>(
let cgcx = CodegenContext::<B> {
crate_types: tcx.crate_types().to_vec(),
lto: sess.lto(),
use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
dylib_lto: sess.opts.unstable_opts.dylib_lto,
prefer_dynamic: sess.opts.cg.prefer_dynamic,
fewer_names: sess.fewer_names(),
save_temps: sess.opts.cg.save_temps,
time_trace: sess.opts.unstable_opts.llvm_time_trace,
opts: Arc::new(sess.opts.clone()),
prof: sess.prof.clone(),
remark: sess.opts.cg.remark.clone(),
remark_dir,
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
diag_emitter: shared_emitter.clone(),
output_filenames: Arc::clone(tcx.output_filenames(())),
module_config: regular_config,
allocator_config,
tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features),
msvc_imps_needed: msvc_imps_needed(tcx),
is_pe_coff: tcx.sess.target.is_like_windows,
@ -1497,16 +1512,9 @@ fn start_executing_work<B: ExtraBackendMethods>(
let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
let compiled_allocator_module = allocator_module.and_then(|allocator_module| {
match execute_optimize_work_item(&cgcx, allocator_module) {
WorkItemResult::Finished(compiled_module) => return Some(compiled_module),
WorkItemResult::NeedsFatLto(fat_lto_input) => needs_fat_lto.push(fat_lto_input),
WorkItemResult::NeedsThinLto(name, thin_buffer) => {
needs_thin_lto.push((name, thin_buffer))
}
}
None
});
if let Some(allocator_module) = &mut allocator_module {
B::optimize(&cgcx, &shared_emitter, allocator_module, &allocator_config);
}
// Run the message loop while there's still anything that needs message
// processing. Note that as soon as codegen is aborted we simply want to
@ -1543,7 +1551,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
let (item, _) =
work_items.pop().expect("queue empty - queue_full_enough() broken?");
main_thread_state = MainThreadState::Lending;
spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
spawn_work(
&cgcx,
shared_emitter.clone(),
coordinator_send.clone(),
&mut llvm_start_time,
item,
);
}
}
} else if codegen_state == Completed {
@ -1561,7 +1575,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
MainThreadState::Idle => {
if let Some((item, _)) = work_items.pop() {
main_thread_state = MainThreadState::Lending;
spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
spawn_work(
&cgcx,
shared_emitter.clone(),
coordinator_send.clone(),
&mut llvm_start_time,
item,
);
} else {
// There is no unstarted work, so let the main thread
// take over for a running worker. Otherwise the
@ -1597,7 +1617,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
while running_with_own_token < tokens.len()
&& let Some((item, _)) = work_items.pop()
{
spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
spawn_work(
&cgcx,
shared_emitter.clone(),
coordinator_send.clone(),
&mut llvm_start_time,
item,
);
running_with_own_token += 1;
}
}
@ -1733,36 +1759,51 @@ fn start_executing_work<B: ExtraBackendMethods>(
assert!(compiled_modules.is_empty());
assert!(needs_thin_lto.is_empty());
// This uses the implicit token
let module = do_fat_lto(
&cgcx,
&exported_symbols_for_lto,
&each_linked_rlib_file_for_lto,
if let Some(allocator_module) = allocator_module.take() {
needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
}
return Ok(MaybeLtoModules::FatLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_fat_lto,
lto_import_only_modules,
);
compiled_modules.push(module);
});
} else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
assert!(compiled_modules.is_empty());
assert!(needs_fat_lto.is_empty());
compiled_modules.extend(do_thin_lto(
&cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
));
if cgcx.lto == Lto::ThinLocal {
compiled_modules.extend(do_thin_lto(
&cgcx,
shared_emitter.clone(),
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
));
} else {
if let Some(allocator_module) = allocator_module.take() {
let (name, thin_buffer) = B::prepare_thin(allocator_module);
needs_thin_lto.push((name, thin_buffer));
}
return Ok(MaybeLtoModules::ThinLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
});
}
}
// Regardless of what order these modules completed in, report them to
// the backend in the same order every time to ensure that we're handing
// out deterministic results.
compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
Ok(CompiledModules {
Ok(MaybeLtoModules::NoLto {
modules: compiled_modules,
allocator_module: compiled_allocator_module,
allocator_module: allocator_module.map(|allocator_module| {
B::codegen(&cgcx, &shared_emitter, allocator_module, &allocator_config)
}),
})
})
.expect("failed to spawn coordinator thread");
@ -1831,6 +1872,7 @@ pub(crate) struct WorkerFatalError;
fn spawn_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
shared_emitter: SharedEmitter,
coordinator_send: Sender<Message<B>>,
llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
work: WorkItem<B>,
@ -1843,10 +1885,10 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, m),
WorkItem::CopyPostLtoArtifacts(m) => {
WorkItemResult::Finished(execute_copy_from_cache_work_item(&cgcx, m))
}
WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, shared_emitter, m),
WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
execute_copy_from_cache_work_item(&cgcx, shared_emitter, m),
),
}));
let msg = match result {
@ -1868,6 +1910,7 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
shared_emitter: SharedEmitter,
coordinator_send: Sender<ThinLtoMessage>,
work: ThinLtoWorkItem<B>,
) {
@ -1875,8 +1918,10 @@ fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
ThinLtoWorkItem::CopyPostLtoArtifacts(m) => execute_copy_from_cache_work_item(&cgcx, m),
ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, m),
ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
execute_copy_from_cache_work_item(&cgcx, shared_emitter, m)
}
ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, shared_emitter, m),
}));
let msg = match result {
@ -2052,13 +2097,13 @@ impl SharedEmitterMain {
pub struct Coordinator<B: ExtraBackendMethods> {
sender: Sender<Message<B>>,
future: Option<thread::JoinHandle<Result<CompiledModules, ()>>>,
future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
// Only used for the Message type.
phantom: PhantomData<B>,
}
impl<B: ExtraBackendMethods> Coordinator<B> {
fn join(mut self) -> std::thread::Result<Result<CompiledModules, ()>> {
fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
self.future.take().unwrap().join()
}
}
@ -2089,8 +2134,9 @@ pub struct OngoingCodegen<B: ExtraBackendMethods> {
impl<B: ExtraBackendMethods> OngoingCodegen<B> {
pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
self.shared_emitter_main.check(sess, true);
let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
Ok(Ok(compiled_modules)) => compiled_modules,
let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
Ok(Err(())) => {
sess.dcx().abort_if_errors();
panic!("expected abort due to worker thread errors")
@ -2102,6 +2148,62 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
sess.dcx().abort_if_errors();
let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
// Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
MaybeLtoModules::NoLto { modules, allocator_module } => {
drop(shared_emitter);
CompiledModules { modules, allocator_module }
}
MaybeLtoModules::FatLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_fat_lto,
lto_import_only_modules,
} => CompiledModules {
modules: vec![do_fat_lto(
&cgcx,
shared_emitter,
&exported_symbols_for_lto,
&each_linked_rlib_file_for_lto,
needs_fat_lto,
lto_import_only_modules,
)],
allocator_module: None,
},
MaybeLtoModules::ThinLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
} => CompiledModules {
modules: do_thin_lto(
&cgcx,
shared_emitter,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
),
allocator_module: None,
},
});
shared_emitter_main.check(sess, true);
sess.dcx().abort_if_errors();
let mut compiled_modules =
compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");
// Regardless of what order these modules completed in, report them to
// the backend in the same order every time to ensure that we're handing
// out deterministic results.
compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));
let work_products =
copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);

View file

@ -49,9 +49,7 @@ use crate::meth::load_vtable;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{
CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, ModuleKind, errors, meth, mir,
};
use crate::{CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, errors, meth, mir};
pub(crate) fn bin_op_to_icmp_predicate(op: BinOp, signed: bool) -> IntPredicate {
match (op, signed) {
@ -1126,9 +1124,8 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) ->
// reuse pre-LTO artifacts
match compute_per_cgu_lto_type(
&tcx.sess.lto(),
&tcx.sess.opts,
tcx.sess.opts.cg.linker_plugin_lto.enabled(),
tcx.crate_types(),
ModuleKind::Regular,
) {
ComputedLtoType::No => CguReuse::PostLto,
_ => CguReuse::PreLto,

View file

@ -4,7 +4,7 @@ use rustc_errors::DiagCtxtHandle;
use rustc_middle::dep_graph::WorkProduct;
use crate::back::lto::{SerializedModule, ThinModule};
use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig};
use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter};
use crate::{CompiledModule, ModuleCodegen};
pub trait WriteBackendMethods: Clone + 'static {
@ -19,6 +19,7 @@ pub trait WriteBackendMethods: Clone + 'static {
/// if necessary and running any further optimizations
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
@ -28,6 +29,7 @@ pub trait WriteBackendMethods: Clone + 'static {
/// can simply be copied over from the incr. comp. cache.
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
@ -37,16 +39,18 @@ pub trait WriteBackendMethods: Clone + 'static {
fn print_statistics(&self);
fn optimize(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
);
fn optimize_thin(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
thin: ThinModule<Self>,
) -> ModuleCodegen<Self::Module>;
fn codegen(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> CompiledModule;

View file

@ -18,7 +18,7 @@ use std::ffi::OsString;
use std::fmt::Write as _;
use std::fs::{self, File};
use std::io::{self, IsTerminal, Read, Write};
use std::panic::{self, PanicHookInfo, catch_unwind};
use std::panic::{self, PanicHookInfo};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use std::sync::OnceLock;
@ -32,10 +32,11 @@ use rustc_codegen_ssa::{CodegenErrors, CodegenResults};
use rustc_data_structures::profiling::{
TimePassesFormat, get_resident_set_size, print_time_passes_entry,
};
pub use rustc_errors::catch_fatal_errors;
use rustc_errors::emitter::stderr_destination;
use rustc_errors::registry::Registry;
use rustc_errors::translation::Translator;
use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, FatalError, PResult, markdown};
use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, PResult, markdown};
use rustc_feature::find_gated_cfg;
// This avoids a false positive with `-Wunused_crate_dependencies`.
// `rust_index` isn't used in this crate's code, but it must be named in the
@ -1377,21 +1378,6 @@ fn parse_crate_attrs<'a>(sess: &'a Session) -> PResult<'a, ast::AttrVec> {
parser.parse_inner_attributes()
}
/// Runs a closure and catches unwinds triggered by fatal errors.
///
/// The compiler currently unwinds with a special sentinel value to abort
/// compilation on fatal errors. This function catches that sentinel and turns
/// the panic into a `Result` instead.
///
/// Returns `Ok` with the closure's value on normal completion, and
/// `Err(FatalError)` when the closure unwound with a `FatalErrorMarker`
/// payload. Any other panic payload is propagated unchanged.
pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, FatalError> {
    // `AssertUnwindSafe` asserts unwind safety for the closure so it can cross
    // the `catch_unwind` boundary.
    catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| {
        if value.is::<rustc_errors::FatalErrorMarker>() {
            FatalError
        } else {
            // Not the fatal-error sentinel: this is a genuine panic, so let it
            // keep unwinding.
            panic::resume_unwind(value);
        }
    })
}
/// Variant of `catch_fatal_errors` for the `interface::Result` return type
/// that also computes the exit code.
pub fn catch_with_exit_code(f: impl FnOnce()) -> i32 {

View file

@ -66,7 +66,7 @@ use rustc_lint_defs::LintExpectationId;
pub use rustc_lint_defs::{Applicability, listify, pluralize};
use rustc_macros::{Decodable, Encodable};
pub use rustc_span::ErrorGuaranteed;
pub use rustc_span::fatal_error::{FatalError, FatalErrorMarker};
pub use rustc_span::fatal_error::{FatalError, FatalErrorMarker, catch_fatal_errors};
use rustc_span::source_map::SourceMap;
use rustc_span::{BytePos, DUMMY_SP, Loc, Span};
pub use snippet::Style;

View file

@ -3,6 +3,8 @@
/// Sentinel error type used to abort compilation on a fatal error.
///
/// Marked `#[must_use]` so a constructed `FatalError` cannot be silently
/// discarded by the caller.
#[must_use]
pub struct FatalError;
use std::panic;
pub use rustc_data_structures::FatalErrorMarker;
// Don't implement Send on FatalError. This makes it impossible to `panic_any!(FatalError)`.
@ -22,3 +24,18 @@ impl std::fmt::Display for FatalError {
}
impl std::error::Error for FatalError {}
/// Runs a closure, converting a fatal-error unwind into `Err(FatalError)`.
///
/// Fatal errors currently abort compilation by unwinding with a dedicated
/// sentinel payload (`FatalErrorMarker`). This function catches that sentinel
/// and surfaces it as a `Result`; any other panic continues unwinding
/// untouched.
pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, FatalError> {
    match panic::catch_unwind(panic::AssertUnwindSafe(f)) {
        Ok(value) => Ok(value),
        // The sentinel marks an intentional abort: report it as an error.
        Err(payload) if payload.is::<FatalErrorMarker>() => Err(FatalError),
        // Anything else is a genuine panic; re-raise it.
        Err(payload) => panic::resume_unwind(payload),
    }
}