AsyncDrop implementation using shim codegen of async_drop_in_place::{closure}, scoped async drop added.

This commit is contained in:
Andrew Zhogin 2024-08-26 16:45:15 +03:00
parent 52c1838fa7
commit c366756a85
116 changed files with 4054 additions and 1879 deletions

View file

@ -795,7 +795,14 @@ impl<'a, 'tcx> ResultsVisitor<'tcx, Borrowck<'a, 'tcx>> for MirBorrowckCtxt<'a,
TerminatorKind::SwitchInt { discr, targets: _ } => {
self.consume_operand(loc, (discr, span), state);
}
TerminatorKind::Drop { place, target: _, unwind: _, replace } => {
TerminatorKind::Drop {
place,
target: _,
unwind: _,
replace,
drop: _,
async_fut: _,
} => {
debug!(
"visit_terminator_drop \
loc: {:?} term: {:?} place: {:?} span: {:?}",

View file

@ -101,7 +101,14 @@ impl<'a, 'tcx> Visitor<'tcx> for LoanInvalidationsGenerator<'a, 'tcx> {
TerminatorKind::SwitchInt { discr, targets: _ } => {
self.consume_operand(location, discr);
}
TerminatorKind::Drop { place: drop_place, target: _, unwind: _, replace } => {
TerminatorKind::Drop {
place: drop_place,
target: _,
unwind: _,
replace,
drop: _,
async_fut: _,
} => {
let write_kind =
if *replace { WriteKind::Replace } else { WriteKind::StorageDeadOrDrop };
self.access_place(

View file

@ -2079,8 +2079,14 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
TerminatorKind::Unreachable => {}
TerminatorKind::Drop { target, unwind, .. }
| TerminatorKind::Assert { target, unwind, .. } => {
TerminatorKind::Drop { target, unwind, drop, .. } => {
self.assert_iscleanup(block_data, target, is_cleanup);
self.assert_iscleanup_unwind(block_data, unwind, is_cleanup);
if let Some(drop) = drop {
self.assert_iscleanup(block_data, drop, is_cleanup);
}
}
TerminatorKind::Assert { target, unwind, .. } => {
self.assert_iscleanup(block_data, target, is_cleanup);
self.assert_iscleanup_unwind(block_data, unwind, is_cleanup);
}

View file

@ -441,7 +441,9 @@ pub(crate) fn codegen_terminator_call<'tcx>(
Err(instance) => Some(instance),
}
}
InstanceKind::DropGlue(_, None) | ty::InstanceKind::AsyncDropGlueCtorShim(_, None) => {
// We don't need AsyncDropGlueCtorShim here because it is not a `noop func`,
// it is a `func returning a noop future`.
InstanceKind::DropGlue(_, None) => {
// empty drop glue - a nop.
let dest = target.expect("Non terminating drop_in_place_real???");
let ret_block = fx.get_block(dest);
@ -707,9 +709,8 @@ pub(crate) fn codegen_drop<'tcx>(
let ty = drop_place.layout().ty;
let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty);
if let ty::InstanceKind::DropGlue(_, None) | ty::InstanceKind::AsyncDropGlueCtorShim(_, None) =
drop_instance.def
{
// AsyncDropGlueCtorShim can't be here
if let ty::InstanceKind::DropGlue(_, None) = drop_instance.def {
// we don't actually need to drop anything
} else {
match ty.kind() {

View file

@ -565,7 +565,11 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
| TerminatorKind::CoroutineDrop => {
bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
}
TerminatorKind::Drop { place, target, unwind: _, replace: _ } => {
TerminatorKind::Drop { place, target, unwind: _, replace: _, drop, async_fut } => {
assert!(
async_fut.is_none() && drop.is_none(),
"Async Drop must be expanded or reset to sync before codegen"
);
let drop_place = codegen_place(fx, *place);
crate::abi::codegen_drop(fx, source_info, drop_place, *target);
}

View file

@ -721,8 +721,7 @@ fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>(
_ => unreachable!(),
};
let coroutine_layout =
cx.tcx.coroutine_layout(coroutine_def_id, coroutine_args.kind_ty()).unwrap();
let coroutine_layout = cx.tcx.coroutine_layout(coroutine_def_id, coroutine_args.args).unwrap();
let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(coroutine_def_id);
let variant_range = coroutine_args.variant_range(coroutine_def_id, cx.tcx);

View file

@ -174,10 +174,8 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
DIFlags::FlagZero,
),
|cx, coroutine_type_di_node| {
let coroutine_layout = cx
.tcx
.coroutine_layout(coroutine_def_id, coroutine_args.as_coroutine().kind_ty())
.unwrap();
let coroutine_layout =
cx.tcx.coroutine_layout(coroutine_def_id, coroutine_args).unwrap();
let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } =
coroutine_type_and_layout.variants

View file

@ -374,7 +374,7 @@ fn exported_symbols_provider_local(
));
}
MonoItem::Fn(Instance {
def: InstanceKind::AsyncDropGlueCtorShim(_, Some(ty)),
def: InstanceKind::AsyncDropGlueCtorShim(_, ty),
args,
}) => {
// A little sanity-check
@ -388,6 +388,16 @@ fn exported_symbols_provider_local(
},
));
}
MonoItem::Fn(Instance { def: InstanceKind::AsyncDropGlue(def, ty), args: _ }) => {
symbols.push((
ExportedSymbol::AsyncDropGlue(def, ty),
SymbolExportInfo {
level: SymbolExportLevel::Rust,
kind: SymbolExportKind::Text,
used: false,
},
));
}
_ => {
// Any other symbols don't qualify for sharing
}
@ -429,11 +439,10 @@ fn upstream_monomorphizations_provider(
if let Some(async_drop_in_place_fn_def_id) = async_drop_in_place_fn_def_id {
(async_drop_in_place_fn_def_id, tcx.mk_args(&[ty.into()]))
} else {
// `async_drop_in_place` does not exist, don't try
// to use it.
continue;
}
}
ExportedSymbol::AsyncDropGlue(def_id, ty) => (def_id, tcx.mk_args(&[ty.into()])),
ExportedSymbol::NonGeneric(..)
| ExportedSymbol::ThreadLocalShim(..)
| ExportedSymbol::NoDefId(..) => {
@ -582,6 +591,13 @@ pub(crate) fn symbol_name_for_instance_in_crate<'tcx>(
instantiating_crate,
)
}
ExportedSymbol::AsyncDropGlue(def_id, ty) => {
rustc_symbol_mangling::symbol_name_for_instance_in_crate(
tcx,
Instance::resolve_async_drop_in_place_poll(tcx, def_id, ty),
instantiating_crate,
)
}
ExportedSymbol::NoDefId(symbol_name) => symbol_name.to_string(),
}
}
@ -604,6 +620,7 @@ fn calling_convention_for_symbol<'tcx>(
// AsyncDropGlueCtorShim always uses the Rust calling convention and thus follows the
// target's default symbol decoration scheme.
ExportedSymbol::AsyncDropGlueCtorShim(..) => None,
ExportedSymbol::AsyncDropGlue(..) => None,
// NoDefId always follow the target's default symbol decoration scheme.
ExportedSymbol::NoDefId(..) => None,
// ThreadLocalShim always follow the target's default symbol decoration scheme.

View file

@ -926,10 +926,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let def = instance.map(|i| i.def);
if let Some(
ty::InstanceKind::DropGlue(_, None) | ty::InstanceKind::AsyncDropGlueCtorShim(_, None),
) = def
{
// We don't need AsyncDropGlueCtorShim here because it is not a `noop func`,
// it is a `func returning a noop future`.
if let Some(ty::InstanceKind::DropGlue(_, None)) = def {
// Empty drop glue; a no-op.
let target = target.unwrap();
return helper.funclet_br(self, bx, target, mergeable_succ);
@ -1386,8 +1385,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
MergingSucc::False
}
mir::TerminatorKind::Drop { place, target, unwind, replace: _ } => self
.codegen_drop_terminator(
mir::TerminatorKind::Drop { place, target, unwind, replace: _, drop, async_fut } => {
assert!(
async_fut.is_none() && drop.is_none(),
"Async Drop must be expanded or reset to sync before codegen"
);
self.codegen_drop_terminator(
helper,
bx,
&terminator.source_info,
@ -1395,7 +1398,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
target,
unwind,
mergeable_succ(),
),
)
}
mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, unwind } => self
.codegen_assert_terminator(

View file

@ -502,6 +502,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
ResumedAfterReturn(coroutine_kind) => ResumedAfterReturn(*coroutine_kind),
ResumedAfterPanic(coroutine_kind) => ResumedAfterPanic(*coroutine_kind),
ResumedAfterDrop(coroutine_kind) => ResumedAfterDrop(*coroutine_kind),
MisalignedPointerDereference { required, found } => MisalignedPointerDereference {
required: eval_to_int(required)?,
found: eval_to_int(found)?,

View file

@ -570,6 +570,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
| ty::InstanceKind::FnPtrAddrShim(..)
| ty::InstanceKind::ThreadLocalShim(..)
| ty::InstanceKind::AsyncDropGlueCtorShim(..)
| ty::InstanceKind::AsyncDropGlue(..)
| ty::InstanceKind::FutureDropPollShim(..)
| ty::InstanceKind::Item(_) => {
// We need MIR for this fn.
// Note that this can be an intrinsic, if we are executing its fallback body.

View file

@ -539,7 +539,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
Drop { place, target, unwind, replace: _ } => {
Drop { place, target, unwind, replace: _, drop, async_fut } => {
assert!(
async_fut.is_none() && drop.is_none(),
"Async Drop must be expanded or reset to sync in runtime MIR"
);
let place = self.eval_place(place)?;
let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
if let ty::InstanceKind::DropGlue(_, None) = instance.def {

View file

@ -381,6 +381,8 @@ declare_features! (
(unstable, associated_const_equality, "1.58.0", Some(92827)),
/// Allows associated type defaults.
(unstable, associated_type_defaults, "1.2.0", Some(29661)),
/// Allows implementing `AsyncDrop`.
(incomplete, async_drop, "CURRENT_RUSTC_VERSION", Some(126482)),
/// Allows async functions to be called from `dyn Trait`.
(incomplete, async_fn_in_dyn_trait, "1.85.0", Some(133119)),
/// Allows `#[track_caller]` on async functions.

View file

@ -189,19 +189,8 @@ language_item_table! {
Drop, sym::drop, drop_trait, Target::Trait, GenericRequirement::None;
Destruct, sym::destruct, destruct_trait, Target::Trait, GenericRequirement::None;
AsyncDrop, sym::async_drop, async_drop_trait, Target::Trait, GenericRequirement::Exact(0);
AsyncDestruct, sym::async_destruct, async_destruct_trait, Target::Trait, GenericRequirement::Exact(0);
AsyncDrop, sym::async_drop, async_drop_trait, Target::Trait, GenericRequirement::None;
AsyncDropInPlace, sym::async_drop_in_place, async_drop_in_place_fn, Target::Fn, GenericRequirement::Exact(1);
SurfaceAsyncDropInPlace, sym::surface_async_drop_in_place, surface_async_drop_in_place_fn, Target::Fn, GenericRequirement::Exact(1);
AsyncDropSurfaceDropInPlace, sym::async_drop_surface_drop_in_place, async_drop_surface_drop_in_place_fn, Target::Fn, GenericRequirement::Exact(1);
AsyncDropSlice, sym::async_drop_slice, async_drop_slice_fn, Target::Fn, GenericRequirement::Exact(1);
AsyncDropChain, sym::async_drop_chain, async_drop_chain_fn, Target::Fn, GenericRequirement::Exact(2);
AsyncDropNoop, sym::async_drop_noop, async_drop_noop_fn, Target::Fn, GenericRequirement::Exact(0);
AsyncDropDeferredDropInPlace, sym::async_drop_deferred_drop_in_place, async_drop_deferred_drop_in_place_fn, Target::Fn, GenericRequirement::Exact(1);
AsyncDropFuse, sym::async_drop_fuse, async_drop_fuse_fn, Target::Fn, GenericRequirement::Exact(1);
AsyncDropDefer, sym::async_drop_defer, async_drop_defer_fn, Target::Fn, GenericRequirement::Exact(1);
AsyncDropEither, sym::async_drop_either, async_drop_either_fn, Target::Fn, GenericRequirement::Exact(3);
CoerceUnsized, sym::coerce_unsized, coerce_unsized_trait, Target::Trait, GenericRequirement::Minimum(1);
DispatchFromDyn, sym::dispatch_from_dyn, dispatch_from_dyn_trait, Target::Trait, GenericRequirement::Minimum(1);
@ -321,6 +310,10 @@ language_item_table! {
PanicAsyncGenFnResumedPanic, sym::panic_const_async_gen_fn_resumed_panic, panic_const_async_gen_fn_resumed_panic, Target::Fn, GenericRequirement::None;
PanicGenFnNonePanic, sym::panic_const_gen_fn_none_panic, panic_const_gen_fn_none_panic, Target::Fn, GenericRequirement::None;
PanicNullPointerDereference, sym::panic_null_pointer_dereference, panic_null_pointer_dereference, Target::Fn, GenericRequirement::None;
PanicCoroutineResumedDrop, sym::panic_const_coroutine_resumed_drop, panic_const_coroutine_resumed_drop, Target::Fn, GenericRequirement::None;
PanicAsyncFnResumedDrop, sym::panic_const_async_fn_resumed_drop, panic_const_async_fn_resumed_drop, Target::Fn, GenericRequirement::None;
PanicAsyncGenFnResumedDrop, sym::panic_const_async_gen_fn_resumed_drop, panic_const_async_gen_fn_resumed_drop, Target::Fn, GenericRequirement::None;
PanicGenFnNoneDrop, sym::panic_const_gen_fn_none_drop, panic_const_gen_fn_none_drop, Target::Fn, GenericRequirement::None;
/// libstd panic entry point. Necessary for const eval to be able to catch it
BeginPanic, sym::begin_panic, begin_panic_fn, Target::Fn, GenericRequirement::None;
@ -333,7 +326,6 @@ language_item_table! {
ExchangeMalloc, sym::exchange_malloc, exchange_malloc_fn, Target::Fn, GenericRequirement::None;
DropInPlace, sym::drop_in_place, drop_in_place_fn, Target::Fn, GenericRequirement::Minimum(1);
FallbackSurfaceDrop, sym::fallback_surface_drop, fallback_surface_drop_fn, Target::Fn, GenericRequirement::None;
AllocLayout, sym::alloc_layout, alloc_layout, Target::Struct, GenericRequirement::None;
/// For all binary crates without `#![no_main]`, Rust will generate a "main" function.

View file

@ -34,11 +34,9 @@ pub(crate) fn check_legal_trait_for_method_call(
receiver: Option<Span>,
expr_span: Span,
trait_id: DefId,
body_id: DefId,
_body_id: DefId,
) -> Result<(), ErrorGuaranteed> {
if tcx.is_lang_item(trait_id, LangItem::Drop)
&& !tcx.is_lang_item(body_id, LangItem::FallbackSurfaceDrop)
{
if tcx.is_lang_item(trait_id, LangItem::Drop) {
let sugg = if let Some(receiver) = receiver.filter(|s| !s.is_empty()) {
errors::ExplicitDestructorCallSugg::Snippet {
lo: expr_span.shrink_to_lo(),

View file

@ -982,11 +982,13 @@ fn run_required_analyses(tcx: TyCtxt<'_>) {
let _ = tcx.ensure_ok().check_coroutine_obligations(
tcx.typeck_root_def_id(def_id.to_def_id()).expect_local(),
);
// Eagerly check the unsubstituted layout for cycles.
tcx.ensure_ok().layout_of(
ty::TypingEnv::post_analysis(tcx, def_id.to_def_id())
.as_query_input(tcx.type_of(def_id).instantiate_identity()),
);
if !tcx.is_async_drop_in_place_coroutine(def_id.to_def_id()) {
// Eagerly check the unsubstituted layout for cycles.
tcx.ensure_ok().layout_of(
ty::TypingEnv::post_analysis(tcx, def_id.to_def_id())
.as_query_input(tcx.type_of(def_id).instantiate_identity()),
);
}
}
});
});

View file

@ -1,7 +1,11 @@
middle_assert_async_resume_after_drop = `async fn` resumed after async drop
middle_assert_async_resume_after_panic = `async fn` resumed after panicking
middle_assert_async_resume_after_return = `async fn` resumed after completion
middle_assert_coroutine_resume_after_drop = coroutine resumed after async drop
middle_assert_coroutine_resume_after_panic = coroutine resumed after panicking
middle_assert_coroutine_resume_after_return = coroutine resumed after completion
@ -9,6 +13,8 @@ middle_assert_coroutine_resume_after_return = coroutine resumed after completion
middle_assert_divide_by_zero =
attempt to divide `{$val}` by zero
middle_assert_gen_resume_after_drop = `gen` fn or block cannot be further iterated on after it async dropped
middle_assert_gen_resume_after_panic = `gen` fn or block cannot be further iterated on after it panicked
middle_assert_misaligned_ptr_deref =

View file

@ -9,6 +9,7 @@ macro_rules! arena_types {
($macro:path) => (
$macro!([
[] layout: rustc_abi::LayoutData<rustc_abi::FieldIdx, rustc_abi::VariantIdx>,
[] proxy_coroutine_layout: rustc_middle::mir::CoroutineLayout<'tcx>,
[] fn_abi: rustc_target::callconv::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
// AdtDef are interned and compared by address
[decode] adt_def: rustc_middle::ty::AdtDefData,

View file

@ -44,6 +44,7 @@ pub enum ExportedSymbol<'tcx> {
Generic(DefId, GenericArgsRef<'tcx>),
DropGlue(Ty<'tcx>),
AsyncDropGlueCtorShim(Ty<'tcx>),
AsyncDropGlue(DefId, Ty<'tcx>),
ThreadLocalShim(DefId),
NoDefId(ty::SymbolName<'tcx>),
}
@ -63,6 +64,9 @@ impl<'tcx> ExportedSymbol<'tcx> {
ExportedSymbol::AsyncDropGlueCtorShim(ty) => {
tcx.symbol_name(ty::Instance::resolve_async_drop_in_place(tcx, ty))
}
ExportedSymbol::AsyncDropGlue(def_id, ty) => {
tcx.symbol_name(ty::Instance::resolve_async_drop_in_place_poll(tcx, def_id, ty))
}
ExportedSymbol::ThreadLocalShim(def_id) => tcx.symbol_name(ty::Instance {
def: ty::InstanceKind::ThreadLocalShim(def_id),
args: ty::GenericArgs::empty(),

View file

@ -200,7 +200,13 @@ pub struct CoroutineInfo<'tcx> {
/// Coroutine drop glue. This field is populated after the state transform pass.
pub coroutine_drop: Option<Body<'tcx>>,
/// The layout of a coroutine. This field is populated after the state transform pass.
/// Coroutine async drop glue.
pub coroutine_drop_async: Option<Body<'tcx>>,
/// When coroutine has sync drop, this is async proxy calling `coroutine_drop` sync impl.
pub coroutine_drop_proxy_async: Option<Body<'tcx>>,
/// The layout of a coroutine. Produced by the state transformation.
pub coroutine_layout: Option<CoroutineLayout<'tcx>>,
/// If this is a coroutine then record the type of source expression that caused this coroutine
@ -220,6 +226,8 @@ impl<'tcx> CoroutineInfo<'tcx> {
yield_ty: Some(yield_ty),
resume_ty: Some(resume_ty),
coroutine_drop: None,
coroutine_drop_async: None,
coroutine_drop_proxy_async: None,
coroutine_layout: None,
}
}
@ -587,6 +595,26 @@ impl<'tcx> Body<'tcx> {
self.coroutine.as_ref().and_then(|coroutine| coroutine.coroutine_drop.as_ref())
}
#[inline]
pub fn coroutine_drop_async(&self) -> Option<&Body<'tcx>> {
self.coroutine.as_ref().and_then(|coroutine| coroutine.coroutine_drop_async.as_ref())
}
#[inline]
pub fn coroutine_requires_async_drop(&self) -> bool {
self.coroutine_drop_async().is_some()
}
#[inline]
pub fn future_drop_poll(&self) -> Option<&Body<'tcx>> {
self.coroutine.as_ref().and_then(|coroutine| {
coroutine
.coroutine_drop_async
.as_ref()
.or(coroutine.coroutine_drop_proxy_async.as_ref())
})
}
#[inline]
pub fn coroutine_kind(&self) -> Option<CoroutineKind> {
self.coroutine.as_ref().map(|coroutine| coroutine.coroutine_kind)

View file

@ -530,6 +530,8 @@ impl<'tcx> CodegenUnit<'tcx> {
| InstanceKind::CloneShim(..)
| InstanceKind::ThreadLocalShim(..)
| InstanceKind::FnPtrAddrShim(..)
| InstanceKind::AsyncDropGlue(..)
| InstanceKind::FutureDropPollShim(..)
| InstanceKind::AsyncDropGlueCtorShim(..) => None,
}
}

View file

@ -253,9 +253,7 @@ fn dump_path<'tcx>(
}));
s
}
ty::InstanceKind::AsyncDropGlueCtorShim(_, Some(ty)) => {
// Unfortunately, pretty-printed types are not very filename-friendly.
// We do some filtering.
ty::InstanceKind::AsyncDropGlueCtorShim(_, ty) => {
let mut s = ".".to_owned();
s.extend(ty.to_string().chars().filter_map(|c| match c {
' ' => None,
@ -264,6 +262,34 @@ fn dump_path<'tcx>(
}));
s
}
ty::InstanceKind::AsyncDropGlue(_, ty) => {
let ty::Coroutine(_, args) = ty.kind() else {
bug!();
};
let ty = args.first().unwrap().expect_ty();
let mut s = ".".to_owned();
s.extend(ty.to_string().chars().filter_map(|c| match c {
' ' => None,
':' | '<' | '>' => Some('_'),
c => Some(c),
}));
s
}
ty::InstanceKind::FutureDropPollShim(_, proxy_cor, impl_cor) => {
let mut s = ".".to_owned();
s.extend(proxy_cor.to_string().chars().filter_map(|c| match c {
' ' => None,
':' | '<' | '>' => Some('_'),
c => Some(c),
}));
s.push('.');
s.extend(impl_cor.to_string().chars().filter_map(|c| match c {
' ' => None,
':' | '<' | '>' => Some('_'),
c => Some(c),
}));
s
}
_ => String::new(),
};
@ -1050,7 +1076,13 @@ impl<'tcx> TerminatorKind<'tcx> {
Call { target: None, unwind: _, .. } => vec![],
Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
Yield { drop: None, .. } => vec!["resume".into()],
Drop { unwind: UnwindAction::Cleanup(_), .. } => vec!["return".into(), "unwind".into()],
Drop { unwind: UnwindAction::Cleanup(_), drop: Some(_), .. } => {
vec!["return".into(), "unwind".into(), "drop".into()]
}
Drop { unwind: UnwindAction::Cleanup(_), drop: None, .. } => {
vec!["return".into(), "unwind".into()]
}
Drop { unwind: _, drop: Some(_), .. } => vec!["return".into(), "drop".into()],
Drop { unwind: _, .. } => vec!["return".into()],
Assert { unwind: UnwindAction::Cleanup(_), .. } => {
vec!["success".into(), "unwind".into()]

View file

@ -77,6 +77,8 @@ pub enum MirPhase {
/// exception is fields of packed structs. In analysis MIR, `Drop(P)` for a `P` that might be
/// misaligned for this reason implicitly moves `P` to a temporary before dropping. Runtime
/// MIR has no such rules, and dropping a misaligned place is simply UB.
/// - Async drops: after drop elaboration some drops may become async (`drop`, `async_fut` fields).
/// The StateTransform pass will expand those async drops or reset them to sync.
/// - Unwinding: in analysis MIR, unwinding from a function which may not unwind aborts. In
/// runtime MIR, this is UB.
/// - Retags: If `-Zmir-emit-retag` is enabled, analysis MIR has "implicit" retags in the same
@ -768,7 +770,34 @@ pub enum TerminatorKind<'tcx> {
/// The `replace` flag indicates whether this terminator was created as part of an assignment.
/// This should only be used for diagnostic purposes, and does not have any operational
/// meaning.
Drop { place: Place<'tcx>, target: BasicBlock, unwind: UnwindAction, replace: bool },
///
/// Async drop processing:
/// In compiler/rustc_mir_build/src/build/scope.rs we detect possible async drop:
/// drop of object with `needs_async_drop`.
/// Async drop later, in StateTransform pass, may be expanded into additional yield-point
/// for poll-loop of async drop future.
/// So we need a prepared 'drop' target block in a similar way as for the `Yield` terminator
/// (see `drops.build_mir::<CoroutineDrop>` in scopes.rs).
/// In compiler/rustc_mir_transform/src/elaborate_drops.rs for object implementing `AsyncDrop` trait
/// we need to prepare async drop feature - resolve `AsyncDrop::drop` and codegen call.
/// `async_fut` is set to the corresponding local.
/// For coroutine drop we don't need this logic because coroutine drop works with the same
/// layout object as the coroutine itself. So `async_fut` will be `None` for coroutine drop.
/// Both `drop` and `async_fut` fields are only used in compiler/rustc_mir_transform/src/coroutine.rs,
/// StateTransform pass. In `expand_async_drops` async drops are expanded
/// into one or two yield points with poll ready/pending switch.
/// When a coroutine has any internal async drop, the coroutine drop function will be async
/// (generated by `create_coroutine_drop_shim_async`, not `create_coroutine_drop_shim`).
Drop {
place: Place<'tcx>,
target: BasicBlock,
unwind: UnwindAction,
replace: bool,
/// Cleanup to be done if the coroutine is dropped at this suspend point (for async drop).
drop: Option<BasicBlock>,
/// Prepared async future local (for async drop)
async_fut: Option<Local>,
},
/// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of
/// the referred to function. The operand types must match the argument types of the function.
@ -1043,6 +1072,7 @@ pub enum AssertKind<O> {
RemainderByZero(O),
ResumedAfterReturn(CoroutineKind),
ResumedAfterPanic(CoroutineKind),
ResumedAfterDrop(CoroutineKind),
MisalignedPointerDereference { required: O, found: O },
NullPointerDereference,
}

View file

@ -208,6 +208,16 @@ impl<O> AssertKind<O> {
LangItem::PanicGenFnNonePanic
}
NullPointerDereference => LangItem::PanicNullPointerDereference,
ResumedAfterDrop(CoroutineKind::Coroutine(_)) => LangItem::PanicCoroutineResumedDrop,
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::Async, _)) => {
LangItem::PanicAsyncFnResumedDrop
}
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, _)) => {
LangItem::PanicAsyncGenFnResumedDrop
}
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::Gen, _)) => {
LangItem::PanicGenFnNoneDrop
}
BoundsCheck { .. } | MisalignedPointerDereference { .. } => {
bug!("Unexpected AssertKind")
@ -298,6 +308,18 @@ impl<O> AssertKind<O> {
ResumedAfterPanic(CoroutineKind::Desugared(CoroutineDesugaring::Gen, _)) => {
write!(f, "\"`gen fn` should just keep returning `None` after panicking\"")
}
ResumedAfterDrop(CoroutineKind::Coroutine(_)) => {
write!(f, "\"coroutine resumed after async drop\"")
}
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::Async, _)) => {
write!(f, "\"`async fn` resumed after async drop\"")
}
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, _)) => {
write!(f, "\"`async gen fn` resumed after async drop\"")
}
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::Gen, _)) => {
write!(f, "\"`gen fn` resumed after drop\"")
}
}
}
@ -345,6 +367,19 @@ impl<O> AssertKind<O> {
middle_assert_coroutine_resume_after_panic
}
NullPointerDereference => middle_assert_null_ptr_deref,
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::Async, _)) => {
middle_assert_async_resume_after_drop
}
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, _)) => {
todo!()
}
ResumedAfterDrop(CoroutineKind::Desugared(CoroutineDesugaring::Gen, _)) => {
middle_assert_gen_resume_after_drop
}
ResumedAfterDrop(CoroutineKind::Coroutine(_)) => {
middle_assert_coroutine_resume_after_drop
}
MisalignedPointerDereference { .. } => middle_assert_misaligned_ptr_deref,
}
}
@ -377,7 +412,10 @@ impl<O> AssertKind<O> {
add!("left", format!("{left:#?}"));
add!("right", format!("{right:#?}"));
}
ResumedAfterReturn(_) | ResumedAfterPanic(_) | NullPointerDereference => {}
ResumedAfterReturn(_)
| ResumedAfterPanic(_)
| NullPointerDereference
| ResumedAfterDrop(_) => {}
MisalignedPointerDereference { required, found } => {
add!("required", format!("{required:#?}"));
add!("found", format!("{found:#?}"));
@ -457,7 +495,7 @@ mod helper {
#[define_opaque(Successors)]
pub fn successors_for_value(&self, value: u128) -> Successors<'_> {
let target = self.target_for_value(value);
(&[]).into_iter().copied().chain(Some(target))
(&[]).into_iter().copied().chain(Some(target).into_iter().chain(None))
}
}
@ -467,13 +505,23 @@ mod helper {
pub fn successors(&self) -> Successors<'_> {
use self::TerminatorKind::*;
match *self {
// 3-successors for async drop: target, unwind, dropline (parent coroutine drop)
Drop { target: ref t, unwind: UnwindAction::Cleanup(u), drop: Some(d), .. } => {
slice::from_ref(t)
.into_iter()
.copied()
.chain(Some(u).into_iter().chain(Some(d)))
}
// 2-successors
Call { target: Some(ref t), unwind: UnwindAction::Cleanup(u), .. }
| Yield { resume: ref t, drop: Some(u), .. }
| Drop { target: ref t, unwind: UnwindAction::Cleanup(u), .. }
| Drop { target: ref t, unwind: UnwindAction::Cleanup(u), drop: None, .. }
| Drop { target: ref t, unwind: _, drop: Some(u), .. }
| Assert { target: ref t, unwind: UnwindAction::Cleanup(u), .. }
| FalseUnwind { real_target: ref t, unwind: UnwindAction::Cleanup(u) } => {
slice::from_ref(t).into_iter().copied().chain(Some(u))
slice::from_ref(t).into_iter().copied().chain(Some(u).into_iter().chain(None))
}
// single successor
Goto { target: ref t }
| Call { target: None, unwind: UnwindAction::Cleanup(ref t), .. }
| Call { target: Some(ref t), unwind: _, .. }
@ -481,23 +529,33 @@ mod helper {
| Drop { target: ref t, unwind: _, .. }
| Assert { target: ref t, unwind: _, .. }
| FalseUnwind { real_target: ref t, unwind: _ } => {
slice::from_ref(t).into_iter().copied().chain(None)
slice::from_ref(t).into_iter().copied().chain(None.into_iter().chain(None))
}
// No successors
UnwindResume
| UnwindTerminate(_)
| CoroutineDrop
| Return
| Unreachable
| TailCall { .. }
| Call { target: None, unwind: _, .. } => (&[]).into_iter().copied().chain(None),
| Call { target: None, unwind: _, .. } => {
(&[]).into_iter().copied().chain(None.into_iter().chain(None))
}
// Multiple successors
InlineAsm { ref targets, unwind: UnwindAction::Cleanup(u), .. } => {
targets.iter().copied().chain(Some(u))
targets.iter().copied().chain(Some(u).into_iter().chain(None))
}
InlineAsm { ref targets, unwind: _, .. } => targets.iter().copied().chain(None),
SwitchInt { ref targets, .. } => targets.targets.iter().copied().chain(None),
FalseEdge { ref real_target, imaginary_target } => {
slice::from_ref(real_target).into_iter().copied().chain(Some(imaginary_target))
InlineAsm { ref targets, unwind: _, .. } => {
targets.iter().copied().chain(None.into_iter().chain(None))
}
SwitchInt { ref targets, .. } => {
targets.targets.iter().copied().chain(None.into_iter().chain(None))
}
// FalseEdge
FalseEdge { ref real_target, imaginary_target } => slice::from_ref(real_target)
.into_iter()
.copied()
.chain(Some(imaginary_target).into_iter().chain(None)),
}
}
@ -506,16 +564,31 @@ mod helper {
pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
use self::TerminatorKind::*;
match *self {
// 3-successors for async drop: target, unwind, dropline (parent coroutine drop)
Drop {
target: ref mut t,
unwind: UnwindAction::Cleanup(ref mut u),
drop: Some(ref mut d),
..
} => slice::from_mut(t).into_iter().chain(Some(u).into_iter().chain(Some(d))),
// 2-successors
Call {
target: Some(ref mut t), unwind: UnwindAction::Cleanup(ref mut u), ..
}
| Yield { resume: ref mut t, drop: Some(ref mut u), .. }
| Drop { target: ref mut t, unwind: UnwindAction::Cleanup(ref mut u), .. }
| Drop {
target: ref mut t,
unwind: UnwindAction::Cleanup(ref mut u),
drop: None,
..
}
| Drop { target: ref mut t, unwind: _, drop: Some(ref mut u), .. }
| Assert { target: ref mut t, unwind: UnwindAction::Cleanup(ref mut u), .. }
| FalseUnwind {
real_target: ref mut t,
unwind: UnwindAction::Cleanup(ref mut u),
} => slice::from_mut(t).into_iter().chain(Some(u)),
} => slice::from_mut(t).into_iter().chain(Some(u).into_iter().chain(None)),
// single successor
Goto { target: ref mut t }
| Call { target: None, unwind: UnwindAction::Cleanup(ref mut t), .. }
| Call { target: Some(ref mut t), unwind: _, .. }
@ -523,22 +596,33 @@ mod helper {
| Drop { target: ref mut t, unwind: _, .. }
| Assert { target: ref mut t, unwind: _, .. }
| FalseUnwind { real_target: ref mut t, unwind: _ } => {
slice::from_mut(t).into_iter().chain(None)
slice::from_mut(t).into_iter().chain(None.into_iter().chain(None))
}
// No successors
UnwindResume
| UnwindTerminate(_)
| CoroutineDrop
| Return
| Unreachable
| TailCall { .. }
| Call { target: None, unwind: _, .. } => (&mut []).into_iter().chain(None),
InlineAsm { ref mut targets, unwind: UnwindAction::Cleanup(ref mut u), .. } => {
targets.iter_mut().chain(Some(u))
| Call { target: None, unwind: _, .. } => {
(&mut []).into_iter().chain(None.into_iter().chain(None))
}
InlineAsm { ref mut targets, unwind: _, .. } => targets.iter_mut().chain(None),
SwitchInt { ref mut targets, .. } => targets.targets.iter_mut().chain(None),
// Multiple successors
InlineAsm { ref mut targets, unwind: UnwindAction::Cleanup(ref mut u), .. } => {
targets.iter_mut().chain(Some(u).into_iter().chain(None))
}
InlineAsm { ref mut targets, unwind: _, .. } => {
targets.iter_mut().chain(None.into_iter().chain(None))
}
SwitchInt { ref mut targets, .. } => {
targets.targets.iter_mut().chain(None.into_iter().chain(None))
}
// FalseEdge
FalseEdge { ref mut real_target, ref mut imaginary_target } => {
slice::from_mut(real_target).into_iter().chain(Some(imaginary_target))
slice::from_mut(real_target)
.into_iter()
.chain(Some(imaginary_target).into_iter().chain(None))
}
}
}
@ -671,8 +755,10 @@ impl<'tcx> TerminatorKind<'tcx> {
Goto { target } => TerminatorEdges::Single(target),
// FIXME: Maybe we also need TerminatorEdges::Trio for async drop
// (target + unwind + dropline)
Assert { target, unwind, expected: _, msg: _, cond: _ }
| Drop { target, unwind, place: _, replace: _ }
| Drop { target, unwind, place: _, replace: _, drop: _, async_fut: _ }
| FalseUnwind { real_target: target, unwind } => match unwind {
UnwindAction::Cleanup(unwind) => TerminatorEdges::Double(target, unwind),
UnwindAction::Continue | UnwindAction::Terminate(_) | UnwindAction::Unreachable => {

View file

@ -353,17 +353,21 @@ macro_rules! make_mir_visitor {
coroutine_closure_def_id: _def_id,
receiver_by_ref: _,
}
| ty::InstanceKind::AsyncDropGlueCtorShim(_def_id, None)
| ty::InstanceKind::DropGlue(_def_id, None) => {}
ty::InstanceKind::FnPtrShim(_def_id, ty)
| ty::InstanceKind::DropGlue(_def_id, Some(ty))
| ty::InstanceKind::CloneShim(_def_id, ty)
| ty::InstanceKind::FnPtrAddrShim(_def_id, ty)
| ty::InstanceKind::AsyncDropGlueCtorShim(_def_id, Some(ty)) => {
| ty::InstanceKind::AsyncDropGlue(_def_id, ty)
| ty::InstanceKind::AsyncDropGlueCtorShim(_def_id, ty) => {
// FIXME(eddyb) use a better `TyContext` here.
self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
}
ty::InstanceKind::FutureDropPollShim(_def_id, proxy_ty, impl_ty) => {
self.visit_ty($(& $mutability)? *proxy_ty, TyContext::Location(location));
self.visit_ty($(& $mutability)? *impl_ty, TyContext::Location(location));
}
}
self.visit_args(callee_args, location);
}
@ -521,7 +525,14 @@ macro_rules! make_mir_visitor {
self.visit_operand(discr, location);
}
TerminatorKind::Drop { place, target: _, unwind: _, replace: _ } => {
TerminatorKind::Drop {
place,
target: _,
unwind: _,
replace: _,
drop: _,
async_fut: _,
} => {
self.visit_place(
place,
PlaceContext::MutatingUse(MutatingUseContext::Drop),
@ -634,7 +645,7 @@ macro_rules! make_mir_visitor {
OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
self.visit_operand(op, location);
}
ResumedAfterReturn(_) | ResumedAfterPanic(_) | NullPointerDereference => {
ResumedAfterReturn(_) | ResumedAfterPanic(_) | NullPointerDereference | ResumedAfterDrop(_) => {
// Nothing to visit
}
MisalignedPointerDereference { required, found } => {

View file

@ -1359,7 +1359,11 @@ rustc_queries! {
/// Generates a MIR body for the shim.
query mir_shims(key: ty::InstanceKind<'tcx>) -> &'tcx mir::Body<'tcx> {
arena_cache
desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) }
desc {
|tcx| "generating MIR shim for `{}`, instance={:?}",
tcx.def_path_str(key.def_id()),
key
}
}
/// The `symbol_name` query provides the symbol name for calling a
@ -1590,6 +1594,10 @@ rustc_queries! {
query is_unpin_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
desc { "computing whether `{}` is `Unpin`", env.value }
}
/// Query backing `Ty::is_async_drop`.
query is_async_drop_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
desc { "computing whether `{}` is `AsyncDrop`", env.value }
}
/// Query backing `Ty::needs_drop`.
query needs_drop_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
desc { "computing whether `{}` needs drop", env.value }
@ -1622,6 +1630,14 @@ rustc_queries! {
cache_on_disk_if { true }
}
/// A list of types where the ADT requires async drop if and only if any of
/// those types require async drop. If the ADT is known to always need async drop
/// then `Err(AlwaysRequiresDrop)` is returned.
query adt_async_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
desc { |tcx| "computing when `{}` needs async drop", tcx.def_path_str(def_id) }
cache_on_disk_if { true }
}
/// A list of types where the ADT requires drop if and only if any of those types
/// has significant drop. A type marked with the attribute `rustc_insignificant_dtor`
/// is considered to not be significant. A drop is significant if it is implemented

View file

@ -717,7 +717,6 @@ macro_rules! bidirectional_lang_item_map {
bidirectional_lang_item_map! {
// tidy-alphabetical-start
AsyncDestruct,
AsyncFn,
AsyncFnKindHelper,
AsyncFnKindUpvars,
@ -1715,6 +1714,10 @@ impl<'tcx> TyCtxt<'tcx> {
self.coroutine_kind(def_id).is_some()
}
/// Returns `true` if `def_id` is the coroutine defined inside the
/// `async_drop_in_place` lang item (i.e. its parent is `AsyncDropInPlace`).
pub fn is_async_drop_in_place_coroutine(self, def_id: DefId) -> bool {
    let parent = self.parent(def_id);
    self.is_lang_item(parent, LangItem::AsyncDropInPlace)
}
/// Returns the movability of the coroutine of `def_id`, or panics
/// if given a `def_id` that is not a coroutine.
pub fn coroutine_movability(self, def_id: DefId) -> hir::Movability {

View file

@ -147,6 +147,9 @@ pub enum InstanceKind<'tcx> {
/// native support.
ThreadLocalShim(DefId),
/// Proxy shim for async drop of future (def_id, proxy_cor_ty, impl_cor_ty)
FutureDropPollShim(DefId, Ty<'tcx>, Ty<'tcx>),
/// `core::ptr::drop_in_place::<T>`.
///
/// The `DefId` is for `core::ptr::drop_in_place`.
@ -173,7 +176,13 @@ pub enum InstanceKind<'tcx> {
///
/// The `DefId` is for `core::future::async_drop::async_drop_in_place`, the `Ty`
/// is the type `T`.
AsyncDropGlueCtorShim(DefId, Option<Ty<'tcx>>),
AsyncDropGlueCtorShim(DefId, Ty<'tcx>),
/// `core::future::async_drop::async_drop_in_place::<'_, T>::{closure}`.
///
/// async_drop_in_place poll function implementation (for generated coroutine).
/// `Ty` here is `async_drop_in_place<T>::{closure}` coroutine type, not just `T`
AsyncDropGlue(DefId, Ty<'tcx>),
}
impl<'tcx> Instance<'tcx> {
@ -221,7 +230,9 @@ impl<'tcx> Instance<'tcx> {
.upstream_monomorphizations_for(def)
.and_then(|monos| monos.get(&self.args).cloned()),
InstanceKind::DropGlue(_, Some(_)) => tcx.upstream_drop_glue_for(self.args),
InstanceKind::AsyncDropGlueCtorShim(_, Some(_)) => {
InstanceKind::AsyncDropGlue(_, _) => None,
InstanceKind::FutureDropPollShim(_, _, _) => None,
InstanceKind::AsyncDropGlueCtorShim(_, _) => {
tcx.upstream_async_drop_glue_for(self.args)
}
_ => None,
@ -248,6 +259,8 @@ impl<'tcx> InstanceKind<'tcx> {
| InstanceKind::DropGlue(def_id, _)
| InstanceKind::CloneShim(def_id, _)
| InstanceKind::FnPtrAddrShim(def_id, _)
| InstanceKind::FutureDropPollShim(def_id, _, _)
| InstanceKind::AsyncDropGlue(def_id, _)
| InstanceKind::AsyncDropGlueCtorShim(def_id, _) => def_id,
}
}
@ -257,7 +270,9 @@ impl<'tcx> InstanceKind<'tcx> {
match self {
ty::InstanceKind::Item(def) => Some(def),
ty::InstanceKind::DropGlue(def_id, Some(_))
| InstanceKind::AsyncDropGlueCtorShim(def_id, Some(_))
| InstanceKind::AsyncDropGlueCtorShim(def_id, _)
| InstanceKind::AsyncDropGlue(def_id, _)
| InstanceKind::FutureDropPollShim(def_id, ..)
| InstanceKind::ThreadLocalShim(def_id) => Some(def_id),
InstanceKind::VTableShim(..)
| InstanceKind::ReifyShim(..)
@ -267,7 +282,6 @@ impl<'tcx> InstanceKind<'tcx> {
| InstanceKind::ClosureOnceShim { .. }
| ty::InstanceKind::ConstructCoroutineInClosureShim { .. }
| InstanceKind::DropGlue(..)
| InstanceKind::AsyncDropGlueCtorShim(..)
| InstanceKind::CloneShim(..)
| InstanceKind::FnPtrAddrShim(..) => None,
}
@ -292,7 +306,9 @@ impl<'tcx> InstanceKind<'tcx> {
let def_id = match *self {
ty::InstanceKind::Item(def) => def,
ty::InstanceKind::DropGlue(_, Some(_)) => return false,
ty::InstanceKind::AsyncDropGlueCtorShim(_, Some(_)) => return false,
ty::InstanceKind::AsyncDropGlueCtorShim(_, ty) => return ty.is_coroutine(),
ty::InstanceKind::FutureDropPollShim(_, _, _) => return false,
ty::InstanceKind::AsyncDropGlue(_, _) => return false,
ty::InstanceKind::ThreadLocalShim(_) => return false,
_ => return true,
};
@ -325,11 +341,12 @@ impl<'tcx> InstanceKind<'tcx> {
| InstanceKind::FnPtrAddrShim(..)
| InstanceKind::FnPtrShim(..)
| InstanceKind::DropGlue(_, Some(_))
| InstanceKind::AsyncDropGlueCtorShim(_, Some(_)) => false,
| InstanceKind::FutureDropPollShim(..)
| InstanceKind::AsyncDropGlue(_, _) => false,
InstanceKind::AsyncDropGlueCtorShim(_, _) => false,
InstanceKind::ClosureOnceShim { .. }
| InstanceKind::ConstructCoroutineInClosureShim { .. }
| InstanceKind::DropGlue(..)
| InstanceKind::AsyncDropGlueCtorShim(..)
| InstanceKind::Item(_)
| InstanceKind::Intrinsic(..)
| InstanceKind::ReifyShim(..)
@ -406,8 +423,11 @@ pub fn fmt_instance(
InstanceKind::DropGlue(_, Some(ty)) => write!(f, " - shim(Some({ty}))"),
InstanceKind::CloneShim(_, ty) => write!(f, " - shim({ty})"),
InstanceKind::FnPtrAddrShim(_, ty) => write!(f, " - shim({ty})"),
InstanceKind::AsyncDropGlueCtorShim(_, None) => write!(f, " - shim(None)"),
InstanceKind::AsyncDropGlueCtorShim(_, Some(ty)) => write!(f, " - shim(Some({ty}))"),
InstanceKind::FutureDropPollShim(_, proxy_ty, impl_ty) => {
write!(f, " - dropshim({proxy_ty}-{impl_ty})")
}
InstanceKind::AsyncDropGlue(_, ty) => write!(f, " - shim({ty})"),
InstanceKind::AsyncDropGlueCtorShim(_, ty) => write!(f, " - shim(Some({ty}))"),
}
}
@ -425,6 +445,51 @@ impl<'tcx> fmt::Display for Instance<'tcx> {
}
}
// async_drop_in_place<T>::coroutine.poll, when T is a standard coroutine,
// should be resolved to this coroutine's future_drop_poll (through FutureDropPollShim proxy).
// async_drop_in_place<async_drop_in_place<T>::coroutine>::coroutine.poll,
// when T is a standard coroutine, should be resolved to this coroutine's future_drop_poll.
// async_drop_in_place<async_drop_in_place<T>::coroutine>::coroutine.poll,
// when T is not a coroutine, should be resolved to the innermost
// async_drop_in_place<T>::coroutine's poll function (through FutureDropPollShim proxy)
fn resolve_async_drop_poll<'tcx>(mut cor_ty: Ty<'tcx>) -> Instance<'tcx> {
    // `first_cor` is the outermost `async_drop_in_place<..>::{closure}` coroutine we
    // were asked to resolve; `poll_def_id` identifies that closure's definition so we
    // can recognize further nested wrappers of the same kind below.
    let first_cor = cor_ty;
    let ty::Coroutine(poll_def_id, proxy_args) = first_cor.kind() else {
        bug!();
    };
    let poll_def_id = *poll_def_id;
    let mut child_ty = cor_ty;
    // Walk inward through nested `async_drop_in_place<..>::{closure}` layers.
    // Invariant: `cor_ty` is the deepest coroutine seen so far, `child_ty` is the
    // candidate type one level further in.
    loop {
        if let ty::Coroutine(child_def, child_args) = child_ty.kind() {
            cor_ty = child_ty;
            if *child_def == poll_def_id {
                // Still the async-drop closure itself: peel one layer and descend
                // into its type argument.
                child_ty = child_args.first().unwrap().expect_ty();
                continue;
            } else {
                // Reached a coroutine that is not the async-drop closure: poll is
                // forwarded to it via a `FutureDropPollShim` proxy from the outer
                // closure (`first_cor`) to the impl coroutine (`cor_ty`).
                return Instance {
                    def: ty::InstanceKind::FutureDropPollShim(poll_def_id, first_cor, cor_ty),
                    args: proxy_args,
                };
            }
        } else {
            // The innermost argument is not a coroutine at all; `cor_ty` is the
            // deepest `async_drop_in_place<..>::{closure}` coroutine we found.
            let ty::Coroutine(_, child_args) = cor_ty.kind() else {
                bug!();
            };
            if first_cor != cor_ty {
                // Nested wrappers were peeled: proxy from the outermost closure to
                // the innermost one.
                return Instance {
                    def: ty::InstanceKind::FutureDropPollShim(poll_def_id, first_cor, cor_ty),
                    args: proxy_args,
                };
            } else {
                // No nesting: resolve directly to the async drop glue of this closure.
                return Instance {
                    def: ty::InstanceKind::AsyncDropGlue(poll_def_id, cor_ty),
                    args: child_args,
                };
            }
        }
    }
}
impl<'tcx> Instance<'tcx> {
pub fn new(def_id: DefId, args: GenericArgsRef<'tcx>) -> Instance<'tcx> {
assert!(
@ -736,6 +801,15 @@ impl<'tcx> Instance<'tcx> {
)
}
/// Resolves the poll function of `async_drop_in_place::<T>::{closure}` for a
/// fully monomorphized `ty`, where `def_id` is the poll item to resolve.
///
/// Panics (via `expect_resolve`) if resolution fails.
pub fn resolve_async_drop_in_place_poll(
    tcx: TyCtxt<'tcx>,
    def_id: DefId,
    ty: Ty<'tcx>,
) -> ty::Instance<'tcx> {
    let typing_env = ty::TypingEnv::fully_monomorphized();
    let generic_args = tcx.mk_args(&[ty.into()]);
    Instance::expect_resolve(tcx, typing_env, def_id, generic_args, DUMMY_SP)
}
#[instrument(level = "debug", skip(tcx), ret)]
pub fn fn_once_adapter_instance(
tcx: TyCtxt<'tcx>,
@ -800,6 +874,9 @@ impl<'tcx> Instance<'tcx> {
};
if tcx.is_lang_item(trait_item_id, coroutine_callable_item) {
if tcx.is_async_drop_in_place_coroutine(coroutine_def_id) {
return Some(resolve_async_drop_poll(rcvr_args.type_at(0)));
}
let ty::Coroutine(_, id_args) = *tcx.type_of(coroutine_def_id).skip_binder().kind()
else {
bug!()

View file

@ -17,7 +17,7 @@ use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::num::NonZero;
use std::ptr::NonNull;
use std::{fmt, str};
use std::{fmt, iter, str};
pub use adt::*;
pub use assoc::*;
@ -39,6 +39,7 @@ use rustc_hir::LangItem;
use rustc_hir::def::{CtorKind, CtorOf, DefKind, DocLinkResMap, LifetimeRes, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LocalDefIdMap};
use rustc_index::IndexVec;
use rustc_index::bit_set::BitMatrix;
use rustc_macros::{
Decodable, Encodable, HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable,
extension,
@ -103,7 +104,7 @@ pub use self::visit::*;
use crate::error::{OpaqueHiddenTypeMismatch, TypeMismatchReason};
use crate::metadata::ModChild;
use crate::middle::privacy::EffectiveVisibilities;
use crate::mir::{Body, CoroutineLayout};
use crate::mir::{Body, CoroutineLayout, CoroutineSavedLocal, SourceInfo};
use crate::query::{IntoQueryParam, Providers};
use crate::ty;
use crate::ty::codec::{TyDecoder, TyEncoder};
@ -1739,11 +1740,13 @@ impl<'tcx> TyCtxt<'tcx> {
| ty::InstanceKind::Virtual(..)
| ty::InstanceKind::ClosureOnceShim { .. }
| ty::InstanceKind::ConstructCoroutineInClosureShim { .. }
| ty::InstanceKind::FutureDropPollShim(..)
| ty::InstanceKind::DropGlue(..)
| ty::InstanceKind::CloneShim(..)
| ty::InstanceKind::ThreadLocalShim(..)
| ty::InstanceKind::FnPtrAddrShim(..)
| ty::InstanceKind::AsyncDropGlueCtorShim(..) => self.mir_shims(instance),
| ty::InstanceKind::AsyncDropGlueCtorShim(..)
| ty::InstanceKind::AsyncDropGlue(..) => self.mir_shims(instance),
}
}
@ -1859,12 +1862,12 @@ impl<'tcx> TyCtxt<'tcx> {
self.def_kind(trait_def_id) == DefKind::TraitAlias
}
/// Returns layout of a coroutine. Layout might be unavailable if the
/// Returns layout of a non-async-drop coroutine. Layout might be unavailable if the
/// coroutine is tainted by errors.
///
/// Takes `coroutine_kind` which can be acquired from the `CoroutineArgs::kind_ty`,
/// e.g. `args.as_coroutine().kind_ty()`.
pub fn coroutine_layout(
fn ordinary_coroutine_layout(
self,
def_id: DefId,
coroutine_kind_ty: Ty<'tcx>,
@ -1898,6 +1901,51 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
/// Returns the layout of an `async_drop_in_place::{closure}` coroutine
/// (the one returned from `async fn async_drop_in_place<T>(..)`).
///
/// The layout comes from the shim MIR built for `AsyncDropGlue`; it may be
/// unavailable if the coroutine is tainted by errors.
fn async_drop_coroutine_layout(
    self,
    def_id: DefId,
    args: GenericArgsRef<'tcx>,
) -> Option<&'tcx CoroutineLayout<'tcx>> {
    let coroutine_ty = Ty::new_coroutine(self, def_id, args);
    let glue = InstanceKind::AsyncDropGlue(def_id, coroutine_ty);
    self.mir_shims(glue).coroutine_layout_raw()
}
/// Returns the layout of a coroutine. The layout might be unavailable if the
/// coroutine is tainted by errors.
///
/// Ordinary coroutines are handled via `ordinary_coroutine_layout`; the
/// `async_drop_in_place<T>::{closure}` coroutine is special-cased below.
pub fn coroutine_layout(
    self,
    def_id: DefId,
    args: GenericArgsRef<'tcx>,
) -> Option<&'tcx CoroutineLayout<'tcx>> {
    if !self.is_async_drop_in_place_coroutine(def_id) {
        return self.ordinary_coroutine_layout(def_id, args.as_coroutine().kind_ty());
    }
    let arg_cor_ty = args.first().unwrap().expect_ty();
    if !arg_cor_ty.is_coroutine() {
        // Non-coroutine `T`: take the layout from the generated shim MIR.
        return self.async_drop_coroutine_layout(def_id, args);
    }
    // When `T` is itself a coroutine, `async_drop_in_place<T>::{closure}` keeps
    // the inner coroutine's pointer in its upvars and needs no saved locals, so
    // we synthesize an "empty" layout with only the reserved variants.
    let source_info = SourceInfo::outermost(self.def_span(def_id));
    let variant_fields: IndexVec<VariantIdx, IndexVec<FieldIdx, CoroutineSavedLocal>> =
        iter::repeat(IndexVec::new()).take(CoroutineArgs::RESERVED_VARIANTS).collect();
    let proxy_layout = CoroutineLayout {
        field_tys: [].into(),
        field_names: [].into(),
        variant_fields,
        variant_source_info: [source_info].into(),
        storage_conflicts: BitMatrix::new(0, 0),
    };
    Some(self.arena.alloc(proxy_layout))
}
/// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
/// If it implements no trait, returns `None`.
pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {

View file

@ -4,7 +4,6 @@
use std::assert_matches::debug_assert_matches;
use std::borrow::Cow;
use std::iter;
use std::ops::{ControlFlow, Range};
use hir::def::{CtorKind, DefKind};
@ -19,7 +18,7 @@ use rustc_type_ir::TyKind::*;
use rustc_type_ir::walk::TypeWalker;
use rustc_type_ir::{self as ir, BoundVar, CollectAndApply, DynKind, TypeVisitableExt, elaborate};
use tracing::instrument;
use ty::util::{AsyncDropGlueMorphology, IntTypeExt};
use ty::util::IntTypeExt;
use super::GenericParamDefKind;
use crate::infer::canonical::Canonical;
@ -78,8 +77,7 @@ impl<'tcx> ty::CoroutineArgs<TyCtxt<'tcx>> {
#[inline]
fn variant_range(&self, def_id: DefId, tcx: TyCtxt<'tcx>) -> Range<VariantIdx> {
// FIXME requires optimized MIR
FIRST_VARIANT
..tcx.coroutine_layout(def_id, tcx.types.unit).unwrap().variant_fields.next_index()
FIRST_VARIANT..tcx.coroutine_layout(def_id, self.args).unwrap().variant_fields.next_index()
}
/// The discriminant for the given variant. Panics if the `variant_index` is
@ -139,10 +137,14 @@ impl<'tcx> ty::CoroutineArgs<TyCtxt<'tcx>> {
def_id: DefId,
tcx: TyCtxt<'tcx>,
) -> impl Iterator<Item: Iterator<Item = Ty<'tcx>>> {
let layout = tcx.coroutine_layout(def_id, self.kind_ty()).unwrap();
let layout = tcx.coroutine_layout(def_id, self.args).unwrap();
layout.variant_fields.iter().map(move |variant| {
variant.iter().map(move |field| {
ty::EarlyBinder::bind(layout.field_tys[*field].ty).instantiate(tcx, self.args)
if tcx.is_async_drop_in_place_coroutine(def_id) {
layout.field_tys[*field].ty
} else {
ty::EarlyBinder::bind(layout.field_tys[*field].ty).instantiate(tcx, self.args)
}
})
})
}
@ -1046,10 +1048,6 @@ impl<'tcx> rustc_type_ir::inherent::Ty<TyCtxt<'tcx>> for Ty<'tcx> {
self.discriminant_ty(interner)
}
fn async_destructor_ty(self, interner: TyCtxt<'tcx>) -> Ty<'tcx> {
self.async_destructor_ty(interner)
}
fn has_unsafe_fields(self) -> bool {
Ty::has_unsafe_fields(self)
}
@ -1419,6 +1417,34 @@ impl<'tcx> Ty<'tcx> {
cf.is_break()
}
/// Returns the deepest coroutine reachable by repeatedly unwrapping
/// `async_drop_in_place::{closure}` layers.
///
/// `async_drop_in_place<T>::{closure}`, when `T` is a coroutine, is a
/// proxy-impl that forwards async drop poll to the inner coroutine. The
/// callback `f` is invoked once for every coroutine visited along the way,
/// including the returned one.
///
/// Panics if `self` is not a coroutine.
pub fn find_async_drop_impl_coroutine<F: FnMut(Ty<'tcx>)>(
    self,
    tcx: TyCtxt<'tcx>,
    mut f: F,
) -> Ty<'tcx> {
    assert!(self.is_coroutine());
    let mut current = self;
    while let ty::Coroutine(def_id, args) = current.kind() {
        f(current);
        if !tcx.is_async_drop_in_place_coroutine(*def_id) {
            // A coroutine that is not the async-drop closure: this is the impl.
            return current;
        }
        let inner = args.first().unwrap().expect_ty();
        if !inner.is_coroutine() {
            // The wrapped type is not a coroutine; the closure itself is the
            // deepest coroutine.
            return current;
        }
        current = inner;
    }
    // Unreachable in practice: the assert above guarantees at least one
    // iteration, and every exit inside the loop returns.
    current
}
/// Returns the type and mutability of `*ty`.
///
/// The parameter `explicit` indicates if this is an *explicit* dereference.
@ -1560,125 +1586,6 @@ impl<'tcx> Ty<'tcx> {
}
}
/// Returns the type of the async destructor of this type.
pub fn async_destructor_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match self.async_drop_glue_morphology(tcx) {
AsyncDropGlueMorphology::Noop => {
return Ty::async_destructor_combinator(tcx, LangItem::AsyncDropNoop)
.instantiate_identity();
}
AsyncDropGlueMorphology::DeferredDropInPlace => {
let drop_in_place =
Ty::async_destructor_combinator(tcx, LangItem::AsyncDropDeferredDropInPlace)
.instantiate(tcx, &[self.into()]);
return Ty::async_destructor_combinator(tcx, LangItem::AsyncDropFuse)
.instantiate(tcx, &[drop_in_place.into()]);
}
AsyncDropGlueMorphology::Custom => (),
}
match *self.kind() {
ty::Param(_) | ty::Alias(..) | ty::Infer(ty::TyVar(_)) => {
let assoc_items = tcx
.associated_item_def_ids(tcx.require_lang_item(LangItem::AsyncDestruct, None));
Ty::new_projection(tcx, assoc_items[0], [self])
}
ty::Array(elem_ty, _) | ty::Slice(elem_ty) => {
let dtor = Ty::async_destructor_combinator(tcx, LangItem::AsyncDropSlice)
.instantiate(tcx, &[elem_ty.into()]);
Ty::async_destructor_combinator(tcx, LangItem::AsyncDropFuse)
.instantiate(tcx, &[dtor.into()])
}
ty::Adt(adt_def, args) if adt_def.is_enum() || adt_def.is_struct() => self
.adt_async_destructor_ty(
tcx,
adt_def.variants().iter().map(|v| v.fields.iter().map(|f| f.ty(tcx, args))),
),
ty::Tuple(tys) => self.adt_async_destructor_ty(tcx, iter::once(tys)),
ty::Closure(_, args) => {
self.adt_async_destructor_ty(tcx, iter::once(args.as_closure().upvar_tys()))
}
ty::CoroutineClosure(_, args) => self
.adt_async_destructor_ty(tcx, iter::once(args.as_coroutine_closure().upvar_tys())),
ty::Adt(adt_def, _) => {
assert!(adt_def.is_union());
let surface_drop = self.surface_async_dropper_ty(tcx).unwrap();
Ty::async_destructor_combinator(tcx, LangItem::AsyncDropFuse)
.instantiate(tcx, &[surface_drop.into()])
}
ty::Bound(..)
| ty::Foreign(_)
| ty::Placeholder(_)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("`async_destructor_ty` applied to unexpected type: {self:?}")
}
_ => bug!("`async_destructor_ty` is not yet implemented for type: {self:?}"),
}
}
fn adt_async_destructor_ty<I>(self, tcx: TyCtxt<'tcx>, variants: I) -> Ty<'tcx>
where
I: Iterator + ExactSizeIterator,
I::Item: IntoIterator<Item = Ty<'tcx>>,
{
debug_assert_eq!(self.async_drop_glue_morphology(tcx), AsyncDropGlueMorphology::Custom);
let defer = Ty::async_destructor_combinator(tcx, LangItem::AsyncDropDefer);
let chain = Ty::async_destructor_combinator(tcx, LangItem::AsyncDropChain);
let noop =
Ty::async_destructor_combinator(tcx, LangItem::AsyncDropNoop).instantiate_identity();
let either = Ty::async_destructor_combinator(tcx, LangItem::AsyncDropEither);
let variants_dtor = variants
.into_iter()
.map(|variant| {
variant
.into_iter()
.map(|ty| defer.instantiate(tcx, &[ty.into()]))
.reduce(|acc, next| chain.instantiate(tcx, &[acc.into(), next.into()]))
.unwrap_or(noop)
})
.reduce(|other, matched| {
either.instantiate(tcx, &[other.into(), matched.into(), self.into()])
})
.unwrap();
let dtor = if let Some(dropper_ty) = self.surface_async_dropper_ty(tcx) {
Ty::async_destructor_combinator(tcx, LangItem::AsyncDropChain)
.instantiate(tcx, &[dropper_ty.into(), variants_dtor.into()])
} else {
variants_dtor
};
Ty::async_destructor_combinator(tcx, LangItem::AsyncDropFuse)
.instantiate(tcx, &[dtor.into()])
}
fn surface_async_dropper_ty(self, tcx: TyCtxt<'tcx>) -> Option<Ty<'tcx>> {
let adt_def = self.ty_adt_def()?;
let dropper = adt_def
.async_destructor(tcx)
.map(|_| LangItem::SurfaceAsyncDropInPlace)
.or_else(|| adt_def.destructor(tcx).map(|_| LangItem::AsyncDropSurfaceDropInPlace))?;
Some(Ty::async_destructor_combinator(tcx, dropper).instantiate(tcx, &[self.into()]))
}
fn async_destructor_combinator(
tcx: TyCtxt<'tcx>,
lang_item: LangItem,
) -> ty::EarlyBinder<'tcx, Ty<'tcx>> {
tcx.fn_sig(tcx.require_lang_item(lang_item, None))
.map_bound(|fn_sig| fn_sig.output().no_bound_vars().unwrap())
}
/// Returns the type of metadata for (potentially wide) pointers to this type,
/// or the struct tail if the metadata type cannot be determined.
pub fn ptr_metadata_ty_or_tail(
@ -2020,6 +1927,13 @@ impl<'tcx> Ty<'tcx> {
}
}
/// Returns `true` if this type is the coroutine of the
/// `async_drop_in_place` lang item (`async_drop_in_place<T>::{closure}`).
pub fn is_async_drop_in_place_coroutine(self, tcx: TyCtxt<'_>) -> bool {
    if let ty::Coroutine(def, ..) = self.kind() {
        tcx.is_async_drop_in_place_coroutine(*def)
    } else {
        false
    }
}
/// Returns `true` when the outermost type cannot be further normalized,
/// resolved, or instantiated. This includes all primitive types, but also
/// things like ADTs and trait objects, since even if their arguments or

View file

@ -468,25 +468,6 @@ impl<'tcx> TyCtxt<'tcx> {
Some(ty::AsyncDestructor { impl_did: dtor_candidate? })
}
/// Returns async drop glue morphology for a definition. To get async drop
/// glue morphology for a type see [`Ty::async_drop_glue_morphology`].
//
// FIXME: consider making this a query
pub fn async_drop_glue_morphology(self, did: DefId) -> AsyncDropGlueMorphology {
let ty: Ty<'tcx> = self.type_of(did).instantiate_identity();
// Async drop glue morphology is an internal detail, so
// using `TypingMode::PostAnalysis` probably should be fine.
let typing_env = ty::TypingEnv::fully_monomorphized();
if ty.needs_async_drop(self, typing_env) {
AsyncDropGlueMorphology::Custom
} else if ty.needs_drop(self, typing_env) {
AsyncDropGlueMorphology::DeferredDropInPlace
} else {
AsyncDropGlueMorphology::Noop
}
}
/// Returns the set of types that are required to be alive in
/// order to run the destructor of `def` (see RFCs 769 and
/// 1238).
@ -1114,18 +1095,6 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for WeakAliasTypeExpander<'tcx> {
}
}
/// Indicates the form of `AsyncDestruct::Destructor`. Used to simplify async
/// drop glue for types not using async drop.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AsyncDropGlueMorphology {
/// Async destructor simply does nothing
Noop,
/// Async destructor simply runs `drop_in_place`
DeferredDropInPlace,
/// Async destructor has custom logic
Custom,
}
impl<'tcx> Ty<'tcx> {
/// Returns the `Size` for primitive types (bool, uint, int, char, float).
pub fn primitive_size(self, tcx: TyCtxt<'tcx>) -> Size {
@ -1295,16 +1264,17 @@ impl<'tcx> Ty<'tcx> {
}
}
/// Get morphology of the async drop glue, needed for types which do not
/// use async drop. To get async drop glue morphology for a definition see
/// [`TyCtxt::async_drop_glue_morphology`]. Used for `AsyncDestruct::Destructor`
/// type construction.
//
// FIXME: implement optimization to not instantiate a certain morphology of
// async drop glue too soon to allow per type optimizations, see array case
// for more info. Perhaps then remove this method and use `needs_(async_)drop`
// instead.
pub fn async_drop_glue_morphology(self, tcx: TyCtxt<'tcx>) -> AsyncDropGlueMorphology {
/// Checks whether values of this type `T` implement the `AsyncDrop` trait.
///
/// Takes the fast syntactic path first; only types that are not trivially
/// `!AsyncDrop` are checked through the `is_async_drop_raw` query.
pub fn is_async_drop(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool {
    if self.is_trivially_not_async_drop() {
        return false;
    }
    tcx.is_async_drop_raw(typing_env.as_query_input(self))
}
/// Fast path helper for testing if a type is `AsyncDrop`.
///
/// Returning true means the type is known to be `!AsyncDrop`. Returning
/// `false` means nothing -- could be `AsyncDrop`, might not be.
fn is_trivially_not_async_drop(self) -> bool {
match self.kind() {
ty::Int(_)
| ty::Uint(_)
@ -1316,46 +1286,26 @@ impl<'tcx> Ty<'tcx> {
| ty::Ref(..)
| ty::RawPtr(..)
| ty::FnDef(..)
| ty::FnPtr(..)
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => AsyncDropGlueMorphology::Noop,
| ty::Error(_)
| ty::FnPtr(..) => true,
// FIXME(unsafe_binders):
ty::UnsafeBinder(_) => todo!(),
ty::Tuple(tys) if tys.is_empty() => AsyncDropGlueMorphology::Noop,
ty::Adt(adt_def, _) if adt_def.is_manually_drop() => AsyncDropGlueMorphology::Noop,
// Foreign types can never have destructors.
ty::Foreign(_) => AsyncDropGlueMorphology::Noop,
// FIXME: implement dynamic types async drops
ty::Error(_) | ty::Dynamic(..) => AsyncDropGlueMorphology::DeferredDropInPlace,
ty::Tuple(_) | ty::Array(_, _) | ty::Slice(_) => {
// Assume worst-case scenario, because we can instantiate async
// destructors in different orders:
//
// 1. Instantiate [T; N] with T = String and N = 0
// 2. Instantiate <[String; 0] as AsyncDestruct>::Destructor
//
// And viceversa, thus we cannot rely on String not using async
// drop or array having zero (0) elements
AsyncDropGlueMorphology::Custom
}
ty::Pat(ty, _) => ty.async_drop_glue_morphology(tcx),
ty::Adt(adt_def, _) => tcx.async_drop_glue_morphology(adt_def.did()),
ty::Closure(did, _)
| ty::CoroutineClosure(did, _)
| ty::Coroutine(did, _)
| ty::CoroutineWitness(did, _) => tcx.async_drop_glue_morphology(*did),
ty::Alias(..) | ty::Param(_) | ty::Bound(..) | ty::Placeholder(..) | ty::Infer(_) => {
// No specifics, but would usually mean forwarding async drop glue
AsyncDropGlueMorphology::Custom
ty::Tuple(fields) => fields.iter().all(Self::is_trivially_not_async_drop),
ty::Pat(elem_ty, _) | ty::Slice(elem_ty) | ty::Array(elem_ty, _) => {
elem_ty.is_trivially_not_async_drop()
}
ty::Adt(..)
| ty::Bound(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Dynamic(..)
| ty::Foreign(_)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Infer(_)
| ty::Alias(..)
| ty::Param(_)
| ty::Placeholder(_) => false,
}
}
@ -1401,9 +1351,6 @@ impl<'tcx> Ty<'tcx> {
/// (Note that this implies that if `ty` has an async destructor attached,
/// then `needs_async_drop` will definitely return `true` for `ty`.)
///
/// When constructing `AsyncDestruct::Destructor` type, use
/// [`Ty::async_drop_glue_morphology`] instead.
//
// FIXME(zetanumbers): Note that this method is used to check eligible types
// in unions.
#[inline]

View file

@ -69,6 +69,8 @@ impl<'a, 'tcx> ParseCtxt<'a, 'tcx> {
target: self.parse_return_to(args[1])?,
unwind: self.parse_unwind_action(args[2])?,
replace: false,
drop: None,
async_fut: None,
})
},
@call(mir_call, args) => {

View file

@ -762,6 +762,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
target: success,
unwind: UnwindAction::Continue,
replace: false,
drop: None,
async_fut: None,
},
);
this.diverge_from(block);

View file

@ -89,6 +89,7 @@ use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::middle::region;
use rustc_middle::mir::*;
use rustc_middle::thir::{ExprId, LintLevel};
use rustc_middle::ty::{self, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::lint::Level;
use rustc_span::source_map::Spanned;
@ -405,6 +406,8 @@ impl DropTree {
unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
place: drop_node.data.local.into(),
replace: false,
drop: None,
async_fut: None,
};
cfg.terminate(block, drop_node.data.source_info, terminator);
}
@ -848,6 +851,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
target: next,
unwind: UnwindAction::Continue,
replace: false,
drop: None,
async_fut: None,
},
);
block = next;
@ -879,6 +884,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block.unit()
}
/// Returns whether dropping `local` must go through the coroutine drop path:
/// its type implements `AsyncDrop`, is itself a coroutine, or transitively
/// needs async drop.
fn is_async_drop_impl(
    tcx: TyCtxt<'tcx>,
    local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
    typing_env: ty::TypingEnv<'tcx>,
    local: Local,
) -> bool {
    let ty = local_decls[local].ty;
    // Short-circuits in the same order as the checks are listed.
    ty.is_async_drop(tcx, typing_env) || ty.is_coroutine() || ty.needs_async_drop(tcx, typing_env)
}
/// Convenience wrapper around [`Self::is_async_drop_impl`] using this
/// builder's own `tcx`, local declarations, and typing environment.
fn is_async_drop(&self, local: Local) -> bool {
    let typing_env = self.typing_env();
    Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, local)
}
fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
// If we are emitting a `drop` statement, we need to have the cached
// diverge cleanup pads ready in case that drop panics.
@ -887,14 +908,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
let has_async_drops = is_coroutine
&& scope.drops.iter().any(|v| v.kind == DropKind::Value && self.is_async_drop(v.local));
let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None };
let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
let typing_env = self.typing_env();
build_scope_drops(
&mut self.cfg,
&mut self.scopes.unwind_drops,
&mut self.scopes.coroutine_drops,
scope,
block,
unwind_to,
dropline_to,
is_coroutine && needs_cleanup,
self.arg_count,
|v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, v),
)
.into_block()
}
@ -1310,6 +1339,44 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.scopes.unwind_drops.add_entry_point(start, next_drop);
}
/// Returns the [DropIdx] for the innermost drop on the dropline (the
/// coroutine drop path). The `DropIdx` is created if it doesn't already exist.
fn diverge_dropline(&mut self) -> DropIdx {
    // A dummy span is fine here: looking up the scope index of the topmost
    // scope always succeeds, so the span is never used for an error.
    let topmost = self.scopes.topmost();
    self.diverge_dropline_target(topmost, DUMMY_SP)
}
/// Similar to diverge_cleanup_target, but for the dropline (coroutine drop path):
/// returns the dropline tree node covering all drops up to `target_scope`,
/// building and caching any missing portion of the tree on the way.
fn diverge_dropline_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
    debug_assert!(
        self.coroutine.is_some(),
        "diverge_dropline_target is valid only for coroutine"
    );
    let target = self.scopes.scope_index(target_scope, span);
    // Find the deepest scope (at or inside `target`) that already has a cached
    // coroutine-drop node; `uncached_scope` is the index of the first scope
    // whose drops still need to be added to the tree.
    let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
        .iter()
        .enumerate()
        .rev()
        .find_map(|(scope_idx, scope)| {
            scope.cached_coroutine_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
        })
        .unwrap_or((0, ROOT_NODE));
    // Everything up to and including `target` is already in the tree.
    if uncached_scope > target {
        return cached_drop;
    }
    // Append the drops of each remaining scope to the coroutine-drop tree,
    // caching the node reached after each scope so later calls can resume here.
    for scope in &mut self.scopes.scopes[uncached_scope..=target] {
        for drop in &scope.drops {
            cached_drop = self.scopes.coroutine_drops.add_drop(*drop, cached_drop);
        }
        scope.cached_coroutine_drop_block = Some(cached_drop);
    }
    cached_drop
}
/// Sets up a path that performs all required cleanup for dropping a
/// coroutine, starting from the given block that ends in
/// [TerminatorKind::Yield].
@ -1323,24 +1390,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
),
"coroutine_drop_cleanup called on block with non-yield terminator."
);
let (uncached_scope, mut cached_drop) = self
.scopes
.scopes
.iter()
.enumerate()
.rev()
.find_map(|(scope_idx, scope)| {
scope.cached_coroutine_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
})
.unwrap_or((0, ROOT_NODE));
for scope in &mut self.scopes.scopes[uncached_scope..] {
for drop in &scope.drops {
cached_drop = self.scopes.coroutine_drops.add_drop(*drop, cached_drop);
}
scope.cached_coroutine_drop_block = Some(cached_drop);
}
let cached_drop = self.diverge_dropline();
self.scopes.coroutine_drops.add_entry_point(yield_block, cached_drop);
}
@ -1371,6 +1421,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
target: assign,
unwind: UnwindAction::Cleanup(assign_unwind),
replace: true,
drop: None,
async_fut: None,
},
);
self.diverge_from(block);
@ -1432,18 +1484,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// * `unwind_to`, describes the drops that would occur at this point in the code if a
/// panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other
/// instructions on unwinding)
/// * `dropline_to`, describes the drops that would occur at this point in the code if a
/// coroutine drop occurred.
/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
/// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
fn build_scope_drops<'tcx>(
fn build_scope_drops<'tcx, F>(
cfg: &mut CFG<'tcx>,
unwind_drops: &mut DropTree,
coroutine_drops: &mut DropTree,
scope: &Scope,
block: BasicBlock,
unwind_to: DropIdx,
dropline_to: Option<DropIdx>,
storage_dead_on_unwind: bool,
arg_count: usize,
) -> BlockAnd<()> {
debug!("build_scope_drops({:?} -> {:?})", block, scope);
is_async_drop: F,
) -> BlockAnd<()>
where
F: Fn(Local) -> bool,
{
debug!("build_scope_drops({:?} -> {:?}), dropline_to={:?}", block, scope, dropline_to);
// Build up the drops in evaluation order. The end result will
// look like:
@ -1476,6 +1536,9 @@ fn build_scope_drops<'tcx>(
// will branch to `drops[n]`.
let mut block = block;
// `dropline_to` indicates what needs to be dropped should coroutine drop occur.
let mut dropline_to = dropline_to;
for drop_data in scope.drops.iter().rev() {
let source_info = drop_data.source_info;
let local = drop_data.local;
@ -1492,6 +1555,12 @@ fn build_scope_drops<'tcx>(
debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
unwind_to = unwind_drops.drops[unwind_to].next;
if let Some(idx) = dropline_to {
debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
dropline_to = Some(coroutine_drops.drops[idx].next);
}
// If the operand has been moved, and we are not on an unwind
// path, then don't generate the drop. (We only take this into
// account for non-unwind paths so as not to disturb the
@ -1501,6 +1570,12 @@ fn build_scope_drops<'tcx>(
}
unwind_drops.add_entry_point(block, unwind_to);
if let Some(to) = dropline_to
&& is_async_drop(local)
{
coroutine_drops.add_entry_point(block, to);
}
let next = cfg.start_new_block();
cfg.terminate(
block,
@ -1510,6 +1585,8 @@ fn build_scope_drops<'tcx>(
target: next,
unwind: UnwindAction::Continue,
replace: false,
drop: None,
async_fut: None,
},
);
block = next;
@ -1556,6 +1633,11 @@ fn build_scope_drops<'tcx>(
debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
unwind_to = unwind_drops.drops[unwind_to].next;
}
if let Some(idx) = dropline_to {
debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
dropline_to = Some(coroutine_drops.drops[idx].next);
}
// Only temps and vars need their storage dead.
assert!(local.index() > arg_count);
cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
@ -1611,6 +1693,39 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
}
}
}
// Link the exit drop tree to dropline drop tree (coroutine drop path) for async drops
if is_coroutine
&& drops.drops.iter().any(|DropNode { data, next: _ }| {
data.kind == DropKind::Value && self.is_async_drop(data.local)
})
{
let dropline_target = self.diverge_dropline_target(else_scope, span);
let mut dropline_indices = IndexVec::from_elem_n(dropline_target, 1);
for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
match drop_data.data.kind {
DropKind::Storage | DropKind::ForLint => {
let coroutine_drop = self
.scopes
.coroutine_drops
.add_drop(drop_data.data, dropline_indices[drop_data.next]);
dropline_indices.push(coroutine_drop);
}
DropKind::Value => {
let coroutine_drop = self
.scopes
.coroutine_drops
.add_drop(drop_data.data, dropline_indices[drop_data.next]);
if self.is_async_drop(drop_data.data.local) {
self.scopes.coroutine_drops.add_entry_point(
blocks[drop_idx].unwrap(),
dropline_indices[drop_data.next],
);
}
dropline_indices.push(coroutine_drop);
}
}
}
}
blocks[ROOT_NODE].map(BasicBlock::unit)
}
@ -1655,9 +1770,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
// to be captured by the coroutine. I'm not sure how important this
// optimization is, but it is here.
for (drop_idx, drop_node) in drops.drops.iter_enumerated() {
if let DropKind::Value = drop_node.data.kind {
if let DropKind::Value = drop_node.data.kind
&& let Some(bb) = blocks[drop_idx]
{
debug_assert!(drop_node.next < drops.drops.next_index());
drops.entry_points.push((drop_node.next, blocks[drop_idx].unwrap()));
drops.entry_points.push((drop_node.next, bb));
}
}
Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
@ -1709,6 +1826,8 @@ impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop {
let term = cfg.block_data_mut(from).terminator_mut();
if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
*drop = Some(to);
} else if let TerminatorKind::Drop { ref mut drop, .. } = term.kind {
*drop = Some(to);
} else {
span_bug!(
term.source_info.span,

View file

@ -376,7 +376,14 @@ impl<'tcx> Analysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
// the result of `is_unwind_dead`.
let mut edges = terminator.edges();
if self.skip_unreachable_unwind
&& let mir::TerminatorKind::Drop { target, unwind, place, replace: _ } = terminator.kind
&& let mir::TerminatorKind::Drop {
target,
unwind,
place,
replace: _,
drop: _,
async_fut: _,
} = terminator.kind
&& matches!(unwind, mir::UnwindAction::Cleanup(_))
&& self.is_unwind_dead(place, state)
{

View file

@ -405,6 +405,9 @@ impl<'tcx> Map<'tcx> {
if exclude.contains(local) {
continue;
}
if decl.ty.is_async_drop_in_place_coroutine(tcx) {
continue;
}
// Create a place for the local.
debug_assert!(self.locals[local].is_none());

View file

@ -83,7 +83,9 @@ fn add_move_for_packed_drop<'tcx>(
is_cleanup: bool,
) {
debug!("add_move_for_packed_drop({:?} @ {:?})", terminator, loc);
let TerminatorKind::Drop { ref place, target, unwind, replace } = terminator.kind else {
let TerminatorKind::Drop { ref place, target, unwind, replace, drop, async_fut } =
terminator.kind
else {
unreachable!();
};
@ -106,6 +108,8 @@ fn add_move_for_packed_drop<'tcx>(
target: storage_dead_block,
unwind,
replace,
drop,
async_fut,
},
);
}

View file

@ -51,9 +51,15 @@
//! Otherwise it drops all the values in scope at the last suspension point.
mod by_move_body;
mod drop;
use std::{iter, ops};
pub(super) use by_move_body::coroutine_by_move_body_def_id;
use drop::{
cleanup_async_drops, create_coroutine_drop_shim, create_coroutine_drop_shim_async,
create_coroutine_drop_shim_proxy_async, elaborate_coroutine_drops, expand_async_drops,
has_expandable_async_drops, insert_clean_drop,
};
use rustc_abi::{FieldIdx, VariantIdx};
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::pluralize;
@ -64,6 +70,7 @@ use rustc_index::bit_set::{BitMatrix, DenseBitSet, GrowableBitSet};
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::util::Discr;
use rustc_middle::ty::{
self, CoroutineArgs, CoroutineArgsExt, GenericArgsRef, InstanceKind, Ty, TyCtxt, TypingMode,
};
@ -74,7 +81,9 @@ use rustc_mir_dataflow::impls::{
};
use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor};
use rustc_span::def_id::{DefId, LocalDefId};
use rustc_span::{Span, sym};
use rustc_span::source_map::dummy_spanned;
use rustc_span::symbol::sym;
use rustc_span::{DUMMY_SP, Span};
use rustc_target::spec::PanicStrategy;
use rustc_trait_selection::error_reporting::InferCtxtErrorExt;
use rustc_trait_selection::infer::TyCtxtInferExt as _;
@ -159,6 +168,7 @@ fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtx
}
const SELF_ARG: Local = Local::from_u32(1);
const CTX_ARG: Local = Local::from_u32(2);
/// A `yield` point in the coroutine.
struct SuspensionPoint<'tcx> {
@ -539,11 +549,11 @@ fn replace_local<'tcx>(
/// The async lowering step and the type / lifetime inference / checking are
/// still using the `ResumeTy` indirection for the time being, and that indirection
/// is removed here. After this transform, the coroutine body only knows about `&mut Context<'_>`.
fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> Ty<'tcx> {
let context_mut_ref = Ty::new_task_context(tcx);
// replace the type of the `resume` argument
replace_resume_ty_local(tcx, body, Local::new(2), context_mut_ref);
replace_resume_ty_local(tcx, body, CTX_ARG, context_mut_ref);
let get_context_def_id = tcx.require_lang_item(LangItem::GetContext, None);
@ -569,6 +579,7 @@ fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
_ => {}
}
}
context_mut_ref
}
fn eliminate_get_context_call<'tcx>(bb_data: &mut BasicBlockData<'tcx>) -> Local {
@ -1036,9 +1047,8 @@ fn insert_switch<'tcx>(
body: &mut Body<'tcx>,
cases: Vec<(usize, BasicBlock)>,
transform: &TransformVisitor<'tcx>,
default: TerminatorKind<'tcx>,
default_block: BasicBlock,
) {
let default_block = insert_term_block(body, default);
let (assign, discr) = transform.get_discr(body);
let switch_targets =
SwitchTargets::new(cases.iter().map(|(i, bb)| ((*i) as u128, *bb)), default_block);
@ -1061,124 +1071,6 @@ fn insert_switch<'tcx>(
}
}
/// Expands every `Drop` terminator on the coroutine's own state (`SELF_ARG`)
/// into a full drop ladder over its upvars, using the shared shim elaborator.
///
/// Drops of any other place are left untouched. The accumulated patch is
/// applied to `body` before returning.
fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    use crate::elaborate_drop::{Unwind, elaborate_drop};
    use crate::patch::MirPatch;
    use crate::shim::DropShimElaborator;

    // Note that `elaborate_drops` only drops the upvars of a coroutine, and
    // this is ok because `open_drop` can only be reached within that own
    // coroutine's resume function.
    let typing_env = body.typing_env(tcx);

    let mut elaborator = DropShimElaborator { body, patch: MirPatch::new(body), tcx, typing_env };

    for (block, block_data) in body.basic_blocks.iter_enumerated() {
        // Only `drop(SELF_ARG)` terminators are elaborated; all other drops
        // (e.g. of locals) are skipped via `continue`.
        let (target, unwind, source_info) = match block_data.terminator() {
            Terminator {
                source_info,
                kind: TerminatorKind::Drop { place, target, unwind, replace: _ },
            } => {
                if let Some(local) = place.as_local()
                    && local == SELF_ARG
                {
                    (target, unwind, source_info)
                } else {
                    continue;
                }
            }
            _ => continue,
        };
        // Inside cleanup blocks there is no further unwind edge; otherwise map
        // the MIR `UnwindAction` onto the patch's canonical unwind blocks.
        let unwind = if block_data.is_cleanup {
            Unwind::InCleanup
        } else {
            Unwind::To(match *unwind {
                UnwindAction::Cleanup(tgt) => tgt,
                UnwindAction::Continue => elaborator.patch.resume_block(),
                UnwindAction::Unreachable => elaborator.patch.unreachable_cleanup_block(),
                UnwindAction::Terminate(reason) => elaborator.patch.terminate_block(reason),
            })
        };
        elaborate_drop(
            &mut elaborator,
            *source_info,
            Place::from(SELF_ARG),
            (),
            *target,
            unwind,
            block,
        );
    }
    elaborator.patch.apply(body);
}
/// Clones the transformed coroutine body into a standalone synchronous drop
/// shim (the body of `drop_in_place::<Coroutine>`).
///
/// The shim switches on the coroutine state discriminant: the `UNRESUMED`
/// state jumps to `drop_clean` (dropping only upvars), while returned/poisoned
/// states fall through to a plain `Return`.
fn create_coroutine_drop_shim<'tcx>(
    tcx: TyCtxt<'tcx>,
    transform: &TransformVisitor<'tcx>,
    coroutine_ty: Ty<'tcx>,
    body: &Body<'tcx>,
    drop_clean: BasicBlock,
) -> Body<'tcx> {
    let mut body = body.clone();
    // Take the coroutine info out of the body, since the drop shim is
    // not a coroutine body itself; it just has its drop built out of it.
    let _ = body.coroutine.take();

    // Make sure the resume argument is not included here, since we're
    // building a body for `drop_in_place`.
    body.arg_count = 1;

    let source_info = SourceInfo::outermost(body.span);

    let mut cases = create_cases(&mut body, transform, Operation::Drop);

    cases.insert(0, (CoroutineArgs::UNRESUMED, drop_clean));

    // The returned state and the poisoned state fall through to the default
    // case which is just to return

    insert_switch(&mut body, cases, transform, TerminatorKind::Return);

    // In a drop shim there is nothing left to resume, so `CoroutineDrop`
    // terminators degenerate into plain returns.
    for block in body.basic_blocks_mut() {
        let kind = &mut block.terminator_mut().kind;
        if let TerminatorKind::CoroutineDrop = *kind {
            *kind = TerminatorKind::Return;
        }
    }

    // Replace the return variable
    body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(tcx.types.unit, source_info);

    make_coroutine_state_argument_indirect(tcx, &mut body);

    // Change the coroutine argument from &mut to *mut
    body.local_decls[SELF_ARG] =
        LocalDecl::with_source_info(Ty::new_mut_ptr(tcx, coroutine_ty), source_info);

    // Make sure we remove dead blocks to remove
    // unrelated code from the resume part of the function
    simplify::remove_dead_blocks(&mut body);

    // Update the body's def to become the drop glue.
    let coroutine_instance = body.source.instance;
    let drop_in_place = tcx.require_lang_item(LangItem::DropInPlace, None);
    let drop_instance = InstanceKind::DropGlue(drop_in_place, Some(coroutine_ty));

    // Temporary change MirSource to coroutine's instance so that dump_mir produces more sensible
    // filename.
    body.source.instance = coroutine_instance;
    dump_mir(tcx, false, "coroutine_drop", &0, &body, |_, _| Ok(()));
    body.source.instance = drop_instance;
    // Creating a coroutine drop shim happens on `Analysis(PostCleanup) -> Runtime(Initial)`
    // but the pass manager doesn't update the phase of the coroutine drop shim. Update the
    // phase of the drop shim so that later on when we run the pass manager on the shim, in
    // the `mir_shims` query, we don't ICE on the intra-pass validation before we've updated
    // the phase of the body from analysis.
    body.phase = MirPhase::Runtime(RuntimePhase::Initial);

    body
}
fn insert_term_block<'tcx>(body: &mut Body<'tcx>, kind: TerminatorKind<'tcx>) -> BasicBlock {
let source_info = SourceInfo::outermost(body.span);
body.basic_blocks_mut().push(BasicBlockData {
@ -1188,6 +1080,34 @@ fn insert_term_block<'tcx>(body: &mut Body<'tcx>, kind: TerminatorKind<'tcx>) ->
})
}
/// Builds the MIR statement `_0 = Poll::Ready(())` at the given source info.
fn return_poll_ready_assign<'tcx>(tcx: TyCtxt<'tcx>, source_info: SourceInfo) -> Statement<'tcx> {
    // The unit payload carried by the `Ready` variant.
    let unit_payload = Operand::Constant(Box::new(ConstOperand {
        span: source_info.span,
        user_ty: None,
        const_: Const::zero_sized(tcx.types.unit),
    }));
    // `Poll::<()>::Ready` — the `Ready` variant (index 0) of the `Poll` lang item.
    let poll_def_id = tcx.require_lang_item(LangItem::Poll, None);
    let generic_args = tcx.mk_args(&[tcx.types.unit.into()]);
    let ready_kind =
        AggregateKind::Adt(poll_def_id, VariantIdx::from_usize(0), generic_args, None, None);
    let ready_rvalue =
        Rvalue::Aggregate(Box::new(ready_kind), IndexVec::from_raw(vec![unit_payload]));
    Statement {
        source_info,
        kind: StatementKind::Assign(Box::new((Place::return_place(), ready_rvalue))),
    }
}
/// Appends a fresh basic block that assigns `_0 = Poll::Ready(())` and then
/// returns; yields the id of the newly pushed block.
fn insert_poll_ready_block<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> BasicBlock {
    let source_info = SourceInfo::outermost(body.span);
    let statements = vec![return_poll_ready_assign(tcx, source_info)];
    let terminator = Terminator { source_info, kind: TerminatorKind::Return };
    body.basic_blocks_mut().push(BasicBlockData {
        statements,
        terminator: Some(terminator),
        is_cleanup: false,
    })
}
fn insert_panic_block<'tcx>(
tcx: TyCtxt<'tcx>,
body: &mut Body<'tcx>,
@ -1263,45 +1183,50 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
false
}
/// Poison the coroutine when it unwinds.
///
/// Appends a cleanup block that sets the state discriminant to `POISONED` and
/// resumes unwinding, then reroutes the body's unwind edges into it:
/// existing `UnwindResume` terminators become `Goto`s to the poison block, and
/// non-cleanup terminators with `UnwindAction::Continue` get it as their
/// cleanup target.
fn generate_poison_block_and_redirect_unwinds_there<'tcx>(
    transform: &TransformVisitor<'tcx>,
    body: &mut Body<'tcx>,
) {
    let source_info = SourceInfo::outermost(body.span);
    let poison_block = body.basic_blocks_mut().push(BasicBlockData {
        statements: vec![
            transform.set_discr(VariantIdx::new(CoroutineArgs::POISONED), source_info),
        ],
        terminator: Some(Terminator { source_info, kind: TerminatorKind::UnwindResume }),
        is_cleanup: true,
    });

    for (idx, block) in body.basic_blocks_mut().iter_enumerated_mut() {
        let source_info = block.terminator().source_info;
        if let TerminatorKind::UnwindResume = block.terminator().kind {
            // An existing `Resume` terminator is redirected to jump to our dedicated
            // "poisoning block" above.
            if idx != poison_block {
                *block.terminator_mut() =
                    Terminator { source_info, kind: TerminatorKind::Goto { target: poison_block } };
            }
        } else if !block.is_cleanup
            // Any terminators that *can* unwind but don't have an unwind target set are also
            // pointed at our poisoning block (unless they're part of the cleanup path).
            && let Some(unwind @ UnwindAction::Continue) = block.terminator_mut().unwind_mut()
        {
            *unwind = UnwindAction::Cleanup(poison_block);
        }
    }
}
fn create_coroutine_resume_function<'tcx>(
tcx: TyCtxt<'tcx>,
transform: TransformVisitor<'tcx>,
body: &mut Body<'tcx>,
can_return: bool,
can_unwind: bool,
) {
let can_unwind = can_unwind(tcx, body);
// Poison the coroutine when it unwinds
if can_unwind {
let source_info = SourceInfo::outermost(body.span);
let poison_block = body.basic_blocks_mut().push(BasicBlockData {
statements: vec![
transform.set_discr(VariantIdx::new(CoroutineArgs::POISONED), source_info),
],
terminator: Some(Terminator { source_info, kind: TerminatorKind::UnwindResume }),
is_cleanup: true,
});
for (idx, block) in body.basic_blocks_mut().iter_enumerated_mut() {
let source_info = block.terminator().source_info;
if let TerminatorKind::UnwindResume = block.terminator().kind {
// An existing `Resume` terminator is redirected to jump to our dedicated
// "poisoning block" above.
if idx != poison_block {
*block.terminator_mut() = Terminator {
source_info,
kind: TerminatorKind::Goto { target: poison_block },
};
}
} else if !block.is_cleanup
// Any terminators that *can* unwind but don't have an unwind target set are also
// pointed at our poisoning block (unless they're part of the cleanup path).
&& let Some(unwind @ UnwindAction::Continue) = block.terminator_mut().unwind_mut()
{
*unwind = UnwindAction::Cleanup(poison_block);
}
}
generate_poison_block_and_redirect_unwinds_there(&transform, body);
}
let mut cases = create_cases(body, &transform, Operation::Resume);
@ -1326,7 +1251,13 @@ fn create_coroutine_resume_function<'tcx>(
let block = match transform.coroutine_kind {
CoroutineKind::Desugared(CoroutineDesugaring::Async, _)
| CoroutineKind::Coroutine(_) => {
insert_panic_block(tcx, body, ResumedAfterReturn(transform.coroutine_kind))
// For `async_drop_in_place<T>::{closure}` we just keep returning Poll::Ready,
// because async drop of such coroutine keeps polling original coroutine
if tcx.is_async_drop_in_place_coroutine(body.source.def_id()) {
insert_poll_ready_block(tcx, body)
} else {
insert_panic_block(tcx, body, ResumedAfterReturn(transform.coroutine_kind))
}
}
CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, _)
| CoroutineKind::Desugared(CoroutineDesugaring::Gen, _) => {
@ -1336,7 +1267,8 @@ fn create_coroutine_resume_function<'tcx>(
cases.insert(1, (CoroutineArgs::RETURNED, block));
}
insert_switch(body, cases, &transform, TerminatorKind::Unreachable);
let default_block = insert_term_block(body, TerminatorKind::Unreachable);
insert_switch(body, cases, &transform, default_block);
make_coroutine_state_argument_indirect(tcx, body);
@ -1360,25 +1292,6 @@ fn create_coroutine_resume_function<'tcx>(
dump_mir(tcx, false, "coroutine_resume", &0, body, |_, _| Ok(()));
}
/// Appends a block that performs `drop(SELF_ARG)` and then jumps to a fresh
/// `Return` block; returns the id of the drop block.
///
/// This is the entry point for dropping a coroutine in the unresumed state,
/// later expanded to a drop ladder by `elaborate_coroutine_drops`.
fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
    let return_block = insert_term_block(body, TerminatorKind::Return);

    let term = TerminatorKind::Drop {
        place: Place::from(SELF_ARG),
        target: return_block,
        unwind: UnwindAction::Continue,
        replace: false,
    };
    let source_info = SourceInfo::outermost(body.span);

    // Create a block to destroy an unresumed coroutine. This can only destroy upvars.
    body.basic_blocks_mut().push(BasicBlockData {
        statements: Vec::new(),
        terminator: Some(Terminator { source_info, kind: term }),
        is_cleanup: false,
    })
}
/// An operation that can be performed on a coroutine.
#[derive(PartialEq, Copy, Clone)]
enum Operation {
@ -1423,7 +1336,7 @@ fn create_cases<'tcx>(
if operation == Operation::Resume {
// Move the resume argument to the destination place of the `Yield` terminator
let resume_arg = Local::new(2); // 0 = return, 1 = self
let resume_arg = CTX_ARG;
statements.push(Statement {
source_info,
kind: StatementKind::Assign(Box::new((
@ -1530,7 +1443,9 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
};
let old_ret_ty = body.return_ty();
assert!(body.coroutine_drop().is_none());
assert!(body.coroutine_drop().is_none() && body.coroutine_drop_async().is_none());
dump_mir(tcx, false, "coroutine_before", &0, body, |_, _| Ok(()));
// The first argument is the coroutine type passed by value
let coroutine_ty = body.local_decls.raw[1].ty;
@ -1574,19 +1489,32 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
// RETURN_PLACE then is a fresh unused local with type ret_ty.
let old_ret_local = replace_local(RETURN_PLACE, new_ret_ty, body, tcx);
// We need to insert clean drop for unresumed state and perform drop elaboration
// (finally in open_drop_for_tuple) before async drop expansion.
// Async drops, produced by this drop elaboration, will be expanded,
// and corresponding futures kept in layout.
let has_async_drops = matches!(
coroutine_kind,
CoroutineKind::Desugared(CoroutineDesugaring::Async | CoroutineDesugaring::AsyncGen, _)
) && has_expandable_async_drops(tcx, body, coroutine_ty);
// Replace all occurrences of `ResumeTy` with `&mut Context<'_>` within async bodies.
if matches!(
coroutine_kind,
CoroutineKind::Desugared(CoroutineDesugaring::Async | CoroutineDesugaring::AsyncGen, _)
) {
transform_async_context(tcx, body);
let context_mut_ref = transform_async_context(tcx, body);
expand_async_drops(tcx, body, context_mut_ref, coroutine_kind, coroutine_ty);
dump_mir(tcx, false, "coroutine_async_drop_expand", &0, body, |_, _| Ok(()));
} else {
cleanup_async_drops(body);
}
// We also replace the resume argument and insert an `Assign`.
// This is needed because the resume argument `_2` might be live across a `yield`, in which
// case there is no `Assign` to it that the transform can turn into a store to the coroutine
// state. After the yield the slot in the coroutine state would then be uninitialized.
let resume_local = Local::new(2);
let resume_local = CTX_ARG;
let resume_ty = body.local_decls[resume_local].ty;
let old_resume_local = replace_local(resume_local, resume_ty, body, tcx);
@ -1667,10 +1595,14 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
body.coroutine.as_mut().unwrap().resume_ty = None;
body.coroutine.as_mut().unwrap().coroutine_layout = Some(layout);
// FIXME: Drops, produced by insert_clean_drop + elaborate_coroutine_drops,
// are currently sync only. To allow async for them, we need to move those calls
// before expand_async_drops, and fix the related problems.
//
// Insert `drop(coroutine_struct)` which is used to drop upvars for coroutines in
// the unresumed state.
// This is expanded to a drop ladder in `elaborate_coroutine_drops`.
let drop_clean = insert_clean_drop(body);
let drop_clean = insert_clean_drop(tcx, body, has_async_drops);
dump_mir(tcx, false, "coroutine_pre-elab", &0, body, |_, _| Ok(()));
@ -1681,13 +1613,32 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
dump_mir(tcx, false, "coroutine_post-transform", &0, body, |_, _| Ok(()));
// Create a copy of our MIR and use it to create the drop shim for the coroutine
let drop_shim = create_coroutine_drop_shim(tcx, &transform, coroutine_ty, body, drop_clean);
let can_unwind = can_unwind(tcx, body);
body.coroutine.as_mut().unwrap().coroutine_drop = Some(drop_shim);
// Create a copy of our MIR and use it to create the drop shim for the coroutine
if has_async_drops {
// If coroutine has async drops, generating async drop shim
let mut drop_shim =
create_coroutine_drop_shim_async(tcx, &transform, body, drop_clean, can_unwind);
// Run derefer to fix Derefs that are not in the first place
deref_finder(tcx, &mut drop_shim);
body.coroutine.as_mut().unwrap().coroutine_drop_async = Some(drop_shim);
} else {
// If coroutine has no async drops, generating sync drop shim
let mut drop_shim =
create_coroutine_drop_shim(tcx, &transform, coroutine_ty, body, drop_clean);
// Run derefer to fix Derefs that are not in the first place
deref_finder(tcx, &mut drop_shim);
body.coroutine.as_mut().unwrap().coroutine_drop = Some(drop_shim);
// For coroutine with sync drop, generating async proxy for `future_drop_poll` call
let mut proxy_shim = create_coroutine_drop_shim_proxy_async(tcx, body);
deref_finder(tcx, &mut proxy_shim);
body.coroutine.as_mut().unwrap().coroutine_drop_proxy_async = Some(proxy_shim);
}
// Create the Coroutine::resume / Future::poll function
create_coroutine_resume_function(tcx, transform, body, can_return);
create_coroutine_resume_function(tcx, transform, body, can_return, can_unwind);
// Run derefer to fix Derefs that are not in the first place
deref_finder(tcx, body);

View file

@ -0,0 +1,725 @@
//! Drops and async drops related logic for coroutine transformation pass
use super::*;
// Fix return Poll<Rv>::Pending statement into Poll<()>::Pending for async drop function
struct FixReturnPendingVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
}
impl<'tcx> MutVisitor<'tcx> for FixReturnPendingVisitor<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_assign(
&mut self,
place: &mut Place<'tcx>,
rvalue: &mut Rvalue<'tcx>,
_location: Location,
) {
if place.local != RETURN_PLACE {
return;
}
// Converting `_0 = Poll::<Rv>::Pending` to `_0 = Poll::<()>::Pending`
if let Rvalue::Aggregate(kind, _) = rvalue {
if let AggregateKind::Adt(_, _, ref mut args, _, _) = **kind {
*args = self.tcx.mk_args(&[self.tcx.types.unit.into()]);
}
}
}
}
/// Appends a block performing `*poll_unit_place = Future::poll(fut_pin, ctx)`,
/// continuing to `switch_block` on return; yields the new block's id.
///
/// Fix: the original cloned `poll_fn` into the call even though the operand is
/// never used afterwards — the redundant `.clone()` is removed.
fn build_poll_call<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mut Body<'tcx>,
    poll_unit_place: &Place<'tcx>,
    switch_block: BasicBlock,
    fut_pin_place: &Place<'tcx>,
    fut_ty: Ty<'tcx>,
    context_ref_place: &Place<'tcx>,
    unwind: UnwindAction,
) -> BasicBlock {
    // `<fut_ty as Future>::poll`, instantiated for the concrete future type.
    let poll_fn = tcx.require_lang_item(LangItem::FuturePoll, None);
    let poll_fn = Ty::new_fn_def(tcx, poll_fn, [fut_ty]);
    let poll_fn = Operand::Constant(Box::new(ConstOperand {
        span: DUMMY_SP,
        user_ty: None,
        const_: Const::zero_sized(poll_fn),
    }));
    let call = TerminatorKind::Call {
        // `poll_fn` is consumed here; no clone needed.
        func: poll_fn,
        args: [
            dummy_spanned(Operand::Move(*fut_pin_place)),
            dummy_spanned(Operand::Move(*context_ref_place)),
        ]
        .into(),
        destination: *poll_unit_place,
        target: Some(switch_block),
        unwind,
        call_source: CallSource::Misc,
        fn_span: DUMMY_SP,
    };
    insert_term_block(body, call)
}
/// Appends a block computing `pin_fut = Pin::new_unchecked(&mut fut)`.
///
/// Returns the new block's id and the place holding the pinned future.
/// The call terminator is left with `target: None`; the caller is expected to
/// patch it to point at the follow-up (poll) block.
fn build_pin_fut<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mut Body<'tcx>,
    fut_place: Place<'tcx>,
    unwind: UnwindAction,
) -> (BasicBlock, Place<'tcx>) {
    let span = body.span;
    let source_info = SourceInfo::outermost(span);
    let fut_ty = fut_place.ty(&body.local_decls, tcx).ty;
    // Fresh local holding `&mut fut`, the argument to `Pin::new_unchecked`.
    let fut_ref_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, fut_ty);
    let fut_ref_place = Place::from(body.local_decls.push(LocalDecl::new(fut_ref_ty, span)));
    let pin_fut_new_unchecked_fn = Ty::new_fn_def(
        tcx,
        tcx.require_lang_item(LangItem::PinNewUnchecked, Some(span)),
        [fut_ref_ty],
    );
    // The pinned type is taken from the function signature's output.
    let fut_pin_ty = pin_fut_new_unchecked_fn.fn_sig(tcx).output().skip_binder();
    let fut_pin_place = Place::from(body.local_decls.push(LocalDecl::new(fut_pin_ty, span)));
    let pin_fut_new_unchecked_fn = Operand::Constant(Box::new(ConstOperand {
        span,
        user_ty: None,
        const_: Const::zero_sized(pin_fut_new_unchecked_fn),
    }));

    let storage_live =
        Statement { source_info, kind: StatementKind::StorageLive(fut_pin_place.local) };

    let fut_ref_assign = Statement {
        source_info,
        kind: StatementKind::Assign(Box::new((
            fut_ref_place,
            Rvalue::Ref(
                tcx.lifetimes.re_erased,
                BorrowKind::Mut { kind: MutBorrowKind::Default },
                fut_place,
            ),
        ))),
    };

    // call Pin<FutTy>::new_unchecked(&mut fut)
    let pin_fut_bb = body.basic_blocks_mut().push(BasicBlockData {
        statements: [storage_live, fut_ref_assign].to_vec(),
        terminator: Some(Terminator {
            source_info,
            kind: TerminatorKind::Call {
                func: pin_fut_new_unchecked_fn,
                args: [dummy_spanned(Operand::Move(fut_ref_place))].into(),
                destination: fut_pin_place,
                target: None, // will be fixed later
                unwind,
                call_source: CallSource::Misc,
                fn_span: span,
            },
        }),
        is_cleanup: false,
    });
    (pin_fut_bb, fut_pin_place)
}
// Build Poll switch for async drop:
// match rv {
//     Ready(()) => ready_block
//     Pending => yield_block
// }
/// Appends a block that reads the discriminant of `*poll_unit_place`
/// (a `Poll<()>`) and switches: `Ready` goes to `ready_block`, `Pending` to
/// `yield_block`, anything else to a fresh `Unreachable` block.
/// Returns the new switch block's id.
fn build_poll_switch<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mut Body<'tcx>,
    poll_enum: Ty<'tcx>,
    poll_unit_place: &Place<'tcx>,
    ready_block: BasicBlock,
    yield_block: BasicBlock,
) -> BasicBlock {
    let poll_enum_adt = poll_enum.ty_adt_def().unwrap();
    // Discriminant values for `Poll::Ready` / `Poll::Pending`, resolved via
    // their lang items so no variant order is assumed.
    let Discr { val: poll_ready_discr, ty: poll_discr_ty } = poll_enum
        .discriminant_for_variant(
            tcx,
            poll_enum_adt.variant_index_with_id(tcx.require_lang_item(LangItem::PollReady, None)),
        )
        .unwrap();
    let poll_pending_discr = poll_enum
        .discriminant_for_variant(
            tcx,
            poll_enum_adt.variant_index_with_id(tcx.require_lang_item(LangItem::PollPending, None)),
        )
        .unwrap()
        .val;
    let source_info = SourceInfo::outermost(body.span);
    // Fresh local holding the extracted discriminant.
    let poll_discr_place =
        Place::from(body.local_decls.push(LocalDecl::new(poll_discr_ty, source_info.span)));
    let discr_assign = Statement {
        source_info,
        kind: StatementKind::Assign(Box::new((
            poll_discr_place,
            Rvalue::Discriminant(*poll_unit_place),
        ))),
    };
    let unreachable_block = insert_term_block(body, TerminatorKind::Unreachable);
    body.basic_blocks_mut().push(BasicBlockData {
        statements: [discr_assign].to_vec(),
        terminator: Some(Terminator {
            source_info,
            kind: TerminatorKind::SwitchInt {
                discr: Operand::Move(poll_discr_place),
                targets: SwitchTargets::new(
                    [(poll_ready_discr, ready_block), (poll_pending_discr, yield_block)]
                        .into_iter(),
                    unreachable_block,
                ),
            },
        }),
        is_cleanup: false,
    })
}
/// Gather blocks reachable through the `drop` targets of `Yield` and `Drop`
/// terminators (chained) — the coroutine "dropline".
///
/// Walks in reverse postorder: a block already on the dropline propagates it
/// to all its successors; otherwise the `drop` edge (if any) seeds it.
/// The two identical match arms of the original are merged into an or-pattern.
fn gather_dropline_blocks<'tcx>(body: &mut Body<'tcx>) -> DenseBitSet<BasicBlock> {
    let mut dropline: DenseBitSet<BasicBlock> = DenseBitSet::new_empty(body.basic_blocks.len());
    for (bb, data) in traversal::reverse_postorder(body) {
        if dropline.contains(bb) {
            // Already on the dropline: everything it reaches is too.
            data.terminator().successors().for_each(|v| {
                dropline.insert(v);
            });
        } else {
            // Entry edges into the dropline are the `drop` targets of
            // `Yield` and `Drop` terminators.
            match data.terminator().kind {
                TerminatorKind::Yield { drop: Some(v), .. }
                | TerminatorKind::Drop { drop: Some(v), .. } => {
                    dropline.insert(v);
                }
                _ => (),
            }
        }
    }
    dropline
}
/// Cleanup all async drops (reset to sync).
///
/// Clears the `drop` and `async_fut` fields of every `Drop` terminator in the
/// body. The original guarded the writes behind `is_some()` checks, but
/// writing `None` over `None` is a no-op, so the guard was redundant and is
/// removed.
pub(super) fn cleanup_async_drops<'tcx>(body: &mut Body<'tcx>) {
    for block in body.basic_blocks_mut() {
        if let TerminatorKind::Drop {
            place: _,
            target: _,
            unwind: _,
            replace: _,
            ref mut drop,
            ref mut async_fut,
        } = block.terminator_mut().kind
        {
            *drop = None;
            *async_fut = None;
        }
    }
}
/// Returns `true` if the body contains at least one `Drop` terminator that
/// will be expanded into an async drop (poll loop) by `expand_async_drops`.
///
/// A drop is expandable when it is not in a cleanup block, does not drop the
/// coroutine state itself, and has an `async_fut` local assigned.
/// Idiom fix: the trailing `return false;` becomes a tail expression.
pub(super) fn has_expandable_async_drops<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mut Body<'tcx>,
    coroutine_ty: Ty<'tcx>,
) -> bool {
    for bb in START_BLOCK..body.basic_blocks.next_index() {
        // Drops in unwind path (cleanup blocks) are not expanded to async drops,
        // only sync drops are used on the unwind path.
        if body[bb].is_cleanup {
            continue;
        }
        let TerminatorKind::Drop { place, target: _, unwind: _, replace: _, drop: _, async_fut } =
            body[bb].terminator().kind
        else {
            continue;
        };
        let place_ty = place.ty(&body.local_decls, tcx).ty;
        // Dropping the coroutine state itself is never expanded here.
        if place_ty == coroutine_ty {
            continue;
        }
        // Only drops with an assigned async-drop future local are expandable.
        if async_fut.is_none() {
            continue;
        }
        return true;
    }
    false
}
/// Expand Drop terminator for async drops into mainline poll-switch and dropline poll-switch
///
/// Every `Drop` terminator that still carries an `async_fut` local is rewritten
/// into a poll loop over the already-created drop future:
/// pin the future, poll it, and on `Poll::Ready` continue to the drop's
/// `target` (or its `drop` dropline target), on `Poll::Pending` yield.
/// Drops that must stay synchronous — drops in cleanup (unwind) blocks, the
/// drop of the coroutine itself, and drops without a future or dropline —
/// have their `drop`/`async_fut` fields cleared instead.
pub(super) fn expand_async_drops<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mut Body<'tcx>,
    context_mut_ref: Ty<'tcx>,
    coroutine_kind: hir::CoroutineKind,
    coroutine_ty: Ty<'tcx>,
) {
    // Blocks reachable through the coroutine-drop (dropline) path.
    let dropline = gather_dropline_blocks(body);
    // Clean drop and async_fut fields if potentially async drop is not expanded (stays sync)
    let remove_asyncness = |block: &mut BasicBlockData<'tcx>| {
        if let TerminatorKind::Drop {
            place: _,
            target: _,
            unwind: _,
            replace: _,
            ref mut drop,
            ref mut async_fut,
        } = block.terminator_mut().kind
        {
            *drop = None;
            *async_fut = None;
        }
    };
    // Iterate by index: new blocks pushed during expansion must not be revisited.
    for bb in START_BLOCK..body.basic_blocks.next_index() {
        // Drops in unwind path (cleanup blocks) are not expanded to async drops, only sync drops in unwind path
        if body[bb].is_cleanup {
            remove_asyncness(&mut body[bb]);
            continue;
        }
        let TerminatorKind::Drop { place, target, unwind, replace: _, drop, async_fut } =
            body[bb].terminator().kind
        else {
            continue;
        };
        let place_ty = place.ty(&body.local_decls, tcx).ty;
        // The coroutine dropping itself stays a sync drop.
        if place_ty == coroutine_ty {
            remove_asyncness(&mut body[bb]);
            continue;
        }
        // No prepared drop future means nothing to expand.
        let Some(fut_local) = async_fut else {
            remove_asyncness(&mut body[bb]);
            continue;
        };
        let is_dropline_bb = dropline.contains(bb);
        // A mainline drop with no dropline continuation also stays sync.
        if !is_dropline_bb && drop.is_none() {
            remove_asyncness(&mut body[bb]);
            continue;
        }
        let fut_place = Place::from(fut_local);
        let fut_ty = fut_place.ty(&body.local_decls, tcx).ty;
        // poll-code:
        // state_call_drop:
        // #bb_pin: fut_pin = Pin<FutT>::new_unchecked(&mut fut)
        // #bb_call: rv = call fut.poll() (or future_drop_poll(fut) for internal future drops)
        // #bb_check: match (rv)
        //  pending => return rv (yield)
        //  ready => *continue_bb|drop_bb*
        // Compute Poll<> (aka Poll with void return)
        let poll_adt_ref = tcx.adt_def(tcx.require_lang_item(LangItem::Poll, None));
        let poll_enum = Ty::new_adt(tcx, poll_adt_ref, tcx.mk_args(&[tcx.types.unit.into()]));
        let poll_decl = LocalDecl::new(poll_enum, body.span);
        let poll_unit_place = Place::from(body.local_decls.push(poll_decl));
        // First state-loop yield for mainline
        let context_ref_place =
            Place::from(body.local_decls.push(LocalDecl::new(context_mut_ref, body.span)));
        let source_info = body[bb].terminator.as_ref().unwrap().source_info;
        // Capture the resume context argument before entering the poll loop.
        let arg = Rvalue::Use(Operand::Move(Place::from(CTX_ARG)));
        body[bb].statements.push(Statement {
            source_info,
            kind: StatementKind::Assign(Box::new((context_ref_place, arg))),
        });
        let yield_block = insert_term_block(body, TerminatorKind::Unreachable); // `kind` replaced later to yield
        // Ready => `target`, Pending => `yield_block`.
        let switch_block =
            build_poll_switch(tcx, body, poll_enum, &poll_unit_place, target, yield_block);
        let (pin_bb, fut_pin_place) =
            build_pin_fut(tcx, body, fut_place.clone(), UnwindAction::Continue);
        let call_bb = build_poll_call(
            tcx,
            body,
            &poll_unit_place,
            switch_block,
            &fut_pin_place,
            fut_ty,
            &context_ref_place,
            unwind,
        );
        // Second state-loop yield for transition to dropline (when coroutine async drop started)
        let mut dropline_transition_bb: Option<BasicBlock> = None;
        let mut dropline_yield_bb: Option<BasicBlock> = None;
        let mut dropline_context_ref: Option<Place<'_>> = None;
        let mut dropline_call_bb: Option<BasicBlock> = None;
        if !is_dropline_bb {
            // Build a second pin/poll/switch chain used when the surrounding
            // coroutine itself gets dropped mid-poll.
            let context_ref_place2: Place<'_> =
                Place::from(body.local_decls.push(LocalDecl::new(context_mut_ref, body.span)));
            let drop_yield_block = insert_term_block(body, TerminatorKind::Unreachable); // `kind` replaced later to yield
            let drop_switch_block = build_poll_switch(
                tcx,
                body,
                poll_enum,
                &poll_unit_place,
                drop.unwrap(),
                drop_yield_block,
            );
            let (pin_bb2, fut_pin_place2) =
                build_pin_fut(tcx, body, fut_place, UnwindAction::Continue);
            let drop_call_bb = build_poll_call(
                tcx,
                body,
                &poll_unit_place,
                drop_switch_block,
                &fut_pin_place2,
                fut_ty,
                &context_ref_place2,
                unwind,
            );
            dropline_transition_bb = Some(pin_bb2);
            dropline_yield_bb = Some(drop_yield_block);
            dropline_context_ref = Some(context_ref_place2);
            dropline_call_bb = Some(drop_call_bb);
        }
        // value needed only for return-yields or gen-coroutines, so just const here
        let value = Operand::Constant(Box::new(ConstOperand {
            span: body.span,
            user_ty: None,
            const_: Const::from_bool(tcx, false),
        }));
        use rustc_middle::mir::AssertKind::ResumedAfterDrop;
        let panic_bb = insert_panic_block(tcx, body, ResumedAfterDrop(coroutine_kind));
        if is_dropline_bb {
            // Already in the dropline: resuming normally is a bug (panic),
            // dropping again just re-enters the poll loop.
            body[yield_block].terminator_mut().kind = TerminatorKind::Yield {
                value: value.clone(),
                resume: panic_bb,
                resume_arg: context_ref_place,
                drop: Some(pin_bb),
            };
        } else {
            // Mainline: resume re-polls; a coroutine drop transitions into the
            // dropline pin/poll chain built above.
            body[yield_block].terminator_mut().kind = TerminatorKind::Yield {
                value: value.clone(),
                resume: pin_bb,
                resume_arg: context_ref_place,
                drop: dropline_transition_bb,
            };
            body[dropline_yield_bb.unwrap()].terminator_mut().kind = TerminatorKind::Yield {
                value,
                resume: panic_bb,
                resume_arg: dropline_context_ref.unwrap(),
                drop: dropline_transition_bb,
            };
        }
        // Wire the pin call's return edge into the poll call block.
        if let TerminatorKind::Call { ref mut target, .. } = body[pin_bb].terminator_mut().kind {
            *target = Some(call_bb);
        } else {
            bug!()
        }
        if !is_dropline_bb {
            if let TerminatorKind::Call { ref mut target, .. } =
                body[dropline_transition_bb.unwrap()].terminator_mut().kind
            {
                *target = dropline_call_bb;
            } else {
                bug!()
            }
        }
        // Replace the original Drop terminator with a jump into the poll loop.
        body[bb].terminator_mut().kind = TerminatorKind::Goto { target: pin_bb };
    }
}
/// Elaborates every `Drop(SELF_ARG)` terminator in the coroutine drop shim
/// via `DropShimElaborator`, turning each into the open drop of the
/// coroutine's upvars.
pub(super) fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    use crate::elaborate_drop::{Unwind, elaborate_drop};
    use crate::patch::MirPatch;
    use crate::shim::DropShimElaborator;
    // Note that `elaborate_drops` only drops the upvars of a coroutine, and
    // this is ok because `open_drop` can only be reached within that own
    // coroutine's resume function.
    let typing_env = body.typing_env(tcx);
    let mut elaborator = DropShimElaborator {
        body,
        patch: MirPatch::new(body),
        tcx,
        typing_env,
        // Only synchronous drops are produced at this point.
        produce_async_drops: false,
    };
    for (block, block_data) in body.basic_blocks.iter_enumerated() {
        // Only `Drop` terminators whose place is exactly the coroutine's own
        // `SELF_ARG` are elaborated; everything else is left untouched.
        let (target, unwind, source_info, dropline) = match block_data.terminator() {
            Terminator {
                source_info,
                kind: TerminatorKind::Drop { place, target, unwind, replace: _, drop, async_fut: _ },
            } => {
                if let Some(local) = place.as_local()
                    && local == SELF_ARG
                {
                    (target, unwind, source_info, *drop)
                } else {
                    continue;
                }
            }
            _ => continue,
        };
        // Choose the unwind continuation for the elaborated drop ladder,
        // materializing resume/unreachable/terminate blocks via the patch as needed.
        let unwind = if block_data.is_cleanup {
            Unwind::InCleanup
        } else {
            Unwind::To(match *unwind {
                UnwindAction::Cleanup(tgt) => tgt,
                UnwindAction::Continue => elaborator.patch.resume_block(),
                UnwindAction::Unreachable => elaborator.patch.unreachable_cleanup_block(),
                UnwindAction::Terminate(reason) => elaborator.patch.terminate_block(reason),
            })
        };
        elaborate_drop(
            &mut elaborator,
            *source_info,
            Place::from(SELF_ARG),
            (),
            *target,
            unwind,
            block,
            dropline,
        );
    }
    // All elaborations were recorded in the patch; apply them in one go.
    elaborator.patch.apply(body);
}
/// Appends the "clean drop" block: a `Drop(SELF_ARG)` used when the coroutine
/// is destroyed in its UNRESUMED state. Returns the new block's id.
pub(super) fn insert_clean_drop<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mut Body<'tcx>,
    has_async_drops: bool,
) -> BasicBlock {
    let outermost_info = SourceInfo::outermost(body.span);
    // Where the drop continues on success: async shims return `Poll::Ready(())`,
    // sync shims simply return.
    let exit_block = match has_async_drops {
        true => insert_poll_ready_block(tcx, body),
        false => insert_term_block(body, TerminatorKind::Return),
    };
    // FIXME: When move insert_clean_drop + elaborate_coroutine_drops before async drops expand,
    // also set dropline here:
    // let dropline = if has_async_drops { Some(return_block) } else { None };
    let dropline: Option<BasicBlock> = None;
    // Create a block to destroy an unresumed coroutines. This can only destroy upvars.
    let drop_self = Terminator {
        source_info: outermost_info,
        kind: TerminatorKind::Drop {
            place: Place::from(SELF_ARG),
            target: exit_block,
            unwind: UnwindAction::Continue,
            replace: false,
            drop: dropline,
            async_fut: None,
        },
    };
    body.basic_blocks_mut().push(BasicBlockData {
        statements: Vec::new(),
        terminator: Some(drop_self),
        is_cleanup: false,
    })
}
/// Builds the synchronous coroutine drop shim: a `drop_in_place`-flavored body
/// that switches on the coroutine's state discriminant and drops whatever is
/// live in that state, returning unit.
pub(super) fn create_coroutine_drop_shim<'tcx>(
    tcx: TyCtxt<'tcx>,
    transform: &TransformVisitor<'tcx>,
    coroutine_ty: Ty<'tcx>,
    body: &Body<'tcx>,
    drop_clean: BasicBlock,
) -> Body<'tcx> {
    let mut body = body.clone();
    // Take the coroutine info out of the body, since the drop shim is
    // not a coroutine body itself; it just has its drop built out of it.
    let _ = body.coroutine.take();
    // Make sure the resume argument is not included here, since we're
    // building a body for `drop_in_place`.
    body.arg_count = 1;
    let source_info = SourceInfo::outermost(body.span);
    // One switch case per suspension state; UNRESUMED goes straight to the
    // clean-drop block prepared by `insert_clean_drop`.
    let mut cases = create_cases(&mut body, transform, Operation::Drop);
    cases.insert(0, (CoroutineArgs::UNRESUMED, drop_clean));
    // The returned state and the poisoned state fall through to the default
    // case which is just to return
    let default_block = insert_term_block(&mut body, TerminatorKind::Return);
    insert_switch(&mut body, cases, transform, default_block);
    // Inside a drop shim, `CoroutineDrop` terminators become plain returns.
    for block in body.basic_blocks_mut() {
        let kind = &mut block.terminator_mut().kind;
        if let TerminatorKind::CoroutineDrop = *kind {
            *kind = TerminatorKind::Return;
        }
    }
    // Replace the return variable
    body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(tcx.types.unit, source_info);
    make_coroutine_state_argument_indirect(tcx, &mut body);
    // Change the coroutine argument from &mut to *mut
    body.local_decls[SELF_ARG] =
        LocalDecl::with_source_info(Ty::new_mut_ptr(tcx, coroutine_ty), source_info);
    // Make sure we remove dead blocks to remove
    // unrelated code from the resume part of the function
    simplify::remove_dead_blocks(&mut body);
    // Update the body's def to become the drop glue.
    let coroutine_instance = body.source.instance;
    let drop_in_place = tcx.require_lang_item(LangItem::DropInPlace, None);
    let drop_instance = InstanceKind::DropGlue(drop_in_place, Some(coroutine_ty));
    // Temporary change MirSource to coroutine's instance so that dump_mir produces more sensible
    // filename.
    body.source.instance = coroutine_instance;
    dump_mir(tcx, false, "coroutine_drop", &0, &body, |_, _| Ok(()));
    body.source.instance = drop_instance;
    // Creating a coroutine drop shim happens on `Analysis(PostCleanup) -> Runtime(Initial)`
    // but the pass manager doesn't update the phase of the coroutine drop shim. Update the
    // phase of the drop shim so that later on when we run the pass manager on the shim, in
    // the `mir_shims` query, we don't ICE on the intra-pass validation before we've updated
    // the phase of the body from analysis.
    body.phase = MirPhase::Runtime(RuntimePhase::Initial);
    body
}
// Create async drop shim function to drop coroutine itself
/// Builds the asynchronous coroutine drop shim: a poll-style body returning
/// `Poll<()>` that switches on the coroutine's state discriminant and drops
/// the live contents of that state, potentially yielding across await points.
pub(super) fn create_coroutine_drop_shim_async<'tcx>(
    tcx: TyCtxt<'tcx>,
    transform: &TransformVisitor<'tcx>,
    body: &Body<'tcx>,
    drop_clean: BasicBlock,
    can_unwind: bool,
) -> Body<'tcx> {
    let mut body = body.clone();
    // Take the coroutine info out of the body, since the drop shim is
    // not a coroutine body itself; it just has its drop built out of it.
    let _ = body.coroutine.take();
    // Rewrite `return RetT` shaped exits into the shim's `Poll<()>` shape.
    FixReturnPendingVisitor { tcx }.visit_body(&mut body);
    // Poison the coroutine when it unwinds
    if can_unwind {
        generate_poison_block_and_redirect_unwinds_there(transform, &mut body);
    }
    let source_info = SourceInfo::outermost(body.span);
    // One switch case per suspension state; UNRESUMED goes straight to the
    // clean-drop block prepared by `insert_clean_drop`.
    let mut cases = create_cases(&mut body, transform, Operation::Drop);
    cases.insert(0, (CoroutineArgs::UNRESUMED, drop_clean));
    use rustc_middle::mir::AssertKind::ResumedAfterPanic;
    // Panic when resumed on the returned or poisoned state
    if can_unwind {
        cases.insert(
            1,
            (
                CoroutineArgs::POISONED,
                insert_panic_block(tcx, &mut body, ResumedAfterPanic(transform.coroutine_kind)),
            ),
        );
    }
    // RETURNED state also goes to default_block with `return Ready<()>`.
    // For fully-polled coroutine, async drop has nothing to do.
    let default_block = insert_poll_ready_block(tcx, &mut body);
    insert_switch(&mut body, cases, transform, default_block);
    // Inside the drop shim, `CoroutineDrop` becomes `return Poll::Ready(())`.
    for block in body.basic_blocks_mut() {
        let kind = &mut block.terminator_mut().kind;
        if let TerminatorKind::CoroutineDrop = *kind {
            *kind = TerminatorKind::Return;
            block.statements.push(return_poll_ready_assign(tcx, source_info));
        }
    }
    // Replace the return variable: Poll<RetT> to Poll<()>
    let poll_adt_ref = tcx.adt_def(tcx.require_lang_item(LangItem::Poll, None));
    let poll_enum = Ty::new_adt(tcx, poll_adt_ref, tcx.mk_args(&[tcx.types.unit.into()]));
    body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(poll_enum, source_info);
    make_coroutine_state_argument_indirect(tcx, &mut body);
    match transform.coroutine_kind {
        // Iterator::next doesn't accept a pinned argument,
        // unlike for all other coroutine kinds.
        CoroutineKind::Desugared(CoroutineDesugaring::Gen, _) => {}
        _ => {
            make_coroutine_state_argument_pinned(tcx, &mut body);
        }
    }
    // Make sure we remove dead blocks to remove
    // unrelated code from the resume part of the function
    simplify::remove_dead_blocks(&mut body);
    pm::run_passes_no_validate(
        tcx,
        &mut body,
        &[&abort_unwinding_calls::AbortUnwindingCalls],
        None,
    );
    dump_mir(tcx, false, "coroutine_drop_async", &0, &body, |_, _| Ok(()));
    body
}
// Create async drop shim proxy function for future_drop_poll
// It is just { call coroutine_drop(); return Poll::Ready(); }
pub(super) fn create_coroutine_drop_shim_proxy_async<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &Body<'tcx>,
) -> Body<'tcx> {
    let mut shim = body.clone();
    // The proxy is a plain function, not a coroutine: strip the coroutine info
    // from the cloned body.
    let _ = shim.coroutine.take();
    // Start from an empty CFG and discard all variable debug info.
    shim.basic_blocks = BasicBlocks::new(IndexVec::new());
    shim.var_debug_info.clear();
    // Keep only the return place plus the arguments.
    let kept_locals = 1 + shim.arg_count;
    shim.local_decls.truncate(kept_locals);
    let src_info = SourceInfo::outermost(shim.span);
    // Replace the return variable: Poll<RetT> to Poll<()>
    let poll_def = tcx.adt_def(tcx.require_lang_item(LangItem::Poll, None));
    let poll_unit_ty = Ty::new_adt(tcx, poll_def, tcx.mk_args(&[tcx.types.unit.into()]));
    shim.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(poll_unit_ty, src_info);
    // Block performing the coroutine drop; its terminator is filled in below
    // once the ready-return block exists.
    let drop_bb = shim.basic_blocks_mut().push(BasicBlockData {
        statements: Vec::new(),
        terminator: None,
        is_cleanup: false,
    });
    // Block returning Poll::Ready(()).
    let ready_bb = insert_poll_ready_block(tcx, &mut shim);
    shim.basic_blocks_mut()[drop_bb].terminator = Some(Terminator {
        source_info: src_info,
        kind: TerminatorKind::Drop {
            place: Place::from(SELF_ARG),
            target: ready_bb,
            unwind: UnwindAction::Continue,
            replace: false,
            drop: None,
            async_fut: None,
        },
    });
    dump_mir(tcx, false, "coroutine_drop_proxy_async", &0, &shim, |_, _| Ok(()));
    shim
}

View file

@ -4,12 +4,12 @@ use rustc_abi::{FIRST_VARIANT, FieldIdx, VariantIdx};
use rustc_hir::lang_items::LangItem;
use rustc_index::Idx;
use rustc_middle::mir::*;
use rustc_middle::span_bug;
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::util::IntTypeExt;
use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt};
use rustc_middle::ty::{self, GenericArg, GenericArgsRef, Ty, TyCtxt};
use rustc_middle::{bug, span_bug, traits};
use rustc_span::DUMMY_SP;
use rustc_span::source_map::Spanned;
use rustc_span::source_map::{Spanned, dummy_spanned};
use tracing::{debug, instrument};
use crate::patch::MirPatch;
@ -94,6 +94,9 @@ pub(crate) trait DropElaborator<'a, 'tcx>: fmt::Debug {
fn body(&self) -> &'a Body<'tcx>;
fn tcx(&self) -> TyCtxt<'tcx>;
fn typing_env(&self) -> ty::TypingEnv<'tcx>;
fn allow_async_drops(&self) -> bool;
fn terminator_loc(&self, bb: BasicBlock) -> Location;
// Drop logic
@ -149,6 +152,7 @@ where
path: D::Path,
succ: BasicBlock,
unwind: Unwind,
dropline: Option<BasicBlock>,
}
/// "Elaborates" a drop of `place`/`path` and patches `bb`'s terminator to execute it.
@ -167,11 +171,12 @@ pub(crate) fn elaborate_drop<'b, 'tcx, D>(
succ: BasicBlock,
unwind: Unwind,
bb: BasicBlock,
dropline: Option<BasicBlock>,
) where
D: DropElaborator<'b, 'tcx>,
'tcx: 'b,
{
DropCtxt { elaborator, source_info, place, path, succ, unwind }.elaborate_drop(bb)
DropCtxt { elaborator, source_info, place, path, succ, unwind, dropline }.elaborate_drop(bb)
}
impl<'a, 'b, 'tcx, D> DropCtxt<'a, 'b, 'tcx, D>
@ -195,6 +200,209 @@ where
self.elaborator.tcx()
}
// Generates three blocks:
// * #1:pin_obj_bb: call Pin<ObjTy>::new_unchecked(&mut obj)
// * #2:call_drop_bb: fut = call obj.<AsyncDrop::drop>() OR call async_drop_in_place<T>(obj)
// * #3:drop_term_bb: drop (obj, fut, ...)
// We keep async drop unexpanded to poll-loop here, to expand it later, at StateTransform -
// into states expand.
// call_destructor_only - to call only AsyncDrop::drop, not full async_drop_in_place glue
fn build_async_drop(
&mut self,
place: Place<'tcx>,
drop_ty: Ty<'tcx>,
bb: Option<BasicBlock>,
succ: BasicBlock,
unwind: Unwind,
dropline: Option<BasicBlock>,
call_destructor_only: bool,
) -> BasicBlock {
let tcx = self.tcx();
let span = self.source_info.span;
let pin_obj_bb = bb.unwrap_or_else(|| {
self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
// Temporary terminator, will be replaced by patch
source_info: self.source_info,
kind: TerminatorKind::Return,
}),
is_cleanup: false,
})
});
let (fut_ty, drop_fn_def_id, trait_args) = if call_destructor_only {
// Resolving obj.<AsyncDrop::drop>()
let trait_ref = ty::TraitRef::new(
tcx,
tcx.require_lang_item(LangItem::AsyncDrop, Some(span)),
[drop_ty],
);
let (drop_trait, trait_args) = match tcx.codegen_select_candidate(
ty::TypingEnv::fully_monomorphized().as_query_input(trait_ref),
) {
Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData {
impl_def_id,
args,
..
})) => (*impl_def_id, *args),
impl_source => {
span_bug!(span, "invalid `AsyncDrop` impl_source: {:?}", impl_source);
}
};
let drop_fn_def_id = tcx.associated_item_def_ids(drop_trait)[0];
let drop_fn = Ty::new_fn_def(tcx, drop_fn_def_id, trait_args);
let sig = drop_fn.fn_sig(tcx);
let sig = tcx.instantiate_bound_regions_with_erased(sig);
(sig.output(), drop_fn_def_id, trait_args)
} else {
// Resolving async_drop_in_place<T> function for drop_ty
let drop_fn_def_id = tcx.require_lang_item(LangItem::AsyncDropInPlace, Some(span));
let trait_args = tcx.mk_args(&[drop_ty.into()]);
let sig = tcx.fn_sig(drop_fn_def_id).instantiate(tcx, trait_args);
let sig = tcx.instantiate_bound_regions_with_erased(sig);
(sig.output(), drop_fn_def_id, trait_args)
};
let fut = Place::from(self.new_temp(fut_ty));
// #1:pin_obj_bb >>> obj_ref = &mut obj
let obj_ref_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, drop_ty);
let obj_ref_place = Place::from(self.new_temp(obj_ref_ty));
let term_loc = self.elaborator.terminator_loc(pin_obj_bb);
self.elaborator.patch().add_assign(
term_loc,
obj_ref_place,
Rvalue::Ref(
tcx.lifetimes.re_erased,
BorrowKind::Mut { kind: MutBorrowKind::Default },
place,
),
);
// pin_obj_place preparation
let pin_obj_new_unchecked_fn = Ty::new_fn_def(
tcx,
tcx.require_lang_item(LangItem::PinNewUnchecked, Some(span)),
[GenericArg::from(obj_ref_ty)],
);
let pin_obj_ty = pin_obj_new_unchecked_fn.fn_sig(tcx).output().no_bound_vars().unwrap();
let pin_obj_place = Place::from(self.new_temp(pin_obj_ty));
let pin_obj_new_unchecked_fn = Operand::Constant(Box::new(ConstOperand {
span,
user_ty: None,
const_: Const::zero_sized(pin_obj_new_unchecked_fn),
}));
// #3:drop_term_bb
let drop_term_bb = self.new_block(
unwind,
TerminatorKind::Drop {
place,
target: succ,
unwind: unwind.into_action(),
replace: false,
drop: dropline,
async_fut: Some(fut.local),
},
);
// #2:call_drop_bb
let mut call_statements = Vec::new();
let drop_arg = if call_destructor_only {
pin_obj_place
} else {
let ty::Adt(adt_def, adt_args) = pin_obj_ty.kind() else {
bug!();
};
let obj_ptr_ty = Ty::new_mut_ptr(tcx, drop_ty);
let obj_ptr_place = Place::from(self.new_temp(obj_ptr_ty));
let unwrap_ty = adt_def.non_enum_variant().fields[FieldIdx::ZERO].ty(tcx, adt_args);
let addr = Rvalue::RawPtr(
RawPtrKind::Mut,
pin_obj_place.project_deeper(
&[ProjectionElem::Field(FieldIdx::ZERO, unwrap_ty), ProjectionElem::Deref],
tcx,
),
);
call_statements.push(self.assign(obj_ptr_place, addr));
obj_ptr_place
};
call_statements.push(Statement {
source_info: self.source_info,
kind: StatementKind::StorageLive(fut.local),
});
let call_drop_bb = self.new_block_with_statements(
unwind,
call_statements,
TerminatorKind::Call {
func: Operand::function_handle(tcx, drop_fn_def_id, trait_args, span),
args: [Spanned { node: Operand::Move(drop_arg), span: DUMMY_SP }].into(),
destination: fut,
target: Some(drop_term_bb),
unwind: unwind.into_action(),
call_source: CallSource::Misc,
fn_span: self.source_info.span,
},
);
// StorageDead(fut) in self.succ block (at the begin)
self.elaborator.patch().add_statement(
Location { block: self.succ, statement_index: 0 },
StatementKind::StorageDead(fut.local),
);
// #1:pin_obj_bb >>> call Pin<ObjTy>::new_unchecked(&mut obj)
self.elaborator.patch().patch_terminator(
pin_obj_bb,
TerminatorKind::Call {
func: pin_obj_new_unchecked_fn,
args: [dummy_spanned(Operand::Move(obj_ref_place))].into(),
destination: pin_obj_place,
target: Some(call_drop_bb),
unwind: unwind.into_action(),
call_source: CallSource::Misc,
fn_span: span,
},
);
pin_obj_bb
}
fn build_drop(&mut self, bb: BasicBlock) {
let drop_ty = self.place_ty(self.place);
if self.tcx().features().async_drop()
&& self.elaborator.body().coroutine.is_some()
&& self.elaborator.allow_async_drops()
&& !self.elaborator.body()[bb].is_cleanup
&& drop_ty.needs_async_drop(self.tcx(), self.elaborator.typing_env())
{
self.build_async_drop(
self.place,
drop_ty,
Some(bb),
self.succ,
self.unwind,
self.dropline,
false,
);
} else {
self.elaborator.patch().patch_terminator(
bb,
TerminatorKind::Drop {
place: self.place,
target: self.succ,
unwind: self.unwind.into_action(),
replace: false,
drop: None,
async_fut: None,
},
);
}
}
/// This elaborates a single drop instruction, located at `bb`, and
/// patches over it.
///
@ -222,15 +430,7 @@ where
.patch_terminator(bb, TerminatorKind::Goto { target: self.succ });
}
DropStyle::Static => {
self.elaborator.patch().patch_terminator(
bb,
TerminatorKind::Drop {
place: self.place,
target: self.succ,
unwind: self.unwind.into_action(),
replace: false,
},
);
self.build_drop(bb);
}
DropStyle::Conditional => {
let drop_bb = self.complete_drop(self.succ, self.unwind);
@ -289,6 +489,7 @@ where
path: Option<D::Path>,
succ: BasicBlock,
unwind: Unwind,
dropline: Option<BasicBlock>,
) -> BasicBlock {
if let Some(path) = path {
debug!("drop_subpath: for std field {:?}", place);
@ -300,6 +501,7 @@ where
place,
succ,
unwind,
dropline,
}
.elaborated_drop_block()
} else {
@ -311,6 +513,7 @@ where
place,
succ,
unwind,
dropline,
// Using `self.path` here to condition the drop on
// our own drop flag.
path: self.path,
@ -325,25 +528,36 @@ where
///
/// `unwind_ladder` is such a list of steps in reverse order,
/// which is called if the matching step of the drop glue panics.
///
/// `dropline_ladder` is a similar list of steps in reverse order,
/// which is called if the matching step of the drop glue will contain async drop
/// (expanded later to Yield) and the containing coroutine will be dropped at this point.
fn drop_halfladder(
&mut self,
unwind_ladder: &[Unwind],
dropline_ladder: &[Option<BasicBlock>],
mut succ: BasicBlock,
fields: &[(Place<'tcx>, Option<D::Path>)],
) -> Vec<BasicBlock> {
iter::once(succ)
.chain(fields.iter().rev().zip(unwind_ladder).map(|(&(place, path), &unwind_succ)| {
succ = self.drop_subpath(place, path, succ, unwind_succ);
succ
}))
.chain(itertools::izip!(fields.iter().rev(), unwind_ladder, dropline_ladder).map(
|(&(place, path), &unwind_succ, &dropline_to)| {
succ = self.drop_subpath(place, path, succ, unwind_succ, dropline_to);
succ
},
))
.collect()
}
fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind) {
fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind, Option<BasicBlock>) {
// Clear the "master" drop flag at the end. This is needed
// because the "master" drop protects the ADT's discriminant,
// which is invalidated after the ADT is dropped.
(self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind), self.unwind)
(
self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind),
self.unwind,
self.dropline,
)
}
/// Creates a full drop ladder, consisting of 2 connected half-drop-ladders
@ -361,6 +575,22 @@ where
/// .c2:
/// ELAB(drop location.2 [target=`self.unwind`])
///
/// For possible-async drops in coroutines we also need dropline ladder
/// .d0 (mainline):
/// ELAB(drop location.0 [target=.d1, unwind=.c1, drop=.e1])
/// .d1 (mainline):
/// ELAB(drop location.1 [target=.d2, unwind=.c2, drop=.e2])
/// .d2 (mainline):
/// ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`, drop=`self.drop`])
/// .c1 (unwind):
/// ELAB(drop location.1 [target=.c2])
/// .c2 (unwind):
/// ELAB(drop location.2 [target=`self.unwind`])
/// .e1 (dropline):
/// ELAB(drop location.1 [target=.e2, unwind=.c2])
/// .e2 (dropline):
/// ELAB(drop location.2 [target=`self.drop`, unwind=`self.unwind`])
///
/// NOTE: this does not clear the master drop flag, so you need
/// to point succ/unwind on a `drop_ladder_bottom`.
fn drop_ladder(
@ -368,8 +598,13 @@ where
fields: Vec<(Place<'tcx>, Option<D::Path>)>,
succ: BasicBlock,
unwind: Unwind,
) -> (BasicBlock, Unwind) {
dropline: Option<BasicBlock>,
) -> (BasicBlock, Unwind, Option<BasicBlock>) {
debug!("drop_ladder({:?}, {:?})", self, fields);
assert!(
if unwind.is_cleanup() { dropline.is_none() } else { true },
"Dropline is set for cleanup drop ladder"
);
let mut fields = fields;
fields.retain(|&(place, _)| {
@ -378,17 +613,28 @@ where
debug!("drop_ladder - fields needing drop: {:?}", fields);
let dropline_ladder: Vec<Option<BasicBlock>> = vec![None; fields.len() + 1];
let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind {
let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields);
let unwind_ladder: Vec<_> = if let Unwind::To(succ) = unwind {
let halfladder = self.drop_halfladder(&unwind_ladder, &dropline_ladder, succ, &fields);
halfladder.into_iter().map(Unwind::To).collect()
} else {
unwind_ladder
};
let dropline_ladder: Vec<_> = if let Some(succ) = dropline {
let halfladder = self.drop_halfladder(&unwind_ladder, &dropline_ladder, succ, &fields);
halfladder.into_iter().map(Some).collect()
} else {
dropline_ladder
};
let normal_ladder = self.drop_halfladder(&unwind_ladder, succ, &fields);
let normal_ladder = self.drop_halfladder(&unwind_ladder, &dropline_ladder, succ, &fields);
(*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap())
(
*normal_ladder.last().unwrap(),
*unwind_ladder.last().unwrap(),
*dropline_ladder.last().unwrap(),
)
}
fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
@ -405,8 +651,8 @@ where
})
.collect();
let (succ, unwind) = self.drop_ladder_bottom();
self.drop_ladder(fields, succ, unwind).0
let (succ, unwind, dropline) = self.drop_ladder_bottom();
self.drop_ladder(fields, succ, unwind, dropline).0
}
/// Drops the T contained in a `Box<T>` if it has not been moved out of
@ -417,6 +663,7 @@ where
args: GenericArgsRef<'tcx>,
succ: BasicBlock,
unwind: Unwind,
dropline: Option<BasicBlock>,
) -> BasicBlock {
// drop glue is sent straight to codegen
// box cannot be directly dereferenced
@ -433,7 +680,7 @@ where
let interior = self.tcx().mk_place_deref(Place::from(ptr_local));
let interior_path = self.elaborator.deref_subpath(self.path);
let do_drop_bb = self.drop_subpath(interior, interior_path, succ, unwind);
let do_drop_bb = self.drop_subpath(interior, interior_path, succ, unwind, dropline);
let setup_bbd = BasicBlockData {
statements: vec![self.assign(
@ -468,19 +715,22 @@ where
let skip_contents = adt.is_union() || adt.is_manually_drop();
let contents_drop = if skip_contents {
(self.succ, self.unwind)
(self.succ, self.unwind, self.dropline)
} else {
self.open_drop_for_adt_contents(adt, args)
};
if adt.is_box() {
// we need to drop the inside of the box before running the destructor
let succ = self.destructor_call_block(contents_drop);
let succ = self.destructor_call_block_sync((contents_drop.0, contents_drop.1));
let unwind = contents_drop
.1
.map(|unwind| self.destructor_call_block((unwind, Unwind::InCleanup)));
.map(|unwind| self.destructor_call_block_sync((unwind, Unwind::InCleanup)));
let dropline = contents_drop
.2
.map(|dropline| self.destructor_call_block_sync((dropline, contents_drop.1)));
self.open_drop_for_box_contents(adt, args, succ, unwind)
self.open_drop_for_box_contents(adt, args, succ, unwind, dropline)
} else if adt.has_dtor(self.tcx()) {
self.destructor_call_block(contents_drop)
} else {
@ -492,14 +742,14 @@ where
&mut self,
adt: ty::AdtDef<'tcx>,
args: GenericArgsRef<'tcx>,
) -> (BasicBlock, Unwind) {
let (succ, unwind) = self.drop_ladder_bottom();
) -> (BasicBlock, Unwind, Option<BasicBlock>) {
let (succ, unwind, dropline) = self.drop_ladder_bottom();
if !adt.is_enum() {
let fields =
self.move_paths_for_fields(self.place, self.path, adt.variant(FIRST_VARIANT), args);
self.drop_ladder(fields, succ, unwind)
self.drop_ladder(fields, succ, unwind, dropline)
} else {
self.open_drop_for_multivariant(adt, args, succ, unwind)
self.open_drop_for_multivariant(adt, args, succ, unwind, dropline)
}
}
@ -509,11 +759,14 @@ where
args: GenericArgsRef<'tcx>,
succ: BasicBlock,
unwind: Unwind,
) -> (BasicBlock, Unwind) {
dropline: Option<BasicBlock>,
) -> (BasicBlock, Unwind, Option<BasicBlock>) {
let mut values = Vec::with_capacity(adt.variants().len());
let mut normal_blocks = Vec::with_capacity(adt.variants().len());
let mut unwind_blocks =
if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants().len())) };
let mut dropline_blocks =
if dropline.is_none() { None } else { Some(Vec::with_capacity(adt.variants().len())) };
let mut have_otherwise_with_drop_glue = false;
let mut have_otherwise = false;
@ -551,11 +804,16 @@ where
let unwind_blocks = unwind_blocks.as_mut().unwrap();
let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
let halfladder = self.drop_halfladder(&unwind_ladder, unwind, &fields);
let dropline_ladder: Vec<Option<BasicBlock>> = vec![None; fields.len() + 1];
let halfladder =
self.drop_halfladder(&unwind_ladder, &dropline_ladder, unwind, &fields);
unwind_blocks.push(halfladder.last().cloned().unwrap());
}
let (normal, _) = self.drop_ladder(fields, succ, unwind);
let (normal, _, drop_bb) = self.drop_ladder(fields, succ, unwind, dropline);
normal_blocks.push(normal);
if dropline.is_some() {
dropline_blocks.as_mut().unwrap().push(drop_bb.unwrap());
}
} else {
have_otherwise = true;
@ -595,6 +853,9 @@ where
Unwind::InCleanup,
)
}),
dropline.map(|dropline| {
self.adt_switch_block(adt, dropline_blocks.unwrap(), &values, dropline, unwind)
}),
)
}
@ -634,8 +895,8 @@ where
self.drop_flag_test_block(switch_block, succ, unwind)
}
fn destructor_call_block(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock {
debug!("destructor_call_block({:?}, {:?})", self, succ);
fn destructor_call_block_sync(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock {
debug!("destructor_call_block_sync({:?}, {:?})", self, succ);
let tcx = self.tcx();
let drop_trait = tcx.require_lang_item(LangItem::Drop, None);
let drop_fn = tcx.associated_item_def_ids(drop_trait)[0];
@ -683,6 +944,30 @@ where
self.drop_flag_test_block(destructor_block, succ, unwind)
}
fn destructor_call_block(
&mut self,
(succ, unwind, dropline): (BasicBlock, Unwind, Option<BasicBlock>),
) -> BasicBlock {
debug!("destructor_call_block({:?}, {:?})", self, succ);
let ty = self.place_ty(self.place);
if self.tcx().features().async_drop()
&& self.elaborator.body().coroutine.is_some()
&& self.elaborator.allow_async_drops()
&& !unwind.is_cleanup()
&& ty.is_async_drop(self.tcx(), self.elaborator.typing_env())
{
let destructor_block =
self.build_async_drop(self.place, ty, None, succ, unwind, dropline, true);
let block_start = Location { block: destructor_block, statement_index: 0 };
self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
self.drop_flag_test_block(destructor_block, succ, unwind)
} else {
self.destructor_call_block_sync((succ, unwind))
}
}
/// Create a loop that drops an array:
///
/// ```text
@ -701,6 +986,7 @@ where
len: Local,
ety: Ty<'tcx>,
unwind: Unwind,
dropline: Option<BasicBlock>,
) -> BasicBlock {
let copy = |place: Place<'tcx>| Operand::Copy(place);
let move_ = |place: Place<'tcx>| Operand::Move(place);
@ -744,16 +1030,35 @@ where
};
let loop_block = self.elaborator.patch().new_block(loop_block);
self.elaborator.patch().patch_terminator(
drop_block,
TerminatorKind::Drop {
place: tcx.mk_place_deref(ptr),
target: loop_block,
unwind: unwind.into_action(),
replace: false,
},
);
let place = tcx.mk_place_deref(ptr);
if self.tcx().features().async_drop()
&& self.elaborator.body().coroutine.is_some()
&& self.elaborator.allow_async_drops()
&& !unwind.is_cleanup()
&& ety.needs_async_drop(self.tcx(), self.elaborator.typing_env())
{
self.build_async_drop(
place,
ety,
Some(drop_block),
loop_block,
unwind,
dropline,
false,
);
} else {
self.elaborator.patch().patch_terminator(
drop_block,
TerminatorKind::Drop {
place,
target: loop_block,
unwind: unwind.into_action(),
replace: false,
drop: None,
async_fut: None,
},
);
}
loop_block
}
@ -820,8 +1125,8 @@ where
(tcx.mk_place_elem(self.place, project), path)
})
.collect::<Vec<_>>();
let (succ, unwind) = self.drop_ladder_bottom();
return self.drop_ladder(fields, succ, unwind).0;
let (succ, unwind, dropline) = self.drop_ladder_bottom();
return self.drop_ladder(fields, succ, unwind, dropline).0;
}
}
@ -855,7 +1160,7 @@ where
&mut self.place,
Place::from(slice_ptr).project_deeper(&[PlaceElem::Deref], tcx),
);
let slice_block = self.drop_loop_pair_for_slice(ety);
let slice_block = self.drop_loop_trio_for_slice(ety);
self.place = array_place;
delegate_block.terminator = Some(Terminator {
@ -865,18 +1170,22 @@ where
self.elaborator.patch().new_block(delegate_block)
}
/// Creates a pair of drop-loops of `place`, which drops its contents, even
/// in the case of 1 panic.
fn drop_loop_pair_for_slice(&mut self, ety: Ty<'tcx>) -> BasicBlock {
debug!("drop_loop_pair_for_slice({:?})", ety);
/// Creates a trio of drop-loops of `place`, which drops its contents, even
/// in the case of 1 panic or in the case of coroutine drop
fn drop_loop_trio_for_slice(&mut self, ety: Ty<'tcx>) -> BasicBlock {
debug!("drop_loop_trio_for_slice({:?})", ety);
let tcx = self.tcx();
let len = self.new_temp(tcx.types.usize);
let cur = self.new_temp(tcx.types.usize);
let unwind =
self.unwind.map(|unwind| self.drop_loop(unwind, cur, len, ety, Unwind::InCleanup));
let unwind = self
.unwind
.map(|unwind| self.drop_loop(unwind, cur, len, ety, Unwind::InCleanup, None));
let loop_block = self.drop_loop(self.succ, cur, len, ety, unwind);
let dropline =
self.dropline.map(|dropline| self.drop_loop(dropline, cur, len, ety, unwind, None));
let loop_block = self.drop_loop(self.succ, cur, len, ety, unwind, dropline);
let [PlaceElem::Deref] = self.place.projection.as_slice() else {
span_bug!(
@ -940,7 +1249,7 @@ where
let size = size.try_to_target_usize(self.tcx());
self.open_drop_for_array(ty, *ety, size)
}
ty::Slice(ety) => self.drop_loop_pair_for_slice(*ety),
ty::Slice(ety) => self.drop_loop_trio_for_slice(*ety),
_ => span_bug!(self.source_info.span, "open drop from non-ADT `{:?}`", ty),
}
@ -977,21 +1286,53 @@ where
fn elaborated_drop_block(&mut self) -> BasicBlock {
debug!("elaborated_drop_block({:?})", self);
let blk = self.drop_block(self.succ, self.unwind);
let blk = self.drop_block_simple(self.succ, self.unwind);
self.elaborate_drop(blk);
blk
}
fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
/// Creates a new block terminated by a plain (synchronous) `Drop` of
/// `self.place`, without attempting any async-drop lowering.
///
/// The elaborator's `dropline` successor (if any) is propagated into the
/// terminator's `drop` edge, so a coroutine-drop path stays reachable.
fn drop_block_simple(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
    self.new_block(
        unwind,
        TerminatorKind::Drop {
            place: self.place,
            target,
            unwind: unwind.into_action(),
            replace: false,
            drop: self.dropline,
            async_fut: None,
        },
    )
}
/// Creates a new block that drops `self.place`, lowering to an async drop
/// when possible and to a plain `Drop` terminator otherwise.
fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
    let drop_ty = self.place_ty(self.place);
    // Async lowering requires all of: the `async_drop` feature gate, being
    // inside a coroutine body, an elaborator that permits async drops, not
    // being on a cleanup (unwind) path, and a type that actually needs
    // async drop glue.
    if self.tcx().features().async_drop()
        && self.elaborator.body().coroutine.is_some()
        && self.elaborator.allow_async_drops()
        && !unwind.is_cleanup()
        && drop_ty.needs_async_drop(self.tcx(), self.elaborator.typing_env())
    {
        // NOTE(review): the async path continues at `self.succ` while the
        // sync path below jumps to the `target` parameter — confirm every
        // caller passes `target == self.succ`, otherwise the two branches
        // resume at different blocks.
        self.build_async_drop(
            self.place,
            drop_ty,
            None,
            self.succ,
            unwind,
            self.dropline,
            false,
        )
    } else {
        // Synchronous fallback: a plain `Drop` terminator with no async
        // future and no coroutine-drop edge.
        let block = TerminatorKind::Drop {
            place: self.place,
            target,
            unwind: unwind.into_action(),
            replace: false,
            drop: None,
            async_fut: None,
        };
        self.new_block(unwind, block)
    }
}
fn goto_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
let block = TerminatorKind::Goto { target };
self.new_block(unwind, block)
@ -1033,6 +1374,19 @@ where
})
}
/// Creates a fresh patched block containing `statements` and terminated by
/// `k`; the block is marked as a cleanup block iff `unwind` is a cleanup
/// context.
fn new_block_with_statements(
    &mut self,
    unwind: Unwind,
    statements: Vec<Statement<'tcx>>,
    k: TerminatorKind<'tcx>,
) -> BasicBlock {
    let terminator = Terminator { source_info: self.source_info, kind: k };
    let data = BasicBlockData {
        statements,
        terminator: Some(terminator),
        is_cleanup: unwind.is_cleanup(),
    };
    self.elaborator.patch().new_block(data)
}
fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
self.elaborator.patch().new_temp(ty, self.source_info.span)
}

View file

@ -158,6 +158,14 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for ElaborateDropsCtxt<'a, 'tcx> {
self.env.typing_env
}
/// The full drop-elaboration pass always permits lowering eligible drops
/// to async drops (contrast with `DropShimElaborator`, where this is
/// controlled by a flag).
fn allow_async_drops(&self) -> bool {
    true
}
/// Location of `bb`'s terminator; delegated to the patch so that blocks
/// queued by the patch (not yet in `self.body`) are handled too.
fn terminator_loc(&self, bb: BasicBlock) -> Location {
    self.patch.terminator_loc(self.body, bb)
}
#[instrument(level = "debug", skip(self), ret)]
fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
let ((maybe_init, maybe_uninit), multipart) = match mode {
@ -328,7 +336,9 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> {
// This function should mirror what `collect_drop_flags` does.
for (bb, data) in self.body.basic_blocks.iter_enumerated() {
let terminator = data.terminator();
let TerminatorKind::Drop { place, target, unwind, replace } = terminator.kind else {
let TerminatorKind::Drop { place, target, unwind, replace, drop, async_fut: _ } =
terminator.kind
else {
continue;
};
@ -364,7 +374,16 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> {
}
};
self.init_data.seek_before(self.body.terminator_loc(bb));
elaborate_drop(self, terminator.source_info, place, path, target, unwind, bb)
elaborate_drop(
self,
terminator.source_info,
place,
path,
target,
unwind,
bb,
drop,
)
}
LookupResult::Parent(None) => {}
LookupResult::Parent(Some(_)) => {

View file

@ -413,7 +413,15 @@ impl<'tcx> Inliner<'tcx> for NormalInliner<'tcx> {
let term = blk.terminator();
let caller_attrs = tcx.codegen_fn_attrs(self.caller_def_id());
if let TerminatorKind::Drop { ref place, target, unwind, replace: _ } = term.kind {
if let TerminatorKind::Drop {
ref place,
target,
unwind,
replace: _,
drop: _,
async_fut: _,
} = term.kind
{
work_list.push(target);
// If the place doesn't actually need dropping, treat it like a regular goto.
@ -726,6 +734,20 @@ fn check_mir_is_available<'tcx, I: Inliner<'tcx>>(
debug!("still needs substitution");
return Err("implementation limitation -- HACK for dropping polymorphic type");
}
InstanceKind::AsyncDropGlue(_, ty) | InstanceKind::AsyncDropGlueCtorShim(_, ty) => {
return if ty.still_further_specializable() {
Err("still needs substitution")
} else {
Ok(())
};
}
InstanceKind::FutureDropPollShim(_, ty, ty2) => {
return if ty.still_further_specializable() || ty2.still_further_specializable() {
Err("still needs substitution")
} else {
Ok(())
};
}
// This cannot result in an immediate cycle since the callee MIR is a shim, which does
// not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
@ -739,8 +761,7 @@ fn check_mir_is_available<'tcx, I: Inliner<'tcx>>(
| InstanceKind::DropGlue(..)
| InstanceKind::CloneShim(..)
| InstanceKind::ThreadLocalShim(..)
| InstanceKind::FnPtrAddrShim(..)
| InstanceKind::AsyncDropGlueCtorShim(..) => return Ok(()),
| InstanceKind::FnPtrAddrShim(..) => return Ok(()),
}
if inliner.tcx().is_constructor(callee_def_id) {
@ -1345,8 +1366,8 @@ fn try_instance_mir<'tcx>(
tcx: TyCtxt<'tcx>,
instance: InstanceKind<'tcx>,
) -> Result<&'tcx Body<'tcx>, &'static str> {
if let ty::InstanceKind::DropGlue(_, Some(ty))
| ty::InstanceKind::AsyncDropGlueCtorShim(_, Some(ty)) = instance
if let ty::InstanceKind::DropGlue(_, Some(ty)) | ty::InstanceKind::AsyncDropGlueCtorShim(_, ty) =
instance
&& let ty::Adt(def, args) = ty.kind()
{
let fields = def.all_fields();

View file

@ -95,7 +95,10 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
InstanceKind::FnPtrAddrShim(..) => {
continue;
}
InstanceKind::DropGlue(..) | InstanceKind::AsyncDropGlueCtorShim(..) => {
InstanceKind::DropGlue(..)
| InstanceKind::FutureDropPollShim(..)
| InstanceKind::AsyncDropGlue(..)
| InstanceKind::AsyncDropGlueCtorShim(..) => {
// FIXME: A not fully instantiated drop shim can cause ICEs if one attempts to
// have its MIR built. Likely oli-obk just screwed up the `ParamEnv`s, so this
// needs some more analysis.

View file

@ -888,7 +888,14 @@ impl CanConstProp {
};
for (local, val) in cpv.can_const_prop.iter_enumerated_mut() {
let ty = body.local_decls[local].ty;
if ty.is_union() {
if ty.is_async_drop_in_place_coroutine(tcx) {
// No const propagation for async drop coroutine (AsyncDropGlue).
// Otherwise, tcx.layout_of(typing_env.as_query_input(ty)) will be called
// (early layout request for async drop coroutine) to calculate layout size.
// Layout for `async_drop_in_place<T>::{closure}` may only be known with known T.
*val = ConstPropMode::NoPropagation;
continue;
} else if ty.is_union() {
// Unions are incompatible with the current implementation of
// const prop because Rust has no concept of an active
// variant of a union

View file

@ -652,7 +652,7 @@ fn run_runtime_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
}
}
fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
pub(crate) fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
fn o1<T>(x: T) -> WithMinOptLevel<T> {
WithMinOptLevel(1, x)
}

View file

@ -134,6 +134,8 @@ impl<'a, 'mir, 'tcx> DropsReachable<'a, 'mir, 'tcx> {
target: _,
unwind: _,
replace: _,
drop: _,
async_fut: _,
} = &terminator.kind
&& place_has_common_prefix(dropped_place, self.place)
{

View file

@ -148,6 +148,14 @@ impl<'tcx> MirPatch<'tcx> {
self.term_patch_map[bb].is_some()
}
/// Returns the `Location` of block `bb`'s terminator, i.e. the statement
/// index one past the block's statements.
///
/// Handles both blocks already present in `body` and blocks queued in this
/// patch (`self.new_blocks`), whose indices count past `body`'s blocks.
pub(crate) fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
    let existing = body.basic_blocks.len();
    let statement_index = if bb.index() < existing {
        body[bb].statements.len()
    } else {
        // A block created by this patch, stored at `index - existing`.
        self.new_blocks[bb.index() - existing].statements.len()
    };
    Location { block: bb, statement_index }
}
/// Queues the addition of a new temporary with additional local info.
pub(crate) fn new_local_with_info(
&mut self,

View file

@ -59,6 +59,11 @@ fn trivially_zst<'tcx>(ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Option<bool> {
| ty::RawPtr(..)
| ty::Ref(..)
| ty::FnPtr(..) => Some(false),
ty::Coroutine(def_id, _) => {
// For async_drop_in_place::{closure} this is load bearing, not just a perf fix,
// because we don't want to compute the layout before mir analysis is done
if tcx.is_async_drop_in_place_coroutine(*def_id) { Some(false) } else { None }
}
// check `layout_of` to see (including unreachable things we won't actually see)
_ => None,
}

View file

@ -6,13 +6,14 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::visit::{MutVisitor, PlaceContext};
use rustc_middle::mir::*;
use rustc_middle::query::Providers;
use rustc_middle::ty::{
self, CoroutineArgs, CoroutineArgsExt, EarlyBinder, GenericArgs, Ty, TyCtxt,
};
use rustc_middle::{bug, span_bug};
use rustc_span::source_map::Spanned;
use rustc_span::source_map::{Spanned, dummy_spanned};
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};
@ -20,7 +21,8 @@ use crate::elaborate_drop::{DropElaborator, DropFlagMode, DropStyle, Unwind, ela
use crate::patch::MirPatch;
use crate::{
abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, deref_separator, inline,
instsimplify, mentioned_items, pass_manager as pm, remove_noop_landing_pads, simplify,
instsimplify, mentioned_items, pass_manager as pm, remove_noop_landing_pads,
run_optimization_passes, simplify,
};
mod async_destructor_ctor;
@ -29,6 +31,40 @@ pub(super) fn provide(providers: &mut Providers) {
providers.mir_shims = make_shim;
}
// Replace Pin<&mut ImplCoroutine> accesses (_1.0) with Pin<&mut ProxyCoroutine> accesses
/// MIR visitor that redirects accesses through the shim's first argument
/// (`_1`): projections of field 0 of `_1` are rewritten to use the local
/// `replace_to` instead.
struct FixProxyFutureDropVisitor<'tcx> {
    tcx: TyCtxt<'tcx>,
    // Local that stands in for `_1.0` after rewriting.
    replace_to: Local,
}

impl<'tcx> MutVisitor<'tcx> for FixProxyFutureDropVisitor<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_place(
        &mut self,
        place: &mut Place<'tcx>,
        _context: PlaceContext,
        _location: Location,
    ) {
        // Only places rooted at local `_1` (the shim's first argument) are rewritten.
        if place.local == Local::from_u32(1) {
            if place.projection.len() == 1 {
                // `_1.0` becomes `replace_to`; any other single projection
                // on `_1` is unexpected here, hence the assert.
                assert!(matches!(
                    place.projection.first(),
                    Some(ProjectionElem::Field(FieldIdx::ZERO, _))
                ));
                *place = Place::from(self.replace_to);
            } else if place.projection.len() == 2 {
                // `*(_1.0)` becomes `*replace_to`.
                assert!(matches!(place.projection[0], ProjectionElem::Field(FieldIdx::ZERO, _)));
                assert!(matches!(place.projection[1], ProjectionElem::Deref));
                *place =
                    Place::from(self.replace_to).project_deeper(&[ProjectionElem::Deref], self.tcx);
            }
            // NOTE(review): projection lengths of 0 or > 2 on `_1` are left
            // untouched — confirm such places cannot occur in this shim.
        }
    }
}
fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body<'tcx> {
debug!("make_shim({:?})", instance);
@ -129,8 +165,53 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body<
ty::InstanceKind::ThreadLocalShim(..) => build_thread_local_shim(tcx, instance),
ty::InstanceKind::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
ty::InstanceKind::FnPtrAddrShim(def_id, ty) => build_fn_ptr_addr_shim(tcx, def_id, ty),
ty::InstanceKind::FutureDropPollShim(def_id, proxy_ty, impl_ty) => {
let mut body =
async_destructor_ctor::build_future_drop_poll_shim(tcx, def_id, proxy_ty, impl_ty);
pm::run_passes(
tcx,
&mut body,
&[
&mentioned_items::MentionedItems,
&abort_unwinding_calls::AbortUnwindingCalls,
&add_call_guards::CriticalCallEdges,
],
Some(MirPhase::Runtime(RuntimePhase::PostCleanup)),
pm::Optimizations::Allowed,
);
run_optimization_passes(tcx, &mut body);
debug!("make_shim({:?}) = {:?}", instance, body);
return body;
}
ty::InstanceKind::AsyncDropGlue(def_id, ty) => {
let mut body = async_destructor_ctor::build_async_drop_shim(tcx, def_id, ty);
// Main pass required here is StateTransform to convert sync drop ladder
// into coroutine.
// Others are minimal passes as for sync drop glue shim
pm::run_passes(
tcx,
&mut body,
&[
&mentioned_items::MentionedItems,
&abort_unwinding_calls::AbortUnwindingCalls,
&add_call_guards::CriticalCallEdges,
&simplify::SimplifyCfg::MakeShim,
&crate::coroutine::StateTransform,
],
Some(MirPhase::Runtime(RuntimePhase::PostCleanup)),
pm::Optimizations::Allowed,
);
run_optimization_passes(tcx, &mut body);
debug!("make_shim({:?}) = {:?}", instance, body);
return body;
}
ty::InstanceKind::AsyncDropGlueCtorShim(def_id, ty) => {
async_destructor_ctor::build_async_destructor_ctor_shim(tcx, def_id, ty)
let body = async_destructor_ctor::build_async_destructor_ctor_shim(tcx, def_id, ty);
debug!("make_shim({:?}) = {:?}", instance, body);
return body;
}
ty::InstanceKind::Virtual(..) => {
bug!("InstanceKind::Virtual ({:?}) is for direct calls only", instance)
@ -290,8 +371,13 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
if ty.is_some() {
let patch = {
let typing_env = ty::TypingEnv::post_analysis(tcx, def_id);
let mut elaborator =
DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, typing_env };
let mut elaborator = DropShimElaborator {
body: &body,
patch: MirPatch::new(&body),
tcx,
typing_env,
produce_async_drops: false,
};
let dropee = tcx.mk_place_deref(dropee_ptr);
let resume_block = elaborator.patch.resume_block();
elaborate_drop(
@ -302,6 +388,7 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
return_block,
Unwind::To(resume_block),
START_BLOCK,
None,
);
elaborator.patch
};
@ -350,6 +437,7 @@ pub(super) struct DropShimElaborator<'a, 'tcx> {
pub patch: MirPatch<'tcx>,
pub tcx: TyCtxt<'tcx>,
pub typing_env: ty::TypingEnv<'tcx>,
pub produce_async_drops: bool,
}
impl fmt::Debug for DropShimElaborator<'_, '_> {
@ -377,6 +465,13 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
self.typing_env
}
/// Location of `bb`'s terminator; delegated to the patch so that blocks
/// the patch has queued (not yet present in `self.body`) are handled too.
fn terminator_loc(&self, bb: BasicBlock) -> Location {
    self.patch.terminator_loc(self.body, bb)
}
/// Async-drop lowering in drop shims is opt-in, controlled by the
/// `produce_async_drops` flag (set to `false` when building the plain
/// synchronous drop-glue shim).
fn allow_async_drops(&self) -> bool {
    self.produce_async_drops
}
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
match mode {
DropFlagMode::Shallow => {
@ -633,6 +728,8 @@ impl<'tcx> CloneShimBuilder<'tcx> {
target: unwind,
unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
replace: false,
drop: None,
async_fut: None,
},
/* is_cleanup */ true,
);
@ -898,6 +995,8 @@ fn build_call_shim<'tcx>(
target: BasicBlock::new(2),
unwind: UnwindAction::Continue,
replace: false,
drop: None,
async_fut: None,
},
false,
);
@ -915,6 +1014,8 @@ fn build_call_shim<'tcx>(
target: BasicBlock::new(4),
unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
replace: false,
drop: None,
async_fut: None,
},
/* is_cleanup */ true,
);

View file

@ -372,9 +372,12 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
);
}
}
TerminatorKind::Drop { target, unwind, .. } => {
TerminatorKind::Drop { target, unwind, drop, .. } => {
self.check_edge(location, *target, EdgeKind::Normal);
self.check_unwind_edge(location, *unwind);
if let Some(drop) = drop {
self.check_edge(location, *drop, EdgeKind::Normal);
}
}
TerminatorKind::Call { func, args, .. }
| TerminatorKind::TailCall { func, args, .. } => {
@ -747,7 +750,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// since we may be in the process of computing this MIR in the
// first place.
let layout = if def_id == self.caller_body.source.def_id() {
self.caller_body.coroutine_layout_raw()
self.caller_body
.coroutine_layout_raw()
.or_else(|| self.tcx.coroutine_layout(def_id, args))
} else if self.tcx.needs_coroutine_by_move_body_def_id(def_id)
&& let ty::ClosureKind::FnOnce =
args.as_coroutine().kind_ty().to_opt_closure_kind().unwrap()
@ -757,7 +762,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// Same if this is the by-move body of a coroutine-closure.
self.caller_body.coroutine_layout_raw()
} else {
self.tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty())
self.tcx.coroutine_layout(def_id, args)
};
let Some(layout) = layout else {

View file

@ -929,14 +929,16 @@ fn visit_instance_use<'tcx>(
ty::InstanceKind::ThreadLocalShim(..) => {
bug!("{:?} being reified", instance);
}
ty::InstanceKind::DropGlue(_, None) | ty::InstanceKind::AsyncDropGlueCtorShim(_, None) => {
ty::InstanceKind::DropGlue(_, None) => {
// Don't need to emit noop drop glue if we are calling directly.
if !is_direct_call {
output.push(create_fn_mono_item(tcx, instance, source));
}
}
ty::InstanceKind::DropGlue(_, Some(_))
| ty::InstanceKind::AsyncDropGlueCtorShim(_, Some(_))
| ty::InstanceKind::FutureDropPollShim(..)
| ty::InstanceKind::AsyncDropGlue(_, _)
| ty::InstanceKind::AsyncDropGlueCtorShim(_, _)
| ty::InstanceKind::VTableShim(..)
| ty::InstanceKind::ReifyShim(..)
| ty::InstanceKind::ClosureOnceShim { .. }

View file

@ -644,6 +644,8 @@ fn characteristic_def_id_of_mono_item<'tcx>(
| ty::InstanceKind::CloneShim(..)
| ty::InstanceKind::ThreadLocalShim(..)
| ty::InstanceKind::FnPtrAddrShim(..)
| ty::InstanceKind::FutureDropPollShim(..)
| ty::InstanceKind::AsyncDropGlue(..)
| ty::InstanceKind::AsyncDropGlueCtorShim(..) => return None,
};
@ -796,7 +798,9 @@ fn mono_item_visibility<'tcx>(
let def_id = match instance.def {
InstanceKind::Item(def_id)
| InstanceKind::DropGlue(def_id, Some(_))
| InstanceKind::AsyncDropGlueCtorShim(def_id, Some(_)) => def_id,
| InstanceKind::FutureDropPollShim(def_id, _, _)
| InstanceKind::AsyncDropGlue(def_id, _)
| InstanceKind::AsyncDropGlueCtorShim(def_id, _) => def_id,
// We match the visibility of statics here
InstanceKind::ThreadLocalShim(def_id) => {
@ -812,7 +816,6 @@ fn mono_item_visibility<'tcx>(
| InstanceKind::ClosureOnceShim { .. }
| InstanceKind::ConstructCoroutineInClosureShim { .. }
| InstanceKind::DropGlue(..)
| InstanceKind::AsyncDropGlueCtorShim(..)
| InstanceKind::CloneShim(..)
| InstanceKind::FnPtrAddrShim(..) => return Visibility::Hidden,
};

View file

@ -256,11 +256,6 @@ where
goal: Goal<I, Self>,
) -> Result<Candidate<I>, NoSolution>;
fn consider_builtin_async_destruct_candidate(
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,
) -> Result<Candidate<I>, NoSolution>;
fn consider_builtin_destruct_candidate(
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,
@ -477,9 +472,6 @@ where
Some(TraitSolverLangItem::DiscriminantKind) => {
G::consider_builtin_discriminant_kind_candidate(self, goal)
}
Some(TraitSolverLangItem::AsyncDestruct) => {
G::consider_builtin_async_destruct_candidate(self, goal)
}
Some(TraitSolverLangItem::Destruct) => {
G::consider_builtin_destruct_candidate(self, goal)
}

View file

@ -336,13 +336,6 @@ where
unreachable!("DiscriminantKind is not const")
}
fn consider_builtin_async_destruct_candidate(
_ecx: &mut EvalCtxt<'_, D>,
_goal: Goal<I, Self>,
) -> Result<Candidate<I>, NoSolution> {
unreachable!("AsyncDestruct is not const")
}
fn consider_builtin_destruct_candidate(
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,

View file

@ -887,66 +887,6 @@ where
})
}
fn consider_builtin_async_destruct_candidate(
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,
) -> Result<Candidate<I>, NoSolution> {
let self_ty = goal.predicate.self_ty();
let async_destructor_ty = match self_ty.kind() {
ty::Bool
| ty::Char
| ty::Int(..)
| ty::Uint(..)
| ty::Float(..)
| ty::Array(..)
| ty::RawPtr(..)
| ty::Ref(..)
| ty::FnDef(..)
| ty::FnPtr(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Infer(ty::IntVar(..) | ty::FloatVar(..))
| ty::Never
| ty::Adt(_, _)
| ty::Str
| ty::Slice(_)
| ty::Tuple(_)
| ty::Error(_) => self_ty.async_destructor_ty(ecx.cx()),
ty::UnsafeBinder(_) => {
// FIXME(unsafe_binders): Instantiate the binder with placeholders I guess.
todo!()
}
// Given an alias, parameter, or placeholder we add an impl candidate normalizing to a rigid
// alias. In case there's a where-bound further constraining this alias it is preferred over
// this impl candidate anyways. It's still a bit scuffed.
ty::Alias(_, _) | ty::Param(_) | ty::Placeholder(..) => {
return ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
ecx.structurally_instantiate_normalizes_to_term(goal, goal.predicate.alias);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
});
}
ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
| ty::Foreign(..)
| ty::Bound(..) => panic!(
"unexpected self ty `{:?}` when normalizing `<T as AsyncDestruct>::AsyncDestructor`",
goal.predicate.self_ty()
),
ty::Pat(..) | ty::Dynamic(..) | ty::Coroutine(..) | ty::CoroutineWitness(..) => panic!(
"`consider_builtin_async_destruct_candidate` is not yet implemented for type: {self_ty:?}"
),
};
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
ecx.eq(goal.param_env, goal.predicate.term, async_destructor_ty.into())
.expect("expected goal term to be fully unconstrained");
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
}
fn consider_builtin_destruct_candidate(
_ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,

View file

@ -599,19 +599,6 @@ where
.enter(|ecx| ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes))
}
fn consider_builtin_async_destruct_candidate(
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,
) -> Result<Candidate<I>, NoSolution> {
if goal.predicate.polarity != ty::PredicatePolarity::Positive {
return Err(NoSolution);
}
// `AsyncDestruct` is automatically implemented for every type.
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc)
.enter(|ecx| ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes))
}
fn consider_builtin_destruct_candidate(
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,

View file

@ -9,6 +9,7 @@
#![cfg_attr(bootstrap, feature(let_chains))]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)]
#![feature(box_patterns)]
#![feature(map_try_insert)]
#![feature(rustdoc_internals)]
#![feature(try_blocks)]

View file

@ -695,13 +695,6 @@ impl<'tcx> SmirCtxt<'tcx> {
matches!(instance.def, ty::InstanceKind::DropGlue(_, None))
}
/// Check if this is an empty AsyncDropGlueCtor shim.
pub fn is_empty_async_drop_ctor_shim(&self, def: InstanceDef) -> bool {
let tables = self.0.borrow_mut();
let instance = tables.instances[def];
matches!(instance.def, ty::InstanceKind::AsyncDropGlueCtorShim(_, None))
}
/// Convert a non-generic crate item into an instance.
/// This function will panic if the item is generic.
pub fn mono_instance(&self, def_id: stable_mir::DefId) -> stable_mir::mir::mono::Instance {

View file

@ -494,6 +494,9 @@ impl<'tcx> Stable<'tcx> for mir::AssertMessage<'tcx> {
AssertKind::ResumedAfterPanic(coroutine) => {
stable_mir::mir::AssertMessage::ResumedAfterPanic(coroutine.stable(tables))
}
AssertKind::ResumedAfterDrop(coroutine) => {
stable_mir::mir::AssertMessage::ResumedAfterDrop(coroutine.stable(tables))
}
AssertKind::MisalignedPointerDereference { required, found } => {
stable_mir::mir::AssertMessage::MisalignedPointerDereference {
required: required.stable(tables),
@ -648,13 +651,18 @@ impl<'tcx> Stable<'tcx> for mir::TerminatorKind<'tcx> {
mir::TerminatorKind::UnwindTerminate(_) => TerminatorKind::Abort,
mir::TerminatorKind::Return => TerminatorKind::Return,
mir::TerminatorKind::Unreachable => TerminatorKind::Unreachable,
mir::TerminatorKind::Drop { place, target, unwind, replace: _ } => {
TerminatorKind::Drop {
place: place.stable(tables),
target: target.as_usize(),
unwind: unwind.stable(tables),
}
}
mir::TerminatorKind::Drop {
place,
target,
unwind,
replace: _,
drop: _,
async_fut: _,
} => TerminatorKind::Drop {
place: place.stable(tables),
target: target.as_usize(),
unwind: unwind.stable(tables),
},
mir::TerminatorKind::Call {
func,
args,

View file

@ -813,6 +813,8 @@ impl<'tcx> Stable<'tcx> for ty::Instance<'tcx> {
| ty::InstanceKind::DropGlue(..)
| ty::InstanceKind::CloneShim(..)
| ty::InstanceKind::FnPtrShim(..)
| ty::InstanceKind::FutureDropPollShim(..)
| ty::InstanceKind::AsyncDropGlue(..)
| ty::InstanceKind::AsyncDropGlueCtorShim(..) => {
stable_mir::mir::mono::InstanceKind::Shim
}

View file

@ -354,11 +354,6 @@ impl<'tcx> SmirInterface<'tcx> {
self.cx.is_empty_drop_shim(def)
}
/// Check if this is an empty AsyncDropGlueCtor shim.
pub(crate) fn is_empty_async_drop_ctor_shim(&self, def: InstanceDef) -> bool {
self.cx.is_empty_async_drop_ctor_shim(def)
}
/// Convert a non-generic crate item into an instance.
/// This function will panic if the item is generic.
pub(crate) fn mono_instance(&self, def_id: DefId) -> Instance {

View file

@ -267,6 +267,7 @@ pub enum AssertMessage {
RemainderByZero(Operand),
ResumedAfterReturn(CoroutineKind),
ResumedAfterPanic(CoroutineKind),
ResumedAfterDrop(CoroutineKind),
MisalignedPointerDereference { required: Operand, found: Operand },
NullPointerDereference,
}
@ -320,6 +321,22 @@ impl AssertMessage {
_,
)) => Ok("`gen fn` should just keep returning `AssertMessage::None` after panicking"),
AssertMessage::ResumedAfterDrop(CoroutineKind::Coroutine(_)) => {
Ok("coroutine resumed after async drop")
}
AssertMessage::ResumedAfterDrop(CoroutineKind::Desugared(
CoroutineDesugaring::Async,
_,
)) => Ok("`async fn` resumed after async drop"),
AssertMessage::ResumedAfterDrop(CoroutineKind::Desugared(
CoroutineDesugaring::Gen,
_,
)) => Ok("`async gen fn` resumed after async drop"),
AssertMessage::ResumedAfterDrop(CoroutineKind::Desugared(
CoroutineDesugaring::AsyncGen,
_,
)) => Ok("`gen fn` should just keep returning `AssertMessage::None` after async drop"),
AssertMessage::BoundsCheck { .. } => Ok("index out of bounds"),
AssertMessage::MisalignedPointerDereference { .. } => {
Ok("misaligned pointer dereference")

View file

@ -163,10 +163,7 @@ impl Instance {
/// When generating code for a Drop terminator, users can ignore an empty drop glue.
/// These shims are only needed to generate a valid Drop call done via VTable.
pub fn is_empty_shim(&self) -> bool {
self.kind == InstanceKind::Shim
&& with(|cx| {
cx.is_empty_drop_shim(self.def) || cx.is_empty_async_drop_ctor_shim(self.def)
})
self.kind == InstanceKind::Shim && with(|cx| cx.is_empty_drop_shim(self.def))
}
/// Try to constant evaluate the instance into a constant with the given type.

View file

@ -313,7 +313,9 @@ fn pretty_assert_message<W: Write>(writer: &mut W, msg: &AssertMessage) -> io::R
AssertMessage::NullPointerDereference => {
write!(writer, "\"null pointer dereference occurred\"")
}
AssertMessage::ResumedAfterReturn(_) | AssertMessage::ResumedAfterPanic(_) => {
AssertMessage::ResumedAfterReturn(_)
| AssertMessage::ResumedAfterPanic(_)
| AssertMessage::ResumedAfterDrop(_) => {
write!(writer, "{}", msg.description().unwrap())
}
}

View file

@ -372,7 +372,8 @@ macro_rules! make_mir_visitor {
}
AssertMessage::ResumedAfterReturn(_)
| AssertMessage::ResumedAfterPanic(_)
| AssertMessage::NullPointerDereference => {
| AssertMessage::NullPointerDereference
| AssertMessage::ResumedAfterDrop(_) => {
//nothing to visit
}
AssertMessage::MisalignedPointerDereference { required, found } => {

View file

@ -502,17 +502,8 @@ symbols! {
async_call_mut,
async_call_once,
async_closure,
async_destruct,
async_drop,
async_drop_chain,
async_drop_defer,
async_drop_deferred_drop_in_place,
async_drop_either,
async_drop_fuse,
async_drop_in_place,
async_drop_noop,
async_drop_slice,
async_drop_surface_drop_in_place,
async_fn,
async_fn_in_dyn_trait,
async_fn_in_trait,
@ -983,7 +974,6 @@ symbols! {
fadd_fast,
fake_variadic,
fallback,
fallback_surface_drop,
fdiv_algebraic,
fdiv_fast,
feature,
@ -1064,6 +1054,7 @@ symbols! {
fundamental,
fused_iterator,
future,
future_drop_poll,
future_output,
future_trait,
gdb_script_file,
@ -1522,14 +1513,18 @@ symbols! {
panic_cannot_unwind,
panic_const_add_overflow,
panic_const_async_fn_resumed,
panic_const_async_fn_resumed_drop,
panic_const_async_fn_resumed_panic,
panic_const_async_gen_fn_resumed,
panic_const_async_gen_fn_resumed_drop,
panic_const_async_gen_fn_resumed_panic,
panic_const_coroutine_resumed,
panic_const_coroutine_resumed_drop,
panic_const_coroutine_resumed_panic,
panic_const_div_by_zero,
panic_const_div_overflow,
panic_const_gen_fn_none,
panic_const_gen_fn_none_drop,
panic_const_gen_fn_none_panic,
panic_const_mul_overflow,
panic_const_neg_overflow,
@ -2057,7 +2052,6 @@ symbols! {
suggestion,
super_let,
supertrait_item_shadowing,
surface_async_drop_in_place,
sym,
sync,
synthetic,

View file

@ -63,10 +63,17 @@ pub(super) fn mangle<'tcx>(
.print_def_path(
def_id,
if let ty::InstanceKind::DropGlue(_, _)
| ty::InstanceKind::AsyncDropGlueCtorShim(_, _) = instance.def
| ty::InstanceKind::AsyncDropGlueCtorShim(_, _)
| ty::InstanceKind::FutureDropPollShim(_, _, _) = instance.def
{
// Add the name of the dropped type to the symbol name
&*instance.args
} else if let ty::InstanceKind::AsyncDropGlue(_, ty) = instance.def {
let ty::Coroutine(_, cor_args) = ty.kind() else {
bug!();
};
let drop_ty = cor_args.first().unwrap().expect_ty();
tcx.mk_args(&[GenericArg::from(drop_ty)])
} else {
&[]
},
@ -99,6 +106,10 @@ pub(super) fn mangle<'tcx>(
_ => {}
}
if let ty::InstanceKind::FutureDropPollShim(..) = instance.def {
let _ = printer.write_str("{{drop-shim}}");
}
printer.path.finish(hash)
}

View file

@ -58,11 +58,17 @@ pub(super) fn mangle<'tcx>(
ty::InstanceKind::ConstructCoroutineInClosureShim { receiver_by_ref: false, .. } => {
Some("by_ref")
}
ty::InstanceKind::FutureDropPollShim(_, _, _) => Some("drop"),
_ => None,
};
if let Some(shim_kind) = shim_kind {
if let ty::InstanceKind::AsyncDropGlue(_, ty) = instance.def {
let ty::Coroutine(_, cor_args) = ty.kind() else {
bug!();
};
let drop_ty = cor_args.first().unwrap().expect_ty();
cx.print_def_path(def_id, tcx.mk_args(&[GenericArg::from(drop_ty)])).unwrap()
} else if let Some(shim_kind) = shim_kind {
cx.path_append_ns(|cx| cx.print_def_path(def_id, args), 'S', 0, shim_kind).unwrap()
} else {
cx.print_def_path(def_id, args).unwrap()

View file

@ -1034,42 +1034,6 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
| ty::Infer(..)
| ty::Error(_) => false,
},
Some(LangItem::AsyncDestruct) => match self_ty.kind() {
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Adt(..)
| ty::Str
| ty::Array(..)
| ty::Slice(_)
| ty::RawPtr(..)
| ty::Ref(..)
| ty::FnDef(..)
| ty::FnPtr(..)
| ty::UnsafeBinder(_)
| ty::Dynamic(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Pat(..)
| ty::Never
| ty::Tuple(..)
| ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..)) => true,
// type parameters, opaques, and unnormalized projections don't have
// a known async destructor and may need to be normalized further or rely
// on param env for async destructor projections
ty::Param(_)
| ty::Foreign(_)
| ty::Alias(..)
| ty::Bound(..)
| ty::Placeholder(..)
| ty::Infer(_)
| ty::Error(_) => false,
},
Some(LangItem::PointeeTrait) => {
let tail = selcx.tcx().struct_tail_raw(
self_ty,
@ -1528,11 +1492,6 @@ fn confirm_builtin_candidate<'cx, 'tcx>(
assert_eq!(discriminant_def_id, item_def_id);
(self_ty.discriminant_ty(tcx).into(), PredicateObligations::new())
} else if tcx.is_lang_item(trait_def_id, LangItem::AsyncDestruct) {
let destructor_def_id = tcx.associated_item_def_ids(trait_def_id)[0];
assert_eq!(destructor_def_id, item_def_id);
(self_ty.async_destructor_ty(tcx).into(), PredicateObligations::new())
} else if tcx.is_lang_item(trait_def_id, LangItem::PointeeTrait) {
let metadata_def_id = tcx.require_lang_item(LangItem::Metadata, None);
assert_eq!(metadata_def_id, item_def_id);

View file

@ -79,9 +79,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
} else if tcx.is_lang_item(def_id, LangItem::DiscriminantKind) {
// `DiscriminantKind` is automatically implemented for every type.
candidates.vec.push(BuiltinCandidate { has_nested: false });
} else if tcx.is_lang_item(def_id, LangItem::AsyncDestruct) {
// `AsyncDestruct` is automatically implemented for every type.
candidates.vec.push(BuiltinCandidate { has_nested: false });
} else if tcx.is_lang_item(def_id, LangItem::PointeeTrait) {
// `Pointee` is automatically implemented for every type.
candidates.vec.push(BuiltinCandidate { has_nested: false });

View file

@ -551,8 +551,10 @@ fn fn_abi_new_uncached<'tcx>(
extra_args
};
let is_drop_in_place =
determined_fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace));
let is_drop_in_place = determined_fn_def_id.is_some_and(|def_id| {
tcx.is_lang_item(def_id, LangItem::DropInPlace)
|| tcx.is_lang_item(def_id, LangItem::AsyncDropInPlace)
});
let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> {
let span = tracing::debug_span!("arg_of");

View file

@ -29,6 +29,13 @@ fn is_unpin_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::PseudoCanonicalInput<'tcx, T
is_item_raw(tcx, query, LangItem::Unpin)
}
/// Query provider: reports whether the queried type implements the
/// `AsyncDrop` lang item, by delegating to the shared `is_item_raw`
/// trait-membership helper (same pattern as `is_unpin_raw` above).
fn is_async_drop_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>,
) -> bool {
    is_item_raw(tcx, query, LangItem::AsyncDrop)
}
fn is_item_raw<'tcx>(
tcx: TyCtxt<'tcx>,
query: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>,
@ -46,6 +53,7 @@ pub(crate) fn provide(providers: &mut Providers) {
is_sized_raw,
is_freeze_raw,
is_unpin_raw,
is_async_drop_raw,
..*providers
};
}

View file

@ -5,7 +5,6 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::bug;
use rustc_middle::query::Providers;
use rustc_middle::traits::{BuiltinImplSource, CodegenObligationError};
use rustc_middle::ty::util::AsyncDropGlueMorphology;
use rustc_middle::ty::{
self, ClosureKind, GenericArgsRef, Instance, PseudoCanonicalInput, TyCtxt, TypeVisitableExt,
};
@ -41,20 +40,26 @@ fn resolve_instance_raw<'tcx>(
if ty.needs_drop(tcx, typing_env) {
debug!(" => nontrivial drop glue");
match *ty.kind() {
ty::Coroutine(coroutine_def_id, ..) => {
// FIXME: sync drop of coroutine with async drop (generate both versions?)
// Currently just ignored
if tcx.optimized_mir(coroutine_def_id).coroutine_drop_async().is_some() {
ty::InstanceKind::DropGlue(def_id, None)
} else {
ty::InstanceKind::DropGlue(def_id, Some(ty))
}
}
ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(..)
| ty::Tuple(..)
| ty::Adt(..)
| ty::Dynamic(..)
| ty::Array(..)
| ty::Slice(..)
| ty::UnsafeBinder(..) => {}
| ty::UnsafeBinder(..) => ty::InstanceKind::DropGlue(def_id, Some(ty)),
// Drop shims can only be built from ADTs.
_ => return Ok(None),
}
ty::InstanceKind::DropGlue(def_id, Some(ty))
} else {
debug!(" => trivial drop glue");
ty::InstanceKind::DropGlue(def_id, None)
@ -62,7 +67,7 @@ fn resolve_instance_raw<'tcx>(
} else if tcx.is_lang_item(def_id, LangItem::AsyncDropInPlace) {
let ty = args.type_at(0);
if ty.async_drop_glue_morphology(tcx) != AsyncDropGlueMorphology::Noop {
if ty.needs_async_drop(tcx, typing_env) {
match *ty.kind() {
ty::Closure(..)
| ty::CoroutineClosure(..)
@ -76,11 +81,14 @@ fn resolve_instance_raw<'tcx>(
_ => return Ok(None),
}
debug!(" => nontrivial async drop glue ctor");
ty::InstanceKind::AsyncDropGlueCtorShim(def_id, Some(ty))
ty::InstanceKind::AsyncDropGlueCtorShim(def_id, ty)
} else {
debug!(" => trivial async drop glue ctor");
ty::InstanceKind::AsyncDropGlueCtorShim(def_id, None)
ty::InstanceKind::AsyncDropGlueCtorShim(def_id, ty)
}
} else if tcx.is_async_drop_in_place_coroutine(def_id) {
let ty = args.type_at(0);
ty::InstanceKind::AsyncDropGlue(def_id, ty)
} else {
debug!(" => free item");
ty::InstanceKind::Item(def_id)

View file

@ -184,6 +184,10 @@ fn layout_of_uncached<'tcx>(
}
let tcx = cx.tcx();
// layout of `async_drop_in_place<T>::{closure}` in case,
// when T is a coroutine, contains this internal coroutine's ref
let dl = cx.data_layout();
let map_layout = |result: Result<_, _>| match result {
Ok(layout) => Ok(tcx.mk_layout(layout)),
@ -406,7 +410,7 @@ fn layout_of_uncached<'tcx>(
ty::Coroutine(def_id, args) => {
use rustc_middle::ty::layout::PrimitiveExt as _;
let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
let Some(info) = tcx.coroutine_layout(def_id, args) else {
return Err(error(cx, LayoutError::Unknown(ty)));
};
@ -770,7 +774,7 @@ fn variant_info_for_coroutine<'tcx>(
return (vec![], None);
};
let coroutine = cx.tcx().coroutine_layout(def_id, args.as_coroutine().kind_ty()).unwrap();
let coroutine = cx.tcx().coroutine_layout(def_id, args).unwrap();
let upvar_names = cx.tcx().closure_saved_names_of_captured_variables(def_id);
let mut upvars_size = Size::ZERO;

View file

@ -42,11 +42,11 @@ fn needs_async_drop_raw<'tcx>(
let adt_has_async_dtor =
|adt_def: ty::AdtDef<'tcx>| adt_def.async_destructor(tcx).map(|_| DtorType::Significant);
let res = drop_tys_helper(tcx, query.value, query.typing_env, adt_has_async_dtor, false, false)
.filter(filter_array_elements(tcx, query.typing_env))
.filter(filter_array_elements_async(tcx, query.typing_env))
.next()
.is_some();
debug!("needs_drop_raw({:?}) = {:?}", query, res);
debug!("needs_async_drop_raw({:?}) = {:?}", query, res);
res
}
@ -66,6 +66,18 @@ fn filter_array_elements<'tcx>(
Err(AlwaysRequiresDrop) => true,
}
}
/// Async counterpart of `filter_array_elements`: builds the predicate used to
/// filter component types when computing `needs_async_drop`.
///
/// For an array type, keep it only if its *element* type needs async drop
/// (re-queried via `needs_async_drop_raw`); any other `Ok` type is kept
/// unconditionally, and `Err(AlwaysRequiresDrop)` is conservatively kept.
fn filter_array_elements_async<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
) -> impl Fn(&Result<Ty<'tcx>, AlwaysRequiresDrop>) -> bool {
    move |ty| match ty {
        Ok(ty) => match *ty.kind() {
            ty::Array(elem, _) => tcx.needs_async_drop_raw(typing_env.as_query_input(elem)),
            _ => true,
        },
        Err(AlwaysRequiresDrop) => true,
    }
}
fn has_significant_drop_raw<'tcx>(
tcx: TyCtxt<'tcx>,
@ -414,6 +426,27 @@ fn adt_drop_tys<'tcx>(
.collect::<Result<Vec<_>, _>>()
.map(|components| tcx.mk_type_list(&components))
}
/// Query provider for `adt_async_drop_tys`: returns the list of component
/// types relevant to async-dropping the ADT `def_id` (instantiated with its
/// identity args), or `Err(AlwaysRequiresDrop)` when that set cannot be
/// computed precisely. Mirrors `adt_drop_tys` above, but an ADT counts as
/// having a destructor when it has an `AsyncDrop` impl
/// (`adt_def.async_destructor`).
fn adt_async_drop_tys<'tcx>(
    tcx: TyCtxt<'tcx>,
    def_id: DefId,
) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
    // This is for the "adt_async_drop_tys" query, that considers all `AsyncDrop` impls.
    let adt_has_dtor =
        |adt_def: ty::AdtDef<'tcx>| adt_def.async_destructor(tcx).map(|_| DtorType::Significant);
    // `tcx.type_of(def_id)` identical to `tcx.make_adt(def, identity_args)`
    drop_tys_helper(
        tcx,
        tcx.type_of(def_id).instantiate_identity(),
        ty::TypingEnv::non_body_analysis(tcx, def_id),
        adt_has_dtor,
        false,
        false,
    )
    .collect::<Result<Vec<_>, _>>()
    .map(|components| tcx.mk_type_list(&components))
}
// If `def_id` refers to a generic ADT, the queries above and below act as if they had been handed
// a `tcx.make_ty(def, identity_args)` and as such it is legal to instantiate the generic parameters
// of the ADT into the outputted `ty`s.
@ -458,6 +491,7 @@ pub(crate) fn provide(providers: &mut Providers) {
needs_async_drop_raw,
has_significant_drop_raw,
adt_drop_tys,
adt_async_drop_tys,
adt_significant_drop_tys,
list_significant_drop_tys,
..*providers

View file

@ -151,7 +151,6 @@ pub trait Ty<I: Interner<Ty = Self>>:
fn discriminant_ty(self, interner: I) -> I::Ty;
fn async_destructor_ty(self, interner: I) -> I::Ty;
fn is_known_rigid(self) -> bool {
self.kind().is_known_rigid()
}

View file

@ -2,7 +2,6 @@
/// representation of `LangItem`s used in the underlying compiler implementation.
pub enum TraitSolverLangItem {
// tidy-alphabetical-start
AsyncDestruct,
AsyncFn,
AsyncFnKindHelper,
AsyncFnKindUpvars,

View file

@ -1,284 +1,51 @@
#![unstable(feature = "async_drop", issue = "126482")]
use crate::fmt;
use crate::future::{Future, IntoFuture};
use crate::intrinsics::discriminant_value;
use crate::marker::{DiscriminantKind, PhantomPinned};
use crate::mem::MaybeUninit;
#[allow(unused_imports)]
use core::future::Future;
#[allow(unused_imports)]
use crate::pin::Pin;
use crate::task::{Context, Poll, ready};
#[allow(unused_imports)]
use crate::task::{Context, Poll};
/// Asynchronously drops a value by running `AsyncDrop::async_drop`
/// on a value and its fields recursively.
#[unstable(feature = "async_drop", issue = "126482")]
pub fn async_drop<T>(value: T) -> AsyncDropOwning<T> {
AsyncDropOwning { value: MaybeUninit::new(value), dtor: None, _pinned: PhantomPinned }
}
/// A future returned by the [`async_drop`].
#[unstable(feature = "async_drop", issue = "126482")]
pub struct AsyncDropOwning<T> {
value: MaybeUninit<T>,
dtor: Option<AsyncDropInPlace<T>>,
_pinned: PhantomPinned,
}
#[unstable(feature = "async_drop", issue = "126482")]
impl<T> fmt::Debug for AsyncDropOwning<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AsyncDropOwning").finish_non_exhaustive()
}
}
#[unstable(feature = "async_drop", issue = "126482")]
impl<T> Future for AsyncDropOwning<T> {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// SAFETY: Self is pinned thus it is ok to store references to self
unsafe {
let this = self.get_unchecked_mut();
let dtor = Pin::new_unchecked(
this.dtor.get_or_insert_with(|| async_drop_in_place(this.value.as_mut_ptr())),
);
// AsyncDestructors are idempotent so Self gets idempotency as well
dtor.poll(cx)
}
}
}
#[lang = "async_drop_in_place"]
#[allow(unconditional_recursion)]
// FIXME: Consider if `#[rustc_diagnostic_item = "ptr_drop_in_place"]` is needed?
unsafe fn async_drop_in_place_raw<T: ?Sized>(
to_drop: *mut T,
) -> <T as AsyncDestruct>::AsyncDestructor {
// Code here does not matter - this is replaced by the
// real async drop glue constructor by the compiler.
// SAFETY: see comment above
unsafe { async_drop_in_place_raw(to_drop) }
}
/// Creates the asynchronous destructor of the pointed-to value.
/// Async version of Drop trait.
///
/// # Safety
/// When a value is no longer needed, Rust will run a "destructor" on that value.
/// The most common way that a value is no longer needed is when it goes out of
/// scope. Destructors may still run in other circumstances, but we're going to
/// focus on scope for the examples here. To learn about some of those other cases,
/// please see [the reference] section on destructors.
///
/// Behavior is undefined if any of the following conditions are violated:
/// [the reference]: https://doc.rust-lang.org/reference/destructors.html
///
/// * `to_drop` must be [valid](crate::ptr#safety) for both reads and writes.
/// ## `Copy` and ([`Drop`]|`AsyncDrop`) are exclusive
///
/// * `to_drop` must be properly aligned, even if `T` has size 0.
///
/// * `to_drop` must be nonnull, even if `T` has size 0.
///
/// * The value `to_drop` points to must be valid for async dropping,
/// which may mean it must uphold additional invariants. These
/// invariants depend on the type of the value being dropped. For
/// instance, when dropping a Box, the box's pointer to the heap must
/// be valid.
///
/// * While `async_drop_in_place` is executing or the returned async
/// destructor is alive, the only way to access parts of `to_drop`
/// is through the `self: Pin<&mut Self>` references supplied to
/// the `AsyncDrop::async_drop` methods that `async_drop_in_place`
/// or `AsyncDropInPlace<T>::poll` invokes. This usually means the
/// returned future stores the `to_drop` pointer and user is required
/// to guarantee that dropped value doesn't move.
///
#[unstable(feature = "async_drop", issue = "126482")]
pub unsafe fn async_drop_in_place<T: ?Sized>(to_drop: *mut T) -> AsyncDropInPlace<T> {
// SAFETY: `async_drop_in_place_raw` has the same safety requirements
unsafe { AsyncDropInPlace(async_drop_in_place_raw(to_drop)) }
}
/// A future returned by the [`async_drop_in_place`].
#[unstable(feature = "async_drop", issue = "126482")]
pub struct AsyncDropInPlace<T: ?Sized>(<T as AsyncDestruct>::AsyncDestructor);
#[unstable(feature = "async_drop", issue = "126482")]
impl<T: ?Sized> fmt::Debug for AsyncDropInPlace<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AsyncDropInPlace").finish_non_exhaustive()
}
}
#[unstable(feature = "async_drop", issue = "126482")]
impl<T: ?Sized> Future for AsyncDropInPlace<T> {
type Output = ();
#[inline(always)]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// SAFETY: This code simply forwards poll call to the inner future
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().0) }.poll(cx)
}
}
// FIXME(zetanumbers): Add same restrictions on AsyncDrop impls as
// with Drop impls
/// Custom code within the asynchronous destructor.
/// You cannot implement both [`Copy`] and ([`Drop`]|`AsyncDrop`) on the same type. Types that
/// are `Copy` get implicitly duplicated by the compiler, making it very
/// hard to predict when, and how often destructors will be executed. As such,
/// these types cannot have destructors.
#[cfg(not(bootstrap))]
#[unstable(feature = "async_drop", issue = "126482")]
#[lang = "async_drop"]
pub trait AsyncDrop {
/// A future returned by the [`AsyncDrop::async_drop`] to be part
/// of the async destructor.
#[unstable(feature = "async_drop", issue = "126482")]
type Dropper<'a>: Future<Output = ()>
where
Self: 'a;
/// Constructs the asynchronous destructor for this type.
#[unstable(feature = "async_drop", issue = "126482")]
fn async_drop(self: Pin<&mut Self>) -> Self::Dropper<'_>;
/// Executes the async destructor for this type.
///
/// This method is called implicitly when the value goes out of scope,
/// and cannot be called explicitly.
///
/// When this method has been called, `self` has not yet been deallocated.
/// That only happens after the method is over.
///
/// # Panics
#[allow(async_fn_in_trait)]
async fn drop(self: Pin<&mut Self>);
}
#[lang = "async_destruct"]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
trait AsyncDestruct {
type AsyncDestructor: Future<Output = ()>;
}
/// Basically calls `AsyncDrop::async_drop` with pointer. Used to simplify
/// generation of the code for `async_drop_in_place_raw`
#[lang = "surface_async_drop_in_place"]
async unsafe fn surface_async_drop_in_place<T: AsyncDrop + ?Sized>(ptr: *mut T) {
// SAFETY: We call this from async drop `async_drop_in_place_raw`
// which has the same safety requirements
unsafe { <T as AsyncDrop>::async_drop(Pin::new_unchecked(&mut *ptr)).await }
}
/// Basically calls `Drop::drop` with pointer. Used to simplify generation
/// of the code for `async_drop_in_place_raw`
#[allow(drop_bounds)]
#[lang = "async_drop_surface_drop_in_place"]
async unsafe fn surface_drop_in_place<T: Drop + ?Sized>(ptr: *mut T) {
// SAFETY: We call this from async drop `async_drop_in_place_raw`
// which has the same safety requirements
unsafe { crate::ops::fallback_surface_drop(&mut *ptr) }
}
/// Wraps a future to continue outputting `Poll::Ready(())` once after
/// wrapped future completes by returning `Poll::Ready(())` on poll. This
/// is useful for constructing async destructors to guarantee this
/// "fuse" property
//
// FIXME: Consider optimizing combinators to not have to use fuse in majority
// of cases, perhaps by adding `#[(rustc_)idempotent(_future)]` attribute for
// async functions and blocks with the unit return type. However current layout
// optimizations currently encode `None` case into the async block's discriminant.
struct Fuse<T> {
inner: Option<T>,
}
#[lang = "async_drop_fuse"]
fn fuse<T>(inner: T) -> Fuse<T> {
Fuse { inner: Some(inner) }
}
impl<T> Future for Fuse<T>
where
T: Future<Output = ()>,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// SAFETY: pin projection into `self.inner`
unsafe {
let this = self.get_unchecked_mut();
if let Some(inner) = &mut this.inner {
ready!(Pin::new_unchecked(inner).poll(cx));
this.inner = None;
}
}
Poll::Ready(())
}
}
/// Async destructor for arrays and slices.
#[lang = "async_drop_slice"]
async unsafe fn slice<T>(s: *mut [T]) {
let len = s.len();
let ptr = s.as_mut_ptr();
for i in 0..len {
// SAFETY: we iterate over elements of `s` slice
unsafe { async_drop_in_place_raw(ptr.add(i)).await }
}
}
/// Constructs a chain of two futures, which awaits them sequentially as
/// a future.
#[lang = "async_drop_chain"]
async fn chain<F, G>(first: F, last: G)
where
F: IntoFuture<Output = ()>,
G: IntoFuture<Output = ()>,
{
first.await;
last.await;
}
/// Basically a lazy version of `async_drop_in_place`. Returns a future
/// that would call `AsyncDrop::async_drop` on a first poll.
///
/// # Safety
///
/// Same as `async_drop_in_place` except is lazy to avoid creating
/// multiple mutable references.
#[lang = "async_drop_defer"]
async unsafe fn defer<T: ?Sized>(to_drop: *mut T) {
// SAFETY: same safety requirements as `async_drop_in_place`
unsafe { async_drop_in_place(to_drop) }.await
}
/// If `T`'s discriminant is equal to the stored one then awaits `M`
/// otherwise awaits the `O`.
///
/// # Safety
///
/// Users should carefully manage the returned future, since it would
/// try creating an immutable reference from `this` and get pointee's
/// discriminant.
// FIXME(zetanumbers): Send and Sync impls
#[lang = "async_drop_either"]
async unsafe fn either<O: IntoFuture<Output = ()>, M: IntoFuture<Output = ()>, T>(
other: O,
matched: M,
this: *mut T,
discr: <T as DiscriminantKind>::Discriminant,
) {
// SAFETY: Guaranteed by the safety section of this function's documentation
if unsafe { discriminant_value(&*this) } == discr {
drop(other);
matched.await
} else {
drop(matched);
other.await
}
}
#[lang = "async_drop_deferred_drop_in_place"]
async unsafe fn deferred_drop_in_place<T>(to_drop: *mut T) {
// SAFETY: same safety requirements as with drop_in_place (implied by
// function's name)
unsafe { crate::ptr::drop_in_place(to_drop) }
}
/// Used for noop async destructors. We don't use [`core::future::Ready`]
/// because it panics after its second poll, which could be potentially
/// bad if that would happen during the cleanup.
#[derive(Clone, Copy)]
struct Noop;
#[lang = "async_drop_noop"]
fn noop() -> Noop {
Noop
}
impl Future for Noop {
type Output = ();
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(())
}
/// Asynchronously executes the destructor (if any) of the pointed-to value.
///
/// This is the `async_drop_in_place` lang item: its body is a placeholder,
/// and the compiler substitutes the real async-drop glue for it during
/// shim codegen.
///
/// # Safety
///
/// NOTE(review): the exact caller contract is not visible here — presumably
/// the same validity/alignment requirements as `ptr::drop_in_place`
/// (valid for reads and writes, properly aligned, value valid for dropping);
/// confirm against the async-drop tracking issue (#126482).
#[cfg(not(bootstrap))]
#[unstable(feature = "async_drop", issue = "126482")]
#[lang = "async_drop_in_place"]
pub async unsafe fn async_drop_in_place<T: ?Sized>(_to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real implementation by the compiler.
}

View file

@ -20,8 +20,9 @@ mod pending;
mod poll_fn;
mod ready;
#[cfg(not(bootstrap))]
#[unstable(feature = "async_drop", issue = "126482")]
pub use async_drop::{AsyncDrop, AsyncDropInPlace, async_drop, async_drop_in_place};
pub use async_drop::{AsyncDrop, async_drop_in_place};
#[stable(feature = "into_future", since = "1.64.0")]
pub use into_future::IntoFuture;
#[stable(feature = "future_readiness_fns", since = "1.48.0")]

View file

@ -240,10 +240,3 @@ pub trait Drop {
#[stable(feature = "rust1", since = "1.0.0")]
fn drop(&mut self);
}
/// Fallback function to call surface level `Drop::drop` function
#[allow(drop_bounds)]
#[lang = "fallback_surface_drop"]
pub(crate) fn fallback_surface_drop<T: Drop + ?Sized>(x: &mut T) {
<T as Drop>::drop(x)
}

View file

@ -176,7 +176,6 @@ pub use self::deref::Receiver;
pub use self::deref::{Deref, DerefMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::drop::Drop;
pub(crate) use self::drop::fallback_surface_drop;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::function::{Fn, FnMut, FnOnce};
#[stable(feature = "rust1", since = "1.0.0")]

View file

@ -155,30 +155,26 @@ pub const fn panic(expr: &'static str) -> ! {
// reducing binary size impact.
macro_rules! panic_const {
($($lang:ident = $message:expr,)+) => {
pub mod panic_const {
use super::*;
$(
/// This is a panic called with a message that's a result of a MIR-produced Assert.
//
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[rustc_const_stable_indirect] // must follow stable const rules since it is exposed to stable
#[lang = stringify!($lang)]
pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
panic_fmt(fmt::Arguments::new_const(&[$message]));
}
)+
}
$(
/// This is a panic called with a message that's a result of a MIR-produced Assert.
//
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[rustc_const_stable_indirect] // must follow stable const rules since it is exposed to stable
#[lang = stringify!($lang)]
pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
panic_fmt(fmt::Arguments::new_const(&[$message]));
}
)+
}
}
@ -186,25 +182,37 @@ macro_rules! panic_const {
// slightly different forms. It's not clear if there's a good way to deduplicate without adding
// special cases to the compiler (e.g., a const generic function wouldn't have a single definition
// shared across crates, which is exactly what we want here).
panic_const! {
panic_const_add_overflow = "attempt to add with overflow",
panic_const_sub_overflow = "attempt to subtract with overflow",
panic_const_mul_overflow = "attempt to multiply with overflow",
panic_const_div_overflow = "attempt to divide with overflow",
panic_const_rem_overflow = "attempt to calculate the remainder with overflow",
panic_const_neg_overflow = "attempt to negate with overflow",
panic_const_shr_overflow = "attempt to shift right with overflow",
panic_const_shl_overflow = "attempt to shift left with overflow",
panic_const_div_by_zero = "attempt to divide by zero",
panic_const_rem_by_zero = "attempt to calculate the remainder with a divisor of zero",
panic_const_coroutine_resumed = "coroutine resumed after completion",
panic_const_async_fn_resumed = "`async fn` resumed after completion",
panic_const_async_gen_fn_resumed = "`async gen fn` resumed after completion",
panic_const_gen_fn_none = "`gen fn` should just keep returning `None` after completion",
panic_const_coroutine_resumed_panic = "coroutine resumed after panicking",
panic_const_async_fn_resumed_panic = "`async fn` resumed after panicking",
panic_const_async_gen_fn_resumed_panic = "`async gen fn` resumed after panicking",
panic_const_gen_fn_none_panic = "`gen fn` should just keep returning `None` after panicking",
pub mod panic_const {
use super::*;
panic_const! {
panic_const_add_overflow = "attempt to add with overflow",
panic_const_sub_overflow = "attempt to subtract with overflow",
panic_const_mul_overflow = "attempt to multiply with overflow",
panic_const_div_overflow = "attempt to divide with overflow",
panic_const_rem_overflow = "attempt to calculate the remainder with overflow",
panic_const_neg_overflow = "attempt to negate with overflow",
panic_const_shr_overflow = "attempt to shift right with overflow",
panic_const_shl_overflow = "attempt to shift left with overflow",
panic_const_div_by_zero = "attempt to divide by zero",
panic_const_rem_by_zero = "attempt to calculate the remainder with a divisor of zero",
panic_const_coroutine_resumed = "coroutine resumed after completion",
panic_const_async_fn_resumed = "`async fn` resumed after completion",
panic_const_async_gen_fn_resumed = "`async gen fn` resumed after completion",
panic_const_gen_fn_none = "`gen fn` should just keep returning `None` after completion",
panic_const_coroutine_resumed_panic = "coroutine resumed after panicking",
panic_const_async_fn_resumed_panic = "`async fn` resumed after panicking",
panic_const_async_gen_fn_resumed_panic = "`async gen fn` resumed after panicking",
panic_const_gen_fn_none_panic = "`gen fn` should just keep returning `None` after panicking",
}
// Separated panic constants list for async drop feature
// (May be joined when the corresponding lang items will be in the bootstrap)
#[cfg(not(bootstrap))]
panic_const! {
panic_const_coroutine_resumed_drop = "coroutine resumed after async drop",
panic_const_async_fn_resumed_drop = "`async fn` resumed after async drop",
panic_const_async_gen_fn_resumed_drop = "`async gen fn` resumed after async drop",
panic_const_gen_fn_none_drop = "`gen fn` resumed after async drop",
}
}
/// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize on the caller.

View file

@ -4,16 +4,16 @@
// WARNING: If you would ever want to modify this test,
// please consider modifying rustc's async drop test at
// `tests/ui/async-await/async-drop.rs`.
// `tests/ui/async-await/async-drop/async-drop-initial.rs`.
#![feature(async_drop, impl_trait_in_assoc_type)]
#![allow(incomplete_features, dead_code)]
// FIXME(zetanumbers): consider AsyncDestruct::async_drop cleanup tests
use core::future::{AsyncDrop, Future, async_drop_in_place};
use core::future::{async_drop_in_place, AsyncDrop, Future};
use core::hint::black_box;
use core::mem::{self, ManuallyDrop};
use core::pin::{Pin, pin};
use core::pin::{pin, Pin};
use core::task::{Context, Poll, Waker};
async fn test_async_drop<T>(x: T) {
@ -68,7 +68,8 @@ fn main() {
test_async_drop(SyncThenAsync { i: 15, a: AsyncInt(16), b: SyncInt(17), c: AsyncInt(18) })
.await;
let async_drop_fut = pin!(core::future::async_drop(AsyncInt(19)));
let mut ptr19 = mem::MaybeUninit::new(AsyncInt(19));
let async_drop_fut = pin!(unsafe { async_drop_in_place(ptr19.as_mut_ptr()) });
test_idempotency(async_drop_fut).await;
let foo = AsyncInt(20);
@ -89,13 +90,14 @@ fn main() {
struct AsyncInt(i32);
impl Drop for AsyncInt {
fn drop(&mut self) {
println!("AsyncInt::drop: {}", self.0);
}
}
impl AsyncDrop for AsyncInt {
type Dropper<'a> = impl Future<Output = ()>;
fn async_drop(self: Pin<&mut Self>) -> Self::Dropper<'_> {
async move {
println!("AsyncInt::Dropper::poll: {}", self.0);
}
async fn drop(self: Pin<&mut Self>) {
println!("AsyncInt::async_drop: {}", self.0);
}
}
@ -124,16 +126,14 @@ struct AsyncReference<'a> {
foo: &'a AsyncInt,
}
impl Drop for AsyncReference<'_> {
fn drop(&mut self) {
println!("AsyncReference::drop: {}", self.foo.0);
}
}
impl AsyncDrop for AsyncReference<'_> {
type Dropper<'a>
= impl Future<Output = ()>
where
Self: 'a;
fn async_drop(self: Pin<&mut Self>) -> Self::Dropper<'_> {
async move {
println!("AsyncReference::Dropper::poll: {}", self.foo.0);
}
async fn drop(self: Pin<&mut Self>) {
println!("AsyncReference::async_drop: {}", self.foo.0);
}
}
@ -145,13 +145,14 @@ struct AsyncStruct {
b: AsyncInt,
}
impl Drop for AsyncStruct {
fn drop(&mut self) {
println!("AsyncStruct::drop: {}", self.i);
}
}
impl AsyncDrop for AsyncStruct {
type Dropper<'a> = impl Future<Output = ()>;
fn async_drop(self: Pin<&mut Self>) -> Self::Dropper<'_> {
async move {
println!("AsyncStruct::Dropper::poll: {}", self.i);
}
async fn drop(self: Pin<&mut Self>) {
println!("AsyncStruct::async_drop: {}", self.i);
}
}
@ -160,23 +161,34 @@ enum AsyncEnum {
B(SyncInt),
}
impl Drop for AsyncEnum {
fn drop(&mut self) {
let new_self = match self {
AsyncEnum::A(foo) => {
println!("AsyncEnum(A)::drop: {}", foo.0);
AsyncEnum::B(SyncInt(foo.0))
}
AsyncEnum::B(foo) => {
println!("AsyncEnum(B)::drop: {}", foo.0);
AsyncEnum::A(AsyncInt(foo.0))
}
};
mem::forget(mem::replace(&mut *self, new_self));
}
}
impl AsyncDrop for AsyncEnum {
type Dropper<'a> = impl Future<Output = ()>;
fn async_drop(mut self: Pin<&mut Self>) -> Self::Dropper<'_> {
async move {
let new_self = match &*self {
AsyncEnum::A(foo) => {
println!("AsyncEnum(A)::Dropper::poll: {}", foo.0);
AsyncEnum::B(SyncInt(foo.0))
}
AsyncEnum::B(foo) => {
println!("AsyncEnum(B)::Dropper::poll: {}", foo.0);
AsyncEnum::A(AsyncInt(foo.0))
}
};
mem::forget(mem::replace(&mut *self, new_self));
}
async fn drop(mut self: Pin<&mut Self>) {
let new_self = match &*self {
AsyncEnum::A(foo) => {
println!("AsyncEnum(A)::async_drop: {}", foo.0);
AsyncEnum::B(SyncInt(foo.0))
}
AsyncEnum::B(foo) => {
println!("AsyncEnum(B)::async_drop: {}", foo.0);
AsyncEnum::A(AsyncInt(foo.0))
}
};
mem::forget(mem::replace(&mut *self, new_self));
}
}
@ -186,14 +198,19 @@ union AsyncUnion {
unsigned: u32,
}
impl AsyncDrop for AsyncUnion {
type Dropper<'a> = impl Future<Output = ()>;
fn async_drop(self: Pin<&mut Self>) -> Self::Dropper<'_> {
async move {
println!("AsyncUnion::Dropper::poll: {}, {}", unsafe { self.signed }, unsafe {
self.unsigned
});
}
impl Drop for AsyncUnion {
fn drop(&mut self) {
println!(
"AsyncUnion::drop: {}, {}",
unsafe { self.signed },
unsafe { self.unsigned },
);
}
}
impl AsyncDrop for AsyncUnion {
async fn drop(self: Pin<&mut Self>) {
println!("AsyncUnion::async_drop: {}, {}", unsafe { self.signed }, unsafe {
self.unsigned
});
}
}

View file

@ -1,22 +1,23 @@
AsyncInt::Dropper::poll: 0
AsyncInt::Dropper::poll: 1
AsyncInt::Dropper::poll: 2
AsyncInt::Dropper::poll: 3
AsyncInt::Dropper::poll: 4
AsyncStruct::Dropper::poll: 6
AsyncInt::Dropper::poll: 7
AsyncInt::Dropper::poll: 8
AsyncReference::Dropper::poll: 10
AsyncInt::Dropper::poll: 11
AsyncEnum(A)::Dropper::poll: 12
AsyncInt::async_drop: 0
AsyncInt::async_drop: 1
AsyncInt::async_drop: 2
AsyncInt::async_drop: 3
AsyncInt::async_drop: 4
AsyncStruct::async_drop: 6
AsyncInt::async_drop: 7
AsyncInt::async_drop: 8
AsyncReference::async_drop: 10
AsyncInt::async_drop: 11
AsyncEnum(A)::async_drop: 12
SyncInt::drop: 12
AsyncEnum(B)::Dropper::poll: 13
AsyncInt::Dropper::poll: 13
AsyncEnum(B)::async_drop: 13
AsyncInt::async_drop: 13
SyncInt::drop: 14
SyncThenAsync::drop: 15
AsyncInt::Dropper::poll: 16
AsyncInt::async_drop: 16
SyncInt::drop: 17
AsyncInt::Dropper::poll: 18
AsyncInt::Dropper::poll: 19
AsyncInt::Dropper::poll: 20
AsyncUnion::Dropper::poll: 21, 21
AsyncInt::async_drop: 18
AsyncInt::async_drop: 19
AsyncInt::async_drop: 20
AsyncUnion::async_drop: 21, 21
AsyncInt::async_drop: 10

View file

@ -1,22 +1,23 @@
AsyncInt::Dropper::poll: 0
AsyncInt::Dropper::poll: 1
AsyncInt::Dropper::poll: 2
AsyncInt::Dropper::poll: 3
AsyncInt::Dropper::poll: 4
AsyncStruct::Dropper::poll: 6
AsyncInt::Dropper::poll: 7
AsyncInt::Dropper::poll: 8
AsyncReference::Dropper::poll: 10
AsyncInt::Dropper::poll: 11
AsyncEnum(A)::Dropper::poll: 12
AsyncInt::async_drop: 0
AsyncInt::async_drop: 1
AsyncInt::async_drop: 2
AsyncInt::async_drop: 3
AsyncInt::async_drop: 4
AsyncStruct::async_drop: 6
AsyncInt::async_drop: 7
AsyncInt::async_drop: 8
AsyncReference::async_drop: 10
AsyncInt::async_drop: 11
AsyncEnum(A)::async_drop: 12
SyncInt::drop: 12
AsyncEnum(B)::Dropper::poll: 13
AsyncInt::Dropper::poll: 13
AsyncEnum(B)::async_drop: 13
AsyncInt::async_drop: 13
SyncInt::drop: 14
SyncThenAsync::drop: 15
AsyncInt::Dropper::poll: 16
AsyncInt::async_drop: 16
SyncInt::drop: 17
AsyncInt::Dropper::poll: 18
AsyncInt::Dropper::poll: 19
AsyncInt::Dropper::poll: 20
AsyncUnion::Dropper::poll: 21, 21
AsyncInt::async_drop: 18
AsyncInt::async_drop: 19
AsyncInt::async_drop: 20
AsyncUnion::async_drop: 21, 21
AsyncInt::async_drop: 10

View file

@ -1,11 +0,0 @@
//@ known-bug: rust-lang/rust#128695
//@ edition: 2021
use core::pin::{pin, Pin};
fn main() {
let fut = pin!(async {
let async_drop_fut = pin!(core::future::async_drop(async {}));
(async_drop_fut).await;
});
}

View file

@ -68,14 +68,18 @@ yields ()
}
bb10: {
drop(_1) -> [return: bb11, unwind: bb12];
drop(_1) -> [return: bb11, unwind: bb13, drop: bb12];
}
bb11: {
return;
}
bb12 (cleanup): {
bb12: {
coroutine_drop;
}
bb13 (cleanup): {
resume;
}
}

View file

@ -51,14 +51,18 @@ yields ()
}
bb6: {
drop(_1) -> [return: bb7, unwind: bb8];
drop(_1) -> [return: bb7, unwind: bb9, drop: bb8];
}
bb7: {
return;
}
bb8 (cleanup): {
bb8: {
coroutine_drop;
}
bb9 (cleanup): {
resume;
}
}

View file

@ -34,14 +34,18 @@ yields ()
StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
drop(_1) -> [return: bb1, unwind: bb2];
drop(_1) -> [return: bb1, unwind: bb3, drop: bb2];
}
bb1: {
return;
}
bb2 (cleanup): {
bb2: {
coroutine_drop;
}
bb3 (cleanup): {
resume;
}
}

View file

@ -34,14 +34,18 @@ yields ()
StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
drop(_1) -> [return: bb1, unwind: bb2];
drop(_1) -> [return: bb1, unwind: bb3, drop: bb2];
}
bb1: {
return;
}
bb2 (cleanup): {
bb2: {
coroutine_drop;
}
bb3 (cleanup): {
resume;
}
}

View file

@ -34,14 +34,18 @@ yields ()
StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
drop(_1) -> [return: bb1, unwind: bb2];
drop(_1) -> [return: bb1, unwind: bb3, drop: bb2];
}
bb1: {
return;
}
bb2 (cleanup): {
bb2: {
coroutine_drop;
}
bb3 (cleanup): {
resume;
}
}

View file

@ -34,14 +34,18 @@ yields ()
StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
drop(_1) -> [return: bb1, unwind: bb2];
drop(_1) -> [return: bb1, unwind: bb3, drop: bb2];
}
bb1: {
return;
}
bb2 (cleanup): {
bb2: {
coroutine_drop;
}
bb3 (cleanup): {
resume;
}
}

View file

@ -0,0 +1,101 @@
//@ run-pass
//@ check-run-results
// Future `bar` with internal async drop `Foo` will have async drop itself.
// And we trying to drop this future in sync context (`block_on` func)
#![feature(async_drop)]
#![allow(incomplete_features)]
use std::mem::ManuallyDrop;
//@ edition: 2021
use std::{
future::{Future, async_drop_in_place, AsyncDrop},
pin::{pin, Pin},
sync::{mpsc, Arc},
task::{Context, Poll, Wake, Waker},
};
// Resource with an observable lifecycle: construction and (sync or async)
// destruction are printed, keyed by `my_resource_handle`.
struct Foo {
    my_resource_handle: usize,
}
impl Foo {
    // Logs construction so the expected stdout records each handle's lifecycle.
    fn new(my_resource_handle: usize) -> Self {
        let out = Foo {
            my_resource_handle,
        };
        println!("Foo::new({})", my_resource_handle);
        out
    }
}
impl Drop for Foo {
    // Sync destructor; logs the handle. NOTE(review): with `AsyncDrop` also
    // implemented, this appears to be the path taken only in non-async drop
    // contexts — confirm against the expected stdout.
    fn drop(&mut self) {
        println!("Foo::drop({})", self.my_resource_handle);
    }
}
impl AsyncDrop for Foo {
    // Async destructor; logs the handle so drop order is visible in stdout.
    async fn drop(self: Pin<&mut Self>) {
        println!("Foo::async drop({})", self.my_resource_handle);
    }
}
fn main() {
    // Drive `bar` on the minimal single-threaded executor defined below.
    block_on(bar(10));
    println!("done")
}
// Owns a single `Foo`; it is async-dropped when this future is destroyed.
async fn baz(ident_base: usize) {
    let mut _first = Foo::new(ident_base);
}
// Creates `Foo(ident_base)` and awaits `baz`, which creates `ident_base + 1`;
// the expected stdout shows both async-dropped in reverse creation order.
async fn bar(ident_base: usize) {
    let mut _first = Foo::new(ident_base);
    baz(ident_base + 1).await;
}
fn block_on<F>(fut_unpin: F) -> F::Output
where
    F: Future,
{
    // ManuallyDrop suppresses the implicit (sync) drop of the future; it is
    // instead dropped explicitly below by driving `async_drop_in_place`.
    let mut fut_pin = pin!(ManuallyDrop::new(fut_unpin));
    // Pin-project through the ManuallyDrop to reach the future itself.
    let mut fut: Pin<&mut F> = unsafe {
        Pin::map_unchecked_mut(fut_pin.as_mut(), |x| &mut **x)
    };
    let (waker, rx) = simple_waker();
    let mut context = Context::from_waker(&waker);
    // Poll to completion; every Pending must have been preceded by a wake,
    // which `simple_waker` surfaces on `rx` (otherwise `try_recv` panics).
    let rv = loop {
        match fut.as_mut().poll(&mut context) {
            Poll::Ready(out) => break out,
            // expect wake in polls
            Poll::Pending => rx.try_recv().unwrap(),
        }
    };
    // Now run the async destructor of the completed future to completion.
    let drop_fut_unpin = unsafe { async_drop_in_place(fut.get_unchecked_mut()) };
    let mut drop_fut: Pin<&mut _> = pin!(drop_fut_unpin);
    loop {
        match drop_fut.as_mut().poll(&mut context) {
            Poll::Ready(()) => break,
            Poll::Pending => rx.try_recv().unwrap(),
        }
    }
    rv
}
// Builds a waker whose wake-ups are observable: every `wake` call pushes a
// unit message onto the returned channel receiver.
fn simple_waker() -> (Waker, mpsc::Receiver<()>) {
    // Minimal waker implementation that forwards wake calls to a channel.
    struct ChannelWaker {
        tx: std::sync::mpsc::Sender<()>,
    }
    impl Wake for ChannelWaker {
        fn wake(self: Arc<Self>) {
            // The receiver outlives the executor loop; a send failure would
            // indicate a test bug, so unwrap.
            self.tx.send(()).unwrap();
        }
    }
    let (sender, receiver) = mpsc::channel();
    let waker = Waker::from(Arc::new(ChannelWaker { tx: sender }));
    (waker, receiver)
}

View file

@ -0,0 +1,5 @@
Foo::new(10)
Foo::new(11)
Foo::async drop(11)
Foo::async drop(10)
done

View file

@ -0,0 +1,82 @@
//@ run-pass
//@ check-run-results
// Future `bar` with internal async drop `Foo` will have async drop itself.
// And we trying to drop this future in sync context (`block_on` func)
#![feature(async_drop)]
#![allow(incomplete_features)]
//@ edition: 2021
use std::{
future::{Future, AsyncDrop},
pin::{pin, Pin},
sync::{mpsc, Arc},
task::{Context, Poll, Wake, Waker},
};
// Resource with an observable lifecycle: construction and (sync or async)
// destruction are printed, keyed by `my_resource_handle`.
struct Foo {
    my_resource_handle: usize,
}
impl Foo {
    // Logs construction so the expected stdout records each handle's lifecycle.
    fn new(my_resource_handle: usize) -> Self {
        let out = Foo {
            my_resource_handle,
        };
        println!("Foo::new({})", my_resource_handle);
        out
    }
}
impl Drop for Foo {
    // Sync destructor; logs the handle. NOTE(review): the expected stdout for
    // this test shows only the async drop path — confirm this never fires here.
    fn drop(&mut self) {
        println!("Foo::drop({})", self.my_resource_handle);
    }
}
impl AsyncDrop for Foo {
    // Async destructor; logs the handle so drop order is visible in stdout.
    async fn drop(self: Pin<&mut Self>) {
        println!("Foo::async drop({})", self.my_resource_handle);
    }
}
fn main() {
    // Drive `bar` on the minimal single-threaded executor defined below.
    block_on(bar(10));
    println!("done")
}
// Owns a single `Foo` with an async drop, so (per the file header comment)
// `bar`'s future has async drop glue itself.
async fn bar(ident_base: usize) {
    let mut _first = Foo::new(ident_base);
}
// Minimal single-threaded executor: polls `fut` until it is ready, asserting
// that every `Pending` result was accompanied by a wake-up on the channel.
fn block_on<F>(fut: F) -> F::Output
where
    F: Future,
{
    let mut pinned = pin!(fut);
    let (waker, wake_rx) = simple_waker();
    let mut cx = Context::from_waker(&waker);
    loop {
        if let Poll::Ready(out) = pinned.as_mut().poll(&mut cx) {
            return out;
        }
        // A Pending poll must have scheduled a wake; panic otherwise.
        wake_rx.try_recv().unwrap();
    }
}
// Builds a waker whose wake-ups are observable: every `wake` call pushes a
// unit message onto the returned channel receiver.
fn simple_waker() -> (Waker, mpsc::Receiver<()>) {
    // Minimal waker implementation that forwards wake calls to a channel.
    struct ChannelWaker {
        tx: std::sync::mpsc::Sender<()>,
    }
    impl Wake for ChannelWaker {
        fn wake(self: Arc<Self>) {
            // The receiver outlives the executor loop; a send failure would
            // indicate a test bug, so unwrap.
            self.tx.send(()).unwrap();
        }
    }
    let (sender, receiver) = mpsc::channel();
    let waker = Waker::from(Arc::new(ChannelWaker { tx: sender }));
    (waker, receiver)
}

View file

@ -0,0 +1,3 @@
Foo::new(10)
Foo::async drop(10)
done

View file

@ -0,0 +1,112 @@
//@ run-pass
//@ check-run-results
// struct `Foo` has both sync and async drop.
// Struct `Complex` contains three `Foo` fields and has complex async drop glue.
#![feature(async_drop)]
#![allow(incomplete_features)]
use std::mem::ManuallyDrop;
//@ edition: 2021
// Print helper kept out-of-line via `#[inline(never)]` — NOTE(review):
// presumably to keep codegen for the async fns stable across this test.
#[inline(never)]
fn myprintln(msg: &str, my_resource_handle: usize) {
    println!("{} : {}", msg, my_resource_handle);
}
use std::{
future::{Future, async_drop_in_place, AsyncDrop},
pin::{pin, Pin},
sync::{mpsc, Arc},
task::{Context, Poll, Wake, Waker},
};
// Resource with an observable lifecycle: construction and (sync or async)
// destruction are printed, keyed by `my_resource_handle`.
struct Foo {
    my_resource_handle: usize,
}
impl Foo {
    // Logs construction so the expected stdout records each handle's lifecycle.
    fn new(my_resource_handle: usize) -> Self {
        let out = Foo {
            my_resource_handle,
        };
        myprintln("Foo::new()", my_resource_handle);
        out
    }
}
impl Drop for Foo {
    // Sync destructor; per the expected stdout it runs for the `Foo` created
    // in `main`'s sync scope (handle 7), while async contexts use `AsyncDrop`.
    fn drop(&mut self) {
        myprintln("Foo::drop()", self.my_resource_handle);
    }
}
impl AsyncDrop for Foo {
    // Async destructor; logs the handle so drop order is visible in stdout.
    async fn drop(self: Pin<&mut Self>) {
        myprintln("Foo::async drop()", self.my_resource_handle);
    }
}
fn main() {
    {
        // Sync scope: the plain `Drop` runs here (handle 7 in stdout).
        let _ = Foo::new(7);
    }
    println!("Middle");
    {
        // Async context: the four `Foo`s in `bar` are async-dropped.
        block_on(bar(10));
    }
    println!("Done")
}
// Builds an array of four `Foo`s; dropping the future drives the async drop
// glue for `[Foo; 4]`, dropping elements in index order (see stdout: 10..13).
async fn bar(ident_base: usize) {
    let _vec: [Foo; 4] = [
        Foo::new(ident_base),
        Foo::new(ident_base + 1),
        Foo::new(ident_base + 2),
        Foo::new(ident_base + 3)
    ];
}
fn block_on<F>(fut_unpin: F) -> F::Output
where
    F: Future,
{
    // ManuallyDrop suppresses the implicit (sync) drop of the future; it is
    // instead dropped explicitly below by driving `async_drop_in_place`.
    let mut fut_pin = pin!(ManuallyDrop::new(fut_unpin));
    // Pin-project through the ManuallyDrop to reach the future itself.
    let mut fut: Pin<&mut F> = unsafe {
        Pin::map_unchecked_mut(fut_pin.as_mut(), |x| &mut **x)
    };
    let (waker, rx) = simple_waker();
    let mut context = Context::from_waker(&waker);
    // Poll to completion; every Pending must have been preceded by a wake,
    // which `simple_waker` surfaces on `rx` (otherwise `try_recv` panics).
    let rv = loop {
        match fut.as_mut().poll(&mut context) {
            Poll::Ready(out) => break out,
            // expect wake in polls
            Poll::Pending => rx.try_recv().unwrap(),
        }
    };
    // Now run the async destructor of the completed future to completion.
    let drop_fut_unpin = unsafe { async_drop_in_place(fut.get_unchecked_mut()) };
    let mut drop_fut: Pin<&mut _> = pin!(drop_fut_unpin);
    loop {
        match drop_fut.as_mut().poll(&mut context) {
            Poll::Ready(()) => break,
            Poll::Pending => rx.try_recv().unwrap(),
        }
    }
    rv
}
// Builds a waker whose wake-ups are observable: every `wake` call pushes a
// unit message onto the returned channel receiver.
fn simple_waker() -> (Waker, mpsc::Receiver<()>) {
    // Minimal waker implementation that forwards wake calls to a channel.
    struct ChannelWaker {
        tx: std::sync::mpsc::Sender<()>,
    }
    impl Wake for ChannelWaker {
        fn wake(self: Arc<Self>) {
            // The receiver outlives the executor loop; a send failure would
            // indicate a test bug, so unwrap.
            self.tx.send(()).unwrap();
        }
    }
    let (sender, receiver) = mpsc::channel();
    let waker = Waker::from(Arc::new(ChannelWaker { tx: sender }));
    (waker, receiver)
}

View file

@ -0,0 +1,12 @@
Foo::new() : 7
Foo::drop() : 7
Middle
Foo::new() : 10
Foo::new() : 11
Foo::new() : 12
Foo::new() : 13
Foo::async drop() : 10
Foo::async drop() : 11
Foo::async drop() : 12
Foo::async drop() : 13
Done

View file

@ -0,0 +1,111 @@
//@ run-pass
//@ check-run-results
// struct `Foo` has both sync and async drop.
#![feature(async_drop)]
#![allow(incomplete_features)]
use std::mem::ManuallyDrop;
//@ edition: 2021
// Print helper kept out-of-line via `#[inline(never)]` — NOTE(review):
// presumably to keep codegen for the async fns stable across this test.
#[inline(never)]
fn myprintln(msg: &str, my_resource_handle: usize) {
    println!("{} : {}", msg, my_resource_handle);
}
use std::{
future::{Future, async_drop_in_place, AsyncDrop},
pin::{pin, Pin},
sync::{mpsc, Arc},
task::{Context, Poll, Wake, Waker},
};
// Resource with an observable lifecycle: construction and (sync or async)
// destruction are printed, keyed by `my_resource_handle`.
struct Foo {
    my_resource_handle: usize,
}
impl Foo {
    // Logs construction so the expected stdout records each handle's lifecycle.
    fn new(my_resource_handle: usize) -> Self {
        let out = Foo {
            my_resource_handle,
        };
        myprintln("Foo::new()", my_resource_handle);
        out
    }
}
impl Drop for Foo {
    // Sync destructor; runs for `Foo`s dropped outside an async context
    // (handle 7 in `main`'s first scope).
    fn drop(&mut self) {
        myprintln("Foo::drop()", self.my_resource_handle);
    }
}
impl AsyncDrop for Foo {
    // Async destructor; logs the handle so drop order is visible in stdout.
    async fn drop(self: Pin<&mut Self>) {
        myprintln("Foo::async drop()", self.my_resource_handle);
    }
}
fn main() {
    {
        // Sync scope: the plain `Drop` runs here (handle 7).
        let _ = Foo::new(7);
    }
    println!("Middle");
    {
        // Async context: drop of `bar`'s future async-drops its four `Foo`s.
        block_on(bar(6, 10));
    }
    println!("Done")
}
// Generic async fn holding four `Foo`s — NOTE(review): `_arg` is otherwise
// unused, so the generic parameter presumably exists to exercise async drop
// glue on a generic future; confirm against the commit intent.
async fn bar<T>(_arg: T, ident_base: usize) {
    let _vec: [Foo; 4] = [
        Foo::new(ident_base),
        Foo::new(ident_base + 1),
        Foo::new(ident_base + 2),
        Foo::new(ident_base + 3)
    ];
}
fn block_on<F>(fut_unpin: F) -> F::Output
where
    F: Future,
{
    // ManuallyDrop suppresses the implicit (sync) drop of the future; it is
    // instead dropped explicitly below by driving `async_drop_in_place`.
    let mut fut_pin = pin!(ManuallyDrop::new(fut_unpin));
    // Pin-project through the ManuallyDrop to reach the future itself.
    let mut fut: Pin<&mut F> = unsafe {
        Pin::map_unchecked_mut(fut_pin.as_mut(), |x| &mut **x)
    };
    let (waker, rx) = simple_waker();
    let mut context = Context::from_waker(&waker);
    // Poll to completion; every Pending must have been preceded by a wake,
    // which `simple_waker` surfaces on `rx` (otherwise `try_recv` panics).
    let rv = loop {
        match fut.as_mut().poll(&mut context) {
            Poll::Ready(out) => break out,
            // expect wake in polls
            Poll::Pending => rx.try_recv().unwrap(),
        }
    };
    // Now run the async destructor of the completed future to completion.
    let drop_fut_unpin = unsafe { async_drop_in_place(fut.get_unchecked_mut()) };
    let mut drop_fut: Pin<&mut _> = pin!(drop_fut_unpin);
    loop {
        match drop_fut.as_mut().poll(&mut context) {
            Poll::Ready(()) => break,
            Poll::Pending => rx.try_recv().unwrap(),
        }
    }
    rv
}
// Builds a waker whose wake-ups are observable: every `wake` call pushes a
// unit message onto the returned channel receiver.
fn simple_waker() -> (Waker, mpsc::Receiver<()>) {
    // Minimal waker implementation that forwards wake calls to a channel.
    struct ChannelWaker {
        tx: std::sync::mpsc::Sender<()>,
    }
    impl Wake for ChannelWaker {
        fn wake(self: Arc<Self>) {
            // The receiver outlives the executor loop; a send failure would
            // indicate a test bug, so unwrap.
            self.tx.send(()).unwrap();
        }
    }
    let (sender, receiver) = mpsc::channel();
    let waker = Waker::from(Arc::new(ChannelWaker { tx: sender }));
    (waker, receiver)
}

Some files were not shown because too many files have changed in this diff Show more