Wrap platform-specific QoS in r-a-specific “thread intent”

This commit is contained in:
Luna Razzaghipour 2023-05-27 03:18:17 +10:00
parent d0b001eed2
commit 74bc2a47e0
No known key found for this signature in database
13 changed files with 354 additions and 295 deletions

View file

@ -79,13 +79,15 @@ fn try_main(flags: flags::RustAnalyzer) -> Result<()> {
return Ok(());
}
// rust-analyzer’s “main thread” is actually a secondary thread
// with an increased stack size at the User Initiated QoS class.
// We use this QoS class because any delay in the main loop
// rust-analyzer’s “main thread” is actually
// a secondary latency-sensitive thread with an increased stack size.
// We use this thread intent because any delay in the main loop
// will make actions like hitting enter in the editor slow.
// rust-analyzer does not block the editors render loop,
// so we don’t use User Interactive.
with_extra_thread("LspServer", stdx::thread::QoSClass::UserInitiated, run_server)?;
with_extra_thread(
"LspServer",
stdx::thread::ThreadIntent::LatencySensitive,
run_server,
)?;
}
flags::RustAnalyzerCmd::Parse(cmd) => cmd.run()?,
flags::RustAnalyzerCmd::Symbols(cmd) => cmd.run()?,
@ -143,10 +145,10 @@ const STACK_SIZE: usize = 1024 * 1024 * 8;
/// space.
fn with_extra_thread(
thread_name: impl Into<String>,
qos_class: stdx::thread::QoSClass,
thread_intent: stdx::thread::ThreadIntent,
f: impl FnOnce() -> Result<()> + Send + 'static,
) -> Result<()> {
let handle = stdx::thread::Builder::new(qos_class)
let handle = stdx::thread::Builder::new(thread_intent)
.name(thread_name.into())
.stack_size(STACK_SIZE)
.spawn(f)?;

View file

@ -4,7 +4,7 @@ use std::{fmt, panic, thread};
use ide::Cancelled;
use lsp_server::ExtractError;
use serde::{de::DeserializeOwned, Serialize};
use stdx::thread::QoSClass;
use stdx::thread::ThreadIntent;
use crate::{
global_state::{GlobalState, GlobalStateSnapshot},
@ -104,7 +104,7 @@ impl<'a> RequestDispatcher<'a> {
None => return self,
};
self.global_state.task_pool.handle.spawn(QoSClass::Utility, {
self.global_state.task_pool.handle.spawn(ThreadIntent::Worker, {
let world = self.global_state.snapshot();
move || {
let result = panic::catch_unwind(move || {
@ -135,7 +135,7 @@ impl<'a> RequestDispatcher<'a> {
R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
R::Result: Serialize,
{
self.on_with_qos::<R>(QoSClass::Utility, f)
self.on_with_thread_intent::<R>(ThreadIntent::Worker, f)
}
/// Dispatches a latency-sensitive request onto the thread pool.
@ -148,7 +148,7 @@ impl<'a> RequestDispatcher<'a> {
R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
R::Result: Serialize,
{
self.on_with_qos::<R>(QoSClass::UserInitiated, f)
self.on_with_thread_intent::<R>(ThreadIntent::LatencySensitive, f)
}
pub(crate) fn finish(&mut self) {
@ -163,9 +163,9 @@ impl<'a> RequestDispatcher<'a> {
}
}
fn on_with_qos<R>(
fn on_with_thread_intent<R>(
&mut self,
qos_class: QoSClass,
intent: ThreadIntent,
f: fn(GlobalStateSnapshot, R::Params) -> Result<R::Result>,
) -> &mut Self
where
@ -178,7 +178,7 @@ impl<'a> RequestDispatcher<'a> {
None => return self,
};
self.global_state.task_pool.handle.spawn(qos_class, {
self.global_state.task_pool.handle.spawn(intent, {
let world = self.global_state.snapshot();
move || {
let result = panic::catch_unwind(move || {

View file

@ -291,7 +291,7 @@ fn run_flycheck(state: &mut GlobalState, vfs_path: VfsPath) -> bool {
}
Ok(())
};
state.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, move |_| {
state.task_pool.handle.spawn_with_sender(stdx::thread::ThreadIntent::Worker, move |_| {
if let Err(e) = std::panic::catch_unwind(task) {
tracing::error!("flycheck task panicked: {e:?}")
}

View file

@ -397,7 +397,7 @@ impl GlobalState {
tracing::debug!(%cause, "will prime caches");
let num_worker_threads = self.config.prime_caches_num_threads();
self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, {
self.task_pool.handle.spawn_with_sender(stdx::thread::ThreadIntent::Worker, {
let analysis = self.snapshot().analysis;
move |sender| {
sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
@ -680,7 +680,7 @@ impl GlobalState {
.on_sync::<lsp_ext::OnTypeFormatting>(handlers::handle_on_type_formatting)
// We can’t run latency-sensitive request handlers which do semantic
// analysis on the main thread because that would block other
// requests. Instead, we run these request handlers on higher QoS
// requests. Instead, we run these request handlers on higher priority
// threads in the threadpool.
.on_latency_sensitive::<lsp_types::request::Completion>(handlers::handle_completion)
.on_latency_sensitive::<lsp_types::request::ResolveCompletionItem>(
@ -789,8 +789,8 @@ impl GlobalState {
let snapshot = self.snapshot();
// Diagnostics are triggered by the user typing
// so we want computing them to run at the User Initiated QoS.
self.task_pool.handle.spawn(stdx::thread::QoSClass::UserInitiated, move || {
// so we run them on a latency sensitive thread.
self.task_pool.handle.spawn(stdx::thread::ThreadIntent::LatencySensitive, move || {
let _p = profile::span("publish_diagnostics");
let diagnostics = subscriptions
.into_iter()

View file

@ -27,7 +27,7 @@ use ide_db::{
use itertools::Itertools;
use proc_macro_api::{MacroDylib, ProcMacroServer};
use project_model::{PackageRoot, ProjectWorkspace, WorkspaceBuildScripts};
use stdx::format_to;
use stdx::{format_to, thread::ThreadIntent};
use syntax::SmolStr;
use triomphe::Arc;
use vfs::{file_set::FileSetConfig, AbsPath, AbsPathBuf, ChangeKind};
@ -185,7 +185,7 @@ impl GlobalState {
pub(crate) fn fetch_workspaces(&mut self, cause: Cause) {
tracing::info!(%cause, "will fetch workspaces");
self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, {
self.task_pool.handle.spawn_with_sender(ThreadIntent::Worker, {
let linked_projects = self.config.linked_projects();
let detached_files = self.config.detached_files().to_vec();
let cargo_config = self.config.cargo();
@ -260,7 +260,7 @@ impl GlobalState {
tracing::info!(%cause, "will fetch build data");
let workspaces = Arc::clone(&self.workspaces);
let config = self.config.cargo();
self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, move |sender| {
self.task_pool.handle.spawn_with_sender(ThreadIntent::Worker, move |sender| {
sender.send(Task::FetchBuildData(BuildDataProgress::Begin)).unwrap();
let progress = {
@ -280,7 +280,7 @@ impl GlobalState {
let dummy_replacements = self.config.dummy_replacements().clone();
let proc_macro_clients = self.proc_macro_clients.clone();
self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, move |sender| {
self.task_pool.handle.spawn_with_sender(ThreadIntent::Worker, move |sender| {
sender.send(Task::LoadProcMacros(ProcMacroProgress::Begin)).unwrap();
let dummy_replacements = &dummy_replacements;

View file

@ -2,7 +2,7 @@
//! It is used in [`crate::global_state::GlobalState`] throughout the main loop.
use crossbeam_channel::Sender;
use stdx::thread::{Pool, QoSClass};
use stdx::thread::{Pool, ThreadIntent};
pub(crate) struct TaskPool<T> {
sender: Sender<T>,
@ -14,23 +14,23 @@ impl<T> TaskPool<T> {
TaskPool { sender, pool: Pool::new(threads) }
}
pub(crate) fn spawn<F>(&mut self, qos_class: QoSClass, task: F)
pub(crate) fn spawn<F>(&mut self, intent: ThreadIntent, task: F)
where
F: FnOnce() -> T + Send + 'static,
T: Send + 'static,
{
self.pool.spawn(qos_class, {
self.pool.spawn(intent, {
let sender = self.sender.clone();
move || sender.send(task()).unwrap()
})
}
pub(crate) fn spawn_with_sender<F>(&mut self, qos_class: QoSClass, task: F)
pub(crate) fn spawn_with_sender<F>(&mut self, intent: ThreadIntent, task: F)
where
F: FnOnce(Sender<T>) + Send + 'static,
T: Send + 'static,
{
self.pool.spawn(qos_class, {
self.pool.spawn(intent, {
let sender = self.sender.clone();
move || task(sender)
})