Line data Source code
1 : //!
2 : //! This module provides centralized handling of tokio tasks in the Page Server.
3 : //!
4 : //! We provide a few basic facilities:
5 : //! - A global registry of tasks that lists what kind of tasks they are, and
6 : //! which tenant or timeline they are working on
7 : //!
8 : //! - The ability to request a task to shut down.
9 : //!
10 : //!
11 : //! # How it works
12 : //!
13 : //! There is a global hashmap of all the tasks (`TASKS`). Whenever a new
14 : //! task is spawned, a PageServerTask entry is added there, and when a
15 : //! task dies, it removes itself from the hashmap. If you want to kill a
16 : //! task, you can scan the hashmap to find it.
17 : //!
18 : //! # Task shutdown
19 : //!
20 : //! To kill a task, we rely on co-operation from the victim. Each task is
21 : //! expected to periodically call the `is_shutdown_requested()` function, and
22 : //! if it returns true, exit gracefully. In addition to that, when waiting for
23 : //! the network or other long-running operation, you can use
24 : //! `shutdown_watcher()` function to get a Future that will become ready if
25 : //! the current task has been requested to shut down. You can use that with
26 : //! tokio::select!().
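//!
//! A minimal sketch of such a cooperative task loop (illustrative only; `next_work_item`
//! and `handle_item` are hypothetical placeholders, not functions in this crate):
//!
//! ```ignore
//! async fn my_task_loop() -> anyhow::Result<()> {
//!     loop {
//!         tokio::select! {
//!             // Exit promptly once shutdown has been requested for this task.
//!             _ = task_mgr::shutdown_watcher() => return Ok(()),
//!             item = next_work_item() => handle_item(item).await?,
//!         }
//!     }
//! }
//! ```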
27 : //!
28 : //! TODO: This would be a good place to also handle panics in a somewhat sane way.
29 : //! Depending on what task panics, we might want to kill the whole server, or
30 : //! only a single tenant or timeline.
31 : //!
32 :
33 : use std::collections::HashMap;
34 : use std::fmt;
35 : use std::future::Future;
36 : use std::num::NonZeroUsize;
37 : use std::panic::AssertUnwindSafe;
38 : use std::str::FromStr;
39 : use std::sync::atomic::{AtomicU64, Ordering};
40 : use std::sync::{Arc, Mutex};
41 :
42 : use futures::FutureExt;
43 : use once_cell::sync::Lazy;
44 : use pageserver_api::shard::TenantShardId;
45 : use tokio::task::JoinHandle;
46 : use tokio::task_local;
47 : use tokio_util::sync::CancellationToken;
48 : use tracing::{debug, error, info, warn};
49 : use utils::env;
50 : use utils::id::TimelineId;
51 :
52 : use crate::metrics::set_tokio_runtime_setup;
53 :
54 : //
55 : // There are four runtimes:
56 : //
57 : // Compute request runtime
58 : // - used to handle connections from compute nodes. Any tasks related to satisfying
59 : // GetPage requests, base backups, import, and other such compute node operations
60 : // are handled by the Compute request runtime
61 : // - page_service.rs
62 : // - this includes layer downloads from remote storage, if a layer is needed to
63 : // satisfy a GetPage request
64 : //
65 : // Management request runtime
66 : // - used to handle HTTP API requests
67 : //
68 : // WAL receiver runtime:
69 : // - used to handle WAL receiver connections.
70 : // - and to receive updates from storage_broker
71 : //
72 : // Background runtime
73 : // - layer flushing
74 : // - garbage collection
75 : // - compaction
76 : // - remote storage uploads
77 : // - initial tenant loading
78 : //
79 : // Everything runs in a tokio task. If you spawn new tasks, spawn them on the correct
80 : // runtime.
81 : //
82 : // There might be situations when one task needs to wait for a task running in another
83 : // runtime to finish. For example, if a background operation needs a layer from remote
84 : // storage, it will start to download it. But if the download of that layer was already
85 : // initiated by a GetPage request, the background task will instead wait for that
86 : // download - running in the compute request runtime - to finish.
87 : // Another example: the initial tenant loading tasks are launched in the background ops
88 : // runtime. If a GetPage request comes in before the load of a tenant has finished, the
89 : // GetPage request will wait for the tenant load to finish.
90 : //
91 : // The core Timeline code is synchronous, and uses a bunch of std Mutexes and RWLocks to
92 : // protect data structures. Let's keep it that way. Synchronous code is easier to debug
93 : // and analyze, and there's a lot of hairy, low-level, performance critical code there.
94 : //
95 : // It's nice to have different runtimes, so that you can quickly eyeball how much CPU
96 : // time each class of operations is taking, with 'top -H' or similar.
97 : //
98 : // It's also good to avoid hogging all threads that would be needed to process
99 : // other operations if, for example, the upload tasks get blocked on locks. It shouldn't
100 : // happen, but still.
101 : //
102 :
103 404 : pub(crate) static TOKIO_WORKER_THREADS: Lazy<NonZeroUsize> = Lazy::new(|| {
104 404 : // replicates tokio-1.28.1::loom::sys::num_cpus which is not available publicly
105 404 : // tokio would have already panicked for parsing errors or NotUnicode
106 404 : //
107 404 : // this will be wrong if any of the runtimes has its worker threads configured to something
108 404 : // else, but that has not been needed in a long time.
109 404 : NonZeroUsize::new(
110 404 : std::env::var("TOKIO_WORKER_THREADS")
111 404 : .map(|s| s.parse::<usize>().unwrap())
112 404 : .unwrap_or_else(|_e| usize::max(2, num_cpus::get())),
113 404 : )
114 404 : .expect("the max() ensures that this is not zero")
115 404 : });
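
// An illustrative calculation (assumed numbers, not taken from this file): on an
// 8-core machine with TOKIO_WORKER_THREADS unset, each multi-threaded runtime gets
// 8 worker threads, so the default multiple-runtimes setup starts
// NUM_MULTIPLE_RUNTIMES * 8 = 4 * 8 = 32 tokio worker threads in total
// (see `NUM_MULTIPLE_RUNTIMES` further down):
//
// ```ignore
// let per_runtime = usize::max(2, num_cpus::get());        // 8 on the assumed machine
// let total = NUM_MULTIPLE_RUNTIMES.get() * per_runtime;   // 4 * 8 = 32
// ```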
116 :
117 : enum TokioRuntimeMode {
118 : SingleThreaded,
119 : MultiThreaded { num_workers: NonZeroUsize },
120 : }
121 :
122 : impl FromStr for TokioRuntimeMode {
123 : type Err = String;
124 :
125 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
126 0 : match s {
127 0 : "current_thread" => Ok(TokioRuntimeMode::SingleThreaded),
128 0 : s => match s.strip_prefix("multi_thread:") {
129 0 : Some("default") => Ok(TokioRuntimeMode::MultiThreaded {
130 0 : num_workers: *TOKIO_WORKER_THREADS,
131 0 : }),
132 0 : Some(suffix) => {
133 0 : let num_workers = suffix.parse::<NonZeroUsize>().map_err(|e| {
134 0 : format!(
135 0 : "invalid number of multi-threaded runtime workers ({suffix:?}): {e}",
136 0 : )
137 0 : })?;
138 0 : Ok(TokioRuntimeMode::MultiThreaded { num_workers })
139 : }
140 0 : None => Err(format!("invalid runtime config: {s:?}")),
141 : },
142 : }
143 0 : }
144 : }
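
// A sketch of values accepted by the parser above (and hence by the
// NEON_PAGESERVER_USE_ONE_RUNTIME environment variable); the assertions are
// illustrative only, not tests that exist in this module:
//
// ```ignore
// assert!(matches!("current_thread".parse::<TokioRuntimeMode>(), Ok(TokioRuntimeMode::SingleThreaded)));
// assert!(matches!("multi_thread:default".parse::<TokioRuntimeMode>(), Ok(TokioRuntimeMode::MultiThreaded { .. })));
// assert!(matches!("multi_thread:8".parse::<TokioRuntimeMode>(), Ok(TokioRuntimeMode::MultiThreaded { .. })));
// assert!("no_such_mode".parse::<TokioRuntimeMode>().is_err());
// ```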
145 :
146 404 : static TOKIO_THREAD_STACK_SIZE: Lazy<NonZeroUsize> = Lazy::new(|| {
147 404 : env::var("NEON_PAGESERVER_TOKIO_THREAD_STACK_SIZE")
148 404 : // the default 2MiB is insufficient, especially in debug mode
149 404 : .unwrap_or_else(|| NonZeroUsize::new(4 * 1024 * 1024).unwrap())
150 404 : });
151 :
152 404 : static ONE_RUNTIME: Lazy<Option<tokio::runtime::Runtime>> = Lazy::new(|| {
153 404 : let thread_name = "pageserver-tokio";
154 404 : let Some(mode) = env::var("NEON_PAGESERVER_USE_ONE_RUNTIME") else {
155 : // If the env var is not set, leave this static as None.
156 404 : set_tokio_runtime_setup(
157 404 : "multiple-runtimes",
158 404 : NUM_MULTIPLE_RUNTIMES
159 404 : .checked_mul(*TOKIO_WORKER_THREADS)
160 404 : .unwrap(),
161 404 : );
162 404 : return None;
163 : };
164 0 : Some(match mode {
165 : TokioRuntimeMode::SingleThreaded => {
166 0 : set_tokio_runtime_setup("one-runtime-single-threaded", NonZeroUsize::new(1).unwrap());
167 0 : tokio::runtime::Builder::new_current_thread()
168 0 : .thread_name(thread_name)
169 0 : .enable_all()
170 0 : .thread_stack_size(TOKIO_THREAD_STACK_SIZE.get())
171 0 : .build()
172 0 : .expect("failed to create one single runtime")
173 : }
174 0 : TokioRuntimeMode::MultiThreaded { num_workers } => {
175 0 : set_tokio_runtime_setup("one-runtime-multi-threaded", num_workers);
176 0 : tokio::runtime::Builder::new_multi_thread()
177 0 : .thread_name(thread_name)
178 0 : .enable_all()
179 0 : .worker_threads(num_workers.get())
180 0 : .thread_stack_size(TOKIO_THREAD_STACK_SIZE.get())
181 0 : .build()
182 0 : .expect("failed to create one multi-threaded runtime")
183 : }
184 : })
185 404 : });
186 :
187 : /// Declare a lazy static variable named `$varname` that will resolve
188 : /// to a tokio runtime handle. If the env var `NEON_PAGESERVER_USE_ONE_RUNTIME`
189 : /// is set, this will resolve to `ONE_RUNTIME`. Otherwise, the macro invocation
190 : /// declares a separate runtime and the lazy static variable `$varname`
191 : /// will resolve to that separate runtime.
192 : ///
193 : /// The result is that `$varname.spawn()` will use `ONE_RUNTIME` if
194 : /// `NEON_PAGESERVER_USE_ONE_RUNTIME` is set, and will use the separate runtime
195 : /// otherwise.
196 : macro_rules! pageserver_runtime {
197 : ($varname:ident, $name:literal) => {
198 424 : pub static $varname: Lazy<&'static tokio::runtime::Runtime> = Lazy::new(|| {
199 424 : if let Some(runtime) = &*ONE_RUNTIME {
200 0 : return runtime;
201 424 : }
202 424 : static RUNTIME: Lazy<tokio::runtime::Runtime> = Lazy::new(|| {
203 424 : tokio::runtime::Builder::new_multi_thread()
204 424 : .thread_name($name)
205 424 : .worker_threads(TOKIO_WORKER_THREADS.get())
206 424 : .enable_all()
207 424 : .thread_stack_size(TOKIO_THREAD_STACK_SIZE.get())
208 424 : .build()
209 424 : .expect(std::concat!("Failed to create runtime ", $name))
210 424 : });
211 424 : &*RUNTIME
212 424 : });
213 : };
214 : }
215 :
216 : pageserver_runtime!(COMPUTE_REQUEST_RUNTIME, "compute request worker");
217 : pageserver_runtime!(MGMT_REQUEST_RUNTIME, "mgmt request worker");
218 : pageserver_runtime!(WALRECEIVER_RUNTIME, "walreceiver worker");
219 : pageserver_runtime!(BACKGROUND_RUNTIME, "background op worker");
220 : // Bump this number when adding a new pageserver_runtime!
221 : const NUM_MULTIPLE_RUNTIMES: NonZeroUsize = NonZeroUsize::new(4).unwrap();
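
// For example, a periodic background job would be spawned on BACKGROUND_RUNTIME,
// while work serving a compute connection belongs on COMPUTE_REQUEST_RUNTIME.
// A minimal sketch using `spawn` from this module (the tenant/timeline values and
// the loop body are placeholders):
//
// ```ignore
// task_mgr::spawn(
//     BACKGROUND_RUNTIME.handle(),
//     TaskKind::Compaction,
//     tenant_shard_id,
//     Some(timeline_id),
//     "compaction loop",
//     async move {
//         // ... periodic compaction work, checking for shutdown between iterations ...
//         Ok(())
//     },
// );
// ```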
222 :
223 : #[derive(Debug, Clone, Copy)]
224 : pub struct PageserverTaskId(u64);
225 :
226 : impl fmt::Display for PageserverTaskId {
227 0 : fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
228 0 : self.0.fmt(f)
229 0 : }
230 : }
231 :
232 : /// Each task that we track is associated with a "task ID". It's just an
233 : /// increasing number that we assign. Note that it is different from tokio::task::Id.
234 : static NEXT_TASK_ID: AtomicU64 = AtomicU64::new(1);
235 :
236 : /// Global registry of tasks
237 : static TASKS: Lazy<Mutex<HashMap<u64, Arc<PageServerTask>>>> =
238 412 : Lazy::new(|| Mutex::new(HashMap::new()));
239 :
240 : task_local! {
241 : // This is a cancellation token which will be cancelled when a task needs to shut down. The
242 : // root token is kept in the global registry, so that anyone can send the signal to request
243 : // task shutdown.
244 : static SHUTDOWN_TOKEN: CancellationToken;
245 :
246 : // Each task holds reference to its own PageServerTask here.
247 : static CURRENT_TASK: Arc<PageServerTask>;
248 : }
249 :
250 : ///
251 : /// There are many kinds of tasks in the system. Some are associated with a particular
252 : /// tenant or timeline, while others are global.
253 : ///
254 : /// Note that we don't try to limit how many tasks of a certain kind can be running
255 : /// at the same time.
256 : ///
257 : #[derive(
258 : Debug,
259 : // NB: enumset::EnumSetType derives PartialEq, Eq, Clone, Copy
260 0 : enumset::EnumSetType,
261 : enum_map::Enum,
262 : serde::Serialize,
263 0 : serde::Deserialize,
264 : strum_macros::IntoStaticStr,
265 0 : strum_macros::EnumString,
266 : )]
267 : pub enum TaskKind {
268 : // Pageserver startup, i.e., `main`
269 : Startup,
270 :
271 : // libpq listener task. It just accepts connection and spawns a
272 : // PageRequestHandler task for each connection.
273 : LibpqEndpointListener,
274 :
275 : // HTTP endpoint listener.
276 : HttpEndpointListener,
277 :
278 : // Task that handles a single connection. A PageRequestHandler task
279 : // starts detached from any particular tenant or timeline, but it can be
280 : // associated with one later, after receiving a command from the client.
281 : PageRequestHandler,
282 :
283 : /// Manages the WAL receiver connection for one timeline.
284 : /// It subscribes to events from storage_broker and decides which safekeeper to connect to.
285 : /// Once the decision has been made, it establishes the connection using the `tokio-postgres` library.
286 : /// There is at most one connection at any given time.
287 : ///
288 : /// That `tokio-postgres` library represents a connection as two objects: a `Client` and a `Connection`.
289 : /// The `Client` object is what library users use to make requests & get responses.
290 : /// Internally, `Client` hands over requests to the `Connection` object.
291 : /// The `Connection` object is responsible for speaking the wire protocol.
292 : ///
293 : /// Walreceiver uses a legacy abstraction called `TaskHandle` to represent the activity of establishing and handling a connection.
294 : /// The `WalReceiverManager` task ensures that this `TaskHandle` task does not outlive the `WalReceiverManager` task.
295 : /// For the `RequestContext` that we hand to the TaskHandle, we use the [`WalReceiverConnectionHandler`] task kind.
296 : ///
297 : /// Once the connection is established, the `TaskHandle` task spawns a
298 : /// [`WalReceiverConnectionPoller`] task that is responsible for polling
299 : /// the `Connection` object.
300 : /// A `CancellationToken` created by the `TaskHandle` task ensures
301 : /// that the [`WalReceiverConnectionPoller`] task will be cancelled soon after the `TaskHandle` is dropped.
302 : ///
303 : /// [`WalReceiverConnectionHandler`]: Self::WalReceiverConnectionHandler
304 : /// [`WalReceiverConnectionPoller`]: Self::WalReceiverConnectionPoller
305 : WalReceiverManager,
306 :
307 : /// The `TaskHandle` task that executes `handle_walreceiver_connection`.
308 : /// See the comment on [`WalReceiverManager`].
309 : ///
310 : /// [`WalReceiverManager`]: Self::WalReceiverManager
311 : WalReceiverConnectionHandler,
312 :
313 : /// The task that polls the `tokio-postgres::Connection` object.
314 : /// Spawned by task [`WalReceiverConnectionHandler`](Self::WalReceiverConnectionHandler).
315 : /// See the comment on [`WalReceiverManager`](Self::WalReceiverManager).
316 : WalReceiverConnectionPoller,
317 :
318 : // Garbage collection worker. One per tenant
319 : GarbageCollector,
320 :
321 : // Compaction. One per tenant.
322 : Compaction,
323 :
324 : // Eviction. One per timeline.
325 : Eviction,
326 :
327 : // Tenant housekeeping (flush idle ephemeral layers, shut down idle walredo, etc.).
328 : TenantHousekeeping,
329 :
330 : /// See [`crate::disk_usage_eviction_task`].
331 : DiskUsageEviction,
332 :
333 : /// See [`crate::tenant::secondary`].
334 : SecondaryDownloads,
335 :
336 : /// See [`crate::tenant::secondary`].
337 : SecondaryUploads,
338 :
339 : // Initial logical size calculation
340 : InitialLogicalSizeCalculation,
341 :
342 : OndemandLogicalSizeCalculation,
343 :
344 : // Task that flushes frozen in-memory layers to disk
345 : LayerFlushTask,
346 :
347 : // Task that uploads a file to remote storage
348 : RemoteUploadTask,
349 :
350 : // task that handles the initial downloading of all tenants
351 : InitialLoad,
352 :
353 : // task that handles attaching a tenant
354 : Attach,
355 :
356 : // Used mostly for background deletion from s3
357 : TimelineDeletionWorker,
358 :
359 : // task that handles metrics collection
360 : MetricsCollection,
361 :
362 : // task that drives downloading layers
363 : DownloadAllRemoteLayers,
364 : // Task that calculates synthetic size for all active tenants
365 : CalculateSyntheticSize,
366 :
367 : // A request that comes in via the pageserver HTTP API.
368 : MgmtRequest,
369 :
370 : DebugTool,
371 :
372 : EphemeralFilePreWarmPageCache,
373 :
374 : LayerDownload,
375 :
376 : #[cfg(test)]
377 : UnitTest,
378 :
379 : DetachAncestor,
380 :
381 : ImportPgdata,
382 : }
383 :
384 : #[derive(Default)]
385 : struct MutableTaskState {
386 : /// Handle for waiting for the task to exit. It can be None, if the
387 : /// the task has already exited.
388 : join_handle: Option<JoinHandle<()>>,
389 : }
390 :
391 : struct PageServerTask {
392 : task_id: PageserverTaskId,
393 :
394 : kind: TaskKind,
395 :
396 : name: String,
397 :
398 : // To request task shutdown, just cancel this token.
399 : cancel: CancellationToken,
400 :
401 : /// Tasks may optionally be launched for a particular tenant/timeline, enabling
402 : /// later cancelling tasks for that tenant/timeline in [`shutdown_tasks`]
403 : tenant_shard_id: TenantShardId,
404 : timeline_id: Option<TimelineId>,
405 :
406 : mutable: Mutex<MutableTaskState>,
407 : }
408 :
409 : /// Launch a new task and register it in the global `TASKS` registry.
410 : /// The task removes its registry entry when it exits. To request shutdown, use
411 : /// [`shutdown_tasks`], which cancels the task's token and waits for it to finish.
412 8035 : pub fn spawn<F>(
413 8035 : runtime: &tokio::runtime::Handle,
414 8035 : kind: TaskKind,
415 8035 : tenant_shard_id: TenantShardId,
416 8035 : timeline_id: Option<TimelineId>,
417 8035 : name: &str,
418 8035 : future: F,
419 8035 : ) -> PageserverTaskId
420 8035 : where
421 8035 : F: Future<Output = anyhow::Result<()>> + Send + 'static,
422 8035 : {
423 8035 : let cancel = CancellationToken::new();
424 8035 : let task_id = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed);
425 8035 : let task = Arc::new(PageServerTask {
426 8035 : task_id: PageserverTaskId(task_id),
427 8035 : kind,
428 8035 : name: name.to_string(),
429 8035 : cancel: cancel.clone(),
430 8035 : tenant_shard_id,
431 8035 : timeline_id,
432 8035 : mutable: Mutex::new(MutableTaskState { join_handle: None }),
433 8035 : });
434 8035 :
435 8035 : TASKS.lock().unwrap().insert(task_id, Arc::clone(&task));
436 8035 :
437 8035 : let mut task_mut = task.mutable.lock().unwrap();
438 8035 :
439 8035 : let task_name = name.to_string();
440 8035 : let task_cloned = Arc::clone(&task);
441 8035 : let join_handle = runtime.spawn(task_wrapper(
442 8035 : task_name,
443 8035 : task_id,
444 8035 : task_cloned,
445 8035 : cancel,
446 8035 : future,
447 8035 : ));
448 8035 : task_mut.join_handle = Some(join_handle);
449 8035 : drop(task_mut);
450 8035 :
451 8035 : // The task is now running. Nothing more to do here
452 8035 : PageserverTaskId(task_id)
453 8035 : }
454 :
455 : /// This wrapper function runs in a newly-spawned task. It initializes the
456 : /// task-local variables and calls the payload function.
457 8035 : async fn task_wrapper<F>(
458 8035 : task_name: String,
459 8035 : task_id: u64,
460 8035 : task: Arc<PageServerTask>,
461 8035 : shutdown_token: CancellationToken,
462 8035 : future: F,
463 8035 : ) where
464 8035 : F: Future<Output = anyhow::Result<()>> + Send + 'static,
465 8035 : {
466 7887 : debug!("Starting task '{}'", task_name);
467 :
468 : // wrap the future so we log panics and errors
469 7887 : let tenant_shard_id = task.tenant_shard_id;
470 7887 : let timeline_id = task.timeline_id;
471 7887 : let fut = async move {
472 : // We use AssertUnwindSafe here so that the payload function
473 : // doesn't need to be UnwindSafe. We don't do anything after the
474 : // unwinding that would expose us to unwind-unsafe behavior.
475 7887 : let result = AssertUnwindSafe(future).catch_unwind().await;
476 6696 : match result {
477 : Ok(Ok(())) => {
478 6696 : debug!("Task '{}' exited normally", task_name);
479 : }
480 0 : Ok(Err(err)) => {
481 0 : error!(
482 0 : "Task '{}' tenant_shard_id: {:?}, timeline_id: {:?} exited with error: {:?}",
483 : task_name, tenant_shard_id, timeline_id, err
484 : );
485 : }
486 8 : Err(err) => {
487 8 : error!(
488 0 : "Task '{}' tenant_shard_id: {:?}, timeline_id: {:?} panicked: {:?}",
489 : task_name, tenant_shard_id, timeline_id, err
490 : );
491 : }
492 : }
493 6704 : };
494 :
495 : // add the task-locals
496 7887 : let fut = CURRENT_TASK.scope(task, fut);
497 7887 : let fut = SHUTDOWN_TOKEN.scope(shutdown_token, fut);
498 7887 :
499 7887 : // poll future to completion
500 7887 : fut.await;
501 :
502 : // Remove our entry from the global hashmap.
503 6704 : TASKS
504 6704 : .lock()
505 6704 : .unwrap()
506 6704 : .remove(&task_id)
507 6704 : .expect("no task in registry");
508 6704 : }
509 :
510 0 : pub async fn exit_on_panic_or_error<T, E>(
511 0 : task_name: &'static str,
512 0 : future: impl Future<Output = Result<T, E>>,
513 0 : ) -> T
514 0 : where
515 0 : E: std::fmt::Debug,
516 0 : {
517 : // We use AssertUnwindSafe here so that the payload function
518 : // doesn't need to be UnwindSafe. We don't do anything after the
519 : // unwinding that would expose us to unwind-unsafe behavior.
520 0 : let result = AssertUnwindSafe(future).catch_unwind().await;
521 0 : match result {
522 0 : Ok(Ok(val)) => val,
523 0 : Ok(Err(err)) => {
524 0 : error!(
525 : task_name,
526 0 : "Task exited with error, exiting process: {err:?}"
527 : );
528 0 : std::process::exit(1);
529 : }
530 0 : Err(panic_obj) => {
531 0 : error!(task_name, "Task panicked, exiting process: {panic_obj:?}");
532 0 : std::process::exit(1);
533 : }
534 : }
535 0 : }
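
// A hypothetical use for a task whose failure should take down the whole process
// (sketch only; `disk_usage_eviction_loop` is a placeholder, not an API of this crate):
//
// ```ignore
// BACKGROUND_RUNTIME.spawn(exit_on_panic_or_error("disk_usage_eviction", async move {
//     disk_usage_eviction_loop().await?;
//     anyhow::Ok(())
// }));
// ```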
536 :
537 : /// Signal and wait for tasks to shut down.
538 : ///
539 : ///
540 : /// The arguments are used to select the tasks to kill. Any None arguments are
541 : /// ignored. For example, to shut down all WAL receiver manager tasks:
542 : ///
543 : /// shutdown_tasks(Some(TaskKind::WalReceiverManager), None, None)
544 : ///
545 : /// Or to shut down all tasks for a given timeline:
546 : ///
547 : /// shutdown_tasks(None, Some(tenant_shard_id), Some(timeline_id))
548 : ///
549 52 : pub async fn shutdown_tasks(
550 52 : kind: Option<TaskKind>,
551 52 : tenant_shard_id: Option<TenantShardId>,
552 52 : timeline_id: Option<TimelineId>,
553 52 : ) {
554 52 : let mut victim_tasks = Vec::new();
555 52 :
556 52 : {
557 52 : let tasks = TASKS.lock().unwrap();
558 54 : for task in tasks.values() {
559 54 : if (kind.is_none() || Some(task.kind) == kind)
560 30 : && (tenant_shard_id.is_none() || Some(task.tenant_shard_id) == tenant_shard_id)
561 30 : && (timeline_id.is_none() || task.timeline_id == timeline_id)
562 16 : {
563 16 : task.cancel.cancel();
564 16 : victim_tasks.push((
565 16 : Arc::clone(task),
566 16 : task.kind,
567 16 : task.tenant_shard_id,
568 16 : task.timeline_id,
569 16 : ));
570 38 : }
571 : }
572 : }
573 :
574 52 : let log_all = kind.is_none() && tenant_shard_id.is_none() && timeline_id.is_none();
575 :
576 68 : for (task, task_kind, tenant_shard_id, timeline_id) in victim_tasks {
577 16 : let join_handle = {
578 16 : let mut task_mut = task.mutable.lock().unwrap();
579 16 : task_mut.join_handle.take()
580 : };
581 16 : if let Some(mut join_handle) = join_handle {
582 16 : if log_all {
583 : // warn to catch these in tests; there shouldn't be any
584 0 : warn!(name = task.name, tenant_shard_id = ?tenant_shard_id, timeline_id = ?timeline_id, kind = ?task_kind, "stopping left-over");
585 16 : }
586 16 : if tokio::time::timeout(std::time::Duration::from_secs(1), &mut join_handle)
587 16 : .await
588 16 : .is_err()
589 : {
590 : // allow some time to elapse before logging to cut down the number of log
591 : // lines.
592 0 : info!("waiting for task {} to shut down", task.name);
593 : // we never handled this return value, but:
594 : // - we don't deschedule which would lead to is_cancelled
595 : // - panics are already logged (is_panicked)
596 : // - task errors are already logged in the wrapper
597 0 : let _ = join_handle.await;
598 0 : info!("task {} completed", task.name);
599 16 : }
600 0 : } else {
601 0 : // Possibly one of:
602 0 : // * The task had not even fully started yet.
603 0 : // * It was shut down concurrently and already exited
604 0 : }
605 : }
606 52 : }
607 :
608 0 : pub fn current_task_kind() -> Option<TaskKind> {
609 0 : CURRENT_TASK.try_with(|ct| ct.kind).ok()
610 0 : }
611 :
612 0 : pub fn current_task_id() -> Option<PageserverTaskId> {
613 0 : CURRENT_TASK.try_with(|ct| ct.task_id).ok()
614 0 : }
615 :
616 : /// A future that completes when the current task has been requested to
617 : /// shut down.
618 0 : pub async fn shutdown_watcher() {
619 0 : let token = SHUTDOWN_TOKEN
620 0 : .try_with(|t| t.clone())
621 0 : .expect("shutdown_watcher() called in an unexpected task or thread");
622 0 :
623 0 : token.cancelled().await;
624 0 : }
625 :
626 : /// Clone the current task's cancellation token, which can be moved across tasks.
627 : ///
628 : /// When the task which is currently executing is shutdown, the cancellation token will be
629 : /// cancelled. It can however be moved to other tasks, such as `tokio::task::spawn_blocking` or
630 : /// `tokio::task::JoinSet::spawn`.
631 6995 : pub fn shutdown_token() -> CancellationToken {
632 6995 : let res = SHUTDOWN_TOKEN.try_with(|t| t.clone());
633 6995 :
634 6995 : if cfg!(test) {
635 : // in tests this method is called from non-taskmgr spawned tasks, and that is all ok.
636 6995 : res.unwrap_or_default()
637 : } else {
638 0 : res.expect("shutdown_token() called in an unexpected task or thread")
639 : }
640 6995 : }
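
// For example, the cloned token can be moved into a blocking task (a minimal sketch;
// `scan_one_directory` is a hypothetical helper, not part of this module):
//
// ```ignore
// let cancel = task_mgr::shutdown_token();
// let res = tokio::task::spawn_blocking(move || {
//     while !cancel.is_cancelled() {
//         scan_one_directory()?;
//     }
//     anyhow::Ok(())
// })
// .await;
// ```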
641 :
642 : /// Has the current task been requested to shut down?
643 0 : pub fn is_shutdown_requested() -> bool {
644 0 : if let Ok(true_or_false) = SHUTDOWN_TOKEN.try_with(|t| t.is_cancelled()) {
645 0 : true_or_false
646 : } else {
647 0 : if !cfg!(test) {
648 0 : warn!("is_shutdown_requested() called in an unexpected task or thread");
649 0 : }
650 0 : false
651 : }
652 0 : }
|