LCOV - code coverage report
Current view: pageserver/src/task_mgr.rs (source / functions)
Test: 36bb8dd7c7efcb53483d1a7d9f7cb33e8406dcf0.info
Test Date: 2024-04-08 10:22:05
Coverage:   Lines: 71.4 % (175 of 245)   Functions: 15.9 % (21 of 132)

            Line data    Source code
       1              : //!
       2              : //! This module provides centralized handling of tokio tasks in the Page Server.
       3              : //!
       4              : //! We provide a few basic facilities:
       5              : //! - A global registry of tasks that lists what kind of tasks they are, and
       6              : //!   which tenant or timeline they are working on
       7              : //!
       8              : //! - The ability to request a task to shut down.
       9              : //!
      10              : //!
      11              : //! # How it works
      12              : //!
      13              : //! There is a global hashmap of all the tasks (`TASKS`). Whenever a new
      14              : //! task is spawned, a PageServerTask entry is added there, and when a
      15              : //! task dies, it removes itself from the hashmap. If you want to kill a
      16              : //! task, you can scan the hashmap to find it.
      17              : //!
      18              : //! # Task shutdown
      19              : //!
      20              : //! To kill a task, we rely on co-operation from the victim. Each task is
      21              : //! expected to periodically call the `is_shutdown_requested()` function, and
      22              : //! if it returns true, exit gracefully. In addition to that, when waiting for
      23              : //! the network or other long-running operation, you can use
      24              : //! `shutdown_watcher()` function to get a Future that will become ready if
      25              : //! the current task has been requested to shut down. You can use that with
      26              : //! Tokio's select!() macro.
      27              : //!
      28              : //! TODO: This would be a good place to also handle panics in a somewhat sane way.
      29              : //! Depending on what task panics, we might want to kill the whole server, or
      30              : //! only a single tenant or timeline.
      31              : //!
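A minimal sketch of the cooperative-shutdown pattern this module expects, assuming the body runs inside a task spawned via `task_mgr::spawn` below (`do_work` is a hypothetical placeholder):

```rust
// Hypothetical task payload illustrating both shutdown-check styles.
async fn example_task_body() -> anyhow::Result<()> {
    loop {
        // Cheap flag check at a convenient point in the loop.
        if task_mgr::is_shutdown_requested() {
            break;
        }
        // Race a long-running operation against the shutdown signal.
        tokio::select! {
            _ = task_mgr::shutdown_watcher() => break,
            res = do_work() => res?, // `do_work` is a placeholder
        }
    }
    Ok(())
}
```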
      32              : 
      33              : use std::collections::HashMap;
      34              : use std::fmt;
      35              : use std::future::Future;
      36              : use std::panic::AssertUnwindSafe;
      37              : use std::sync::atomic::{AtomicU64, Ordering};
      38              : use std::sync::{Arc, Mutex};
      39              : 
      40              : use futures::FutureExt;
      41              : use pageserver_api::shard::TenantShardId;
      42              : use tokio::runtime::Runtime;
      43              : use tokio::task::JoinHandle;
      44              : use tokio::task_local;
      45              : use tokio_util::sync::CancellationToken;
      46              : 
      47              : use tracing::{debug, error, info, warn};
      48              : 
      49              : use once_cell::sync::Lazy;
      50              : 
      51              : use utils::id::TimelineId;
      52              : 
      53              : //
      54              : // There are four runtimes:
      55              : //
      56              : // Compute request runtime
      57              : //  - used to handle connections from compute nodes. Any tasks related to satisfying
      58              : //    GetPage requests, base backups, import, and other such compute node operations
      59              : //    are handled by the Compute request runtime
      60              : //  - page_service.rs
      61              : //  - this includes layer downloads from remote storage, if a layer is needed to
      62              : //    satisfy a GetPage request
      63              : //
      64              : // Management request runtime
      65              : //  - used to handle HTTP API requests
      66              : //
      67              : // WAL receiver runtime:
      68              : //  - used to handle WAL receiver connections.
      69              : //  - and to receive updates from storage_broker
      70              : //
      71              : // Background runtime
      72              : //  - layer flushing
      73              : //  - garbage collection
      74              : //  - compaction
      75              : //  - remote storage uploads
      76              : //  - initial tenant loading
      77              : //
      78              : // Everything runs in a tokio task. If you spawn new tasks, spawn them on the
      79              : // correct runtime.
      80              : //
      81              : // There might be situations when one task needs to wait for a task running in another
      82              : // runtime to finish. For example, if a background operation needs a layer from remote
      83              : // storage, it will start to download it; if that download was already initiated by a
      84              : // GetPage request, the background task will wait for the download - running in the
      85              : // compute request runtime - to finish.
      86              : // Another example: the initial tenant loading tasks are launched in the background
      87              : // runtime. If a GetPage request comes in before the load of a tenant has finished, the
      88              : // GetPage request will wait for the tenant load to finish.
      89              : //
      90              : // The core Timeline code is synchronous, and uses a bunch of std Mutexes and RWLocks to
      91              : // protect data structures. Let's keep it that way. Synchronous code is easier to debug
      92              : // and analyze, and there's a lot of hairy, low-level, performance critical code there.
      93              : //
      94              : // It's nice to have different runtimes, so that you can quickly eyeball how much CPU
      95              : // time each class of operations is taking, with 'top -H' or similar.
      96              : //
      97              : // It's also good to avoid a situation where, e.g., upload tasks blocked on locks
      98              : // hog all the threads that would be needed to process other operations. That
      99              : // shouldn't happen, but still.
     100              : //
     101            0 : pub static COMPUTE_REQUEST_RUNTIME: Lazy<Runtime> = Lazy::new(|| {
     102            0 :     tokio::runtime::Builder::new_multi_thread()
     103            0 :         .thread_name("compute request worker")
     104            0 :         .enable_all()
     105            0 :         .build()
     106            0 :         .expect("Failed to create compute request runtime")
     107            0 : });
     108              : 
     109            0 : pub static MGMT_REQUEST_RUNTIME: Lazy<Runtime> = Lazy::new(|| {
     110            0 :     tokio::runtime::Builder::new_multi_thread()
     111            0 :         .thread_name("mgmt request worker")
     112            0 :         .enable_all()
     113            0 :         .build()
     114            0 :         .expect("Failed to create mgmt request runtime")
     115            0 : });
     116              : 
     117           10 : pub static WALRECEIVER_RUNTIME: Lazy<Runtime> = Lazy::new(|| {
     118           10 :     tokio::runtime::Builder::new_multi_thread()
     119           10 :         .thread_name("walreceiver worker")
     120           10 :         .enable_all()
     121           10 :         .build()
     122           10 :         .expect("Failed to create walreceiver runtime")
     123           10 : });
     124              : 
     125           92 : pub static BACKGROUND_RUNTIME: Lazy<Runtime> = Lazy::new(|| {
     126           92 :     tokio::runtime::Builder::new_multi_thread()
     127           92 :         .thread_name("background op worker")
     128           92 :         // if you change the number of worker threads, please change the constant below
     129           92 :         .enable_all()
     130           92 :         .build()
     131           92 :         .expect("Failed to create background op runtime")
     132           92 : });
     133              : 
     134           14 : pub(crate) static BACKGROUND_RUNTIME_WORKER_THREADS: Lazy<usize> = Lazy::new(|| {
     135           14 :     // force init, and thus any panic, to happen now
     136           14 :     let _ = BACKGROUND_RUNTIME.handle();
     137           14 :     // replicates tokio-1.28.1::loom::sys::num_cpus, which is not available publicly;
     138           14 :     // tokio would have already panicked for parsing errors or NotUnicode
     139           14 :     //
     140           14 :     // this will be wrong if any of the runtimes gets their worker threads configured to something
     141           14 :     // else, but that has not been needed in a long time.
     142           14 :     std::env::var("TOKIO_WORKER_THREADS")
     143           14 :         .map(|s| s.parse::<usize>().unwrap())
     144           14 :         .unwrap_or_else(|_e| usize::max(2, num_cpus::get()))
     145           14 : });
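As a hedged usage sketch, work is put onto one of these runtimes by calling `spawn` (defined below) from elsewhere in the pageserver with the runtime's handle; the tenant/timeline IDs and the task body here are placeholders:

```rust
// Hypothetical call site: run a compaction task on the background runtime.
let _task_id = task_mgr::spawn(
    BACKGROUND_RUNTIME.handle(),
    TaskKind::Compaction,
    Some(tenant_shard_id), // assumed to be in scope
    Some(timeline_id),     // assumed to be in scope
    "compaction loop",
    false, // a task failure should not shut down the whole process
    async move {
        // ... do the work, checking is_shutdown_requested() periodically ...
        Ok(())
    },
);
```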
     146              : 
     147              : #[derive(Debug, Clone, Copy)]
     148              : pub struct PageserverTaskId(u64);
     149              : 
     150              : impl fmt::Display for PageserverTaskId {
     151            0 :     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
     152            0 :         self.0.fmt(f)
     153            0 :     }
     154              : }
     155              : 
     156              : /// Each task that we track is associated with a "task ID". It's just an
     157              : /// increasing number that we assign. Note that it is different from tokio::task::Id.
     158              : static NEXT_TASK_ID: AtomicU64 = AtomicU64::new(1);
     159              : 
     160              : /// Global registry of tasks
     161              : static TASKS: Lazy<Mutex<HashMap<u64, Arc<PageServerTask>>>> =
     162           96 :     Lazy::new(|| Mutex::new(HashMap::new()));
     163              : 
     164          184 : task_local! {
     165          184 :     // This is a cancellation token which will be cancelled when a task needs to shut down. The
     166          184 :     // root token is kept in the global registry, so that anyone can send the signal to request
     167          184 :     // task shutdown.
     168          184 :     static SHUTDOWN_TOKEN: CancellationToken;
     169          184 : 
     170          184 :     // Each task holds reference to its own PageServerTask here.
     171          184 :     static CURRENT_TASK: Arc<PageServerTask>;
     172          184 : }
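For readers unfamiliar with `task_local!`: a value is only visible inside the future passed to `.scope()`, which is how `task_wrapper` below injects `SHUTDOWN_TOKEN` and `CURRENT_TASK` into the payload. A standalone sketch of the mechanism (not code from this module):

```rust
tokio::task_local! {
    static REQUEST_ID: u64;
}

async fn handler() {
    // Visible anywhere inside the scoped future, without explicit plumbing.
    let id = REQUEST_ID.with(|id| *id);
    println!("handling request {id}");
}

async fn demo() {
    REQUEST_ID.scope(42, handler()).await;
}
```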
     173              : 
     174              : ///
     175              : /// There are many kinds of tasks in the system. Some are associated with a particular
     176              : /// tenant or timeline, while others are global.
     177              : ///
     178              : /// Note that we don't try to limit how many tasks of a certain kind can be running
     179              : /// at the same time.
     180              : ///
     181              : #[derive(
     182              :     Debug,
     183              :     // NB: enumset::EnumSetType derives PartialEq, Eq, Clone, Copy
     184            0 :     enumset::EnumSetType,
     185              :     enum_map::Enum,
     186              :     serde::Serialize,
     187            0 :     serde::Deserialize,
     188         2676 :     strum_macros::IntoStaticStr,
     189            0 :     strum_macros::EnumString,
     190              : )]
     191              : pub enum TaskKind {
     192              :     // Pageserver startup, i.e., `main`
     193              :     Startup,
     194              : 
     195              :     // libpq listener task. It just accepts connection and spawns a
     196              :     // PageRequestHandler task for each connection.
     197              :     LibpqEndpointListener,
     198              : 
     199              :     // HTTP endpoint listener.
     200              :     HttpEndpointListener,
     201              : 
     202              :     // Task that handles a single connection. A PageRequestHandler task
     203              :     // starts detached from any particular tenant or timeline, but it can be
     204              :     // associated with one later, after receiving a command from the client.
     205              :     PageRequestHandler,
     206              : 
     207              :     /// Manages the WAL receiver connection for one timeline.
     208              :     /// It subscribes to events from storage_broker and decides which safekeeper to connect to.
     209              :     /// Once the decision has been made, it establishes the connection using the `tokio-postgres` library.
     210              :     /// There is at most one connection at any given time.
     211              :     ///
     212              :     /// The `tokio-postgres` library represents a connection as two objects: a `Client` and a `Connection`.
     213              :     /// The `Client` object is what library users use to make requests & get responses.
     214              :     /// Internally, `Client` hands over requests to the `Connection` object.
     215              :     /// The `Connection` object is responsible for speaking the wire protocol.
     216              :     ///
     217              :     /// Walreceiver uses a legacy abstraction called `TaskHandle` to represent the activity of establishing and handling a connection.
     218              :     /// The `WalReceiverManager` task ensures that this `TaskHandle` task does not outlive the `WalReceiverManager` task.
     219              :     /// For the `RequestContext` that we hand to the TaskHandle, we use the [`WalReceiverConnectionHandler`] task kind.
     220              :     ///
     221              :     /// Once the connection is established, the `TaskHandle` task spawns a
     222              :     /// [`WalReceiverConnectionPoller`] task that is responsible for polling
     223              :     /// the `Connection` object.
     224              :     /// A `CancellationToken` created by the `TaskHandle` task ensures
     225              :     /// that the [`WalReceiverConnectionPoller`] task will cancel soon after the `TaskHandle` is dropped.
     226              :     ///
     227              :     /// [`WalReceiverConnectionHandler`]: Self::WalReceiverConnectionHandler
     228              :     /// [`WalReceiverConnectionPoller`]: Self::WalReceiverConnectionPoller
     229              :     WalReceiverManager,
     230              : 
     231              :     /// The `TaskHandle` task that executes `handle_walreceiver_connection`.
     232              :     /// See the comment on [`WalReceiverManager`].
     233              :     ///
     234              :     /// [`WalReceiverManager`]: Self::WalReceiverManager
     235              :     WalReceiverConnectionHandler,
     236              : 
     237              :     /// The task that polls the `tokio-postgres::Connection` object.
     238              :     /// Spawned by task [`WalReceiverConnectionHandler`](Self::WalReceiverConnectionHandler).
     239              :     /// See the comment on [`WalReceiverManager`](Self::WalReceiverManager).
     240              :     WalReceiverConnectionPoller,
     241              : 
     242              :     // Garbage collection worker. One per tenant
     243              :     GarbageCollector,
     244              : 
     245              :     // Compaction. One per tenant.
     246              :     Compaction,
     247              : 
     248              :     // Eviction. One per timeline.
     249              :     Eviction,
     250              : 
     251              :     /// See [`crate::disk_usage_eviction_task`].
     252              :     DiskUsageEviction,
     253              : 
     254              :     /// See [`crate::tenant::secondary`].
     255              :     SecondaryDownloads,
     256              : 
     257              :     /// See [`crate::tenant::secondary`].
     258              :     SecondaryUploads,
     259              : 
     260              :     // Initial logical size calculation
     261              :     InitialLogicalSizeCalculation,
     262              : 
     263              :     OndemandLogicalSizeCalculation,
     264              : 
     265              :     // Task that flushes frozen in-memory layers to disk
     266              :     LayerFlushTask,
     267              : 
     268              :     // Task that uploads a file to remote storage
     269              :     RemoteUploadTask,
     270              : 
     271              :     // task that handles the initial downloading of all tenants
     272              :     InitialLoad,
     273              : 
     274              :     // task that handles attaching a tenant
     275              :     Attach,
     276              : 
     277              :     // Used mostly for background deletion from s3
     278              :     TimelineDeletionWorker,
     279              : 
     280              :     // task that handles metrics collection
     281              :     MetricsCollection,
     282              : 
     283              :     // task that drives downloading layers
     284              :     DownloadAllRemoteLayers,
     285              :     // Task that calculates synthetic size for all active tenants
     286              :     CalculateSyntheticSize,
     287              : 
     288              :     // A request that comes in via the pageserver HTTP API.
     289              :     MgmtRequest,
     290              : 
     291              :     DebugTool,
     292              : 
     293              :     #[cfg(test)]
     294              :     UnitTest,
     295              : }
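Given the derives above, a `TaskKind` converts to and from its string form and packs into cheap bit-sets; a small sketch, assuming `strum` and `enumset` behave as derived:

```rust
use std::str::FromStr;

fn demo() {
    // strum_macros::IntoStaticStr: variant -> &'static str
    let s: &'static str = TaskKind::Compaction.into();
    assert_eq!(s, "Compaction");

    // strum_macros::EnumString: &str -> variant
    let kind = TaskKind::from_str("Compaction").unwrap();

    // enumset::EnumSetType: cheap bit-sets of task kinds
    let background = enumset::enum_set!(TaskKind::Compaction | TaskKind::GarbageCollector);
    assert!(background.contains(kind));
}
```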
     296              : 
     297              : #[derive(Default)]
     298              : struct MutableTaskState {
     299              :     /// Handle for waiting for the task to exit. It can be None, if the
     300              :     /// task has already exited.
     301              :     join_handle: Option<JoinHandle<()>>,
     302              : }
     303              : 
     304              : struct PageServerTask {
     305              :     task_id: PageserverTaskId,
     306              : 
     307              :     kind: TaskKind,
     308              : 
     309              :     name: String,
     310              : 
     311              :     // To request task shutdown, just cancel this token.
     312              :     cancel: CancellationToken,
     313              : 
     314              :     /// Tasks may optionally be launched for a particular tenant/timeline, enabling
     315              :     /// later cancelling tasks for that tenant/timeline in [`shutdown_tasks`]
     316              :     tenant_shard_id: Option<TenantShardId>,
     317              :     timeline_id: Option<TimelineId>,
     318              : 
     319              :     mutable: Mutex<MutableTaskState>,
     320              : }
     321              : 
     322              : /// Launch a new task.
     323              : /// Note: if shutdown_process_on_error is set to true, failure
     324              : ///   of the task will lead to shutdown of the entire process.
     325         2507 : pub fn spawn<F>(
     326         2507 :     runtime: &tokio::runtime::Handle,
     327         2507 :     kind: TaskKind,
     328         2507 :     tenant_shard_id: Option<TenantShardId>,
     329         2507 :     timeline_id: Option<TimelineId>,
     330         2507 :     name: &str,
     331         2507 :     shutdown_process_on_error: bool,
     332         2507 :     future: F,
     333         2507 : ) -> PageserverTaskId
     334         2507 : where
     335         2507 :     F: Future<Output = anyhow::Result<()>> + Send + 'static,
     336         2507 : {
     337         2507 :     let cancel = CancellationToken::new();
     338         2507 :     let task_id = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed);
     339         2507 :     let task = Arc::new(PageServerTask {
     340         2507 :         task_id: PageserverTaskId(task_id),
     341         2507 :         kind,
     342         2507 :         name: name.to_string(),
     343         2507 :         cancel: cancel.clone(),
     344         2507 :         tenant_shard_id,
     345         2507 :         timeline_id,
     346         2507 :         mutable: Mutex::new(MutableTaskState { join_handle: None }),
     347         2507 :     });
     348         2507 : 
     349         2507 :     TASKS.lock().unwrap().insert(task_id, Arc::clone(&task));
     350         2507 : 
     351         2507 :     let mut task_mut = task.mutable.lock().unwrap();
     352         2507 : 
     353         2507 :     let task_name = name.to_string();
     354         2507 :     let task_cloned = Arc::clone(&task);
     355         2507 :     let join_handle = runtime.spawn(task_wrapper(
     356         2507 :         task_name,
     357         2507 :         task_id,
     358         2507 :         task_cloned,
     359         2507 :         cancel,
     360         2507 :         shutdown_process_on_error,
     361         2507 :         future,
     362         2507 :     ));
     363         2507 :     task_mut.join_handle = Some(join_handle);
     364         2507 :     drop(task_mut);
     365         2507 : 
     366         2507 :     // The task is now running. Nothing more to do here
     367         2507 :     PageserverTaskId(task_id)
     368         2507 : }
     369              : 
     370              : /// This wrapper function runs in a newly-spawned task. It initializes the
     371              : /// task-local variables and calls the payload function.
     372         2507 : async fn task_wrapper<F>(
     373         2507 :     task_name: String,
     374         2507 :     task_id: u64,
     375         2507 :     task: Arc<PageServerTask>,
     376         2507 :     shutdown_token: CancellationToken,
     377         2507 :     shutdown_process_on_error: bool,
     378         2507 :     future: F,
     379         2507 : ) where
     380         2507 :     F: Future<Output = anyhow::Result<()>> + Send + 'static,
     381         2507 : {
     382         2400 :     debug!("Starting task '{}'", task_name);
     383              : 
     384         2400 :     let result = SHUTDOWN_TOKEN
     385         2400 :         .scope(
     386         2400 :             shutdown_token,
     387         2400 :             CURRENT_TASK.scope(task, {
     388         2400 :                 // We use AssertUnwindSafe here so that the payload function
     389         2400 :                 // doesn't need to be UnwindSafe. We don't do anything after the
     390         2400 :                 // unwinding that would expose us to unwind-unsafe behavior.
     391         2400 :                 AssertUnwindSafe(future).catch_unwind()
     392         2400 :             }),
     393         2400 :         )
     394        92452 :         .await;
     395         2025 :     task_finish(result, task_name, task_id, shutdown_process_on_error).await;
     396         2025 : }
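The `AssertUnwindSafe(...).catch_unwind()` combination above is what turns a panic in the payload into an `Err` value that `task_finish` can log, instead of tearing down the worker thread. A standalone sketch of the same pattern, assuming the `futures` crate:

```rust
use futures::FutureExt;
use std::panic::AssertUnwindSafe;

async fn demo() {
    let payload = async { panic!("boom"); };
    // The panic is converted into Err(Box<dyn Any + Send>).
    let result = AssertUnwindSafe(payload).catch_unwind().await;
    assert!(result.is_err());
}
```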
     397              : 
     398         2025 : async fn task_finish(
     399         2025 :     result: std::result::Result<
     400         2025 :         anyhow::Result<()>,
     401         2025 :         std::boxed::Box<dyn std::any::Any + std::marker::Send>,
     402         2025 :     >,
     403         2025 :     task_name: String,
     404         2025 :     task_id: u64,
     405         2025 :     shutdown_process_on_error: bool,
     406         2025 : ) {
     407         2025 :     // Remove our entry from the global hashmap.
     408         2025 :     let task = TASKS
     409         2025 :         .lock()
     410         2025 :         .unwrap()
     411         2025 :         .remove(&task_id)
     412         2025 :         .expect("no task in registry");
     413         2025 : 
     414         2025 :     let mut shutdown_process = false;
     415              :     {
     416         2025 :         match result {
     417              :             Ok(Ok(())) => {
     418         2025 :                 debug!("Task '{}' exited normally", task_name);
     419              :             }
     420            0 :             Ok(Err(err)) => {
     421            0 :                 if shutdown_process_on_error {
     422            0 :                     error!(
     423            0 :                         "Shutting down: task '{}' tenant_shard_id: {:?}, timeline_id: {:?} exited with error: {:?}",
     424            0 :                         task_name, task.tenant_shard_id, task.timeline_id, err
     425            0 :                     );
     426            0 :                     shutdown_process = true;
     427              :                 } else {
     428            0 :                     error!(
     429            0 :                         "Task '{}' tenant_shard_id: {:?}, timeline_id: {:?} exited with error: {:?}",
     430            0 :                         task_name, task.tenant_shard_id, task.timeline_id, err
     431            0 :                     );
     432              :                 }
     433              :             }
     434            0 :             Err(err) => {
     435            0 :                 if shutdown_process_on_error {
     436            0 :                     error!(
     437            0 :                         "Shutting down: task '{}' tenant_shard_id: {:?}, timeline_id: {:?} panicked: {:?}",
     438            0 :                         task_name, task.tenant_shard_id, task.timeline_id, err
     439            0 :                     );
     440            0 :                     shutdown_process = true;
     441              :                 } else {
     442            0 :                     error!(
     443            0 :                         "Task '{}' tenant_shard_id: {:?}, timeline_id: {:?} panicked: {:?}",
     444            0 :                         task_name, task.tenant_shard_id, task.timeline_id, err
     445            0 :                     );
     446              :                 }
     447              :             }
     448              :         }
     449              :     }
     450              : 
     451         2025 :     if shutdown_process {
     452            0 :         std::process::exit(1);
     453         2025 :     }
     454         2025 : }
     455              : 
     456              : /// Signal and wait for tasks to shut down.
     457              : ///
     458              : ///
     459              : /// The arguments are used to select the tasks to kill. Any None arguments are
     460              : /// ignored. For example, to shut down all WAL receiver manager tasks:
     461              : ///
     462              : ///   shutdown_tasks(Some(TaskKind::WalReceiverManager), None, None)
     463              : ///
     464              : /// Or to shut down all tasks for a given timeline:
     465              : ///
     466              : ///   shutdown_tasks(None, Some(tenant_shard_id), Some(timeline_id))
     467              : ///
     468           22 : pub async fn shutdown_tasks(
     469           22 :     kind: Option<TaskKind>,
     470           22 :     tenant_shard_id: Option<TenantShardId>,
     471           22 :     timeline_id: Option<TimelineId>,
     472           22 : ) {
     473           22 :     let mut victim_tasks = Vec::new();
     474           22 : 
     475           22 :     {
     476           22 :         let tasks = TASKS.lock().unwrap();
     477           22 :         for task in tasks.values() {
     478           20 :             if (kind.is_none() || Some(task.kind) == kind)
     479           12 :                 && (tenant_shard_id.is_none() || task.tenant_shard_id == tenant_shard_id)
     480           12 :                 && (timeline_id.is_none() || task.timeline_id == timeline_id)
     481            6 :             {
     482            6 :                 task.cancel.cancel();
     483            6 :                 victim_tasks.push((
     484            6 :                     Arc::clone(task),
     485            6 :                     task.kind,
     486            6 :                     task.tenant_shard_id,
     487            6 :                     task.timeline_id,
     488            6 :                 ));
     489           14 :             }
     490              :         }
     491              :     }
     492              : 
     493           22 :     let log_all = kind.is_none() && tenant_shard_id.is_none() && timeline_id.is_none();
     494              : 
     495           28 :     for (task, task_kind, tenant_shard_id, timeline_id) in victim_tasks {
     496            6 :         let join_handle = {
     497            6 :             let mut task_mut = task.mutable.lock().unwrap();
     498            6 :             task_mut.join_handle.take()
     499              :         };
     500            6 :         if let Some(mut join_handle) = join_handle {
     501            6 :             if log_all {
     502            0 :                 if tenant_shard_id.is_none() {
     503              :                     // there are only a few of these
     504            0 :                     info!(name = task.name, kind = ?task_kind, "stopping global task");
     505              :                 } else {
     506              :                     // warn to catch these in tests; there shouldn't be any
     507            0 :                     warn!(name = task.name, tenant_shard_id = ?tenant_shard_id, timeline_id = ?timeline_id, kind = ?task_kind, "stopping left-over");
     508              :                 }
     509            6 :             }
     510            6 :             if tokio::time::timeout(std::time::Duration::from_secs(1), &mut join_handle)
     511            6 :                 .await
     512            6 :                 .is_err()
     513              :             {
     514              :                 // allow some time to elapse before logging to cut down the number of log
     515              :                 // lines.
     516            0 :                 info!("waiting for task {} to shut down", task.name);
     517              :                 // we never handled this return value, but:
     518              :                 // - we don't deschedule which would lead to is_cancelled
     519              :                 // - panics are already logged (is_panicked)
     520              :                 // - task errors are already logged in the wrapper
     521            0 :                 let _ = join_handle.await;
     522            0 :                 info!("task {} completed", task.name);
     523            6 :             }
     524            0 :         } else {
     525            0 :             // Possibly one of:
     526            0 :             //  * The task had not even fully started yet.
     527            0 :             //  * It was shut down concurrently and already exited
     528            0 :         }
     529              :     }
     530           22 : }
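Hedged usage sketches matching the selection rules in the doc comment above (the IDs are placeholders assumed to be in scope):

```rust
// Stop every task belonging to one timeline, e.g. before deleting it.
task_mgr::shutdown_tasks(None, Some(tenant_shard_id), Some(timeline_id)).await;

// Stop all WAL receiver manager tasks, across all tenants and timelines.
task_mgr::shutdown_tasks(Some(TaskKind::WalReceiverManager), None, None).await;
```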
     531              : 
     532            0 : pub fn current_task_kind() -> Option<TaskKind> {
     533            0 :     CURRENT_TASK.try_with(|ct| ct.kind).ok()
     534            0 : }
     535              : 
     536            0 : pub fn current_task_id() -> Option<PageserverTaskId> {
     537            0 :     CURRENT_TASK.try_with(|ct| ct.task_id).ok()
     538            0 : }
     539              : 
     540              : /// A Future that can be used to check if the current task has been requested to
     541              : /// shut down.
     542            0 : pub async fn shutdown_watcher() {
     543            0 :     let token = SHUTDOWN_TOKEN
     544            0 :         .try_with(|t| t.clone())
     545            0 :         .expect("shutdown_watcher() called in an unexpected task or thread");
     546            0 : 
     547            0 :     token.cancelled().await;
     548            0 : }
     549              : 
     550              : /// Clone the current task's cancellation token, which can be moved across tasks.
     551              : ///
     552              : /// When the task which is currently executing is shutdown, the cancellation token will be
     553              : /// cancelled. It can however be moved to other tasks, such as `tokio::task::spawn_blocking` or
     554              : /// `tokio::task::JoinSet::spawn`.
     555         2598 : pub fn shutdown_token() -> CancellationToken {
     556         2598 :     let res = SHUTDOWN_TOKEN.try_with(|t| t.clone());
     557         2598 : 
     558         2598 :     if cfg!(test) {
     559              :         // in tests this method is called from non-taskmgr spawned tasks, and that is all ok.
     560         2598 :         res.unwrap_or_default()
     561              :     } else {
     562            0 :         res.expect("shutdown_token() called in an unexpected task or thread")
     563              :     }
     564         2598 : }
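As the doc comment notes, the cloned token can be moved out of the current task's scope; a minimal sketch of carrying it into blocking work:

```rust
// Hypothetical: propagate cancellation into spawn_blocking work.
let cancel = task_mgr::shutdown_token();
tokio::task::spawn_blocking(move || {
    while !cancel.is_cancelled() {
        // ... do a bounded chunk of blocking work, then re-check ...
    }
});
```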
     565              : 
     566              : /// Has the current task been requested to shut down?
     567            8 : pub fn is_shutdown_requested() -> bool {
     568            8 :     if let Ok(true_or_false) = SHUTDOWN_TOKEN.try_with(|t| t.is_cancelled()) {
     569            0 :         true_or_false
     570              :     } else {
     571            8 :         if !cfg!(test) {
     572            0 :             warn!("is_shutdown_requested() called in an unexpected task or thread");
     573            8 :         }
     574            8 :         false
     575              :     }
     576            8 : }
        

Generated by: LCOV version 2.1-beta