#![recursion_limit = "300"]
#![deny(clippy::undocumented_unsafe_blocks)]

mod auth;
pub mod basebackup;
pub mod config;
pub mod consumption_metrics;
pub mod context;
pub mod control_plane_client;
pub mod deletion_queue;
pub mod disk_usage_eviction_task;
pub mod http;
pub mod import_datadir;
pub use pageserver_api::keyspace;
pub mod metrics;
pub mod page_cache;
pub mod page_service;
pub mod pgdatadir_mapping;
pub mod repository;
pub mod span;
pub(crate) mod statvfs;
pub mod task_mgr;
pub mod tenant;
pub mod trace;
pub mod utilization;
pub mod virtual_file;
pub mod walingest;
pub mod walrecord;
pub mod walredo;

use crate::task_mgr::TaskKind;
use camino::Utf8Path;
use deletion_queue::DeletionQueue;
use tenant::mgr::TenantManager;
use tracing::info;

/// Current storage format version
///
/// This is embedded in the header of all the layer files.
/// If you make any backwards-incompatible changes to the storage
/// format, bump this!
/// Note that TimelineMetadata uses its own version number to track
/// backwards-compatible changes to the metadata format.
pub const STORAGE_FORMAT_VERSION: u16 = 3;

pub const DEFAULT_PG_VERSION: u32 = 15;

// Magic constants used to identify different kinds of files
pub const IMAGE_FILE_MAGIC: u16 = 0x5A60;
pub const DELTA_FILE_MAGIC: u16 = 0x5A61;

static ZERO_PAGE: bytes::Bytes = bytes::Bytes::from_static(&[0u8; 8192]);
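
// A minimal sanity-check sketch (added for illustration; these tests are not
// from the original file): the two magic values must stay distinct so readers
// can tell image and delta layer files apart, and ZERO_PAGE must really be an
// all-zero 8 KiB page.
#[cfg(test)]
mod format_constant_tests {
    use super::{DELTA_FILE_MAGIC, IMAGE_FILE_MAGIC, ZERO_PAGE};

    #[test]
    fn image_and_delta_magics_are_distinct() {
        assert_ne!(IMAGE_FILE_MAGIC, DELTA_FILE_MAGIC);
    }

    #[test]
    fn zero_page_is_an_all_zero_8k_block() {
        assert_eq!(ZERO_PAGE.len(), 8192);
        assert!(ZERO_PAGE.iter().all(|&b| b == 0));
    }
}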

pub use crate::metrics::preinitialize_metrics;

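/// Shut down the pageserver's subsystems in a fixed order and then exit the
/// process with `exit_code`. The step comments in the body describe the exact
/// ordering; this is intended to be called once, when the process has been
/// asked to stop.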
#[tracing::instrument(skip_all, fields(%exit_code))]
pub async fn shutdown_pageserver(
    tenant_manager: &TenantManager,
    deletion_queue: Option<DeletionQueue>,
    exit_code: i32,
) {
    use std::time::Duration;
    // Shut down the libpq endpoint task. This prevents new connections from
    // being accepted.
    timed(
        task_mgr::shutdown_tasks(Some(TaskKind::LibpqEndpointListener), None, None),
        "shutdown LibpqEndpointListener",
        Duration::from_secs(1),
    )
    .await;

    // Shut down all the tenants. This flushes everything to disk and kills
    // the checkpoint and GC tasks.
    timed(
        tenant_manager.shutdown(),
        "shutdown all tenants",
        Duration::from_secs(5),
    )
    .await;

    // Shut down any page service tasks: any in-progress work for particular timelines or tenants
    // should already have been cancelled via mgr::shutdown_all_tenants.
    timed(
        task_mgr::shutdown_tasks(Some(TaskKind::PageRequestHandler), None, None),
        "shutdown PageRequestHandlers",
        Duration::from_secs(1),
    )
    .await;

    // Best effort to persist any outstanding deletions, to avoid leaking objects
    if let Some(mut deletion_queue) = deletion_queue {
        deletion_queue.shutdown(Duration::from_secs(5)).await;
    }

    // Shut down the HTTP endpoint last, so that you can still check the server's
    // status while it's shutting down.
    // FIXME: We should probably stop accepting commands like attach/detach earlier.
    timed(
        task_mgr::shutdown_tasks(Some(TaskKind::HttpEndpointListener), None, None),
        "shutdown http",
        Duration::from_secs(1),
    )
    .await;

    // There should be nothing left, but let's be sure
    timed(
        task_mgr::shutdown_tasks(None, None, None),
        "shutdown leftovers",
        Duration::from_secs(1),
    )
    .await;
    info!("Shut down successfully completed");
    std::process::exit(exit_code);
}

/// The name of the metadata file pageserver creates per timeline.
/// Full path: `tenants/<tenant_id>/timelines/<timeline_id>/metadata`.
pub const METADATA_FILE_NAME: &str = "metadata";

/// Per-tenant configuration file.
/// Full path: `tenants/<tenant_id>/config`.
pub(crate) const TENANT_CONFIG_NAME: &str = "config";

/// Per-tenant location configuration file.
/// Full path: `tenants/<tenant_id>/config-v1`.
pub(crate) const TENANT_LOCATION_CONFIG_NAME: &str = "config-v1";

/// Per-tenant copy of their remote heatmap, downloaded into the local
/// tenant path while in secondary mode.
pub(crate) const TENANT_HEATMAP_BASENAME: &str = "heatmap-v1.json";

/// A suffix used for various temporary files. Any temporary files found in the
/// data directory at pageserver startup can be automatically removed.
pub(crate) const TEMP_FILE_SUFFIX: &str = "___temp";

/// A marker file to mark that a timeline directory was not fully initialized.
/// If a timeline directory with this marker is encountered at pageserver startup,
/// the timeline directory and the marker file are both removed.
/// Full path: `tenants/<tenant_id>/timelines/<timeline_id>___uninit`.
pub(crate) const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit";

pub(crate) const TIMELINE_DELETE_MARK_SUFFIX: &str = "___delete";

/// A marker file to prevent pageserver from loading a certain tenant on restart.
/// Different from [`TIMELINE_UNINIT_MARK_SUFFIX`] due to the semantics of the corresponding
/// `ignore` management API command, which expects the ignored tenant to be properly loaded
/// into pageserver's memory before being ignored.
/// Full path: `tenants/<tenant_id>/___ignored_tenant`.
pub const IGNORED_TENANT_FILE_NAME: &str = "___ignored_tenant";
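
// Putting the constants above together, the on-disk layout under a tenant
// directory looks roughly like this (a sketch assembled from the doc comments
// above; the `___delete` entry is placed by analogy with `___uninit` and is
// not spelled out explicitly):
//
//     tenants/<tenant_id>/
//         config                               TENANT_CONFIG_NAME
//         config-v1                            TENANT_LOCATION_CONFIG_NAME
//         heatmap-v1.json                      TENANT_HEATMAP_BASENAME
//         ___ignored_tenant                    IGNORED_TENANT_FILE_NAME
//         timelines/<timeline_id>/metadata     METADATA_FILE_NAME
//         timelines/<timeline_id>___uninit     TIMELINE_UNINIT_MARK_SUFFIX
//         timelines/<timeline_id>___delete     TIMELINE_DELETE_MARK_SUFFIX
//         <any file>___temp                    TEMP_FILE_SUFFIX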

pub fn is_temporary(path: &Utf8Path) -> bool {
    match path.file_name() {
        Some(name) => name.ends_with(TEMP_FILE_SUFFIX),
        None => false,
    }
}

fn ends_with_suffix(path: &Utf8Path, suffix: &str) -> bool {
    match path.file_name() {
        Some(name) => name.ends_with(suffix),
        None => false,
    }
}

// FIXME: DO NOT ADD new query methods like this; the obvious next step would be parsing a
// TimelineId out of the directory name. Instead, create a type like `UninitMark(TimelineId)`
// and parse the name only once.

pub(crate) fn is_uninit_mark(path: &Utf8Path) -> bool {
    ends_with_suffix(path, TIMELINE_UNINIT_MARK_SUFFIX)
}

pub(crate) fn is_delete_mark(path: &Utf8Path) -> bool {
    ends_with_suffix(path, TIMELINE_DELETE_MARK_SUFFIX)
}
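
// A small illustrative test (not from the original file; the file names are
// made up, and only the suffix matters to these helpers) showing how paths
// are classified:
#[cfg(test)]
mod marker_suffix_tests {
    use super::{is_delete_mark, is_temporary, is_uninit_mark};
    use camino::Utf8Path;

    #[test]
    fn suffix_helpers_match_their_markers() {
        assert!(is_temporary(Utf8Path::new("tenants/t/timelines/tl/layer___temp")));
        assert!(!is_temporary(Utf8Path::new("tenants/t/timelines/tl/layer")));
        assert!(is_uninit_mark(Utf8Path::new("tenants/t/timelines/tl___uninit")));
        assert!(is_delete_mark(Utf8Path::new("tenants/t/timelines/tl___delete")));
    }
}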

/// During pageserver startup, we need to order operations so that blocking work does not
/// exhaust the tokio worker threads.
///
/// Instances of this value exist only during startup; afterwards `None` is provided instead,
/// meaning no delaying is needed.
#[derive(Clone)]
pub struct InitializationOrder {
    /// Each initial tenant load task carries this until it is done loading timelines from remote storage.
    pub initial_tenant_load_remote: Option<utils::completion::Completion>,

    /// Each initial tenant load task carries this until completion.
    pub initial_tenant_load: Option<utils::completion::Completion>,

    /// Barrier for when we can start any background jobs.
    ///
    /// This can be broken up later on, but right now there is just one class of background job.
    pub background_jobs_can_start: utils::completion::Barrier,
}
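
// Usage pattern (a sketch, not verbatim from the call sites): startup code creates the
// completions and the barrier, hands a clone of this struct to each initial tenant load
// task, and the tasks release their `Completion`s as the corresponding phase finishes;
// background loops wait on `background_jobs_can_start` before doing any heavy work.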

/// Time the future with a warning when it exceeds a threshold.
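///
/// The future is always driven to completion: `warn_at` only controls when the
/// "still waiting" log line is emitted; it never cancels the future.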
async fn timed<Fut: std::future::Future>(
    fut: Fut,
    name: &str,
    warn_at: std::time::Duration,
) -> <Fut as std::future::Future>::Output {
    let started = std::time::Instant::now();

    let mut fut = std::pin::pin!(fut);

    match tokio::time::timeout(warn_at, &mut fut).await {
        Ok(ret) => {
            tracing::info!(
                stage = name,
                elapsed_ms = started.elapsed().as_millis(),
                "completed"
            );
            ret
        }
        Err(_) => {
            tracing::info!(
                stage = name,
                elapsed_ms = started.elapsed().as_millis(),
                "still waiting, taking longer than expected..."
            );

            let ret = fut.await;

            // this has a global allowed_errors
            tracing::warn!(
                stage = name,
                elapsed_ms = started.elapsed().as_millis(),
                "completed, took longer than expected"
            );

            ret
        }
    }
}

#[cfg(test)]
mod timed_tests {
    use super::timed;
    use std::time::Duration;

    #[tokio::test]
    async fn timed_completes_when_inner_future_completes() {
        // A future that completes on time should have its result returned
        let r1 = timed(
            async move {
                tokio::time::sleep(Duration::from_millis(10)).await;
                123
            },
            "test 1",
            Duration::from_millis(50),
        )
        .await;
        assert_eq!(r1, 123);

        // A future that completes too slowly should also have its result returned
        let r1 = timed(
            async move {
                tokio::time::sleep(Duration::from_millis(50)).await;
                456
            },
            "test 1",
            Duration::from_millis(10),
        )
        .await;
        assert_eq!(r1, 456);
    }
}