//! Code to manage pageservers
//!
//! In the local test environment, the data for each pageserver is stored in
//!
//! ```text
//! .neon/pageserver_<pageserver_id>
//! ```
//!
use std::collections::HashMap;
use std::io;
use std::io::Write;
use std::num::NonZeroU64;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;

use anyhow::{Context, bail};
use camino::Utf8PathBuf;
use pageserver_api::config::{DEFAULT_GRPC_LISTEN_PORT, DEFAULT_HTTP_LISTEN_PORT};
use pageserver_api::models::{self, TenantInfo, TimelineInfo};
use pageserver_api::shard::TenantShardId;
use pageserver_client::mgmt_api;
use postgres_backend::AuthType;
use postgres_connection::{PgConnectionConfig, parse_host_port};
use safekeeper_api::PgMajorVersion;
use utils::auth::{Claims, Scope};
use utils::id::{NodeId, TenantId, TimelineId};
use utils::lsn::Lsn;

use crate::background_process;
use crate::local_env::{LocalEnv, NeonLocalInitPageserverConf, PageServerConf};

/// Directory within .neon which will be used by default for LocalFs remote storage.
pub const PAGESERVER_REMOTE_STORAGE_DIR: &str = "local_fs_remote_storage/pageserver";

//
// Control routines for pageserver.
//
// Used in CLI and tests.
//
#[derive(Debug)]
pub struct PageServerNode {
    pub pg_connection_config: PgConnectionConfig,
    pub conf: PageServerConf,
    pub env: LocalEnv,
    pub http_client: mgmt_api::Client,
}

impl PageServerNode {
    pub fn from_env(env: &LocalEnv, conf: &PageServerConf) -> PageServerNode {
        let (host, port) =
            parse_host_port(&conf.listen_pg_addr).expect("Unable to parse listen_pg_addr");
        let port = port.unwrap_or(5432);

        let endpoint = if env.storage_controller.use_https_pageserver_api {
            format!(
                "https://{}",
                conf.listen_https_addr.as_ref().expect(
                    "listen https address should be specified if use_https_pageserver_api is on"
                )
            )
        } else {
            format!("http://{}", conf.listen_http_addr)
        };

        Self {
            pg_connection_config: PgConnectionConfig::new_host_port(host, port),
            conf: conf.clone(),
            env: env.clone(),
            http_client: mgmt_api::Client::new(
                env.create_http_client(),
                endpoint,
                {
                    match conf.http_auth_type {
                        AuthType::Trust => None,
                        AuthType::NeonJWT | AuthType::HadronJWT => Some(
                            env.generate_auth_token(&Claims::new(None, Scope::PageServerApi))
                                .unwrap(),
                        ),
                    }
                }
                .as_deref(),
            ),
        }
    }

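    /// Builds the contents of `identity.toml`, which holds only the node id.
    /// For a node id of 3 the file is just:
    ///
    /// ```text
    /// id=3
    /// ```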
    fn pageserver_make_identity_toml(&self, node_id: NodeId) -> toml_edit::DocumentMut {
        toml_edit::DocumentMut::from_str(&format!("id={node_id}")).unwrap()
    }

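    /// Builds the initial `pageserver.toml` by merging TOML fragments: derived
    /// defaults (`pg_distrib_dir`, `broker_endpoint`, `control_plane_api`, ...)
    /// first, then the user-provided overrides, so later fragments win on
    /// top-level key conflicts. An illustrative merged result (hypothetical
    /// values):
    ///
    /// ```text
    /// pg_distrib_dir='/home/user/neon/pg_install'
    /// broker_endpoint='http://127.0.0.1:50051/'
    /// control_plane_api='http://127.0.0.1:1234/upcall/v1/'
    /// remote_storage={local_path='../local_fs_remote_storage/pageserver'}
    /// listen_pg_addr='127.0.0.1:64000'
    /// ```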
    fn pageserver_init_make_toml(
        &self,
        conf: NeonLocalInitPageserverConf,
    ) -> anyhow::Result<toml_edit::DocumentMut> {
        assert_eq!(
            &PageServerConf::from(&conf),
            &self.conf,
            "during neon_local init, we derive the runtime state of ps conf (self.conf) from the --config flag fully"
        );

        // TODO(christian): instead of what we do here, create a pageserver_api::config::ConfigToml (PR #7656)

        // FIXME: the paths should be shell-escaped to handle paths with spaces, quotes etc.
        let pg_distrib_dir_param = format!(
            "pg_distrib_dir='{}'",
            self.env.pg_distrib_dir_raw().display()
        );

        let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());

        let mut overrides = vec![pg_distrib_dir_param, broker_endpoint_param];

        overrides.push(format!(
            "control_plane_api='{}'",
            self.env.control_plane_api.as_str()
        ));

        // The storage controller uses the same auth as the pageserver: if JWT is
        // enabled for us, we will also need it to talk to the controller.
        // Note: in Hadron the "control plane" is HCC. HCC does not require a token on the
        // trusted port the pageserver connects to, so we do not need to set any tokens when
        // using HadronJWT. In the future we may consider using mTLS instead of JWT for HTTP auth.
        if matches!(conf.http_auth_type, AuthType::NeonJWT | AuthType::HadronJWT) {
            let jwt_token = self
                .env
                .generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
                .unwrap();
            overrides.push(format!("control_plane_api_token='{jwt_token}'"));
        }

        if !conf.other.contains_key("remote_storage") {
            overrides.push(format!(
                "remote_storage={{local_path='../{PAGESERVER_REMOTE_STORAGE_DIR}'}}"
            ));
        }

        if [conf.http_auth_type, conf.pg_auth_type, conf.grpc_auth_type]
            .iter()
            .any(|auth_type| *auth_type == AuthType::NeonJWT || *auth_type == AuthType::HadronJWT)
        {
            // Keys are generated in the toplevel repo dir; pageservers' workdirs
            // are one level below that, so refer to keys with ../
            overrides.push("auth_validation_public_key_path='../auth_public_key.pem'".to_owned());
        }

        if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() {
            overrides.push(format!("ssl_ca_file='{}'", ssl_ca_file.to_str().unwrap()));
        }

        // Apply the user-provided overrides
        overrides.push({
            let mut doc =
                toml_edit::ser::to_document(&conf).expect("we deserialized this from toml earlier");
            // `id` is written out to `identity.toml` instead of `pageserver.toml`
            doc.remove("id").expect("it's part of the struct");
            doc.to_string()
        });

        // Turn `overrides` into a toml document.
        // TODO: the above is legacy code; it should be refactored to use toml_edit directly.
        let mut config_toml = toml_edit::DocumentMut::new();
        for fragment_str in overrides {
            let fragment = toml_edit::DocumentMut::from_str(&fragment_str)
                .expect("all fragments in `overrides` are valid toml documents, this function controls that");
            for (key, item) in fragment.iter() {
                config_toml.insert(key, item.clone());
            }
        }
        Ok(config_toml)
    }

    /// Initializes a pageserver node by creating its config with the overrides provided.
    pub fn initialize(&self, conf: NeonLocalInitPageserverConf) -> anyhow::Result<()> {
        self.pageserver_init(conf)
            .with_context(|| format!("Failed to run init for pageserver node {}", self.conf.id))
    }

    pub fn repo_path(&self) -> PathBuf {
        self.env.pageserver_data_dir(self.conf.id)
    }

    /// The pid file is created by the pageserver process, with its pid stored inside.
    /// Other pageservers cannot lock the same file and overwrite it for as long as the current
    /// pageserver runs. (Unless someone removes the file manually; never do that!)
    fn pid_file(&self) -> Utf8PathBuf {
        Utf8PathBuf::from_path_buf(self.repo_path().join("pageserver.pid"))
            .expect("non-Unicode path")
    }

    pub async fn start(&self, retry_timeout: &Duration) -> anyhow::Result<()> {
        self.start_node(retry_timeout).await
    }

    fn pageserver_init(&self, conf: NeonLocalInitPageserverConf) -> anyhow::Result<()> {
        let datadir = self.repo_path();
        let node_id = self.conf.id;
        println!(
            "Initializing pageserver node {} at '{}' in {:?}",
            node_id,
            self.pg_connection_config.raw_address(),
            datadir
        );
        io::stdout().flush()?;

        // If the config file we got as a CLI argument includes the `availability_zone`
        // config, then use that to populate the `metadata.json` file for the pageserver.
        // In production the deployment orchestrator does this for us.
        let az_id = conf
            .other
            .get("availability_zone")
            .map(|toml| {
                let az_str = toml.to_string();
                // Trim the quote (") chars from the toml representation
                if az_str.starts_with('"') && az_str.ends_with('"') {
                    az_str[1..az_str.len() - 1].to_string()
                } else {
                    az_str
                }
            })
            .unwrap_or("local".to_string());

        let config = self
            .pageserver_init_make_toml(conf)
            .context("make pageserver toml")?;
        let config_file_path = datadir.join("pageserver.toml");
        let mut config_file = std::fs::OpenOptions::new()
            .create_new(true)
            .write(true)
            .open(&config_file_path)
            .with_context(|| format!("open pageserver toml for write: {config_file_path:?}"))?;
        config_file
            .write_all(config.to_string().as_bytes())
            .context("write pageserver toml")?;
        drop(config_file);

        let identity_file_path = datadir.join("identity.toml");
        let mut identity_file = std::fs::OpenOptions::new()
            .create_new(true)
            .write(true)
            .open(&identity_file_path)
            .with_context(|| format!("open identity toml for write: {identity_file_path:?}"))?;
        let identity_toml = self.pageserver_make_identity_toml(node_id);
        identity_file
            .write_all(identity_toml.to_string().as_bytes())
            .context("write identity toml")?;
        drop(identity_file);

        if self.env.generate_local_ssl_certs {
            self.env.generate_ssl_cert(
                datadir.join("server.crt").as_path(),
                datadir.join("server.key").as_path(),
            )?;
        }

        // TODO: invoke a TBD config-check command to validate that pageserver will start with the written config

        // Write the metadata file, used by the pageserver on startup to register itself with
        // the storage controller.
        let metadata_path = datadir.join("metadata.json");

        let http_host = "localhost".to_string();
        let (_, http_port) =
            parse_host_port(&self.conf.listen_http_addr).expect("Unable to parse listen_http_addr");
        let http_port = http_port.unwrap_or(DEFAULT_HTTP_LISTEN_PORT);

        let https_port = match self.conf.listen_https_addr.as_ref() {
            Some(https_addr) => {
                let (_https_host, https_port) =
                    parse_host_port(https_addr).expect("Unable to parse listen_https_addr");
                Some(https_port.unwrap_or(9899))
            }
            None => None,
        };

        let (mut grpc_host, mut grpc_port) = (None, None);
        if let Some(grpc_addr) = &self.conf.listen_grpc_addr {
            let (_, port) = parse_host_port(grpc_addr).expect("Unable to parse listen_grpc_addr");
            grpc_host = Some("localhost".to_string());
            grpc_port = Some(port.unwrap_or(DEFAULT_GRPC_LISTEN_PORT));
        }

        // In real deployments this file is written by the deployment orchestrator rather
        // than by the pageserver; here we serialize pageserver_api::config::NodeMetadata
        // directly, so the file matches the structure the pageserver parses on startup.
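        // Illustrative shape of the resulting metadata.json (hypothetical values; the
        // exact field names depend on NodeMetadata's serde attributes):
        //
        //   {"host":"localhost","port":64000,"http_host":"localhost","http_port":9898,
        //    "availability_zone_id":"local"}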
        std::fs::write(
            metadata_path,
            serde_json::to_vec(&pageserver_api::config::NodeMetadata {
                postgres_host: "localhost".to_string(),
                postgres_port: self.pg_connection_config.port(),
                grpc_host,
                grpc_port,
                http_host,
                http_port,
                https_port,
                other: HashMap::from([(
                    "availability_zone_id".to_string(),
                    serde_json::json!(az_id),
                )]),
            })
            .unwrap(),
        )
        .expect("Failed to write metadata file");

        Ok(())
    }

    async fn start_node(&self, retry_timeout: &Duration) -> anyhow::Result<()> {
        // TODO: using a thread here because start_process() is not async but we need to call check_status()
        let datadir = self.repo_path();
        println!(
            "Starting pageserver node {} at '{}' in {:?}, retrying for {:?}",
            self.conf.id,
            self.pg_connection_config.raw_address(),
            datadir,
            retry_timeout
        );
        io::stdout().flush().context("flush stdout")?;

        let datadir_path_str = datadir.to_str().with_context(|| {
            format!(
                "Cannot start pageserver node {} in path that has no string representation: {:?}",
                self.conf.id, datadir,
            )
        })?;
        let args = vec!["-D", datadir_path_str];

        background_process::start_process(
            "pageserver",
            &datadir,
            &self.env.pageserver_bin(),
            args,
            self.pageserver_env_variables()?,
            background_process::InitialPidFile::Expect(self.pid_file()),
            retry_timeout,
            || async {
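                // Poll the management API's status endpoint until the pageserver
                // answers: a ReceiveBody error means it is not accepting HTTP
                // requests yet, so keep retrying; any other error is fatal.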
                let st = self.check_status().await;
                match st {
                    Ok(()) => Ok(true),
                    Err(mgmt_api::Error::ReceiveBody(_)) => Ok(false),
                    Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
                }
            },
        )
        .await?;

        Ok(())
    }

    fn pageserver_env_variables(&self) -> anyhow::Result<Vec<(String, String)>> {
        // FIXME: why is this tied to the pageserver's auth type? Whether or not the safekeeper
        // needs a token, and how to generate that token, seems independent of whether
        // the pageserver requires a token in incoming requests.
        Ok(if self.conf.http_auth_type != AuthType::Trust {
            // Generate a token to connect from the pageserver to a safekeeper
            let token = self
                .env
                .generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?;
            vec![("NEON_AUTH_TOKEN".to_owned(), token)]
        } else {
            Vec::new()
        })
    }

    /// Stop the server.
    ///
    /// If 'immediate' is true, we use SIGQUIT, killing the process immediately.
    /// Otherwise we use SIGTERM, triggering a clean shutdown.
    ///
    /// If the server is not running, returns success.
    pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
        background_process::stop_process(immediate, "pageserver", &self.pid_file())
    }

    pub async fn check_status(&self) -> mgmt_api::Result<()> {
        self.http_client.status().await
    }

    pub async fn tenant_list(&self) -> mgmt_api::Result<Vec<TenantInfo>> {
        self.http_client.list_tenants().await
    }
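
    /// Parses human-readable `key=value` tenant settings into a typed
    /// `models::TenantConfig`, consuming recognized keys and rejecting any
    /// leftovers. Illustrative usage (hypothetical values):
    ///
    /// ```ignore
    /// let settings = HashMap::from([("gc_horizon", "67108864"), ("pitr_interval", "7 days")]);
    /// let config = PageServerNode::parse_config(settings)?;
    /// assert_eq!(config.gc_horizon, Some(67108864));
    /// ```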
    pub fn parse_config(mut settings: HashMap<&str, &str>) -> anyhow::Result<models::TenantConfig> {
        let result = models::TenantConfig {
            checkpoint_distance: settings
                .remove("checkpoint_distance")
                .map(|x| x.parse::<u64>())
                .transpose()
                .context("Failed to parse 'checkpoint_distance' as an integer")?,
            checkpoint_timeout: settings
                .remove("checkpoint_timeout")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'checkpoint_timeout' as duration")?,
            compaction_target_size: settings
                .remove("compaction_target_size")
                .map(|x| x.parse::<u64>())
                .transpose()
                .context("Failed to parse 'compaction_target_size' as an integer")?,
            compaction_period: settings
                .remove("compaction_period")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'compaction_period' as duration")?,
            compaction_threshold: settings
                .remove("compaction_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()
                .context("Failed to parse 'compaction_threshold' as an integer")?,
            compaction_upper_limit: settings
                .remove("compaction_upper_limit")
                .map(|x| x.parse::<usize>())
                .transpose()
                .context("Failed to parse 'compaction_upper_limit' as an integer")?,
            compaction_algorithm: settings
                .remove("compaction_algorithm")
                .map(serde_json::from_str)
                .transpose()
                .context("Failed to parse 'compaction_algorithm' json")?,
            compaction_shard_ancestor: settings
                .remove("compaction_shard_ancestor")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'compaction_shard_ancestor' as a bool")?,
            compaction_l0_first: settings
                .remove("compaction_l0_first")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'compaction_l0_first' as a bool")?,
            compaction_l0_semaphore: settings
                .remove("compaction_l0_semaphore")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'compaction_l0_semaphore' as a bool")?,
            l0_flush_delay_threshold: settings
                .remove("l0_flush_delay_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()
                .context("Failed to parse 'l0_flush_delay_threshold' as an integer")?,
            l0_flush_stall_threshold: settings
                .remove("l0_flush_stall_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()
                .context("Failed to parse 'l0_flush_stall_threshold' as an integer")?,
            gc_horizon: settings
                .remove("gc_horizon")
                .map(|x| x.parse::<u64>())
                .transpose()
                .context("Failed to parse 'gc_horizon' as an integer")?,
            gc_period: settings
                .remove("gc_period")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'gc_period' as duration")?,
            image_creation_threshold: settings
                .remove("image_creation_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()
                .context("Failed to parse 'image_creation_threshold' as an integer")?,
            // HADRON
            image_layer_force_creation_period: settings
                .remove("image_layer_force_creation_period")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'image_layer_force_creation_period' as duration")?,
            image_layer_creation_check_threshold: settings
                .remove("image_layer_creation_check_threshold")
                .map(|x| x.parse::<u8>())
                .transpose()
                .context("Failed to parse 'image_layer_creation_check_threshold' as an integer")?,
            image_creation_preempt_threshold: settings
                .remove("image_creation_preempt_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()
                .context("Failed to parse 'image_creation_preempt_threshold' as an integer")?,
            pitr_interval: settings
                .remove("pitr_interval")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'pitr_interval' as duration")?,
            walreceiver_connect_timeout: settings
                .remove("walreceiver_connect_timeout")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'walreceiver_connect_timeout' as duration")?,
            lagging_wal_timeout: settings
                .remove("lagging_wal_timeout")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'lagging_wal_timeout' as duration")?,
            max_lsn_wal_lag: settings
                .remove("max_lsn_wal_lag")
                .map(|x| x.parse::<NonZeroU64>())
                .transpose()
                .context("Failed to parse 'max_lsn_wal_lag' as a non-zero integer")?,
            eviction_policy: settings
                .remove("eviction_policy")
                .map(serde_json::from_str)
                .transpose()
                .context("Failed to parse 'eviction_policy' json")?,
            min_resident_size_override: settings
                .remove("min_resident_size_override")
                .map(|x| x.parse::<u64>())
                .transpose()
                .context("Failed to parse 'min_resident_size_override' as an integer")?,
            evictions_low_residence_duration_metric_threshold: settings
                .remove("evictions_low_residence_duration_metric_threshold")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'evictions_low_residence_duration_metric_threshold' as duration")?,
            heatmap_period: settings
                .remove("heatmap_period")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'heatmap_period' as duration")?,
            lazy_slru_download: settings
                .remove("lazy_slru_download")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'lazy_slru_download' as bool")?,
            timeline_get_throttle: settings
                .remove("timeline_get_throttle")
                .map(serde_json::from_str)
                .transpose()
                .context("Failed to parse 'timeline_get_throttle' json")?,
            lsn_lease_length: settings
                .remove("lsn_lease_length")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'lsn_lease_length' as duration")?,
            lsn_lease_length_for_ts: settings
                .remove("lsn_lease_length_for_ts")
                .map(humantime::parse_duration)
                .transpose()
                .context("Failed to parse 'lsn_lease_length_for_ts' as duration")?,
            timeline_offloading: settings
                .remove("timeline_offloading")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'timeline_offloading' as bool")?,
            rel_size_v2_enabled: settings
                .remove("rel_size_v2_enabled")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'rel_size_v2_enabled' as bool")?,
            gc_compaction_enabled: settings
                .remove("gc_compaction_enabled")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'gc_compaction_enabled' as bool")?,
            gc_compaction_verification: settings
                .remove("gc_compaction_verification")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'gc_compaction_verification' as bool")?,
            gc_compaction_initial_threshold_kb: settings
                .remove("gc_compaction_initial_threshold_kb")
                .map(|x| x.parse::<u64>())
                .transpose()
                .context("Failed to parse 'gc_compaction_initial_threshold_kb' as an integer")?,
            gc_compaction_ratio_percent: settings
                .remove("gc_compaction_ratio_percent")
                .map(|x| x.parse::<u64>())
                .transpose()
                .context("Failed to parse 'gc_compaction_ratio_percent' as an integer")?,
            sampling_ratio: settings
                .remove("sampling_ratio")
                .map(serde_json::from_str)
                .transpose()
                .context("Failed to parse 'sampling_ratio'")?,
            relsize_snapshot_cache_capacity: settings
                .remove("relsize_snapshot_cache_capacity")
                .map(|x| x.parse::<usize>())
                .transpose()
                .context("Failed to parse 'relsize_snapshot_cache_capacity' as an integer")?,
            basebackup_cache_enabled: settings
                .remove("basebackup_cache_enabled")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'basebackup_cache_enabled' as bool")?,
        };
        if !settings.is_empty() {
            bail!("Unrecognized tenant settings: {settings:?}")
        } else {
            Ok(result)
        }
    }

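    /// Applies the given settings to a tenant's config through the pageserver
    /// management API. Illustrative usage (hypothetical values):
    ///
    /// ```ignore
    /// ps.tenant_config(tenant_id, HashMap::from([("gc_period", "1h")])).await?;
    /// ```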
    pub async fn tenant_config(
        &self,
        tenant_id: TenantId,
        settings: HashMap<&str, &str>,
    ) -> anyhow::Result<()> {
        let config = Self::parse_config(settings)?;
        self.http_client
            .set_tenant_config(&models::TenantConfigRequest { tenant_id, config })
            .await?;

        Ok(())
    }

    pub async fn timeline_list(
        &self,
        tenant_shard_id: &TenantShardId,
    ) -> anyhow::Result<Vec<TimelineInfo>> {
        Ok(self.http_client.list_timelines(*tenant_shard_id).await?)
    }

    /// Import a basebackup prepared using either:
    /// a) `pg_basebackup -F tar`, or
    /// b) the `fullbackup` pageserver endpoint
    ///
    /// # Arguments
    /// * `tenant_id` - tenant to import into; created if it does not exist
    /// * `timeline_id` - id to assign to the imported timeline
    /// * `base` - (start lsn of basebackup, path to `base.tar` file)
    /// * `pg_wal` - if there's any wal to import: (end lsn, path to `pg_wal.tar`)
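    ///
    /// Illustrative call (hypothetical paths and LSNs):
    ///
    /// ```ignore
    /// ps.timeline_import(
    ///     tenant_id,
    ///     timeline_id,
    ///     (Lsn::from_str("0/16B3748")?, PathBuf::from("base.tar")),
    ///     Some((Lsn::from_str("0/16B5E50")?, PathBuf::from("pg_wal.tar"))),
    ///     pg_version,
    /// )
    /// .await?;
    /// ```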
    pub async fn timeline_import(
        &self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        base: (Lsn, PathBuf),
        pg_wal: Option<(Lsn, PathBuf)>,
        pg_version: PgMajorVersion,
    ) -> anyhow::Result<()> {
        // Init base reader
        let (start_lsn, base_tarfile_path) = base;
        let base_tarfile = tokio::fs::File::open(base_tarfile_path).await?;
        let base_tarfile =
            mgmt_api::ReqwestBody::wrap_stream(tokio_util::io::ReaderStream::new(base_tarfile));

        // Init wal reader if necessary
        let (end_lsn, wal_reader) = if let Some((end_lsn, wal_tarfile_path)) = pg_wal {
            let wal_tarfile = tokio::fs::File::open(wal_tarfile_path).await?;
            let wal_reader =
                mgmt_api::ReqwestBody::wrap_stream(tokio_util::io::ReaderStream::new(wal_tarfile));
            (end_lsn, Some(wal_reader))
        } else {
            (start_lsn, None)
        };

        // Import base
        self.http_client
            .import_basebackup(
                tenant_id,
                timeline_id,
                start_lsn,
                end_lsn,
                pg_version,
                base_tarfile,
            )
            .await?;

        // Import wal if necessary
        if let Some(wal_reader) = wal_reader {
            self.http_client
                .import_wal(tenant_id, timeline_id, start_lsn, end_lsn, wal_reader)
                .await?;
        }

        Ok(())
    }

    pub async fn timeline_info(
        &self,
        tenant_shard_id: TenantShardId,
        timeline_id: TimelineId,
        force_await_logical_size: mgmt_api::ForceAwaitLogicalSize,
    ) -> anyhow::Result<TimelineInfo> {
        let timeline_info = self
            .http_client
            .timeline_info(tenant_shard_id, timeline_id, force_await_logical_size)
            .await?;
        Ok(timeline_info)
    }
}