//! Code to manage pageservers
//!
//! In the local test environment, the pageserver stores its data directly in
//!
//! .neon/
//!
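//! A rough sketch of how neon_local drives this module (illustrative only; the
//! `env` and `conf` values come from a loaded [`LocalEnv`]):
//!
//! ```ignore
//! let pageserver = PageServerNode::from_env(&env, &conf);
//! pageserver.initialize(&[])?;    // runs `pageserver --init` to write the node's config
//! pageserver.start(&[]).await?;   // spawns the pageserver as a background process
//! // ... drive tenants/timelines through the HTTP management API ...
//! pageserver.stop(false)?;        // SIGTERM, clean shutdown
//! ```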
use std::borrow::Cow;
use std::collections::HashMap;

use std::io;
use std::io::Write;
use std::num::NonZeroU64;
use std::path::PathBuf;
use std::process::Command;
use std::time::Duration;

use anyhow::{bail, Context};
use camino::Utf8PathBuf;
use futures::SinkExt;
use pageserver_api::models::{
    self, LocationConfig, ShardParameters, TenantHistorySize, TenantInfo, TimelineInfo,
};
use pageserver_api::shard::TenantShardId;
use pageserver_client::mgmt_api;
use postgres_backend::AuthType;
use postgres_connection::{parse_host_port, PgConnectionConfig};
use utils::auth::{Claims, Scope};
use utils::{
    id::{TenantId, TimelineId},
    lsn::Lsn,
};

use crate::local_env::PageServerConf;
use crate::{background_process, local_env::LocalEnv};

/// Directory within .neon which will be used by default for LocalFs remote storage.
pub const PAGESERVER_REMOTE_STORAGE_DIR: &str = "local_fs_remote_storage/pageserver";

//
// Control routines for pageserver.
//
// Used in CLI and tests.
//
#[derive(Debug)]
pub struct PageServerNode {
    pub pg_connection_config: PgConnectionConfig,
    pub conf: PageServerConf,
    pub env: LocalEnv,
    pub http_client: mgmt_api::Client,
}

impl PageServerNode {
    pub fn from_env(env: &LocalEnv, conf: &PageServerConf) -> PageServerNode {
        let (host, port) =
            parse_host_port(&conf.listen_pg_addr).expect("Unable to parse listen_pg_addr");
        let port = port.unwrap_or(5432);
        Self {
            pg_connection_config: PgConnectionConfig::new_host_port(host, port),
            conf: conf.clone(),
            env: env.clone(),
            http_client: mgmt_api::Client::new(
                format!("http://{}", conf.listen_http_addr),
                {
                    match conf.http_auth_type {
                        AuthType::Trust => None,
                        AuthType::NeonJWT => Some(
                            env.generate_auth_token(&Claims::new(None, Scope::PageServerApi))
                                .unwrap(),
                        ),
                    }
                }
                .as_deref(),
            ),
        }
    }

    /// Merge overrides provided by the user on the command line with our default overrides derived from neon_local configuration.
    ///
    /// These all end up on the command line of the `pageserver` binary.
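    ///
    /// Each returned string is later passed to the binary as one `-c <override>`
    /// argument (see [`Self::pageserver_basic_args`]), roughly like this
    /// (addresses and paths are illustrative):
    ///
    /// ```text
    /// pageserver -D <datadir> \
    ///     -c "id=1" \
    ///     -c "pg_distrib_dir='<pg install dir>'" \
    ///     -c "listen_pg_addr='127.0.0.1:64000'" \
    ///     -c "listen_http_addr='127.0.0.1:9898'" \
    ///     -c "broker_endpoint='<storage broker url>'" \
    ///     ...
    /// ```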
    fn neon_local_overrides(&self, cli_overrides: &[&str]) -> Vec<String> {
        // FIXME: the paths should be shell-escaped to handle paths with spaces, quotes, etc.
        let pg_distrib_dir_param = format!(
            "pg_distrib_dir='{}'",
            self.env.pg_distrib_dir_raw().display()
        );

        let PageServerConf {
            id,
            listen_pg_addr,
            listen_http_addr,
            pg_auth_type,
            http_auth_type,
            virtual_file_io_engine,
            get_vectored_impl,
        } = &self.conf;

        let id = format!("id={}", id);

        let http_auth_type_param = format!("http_auth_type='{}'", http_auth_type);
        let listen_http_addr_param = format!("listen_http_addr='{}'", listen_http_addr);

        let pg_auth_type_param = format!("pg_auth_type='{}'", pg_auth_type);
        let listen_pg_addr_param = format!("listen_pg_addr='{}'", listen_pg_addr);
        let virtual_file_io_engine = if let Some(virtual_file_io_engine) = virtual_file_io_engine {
            format!("virtual_file_io_engine='{virtual_file_io_engine}'")
        } else {
            String::new()
        };
        let get_vectored_impl = if let Some(get_vectored_impl) = get_vectored_impl {
            format!("get_vectored_impl='{get_vectored_impl}'")
        } else {
            String::new()
        };

        let broker_endpoint_param = format!("broker_endpoint='{}'", self.env.broker.client_url());

        let mut overrides = vec![
            id,
            pg_distrib_dir_param,
            http_auth_type_param,
            pg_auth_type_param,
            listen_http_addr_param,
            listen_pg_addr_param,
            broker_endpoint_param,
            virtual_file_io_engine,
            get_vectored_impl,
        ];

        if let Some(control_plane_api) = &self.env.control_plane_api {
            overrides.push(format!(
                "control_plane_api='{}'",
                control_plane_api.as_str()
            ));

            // Storage controller uses the same auth as pageserver: if JWT is enabled
            // for us, we will also need it to talk to them.
            if matches!(http_auth_type, AuthType::NeonJWT) {
                let jwt_token = self
                    .env
                    .generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
                    .unwrap();
                overrides.push(format!("control_plane_api_token='{}'", jwt_token));
            }
        }

        if !cli_overrides
            .iter()
            .any(|c| c.starts_with("remote_storage"))
        {
            overrides.push(format!(
                "remote_storage={{local_path='../{PAGESERVER_REMOTE_STORAGE_DIR}'}}"
            ));
        }

        if *http_auth_type != AuthType::Trust || *pg_auth_type != AuthType::Trust {
            // Keys are generated in the toplevel repo dir, pageservers' workdirs
            // are one level below that, so refer to keys with ../
            overrides.push("auth_validation_public_key_path='../auth_public_key.pem'".to_owned());
        }

        // Apply the user-provided overrides
        overrides.extend(cli_overrides.iter().map(|&c| c.to_owned()));

        overrides
    }

    /// Initializes a pageserver node by creating its config with the overrides provided.
    pub fn initialize(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
        // First, run `pageserver --init` and wait for it to write a config into FS and exit.
        self.pageserver_init(config_overrides)
            .with_context(|| format!("Failed to run init for pageserver node {}", self.conf.id))
    }

    pub fn repo_path(&self) -> PathBuf {
        self.env.pageserver_data_dir(self.conf.id)
    }

    /// The pid file is created by the pageserver process, with its pid stored inside.
    /// Other pageservers cannot lock the same file and overwrite it for as long as the current
    /// pageserver runs. (Unless someone removes the file manually; never do that!)
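    ///
    /// The file lives directly in this node's data directory (see
    /// [`Self::repo_path`]), i.e. `<datadir>/pageserver.pid`.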
    fn pid_file(&self) -> Utf8PathBuf {
        Utf8PathBuf::from_path_buf(self.repo_path().join("pageserver.pid"))
            .expect("non-Unicode path")
    }

    pub async fn start(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
        self.start_node(config_overrides, false).await
    }

    fn pageserver_init(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
        let datadir = self.repo_path();
        let node_id = self.conf.id;
        println!(
            "Initializing pageserver node {} at '{}' in {:?}",
            node_id,
            self.pg_connection_config.raw_address(),
            datadir
        );
        io::stdout().flush()?;

        if !datadir.exists() {
            std::fs::create_dir(&datadir)?;
        }

        let datadir_path_str = datadir.to_str().with_context(|| {
            format!("Cannot start pageserver node {node_id} in path that has no string representation: {datadir:?}")
        })?;
        let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
        args.push(Cow::Borrowed("--init"));

        let init_output = Command::new(self.env.pageserver_bin())
            .args(args.iter().map(Cow::as_ref))
            .envs(self.pageserver_env_variables()?)
            .output()
            .with_context(|| format!("Failed to run pageserver init for node {node_id}"))?;

        anyhow::ensure!(
            init_output.status.success(),
            "Pageserver init for node {} did not finish successfully, stdout: {}, stderr: {}",
            node_id,
            String::from_utf8_lossy(&init_output.stdout),
            String::from_utf8_lossy(&init_output.stderr),
        );

        // Write metadata file, used by pageserver on startup to register itself with
        // the storage controller
        let metadata_path = datadir.join("metadata.json");

        let (_http_host, http_port) =
            parse_host_port(&self.conf.listen_http_addr).expect("Unable to parse listen_http_addr");
        let http_port = http_port.unwrap_or(9898);
        // Intentionally hand-craft JSON: this acts as an implicit format compat test
        // in case the pageserver-side structure is edited, and reflects the real life
        // situation: the metadata is written by some other script.
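        //
        // The file ends up looking roughly like this (port numbers depend on the
        // local config; shown purely as an illustration):
        //
        //   {"host":"localhost","port":64000,"http_host":"localhost","http_port":9898}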
        std::fs::write(
            metadata_path,
            serde_json::to_vec(&serde_json::json!({
                "host": "localhost",
                "port": self.pg_connection_config.port(),
                "http_host": "localhost",
                "http_port": http_port,
            }))
            .unwrap(),
        )
        .expect("Failed to write metadata file");

        Ok(())
    }

    async fn start_node(
        &self,
        config_overrides: &[&str],
        update_config: bool,
    ) -> anyhow::Result<()> {
        // TODO: using a thread here because start_process() is not async but we need to call check_status()
        let datadir = self.repo_path();
        print!(
            "Starting pageserver node {} at '{}' in {:?}",
            self.conf.id,
            self.pg_connection_config.raw_address(),
            datadir
        );
        io::stdout().flush().context("flush stdout")?;

        let datadir_path_str = datadir.to_str().with_context(|| {
            format!(
                "Cannot start pageserver node {} in path that has no string representation: {:?}",
                self.conf.id, datadir,
            )
        })?;
        let mut args = self.pageserver_basic_args(config_overrides, datadir_path_str);
        if update_config {
            args.push(Cow::Borrowed("--update-config"));
        }
        background_process::start_process(
            "pageserver",
            &datadir,
            &self.env.pageserver_bin(),
            args.iter().map(Cow::as_ref),
            self.pageserver_env_variables()?,
            background_process::InitialPidFile::Expect(self.pid_file()),
            || async {
                let st = self.check_status().await;
                match st {
                    Ok(()) => Ok(true),
                    Err(mgmt_api::Error::ReceiveBody(_)) => Ok(false),
                    Err(e) => Err(anyhow::anyhow!("Failed to check node status: {e}")),
                }
            },
        )
        .await?;

        Ok(())
    }

    fn pageserver_basic_args<'a>(
        &self,
        config_overrides: &'a [&'a str],
        datadir_path_str: &'a str,
    ) -> Vec<Cow<'a, str>> {
        let mut args = vec![Cow::Borrowed("-D"), Cow::Borrowed(datadir_path_str)];

        let overrides = self.neon_local_overrides(config_overrides);
        for config_override in overrides {
            args.push(Cow::Borrowed("-c"));
            args.push(Cow::Owned(config_override));
        }

        args
    }

    fn pageserver_env_variables(&self) -> anyhow::Result<Vec<(String, String)>> {
        // FIXME: why is this tied to pageserver's auth type? Whether or not the safekeeper
        // needs a token, and how to generate that token, seems independent of whether
        // the pageserver requires a token in incoming requests.
        Ok(if self.conf.http_auth_type != AuthType::Trust {
            // Generate a token to connect from the pageserver to a safekeeper
            let token = self
                .env
                .generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?;
            vec![("NEON_AUTH_TOKEN".to_owned(), token)]
        } else {
            Vec::new()
        })
    }

    ///
    /// Stop the server.
    ///
    /// If 'immediate' is true, we use SIGQUIT, killing the process immediately.
    /// Otherwise we use SIGTERM, triggering a clean shutdown.
    ///
    /// If the server is not running, returns success.
    ///
    pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
        background_process::stop_process(immediate, "pageserver", &self.pid_file())
    }

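    /// Open a raw libpq connection to the pageserver.
    ///
    /// The returned `Connection` drives the underlying socket and must be awaited
    /// (typically on a spawned task) for the `Client` to make progress; see
    /// [`Self::timeline_import`] for a usage sketch.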
    pub async fn page_server_psql_client(
        &self,
    ) -> anyhow::Result<(
        tokio_postgres::Client,
        tokio_postgres::Connection<tokio_postgres::Socket, tokio_postgres::tls::NoTlsStream>,
    )> {
        let mut config = self.pg_connection_config.clone();
        if self.conf.pg_auth_type == AuthType::NeonJWT {
            let token = self
                .env
                .generate_auth_token(&Claims::new(None, Scope::PageServerApi))?;
            config = config.set_password(Some(token));
        }
        Ok(config.connect_no_tls().await?)
    }

    pub async fn check_status(&self) -> mgmt_api::Result<()> {
        self.http_client.status().await
    }

    pub async fn tenant_list(&self) -> mgmt_api::Result<Vec<TenantInfo>> {
        self.http_client.list_tenants().await
    }
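
    /// Parse tenant config overrides given as `key=value` pairs (e.g. from the
    /// neon_local command line) into a [`models::TenantConfig`]. Unrecognized
    /// keys are rejected.
    ///
    /// A minimal sketch of the intended use (keys and values are illustrative):
    ///
    /// ```ignore
    /// let mut settings = HashMap::new();
    /// settings.insert("gc_horizon", "67108864");
    /// settings.insert("pitr_interval", "7 days");
    /// let config = PageServerNode::parse_config(settings)?;
    /// ```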
    pub fn parse_config(mut settings: HashMap<&str, &str>) -> anyhow::Result<models::TenantConfig> {
        let result = models::TenantConfig {
            checkpoint_distance: settings
                .remove("checkpoint_distance")
                .map(|x| x.parse::<u64>())
                .transpose()?,
            checkpoint_timeout: settings.remove("checkpoint_timeout").map(|x| x.to_string()),
            compaction_target_size: settings
                .remove("compaction_target_size")
                .map(|x| x.parse::<u64>())
                .transpose()?,
            compaction_period: settings.remove("compaction_period").map(|x| x.to_string()),
            compaction_threshold: settings
                .remove("compaction_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()?,
            compaction_algorithm: settings
                .remove("compaction_algorithm")
                .map(serde_json::from_str)
                .transpose()
                .context("Failed to parse 'compaction_algorithm' json")?,
            gc_horizon: settings
                .remove("gc_horizon")
                .map(|x| x.parse::<u64>())
                .transpose()?,
            gc_period: settings.remove("gc_period").map(|x| x.to_string()),
            image_creation_threshold: settings
                .remove("image_creation_threshold")
                .map(|x| x.parse::<usize>())
                .transpose()?,
            image_layer_creation_check_threshold: settings
                .remove("image_layer_creation_check_threshold")
                .map(|x| x.parse::<u8>())
                .transpose()?,
            pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
            walreceiver_connect_timeout: settings
                .remove("walreceiver_connect_timeout")
                .map(|x| x.to_string()),
            lagging_wal_timeout: settings
                .remove("lagging_wal_timeout")
                .map(|x| x.to_string()),
            max_lsn_wal_lag: settings
                .remove("max_lsn_wal_lag")
                .map(|x| x.parse::<NonZeroU64>())
                .transpose()
                .context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
            trace_read_requests: settings
                .remove("trace_read_requests")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'trace_read_requests' as bool")?,
            eviction_policy: settings
                .remove("eviction_policy")
                .map(serde_json::from_str)
                .transpose()
                .context("Failed to parse 'eviction_policy' json")?,
            min_resident_size_override: settings
                .remove("min_resident_size_override")
                .map(|x| x.parse::<u64>())
                .transpose()
                .context("Failed to parse 'min_resident_size_override' as integer")?,
            evictions_low_residence_duration_metric_threshold: settings
                .remove("evictions_low_residence_duration_metric_threshold")
                .map(|x| x.to_string()),
            heatmap_period: settings.remove("heatmap_period").map(|x| x.to_string()),
            lazy_slru_download: settings
                .remove("lazy_slru_download")
                .map(|x| x.parse::<bool>())
                .transpose()
                .context("Failed to parse 'lazy_slru_download' as bool")?,
            timeline_get_throttle: settings
                .remove("timeline_get_throttle")
                .map(serde_json::from_str)
                .transpose()
                .context("parse `timeline_get_throttle` from json")?,
        };
        if !settings.is_empty() {
            bail!("Unrecognized tenant settings: {settings:?}")
        } else {
            Ok(result)
        }
    }

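    /// Create a new, unsharded tenant on this pageserver through its HTTP
    /// management API. `settings` accepts the same `key=value` overrides as
    /// [`Self::parse_config`].
    ///
    /// Illustrative call (the tenant ID is a placeholder obtained elsewhere):
    ///
    /// ```ignore
    /// let tenant_id = pageserver
    ///     .tenant_create(new_tenant_id, None, HashMap::new())
    ///     .await?;
    /// ```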
    pub async fn tenant_create(
        &self,
        new_tenant_id: TenantId,
        generation: Option<u32>,
        settings: HashMap<&str, &str>,
    ) -> anyhow::Result<TenantId> {
        let config = Self::parse_config(settings.clone())?;

        let request = models::TenantCreateRequest {
            new_tenant_id: TenantShardId::unsharded(new_tenant_id),
            generation,
            config,
            shard_parameters: ShardParameters::default(),
            // Placement policy is not meaningful for creations not done via storage controller
            placement_policy: None,
        };
        if !settings.is_empty() {
            bail!("Unrecognized tenant settings: {settings:?}")
        }
        Ok(self.http_client.tenant_create(&request).await?)
    }

    pub async fn tenant_config(
        &self,
        tenant_id: TenantId,
        mut settings: HashMap<&str, &str>,
    ) -> anyhow::Result<()> {
        let config = {
            // Braces to make the diff easier to read
            models::TenantConfig {
                checkpoint_distance: settings
                    .remove("checkpoint_distance")
                    .map(|x| x.parse::<u64>())
                    .transpose()
                    .context("Failed to parse 'checkpoint_distance' as an integer")?,
                checkpoint_timeout: settings.remove("checkpoint_timeout").map(|x| x.to_string()),
                compaction_target_size: settings
                    .remove("compaction_target_size")
                    .map(|x| x.parse::<u64>())
                    .transpose()
                    .context("Failed to parse 'compaction_target_size' as an integer")?,
                compaction_period: settings.remove("compaction_period").map(|x| x.to_string()),
                compaction_threshold: settings
                    .remove("compaction_threshold")
                    .map(|x| x.parse::<usize>())
                    .transpose()
                    .context("Failed to parse 'compaction_threshold' as an integer")?,
                compaction_algorithm: settings
                    .remove("compaction_algorithm")
                    .map(serde_json::from_str)
                    .transpose()
                    .context("Failed to parse 'compaction_algorithm' json")?,
                gc_horizon: settings
                    .remove("gc_horizon")
                    .map(|x| x.parse::<u64>())
                    .transpose()
                    .context("Failed to parse 'gc_horizon' as an integer")?,
                gc_period: settings.remove("gc_period").map(|x| x.to_string()),
                image_creation_threshold: settings
                    .remove("image_creation_threshold")
                    .map(|x| x.parse::<usize>())
                    .transpose()
                    .context("Failed to parse 'image_creation_threshold' as an integer")?,
                image_layer_creation_check_threshold: settings
                    .remove("image_layer_creation_check_threshold")
                    .map(|x| x.parse::<u8>())
                    .transpose()
                    .context("Failed to parse 'image_layer_creation_check_threshold' as an integer")?,

                pitr_interval: settings.remove("pitr_interval").map(|x| x.to_string()),
                walreceiver_connect_timeout: settings
                    .remove("walreceiver_connect_timeout")
                    .map(|x| x.to_string()),
                lagging_wal_timeout: settings
                    .remove("lagging_wal_timeout")
                    .map(|x| x.to_string()),
                max_lsn_wal_lag: settings
                    .remove("max_lsn_wal_lag")
                    .map(|x| x.parse::<NonZeroU64>())
                    .transpose()
                    .context("Failed to parse 'max_lsn_wal_lag' as non zero integer")?,
                trace_read_requests: settings
                    .remove("trace_read_requests")
                    .map(|x| x.parse::<bool>())
                    .transpose()
                    .context("Failed to parse 'trace_read_requests' as bool")?,
                eviction_policy: settings
                    .remove("eviction_policy")
                    .map(serde_json::from_str)
                    .transpose()
                    .context("Failed to parse 'eviction_policy' json")?,
                min_resident_size_override: settings
                    .remove("min_resident_size_override")
                    .map(|x| x.parse::<u64>())
                    .transpose()
                    .context("Failed to parse 'min_resident_size_override' as an integer")?,
                evictions_low_residence_duration_metric_threshold: settings
                    .remove("evictions_low_residence_duration_metric_threshold")
                    .map(|x| x.to_string()),
                heatmap_period: settings.remove("heatmap_period").map(|x| x.to_string()),
                lazy_slru_download: settings
                    .remove("lazy_slru_download")
                    .map(|x| x.parse::<bool>())
                    .transpose()
                    .context("Failed to parse 'lazy_slru_download' as bool")?,
                timeline_get_throttle: settings
                    .remove("timeline_get_throttle")
                    .map(serde_json::from_str)
                    .transpose()
                    .context("parse `timeline_get_throttle` from json")?,
            }
        };

        if !settings.is_empty() {
            bail!("Unrecognized tenant settings: {settings:?}")
        }

        self.http_client
            .tenant_config(&models::TenantConfigRequest { tenant_id, config })
            .await?;

        Ok(())
    }

    pub async fn location_config(
        &self,
        tenant_shard_id: TenantShardId,
        config: LocationConfig,
        flush_ms: Option<Duration>,
        lazy: bool,
    ) -> anyhow::Result<()> {
        Ok(self
            .http_client
            .location_config(tenant_shard_id, config, flush_ms, lazy)
            .await?)
    }

    pub async fn timeline_list(
        &self,
        tenant_shard_id: &TenantShardId,
    ) -> anyhow::Result<Vec<TimelineInfo>> {
        Ok(self.http_client.list_timelines(*tenant_shard_id).await?)
    }

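    /// Create a timeline on this pageserver, optionally branching off an
    /// ancestor timeline at `ancestor_start_lsn`.
    ///
    /// Illustrative call for a fresh (non-branched) timeline; the IDs and the
    /// Postgres version are placeholders:
    ///
    /// ```ignore
    /// let info = pageserver
    ///     .timeline_create(tenant_shard_id, new_timeline_id, None, None, Some(15), None)
    ///     .await?;
    /// ```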
    pub async fn timeline_create(
        &self,
        tenant_shard_id: TenantShardId,
        new_timeline_id: TimelineId,
        ancestor_start_lsn: Option<Lsn>,
        ancestor_timeline_id: Option<TimelineId>,
        pg_version: Option<u32>,
        existing_initdb_timeline_id: Option<TimelineId>,
    ) -> anyhow::Result<TimelineInfo> {
        let req = models::TimelineCreateRequest {
            new_timeline_id,
            ancestor_start_lsn,
            ancestor_timeline_id,
            pg_version,
            existing_initdb_timeline_id,
        };
        Ok(self
            .http_client
            .timeline_create(tenant_shard_id, &req)
            .await?)
    }

    /// Import a basebackup prepared using either:
    /// a) `pg_basebackup -F tar`, or
    /// b) The `fullbackup` pageserver endpoint
    ///
    /// # Arguments
    /// * `tenant_id` - tenant to import into. Created if it does not exist.
    /// * `timeline_id` - id to assign to the imported timeline
    /// * `base` - (start lsn of basebackup, path to `base.tar` file)
    /// * `pg_wal` - if there's any wal to import: (end lsn, path to `pg_wal.tar`)
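    ///
    /// # Example
    ///
    /// An illustrative call; the paths, LSNs and IDs are placeholders:
    ///
    /// ```ignore
    /// pageserver
    ///     .timeline_import(
    ///         tenant_id,
    ///         timeline_id,
    ///         (start_lsn, PathBuf::from("base.tar")),
    ///         Some((end_lsn, PathBuf::from("pg_wal.tar"))),
    ///         15,
    ///     )
    ///     .await?;
    /// ```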
    pub async fn timeline_import(
        &self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        base: (Lsn, PathBuf),
        pg_wal: Option<(Lsn, PathBuf)>,
        pg_version: u32,
    ) -> anyhow::Result<()> {
        let (client, conn) = self.page_server_psql_client().await?;
        // The connection object performs the actual communication with the database,
        // so spawn it off to run on its own.
        tokio::spawn(async move {
            if let Err(e) = conn.await {
                eprintln!("connection error: {}", e);
            }
        });
        let client = std::pin::pin!(client);

        // Init base reader
        let (start_lsn, base_tarfile_path) = base;
        let base_tarfile = tokio::fs::File::open(base_tarfile_path).await?;
        let base_tarfile = tokio_util::io::ReaderStream::new(base_tarfile);

        // Init wal reader if necessary
        let (end_lsn, wal_reader) = if let Some((end_lsn, wal_tarfile_path)) = pg_wal {
            let wal_tarfile = tokio::fs::File::open(wal_tarfile_path).await?;
            let wal_reader = tokio_util::io::ReaderStream::new(wal_tarfile);
            (end_lsn, Some(wal_reader))
        } else {
            (start_lsn, None)
        };

        let copy_in = |reader, cmd| {
            let client = &client;
            async move {
                let writer = client.copy_in(&cmd).await?;
                let writer = std::pin::pin!(writer);
                let mut writer = writer.sink_map_err(|e| {
                    std::io::Error::new(std::io::ErrorKind::Other, format!("{e}"))
                });
                let mut reader = std::pin::pin!(reader);
                writer.send_all(&mut reader).await?;
                writer.into_inner().finish().await?;
                anyhow::Ok(())
            }
        };

        // Import base
        copy_in(
            base_tarfile,
            format!(
                "import basebackup {tenant_id} {timeline_id} {start_lsn} {end_lsn} {pg_version}"
            ),
        )
        .await?;
        // Import wal if necessary
        if let Some(wal_reader) = wal_reader {
            copy_in(
                wal_reader,
                format!("import wal {tenant_id} {timeline_id} {start_lsn} {end_lsn}"),
            )
            .await?;
        }

        Ok(())
    }

    pub async fn tenant_synthetic_size(
        &self,
        tenant_shard_id: TenantShardId,
    ) -> anyhow::Result<TenantHistorySize> {
        Ok(self
            .http_client
            .tenant_synthetic_size(tenant_shard_id)
            .await?)
    }
}