use std::ffi::OsStr;
use std::fs;
use std::path::PathBuf;
use std::process::ExitStatus;
use std::str::FromStr;
use std::sync::OnceLock;
use std::time::{Duration, Instant};

use camino::{Utf8Path, Utf8PathBuf};
use hyper0::Uri;
use nix::unistd::Pid;
use pageserver_api::controller_api::{
    NodeConfigureRequest, NodeDescribeResponse, NodeRegisterRequest, TenantCreateRequest,
    TenantCreateResponse, TenantLocateResponse,
};
use pageserver_api::models::{
    TenantConfig, TenantConfigRequest, TimelineCreateRequest, TimelineInfo,
};
use pageserver_api::shard::TenantShardId;
use pageserver_client::mgmt_api::ResponseErrorMessageExt;
use pem::Pem;
use postgres_backend::AuthType;
use reqwest::{Certificate, Method};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tracing::instrument;
use url::Url;
use utils::auth::{Claims, Scope, encode_from_key_file};
use utils::id::{NodeId, TenantId};
use whoami::username;

use crate::background_process;
use crate::local_env::{LocalEnv, NeonStorageControllerConf};

pub struct StorageController {
    env: LocalEnv,
    private_key: Option<Pem>,
    public_key: Option<Pem>,
    client: reqwest::Client,
    config: NeonStorageControllerConf,

    // The listen port is learned when starting the storage controller,
    // hence the use of OnceLock to init it at the right time.
    listen_port: OnceLock<u16>,
}

const COMMAND: &str = "storage_controller";

const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;

const DB_NAME: &str = "storage_controller";

pub struct NeonStorageControllerStartArgs {
    pub instance_id: u8,
    pub base_port: Option<u16>,
    pub start_timeout: humantime::Duration,
}

impl NeonStorageControllerStartArgs {
    pub fn with_default_instance_id(start_timeout: humantime::Duration) -> Self {
        Self {
            instance_id: 1,
            base_port: None,
            start_timeout,
        }
    }
}

pub struct NeonStorageControllerStopArgs {
    pub instance_id: u8,
    pub immediate: bool,
}

impl NeonStorageControllerStopArgs {
    pub fn with_default_instance_id(immediate: bool) -> Self {
        Self {
            instance_id: 1,
            immediate,
        }
    }
}

#[derive(Serialize, Deserialize)]
pub struct AttachHookRequest {
    pub tenant_shard_id: TenantShardId,
    pub node_id: Option<NodeId>,
    pub generation_override: Option<i32>, // only new tenants
    pub config: Option<TenantConfig>,     // only new tenants
}

#[derive(Serialize, Deserialize)]
pub struct AttachHookResponse {
    #[serde(rename = "gen")]
    pub generation: Option<u32>,
}

#[derive(Serialize, Deserialize)]
pub struct InspectRequest {
    pub tenant_shard_id: TenantShardId,
}

#[derive(Serialize, Deserialize)]
pub struct InspectResponse {
    pub attachment: Option<(u32, NodeId)>,
}

impl StorageController {
    pub fn from_env(env: &LocalEnv) -> Self {
        // Assume all pageservers have symmetric auth configuration: this service
        // expects to use one JWT token to talk to all of them.
        let ps_conf = env
            .pageservers
            .first()
            .expect("Config is validated to contain at least one pageserver");
        let (private_key, public_key) = match ps_conf.http_auth_type {
            AuthType::Trust => (None, None),
            AuthType::NeonJWT => {
                let private_key_path = env.get_private_key_path();
                let private_key =
                    pem::parse(fs::read(private_key_path).expect("failed to read private key"))
                        .expect("failed to parse PEM file");

                // If pageserver auth is enabled, this implicitly enables auth for this service,
                // using the same credentials.
                let public_key_path =
                    camino::Utf8PathBuf::try_from(env.base_data_dir.join("auth_public_key.pem"))
                        .unwrap();

                // This service takes keys as a string rather than as a path to a file/dir: read the key into memory.
                let public_key = if std::fs::metadata(&public_key_path)
                    .expect("Can't stat public key")
                    .is_dir()
                {
                    // Our config may specify a directory: this is for the pageserver's ability to handle multiple
                    // keys. We only use one key at a time, so, arbitrarily load the first one in the directory.
                    let mut dir =
                        std::fs::read_dir(&public_key_path).expect("Can't readdir public key path");
                    let dent = dir
                        .next()
                        .expect("Empty key dir")
                        .expect("Error reading key dir");

                    pem::parse(std::fs::read_to_string(dent.path()).expect("Can't read public key"))
                        .expect("Failed to parse PEM file")
                } else {
                    pem::parse(
                        std::fs::read_to_string(&public_key_path).expect("Can't read public key"),
                    )
                    .expect("Failed to parse PEM file")
                };
                (Some(private_key), Some(public_key))
            }
        };

        let ssl_ca_certs = env.ssl_ca_cert_path().map(|ssl_ca_file| {
            let buf = std::fs::read(ssl_ca_file).expect("SSL CA file should exist");
            Certificate::from_pem_bundle(&buf).expect("SSL CA file should be valid")
        });

        let mut http_client = reqwest::Client::builder();
        for ssl_ca_cert in ssl_ca_certs.unwrap_or_default() {
            http_client = http_client.add_root_certificate(ssl_ca_cert);
        }
        let http_client = http_client
            .build()
            .expect("HTTP client should construct with no error");

        Self {
            env: env.clone(),
            private_key,
            public_key,
            client: http_client,
            config: env.storage_controller.clone(),
            listen_port: OnceLock::default(),
        }
    }

    fn storage_controller_instance_dir(&self, instance_id: u8) -> PathBuf {
        self.env
            .base_data_dir
            .join(format!("storage_controller_{}", instance_id))
    }

    fn pid_file(&self, instance_id: u8) -> Utf8PathBuf {
        Utf8PathBuf::from_path_buf(
            self.storage_controller_instance_dir(instance_id)
                .join("storage_controller.pid"),
        )
        .expect("non-Unicode path")
    }

    /// Find the directory containing postgres subdirectories, such as `bin` and `lib`.
    ///
    /// This usually uses the `STORAGE_CONTROLLER_POSTGRES_VERSION` of postgres, but will fall back
    /// to other versions if that one isn't found. Some automated tests create circumstances
    /// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`.
    async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result<Utf8PathBuf> {
        let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14];

        for v in prefer_versions {
            let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap();
            if tokio::fs::try_exists(&path).await? {
                return Ok(path);
            }
        }

        // Fall through
        anyhow::bail!(
            "Postgres directory '{}' not found in {}",
            dir_name,
            self.env.pg_distrib_dir.display(),
        );
    }

    pub async fn get_pg_bin_dir(&self) -> anyhow::Result<Utf8PathBuf> {
        self.get_pg_dir("bin").await
    }

    pub async fn get_pg_lib_dir(&self) -> anyhow::Result<Utf8PathBuf> {
        self.get_pg_dir("lib").await
    }

    /// Readiness check for our postgres process
    async fn pg_isready(&self, pg_bin_dir: &Utf8Path, postgres_port: u16) -> anyhow::Result<bool> {
        let bin_path = pg_bin_dir.join("pg_isready");
        let args = [
            "-h",
            "localhost",
            "-U",
            &username(),
            "-d",
            DB_NAME,
            "-p",
            &format!("{}", postgres_port),
        ];
        let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
        let envs = [
            ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
            ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
        ];
        let exitcode = Command::new(bin_path)
            .args(args)
            .envs(envs)
            .spawn()?
            .wait()
            .await?;

        Ok(exitcode.success())
    }

    /// Create our database if it doesn't exist
    ///
    /// This function is equivalent to the `diesel setup` command in the diesel CLI. We implement
    /// the same steps by hand to avoid imposing a dependency on installing diesel-cli for developers
    /// who just want to run `cargo neon_local` without knowing about diesel.
    ///
    /// Returns the database url
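    ///
    /// (Illustrative note, based on `start` below: this runs once the controller's postgres is
    /// accepting connections, and is followed by `connect_to_database` to apply an optional
    /// `storage_controller_db.startup.sql` script before the binary is launched.)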
    pub async fn setup_database(&self, postgres_port: u16) -> anyhow::Result<String> {
        let database_url = format!(
            "postgresql://{}@localhost:{}/{DB_NAME}",
            &username(),
            postgres_port
        );

        let pg_bin_dir = self.get_pg_bin_dir().await?;
        let createdb_path = pg_bin_dir.join("createdb");
        let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
        let envs = [
            ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
            ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
        ];
        let output = Command::new(&createdb_path)
            .args([
                "-h",
                "localhost",
                "-p",
                &format!("{}", postgres_port),
                "-U",
                &username(),
                "-O",
                &username(),
                DB_NAME,
            ])
            .envs(envs)
            .output()
            .await
            .expect("Failed to spawn createdb");

        if !output.status.success() {
            let stderr = String::from_utf8(output.stderr).expect("Non-UTF8 output from createdb");
            if stderr.contains("already exists") {
                tracing::info!("Database {DB_NAME} already exists");
            } else {
                anyhow::bail!("createdb failed with status {}: {stderr}", output.status);
            }
        }

        Ok(database_url)
    }

    pub async fn connect_to_database(
        &self,
        postgres_port: u16,
    ) -> anyhow::Result<(
        tokio_postgres::Client,
        tokio_postgres::Connection<tokio_postgres::Socket, tokio_postgres::tls::NoTlsStream>,
    )> {
        tokio_postgres::Config::new()
            .host("localhost")
            .port(postgres_port)
            // The user is the ambient operating system user name.
            // That is an impurity which we want to fix: TODO https://github.com/neondatabase/neon/issues/8400
            //
            // Until we get there, use the ambient operating system user name.
            // Recent tokio-postgres versions default to this if the user isn't specified,
            // but our tokio-postgres fork doesn't have this upstream commit:
            // https://github.com/sfackler/rust-postgres/commit/cb609be758f3fb5af537f04b584a2ee0cebd5e79
            // => we should rebase our fork: TODO https://github.com/neondatabase/neon/issues/8399
            .user(&username())
            .dbname(DB_NAME)
            .connect(tokio_postgres::NoTls)
            .await
            .map_err(anyhow::Error::new)
    }

    /// Wrapper for the pg_ctl binary, which we spawn as a short-lived subprocess when starting and stopping postgres
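    ///
    /// For example, the `start`/`stop` paths below invoke this as
    /// `pg_ctl(["-w", "-D", <data dir>, "-l", <log file>, "-U", <user>, "start"])` and
    /// `pg_ctl(["-D", <data dir>, "stop"])`.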
    async fn pg_ctl<I, S>(&self, args: I) -> ExitStatus
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        let pg_bin_dir = self.get_pg_bin_dir().await.unwrap();
        let bin_path = pg_bin_dir.join("pg_ctl");

        let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
        let envs = [
            ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
            ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
        ];

        Command::new(bin_path)
            .args(args)
            .envs(envs)
            .spawn()
            .expect("Failed to spawn pg_ctl, binary missing?")
            .wait()
            .await
            .expect("Failed to wait for pg_ctl termination")
    }

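    /// Start this storage controller instance: ensure its postgres database is initialized and
    /// running, apply any startup SQL script left by the test suite, then launch the
    /// `storage_controller` binary as a background process and wait for its readiness check.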
    pub async fn start(&self, start_args: NeonStorageControllerStartArgs) -> anyhow::Result<()> {
        let instance_dir = self.storage_controller_instance_dir(start_args.instance_id);
        if let Err(err) = tokio::fs::create_dir(&instance_dir).await {
            if err.kind() != std::io::ErrorKind::AlreadyExists {
                panic!("Failed to create instance dir {instance_dir:?}");
            }
        }

        if self.env.generate_local_ssl_certs {
            self.env.generate_ssl_cert(
                &instance_dir.join("server.crt"),
                &instance_dir.join("server.key"),
            )?;
        }

        let listen_url = &self.env.control_plane_api;

        let scheme = listen_url.scheme();
        let host = listen_url.host_str().unwrap();

        let (listen_port, postgres_port) = if let Some(base_port) = start_args.base_port {
            (
                base_port,
                self.config
                    .database_url
                    .expect("--base-port requires NeonStorageControllerConf::database_url")
                    .port(),
            )
        } else {
            let port = listen_url.port().unwrap();
            (port, port + 1)
        };

        self.listen_port
            .set(listen_port)
            .expect("StorageController::listen_port is only set here");

        // Do we remove the pid file on stop?
        let pg_started = self.is_postgres_running().await?;
        let pg_lib_dir = self.get_pg_lib_dir().await?;

        if !pg_started {
            // Start a vanilla Postgres process used by the storage controller for persistence.
            let pg_data_path = Utf8PathBuf::from_path_buf(self.env.base_data_dir.clone())
                .unwrap()
                .join("storage_controller_db");
            let pg_bin_dir = self.get_pg_bin_dir().await?;
            let pg_log_path = pg_data_path.join("postgres.log");

            if !tokio::fs::try_exists(&pg_data_path).await? {
                let initdb_args = [
                    "--pgdata",
                    pg_data_path.as_ref(),
                    "--username",
                    &username(),
                    "--no-sync",
                    "--no-instructions",
                ];
                tracing::info!(
                    "Initializing storage controller database with args: {:?}",
                    initdb_args
                );

                // Initialize empty database
                let initdb_path = pg_bin_dir.join("initdb");
                let mut child = Command::new(&initdb_path)
                    .envs(vec![
                        ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
                        ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
                    ])
                    .args(initdb_args)
                    .spawn()
                    .expect("Failed to spawn initdb");
                let status = child.wait().await?;
                if !status.success() {
                    anyhow::bail!("initdb failed with status {status}");
                }
            };

            // Write a minimal config file:
            // - Specify the port, since this is chosen dynamically
            // - Switch off fsync, since we're running on lightweight test environments and when e.g. scale testing
            //   the storage controller we don't want a slow local disk to interfere with that.
            //
            // NB: it's important that we rewrite this file on each start command so we propagate changes
            // from `LocalEnv`'s config file (`.neon/config`).
            tokio::fs::write(
                &pg_data_path.join("postgresql.conf"),
                format!("port = {}\nfsync=off\n", postgres_port),
            )
            .await?;

            println!("Starting storage controller database...");
            let db_start_args = [
                "-w",
                "-D",
                pg_data_path.as_ref(),
                "-l",
                pg_log_path.as_ref(),
                "-U",
                &username(),
                "start",
            ];
            tracing::info!(
                "Starting storage controller database with args: {:?}",
                db_start_args
            );

            let db_start_status = self.pg_ctl(db_start_args).await;
            let start_timeout: Duration = start_args.start_timeout.into();
            let db_start_deadline = Instant::now() + start_timeout;
            if !db_start_status.success() {
                return Err(anyhow::anyhow!(
                    "Failed to start postgres {}",
                    db_start_status.code().unwrap()
                ));
            }

            loop {
                if Instant::now() > db_start_deadline {
                    return Err(anyhow::anyhow!("Timed out waiting for postgres to start"));
                }

                match self.pg_isready(&pg_bin_dir, postgres_port).await {
                    Ok(true) => {
                        tracing::info!("storage controller postgres is now ready");
                        break;
                    }
                    Ok(false) => {
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                    Err(e) => {
                        tracing::warn!("Failed to check postgres status: {e}")
                    }
                }
            }

            self.setup_database(postgres_port).await?;
        }

        let database_url = format!("postgresql://localhost:{}/{DB_NAME}", postgres_port);

        // We support running a startup SQL script to fiddle with the database before we launch storcon.
        // This is used by the test suite.
        let startup_script_path = self
            .env
            .base_data_dir
            .join("storage_controller_db.startup.sql");
        let startup_script = match tokio::fs::read_to_string(&startup_script_path).await {
            Ok(script) => {
                tokio::fs::remove_file(startup_script_path).await?;
                script
            }
            Err(e) => {
                if e.kind() == std::io::ErrorKind::NotFound {
                    // always run some startup script so that this code path doesn't bit rot
                    "BEGIN; COMMIT;".to_string()
                } else {
                    anyhow::bail!("Failed to read startup script: {e}")
                }
            }
        };
        let (mut client, conn) = self.connect_to_database(postgres_port).await?;
        let conn = tokio::spawn(conn);
        let tx = client.build_transaction();
        let tx = tx.start().await?;
        tx.batch_execute(&startup_script).await?;
        tx.commit().await?;
        drop(client);
        conn.await??;

        let addr = format!("{}:{}", host, listen_port);
        let address_for_peers = Uri::builder()
            .scheme(scheme)
            .authority(addr.clone())
            .path_and_query("")
            .build()
            .unwrap();

        let mut args = vec![
            "--dev",
            "--database-url",
            &database_url,
            "--max-offline-interval",
            &humantime::Duration::from(self.config.max_offline).to_string(),
            "--max-warming-up-interval",
            &humantime::Duration::from(self.config.max_warming_up).to_string(),
            "--heartbeat-interval",
            &humantime::Duration::from(self.config.heartbeat_interval).to_string(),
            "--address-for-peers",
            &address_for_peers.to_string(),
        ]
        .into_iter()
        .map(|s| s.to_string())
        .collect::<Vec<_>>();

        match scheme {
            "http" => args.extend(["--listen".to_string(), addr]),
            "https" => args.extend(["--listen-https".to_string(), addr]),
            _ => {
                panic!("Unexpected url scheme in control_plane_api: {scheme}");
            }
        }

        if self.config.start_as_candidate {
            args.push("--start-as-candidate".to_string());
        }

        if self.config.use_https_pageserver_api {
            args.push("--use-https-pageserver-api".to_string());
        }

        if self.config.use_https_safekeeper_api {
            args.push("--use-https-safekeeper-api".to_string());
        }

        if self.config.use_local_compute_notifications {
            args.push("--use-local-compute-notifications".to_string());
        }

        if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() {
            args.push(format!("--ssl-ca-file={}", ssl_ca_file.to_str().unwrap()));
        }

        if let Some(private_key) = &self.private_key {
            let claims = Claims::new(None, Scope::PageServerApi);
            let jwt_token =
                encode_from_key_file(&claims, private_key).expect("failed to generate jwt token");
            args.push(format!("--jwt-token={jwt_token}"));

            let peer_claims = Claims::new(None, Scope::Admin);
            let peer_jwt_token = encode_from_key_file(&peer_claims, private_key)
                .expect("failed to generate jwt token");
            args.push(format!("--peer-jwt-token={peer_jwt_token}"));
        }

        if let Some(public_key) = &self.public_key {
            args.push(format!("--public-key=\"{public_key}\""));
        }

        if let Some(control_plane_hooks_api) = &self.env.control_plane_hooks_api {
            args.push(format!("--control-plane-url={control_plane_hooks_api}"));
        }

        if let Some(split_threshold) = self.config.split_threshold.as_ref() {
            args.push(format!("--split-threshold={split_threshold}"))
        }

        if let Some(max_split_shards) = self.config.max_split_shards.as_ref() {
            args.push(format!("--max-split-shards={max_split_shards}"))
        }

        if let Some(initial_split_threshold) = self.config.initial_split_threshold.as_ref() {
            args.push(format!(
                "--initial-split-threshold={initial_split_threshold}"
            ))
        }

        if let Some(initial_split_shards) = self.config.initial_split_shards.as_ref() {
            args.push(format!("--initial-split-shards={initial_split_shards}"))
        }

        if let Some(lag) = self.config.max_secondary_lag_bytes.as_ref() {
            args.push(format!("--max-secondary-lag-bytes={lag}"))
        }

        if let Some(threshold) = self.config.long_reconcile_threshold {
            args.push(format!(
                "--long-reconcile-threshold={}",
                humantime::Duration::from(threshold)
            ))
        }

        args.push(format!(
            "--neon-local-repo-dir={}",
            self.env.base_data_dir.display()
        ));

        if self.config.timelines_onto_safekeepers {
            args.push("--timelines-onto-safekeepers".to_string());
        }

        println!("Starting storage controller");

        background_process::start_process(
            COMMAND,
            &instance_dir,
            &self.env.storage_controller_bin(),
            args,
            vec![
                ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
                ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
            ],
            background_process::InitialPidFile::Create(self.pid_file(start_args.instance_id)),
            &start_args.start_timeout,
            || async {
                match self.ready().await {
                    Ok(_) => Ok(true),
                    Err(_) => Ok(false),
                }
            },
        )
        .await?;

        Ok(())
    }

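    /// Stop this storage controller instance. If no other storage controller instance is still
    /// running, the shared postgres database is stopped as well.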
    pub async fn stop(&self, stop_args: NeonStorageControllerStopArgs) -> anyhow::Result<()> {
        background_process::stop_process(
            stop_args.immediate,
            COMMAND,
            &self.pid_file(stop_args.instance_id),
        )?;

        let storcon_instances = self.env.storage_controller_instances().await?;
        for (instance_id, instanced_dir_path) in storcon_instances {
            if instance_id == stop_args.instance_id {
                continue;
            }

            let pid_file = instanced_dir_path.join("storage_controller.pid");
            let pid = tokio::fs::read_to_string(&pid_file)
                .await
                .map_err(|err| {
                    anyhow::anyhow!("Failed to read storcon pid file at {pid_file:?}: {err}")
                })?
                .parse::<i32>()
                .expect("pid is valid i32");

            let other_proc_alive = !background_process::process_has_stopped(Pid::from_raw(pid))?;
            if other_proc_alive {
                // There is another storage controller instance running, so we return
                // and leave the database running.
                return Ok(());
            }
        }

        let pg_data_path = self.env.base_data_dir.join("storage_controller_db");

        println!("Stopping storage controller database...");
        let pg_stop_args = ["-D", &pg_data_path.to_string_lossy(), "stop"];
        let stop_status = self.pg_ctl(pg_stop_args).await;
        if !stop_status.success() {
            match self.is_postgres_running().await {
                Ok(false) => {
                    println!("Storage controller database is already stopped");
                    return Ok(());
                }
                Ok(true) => {
                    anyhow::bail!("Failed to stop storage controller database");
                }
                Err(err) => {
                    anyhow::bail!("Failed to stop storage controller database: {err}");
                }
            }
        }

        Ok(())
    }

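    /// Check whether the storage controller's postgres database is running, by interpreting the
    /// exit code of `pg_ctl status`.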
    async fn is_postgres_running(&self) -> anyhow::Result<bool> {
        let pg_data_path = self.env.base_data_dir.join("storage_controller_db");

        let pg_status_args = ["-D", &pg_data_path.to_string_lossy(), "status"];
        let status_exitcode = self.pg_ctl(pg_status_args).await;

        // `pg_ctl status` exit codes: 3 means the server is not running and 4 means the data
        // directory is missing or inaccessible; we treat both as "not running". 0 means running.
        const PG_STATUS_NOT_RUNNING: i32 = 3;
        const PG_NO_DATA_DIR: i32 = 4;
        const PG_STATUS_RUNNING: i32 = 0;
        match status_exitcode.code() {
            Some(PG_STATUS_NOT_RUNNING) => Ok(false),
            Some(PG_NO_DATA_DIR) => Ok(false),
            Some(PG_STATUS_RUNNING) => Ok(true),
            Some(code) => Err(anyhow::anyhow!(
                "pg_ctl status returned unexpected status code: {:?}",
                code
            )),
            None => Err(anyhow::anyhow!("pg_ctl status returned no status code")),
        }
    }

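    /// Map an API path to the JWT claims required to call it: `status` and `ready` need no token,
    /// `control/...` and `debug/...` need `Scope::Admin`, and `v1/...` needs `Scope::PageServerApi`.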
    fn get_claims_for_path(path: &str) -> anyhow::Result<Option<Claims>> {
        let category = match path.find('/') {
            Some(idx) => &path[..idx],
            None => path,
        };

        match category {
            "status" | "ready" => Ok(None),
            "control" | "debug" => Ok(Some(Claims::new(None, Scope::Admin))),
            "v1" => Ok(Some(Claims::new(None, Scope::PageServerApi))),
            _ => Err(anyhow::anyhow!("Failed to determine claims for {}", path)),
        }
    }

    /// Simple HTTP request wrapper for calling into storage controller
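    ///
    /// The typed helpers below are thin wrappers around this, e.g. `node_list` calls
    /// `self.dispatch::<(), Vec<NodeDescribeResponse>>(Method::GET, "control/v1/node".to_string(), None)`.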
    async fn dispatch<RQ, RS>(
        &self,
        method: reqwest::Method,
        path: String,
        body: Option<RQ>,
    ) -> anyhow::Result<RS>
    where
        RQ: Serialize + Sized,
        RS: DeserializeOwned + Sized,
    {
        // In the special case of the `storage_controller start` subcommand, we wish
        // to use the API endpoint of the newly started storage controller in order
        // to pass the readiness check. In this scenario [`Self::listen_port`] will
        // be set (see [`Self::start`]).
        //
        // Otherwise, we infer the storage controller api endpoint from the configured
        // control plane API.
        let port = if let Some(port) = self.listen_port.get() {
            *port
        } else {
            self.env.control_plane_api.port().unwrap()
        };

        // The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
        // for general purpose API access.
        let url = Url::from_str(&format!(
            "{}://{}:{port}/{path}",
            self.env.control_plane_api.scheme(),
            self.env.control_plane_api.host_str().unwrap(),
        ))
        .unwrap();

        let mut builder = self.client.request(method, url);
        if let Some(body) = body {
            builder = builder.json(&body)
        }
        if let Some(private_key) = &self.private_key {
            println!("Getting claims for path {}", path);
            if let Some(required_claims) = Self::get_claims_for_path(&path)? {
                println!("Got claims {:?} for path {}", required_claims, path);
                let jwt_token = encode_from_key_file(&required_claims, private_key)?;
                builder = builder.header(
                    reqwest::header::AUTHORIZATION,
                    format!("Bearer {jwt_token}"),
                );
            }
        }

        let response = builder.send().await?;
        let response = response.error_from_body().await?;

        Ok(response
            .json()
            .await
            .map_err(pageserver_client::mgmt_api::Error::ReceiveBody)?)
    }

    /// Call into the attach_hook API, for use before handing out attachments to pageservers
    #[instrument(skip(self))]
    pub async fn attach_hook(
        &self,
        tenant_shard_id: TenantShardId,
        pageserver_id: NodeId,
    ) -> anyhow::Result<Option<u32>> {
        let request = AttachHookRequest {
            tenant_shard_id,
            node_id: Some(pageserver_id),
            generation_override: None,
            config: None,
        };

        let response = self
            .dispatch::<_, AttachHookResponse>(
                Method::POST,
                "debug/v1/attach-hook".to_string(),
                Some(request),
            )
            .await?;

        Ok(response.generation)
    }

    #[instrument(skip(self))]
    pub async fn inspect(
        &self,
        tenant_shard_id: TenantShardId,
    ) -> anyhow::Result<Option<(u32, NodeId)>> {
        let request = InspectRequest { tenant_shard_id };

        let response = self
            .dispatch::<_, InspectResponse>(
                Method::POST,
                "debug/v1/inspect".to_string(),
                Some(request),
            )
            .await?;

        Ok(response.attachment)
    }

    #[instrument(skip(self))]
    pub async fn tenant_create(
        &self,
        req: TenantCreateRequest,
    ) -> anyhow::Result<TenantCreateResponse> {
        self.dispatch(Method::POST, "v1/tenant".to_string(), Some(req))
            .await
    }

    #[instrument(skip(self))]
    pub async fn tenant_import(&self, tenant_id: TenantId) -> anyhow::Result<TenantCreateResponse> {
        self.dispatch::<(), TenantCreateResponse>(
            Method::POST,
            format!("debug/v1/tenant/{tenant_id}/import"),
            None,
        )
        .await
    }

    #[instrument(skip(self))]
    pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> {
        self.dispatch::<(), _>(
            Method::GET,
            format!("debug/v1/tenant/{tenant_id}/locate"),
            None,
        )
        .await
    }

    #[instrument(skip_all, fields(node_id=%req.node_id))]
    pub async fn node_register(&self, req: NodeRegisterRequest) -> anyhow::Result<()> {
        self.dispatch::<_, ()>(Method::POST, "control/v1/node".to_string(), Some(req))
            .await
    }

    #[instrument(skip_all, fields(node_id=%req.node_id))]
    pub async fn node_configure(&self, req: NodeConfigureRequest) -> anyhow::Result<()> {
        self.dispatch::<_, ()>(
            Method::PUT,
            format!("control/v1/node/{}/config", req.node_id),
            Some(req),
        )
        .await
    }

    pub async fn node_list(&self) -> anyhow::Result<Vec<NodeDescribeResponse>> {
        self.dispatch::<(), Vec<NodeDescribeResponse>>(
            Method::GET,
            "control/v1/node".to_string(),
            None,
        )
        .await
    }

    #[instrument(skip(self))]
    pub async fn ready(&self) -> anyhow::Result<()> {
        self.dispatch::<(), ()>(Method::GET, "ready".to_string(), None)
            .await
    }

    #[instrument(skip_all, fields(%tenant_id, timeline_id=%req.new_timeline_id))]
    pub async fn tenant_timeline_create(
        &self,
        tenant_id: TenantId,
        req: TimelineCreateRequest,
    ) -> anyhow::Result<TimelineInfo> {
        self.dispatch(
            Method::POST,
            format!("v1/tenant/{tenant_id}/timeline"),
            Some(req),
        )
        .await
    }

    pub async fn set_tenant_config(&self, req: &TenantConfigRequest) -> anyhow::Result<()> {
        self.dispatch(Method::PUT, "v1/tenant/config".to_string(), Some(req))
            .await
    }
}
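
// A minimal usage sketch (hypothetical driver code, not part of this module): construct the
// controller from a `LocalEnv`, start the default instance, and stop it again.
//
//     let storcon = StorageController::from_env(&env);
//     storcon
//         .start(NeonStorageControllerStartArgs::with_default_instance_id(
//             "30s".parse::<humantime::Duration>()?,
//         ))
//         .await?;
//     storcon.ready().await?;
//     storcon
//         .stop(NeonStorageControllerStopArgs::with_default_instance_id(false))
//         .await?;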