Line data Source code
1 : use std::ffi::OsStr;
2 : use std::fs;
3 : use std::net::SocketAddr;
4 : use std::path::PathBuf;
5 : use std::process::ExitStatus;
6 : use std::str::FromStr;
7 : use std::sync::OnceLock;
8 : use std::time::{Duration, Instant};
9 :
10 : use camino::{Utf8Path, Utf8PathBuf};
11 : use hyper0::Uri;
12 : use nix::unistd::Pid;
13 : use pageserver_api::controller_api::{
14 : NodeConfigureRequest, NodeDescribeResponse, NodeRegisterRequest, TenantCreateRequest,
15 : TenantCreateResponse, TenantLocateResponse,
16 : };
17 : use pageserver_api::models::{TimelineCreateRequest, TimelineInfo};
18 : use pageserver_api::shard::TenantShardId;
19 : use pageserver_client::mgmt_api::ResponseErrorMessageExt;
20 : use postgres_backend::AuthType;
21 : use reqwest::Method;
22 : use serde::de::DeserializeOwned;
23 : use serde::{Deserialize, Serialize};
24 : use tokio::process::Command;
25 : use tracing::instrument;
26 : use url::Url;
27 : use utils::auth::{Claims, Scope, encode_from_key_file};
28 : use utils::id::{NodeId, TenantId};
29 : use whoami::username;
30 :
31 : use crate::background_process;
32 : use crate::local_env::{LocalEnv, NeonStorageControllerConf};
33 :
34 : pub struct StorageController {
35 : env: LocalEnv,
36 : private_key: Option<Vec<u8>>,
37 : public_key: Option<String>,
38 : client: reqwest::Client,
39 : config: NeonStorageControllerConf,
40 :
41 : // The listen address is learned when starting the storage controller,
42 : // hence the use of OnceLock to init it at the right time.
43 : listen: OnceLock<SocketAddr>,
44 : }
45 :
46 : const COMMAND: &str = "storage_controller";
47 :
48 : const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
49 :
50 : const DB_NAME: &str = "storage_controller";
51 :
52 : pub struct NeonStorageControllerStartArgs {
53 : pub instance_id: u8,
54 : pub base_port: Option<u16>,
55 : pub start_timeout: humantime::Duration,
56 : }
57 :
58 : impl NeonStorageControllerStartArgs {
59 0 : pub fn with_default_instance_id(start_timeout: humantime::Duration) -> Self {
60 0 : Self {
61 0 : instance_id: 1,
62 0 : base_port: None,
63 0 : start_timeout,
64 0 : }
65 0 : }
66 : }
67 :
68 : pub struct NeonStorageControllerStopArgs {
69 : pub instance_id: u8,
70 : pub immediate: bool,
71 : }
72 :
73 : impl NeonStorageControllerStopArgs {
74 0 : pub fn with_default_instance_id(immediate: bool) -> Self {
75 0 : Self {
76 0 : instance_id: 1,
77 0 : immediate,
78 0 : }
79 0 : }
80 : }
81 :
82 0 : #[derive(Serialize, Deserialize)]
83 : pub struct AttachHookRequest {
84 : pub tenant_shard_id: TenantShardId,
85 : pub node_id: Option<NodeId>,
86 : pub generation_override: Option<i32>,
87 : }
88 :
89 0 : #[derive(Serialize, Deserialize)]
90 : pub struct AttachHookResponse {
91 : #[serde(rename = "gen")]
92 : pub generation: Option<u32>,
93 : }
94 :
95 0 : #[derive(Serialize, Deserialize)]
96 : pub struct InspectRequest {
97 : pub tenant_shard_id: TenantShardId,
98 : }
99 :
100 0 : #[derive(Serialize, Deserialize)]
101 : pub struct InspectResponse {
102 : pub attachment: Option<(u32, NodeId)>,
103 : }
104 :
105 : impl StorageController {
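      : /// Build a handle from the local environment. If the pageservers are configured with
      : /// NeonJWT auth, this also loads the private and public key material so the controller
      : /// can mint and validate JWT tokens.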
106 0 : pub fn from_env(env: &LocalEnv) -> Self {
107 0 : // Assume all pageservers have symmetric auth configuration: this service
108 0 : // expects to use one JWT token to talk to all of them.
109 0 : let ps_conf = env
110 0 : .pageservers
111 0 : .first()
112 0 : .expect("Config is validated to contain at least one pageserver");
113 0 : let (private_key, public_key) = match ps_conf.http_auth_type {
114 0 : AuthType::Trust => (None, None),
115 : AuthType::NeonJWT => {
116 0 : let private_key_path = env.get_private_key_path();
117 0 : let private_key = fs::read(private_key_path).expect("failed to read private key");
118 0 :
119 0 : // If pageserver auth is enabled, this implicitly enables auth for this service,
120 0 : // using the same credentials.
121 0 : let public_key_path =
122 0 : camino::Utf8PathBuf::try_from(env.base_data_dir.join("auth_public_key.pem"))
123 0 : .unwrap();
124 :
125 : // This service takes keys as a string rather than as a path to a file/dir: read the key into memory.
126 0 : let public_key = if std::fs::metadata(&public_key_path)
127 0 : .expect("Can't stat public key")
128 0 : .is_dir()
129 : {
130 : // Our config may specify a directory: this is for the pageserver's ability to handle multiple
131 : // keys. We only use one key at a time, so we arbitrarily load the first one in the directory.
132 0 : let mut dir =
133 0 : std::fs::read_dir(&public_key_path).expect("Can't readdir public key path");
134 0 : let dent = dir
135 0 : .next()
136 0 : .expect("Empty key dir")
137 0 : .expect("Error reading key dir");
138 0 :
139 0 : std::fs::read_to_string(dent.path()).expect("Can't read public key")
140 : } else {
141 0 : std::fs::read_to_string(&public_key_path).expect("Can't read public key")
142 : };
143 0 : (Some(private_key), Some(public_key))
144 : }
145 : };
146 :
147 0 : Self {
148 0 : env: env.clone(),
149 0 : private_key,
150 0 : public_key,
151 0 : client: reqwest::ClientBuilder::new()
152 0 : .build()
153 0 : .expect("Failed to construct http client"),
154 0 : config: env.storage_controller.clone(),
155 0 : listen: OnceLock::default(),
156 0 : }
157 0 : }
158 :
159 0 : fn storage_controller_instance_dir(&self, instance_id: u8) -> PathBuf {
160 0 : self.env
161 0 : .base_data_dir
162 0 : .join(format!("storage_controller_{}", instance_id))
163 0 : }
164 :
165 0 : fn pid_file(&self, instance_id: u8) -> Utf8PathBuf {
166 0 : Utf8PathBuf::from_path_buf(
167 0 : self.storage_controller_instance_dir(instance_id)
168 0 : .join("storage_controller.pid"),
169 0 : )
170 0 : .expect("non-Unicode path")
171 0 : }
172 :
173 : /// Find the directory containing postgres subdirectories, such as `bin` and `lib`
174 : ///
175 : /// This usually uses postgres version STORAGE_CONTROLLER_POSTGRES_VERSION, but will fall back
176 : /// to other versions if that one isn't found. Some automated tests create circumstances
177 : /// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`.
178 0 : async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result<Utf8PathBuf> {
179 0 : let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14];
180 :
181 0 : for v in prefer_versions {
182 0 : let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap();
183 0 : if tokio::fs::try_exists(&path).await? {
184 0 : return Ok(path);
185 0 : }
186 : }
187 :
188 : // Fall through
189 0 : anyhow::bail!(
190 0 : "Postgres directory '{}' not found in {}",
191 0 : dir_name,
192 0 : self.env.pg_distrib_dir.display(),
193 0 : );
194 0 : }
195 :
196 0 : pub async fn get_pg_bin_dir(&self) -> anyhow::Result<Utf8PathBuf> {
197 0 : self.get_pg_dir("bin").await
198 0 : }
199 :
200 0 : pub async fn get_pg_lib_dir(&self) -> anyhow::Result<Utf8PathBuf> {
201 0 : self.get_pg_dir("lib").await
202 0 : }
203 :
204 : /// Readiness check for our postgres process
205 0 : async fn pg_isready(&self, pg_bin_dir: &Utf8Path, postgres_port: u16) -> anyhow::Result<bool> {
206 0 : let bin_path = pg_bin_dir.join("pg_isready");
207 0 : let args = [
208 0 : "-h",
209 0 : "localhost",
210 0 : "-U",
211 0 : &username(),
212 0 : "-d",
213 0 : DB_NAME,
214 0 : "-p",
215 0 : &format!("{}", postgres_port),
216 0 : ];
217 0 : let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
218 0 : let envs = [
219 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
220 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
221 0 : ];
222 0 : let exitcode = Command::new(bin_path)
223 0 : .args(args)
224 0 : .envs(envs)
225 0 : .spawn()?
226 0 : .wait()
227 0 : .await?;
228 :
229 0 : Ok(exitcode.success())
230 0 : }
231 :
232 : /// Create our database if it doesn't exist
233 : ///
234 : /// This function is equivalent to the `diesel setup` command in the diesel CLI. We implement
235 : /// the same steps by hand to avoid imposing a dependency on installing diesel-cli for developers
236 : /// who just want to run `cargo neon_local` without knowing about diesel.
237 : ///
238 : /// Returns the database url
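      : ///
      : /// For reference, the effect is roughly that of running
      : /// `createdb -h localhost -p <postgres_port> -U <os_user> -O <os_user> storage_controller`
      : /// against the locally started postgres (an illustrative sketch; the port and OS user
      : /// name depend on the local environment).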
239 0 : pub async fn setup_database(&self, postgres_port: u16) -> anyhow::Result<String> {
240 0 : let database_url = format!(
241 0 : "postgresql://{}@localhost:{}/{DB_NAME}",
242 0 : &username(),
243 0 : postgres_port
244 0 : );
245 :
246 0 : let pg_bin_dir = self.get_pg_bin_dir().await?;
247 0 : let createdb_path = pg_bin_dir.join("createdb");
248 0 : let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
249 0 : let envs = [
250 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
251 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
252 0 : ];
253 0 : let output = Command::new(&createdb_path)
254 0 : .args([
255 0 : "-h",
256 0 : "localhost",
257 0 : "-p",
258 0 : &format!("{}", postgres_port),
259 0 : "-U",
260 0 : &username(),
261 0 : "-O",
262 0 : &username(),
263 0 : DB_NAME,
264 0 : ])
265 0 : .envs(envs)
266 0 : .output()
267 0 : .await
268 0 : .expect("Failed to spawn createdb");
269 0 :
270 0 : if !output.status.success() {
271 0 : let stderr = String::from_utf8(output.stderr).expect("Non-UTF8 output from createdb");
272 0 : if stderr.contains("already exists") {
273 0 : tracing::info!("Database {DB_NAME} already exists");
274 : } else {
275 0 : anyhow::bail!("createdb failed with status {}: {stderr}", output.status);
276 : }
277 0 : }
278 :
279 0 : Ok(database_url)
280 0 : }
281 :
282 0 : pub async fn connect_to_database(
283 0 : &self,
284 0 : postgres_port: u16,
285 0 : ) -> anyhow::Result<(
286 0 : tokio_postgres::Client,
287 0 : tokio_postgres::Connection<tokio_postgres::Socket, tokio_postgres::tls::NoTlsStream>,
288 0 : )> {
289 0 : tokio_postgres::Config::new()
290 0 : .host("localhost")
291 0 : .port(postgres_port)
292 0 : // The user is the ambient operating system user name.
293 0 : // That is an impurity which we want to fix => TODO https://github.com/neondatabase/neon/issues/8400
294 0 : //
295 0 : // Until we get there, use the ambient operating system user name.
296 0 : // Recent tokio-postgres versions default to this if the user isn't specified.
297 0 : // But our tokio-postgres fork doesn't have this upstream commit:
298 0 : // https://github.com/sfackler/rust-postgres/commit/cb609be758f3fb5af537f04b584a2ee0cebd5e79
299 0 : // => we should rebase our fork => TODO https://github.com/neondatabase/neon/issues/8399
300 0 : .user(&username())
301 0 : .dbname(DB_NAME)
302 0 : .connect(tokio_postgres::NoTls)
303 0 : .await
304 0 : .map_err(anyhow::Error::new)
305 0 : }
306 :
307 : /// Wrapper for the pg_ctl binary, which we spawn as a short-lived subprocess when starting and stopping postgres
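      : ///
      : /// Within this file it is invoked roughly as (paths depend on the repo dir):
      : /// `pg_ctl -w -D <pgdata> -l <pgdata>/postgres.log -U <os_user> start`,
      : /// `pg_ctl -D <pgdata> status`, and `pg_ctl -D <pgdata> stop`.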
308 0 : async fn pg_ctl<I, S>(&self, args: I) -> ExitStatus
309 0 : where
310 0 : I: IntoIterator<Item = S>,
311 0 : S: AsRef<OsStr>,
312 0 : {
313 0 : let pg_bin_dir = self.get_pg_bin_dir().await.unwrap();
314 0 : let bin_path = pg_bin_dir.join("pg_ctl");
315 :
316 0 : let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
317 0 : let envs = [
318 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
319 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
320 0 : ];
321 0 :
322 0 : Command::new(bin_path)
323 0 : .args(args)
324 0 : .envs(envs)
325 0 : .spawn()
326 0 : .expect("Failed to spawn pg_ctl, binary missing?")
327 0 : .wait()
328 0 : .await
329 0 : .expect("Failed to wait for pg_ctl termination")
330 0 : }
331 :
332 0 : pub async fn start(&self, start_args: NeonStorageControllerStartArgs) -> anyhow::Result<()> {
333 0 : let instance_dir = self.storage_controller_instance_dir(start_args.instance_id);
334 0 : if let Err(err) = tokio::fs::create_dir(&instance_dir).await {
335 0 : if err.kind() != std::io::ErrorKind::AlreadyExists {
336 0 : panic!("Failed to create instance dir {instance_dir:?}");
337 0 : }
338 0 : }
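      : // Decide where to listen and which port the storage controller's postgres uses:
      : // with an explicit base port we listen there and take the postgres port from the
      : // configured `database_url`; otherwise we listen on the control plane API address
      : // and run postgres one port above it.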
339 :
340 0 : let (listen, postgres_port) = {
341 0 : if let Some(base_port) = start_args.base_port {
342 0 : (
343 0 : format!("127.0.0.1:{base_port}"),
344 0 : self.config
345 0 : .database_url
346 0 : .expect("--base-port requires NeonStorageControllerConf::database_url")
347 0 : .port(),
348 0 : )
349 : } else {
350 0 : let listen_url = self.env.control_plane_api.clone();
351 0 :
352 0 : let listen = format!(
353 0 : "{}:{}",
354 0 : listen_url.host_str().unwrap(),
355 0 : listen_url.port().unwrap()
356 0 : );
357 0 :
358 0 : (listen, listen_url.port().unwrap() + 1)
359 : }
360 : };
361 :
362 0 : let socket_addr = listen
363 0 : .parse()
364 0 : .expect("listen address is a valid socket address");
365 0 : self.listen
366 0 : .set(socket_addr)
367 0 : .expect("StorageController::listen is only set here");
368 :
369 : // Do we remove the pid file on stop?
370 0 : let pg_started = self.is_postgres_running().await?;
371 0 : let pg_lib_dir = self.get_pg_lib_dir().await?;
372 :
373 0 : if !pg_started {
374 : // Start a vanilla Postgres process used by the storage controller for persistence.
375 0 : let pg_data_path = Utf8PathBuf::from_path_buf(self.env.base_data_dir.clone())
376 0 : .unwrap()
377 0 : .join("storage_controller_db");
378 0 : let pg_bin_dir = self.get_pg_bin_dir().await?;
379 0 : let pg_log_path = pg_data_path.join("postgres.log");
380 0 :
381 0 : if !tokio::fs::try_exists(&pg_data_path).await? {
382 0 : let initdb_args = [
383 0 : "--pgdata",
384 0 : pg_data_path.as_ref(),
385 0 : "--username",
386 0 : &username(),
387 0 : "--no-sync",
388 0 : "--no-instructions",
389 0 : ];
390 0 : tracing::info!(
391 0 : "Initializing storage controller database with args: {:?}",
392 : initdb_args
393 : );
394 :
395 : // Initialize empty database
396 0 : let initdb_path = pg_bin_dir.join("initdb");
397 0 : let mut child = Command::new(&initdb_path)
398 0 : .envs(vec![
399 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
400 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
401 0 : ])
402 0 : .args(initdb_args)
403 0 : .spawn()
404 0 : .expect("Failed to spawn initdb");
405 0 : let status = child.wait().await?;
406 0 : if !status.success() {
407 0 : anyhow::bail!("initdb failed with status {status}");
408 0 : }
409 0 : };
410 :
411 : // Write a minimal config file:
412 : // - Specify the port, since this is chosen dynamically
413 : // - Switch off fsync, since we're running on lightweight test environments and when e.g. scale testing
414 : // the storage controller we don't want a slow local disk to interfere with that.
415 : //
416 : // NB: it's important that we rewrite this file on each start command so we propagate changes
417 : // from `LocalEnv`'s config file (`.neon/config`).
418 0 : tokio::fs::write(
419 0 : &pg_data_path.join("postgresql.conf"),
420 0 : format!("port = {}\nfsync=off\n", postgres_port),
421 0 : )
422 0 : .await?;
423 :
424 0 : println!("Starting storage controller database...");
425 0 : let db_start_args = [
426 0 : "-w",
427 0 : "-D",
428 0 : pg_data_path.as_ref(),
429 0 : "-l",
430 0 : pg_log_path.as_ref(),
431 0 : "-U",
432 0 : &username(),
433 0 : "start",
434 0 : ];
435 0 : tracing::info!(
436 0 : "Starting storage controller database with args: {:?}",
437 : db_start_args
438 : );
439 :
440 0 : let db_start_status = self.pg_ctl(db_start_args).await;
441 0 : let start_timeout: Duration = start_args.start_timeout.into();
442 0 : let db_start_deadline = Instant::now() + start_timeout;
443 0 : if !db_start_status.success() {
444 0 : return Err(anyhow::anyhow!(
445 0 : "Failed to start postgres {}",
446 0 : db_start_status.code().unwrap()
447 0 : ));
448 0 : }
449 :
450 : loop {
451 0 : if Instant::now() > db_start_deadline {
452 0 : return Err(anyhow::anyhow!("Timed out waiting for postgres to start"));
453 0 : }
454 0 :
455 0 : match self.pg_isready(&pg_bin_dir, postgres_port).await {
456 : Ok(true) => {
457 0 : tracing::info!("storage controller postgres is now ready");
458 0 : break;
459 : }
460 : Ok(false) => {
461 0 : tokio::time::sleep(Duration::from_millis(100)).await;
462 : }
463 0 : Err(e) => {
464 0 : tracing::warn!("Failed to check postgres status: {e}")
465 : }
466 : }
467 : }
468 :
469 0 : self.setup_database(postgres_port).await?;
470 0 : }
471 :
472 0 : let database_url = format!("postgresql://localhost:{}/{DB_NAME}", postgres_port);
473 0 :
474 0 : // We support running a startup SQL script to fiddle with the database before we launch storcon.
475 0 : // This is used by the test suite.
476 0 : let startup_script_path = self
477 0 : .env
478 0 : .base_data_dir
479 0 : .join("storage_controller_db.startup.sql");
480 0 : let startup_script = match tokio::fs::read_to_string(&startup_script_path).await {
481 0 : Ok(script) => {
482 0 : tokio::fs::remove_file(startup_script_path).await?;
483 0 : script
484 : }
485 0 : Err(e) => {
486 0 : if e.kind() == std::io::ErrorKind::NotFound {
487 : // always run some startup script so that this code path doesn't bit rot
488 0 : "BEGIN; COMMIT;".to_string()
489 : } else {
490 0 : anyhow::bail!("Failed to read startup script: {e}")
491 : }
492 : }
493 : };
494 0 : let (mut client, conn) = self.connect_to_database(postgres_port).await?;
495 0 : let conn = tokio::spawn(conn);
496 0 : let tx = client.build_transaction();
497 0 : let tx = tx.start().await?;
498 0 : tx.batch_execute(&startup_script).await?;
499 0 : tx.commit().await?;
500 0 : drop(client);
501 0 : conn.await??;
502 :
503 0 : let listen = self
504 0 : .listen
505 0 : .get()
506 0 : .expect("cell is set earlier in this function");
507 0 : let address_for_peers = Uri::builder()
508 0 : .scheme("http")
509 0 : .authority(format!("{}:{}", listen.ip(), listen.port()))
510 0 : .path_and_query("")
511 0 : .build()
512 0 : .unwrap();
513 0 :
514 0 : let mut args = vec![
515 0 : "-l",
516 0 : &listen.to_string(),
517 0 : "--dev",
518 0 : "--database-url",
519 0 : &database_url,
520 0 : "--max-offline-interval",
521 0 : &humantime::Duration::from(self.config.max_offline).to_string(),
522 0 : "--max-warming-up-interval",
523 0 : &humantime::Duration::from(self.config.max_warming_up).to_string(),
524 0 : "--heartbeat-interval",
525 0 : &humantime::Duration::from(self.config.heartbeat_interval).to_string(),
526 0 : "--address-for-peers",
527 0 : &address_for_peers.to_string(),
528 0 : ]
529 0 : .into_iter()
530 0 : .map(|s| s.to_string())
531 0 : .collect::<Vec<_>>();
532 0 :
533 0 : if self.config.start_as_candidate {
534 0 : args.push("--start-as-candidate".to_string());
535 0 : }
536 :
537 0 : if self.config.use_https_pageserver_api {
538 0 : args.push("--use-https-pageserver-api".to_string());
539 0 : }
540 :
541 0 : if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() {
542 0 : args.push(format!("--ssl-ca-file={}", ssl_ca_file.to_str().unwrap()));
543 0 : }
544 :
545 0 : if let Some(private_key) = &self.private_key {
546 0 : let claims = Claims::new(None, Scope::PageServerApi);
547 0 : let jwt_token =
548 0 : encode_from_key_file(&claims, private_key).expect("failed to generate jwt token");
549 0 : args.push(format!("--jwt-token={jwt_token}"));
550 0 :
551 0 : let peer_claims = Claims::new(None, Scope::Admin);
552 0 : let peer_jwt_token = encode_from_key_file(&peer_claims, private_key)
553 0 : .expect("failed to generate jwt token");
554 0 : args.push(format!("--peer-jwt-token={peer_jwt_token}"));
555 0 : }
556 :
557 0 : if let Some(public_key) = &self.public_key {
558 0 : args.push(format!("--public-key=\"{public_key}\""));
559 0 : }
560 :
561 0 : if let Some(control_plane_compute_hook_api) = &self.env.control_plane_compute_hook_api {
562 0 : args.push(format!(
563 0 : "--compute-hook-url={control_plane_compute_hook_api}"
564 0 : ));
565 0 : }
566 :
567 0 : if let Some(split_threshold) = self.config.split_threshold.as_ref() {
568 0 : args.push(format!("--split-threshold={split_threshold}"))
569 0 : }
570 :
571 0 : if let Some(lag) = self.config.max_secondary_lag_bytes.as_ref() {
572 0 : args.push(format!("--max-secondary-lag-bytes={lag}"))
573 0 : }
574 :
575 0 : if let Some(threshold) = self.config.long_reconcile_threshold {
576 0 : args.push(format!(
577 0 : "--long-reconcile-threshold={}",
578 0 : humantime::Duration::from(threshold)
579 0 : ))
580 0 : }
581 :
582 0 : args.push(format!(
583 0 : "--neon-local-repo-dir={}",
584 0 : self.env.base_data_dir.display()
585 0 : ));
586 0 :
587 0 : if self.config.timelines_onto_safekeepers {
588 0 : args.push("--timelines-onto-safekeepers".to_string());
589 0 : }
590 :
591 0 : background_process::start_process(
592 0 : COMMAND,
593 0 : &instance_dir,
594 0 : &self.env.storage_controller_bin(),
595 0 : args,
596 0 : vec![
597 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
598 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
599 0 : ],
600 0 : background_process::InitialPidFile::Create(self.pid_file(start_args.instance_id)),
601 0 : &start_args.start_timeout,
602 0 : || async {
603 0 : match self.ready().await {
604 0 : Ok(_) => Ok(true),
605 0 : Err(_) => Ok(false),
606 : }
607 0 : },
608 0 : )
609 0 : .await?;
610 :
611 0 : Ok(())
612 0 : }
613 :
614 0 : pub async fn stop(&self, stop_args: NeonStorageControllerStopArgs) -> anyhow::Result<()> {
615 0 : background_process::stop_process(
616 0 : stop_args.immediate,
617 0 : COMMAND,
618 0 : &self.pid_file(stop_args.instance_id),
619 0 : )?;
620 :
621 0 : let storcon_instances = self.env.storage_controller_instances().await?;
622 0 : for (instance_id, instanced_dir_path) in storcon_instances {
623 0 : if instance_id == stop_args.instance_id {
624 0 : continue;
625 0 : }
626 0 :
627 0 : let pid_file = instanced_dir_path.join("storage_controller.pid");
628 0 : let pid = tokio::fs::read_to_string(&pid_file)
629 0 : .await
630 0 : .map_err(|err| {
631 0 : anyhow::anyhow!("Failed to read storcon pid file at {pid_file:?}: {err}")
632 0 : })?
633 0 : .parse::<i32>()
634 0 : .expect("pid is valid i32");
635 :
636 0 : let other_proc_alive = !background_process::process_has_stopped(Pid::from_raw(pid))?;
637 0 : if other_proc_alive {
638 : // There is another storage controller instance running, so we return
639 : // and leave the database running.
640 0 : return Ok(());
641 0 : }
642 : }
643 :
644 0 : let pg_data_path = self.env.base_data_dir.join("storage_controller_db");
645 0 :
646 0 : println!("Stopping storage controller database...");
647 0 : let pg_stop_args = ["-D", &pg_data_path.to_string_lossy(), "stop"];
648 0 : let stop_status = self.pg_ctl(pg_stop_args).await;
649 0 : if !stop_status.success() {
650 0 : match self.is_postgres_running().await {
651 : Ok(false) => {
652 0 : println!("Storage controller database is already stopped");
653 0 : return Ok(());
654 : }
655 : Ok(true) => {
656 0 : anyhow::bail!("Failed to stop storage controller database");
657 : }
658 0 : Err(err) => {
659 0 : anyhow::bail!("Failed to stop storage controller database: {err}");
660 : }
661 : }
662 0 : }
663 0 :
664 0 : Ok(())
665 0 : }
666 :
667 0 : async fn is_postgres_running(&self) -> anyhow::Result<bool> {
668 0 : let pg_data_path = self.env.base_data_dir.join("storage_controller_db");
669 0 :
670 0 : let pg_status_args = ["-D", &pg_data_path.to_string_lossy(), "status"];
671 0 : let status_exitcode = self.pg_ctl(pg_status_args).await;
672 :
673 : // pg_ctl status exits with code 3 if postgres is not running and 4 if the data directory
674 : // is missing or inaccessible; exit code 0 means postgres is running.
675 : const PG_STATUS_NOT_RUNNING: i32 = 3;
676 : const PG_NO_DATA_DIR: i32 = 4;
677 : const PG_STATUS_RUNNING: i32 = 0;
678 0 : match status_exitcode.code() {
679 0 : Some(PG_STATUS_NOT_RUNNING) => Ok(false),
680 0 : Some(PG_NO_DATA_DIR) => Ok(false),
681 0 : Some(PG_STATUS_RUNNING) => Ok(true),
682 0 : Some(code) => Err(anyhow::anyhow!(
683 0 : "pg_ctl status returned unexpected status code: {:?}",
684 0 : code
685 0 : )),
686 0 : None => Err(anyhow::anyhow!("pg_ctl status returned no status code")),
687 : }
688 0 : }
689 :
690 0 : fn get_claims_for_path(path: &str) -> anyhow::Result<Option<Claims>> {
691 0 : let category = match path.find('/') {
692 0 : Some(idx) => &path[..idx],
693 0 : None => path,
694 : };
695 :
696 0 : match category {
697 0 : "status" | "ready" => Ok(None),
698 0 : "control" | "debug" => Ok(Some(Claims::new(None, Scope::Admin))),
699 0 : "v1" => Ok(Some(Claims::new(None, Scope::PageServerApi))),
700 0 : _ => Err(anyhow::anyhow!("Failed to determine claims for {}", path)),
701 : }
702 0 : }
703 :
704 : /// Simple HTTP request wrapper for calling into the storage controller
705 0 : async fn dispatch<RQ, RS>(
706 0 : &self,
707 0 : method: reqwest::Method,
708 0 : path: String,
709 0 : body: Option<RQ>,
710 0 : ) -> anyhow::Result<RS>
711 0 : where
712 0 : RQ: Serialize + Sized,
713 0 : RS: DeserializeOwned + Sized,
714 0 : {
715 : // In the special case of the `storage_controller start` subcommand, we wish
716 : // to use the API endpoint of the newly started storage controller in order
717 : // to pass the readiness check. In this scenario [`Self::listen`] will be set
718 : // (see [`Self::start`]).
719 : //
720 : // Otherwise, we infer the storage controller api endpoint from the configured
721 : // control plane API.
722 0 : let url = if let Some(socket_addr) = self.listen.get() {
723 0 : Url::from_str(&format!(
724 0 : "http://{}:{}/{path}",
725 0 : socket_addr.ip().to_canonical(),
726 0 : socket_addr.port()
727 0 : ))
728 0 : .unwrap()
729 : } else {
730 : // The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
731 : // for general purpose API access.
732 0 : let listen_url = self.env.control_plane_api.clone();
733 0 : Url::from_str(&format!(
734 0 : "http://{}:{}/{path}",
735 0 : listen_url.host_str().unwrap(),
736 0 : listen_url.port().unwrap()
737 0 : ))
738 0 : .unwrap()
739 : };
740 :
741 0 : let mut builder = self.client.request(method, url);
742 0 : if let Some(body) = body {
743 0 : builder = builder.json(&body)
744 0 : }
745 0 : if let Some(private_key) = &self.private_key {
746 0 : println!("Getting claims for path {}", path);
747 0 : if let Some(required_claims) = Self::get_claims_for_path(&path)? {
748 0 : println!("Got claims {:?} for path {}", required_claims, path);
749 0 : let jwt_token = encode_from_key_file(&required_claims, private_key)?;
750 0 : builder = builder.header(
751 0 : reqwest::header::AUTHORIZATION,
752 0 : format!("Bearer {jwt_token}"),
753 0 : );
754 0 : }
755 0 : }
756 :
757 0 : let response = builder.send().await?;
758 0 : let response = response.error_from_body().await?;
759 :
760 0 : Ok(response
761 0 : .json()
762 0 : .await
763 0 : .map_err(pageserver_client::mgmt_api::Error::ReceiveBody)?)
764 0 : }
765 :
766 : /// Call into the attach_hook API, for use before handing out attachments to pageservers
767 : #[instrument(skip(self))]
768 : pub async fn attach_hook(
769 : &self,
770 : tenant_shard_id: TenantShardId,
771 : pageserver_id: NodeId,
772 : ) -> anyhow::Result<Option<u32>> {
773 : let request = AttachHookRequest {
774 : tenant_shard_id,
775 : node_id: Some(pageserver_id),
776 : generation_override: None,
777 : };
778 :
779 : let response = self
780 : .dispatch::<_, AttachHookResponse>(
781 : Method::POST,
782 : "debug/v1/attach-hook".to_string(),
783 : Some(request),
784 : )
785 : .await?;
786 :
787 : Ok(response.generation)
788 : }
789 :
790 : #[instrument(skip(self))]
791 : pub async fn inspect(
792 : &self,
793 : tenant_shard_id: TenantShardId,
794 : ) -> anyhow::Result<Option<(u32, NodeId)>> {
795 : let request = InspectRequest { tenant_shard_id };
796 :
797 : let response = self
798 : .dispatch::<_, InspectResponse>(
799 : Method::POST,
800 : "debug/v1/inspect".to_string(),
801 : Some(request),
802 : )
803 : .await?;
804 :
805 : Ok(response.attachment)
806 : }
807 :
808 : #[instrument(skip(self))]
809 : pub async fn tenant_create(
810 : &self,
811 : req: TenantCreateRequest,
812 : ) -> anyhow::Result<TenantCreateResponse> {
813 : self.dispatch(Method::POST, "v1/tenant".to_string(), Some(req))
814 : .await
815 : }
816 :
817 : #[instrument(skip(self))]
818 : pub async fn tenant_import(&self, tenant_id: TenantId) -> anyhow::Result<TenantCreateResponse> {
819 : self.dispatch::<(), TenantCreateResponse>(
820 : Method::POST,
821 : format!("debug/v1/tenant/{tenant_id}/import"),
822 : None,
823 : )
824 : .await
825 : }
826 :
827 : #[instrument(skip(self))]
828 : pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> {
829 : self.dispatch::<(), _>(
830 : Method::GET,
831 : format!("debug/v1/tenant/{tenant_id}/locate"),
832 : None,
833 : )
834 : .await
835 : }
836 :
837 : #[instrument(skip_all, fields(node_id=%req.node_id))]
838 : pub async fn node_register(&self, req: NodeRegisterRequest) -> anyhow::Result<()> {
839 : self.dispatch::<_, ()>(Method::POST, "control/v1/node".to_string(), Some(req))
840 : .await
841 : }
842 :
843 : #[instrument(skip_all, fields(node_id=%req.node_id))]
844 : pub async fn node_configure(&self, req: NodeConfigureRequest) -> anyhow::Result<()> {
845 : self.dispatch::<_, ()>(
846 : Method::PUT,
847 : format!("control/v1/node/{}/config", req.node_id),
848 : Some(req),
849 : )
850 : .await
851 : }
852 :
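      : /// List the nodes currently known to the storage controller.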
853 0 : pub async fn node_list(&self) -> anyhow::Result<Vec<NodeDescribeResponse>> {
854 0 : self.dispatch::<(), Vec<NodeDescribeResponse>>(
855 0 : Method::GET,
856 0 : "control/v1/node".to_string(),
857 0 : None,
858 0 : )
859 0 : .await
860 0 : }
861 :
862 : #[instrument(skip(self))]
863 : pub async fn ready(&self) -> anyhow::Result<()> {
864 : self.dispatch::<(), ()>(Method::GET, "ready".to_string(), None)
865 : .await
866 : }
867 :
868 : #[instrument(skip_all, fields(%tenant_id, timeline_id=%req.new_timeline_id))]
869 : pub async fn tenant_timeline_create(
870 : &self,
871 : tenant_id: TenantId,
872 : req: TimelineCreateRequest,
873 : ) -> anyhow::Result<TimelineInfo> {
874 : self.dispatch(
875 : Method::POST,
876 : format!("v1/tenant/{tenant_id}/timeline"),
877 : Some(req),
878 : )
879 : .await
880 : }
881 : }
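      :
      : // A minimal usage sketch (illustration only, assuming a valid `LocalEnv` named `env`
      : // and an async context; in practice `neon_local` drives this lifecycle):
      : //
      : //     let storcon = StorageController::from_env(&env);
      : //     let timeout: humantime::Duration = "30s".parse().unwrap();
      : //     storcon
      : //         .start(NeonStorageControllerStartArgs::with_default_instance_id(timeout))
      : //         .await?;
      : //     storcon.ready().await?;
      : //     storcon
      : //         .stop(NeonStorageControllerStopArgs::with_default_instance_id(false))
      : //         .await?;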