Line data Source code
1 : use std::ffi::OsStr;
2 : use std::fs;
3 : use std::path::PathBuf;
4 : use std::process::ExitStatus;
5 : use std::str::FromStr;
6 : use std::sync::OnceLock;
7 : use std::time::{Duration, Instant};
8 :
9 : use camino::{Utf8Path, Utf8PathBuf};
10 : use hyper0::Uri;
11 : use nix::unistd::Pid;
12 : use pageserver_api::controller_api::{
13 : NodeConfigureRequest, NodeDescribeResponse, NodeRegisterRequest,
14 : SafekeeperSchedulingPolicyRequest, SkSchedulingPolicy, TenantCreateRequest,
15 : TenantCreateResponse, TenantLocateResponse,
16 : };
17 : use pageserver_api::models::{
18 : TenantConfig, TenantConfigRequest, TimelineCreateRequest, TimelineInfo,
19 : };
20 : use pageserver_api::shard::TenantShardId;
21 : use pageserver_client::mgmt_api::ResponseErrorMessageExt;
22 : use pem::Pem;
23 : use postgres_backend::AuthType;
24 : use reqwest::{Method, Response};
25 : use serde::de::DeserializeOwned;
26 : use serde::{Deserialize, Serialize};
27 : use tokio::process::Command;
28 : use tracing::instrument;
29 : use url::Url;
30 : use utils::auth::{Claims, Scope, encode_from_key_file};
31 : use utils::id::{NodeId, TenantId};
32 : use whoami::username;
33 :
34 : use crate::background_process;
35 : use crate::local_env::{LocalEnv, NeonStorageControllerConf};
36 :
37 : pub struct StorageController {
38 : env: LocalEnv,
39 : private_key: Option<Pem>,
40 : public_key: Option<Pem>,
41 : client: reqwest::Client,
42 : config: NeonStorageControllerConf,
43 :
44 : // The listen port is learned when starting the storage controller,
45 : // hence the use of OnceLock to init it at the right time.
46 : listen_port: OnceLock<u16>,
47 : }
48 :
49 : const COMMAND: &str = "storage_controller";
50 :
51 : const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
52 :
53 : const DB_NAME: &str = "storage_controller";
54 :
55 : pub struct NeonStorageControllerStartArgs {
56 : pub instance_id: u8,
57 : pub base_port: Option<u16>,
58 : pub start_timeout: humantime::Duration,
59 : }
60 :
61 : impl NeonStorageControllerStartArgs {
62 0 : pub fn with_default_instance_id(start_timeout: humantime::Duration) -> Self {
63 0 : Self {
64 0 : instance_id: 1,
65 0 : base_port: None,
66 0 : start_timeout,
67 0 : }
68 0 : }
69 : }
70 :
71 : pub struct NeonStorageControllerStopArgs {
72 : pub instance_id: u8,
73 : pub immediate: bool,
74 : }
75 :
76 : impl NeonStorageControllerStopArgs {
77 0 : pub fn with_default_instance_id(immediate: bool) -> Self {
78 0 : Self {
79 0 : instance_id: 1,
80 0 : immediate,
81 0 : }
82 0 : }
83 : }
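// Illustrative usage (not part of this file; the timeout string and calling code are
// assumptions): `neon_local` typically drives these argument structs with the default
// instance id, roughly like
//
//     let start = NeonStorageControllerStartArgs::with_default_instance_id("30s".parse()?);
//     storage_controller.start(start).await?;
//     let stop = NeonStorageControllerStopArgs::with_default_instance_id(false);
//     storage_controller.stop(stop).await?;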
84 :
85 0 : #[derive(Serialize, Deserialize)]
86 : pub struct AttachHookRequest {
87 : pub tenant_shard_id: TenantShardId,
88 : pub node_id: Option<NodeId>,
89 : pub generation_override: Option<i32>, // only new tenants
90 : pub config: Option<TenantConfig>, // only new tenants
91 : }
92 :
93 0 : #[derive(Serialize, Deserialize)]
94 : pub struct AttachHookResponse {
95 : #[serde(rename = "gen")]
96 : pub generation: Option<u32>,
97 : }
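// Wire-format note (inferred from the serde attributes above): the generation field is
// serialized under the key "gen", so a response body looks like `{"gen": 42}`, or
// `{"gen": null}` when no generation was assigned.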
98 :
99 0 : #[derive(Serialize, Deserialize)]
100 : pub struct InspectRequest {
101 : pub tenant_shard_id: TenantShardId,
102 : }
103 :
104 0 : #[derive(Serialize, Deserialize)]
105 : pub struct InspectResponse {
106 : pub attachment: Option<(u32, NodeId)>,
107 : }
108 :
109 : impl StorageController {
110 0 : pub fn from_env(env: &LocalEnv) -> Self {
111 0 : // Assume all pageservers have symmetric auth configuration: this service
112 0 : // expects to use one JWT token to talk to all of them.
113 0 : let ps_conf = env
114 0 : .pageservers
115 0 : .first()
116 0 : .expect("Config is validated to contain at least one pageserver");
117 0 : let (private_key, public_key) = match ps_conf.http_auth_type {
118 0 : AuthType::Trust => (None, None),
119 : AuthType::NeonJWT => {
120 0 : let private_key_path = env.get_private_key_path();
121 0 : let private_key =
122 0 : pem::parse(fs::read(private_key_path).expect("failed to read private key"))
123 0 : .expect("failed to parse PEM file");
124 0 :
125 0 : // If pageserver auth is enabled, this implicitly enables auth for this service,
126 0 : // using the same credentials.
127 0 : let public_key_path =
128 0 : camino::Utf8PathBuf::try_from(env.base_data_dir.join("auth_public_key.pem"))
129 0 : .unwrap();
130 :
131 : // This service takes keys as a string rather than as a path to a file/dir: read the key into memory.
132 0 : let public_key = if std::fs::metadata(&public_key_path)
133 0 : .expect("Can't stat public key")
134 0 : .is_dir()
135 : {
136 : // Our config may specify a directory: this is for the pageserver's ability to handle multiple
137 : // keys. We only use one key at a time, so we arbitrarily load the first one in the directory.
138 0 : let mut dir =
139 0 : std::fs::read_dir(&public_key_path).expect("Can't readdir public key path");
140 0 : let dent = dir
141 0 : .next()
142 0 : .expect("Empty key dir")
143 0 : .expect("Error reading key dir");
144 0 :
145 0 : pem::parse(std::fs::read_to_string(dent.path()).expect("Can't read public key"))
146 0 : .expect("Failed to parse PEM file")
147 : } else {
148 0 : pem::parse(
149 0 : std::fs::read_to_string(&public_key_path).expect("Can't read public key"),
150 0 : )
151 0 : .expect("Failed to parse PEM file")
152 : };
153 0 : (Some(private_key), Some(public_key))
154 : }
155 : };
156 :
157 0 : Self {
158 0 : env: env.clone(),
159 0 : private_key,
160 0 : public_key,
161 0 : client: env.create_http_client(),
162 0 : config: env.storage_controller.clone(),
163 0 : listen_port: OnceLock::default(),
164 0 : }
165 0 : }
166 :
167 0 : fn storage_controller_instance_dir(&self, instance_id: u8) -> PathBuf {
168 0 : self.env
169 0 : .base_data_dir
170 0 : .join(format!("storage_controller_{}", instance_id))
171 0 : }
172 :
173 0 : fn pid_file(&self, instance_id: u8) -> Utf8PathBuf {
174 0 : Utf8PathBuf::from_path_buf(
175 0 : self.storage_controller_instance_dir(instance_id)
176 0 : .join("storage_controller.pid"),
177 0 : )
178 0 : .expect("non-Unicode path")
179 0 : }
180 :
181 : /// Find the directory containing postgres subdirectories, such as `bin` and `lib`
182 : ///
183 : /// This usually uses STORAGE_CONTROLLER_POSTGRES_VERSION of postgres, but will fall back
184 : /// to other versions if that one isn't found. Some automated tests create circumstances
185 : /// where only one version is available in pg_distrib_dir, as in `test_remote_extensions`.
186 0 : async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result<Utf8PathBuf> {
187 0 : let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14];
188 :
189 0 : for v in prefer_versions {
190 0 : let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap();
191 0 : if tokio::fs::try_exists(&path).await? {
192 0 : return Ok(path);
193 0 : }
194 : }
195 :
196 : // Fall through
197 0 : anyhow::bail!(
198 0 : "Postgres directory '{}' not found in {}",
199 0 : dir_name,
200 0 : self.env.pg_distrib_dir.display(),
201 0 : );
202 0 : }
203 :
204 0 : pub async fn get_pg_bin_dir(&self) -> anyhow::Result<Utf8PathBuf> {
205 0 : self.get_pg_dir("bin").await
206 0 : }
207 :
208 0 : pub async fn get_pg_lib_dir(&self) -> anyhow::Result<Utf8PathBuf> {
209 0 : self.get_pg_dir("lib").await
210 0 : }
211 :
212 : /// Readiness check for our postgres process
213 0 : async fn pg_isready(&self, pg_bin_dir: &Utf8Path, postgres_port: u16) -> anyhow::Result<bool> {
214 0 : let bin_path = pg_bin_dir.join("pg_isready");
215 0 : let args = [
216 0 : "-h",
217 0 : "localhost",
218 0 : "-U",
219 0 : &username(),
220 0 : "-d",
221 0 : DB_NAME,
222 0 : "-p",
223 0 : &format!("{}", postgres_port),
224 0 : ];
225 0 : let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
226 0 : let envs = [
227 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
228 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
229 0 : ];
230 0 : let exitcode = Command::new(bin_path)
231 0 : .args(args)
232 0 : .envs(envs)
233 0 : .spawn()?
234 0 : .wait()
235 0 : .await?;
236 :
237 0 : Ok(exitcode.success())
238 0 : }
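// Roughly equivalent shell invocation (illustrative; the user and port are filled in at
// runtime, and LD_LIBRARY_PATH/DYLD_LIBRARY_PATH point at the postgres lib dir):
//
//     pg_isready -h localhost -U $USER -d storage_controller -p <port>
//
// A zero exit code from pg_isready is reported here as `Ok(true)`.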
239 :
240 : /// Create our database if it doesn't exist
241 : ///
242 : /// This function is equivalent to the `diesel setup` command in the diesel CLI. We implement
243 : /// the same steps by hand to avoid imposing a dependency on installing diesel-cli for developers
244 : /// who just want to run `cargo neon_local` without knowing about diesel.
245 : ///
246 : /// Returns the database url
247 0 : pub async fn setup_database(&self, postgres_port: u16) -> anyhow::Result<String> {
248 0 : let database_url = format!(
249 0 : "postgresql://{}@localhost:{}/{DB_NAME}",
250 0 : &username(),
251 0 : postgres_port
252 0 : );
253 :
254 0 : let pg_bin_dir = self.get_pg_bin_dir().await?;
255 0 : let createdb_path = pg_bin_dir.join("createdb");
256 0 : let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
257 0 : let envs = [
258 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
259 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
260 0 : ];
261 0 : let output = Command::new(&createdb_path)
262 0 : .args([
263 0 : "-h",
264 0 : "localhost",
265 0 : "-p",
266 0 : &format!("{}", postgres_port),
267 0 : "-U",
268 0 : &username(),
269 0 : "-O",
270 0 : &username(),
271 0 : DB_NAME,
272 0 : ])
273 0 : .envs(envs)
274 0 : .output()
275 0 : .await
276 0 : .expect("Failed to spawn createdb");
277 0 :
278 0 : if !output.status.success() {
279 0 : let stderr = String::from_utf8(output.stderr).expect("Non-UTF8 output from createdb");
280 0 : if stderr.contains("already exists") {
281 0 : tracing::info!("Database {DB_NAME} already exists");
282 : } else {
283 0 : anyhow::bail!("createdb failed with status {}: {stderr}", output.status);
284 : }
285 0 : }
286 :
287 0 : Ok(database_url)
288 0 : }
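// Roughly equivalent shell invocation (illustrative; values are filled in at runtime):
//
//     createdb -h localhost -p <port> -U $USER -O $USER storage_controller
//
// An "already exists" error is tolerated, and the returned URL has the form
// `postgresql://$USER@localhost:<port>/storage_controller`.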
289 :
290 0 : pub async fn connect_to_database(
291 0 : &self,
292 0 : postgres_port: u16,
293 0 : ) -> anyhow::Result<(
294 0 : tokio_postgres::Client,
295 0 : tokio_postgres::Connection<tokio_postgres::Socket, tokio_postgres::tls::NoTlsStream>,
296 0 : )> {
297 0 : tokio_postgres::Config::new()
298 0 : .host("localhost")
299 0 : .port(postgres_port)
300 0 : // The user is the ambient operating system user name.
301 0 : // That is an impurity which we want to fix => TODO https://github.com/neondatabase/neon/issues/8400
302 0 : //
303 0 : // Until we get there, use the ambient operating system user name.
304 0 : // Recent tokio-postgres versions default to this if the user isn't specified.
305 0 : // But tokio-postgres fork doesn't have this upstream commit:
306 0 : // https://github.com/sfackler/rust-postgres/commit/cb609be758f3fb5af537f04b584a2ee0cebd5e79
307 0 : // => we should rebase our fork => TODO https://github.com/neondatabase/neon/issues/8399
308 0 : .user(&username())
309 0 : .dbname(DB_NAME)
310 0 : .connect(tokio_postgres::NoTls)
311 0 : .await
312 0 : .map_err(anyhow::Error::new)
313 0 : }
314 :
315 : /// Wrapper for the pg_ctl binary, which we spawn as a short-lived subprocess when starting and stopping postgres
316 0 : async fn pg_ctl<I, S>(&self, args: I) -> ExitStatus
317 0 : where
318 0 : I: IntoIterator<Item = S>,
319 0 : S: AsRef<OsStr>,
320 0 : {
321 0 : let pg_bin_dir = self.get_pg_bin_dir().await.unwrap();
322 0 : let bin_path = pg_bin_dir.join("pg_ctl");
323 :
324 0 : let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
325 0 : let envs = [
326 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
327 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
328 0 : ];
329 0 :
330 0 : Command::new(bin_path)
331 0 : .args(args)
332 0 : .envs(envs)
333 0 : .spawn()
334 0 : .expect("Failed to spawn pg_ctl, binary_missing?")
335 0 : .wait()
336 0 : .await
337 0 : .expect("Failed to wait for pg_ctl termination")
338 0 : }
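// Example invocations of this wrapper from `start` and `stop` below:
//
//     self.pg_ctl(["-w", "-D", pg_data_path.as_ref(), "-l", pg_log_path.as_ref(), "-U", &username(), "start"]).await;
//     self.pg_ctl(["-D", &pg_data_path.to_string_lossy(), "stop"]).await;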
339 :
340 0 : pub async fn start(&self, start_args: NeonStorageControllerStartArgs) -> anyhow::Result<()> {
341 0 : let instance_dir = self.storage_controller_instance_dir(start_args.instance_id);
342 0 : if let Err(err) = tokio::fs::create_dir(&instance_dir).await {
343 0 : if err.kind() != std::io::ErrorKind::AlreadyExists {
344 0 : panic!("Failed to create instance dir {instance_dir:?}");
345 0 : }
346 0 : }
347 :
348 0 : if self.env.generate_local_ssl_certs {
349 0 : self.env.generate_ssl_cert(
350 0 : &instance_dir.join("server.crt"),
351 0 : &instance_dir.join("server.key"),
352 0 : )?;
353 0 : }
354 :
355 0 : let listen_url = &self.env.control_plane_api;
356 0 :
357 0 : let scheme = listen_url.scheme();
358 0 : let host = listen_url.host_str().unwrap();
359 :
360 0 : let (listen_port, postgres_port) = if let Some(base_port) = start_args.base_port {
361 0 : (
362 0 : base_port,
363 0 : self.config
364 0 : .database_url
365 0 : .expect("--base-port requires NeonStorageControllerConf::database_url")
366 0 : .port(),
367 0 : )
368 : } else {
369 0 : let port = listen_url.port().unwrap();
370 0 : (port, port + 1)
371 : };
372 :
373 0 : self.listen_port
374 0 : .set(listen_port)
375 0 : .expect("StorageController::listen_port is only set here");
376 :
377 : // Do we remove the pid file on stop?
378 0 : let pg_started = self.is_postgres_running().await?;
379 0 : let pg_lib_dir = self.get_pg_lib_dir().await?;
380 :
381 0 : if !pg_started {
382 : // Start a vanilla Postgres process used by the storage controller for persistence.
383 0 : let pg_data_path = Utf8PathBuf::from_path_buf(self.env.base_data_dir.clone())
384 0 : .unwrap()
385 0 : .join("storage_controller_db");
386 0 : let pg_bin_dir = self.get_pg_bin_dir().await?;
387 0 : let pg_log_path = pg_data_path.join("postgres.log");
388 0 :
389 0 : if !tokio::fs::try_exists(&pg_data_path).await? {
390 0 : let initdb_args = [
391 0 : "--pgdata",
392 0 : pg_data_path.as_ref(),
393 0 : "--username",
394 0 : &username(),
395 0 : "--no-sync",
396 0 : "--no-instructions",
397 0 : ];
398 0 : tracing::info!(
399 0 : "Initializing storage controller database with args: {:?}",
400 : initdb_args
401 : );
402 :
403 : // Initialize empty database
404 0 : let initdb_path = pg_bin_dir.join("initdb");
405 0 : let mut child = Command::new(&initdb_path)
406 0 : .envs(vec![
407 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
408 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
409 0 : ])
410 0 : .args(initdb_args)
411 0 : .spawn()
412 0 : .expect("Failed to spawn initdb");
413 0 : let status = child.wait().await?;
414 0 : if !status.success() {
415 0 : anyhow::bail!("initdb failed with status {status}");
416 0 : }
417 0 : };
418 :
419 : // Write a minimal config file:
420 : // - Specify the port, since this is chosen dynamically
421 : // - Switch off fsync, since we're running on lightweight test environments and when e.g. scale testing
422 : // the storage controller we don't want a slow local disk to interfere with that.
423 : //
424 : // NB: it's important that we rewrite this file on each start command so we propagate changes
425 : // from `LocalEnv`'s config file (`.neon/config`).
426 0 : tokio::fs::write(
427 0 : &pg_data_path.join("postgresql.conf"),
428 0 : format!("port = {}\nfsync=off\n", postgres_port),
429 0 : )
430 0 : .await?;
431 :
432 0 : println!("Starting storage controller database...");
433 0 : let db_start_args = [
434 0 : "-w",
435 0 : "-D",
436 0 : pg_data_path.as_ref(),
437 0 : "-l",
438 0 : pg_log_path.as_ref(),
439 0 : "-U",
440 0 : &username(),
441 0 : "start",
442 0 : ];
443 0 : tracing::info!(
444 0 : "Starting storage controller database with args: {:?}",
445 : db_start_args
446 : );
447 :
448 0 : let db_start_status = self.pg_ctl(db_start_args).await;
449 0 : let start_timeout: Duration = start_args.start_timeout.into();
450 0 : let db_start_deadline = Instant::now() + start_timeout;
451 0 : if !db_start_status.success() {
452 0 : return Err(anyhow::anyhow!(
453 0 : "Failed to start postgres {}",
454 0 : db_start_status.code().unwrap()
455 0 : ));
456 0 : }
457 :
458 : loop {
459 0 : if Instant::now() > db_start_deadline {
460 0 : return Err(anyhow::anyhow!("Timed out waiting for postgres to start"));
461 0 : }
462 0 :
463 0 : match self.pg_isready(&pg_bin_dir, postgres_port).await {
464 : Ok(true) => {
465 0 : tracing::info!("storage controller postgres is now ready");
466 0 : break;
467 : }
468 : Ok(false) => {
469 0 : tokio::time::sleep(Duration::from_millis(100)).await;
470 : }
471 0 : Err(e) => {
472 0 : tracing::warn!("Failed to check postgres status: {e}")
473 : }
474 : }
475 : }
476 :
477 0 : self.setup_database(postgres_port).await?;
478 0 : }
479 :
480 0 : let database_url = format!("postgresql://localhost:{}/{DB_NAME}", postgres_port);
481 0 :
482 0 : // We support running a startup SQL script to fiddle with the database before we launch storcon.
483 0 : // This is used by the test suite.
484 0 : let startup_script_path = self
485 0 : .env
486 0 : .base_data_dir
487 0 : .join("storage_controller_db.startup.sql");
488 0 : let startup_script = match tokio::fs::read_to_string(&startup_script_path).await {
489 0 : Ok(script) => {
490 0 : tokio::fs::remove_file(startup_script_path).await?;
491 0 : script
492 : }
493 0 : Err(e) => {
494 0 : if e.kind() == std::io::ErrorKind::NotFound {
495 : // always run some startup script so that this code path doesn't bit rot
496 0 : "BEGIN; COMMIT;".to_string()
497 : } else {
498 0 : anyhow::bail!("Failed to read startup script: {e}")
499 : }
500 : }
501 : };
502 0 : let (mut client, conn) = self.connect_to_database(postgres_port).await?;
503 0 : let conn = tokio::spawn(conn);
504 0 : let tx = client.build_transaction();
505 0 : let tx = tx.start().await?;
506 0 : tx.batch_execute(&startup_script).await?;
507 0 : tx.commit().await?;
508 0 : drop(client);
509 0 : conn.await??;
510 :
511 0 : let addr = format!("{}:{}", host, listen_port);
512 0 : let address_for_peers = Uri::builder()
513 0 : .scheme(scheme)
514 0 : .authority(addr.clone())
515 0 : .path_and_query("")
516 0 : .build()
517 0 : .unwrap();
518 0 :
519 0 : let mut args = vec![
520 0 : "--dev",
521 0 : "--database-url",
522 0 : &database_url,
523 0 : "--max-offline-interval",
524 0 : &humantime::Duration::from(self.config.max_offline).to_string(),
525 0 : "--max-warming-up-interval",
526 0 : &humantime::Duration::from(self.config.max_warming_up).to_string(),
527 0 : "--heartbeat-interval",
528 0 : &humantime::Duration::from(self.config.heartbeat_interval).to_string(),
529 0 : "--address-for-peers",
530 0 : &address_for_peers.to_string(),
531 0 : ]
532 0 : .into_iter()
533 0 : .map(|s| s.to_string())
534 0 : .collect::<Vec<_>>();
535 0 :
536 0 : match scheme {
537 0 : "http" => args.extend(["--listen".to_string(), addr]),
538 0 : "https" => args.extend(["--listen-https".to_string(), addr]),
539 : _ => {
540 0 : panic!("Unexpected url scheme in control_plane_api: {scheme}");
541 : }
542 : }
543 :
544 0 : if self.config.start_as_candidate {
545 0 : args.push("--start-as-candidate".to_string());
546 0 : }
547 :
548 0 : if self.config.use_https_pageserver_api {
549 0 : args.push("--use-https-pageserver-api".to_string());
550 0 : }
551 :
552 0 : if self.config.use_https_safekeeper_api {
553 0 : args.push("--use-https-safekeeper-api".to_string());
554 0 : }
555 :
556 0 : if self.config.use_local_compute_notifications {
557 0 : args.push("--use-local-compute-notifications".to_string());
558 0 : }
559 :
560 0 : if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() {
561 0 : args.push(format!("--ssl-ca-file={}", ssl_ca_file.to_str().unwrap()));
562 0 : }
563 :
564 0 : if let Some(private_key) = &self.private_key {
565 0 : let claims = Claims::new(None, Scope::PageServerApi);
566 0 : let jwt_token =
567 0 : encode_from_key_file(&claims, private_key).expect("failed to generate jwt token");
568 0 : args.push(format!("--jwt-token={jwt_token}"));
569 0 :
570 0 : let peer_claims = Claims::new(None, Scope::Admin);
571 0 : let peer_jwt_token = encode_from_key_file(&peer_claims, private_key)
572 0 : .expect("failed to generate jwt token");
573 0 : args.push(format!("--peer-jwt-token={peer_jwt_token}"));
574 0 :
575 0 : let claims = Claims::new(None, Scope::SafekeeperData);
576 0 : let jwt_token =
577 0 : encode_from_key_file(&claims, private_key).expect("failed to generate jwt token");
578 0 : args.push(format!("--safekeeper-jwt-token={jwt_token}"));
579 0 : }
580 :
581 0 : if let Some(public_key) = &self.public_key {
582 0 : args.push(format!("--public-key=\"{public_key}\""));
583 0 : }
584 :
585 0 : if let Some(control_plane_hooks_api) = &self.env.control_plane_hooks_api {
586 0 : args.push(format!("--control-plane-url={control_plane_hooks_api}"));
587 0 : }
588 :
589 0 : if let Some(split_threshold) = self.config.split_threshold.as_ref() {
590 0 : args.push(format!("--split-threshold={split_threshold}"))
591 0 : }
592 :
593 0 : if let Some(max_split_shards) = self.config.max_split_shards.as_ref() {
594 0 : args.push(format!("--max-split-shards={max_split_shards}"))
595 0 : }
596 :
597 0 : if let Some(initial_split_threshold) = self.config.initial_split_threshold.as_ref() {
598 0 : args.push(format!(
599 0 : "--initial-split-threshold={initial_split_threshold}"
600 0 : ))
601 0 : }
602 :
603 0 : if let Some(initial_split_shards) = self.config.initial_split_shards.as_ref() {
604 0 : args.push(format!("--initial-split-shards={initial_split_shards}"))
605 0 : }
606 :
607 0 : if let Some(lag) = self.config.max_secondary_lag_bytes.as_ref() {
608 0 : args.push(format!("--max-secondary-lag-bytes={lag}"))
609 0 : }
610 :
611 0 : if let Some(threshold) = self.config.long_reconcile_threshold {
612 0 : args.push(format!(
613 0 : "--long-reconcile-threshold={}",
614 0 : humantime::Duration::from(threshold)
615 0 : ))
616 0 : }
617 :
618 0 : args.push(format!(
619 0 : "--neon-local-repo-dir={}",
620 0 : self.env.base_data_dir.display()
621 0 : ));
622 0 :
623 0 : if self.env.safekeepers.iter().any(|sk| sk.auth_enabled) && self.private_key.is_none() {
624 0 : anyhow::bail!("Safekeeper set up for auth but no private key specified");
625 0 : }
626 0 :
627 0 : if self.config.timelines_onto_safekeepers {
628 0 : args.push("--timelines-onto-safekeepers".to_string());
629 0 : }
630 :
631 0 : println!("Starting storage controller");
632 0 :
633 0 : background_process::start_process(
634 0 : COMMAND,
635 0 : &instance_dir,
636 0 : &self.env.storage_controller_bin(),
637 0 : args,
638 0 : vec![
639 0 : ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
640 0 : ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
641 0 : ],
642 0 : background_process::InitialPidFile::Create(self.pid_file(start_args.instance_id)),
643 0 : &start_args.start_timeout,
644 0 : || async {
645 0 : match self.ready().await {
646 0 : Ok(_) => Ok(true),
647 0 : Err(_) => Ok(false),
648 : }
649 0 : },
650 0 : )
651 0 : .await?;
652 :
653 0 : if self.config.timelines_onto_safekeepers {
654 0 : self.register_safekeepers().await?;
655 0 : }
656 :
657 0 : Ok(())
658 0 : }
659 :
660 0 : pub async fn stop(&self, stop_args: NeonStorageControllerStopArgs) -> anyhow::Result<()> {
661 0 : background_process::stop_process(
662 0 : stop_args.immediate,
663 0 : COMMAND,
664 0 : &self.pid_file(stop_args.instance_id),
665 0 : )?;
666 :
667 0 : let storcon_instances = self.env.storage_controller_instances().await?;
668 0 : for (instance_id, instanced_dir_path) in storcon_instances {
669 0 : if instance_id == stop_args.instance_id {
670 0 : continue;
671 0 : }
672 0 :
673 0 : let pid_file = instanced_dir_path.join("storage_controller.pid");
674 0 : let pid = tokio::fs::read_to_string(&pid_file)
675 0 : .await
676 0 : .map_err(|err| {
677 0 : anyhow::anyhow!("Failed to read storcon pid file at {pid_file:?}: {err}")
678 0 : })?
679 0 : .parse::<i32>()
680 0 : .expect("pid is valid i32");
681 :
682 0 : let other_proc_alive = !background_process::process_has_stopped(Pid::from_raw(pid))?;
683 0 : if other_proc_alive {
684 : // There is another storage controller instance running, so we return
685 : // and leave the database running.
686 0 : return Ok(());
687 0 : }
688 : }
689 :
690 0 : let pg_data_path = self.env.base_data_dir.join("storage_controller_db");
691 0 :
692 0 : println!("Stopping storage controller database...");
693 0 : let pg_stop_args = ["-D", &pg_data_path.to_string_lossy(), "stop"];
694 0 : let stop_status = self.pg_ctl(pg_stop_args).await;
695 0 : if !stop_status.success() {
696 0 : match self.is_postgres_running().await {
697 : Ok(false) => {
698 0 : println!("Storage controller database is already stopped");
699 0 : return Ok(());
700 : }
701 : Ok(true) => {
702 0 : anyhow::bail!("Failed to stop storage controller database");
703 : }
704 0 : Err(err) => {
705 0 : anyhow::bail!("Failed to stop storage controller database: {err}");
706 : }
707 : }
708 0 : }
709 0 :
710 0 : Ok(())
711 0 : }
712 :
713 0 : async fn is_postgres_running(&self) -> anyhow::Result<bool> {
714 0 : let pg_data_path = self.env.base_data_dir.join("storage_controller_db");
715 0 :
716 0 : let pg_status_args = ["-D", &pg_data_path.to_string_lossy(), "status"];
717 0 : let status_exitcode = self.pg_ctl(pg_status_args).await;
718 :
719 : // pg_ctl status exit codes: 3 means postgres is not running and 4 means the data
720 : // directory does not exist; treat both as "not running". Exit code 0 means running.
721 : const PG_STATUS_NOT_RUNNING: i32 = 3;
722 : const PG_NO_DATA_DIR: i32 = 4;
723 : const PG_STATUS_RUNNING: i32 = 0;
724 0 : match status_exitcode.code() {
725 0 : Some(PG_STATUS_NOT_RUNNING) => Ok(false),
726 0 : Some(PG_NO_DATA_DIR) => Ok(false),
727 0 : Some(PG_STATUS_RUNNING) => Ok(true),
728 0 : Some(code) => Err(anyhow::anyhow!(
729 0 : "pg_ctl status returned unexpected status code: {:?}",
730 0 : code
731 0 : )),
732 0 : None => Err(anyhow::anyhow!("pg_ctl status returned no status code")),
733 : }
734 0 : }
735 :
736 0 : fn get_claims_for_path(path: &str) -> anyhow::Result<Option<Claims>> {
737 0 : let category = match path.find('/') {
738 0 : Some(idx) => &path[..idx],
739 0 : None => path,
740 : };
741 :
742 0 : match category {
743 0 : "status" | "ready" => Ok(None),
744 0 : "control" | "debug" => Ok(Some(Claims::new(None, Scope::Admin))),
745 0 : "v1" => Ok(Some(Claims::new(None, Scope::PageServerApi))),
746 0 : _ => Err(anyhow::anyhow!("Failed to determine claims for {}", path)),
747 : }
748 0 : }
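// Examples of the mapping above: "ready" and "status" paths need no claims,
// "debug/v1/attach-hook" and "control/v1/node" require Scope::Admin, and "v1/tenant"
// requires Scope::PageServerApi. Unknown prefixes are an error.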
749 :
750 : /// Simple HTTP request wrapper for calling into storage controller
751 0 : async fn dispatch<RQ, RS>(
752 0 : &self,
753 0 : method: reqwest::Method,
754 0 : path: String,
755 0 : body: Option<RQ>,
756 0 : ) -> anyhow::Result<RS>
757 0 : where
758 0 : RQ: Serialize + Sized,
759 0 : RS: DeserializeOwned + Sized,
760 0 : {
761 0 : let response = self.dispatch_inner(method, path, body).await?;
762 0 : Ok(response
763 0 : .json()
764 0 : .await
765 0 : .map_err(pageserver_client::mgmt_api::Error::ReceiveBody)?)
766 0 : }
767 :
768 : /// Simple HTTP request wrapper for calling into storage controller
769 0 : async fn dispatch_inner<RQ>(
770 0 : &self,
771 0 : method: reqwest::Method,
772 0 : path: String,
773 0 : body: Option<RQ>,
774 0 : ) -> anyhow::Result<Response>
775 0 : where
776 0 : RQ: Serialize + Sized,
777 0 : {
778 : // In the special case of the `storage_controller start` subcommand, we wish
779 : // to use the API endpoint of the newly started storage controller in order
780 : // to pass the readiness check. In this scenario [`Self::listen_port`] will
781 : // be set (see [`Self::start`]).
782 : //
783 : // Otherwise, we infer the storage controller api endpoint from the configured
784 : // control plane API.
785 0 : let port = if let Some(port) = self.listen_port.get() {
786 0 : *port
787 : } else {
788 0 : self.env.control_plane_api.port().unwrap()
789 : };
790 :
791 : // The configured URL has the /upcall path prefix for pageservers to use: we will strip that out
792 : // for general purpose API access.
793 0 : let url = Url::from_str(&format!(
794 0 : "{}://{}:{port}/{path}",
795 0 : self.env.control_plane_api.scheme(),
796 0 : self.env.control_plane_api.host_str().unwrap(),
797 0 : ))
798 0 : .unwrap();
799 0 :
800 0 : let mut builder = self.client.request(method, url);
801 0 : if let Some(body) = body {
802 0 : builder = builder.json(&body)
803 0 : }
804 0 : if let Some(private_key) = &self.private_key {
805 0 : println!("Getting claims for path {}", path);
806 0 : if let Some(required_claims) = Self::get_claims_for_path(&path)? {
807 0 : println!("Got claims {:?} for path {}", required_claims, path);
808 0 : let jwt_token = encode_from_key_file(&required_claims, private_key)?;
809 0 : builder = builder.header(
810 0 : reqwest::header::AUTHORIZATION,
811 0 : format!("Bearer {jwt_token}"),
812 0 : );
813 0 : }
814 0 : }
815 :
816 0 : let response = builder.send().await?;
817 0 : let response = response.error_from_body().await?;
818 :
819 0 : Ok(response)
820 0 : }
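// Illustrative example of the URL rewriting above (host and port are assumptions): with
// `control_plane_api` set to "http://127.0.0.1:1234/upcall/v1/" and `path` =
// "control/v1/node", the request is sent to "http://127.0.0.1:1234/control/v1/node";
// the "/upcall" prefix from the configured URL is not reused.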
821 :
822 : /// Register the safekeepers in the storage controller
823 : #[instrument(skip(self))]
824 : async fn register_safekeepers(&self) -> anyhow::Result<()> {
825 : for sk in self.env.safekeepers.iter() {
826 : let sk_id = sk.id;
827 : let body = serde_json::json!({
828 : "id": sk_id,
829 : "created_at": "2023-10-25T09:11:25Z",
830 : "updated_at": "2024-08-28T11:32:43Z",
831 : "region_id": "aws-us-east-2",
832 : "host": "127.0.0.1",
833 : "port": sk.pg_port,
834 : "http_port": sk.http_port,
835 : "https_port": sk.https_port,
836 : "version": 5957,
837 : "availability_zone_id": format!("us-east-2b-{sk_id}"),
838 : });
839 : self.upsert_safekeeper(sk_id, body).await?;
840 : self.safekeeper_scheduling_policy(sk_id, SkSchedulingPolicy::Active)
841 : .await?;
842 : }
843 : Ok(())
844 : }
845 :
846 : /// Call into the attach_hook API, for use before handing out attachments to pageservers
847 : #[instrument(skip(self))]
848 : pub async fn attach_hook(
849 : &self,
850 : tenant_shard_id: TenantShardId,
851 : pageserver_id: NodeId,
852 : ) -> anyhow::Result<Option<u32>> {
853 : let request = AttachHookRequest {
854 : tenant_shard_id,
855 : node_id: Some(pageserver_id),
856 : generation_override: None,
857 : config: None,
858 : };
859 :
860 : let response = self
861 : .dispatch::<_, AttachHookResponse>(
862 : Method::POST,
863 : "debug/v1/attach-hook".to_string(),
864 : Some(request),
865 : )
866 : .await?;
867 :
868 : Ok(response.generation)
869 : }
870 :
871 : #[instrument(skip(self))]
872 : pub async fn upsert_safekeeper(
873 : &self,
874 : node_id: NodeId,
875 : request: serde_json::Value,
876 : ) -> anyhow::Result<()> {
877 : let resp = self
878 : .dispatch_inner::<serde_json::Value>(
879 : Method::POST,
880 : format!("control/v1/safekeeper/{node_id}"),
881 : Some(request),
882 : )
883 : .await?;
884 : if !resp.status().is_success() {
885 : anyhow::bail!(
886 : "setting scheduling policy unsuccessful for safekeeper {node_id}: {}",
887 : resp.status()
888 : );
889 : }
890 : Ok(())
891 : }
892 :
893 : #[instrument(skip(self))]
894 : pub async fn safekeeper_scheduling_policy(
895 : &self,
896 : node_id: NodeId,
897 : scheduling_policy: SkSchedulingPolicy,
898 : ) -> anyhow::Result<()> {
899 : self.dispatch::<SafekeeperSchedulingPolicyRequest, ()>(
900 : Method::POST,
901 : format!("control/v1/safekeeper/{node_id}/scheduling_policy"),
902 : Some(SafekeeperSchedulingPolicyRequest { scheduling_policy }),
903 : )
904 : .await
905 : }
906 :
907 : #[instrument(skip(self))]
908 : pub async fn inspect(
909 : &self,
910 : tenant_shard_id: TenantShardId,
911 : ) -> anyhow::Result<Option<(u32, NodeId)>> {
912 : let request = InspectRequest { tenant_shard_id };
913 :
914 : let response = self
915 : .dispatch::<_, InspectResponse>(
916 : Method::POST,
917 : "debug/v1/inspect".to_string(),
918 : Some(request),
919 : )
920 : .await?;
921 :
922 : Ok(response.attachment)
923 : }
924 :
925 : #[instrument(skip(self))]
926 : pub async fn tenant_create(
927 : &self,
928 : req: TenantCreateRequest,
929 : ) -> anyhow::Result<TenantCreateResponse> {
930 : self.dispatch(Method::POST, "v1/tenant".to_string(), Some(req))
931 : .await
932 : }
933 :
934 : #[instrument(skip(self))]
935 : pub async fn tenant_import(&self, tenant_id: TenantId) -> anyhow::Result<TenantCreateResponse> {
936 : self.dispatch::<(), TenantCreateResponse>(
937 : Method::POST,
938 : format!("debug/v1/tenant/{tenant_id}/import"),
939 : None,
940 : )
941 : .await
942 : }
943 :
944 : #[instrument(skip(self))]
945 : pub async fn tenant_locate(&self, tenant_id: TenantId) -> anyhow::Result<TenantLocateResponse> {
946 : self.dispatch::<(), _>(
947 : Method::GET,
948 : format!("debug/v1/tenant/{tenant_id}/locate"),
949 : None,
950 : )
951 : .await
952 : }
953 :
954 : #[instrument(skip_all, fields(node_id=%req.node_id))]
955 : pub async fn node_register(&self, req: NodeRegisterRequest) -> anyhow::Result<()> {
956 : self.dispatch::<_, ()>(Method::POST, "control/v1/node".to_string(), Some(req))
957 : .await
958 : }
959 :
960 : #[instrument(skip_all, fields(node_id=%req.node_id))]
961 : pub async fn node_configure(&self, req: NodeConfigureRequest) -> anyhow::Result<()> {
962 : self.dispatch::<_, ()>(
963 : Method::PUT,
964 : format!("control/v1/node/{}/config", req.node_id),
965 : Some(req),
966 : )
967 : .await
968 : }
969 :
970 0 : pub async fn node_list(&self) -> anyhow::Result<Vec<NodeDescribeResponse>> {
971 0 : self.dispatch::<(), Vec<NodeDescribeResponse>>(
972 0 : Method::GET,
973 0 : "control/v1/node".to_string(),
974 0 : None,
975 0 : )
976 0 : .await
977 0 : }
978 :
979 : #[instrument(skip(self))]
980 : pub async fn ready(&self) -> anyhow::Result<()> {
981 : self.dispatch::<(), ()>(Method::GET, "ready".to_string(), None)
982 : .await
983 : }
984 :
985 : #[instrument(skip_all, fields(%tenant_id, timeline_id=%req.new_timeline_id))]
986 : pub async fn tenant_timeline_create(
987 : &self,
988 : tenant_id: TenantId,
989 : req: TimelineCreateRequest,
990 : ) -> anyhow::Result<TimelineInfo> {
991 : self.dispatch(
992 : Method::POST,
993 : format!("v1/tenant/{tenant_id}/timeline"),
994 : Some(req),
995 : )
996 : .await
997 : }
998 :
999 0 : pub async fn set_tenant_config(&self, req: &TenantConfigRequest) -> anyhow::Result<()> {
1000 0 : self.dispatch(Method::PUT, "v1/tenant/config".to_string(), Some(req))
1001 0 : .await
1002 0 : }
1003 : }