LCOV - code coverage report
Current view: top level - compute_tools/src/bin - fast_import.rs (source / functions)
Test:         4f58e98c51285c7fa348e0b410c88a10caf68ad2.info
Test Date:    2025-01-07 20:58:07

Coverage:     Lines:     0.0 % (0 of 282 hit)
              Functions: 0.0 % (0 of 37 hit)

Source code:
//! This program dumps a remote Postgres database into a local Postgres database
//! and uploads the resulting PGDATA into object storage for import into a Timeline.
//!
//! # Context, Architecture, Design
//!
//! See cloud.git Fast Imports RFC (<https://github.com/neondatabase/cloud/pull/19799>)
//! for the full picture.
//! The RFC describing the storage pieces of importing the PGDATA dump into a Timeline
//! is publicly accessible at <https://github.com/neondatabase/neon/pull/9538>.
//!
//! # This is a Prototype!
//!
//! This program is part of a prototype feature and not yet used in production.
//!
//! The cloud.git RFC contains lots of suggestions for improving e2e throughput
//! of this step of the timeline import process.
//!
//! # Local Testing
//!
//! - Comment out most of the pgxn extensions in `compute/compute-node.Dockerfile` to speed up the build.
//! - Build and push the image with the following commands:
//!
//! ```bash
//! docker buildx build --platform linux/amd64 --build-arg DEBIAN_VERSION=bullseye --build-arg GIT_VERSION=local --build-arg PG_VERSION=v14 --build-arg BUILD_TAG="$(date --iso-8601=s -u)" -t localhost:3030/localregistry/compute-node-v14:latest -f compute/compute-node.Dockerfile .
//! docker push localhost:3030/localregistry/compute-node-v14:latest
//! ```
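//!
//! The `fast_import` binary built from this file reads `{s3_prefix}/spec.json`, dumps and
//! restores the source database into a scratch Postgres under `--working-directory`, and
//! uploads the resulting `pgdata/` plus a `status/` marker back under the same prefix.
//! A sketch of an invocation, with purely illustrative paths and prefix (`--s3-prefix`
//! can also be supplied via `NEON_IMPORTER_S3_PREFIX`):
//!
//! ```bash
//! fast_import \
//!     --working-directory /tmp/fast-import-work \
//!     --s3-prefix s3://my-import-bucket/imports/job-1234 \
//!     --pg-bin-dir /usr/local/pgsql/bin \
//!     --pg-lib-dir /usr/local/pgsql/lib
//! ```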

use anyhow::Context;
use aws_config::BehaviorVersion;
use camino::{Utf8Path, Utf8PathBuf};
use clap::Parser;
use compute_tools::extension_server::{get_pg_version, PostgresMajorVersion};
use nix::unistd::Pid;
use tracing::{info, info_span, warn, Instrument};
use utils::fs_ext::is_directory_empty;

#[path = "fast_import/aws_s3_sync.rs"]
mod aws_s3_sync;
#[path = "fast_import/child_stdio_to_log.rs"]
mod child_stdio_to_log;
#[path = "fast_import/s3_uri.rs"]
mod s3_uri;

#[derive(clap::Parser)]
struct Args {
    #[clap(long)]
    working_directory: Utf8PathBuf,
    #[clap(long, env = "NEON_IMPORTER_S3_PREFIX")]
    s3_prefix: s3_uri::S3Uri,
    #[clap(long)]
    pg_bin_dir: Utf8PathBuf,
    #[clap(long)]
    pg_lib_dir: Utf8PathBuf,
}

#[serde_with::serde_as]
#[derive(serde::Deserialize)]
struct Spec {
    encryption_secret: EncryptionSecret,
    #[serde_as(as = "serde_with::base64::Base64")]
    source_connstring_ciphertext_base64: Vec<u8>,
}

#[derive(serde::Deserialize)]
enum EncryptionSecret {
    #[allow(clippy::upper_case_acronyms)]
    KMS { key_id: String },
}
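
// For reference, the spec.json fetched below would look roughly like this given the serde
// definitions above (externally tagged enum; the ciphertext field is a base64 string that
// deserializes into `Vec<u8>`); key id and ciphertext values are placeholders:
//
//   {
//     "encryption_secret": { "KMS": { "key_id": "<kms key id>" } },
//     "source_connstring_ciphertext_base64": "<base64 of the KMS-encrypted connection string>"
//   }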

#[tokio::main]
pub(crate) async fn main() -> anyhow::Result<()> {
    utils::logging::init(
        utils::logging::LogFormat::Plain,
        utils::logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
        utils::logging::Output::Stdout,
    )?;

    info!("starting");

    let Args {
        working_directory,
        s3_prefix,
        pg_bin_dir,
        pg_lib_dir,
    } = Args::parse();

    let aws_config = aws_config::load_defaults(BehaviorVersion::v2024_03_28()).await;

    let spec: Spec = {
        let spec_key = s3_prefix.append("/spec.json");
        let s3_client = aws_sdk_s3::Client::new(&aws_config);
        let object = s3_client
            .get_object()
            .bucket(&spec_key.bucket)
            .key(spec_key.key)
            .send()
            .await
            .context("get spec from s3")?
            .body
            .collect()
            .await
            .context("download spec body")?;
        serde_json::from_slice(&object.into_bytes()).context("parse spec as json")?
    };

    match tokio::fs::create_dir(&working_directory).await {
        Ok(()) => {}
        Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
            if !is_directory_empty(&working_directory)
                .await
                .context("check if working directory is empty")?
            {
                anyhow::bail!("working directory is not empty");
            } else {
                // ok
            }
        }
        Err(e) => return Err(anyhow::Error::new(e).context("create working directory")),
    }

    let pgdata_dir = working_directory.join("pgdata");
    tokio::fs::create_dir(&pgdata_dir)
        .await
        .context("create pgdata directory")?;

    //
    // Setup clients
    //
    let aws_config = aws_config::load_defaults(BehaviorVersion::v2024_03_28()).await;
    let kms_client = aws_sdk_kms::Client::new(&aws_config);

    //
    // Initialize pgdata
    //
    let pgbin = pg_bin_dir.join("postgres");
    let pg_version = match get_pg_version(pgbin.as_ref()) {
        PostgresMajorVersion::V14 => 14,
        PostgresMajorVersion::V15 => 15,
        PostgresMajorVersion::V16 => 16,
        PostgresMajorVersion::V17 => 17,
    };
    let superuser = "cloud_admin"; // XXX: this shouldn't be hard-coded
    postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
        superuser,
        locale: "en_US.UTF-8", // XXX: this shouldn't be hard-coded
        pg_version,
        initdb_bin: pg_bin_dir.join("initdb").as_ref(),
        library_search_path: &pg_lib_dir, // TODO: is this right? Prob works in compute image, not sure about neon_local.
        pgdata: &pgdata_dir,
    })
    .await
    .context("initdb")?;

    let nproc = num_cpus::get();

    //
    // Launch postgres process
    //
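    // The flags below trade durability for bulk-load throughput: wal_level=minimal,
    // fsync/full_page_writes/synchronous_commit off, and parallel workers pinned to nproc.
    // The instance only has to survive until pg_restore finishes and the pgdata is
    // uploaded; a crash simply fails the import job.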
    let mut postgres_proc = tokio::process::Command::new(pgbin)
        .arg("-D")
        .arg(&pgdata_dir)
        .args(["-c", "wal_level=minimal"])
        .args(["-c", "shared_buffers=10GB"])
        .args(["-c", "max_wal_senders=0"])
        .args(["-c", "fsync=off"])
        .args(["-c", "full_page_writes=off"])
        .args(["-c", "synchronous_commit=off"])
        .args(["-c", "maintenance_work_mem=8388608"])
        .args(["-c", &format!("max_parallel_maintenance_workers={nproc}")])
        .args(["-c", &format!("max_parallel_workers={nproc}")])
        .args(["-c", &format!("max_parallel_workers_per_gather={nproc}")])
        .args(["-c", &format!("max_worker_processes={nproc}")])
        .args(["-c", "effective_io_concurrency=100"])
        .env_clear()
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .spawn()
        .context("spawn postgres")?;

    info!("spawned postgres, waiting for it to become ready");
    tokio::spawn(
        child_stdio_to_log::relay_process_output(
            postgres_proc.stdout.take(),
            postgres_proc.stderr.take(),
        )
        .instrument(info_span!("postgres")),
    );
    let restore_pg_connstring =
        format!("host=localhost port=5432 user={superuser} dbname=postgres");
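    // Wait until the server accepts connections; this polls in a tight loop with no
    // delay between attempts.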
    loop {
        let res = tokio_postgres::connect(&restore_pg_connstring, tokio_postgres::NoTls).await;
        if res.is_ok() {
            info!("postgres is ready, connection succeeded");
            break;
        }
    }

    //
    // Decrypt connection string
    //
    let source_connection_string = {
        match spec.encryption_secret {
            EncryptionSecret::KMS { key_id } => {
                let mut output = kms_client
                    .decrypt()
                    .key_id(key_id)
                    .ciphertext_blob(aws_sdk_s3::primitives::Blob::new(
                        spec.source_connstring_ciphertext_base64,
                    ))
                    .send()
                    .await
                    .context("decrypt source connection string")?;
                let plaintext = output
                    .plaintext
                    .take()
                    .context("get plaintext source connection string")?;
                String::from_utf8(plaintext.into_inner())
                    .context("parse source connection string as utf8")?
            }
        }
    };

    //
    // Start the work
    //

    let dumpdir = working_directory.join("dumpdir");

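    // Flags shared by pg_dump and pg_restore below. The directory archive format is what
    // lets pg_dump parallelize across --jobs workers, and pg_restore can restore it in
    // parallel as well.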
    let common_args = [
        // schema mapping (prob suffices to specify them on one side)
        "--no-owner".to_string(),
        "--no-privileges".to_string(),
        "--no-publications".to_string(),
        "--no-security-labels".to_string(),
        "--no-subscriptions".to_string(),
        "--no-tablespaces".to_string(),
        // format
        "--format".to_string(),
        "directory".to_string(),
        // concurrency
        "--jobs".to_string(),
        num_cpus::get().to_string(),
        // progress updates
        "--verbose".to_string(),
    ];

    info!("dump into the working directory");
    {
        let mut pg_dump = tokio::process::Command::new(pg_bin_dir.join("pg_dump"))
            .args(&common_args)
            .arg("-f")
            .arg(&dumpdir)
            .arg("--no-sync")
            // POSITIONAL args
            // source db (db name included in connection string)
            .arg(&source_connection_string)
            // how we run it
            .env_clear()
            .kill_on_drop(true)
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped())
            .spawn()
            .context("spawn pg_dump")?;

        info!(pid=%pg_dump.id().unwrap(), "spawned pg_dump");

        tokio::spawn(
            child_stdio_to_log::relay_process_output(pg_dump.stdout.take(), pg_dump.stderr.take())
                .instrument(info_span!("pg_dump")),
        );

        let st = pg_dump.wait().await.context("wait for pg_dump")?;
        info!(status=?st, "pg_dump exited");
        if !st.success() {
            warn!(status=%st, "pg_dump failed, restore will likely fail as well");
        }
    }

    // TODO: do it in a streaming way, plenty of internal research done on this already
    // TODO: do the unlogged table trick

    info!("restore from working directory into vanilla postgres");
    {
        let mut pg_restore = tokio::process::Command::new(pg_bin_dir.join("pg_restore"))
            .args(&common_args)
            .arg("-d")
            .arg(&restore_pg_connstring)
            // POSITIONAL args
            .arg(&dumpdir)
            // how we run it
            .env_clear()
            .kill_on_drop(true)
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped())
            .spawn()
            .context("spawn pg_restore")?;

        info!(pid=%pg_restore.id().unwrap(), "spawned pg_restore");
        tokio::spawn(
            child_stdio_to_log::relay_process_output(
                pg_restore.stdout.take(),
                pg_restore.stderr.take(),
            )
            .instrument(info_span!("pg_restore")),
        );
        let st = pg_restore.wait().await.context("wait for pg_restore")?;
        info!(status=?st, "pg_restore exited");
        if !st.success() {
            warn!(status=%st, "pg_restore failed, the imported data is likely incomplete");
        }
    }

    info!("shutdown postgres");
    {
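        // SIGTERM requests a graceful ("smart") shutdown: postgres waits for any remaining
        // sessions to finish and writes a shutdown checkpoint, so the pgdata uploaded below
        // is a cleanly shut-down, consistent cluster.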
        nix::sys::signal::kill(
            Pid::from_raw(
                i32::try_from(postgres_proc.id().unwrap()).expect("convert child pid to i32"),
            ),
            nix::sys::signal::SIGTERM,
        )
        .context("signal postgres to shut down")?;
        postgres_proc
            .wait()
            .await
            .context("wait for postgres to shut down")?;
    }

    info!("upload pgdata");
    aws_s3_sync::sync(Utf8Path::new(&pgdata_dir), &s3_prefix.append("/pgdata/"))
        .await
        .context("sync pgdata directory to destination")?;

    info!("write status");
    {
        let status_dir = working_directory.join("status");
        std::fs::create_dir(&status_dir).context("create status directory")?;
        let status_file = status_dir.join("pgdata");
        std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
            .context("write status file")?;
        aws_s3_sync::sync(&status_dir, &s3_prefix.append("/status/"))
            .await
            .context("sync status directory to destination")?;
    }
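
    // At this point {s3_prefix}/pgdata/ holds the full cluster directory and
    // {s3_prefix}/status/pgdata contains {"done": true}, signalling to the consumer of
    // the prefix that the upload is complete.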

    Ok(())
}
        

Generated by: LCOV version 2.1-beta