//!
//! Postgres wrapper (`compute_ctl`) is intended to be run as a Docker entrypoint or as a `systemd`
//! `ExecStart` option. It handles all the `Neon` specifics during compute node
//! initialization:
//! - `compute_ctl` accepts the cluster (compute node) specification as a JSON file.
//! - Every start is a fresh start, so the data directory is removed and
//!   initialized again on each run.
//! - If `--remote-ext-config` is provided, it is used to fetch the extension list
//!   and download `shared_preload_libraries` from remote storage.
//! - Next, it puts configuration files into the `PGDATA` directory.
//! - Sync safekeepers and get the commit LSN.
//! - Get a `basebackup` from the pageserver using the LSN returned in the previous step.
//! - Try to start `postgres` and wait until it is ready to accept connections.
//! - Check and alter/drop/create roles and databases.
//! - Wait for the `postmaster` process to exit.
//!
//! `compute_ctl` also spawns two separate service threads:
//! - `compute-monitor` checks the last Postgres activity timestamp and saves it
//!   into the shared `ComputeNode`;
//! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and
//!   last-activity requests.
//!
//! If the `AUTOSCALING` environment variable is set, `compute_ctl` starts the
//! `vm-monitor` located in [`neon/libs/vm_monitor`]. For VM compute nodes,
//! `vm-monitor` communicates with the VM autoscaling system. It coordinates
//! downscaling and requests immediate upscaling under resource pressure.
//!
//! Usage example:
//! ```sh
//! compute_ctl -D /var/db/postgres/compute \
//!             -C 'postgresql://cloud_admin@localhost/postgres' \
//!             -c /var/db/postgres/configs/config.json \
//!             -b /usr/local/bin/postgres \
//!             -r http://pg-ext-s3-gateway
//! ```
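//!
//! To also run the `vm-monitor`, set the `AUTOSCALING` environment variable when
//! launching `compute_ctl` (a sketch based on the note above; `AUTOSCALING=1` is an
//! illustrative value, the variable only needs to be set):
//! ```sh
//! AUTOSCALING=1 compute_ctl ...
//! ```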
use std::ffi::OsString;
use std::fs::File;
use std::process::exit;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

use anyhow::{Context, Result};
use clap::Parser;
use compute_api::responses::ComputeConfig;
use compute_tools::compute::{
    BUILD_TAG, ComputeNode, ComputeNodeParams, forward_termination_signal,
};
use compute_tools::extension_server::get_pg_version_string;
use compute_tools::logger::*;
use compute_tools::params::*;
use compute_tools::spec::*;
use rlimit::{Resource, setrlimit};
use signal_hook::consts::{SIGINT, SIGQUIT, SIGTERM};
use signal_hook::iterator::Signals;
use tracing::{error, info};
use url::Url;
use utils::failpoint_support;

#[derive(Parser)]
#[command(rename_all = "kebab-case")]
struct Cli {
    #[arg(short = 'b', long, default_value = "postgres", env = "POSTGRES_PATH")]
    pub pgbin: String,

    #[arg(short = 'r', long)]
    pub remote_ext_config: Option<String>,

    /// The port to bind the external listening HTTP server to. Clients running
    /// outside the compute will talk to the compute through this port. Keep
    /// the previous name for this argument around for a smoother release
    /// with the control plane.
    #[arg(long, default_value_t = 3080)]
    pub external_http_port: u16,

    /// The port to bind the internal listening HTTP server to. Clients include
    /// the neon extension (for installing remote extensions) and local_proxy.
    #[arg(long, default_value_t = 3081)]
    pub internal_http_port: u16,

    #[arg(short = 'D', long, value_name = "DATADIR")]
    pub pgdata: String,

    #[arg(short = 'C', long, value_name = "DATABASE_URL")]
    pub connstr: String,

    #[cfg(target_os = "linux")]
    #[arg(long, default_value = "neon-postgres")]
    pub cgroup: String,

    #[cfg(target_os = "linux")]
    #[arg(
        long,
        default_value = "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable application_name=vm-monitor"
    )]
    pub filecache_connstr: String,

    #[cfg(target_os = "linux")]
    #[arg(long, default_value = "0.0.0.0:10301")]
    pub vm_monitor_addr: String,

    #[arg(long, action = clap::ArgAction::SetTrue)]
    pub resize_swap_on_bind: bool,

    #[arg(long)]
    pub set_disk_quota_for_fs: Option<String>,

    #[arg(short = 'c', long)]
    pub config: Option<OsString>,

    #[arg(short = 'i', long, group = "compute-id")]
    pub compute_id: String,

    #[arg(
        short = 'p',
        long,
        conflicts_with = "config",
        value_name = "CONTROL_PLANE_API_BASE_URL",
        requires = "compute-id"
    )]
    pub control_plane_uri: Option<String>,
}

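/// Entry point: parse the CLI arguments, initialize logging and signal handling,
/// load the compute configuration, then build and run the `ComputeNode`.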
fn main() -> Result<()> {
    let cli = Cli::parse();

    let scenario = failpoint_support::init();

    // For historical reasons, the main thread that processes the config and launches postgres
    // is synchronous, but we always have this tokio runtime available and we "enter" it so
    // that you can use tokio::spawn() and tokio::runtime::Handle::current().block_on(...)
    // from all parts of compute_ctl.
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()?;
    let _rt_guard = runtime.enter();

    runtime.block_on(init())?;

    // enable core dumping for all child processes
    setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;

    let connstr = Url::parse(&cli.connstr).context("cannot parse connstr as a URL")?;

    let config = get_config(&cli)?;

    let compute_node = ComputeNode::new(
        ComputeNodeParams {
            compute_id: cli.compute_id,
            connstr,
            pgdata: cli.pgdata.clone(),
            pgbin: cli.pgbin.clone(),
            pgversion: get_pg_version_string(&cli.pgbin),
            external_http_port: cli.external_http_port,
            internal_http_port: cli.internal_http_port,
            ext_remote_storage: cli.remote_ext_config.clone(),
            resize_swap_on_bind: cli.resize_swap_on_bind,
            set_disk_quota_for_fs: cli.set_disk_quota_for_fs,
            #[cfg(target_os = "linux")]
            filecache_connstr: cli.filecache_connstr,
            #[cfg(target_os = "linux")]
            cgroup: cli.cgroup,
            #[cfg(target_os = "linux")]
            vm_monitor_addr: cli.vm_monitor_addr,
        },
        config,
    )?;

    let exit_code = compute_node.run()?;

    scenario.teardown();

    deinit_and_exit(exit_code);
}

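/// Set up tracing/logging and spawn a thread that forwards SIGINT/SIGTERM/SIGQUIT
/// to `handle_exit_signal`.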
async fn init() -> Result<()> {
    init_tracing_and_logging(DEFAULT_LOG_LEVEL).await?;

    let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
    thread::spawn(move || {
        for sig in signals.forever() {
            handle_exit_signal(sig);
        }
    });

    info!("compute build_tag: {}", &BUILD_TAG.to_string());

    Ok(())
}

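/// Load the compute configuration: read it from the JSON file passed via `--config`
/// if present, otherwise fetch it from the control plane using `--control-plane-uri`
/// and `--compute-id`.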
fn get_config(cli: &Cli) -> Result<ComputeConfig> {
    // First, read the config from the path if provided
    if let Some(ref config) = cli.config {
        let file = File::open(config)?;
        return Ok(serde_json::from_reader(&file)?);
    }

    // If the config wasn't provided in the CLI arguments, then retrieve it from
    // the control plane. Note: this unwrap panics if `--control-plane-uri` was
    // not provided either.
    match get_config_from_control_plane(cli.control_plane_uri.as_ref().unwrap(), &cli.compute_id) {
        Ok(config) => Ok(config),
        Err(e) => {
            error!(
                "cannot get response from control plane: {}\n\
                 neither spec nor confirmation that compute is in the Empty state was received",
                e
            );
            Err(e)
        }
    }
}

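/// Flush the tracing pipeline (waiting at most 2 seconds) and terminate the process
/// with the given exit code, defaulting to 1.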
fn deinit_and_exit(exit_code: Option<i32>) -> ! {
    // Shutdown trace pipeline gracefully, so that it has a chance to send any
    // pending traces before we exit. Shutting down OTEL tracing provider may
    // hang for quite some time, see, for example:
    // - https://github.com/open-telemetry/opentelemetry-rust/issues/868
    // - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
    //
    // Yet, we want computes to shut down fast enough, as we may need a new one
    // for the same timeline ASAP. So wait no longer than 2s for the shutdown to
    // complete, then just error out and exit the main thread.
    info!("shutting down tracing");
    let (sender, receiver) = mpsc::channel();
    let _ = thread::spawn(move || {
        tracing_utils::shutdown_tracing();
        sender.send(()).ok()
    });
    let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
    if shutdown_res.is_err() {
        error!("timed out while shutting down tracing, exiting anyway");
    }

    info!("shutting down");
    exit(exit_code.unwrap_or(1))
}

/// When compute_ctl is killed, also send a termination signal to sync-safekeepers
/// to prevent leaking it. TODO: it would be better to convert compute_ctl to async
/// and simply wait for termination.
fn handle_exit_signal(sig: i32) {
    info!("received {sig} termination signal");
    forward_termination_signal();
    exit(1);
}

#[cfg(test)]
mod test {
    use clap::CommandFactory;

    use super::Cli;

    #[test]
    fn verify_cli() {
        Cli::command().debug_assert()
    }
}