//!
//! Postgres wrapper (`compute_ctl`) is intended to be run as a Docker entrypoint or as a `systemd`
//! `ExecStart` option. It will handle all the `Neon` specifics during compute node
//! initialization:
//! - `compute_ctl` accepts cluster (compute node) specification as a JSON file.
//! - Every start is a fresh start, so the data directory is removed and
//!   initialized again on each run.
//! - If `remote_ext_config` is provided, it is used to fetch the extensions list
//!   and download `shared_preload_libraries` from remote storage.
//! - Next it will put configuration files into the `PGDATA` directory.
//! - Sync safekeepers and get the commit LSN.
//! - Get a `basebackup` from the pageserver, using the LSN returned in the previous step.
//! - Try to start `postgres` and wait until it is ready to accept connections.
//! - Check and alter/drop/create roles and databases.
//! - Wait for the `postmaster` process to exit.
//!
//! Also `compute_ctl` spawns two separate service threads:
//! - `compute-monitor` checks the last Postgres activity timestamp and saves it
//!   into the shared `ComputeNode`;
//! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and
//!   last-activity requests.
//!
//! If the `AUTOSCALING` environment variable is set, `compute_ctl` will start the
//! `vm-monitor` located in [`neon/libs/vm_monitor`]. For VM compute nodes,
//! `vm-monitor` communicates with the VM autoscaling system. It coordinates
//! downscaling and requests immediate upscaling under resource pressure.
//!
//! Usage example:
//! ```sh
//! compute_ctl -D /var/db/postgres/compute \
//!             -C 'postgresql://cloud_admin@localhost/postgres' \
//!             -c /var/db/postgres/configs/config.json \
//!             -b /usr/local/bin/postgres \
//!             -r http://pg-ext-s3-gateway
//! ```
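//!
//! Alternatively, instead of passing a config file with `-c`, the config can be
//! fetched from the control plane by passing the compute ID and the control plane
//! API base URL. A sketch of such an invocation (the compute ID and URL below are
//! placeholders, not real values):
//! ```sh
//! compute_ctl -D /var/db/postgres/compute \
//!             -C 'postgresql://cloud_admin@localhost/postgres' \
//!             -i compute-example-123456 \
//!             -p http://control-plane.example.internal \
//!             -b /usr/local/bin/postgres
//! ```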
use std::ffi::OsString;
use std::fs::File;
use std::process::exit;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

use anyhow::{Context, Result};
use clap::Parser;
use compute_api::responses::ComputeConfig;
use compute_tools::compute::{
    BUILD_TAG, ComputeNode, ComputeNodeParams, forward_termination_signal,
};
use compute_tools::extension_server::get_pg_version_string;
use compute_tools::logger::*;
use compute_tools::params::*;
use compute_tools::spec::*;
use rlimit::{Resource, setrlimit};
use signal_hook::consts::{SIGINT, SIGQUIT, SIGTERM};
use signal_hook::iterator::Signals;
use tracing::{error, info};
use url::Url;
use utils::failpoint_support;

// Compatibility hack: if the control plane passed a `remote-ext-config` value
// that is not an HTTP URL, fall back to the default extension storage proxy
// gateway. Remove this once the control plane is updated to always pass the
// gateway URL.
fn parse_remote_ext_config(arg: &str) -> Result<String> {
    if arg.starts_with("http") {
        Ok(arg.trim_end_matches('/').to_string())
    } else {
        Ok("http://pg-ext-s3-gateway".to_string())
    }
}

#[derive(Parser)]
#[command(rename_all = "kebab-case")]
struct Cli {
    #[arg(short = 'b', long, default_value = "postgres", env = "POSTGRES_PATH")]
    pub pgbin: String,

    #[arg(short = 'r', long, value_parser = parse_remote_ext_config)]
    pub remote_ext_config: Option<String>,

    /// The port to bind the external listening HTTP server to. Clients running
    /// outside the compute will talk to the compute through this port. Keep
    /// the previous name for this argument around for a smoother release
    /// with the control plane.
    #[arg(long, default_value_t = 3080)]
    pub external_http_port: u16,

    /// The port to bind the internal listening HTTP server to. Clients include
    /// the neon extension (for installing remote extensions) and local_proxy.
    #[arg(long, default_value_t = 3081)]
    pub internal_http_port: u16,

    #[arg(short = 'D', long, value_name = "DATADIR")]
    pub pgdata: String,

    #[arg(short = 'C', long, value_name = "DATABASE_URL")]
    pub connstr: String,

    #[cfg(target_os = "linux")]
    #[arg(long, default_value = "neon-postgres")]
    pub cgroup: String,

    #[cfg(target_os = "linux")]
    #[arg(
        long,
        default_value = "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable application_name=vm-monitor"
    )]
    pub filecache_connstr: String,

    #[cfg(target_os = "linux")]
    #[arg(long, default_value = "0.0.0.0:10301")]
    pub vm_monitor_addr: String,

    #[arg(long, action = clap::ArgAction::SetTrue)]
    pub resize_swap_on_bind: bool,

    #[arg(long)]
    pub set_disk_quota_for_fs: Option<String>,

    // TODO(tristan957): remove alias after compatibility tests are no longer
    // an issue
    #[arg(short = 'c', long, alias = "spec-path")]
    pub config: Option<OsString>,

    #[arg(short = 'i', long, group = "compute-id")]
    pub compute_id: String,

    #[arg(
        short = 'p',
        long,
        conflicts_with = "config",
        value_name = "CONTROL_PLANE_API_BASE_URL",
        requires = "compute-id"
    )]
    pub control_plane_uri: Option<String>,
}

fn main() -> Result<()> {
    let cli = Cli::parse();

    let scenario = failpoint_support::init();

    // For historical reasons, the main thread that processes the spec and launches postgres
    // is synchronous, but we always have this tokio runtime available and we "enter" it so
    // that you can use tokio::spawn() and tokio::runtime::Handle::current().block_on(...)
    // from all parts of compute_ctl.
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()?;
    let _rt_guard = runtime.enter();

    runtime.block_on(init())?;

    // enable core dumping for all child processes
    setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?;

    let connstr = Url::parse(&cli.connstr).context("cannot parse connstr as a URL")?;

    let cli_spec = get_config(&cli)?;

    let compute_node = ComputeNode::new(
        ComputeNodeParams {
            compute_id: cli.compute_id,
            connstr,
            pgdata: cli.pgdata.clone(),
            pgbin: cli.pgbin.clone(),
            pgversion: get_pg_version_string(&cli.pgbin),
            external_http_port: cli.external_http_port,
            internal_http_port: cli.internal_http_port,
            ext_remote_storage: cli.remote_ext_config.clone(),
            resize_swap_on_bind: cli.resize_swap_on_bind,
            set_disk_quota_for_fs: cli.set_disk_quota_for_fs,
            #[cfg(target_os = "linux")]
            filecache_connstr: cli.filecache_connstr,
            #[cfg(target_os = "linux")]
            cgroup: cli.cgroup,
            #[cfg(target_os = "linux")]
            vm_monitor_addr: cli.vm_monitor_addr,
        },
        cli_spec.spec,
        cli_spec.compute_ctl_config,
    )?;

    let exit_code = compute_node.run()?;

    scenario.teardown();

    deinit_and_exit(exit_code);
}

async fn init() -> Result<()> {
    init_tracing_and_logging(DEFAULT_LOG_LEVEL).await?;

    let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?;
    thread::spawn(move || {
        for sig in signals.forever() {
            handle_exit_signal(sig);
        }
    });

    info!("compute build_tag: {}", &BUILD_TAG.to_string());

    Ok(())
}

fn get_config(cli: &Cli) -> Result<ComputeConfig> {
    // First, read the config from the path, if one was provided.
    if let Some(ref config) = cli.config {
        let file = File::open(config)?;
        return Ok(serde_json::from_reader(&file)?);
    }

    // If the config wasn't provided as a CLI argument, retrieve it from the
    // control plane. This relies on `--control-plane-uri` having been passed;
    // otherwise the `unwrap()` below panics.
    match get_config_from_control_plane(cli.control_plane_uri.as_ref().unwrap(), &cli.compute_id) {
        Ok(config) => Ok(config),
        Err(e) => {
            error!(
                "cannot get response from control plane: {}\n\
                neither spec nor confirmation that compute is in the Empty state was received",
                e
            );
            Err(e)
        }
    }
}

fn deinit_and_exit(exit_code: Option<i32>) -> ! {
    // Shut down the trace pipeline gracefully, so that it has a chance to send any
    // pending traces before we exit. Shutting down the OTEL tracing provider may
    // hang for quite some time, see, for example:
    // - https://github.com/open-telemetry/opentelemetry-rust/issues/868
    // - and our problems with staging https://github.com/neondatabase/cloud/issues/3707#issuecomment-1493983636
    //
    // Yet, we want computes to shut down fast enough, as we may need a new one
    // for the same timeline ASAP. So wait no longer than 2s for the shutdown to
    // complete, then just error out and exit the main thread.
    info!("shutting down tracing");
    let (sender, receiver) = mpsc::channel();
    let _ = thread::spawn(move || {
        tracing_utils::shutdown_tracing();
        sender.send(()).ok()
    });
    let shutdown_res = receiver.recv_timeout(Duration::from_millis(2000));
    if shutdown_res.is_err() {
        error!("timed out while shutting down tracing, exiting anyway");
    }

    info!("shutting down");
    exit(exit_code.unwrap_or(1))
}

/// When compute_ctl is killed, also send a termination signal to sync-safekeepers
/// to prevent leaking them. TODO: it would be better to convert compute_ctl to
/// async and wait for termination, which would then be straightforward.
fn handle_exit_signal(sig: i32) {
    info!("received {sig} termination signal");
    forward_termination_signal();
    exit(1);
}

#[cfg(test)]
mod test {
    use clap::CommandFactory;

    use super::Cli;

    #[test]
    fn verify_cli() {
        Cli::command().debug_assert()
    }
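
    // The two tests below are illustrative sketches added alongside `verify_cli`;
    // they exercise behavior visible in this file: the `parse_remote_ext_config`
    // compatibility hack and the CLI flags shown in the module docs. All argument
    // values are placeholders.
    #[test]
    fn verify_remote_ext_config_compat() {
        // Proper URLs are passed through, with any trailing slash trimmed...
        assert_eq!(
            super::parse_remote_ext_config("http://pg-ext-s3-gateway/").unwrap(),
            "http://pg-ext-s3-gateway"
        );
        // ...while any non-URL value falls back to the default gateway.
        assert_eq!(
            super::parse_remote_ext_config("legacy-value").unwrap(),
            "http://pg-ext-s3-gateway"
        );
    }

    #[test]
    fn verify_documented_usage_parses() {
        use clap::Parser;

        // Check that an invocation along the lines of the module docs parses.
        let cli = super::Cli::parse_from([
            "compute_ctl",
            "-D", "/var/db/postgres/compute",
            "-C", "postgresql://cloud_admin@localhost/postgres",
            "-c", "/var/db/postgres/configs/config.json",
            "-b", "/usr/local/bin/postgres",
            "-r", "http://pg-ext-s3-gateway",
            "-i", "compute-example-123456",
        ]);
        assert_eq!(cli.pgbin, "/usr/local/bin/postgres");
        assert_eq!(
            cli.remote_ext_config.as_deref(),
            Some("http://pg-ext-s3-gateway")
        );
    }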
}