Line data Source code
1 : //! The ComputeSpec contains all the information needed to start up
2 : //! the right version of PostgreSQL, and connect it to the storage nodes.
3 : //! It can be passed as part of the `config.json`, or the control plane can
4 : //! provide it by calling compute_ctl's configuration endpoint, or
5 : //! compute_ctl can fetch it by calling the control plane's API.
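//!
//! For illustration, a minimal sketch of loading a spec with serde (the file
//! name here is a placeholder, not a fixed path):
//!
//! ```ignore
//! let text = std::fs::read_to_string("spec.json")?;
//! let spec: ComputeSpec = serde_json::from_str(&text)?;
//! ```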
6 : use std::collections::HashMap;
7 : use std::fmt::Display;
8 :
9 : use anyhow::anyhow;
10 : use indexmap::IndexMap;
11 : use regex::Regex;
12 : use remote_storage::RemotePath;
13 : use serde::{Deserialize, Serialize};
14 : use url::Url;
15 : use utils::id::{TenantId, TimelineId};
16 : use utils::lsn::Lsn;
17 :
18 : use crate::responses::TlsConfig;
19 :
20 : /// String type alias representing a Postgres identifier,
21 : /// intended to be used for DB / role names.
22 : pub type PgIdent = String;
23 :
24 : /// String type alias representing a Postgres extension version
25 : pub type ExtVersion = String;
26 :
27 7 : fn default_reconfigure_concurrency() -> usize {
28 7 : 1
29 7 : }
30 :
31 : /// Cluster spec or configuration represented as an optional number of
32 : /// delta operations + final cluster state description.
33 0 : #[derive(Clone, Debug, Default, Deserialize, Serialize)]
34 : pub struct ComputeSpec {
35 : pub format_version: f32,
36 :
37 : // The control plane also includes a 'timestamp' field in the JSON document,
38 : // but we don't use it for anything. Serde ignores fields that are not present
39 : // in the struct when deserializing.
40 : pub operation_uuid: Option<String>,
41 :
42 : /// Compute features to enable. These feature flags are provided only when we
43 : /// know all the details about the client's compute, so they cannot be used
44 : /// to change `Empty` compute behavior.
45 : #[serde(default)]
46 : pub features: Vec<ComputeFeature>,
47 :
48 : /// If compute_ctl was passed `--resize-swap-on-bind`, a value of `Some(_)` instructs
49 : /// compute_ctl to run `/neonvm/bin/resize-swap` with the given size when the spec is first
50 : /// received.
51 : ///
52 : /// Both this field and `--resize-swap-on-bind` are required, so that the control plane's
53 : /// spec generation doesn't need to be aware of the actual compute it's running on, while
54 : /// guaranteeing gradual rollout of swap. Otherwise, without `--resize-swap-on-bind`, we could
55 : /// end up trying to resize swap in VMs without it -- or end up *not* resizing swap, thus
56 : /// giving every VM much more swap than it should have (32GiB).
57 : ///
58 : /// Eventually we may remove `--resize-swap-on-bind` and exclusively use `swap_size_bytes` for
59 : /// enabling the swap resizing behavior once rollout is complete.
60 : ///
61 : /// See neondatabase/cloud#12047 for more.
62 : #[serde(default)]
63 : pub swap_size_bytes: Option<u64>,
64 :
65 : /// If compute_ctl was passed `--set-disk-quota-for-fs`, a value of `Some(_)` instructs
66 : /// compute_ctl to run `/neonvm/bin/set-disk-quota` with the given size and fs, when the
67 : /// spec is first received.
68 : ///
69 : /// Both this field and `--set-disk-quota-for-fs` are required, so that the control plane's
70 : /// spec generation doesn't need to be aware of the actual compute it's running on, while
71 : /// guaranteeing gradual rollout of disk quota.
72 : #[serde(default)]
73 : pub disk_quota_bytes: Option<u64>,
74 :
75 : /// Disables the vm-monitor behavior that resizes LFC on upscale/downscale, instead relying on
76 : /// the initial size of LFC.
77 : ///
78 : /// This is intended for use when the LFC size is being overridden from the default but
79 : /// autoscaling is still enabled, and we don't want the vm-monitor to interfere with the custom
80 : /// LFC sizing.
81 : #[serde(default)]
82 : pub disable_lfc_resizing: Option<bool>,
83 :
84 : /// Expected cluster state at the end of transition process.
85 : pub cluster: Cluster,
86 : pub delta_operations: Option<Vec<DeltaOp>>,
87 :
88 : /// An optional hint that can be passed to speed up startup time if we know
89 : /// that no pg catalog mutations (like role creation, database creation,
90 : /// extension creation) need to be done on the actual database to start.
91 : #[serde(default)] // Default false
92 : pub skip_pg_catalog_updates: bool,
93 :
94 : // Information needed to connect to the storage layer.
95 : //
96 : // `tenant_id`, `timeline_id` and `pageserver_connstring` are always needed.
97 : //
98 : // Depending on `mode`, this can be a primary read-write node, a read-only
99 : // replica, or a read-only node pinned at an older LSN.
100 : // `safekeeper_connstrings` must be set for a primary.
101 : //
102 : // For backwards compatibility, the control plane may leave out all of
103 : // these, and instead set the "neon.tenant_id", "neon.timeline_id",
104 : // etc. GUCs in cluster.settings. TODO: Once the control plane has been
105 : // updated to fill these fields, we can make these non optional.
106 : pub tenant_id: Option<TenantId>,
107 : pub timeline_id: Option<TimelineId>,
108 : pub pageserver_connstring: Option<String>,
109 :
110 : // More Neon IDs that we expose to compute_ctl
111 : // and to Postgres as neon extension GUCs.
112 : pub project_id: Option<String>,
113 : pub branch_id: Option<String>,
114 : pub endpoint_id: Option<String>,
115 :
116 : /// Safekeeper membership config generation. It is put into the
117 : /// neon.safekeepers GUC and serves two purposes:
118 : /// 1) A non-zero value forces walproposer to use membership configurations.
119 : /// 2) If walproposer wants to update the list of safekeepers to connect to,
120 : /// taking them from some safekeeper mconf, it should check which value
121 : /// is newer by comparing the generations.
122 : ///
123 : /// Note: it could be SafekeeperGeneration, but this needs linking
124 : /// compute_ctl with postgres_ffi.
125 : #[serde(default)]
126 : pub safekeepers_generation: Option<u32>,
127 : #[serde(default)]
128 : pub safekeeper_connstrings: Vec<String>,
129 :
130 : #[serde(default)]
131 : pub mode: ComputeMode,
132 :
133 : /// If set, 'storage_auth_token' is used as the password to authenticate to
134 : /// the pageserver and safekeepers.
135 : pub storage_auth_token: Option<String>,
136 :
137 : // information about available remote extensions
138 : pub remote_extensions: Option<RemoteExtSpec>,
139 :
140 : pub pgbouncer_settings: Option<IndexMap<String, String>>,
141 :
142 : // Stripe size for pageserver sharding, in pages
143 : #[serde(default)]
144 : pub shard_stripe_size: Option<usize>,
145 :
146 : /// Local Proxy configuration used for JWT authentication
147 : #[serde(default)]
148 : pub local_proxy_config: Option<LocalProxySpec>,
149 :
150 : /// Number of concurrent connections during the parallel RunInEachDatabase
151 : /// phase of the apply config process.
152 : ///
153 : /// We may need higher concurrency during reconfiguration when there are many DBs,
154 : /// but the instance is already running and in use by clients. We can easily run into the
155 : /// `max_connections` limit, and the current code won't handle that.
156 : ///
157 : /// The default is 1, but the control plane may override this value for specific
158 : /// projects. It's also recommended to bump `superuser_reserved_connections` by
159 : /// `reconfigure_concurrency` for such projects to ensure that we always have
160 : /// enough spare connections for the reconfiguration process to succeed.
161 : #[serde(default = "default_reconfigure_concurrency")]
162 : pub reconfigure_concurrency: usize,
163 :
164 : /// If set to true, compute_ctl will drop all subscriptions before starting the
165 : /// compute. This is needed when we start an endpoint on a branch, so that the child
166 : /// does not compete with the parent branch's subscriptions
167 : /// over the same replication content from the publisher.
168 : #[serde(default)] // Default false
169 : pub drop_subscriptions_before_start: bool,
170 :
171 : /// Log level for compute audit logging
172 : #[serde(default)]
173 : pub audit_log_level: ComputeAudit,
174 :
175 : /// Hostname and port of the OTel collector. Leave empty to disable Postgres log forwarding.
176 : /// Example: config-shy-breeze-123-collector-monitoring.neon-telemetry.svc.cluster.local:10514
177 : pub logs_export_host: Option<String>,
178 :
179 : /// Address of endpoint storage service
180 : pub endpoint_storage_addr: Option<String>,
181 : /// JWT for authorizing requests to endpoint storage service
182 : pub endpoint_storage_token: Option<String>,
183 :
184 : #[serde(default)]
185 : /// Download LFC state from endpoint storage and pass it to Postgres on compute startup
186 : pub autoprewarm: bool,
187 :
188 : #[serde(default)]
189 : /// Upload LFC state to endpoint storage periodically. Default value (None) means "don't upload"
190 : pub offload_lfc_interval_seconds: Option<std::num::NonZeroU64>,
191 :
192 : /// Suspend timeout in seconds.
193 : ///
194 : /// We use this value to derive other values, such as the installed extensions metric.
195 : pub suspend_timeout_seconds: i64,
196 : }
197 :
198 : /// Feature flag to signal `compute_ctl` to enable certain experimental functionality.
199 0 : #[derive(Serialize, Clone, Copy, Debug, Deserialize, PartialEq, Eq)]
200 : #[serde(rename_all = "snake_case")]
201 : pub enum ComputeFeature {
202 : // XXX: Add more feature flags here.
203 : /// Enable the experimental activity monitor logic, which uses `pg_stat_database` to
204 : /// track short-lived connections as user activity.
205 : ActivityMonitorExperimental,
206 :
207 : /// Enable TLS functionality.
208 : TlsExperimental,
209 :
210 : /// This is a special feature flag that is used to represent unknown feature flags.
211 : /// Any flag unknown to the enum is represented as this one. See unit test
212 : /// `parse_unknown_features()` for more details.
213 : #[serde(other)]
214 : UnknownFeature,
215 : }
216 :
217 0 : #[derive(Clone, Debug, Default, Deserialize, Serialize)]
218 : pub struct RemoteExtSpec {
219 : pub public_extensions: Option<Vec<String>>,
220 : pub custom_extensions: Option<Vec<String>>,
221 : pub library_index: HashMap<String, String>,
222 : pub extension_data: HashMap<String, ExtensionData>,
223 : }
224 :
225 0 : #[derive(Clone, Debug, Serialize, Deserialize)]
226 : pub struct ExtensionData {
227 : pub control_data: HashMap<String, String>,
228 : pub archive_path: String,
229 : }
230 :
231 : impl RemoteExtSpec {
232 7 : pub fn get_ext(
233 7 : &self,
234 7 : ext_name: &str,
235 7 : is_library: bool,
236 7 : build_tag: &str,
237 7 : pg_major_version: &str,
238 7 : ) -> anyhow::Result<(String, RemotePath)> {
239 7 : let mut real_ext_name = ext_name;
240 7 : if is_library {
241 : // sometimes library names might have a suffix like
242 : // library.so or library.so.3. We strip this off
243 : // because library_index is based on the name without the file extension
244 1 : let strip_lib_suffix = Regex::new(r"\.so.*").unwrap();
245 1 : let lib_raw_name = strip_lib_suffix.replace(real_ext_name, "").to_string();
246 :
247 1 : real_ext_name = self
248 1 : .library_index
249 1 : .get(&lib_raw_name)
250 1 : .ok_or(anyhow::anyhow!("library {} is not found", lib_raw_name))?;
251 6 : }
252 :
253 : // Check if the extension is present in the public or custom extension lists.
254 : // If not, then it is not allowed to be used by this compute.
255 7 : if !self
256 7 : .public_extensions
257 7 : .as_ref()
258 7 : .is_some_and(|exts| exts.iter().any(|e| e == real_ext_name))
259 4 : && !self
260 4 : .custom_extensions
261 4 : .as_ref()
262 4 : .is_some_and(|exts| exts.iter().any(|e| e == real_ext_name))
263 : {
264 3 : return Err(anyhow::anyhow!("extension {} is not found", real_ext_name));
265 4 : }
266 :
267 4 : match self.extension_data.get(real_ext_name) {
268 4 : Some(_ext_data) => Ok((
269 4 : real_ext_name.to_string(),
270 4 : Self::build_remote_path(build_tag, pg_major_version, real_ext_name)?,
271 : )),
272 0 : None => Err(anyhow::anyhow!(
273 0 : "real_ext_name {} is not found",
274 0 : real_ext_name
275 0 : )),
276 : }
277 7 : }
278 :
279 : /// Get the architecture-specific portion of the remote extension path. We
280 : /// use the Go naming convention due to Kubernetes.
281 5 : fn get_arch() -> &'static str {
282 5 : match std::env::consts::ARCH {
283 5 : "x86_64" => "amd64",
284 0 : "aarch64" => "arm64",
285 0 : arch => arch,
286 : }
287 5 : }
288 :
289 : /// Build a [`RemotePath`] for an extension.
290 5 : fn build_remote_path(
291 5 : build_tag: &str,
292 5 : pg_major_version: &str,
293 5 : ext_name: &str,
294 5 : ) -> anyhow::Result<RemotePath> {
295 5 : let arch = Self::get_arch();
296 :
297 : // Construct the path to the extension archive
298 : // BUILD_TAG/ARCH/PG_MAJOR_VERSION/extensions/EXTENSION_NAME.tar.zst
299 : //
300 : // Keep it in sync with path generation in
301 : // https://github.com/neondatabase/build-custom-extensions/tree/main
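//
// Illustrative example (values are placeholders): build tag "1234" on an
// x86_64 host with pg_major_version "v17" and extension "pgvector" yields
// "1234/amd64/v17/extensions/pgvector.tar.zst".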
302 5 : RemotePath::from_string(&format!(
303 5 : "{build_tag}/{arch}/{pg_major_version}/extensions/{ext_name}.tar.zst"
304 5 : ))
305 5 : }
306 : }
307 :
308 0 : #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
309 : pub enum ComputeMode {
310 : /// A read-write node
311 : #[default]
312 : Primary,
313 : /// A read-only node, pinned at a particular LSN
314 : Static(Lsn),
315 : /// A read-only node that follows the tip of the branch in hot standby mode
316 : ///
317 : /// Future versions may want to distinguish between replicas with hot standby
318 : /// feedback and other kinds of replication configurations.
319 : Replica,
320 : }
321 :
322 : impl ComputeMode {
323 : /// Convert the compute mode to a string that can be used to identify the type of compute,
324 : /// which means that if it's a static compute, the LSN will not be included.
325 0 : pub fn to_type_str(&self) -> &'static str {
326 0 : match self {
327 0 : ComputeMode::Primary => "primary",
328 0 : ComputeMode::Static(_) => "static",
329 0 : ComputeMode::Replica => "replica",
330 : }
331 0 : }
332 : }
333 :
334 : /// Log level for audit logging
335 0 : #[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
336 : pub enum ComputeAudit {
337 : #[default]
338 : Disabled,
339 : // Deprecated, use Base instead
340 : Log,
341 : // (pgaudit.log = 'ddl', pgaudit.log_parameter='off')
342 : // logged to the standard postgresql log stream
343 : Base,
344 : // Deprecated, use Full or Extended instead
345 : Hipaa,
346 : // (pgaudit.log = 'all, -misc', pgaudit.log_parameter='off')
347 : // logged to separate files collected by rsyslog
348 : // into dedicated log storage with strict access
349 : Extended,
350 : // (pgaudit.log='all', pgaudit.log_parameter='on'),
351 : // logged to separate files collected by rsyslog
352 : // into dedicated log storage with strict access.
353 : Full,
354 : }
355 :
356 0 : #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
357 : pub struct Cluster {
358 : pub cluster_id: Option<String>,
359 : pub name: Option<String>,
360 : pub state: Option<String>,
361 : pub roles: Vec<Role>,
362 : pub databases: Vec<Database>,
363 :
364 : /// Desired contents of 'postgresql.conf' file. (The 'compute_ctl'
365 : /// tool may add additional settings to the final file.)
366 : pub postgresql_conf: Option<String>,
367 :
368 : /// Additional settings that will be appended to the 'postgresql.conf' file.
369 : pub settings: GenericOptions,
370 : }
371 :
372 : /// Single cluster-state-changing operation that cannot be represented as
373 : /// a static `Cluster` structure. For example:
374 : /// - DROP DATABASE
375 : /// - DROP ROLE
376 : /// - ALTER ROLE name RENAME TO new_name
377 : /// - ALTER DATABASE name RENAME TO new_name
378 0 : #[derive(Clone, Debug, Deserialize, Serialize)]
379 : pub struct DeltaOp {
380 : pub action: String,
381 : pub name: PgIdent,
382 : pub new_name: Option<PgIdent>,
383 : }
384 :
385 : /// Rust representation of Postgres role info with only those fields
386 : /// that matter for us.
387 0 : #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
388 : pub struct Role {
389 : pub name: PgIdent,
390 : pub encrypted_password: Option<String>,
391 : pub options: GenericOptions,
392 : }
393 :
394 : /// Rust representation of Postgres database info with only those fields
395 : /// that matter for us.
396 0 : #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
397 : pub struct Database {
398 : pub name: PgIdent,
399 : pub owner: PgIdent,
400 : pub options: GenericOptions,
401 : // These are derived flags, not present in the spec file.
402 : // They are never set by the control plane.
403 : #[serde(skip_deserializing, default)]
404 : pub restrict_conn: bool,
405 : #[serde(skip_deserializing, default)]
406 : pub invalid: bool,
407 : }
408 :
409 : /// Common type representing both SQL statement params, with or without a value,
410 : /// like `LOGIN` or `OWNER username` in `CREATE/ALTER ROLE`, and config
411 : /// options like `wal_level = logical`.
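///
/// A minimal sketch of the serialized form (the `vartype` value shown is an
/// illustrative assumption): `{"name": "wal_level", "value": "logical", "vartype": "string"}`.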
412 0 : #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
413 : pub struct GenericOption {
414 : pub name: String,
415 : pub value: Option<String>,
416 : pub vartype: String,
417 : }
418 :
419 : /// Postgres compute TLS settings.
420 0 : #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
421 : pub struct PgComputeTlsSettings {
422 : // Absolute path to the certificate file for server-side TLS.
423 : pub cert_file: String,
424 : // Absolute path to the private key file for server-side TLS.
425 : pub key_file: String,
426 : // Absolute path to the certificate authority file for verifying client certificates.
427 : pub ca_file: String,
428 : }
429 :
430 : /// Databricks-specific options for the compute instance.
431 : /// This is used to store any other settings that need to be propagated to the compute
432 : /// but should not be persisted to the ComputeSpec in the database.
433 0 : #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
434 : pub struct DatabricksSettings {
435 : pub pg_compute_tls_settings: PgComputeTlsSettings,
436 : // Absolute file path to databricks_pg_hba.conf file.
437 : pub databricks_pg_hba: String,
438 : // Absolute file path to databricks_pg_ident.conf file.
439 : pub databricks_pg_ident: String,
440 : // Hostname portion of the Databricks workspace URL of the endpoint, or empty string if not known.
441 : // A valid hostname is required for the compute instance to support PAT logins.
442 : pub databricks_workspace_host: String,
443 : }
444 :
445 : /// Optional collection of `GenericOption`'s. Type alias allows us to
446 : /// declare a `trait` on it.
447 : pub type GenericOptions = Option<Vec<GenericOption>>;
448 :
449 : /// Configures the local_proxy application with the relevant JWKS and roles it should
450 : /// use for authorizing connection requests using JWT.
451 0 : #[derive(Clone, Debug, Deserialize, Serialize)]
452 : pub struct LocalProxySpec {
453 : #[serde(default)]
454 : #[serde(skip_serializing_if = "Option::is_none")]
455 : pub jwks: Option<Vec<JwksSettings>>,
456 : #[serde(default)]
457 : #[serde(skip_serializing_if = "Option::is_none")]
458 : pub tls: Option<TlsConfig>,
459 : }
460 :
461 0 : #[derive(Clone, Debug, Deserialize, Serialize)]
462 : pub struct JwksSettings {
463 : pub id: String,
464 : pub role_names: Vec<String>,
465 : pub jwks_url: String,
466 : pub provider_name: String,
467 : pub jwt_audience: Option<String>,
468 : }
469 :
470 : /// Protocol used to connect to a Pageserver. Parsed from the connstring scheme.
471 : #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
472 : pub enum PageserverProtocol {
473 : /// The original protocol based on libpq and COPY. Uses postgresql:// or postgres:// scheme.
474 : #[default]
475 : Libpq,
476 : /// A newer, gRPC-based protocol. Uses grpc:// scheme.
477 : Grpc,
478 : }
479 :
480 : impl PageserverProtocol {
481 : /// Parses the protocol from a connstring scheme. Defaults to Libpq if no scheme is given.
482 : /// Errors if the connstring is an invalid URL.
483 0 : pub fn from_connstring(connstring: &str) -> anyhow::Result<Self> {
484 0 : let scheme = match Url::parse(connstring) {
485 0 : Ok(url) => url.scheme().to_lowercase(),
486 0 : Err(url::ParseError::RelativeUrlWithoutBase) => return Ok(Self::default()),
487 0 : Err(err) => return Err(anyhow!("invalid connstring URL: {err}")),
488 : };
489 0 : match scheme.as_str() {
490 0 : "postgresql" | "postgres" => Ok(Self::Libpq),
491 0 : "grpc" => Ok(Self::Grpc),
492 0 : scheme => Err(anyhow!("invalid protocol scheme: {scheme}")),
493 : }
494 0 : }
495 :
496 : /// Returns the URL scheme for the protocol, for use in connstrings.
497 0 : pub fn scheme(&self) -> &'static str {
498 0 : match self {
499 0 : Self::Libpq => "postgresql",
500 0 : Self::Grpc => "grpc",
501 : }
502 0 : }
503 : }
504 :
505 : impl Display for PageserverProtocol {
506 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
507 0 : f.write_str(self.scheme())
508 0 : }
509 : }
510 :
511 : #[cfg(test)]
512 : mod tests {
513 : use std::fs::File;
514 :
515 : use super::*;
516 :
517 : #[test]
518 1 : fn allow_installing_remote_extensions() {
519 1 : let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
520 1 : "public_extensions": null,
521 1 : "custom_extensions": null,
522 1 : "library_index": {},
523 1 : "extension_data": {},
524 : }))
525 1 : .unwrap();
526 :
527 1 : rspec
528 1 : .get_ext("ext", false, "latest", "v17")
529 1 : .expect_err("Extension should not be found");
530 :
531 1 : let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
532 1 : "public_extensions": [],
533 1 : "custom_extensions": null,
534 1 : "library_index": {},
535 1 : "extension_data": {},
536 : }))
537 1 : .unwrap();
538 :
539 1 : rspec
540 1 : .get_ext("ext", false, "latest", "v17")
541 1 : .expect_err("Extension should not be found");
542 :
543 1 : let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
544 1 : "public_extensions": [],
545 1 : "custom_extensions": [],
546 1 : "library_index": {
547 1 : "ext": "ext"
548 : },
549 1 : "extension_data": {
550 1 : "ext": {
551 1 : "control_data": {
552 1 : "ext.control": ""
553 : },
554 1 : "archive_path": ""
555 : }
556 : },
557 : }))
558 1 : .unwrap();
559 :
560 1 : rspec
561 1 : .get_ext("ext", false, "latest", "v17")
562 1 : .expect_err("Extension should not be found");
563 :
564 1 : let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
565 1 : "public_extensions": [],
566 1 : "custom_extensions": ["ext"],
567 1 : "library_index": {
568 1 : "ext": "ext"
569 : },
570 1 : "extension_data": {
571 1 : "ext": {
572 1 : "control_data": {
573 1 : "ext.control": ""
574 : },
575 1 : "archive_path": ""
576 : }
577 : },
578 : }))
579 1 : .unwrap();
580 :
581 1 : rspec
582 1 : .get_ext("ext", false, "latest", "v17")
583 1 : .expect("Extension should be found");
584 :
585 1 : let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
586 1 : "public_extensions": ["ext"],
587 1 : "custom_extensions": [],
588 1 : "library_index": {
589 1 : "extlib": "ext",
590 : },
591 1 : "extension_data": {
592 1 : "ext": {
593 1 : "control_data": {
594 1 : "ext.control": ""
595 : },
596 1 : "archive_path": ""
597 : }
598 : },
599 : }))
600 1 : .unwrap();
601 :
602 1 : rspec
603 1 : .get_ext("ext", false, "latest", "v17")
604 1 : .expect("Extension should be found");
605 :
606 : // test library index for the case when library name
607 : // doesn't match the extension name
608 1 : rspec
609 1 : .get_ext("extlib", true, "latest", "v17")
610 1 : .expect("Library should be found");
611 1 : }
612 :
613 : #[test]
614 1 : fn remote_extension_path() {
615 1 : let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
616 1 : "public_extensions": ["ext"],
617 1 : "custom_extensions": [],
618 1 : "library_index": {
619 1 : "extlib": "ext",
620 : },
621 1 : "extension_data": {
622 1 : "ext": {
623 1 : "control_data": {
624 1 : "ext.control": ""
625 : },
626 1 : "archive_path": ""
627 : }
628 : },
629 : }))
630 1 : .unwrap();
631 :
632 1 : let (_ext_name, ext_path) = rspec
633 1 : .get_ext("ext", false, "latest", "v17")
634 1 : .expect("Extension should be found");
635 : // Starting with a forward slash would have consequences for the
636 : // Url::join() that occurs when downloading a remote extension.
637 1 : assert!(!ext_path.to_string().starts_with("/"));
638 1 : assert_eq!(
639 : ext_path,
640 1 : RemoteExtSpec::build_remote_path("latest", "v17", "ext").unwrap()
641 : );
642 1 : }
643 :
644 : #[test]
645 1 : fn parse_spec_file() {
646 1 : let file = File::open("tests/cluster_spec.json").unwrap();
647 1 : let spec: ComputeSpec = serde_json::from_reader(file).unwrap();
648 :
649 : // Features list defaults to empty vector.
650 1 : assert!(spec.features.is_empty());
651 :
652 : // Reconfigure concurrency defaults to 1.
653 1 : assert_eq!(spec.reconfigure_concurrency, 1);
654 1 : }
655 :
656 : #[test]
657 1 : fn parse_unknown_fields() {
658 : // Forward compatibility test
659 1 : let file = File::open("tests/cluster_spec.json").unwrap();
660 1 : let mut json: serde_json::Value = serde_json::from_reader(file).unwrap();
661 1 : let ob = json.as_object_mut().unwrap();
662 1 : ob.insert("unknown_field_123123123".into(), "hello".into());
663 1 : let _spec: ComputeSpec = serde_json::from_value(json).unwrap();
664 1 : }
665 :
666 : #[test]
667 1 : fn parse_unknown_features() {
668 : // Test that unknown feature flags do not cause any errors.
669 1 : let file = File::open("tests/cluster_spec.json").unwrap();
670 1 : let mut json: serde_json::Value = serde_json::from_reader(file).unwrap();
671 1 : let ob = json.as_object_mut().unwrap();
672 :
673 : // Add unknown feature flags.
674 1 : let features = vec!["foo_bar_feature", "baz_feature"];
675 1 : ob.insert("features".into(), features.into());
676 :
677 1 : let spec: ComputeSpec = serde_json::from_value(json).unwrap();
678 :
679 1 : assert!(spec.features.len() == 2);
680 1 : assert!(spec.features.contains(&ComputeFeature::UnknownFeature));
681 1 : assert_eq!(spec.features, vec![ComputeFeature::UnknownFeature; 2]);
682 1 : }
683 :
684 : #[test]
685 1 : fn parse_known_features() {
686 : // Test that we can properly parse known feature flags.
687 1 : let file = File::open("tests/cluster_spec.json").unwrap();
688 1 : let mut json: serde_json::Value = serde_json::from_reader(file).unwrap();
689 1 : let ob = json.as_object_mut().unwrap();
690 :
691 : // Add known feature flags.
692 1 : let features = vec!["activity_monitor_experimental"];
693 1 : ob.insert("features".into(), features.into());
694 :
695 1 : let spec: ComputeSpec = serde_json::from_value(json).unwrap();
696 :
697 1 : assert_eq!(
698 : spec.features,
699 1 : vec![ComputeFeature::ActivityMonitorExperimental]
700 : );
701 1 : }
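
// Illustrative sketch: exercises `PageserverProtocol::from_connstring` and the
// `Display` impl defined above; the connstrings used here are placeholders.
#[test]
fn parse_pageserver_protocol() {
// Explicit schemes map to the corresponding protocol.
assert_eq!(
PageserverProtocol::from_connstring("postgresql://localhost:6400").unwrap(),
PageserverProtocol::Libpq
);
assert_eq!(
PageserverProtocol::from_connstring("grpc://localhost:51051").unwrap(),
PageserverProtocol::Grpc
);

// A connstring without a URL scheme falls back to the default (libpq).
assert_eq!(
PageserverProtocol::from_connstring("host=localhost port=6400").unwrap(),
PageserverProtocol::Libpq
);

// Unknown schemes are rejected.
assert!(PageserverProtocol::from_connstring("http://localhost:6400").is_err());

// Display renders the URL scheme.
assert_eq!(PageserverProtocol::Grpc.to_string(), "grpc");
}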
702 : }