//! The ComputeSpec contains all the information needed to start up
//! the right version of PostgreSQL, and connect it to the storage nodes.
//! It can be passed as part of the `config.json`, or the control plane can
//! provide it through compute_ctl's HTTP API, or compute_ctl can fetch it
//! by calling the control plane's API.
use std::collections::HashMap;
use std::fmt::Display;

use anyhow::anyhow;
use indexmap::IndexMap;
use regex::Regex;
use remote_storage::RemotePath;
use serde::{Deserialize, Serialize};
use url::Url;
use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn;

use crate::responses::TlsConfig;

/// String type alias representing a Postgres identifier,
/// intended to be used for DB / role names.
pub type PgIdent = String;

/// String type alias representing Postgres extension version
pub type ExtVersion = String;

fn default_reconfigure_concurrency() -> usize {
    1
}

/// Cluster spec or configuration represented as an optional number of
/// delta operations + final cluster state description.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct ComputeSpec {
    pub format_version: f32,

    // The control plane also includes a 'timestamp' field in the JSON document,
    // but we don't use it for anything. Serde will ignore missing fields when
    // deserializing it.
    pub operation_uuid: Option<String>,

    /// Compute features to enable. These feature flags are provided when we
    /// know all the details about the client's compute, so they cannot be
    /// used to change `Empty` compute behavior.
    #[serde(default)]
    pub features: Vec<ComputeFeature>,

    /// If compute_ctl was passed `--resize-swap-on-bind`, a value of `Some(_)` instructs
    /// compute_ctl to run `/neonvm/bin/resize-swap` with the given size, when the spec is
    /// first received.
    ///
    /// Both this field and `--resize-swap-on-bind` are required, so that the control plane's
    /// spec generation doesn't need to be aware of the actual compute it's running on, while
    /// guaranteeing gradual rollout of swap. Otherwise, without `--resize-swap-on-bind`, we could
    /// end up trying to resize swap in VMs without it -- or end up *not* resizing swap, thus
    /// giving every VM much more swap than it should have (32GiB).
    ///
    /// Eventually we may remove `--resize-swap-on-bind` and exclusively use `swap_size_bytes` for
    /// enabling the swap resizing behavior once rollout is complete.
    ///
    /// See neondatabase/cloud#12047 for more.
    #[serde(default)]
    pub swap_size_bytes: Option<u64>,

    /// If compute_ctl was passed `--set-disk-quota-for-fs`, a value of `Some(_)` instructs
    /// compute_ctl to run `/neonvm/bin/set-disk-quota` with the given size and fs, when the
    /// spec is first received.
    ///
    /// Both this field and `--set-disk-quota-for-fs` are required, so that the control plane's
    /// spec generation doesn't need to be aware of the actual compute it's running on, while
    /// guaranteeing gradual rollout of disk quota.
    #[serde(default)]
    pub disk_quota_bytes: Option<u64>,

    /// Disables the vm-monitor behavior that resizes LFC on upscale/downscale, instead relying on
    /// the initial size of LFC.
    ///
    /// This is intended for use when the LFC size is being overridden from the default but
    /// autoscaling is still enabled, and we don't want the vm-monitor to interfere with the custom
    /// LFC sizing.
    #[serde(default)]
    pub disable_lfc_resizing: Option<bool>,

    /// Expected cluster state at the end of the transition process.
    pub cluster: Cluster,
    pub delta_operations: Option<Vec<DeltaOp>>,

    /// An optional hint that can be passed to speed up startup time if we know
    /// that no pg catalog mutations (like role creation, database creation,
    /// extension creation) need to be done on the actual database at startup.
    #[serde(default)] // Default false
    pub skip_pg_catalog_updates: bool,

    // Information needed to connect to the storage layer.
    //
    // `tenant_id`, `timeline_id` and `pageserver_connstring` are always needed.
    //
    // Depending on `mode`, this can be a primary read-write node, a read-only
    // replica, or a read-only node pinned at an older LSN.
    // `safekeeper_connstrings` must be set for a primary.
    //
    // For backwards compatibility, the control plane may leave out all of
    // these, and instead set the "neon.tenant_id", "neon.timeline_id",
    // etc. GUCs in cluster.settings. TODO: Once the control plane has been
    // updated to fill these fields, we can make them non-optional.
    pub tenant_id: Option<TenantId>,
    pub timeline_id: Option<TimelineId>,
    pub pageserver_connstring: Option<String>,

    // More neon ids that we expose to the compute_ctl
    // and to postgres as neon extension GUCs.
    pub project_id: Option<String>,
    pub branch_id: Option<String>,
    pub endpoint_id: Option<String>,

    /// Safekeeper membership config generation. It is put in the
    /// neon.safekeepers GUC and serves two purposes:
    /// 1) A non-zero value forces walproposer to use membership configurations.
    /// 2) If walproposer wants to update the list of safekeepers to connect to,
    ///    taking them from some safekeeper mconf, it should determine which
    ///    value is newer by comparing the generations.
    ///
    /// Note: it could be SafekeeperGeneration, but this would require linking
    /// compute_ctl with postgres_ffi.
    #[serde(default)]
    pub safekeepers_generation: Option<u32>,
    #[serde(default)]
    pub safekeeper_connstrings: Vec<String>,

    #[serde(default)]
    pub mode: ComputeMode,

    /// If set, 'storage_auth_token' is used as the password to authenticate to
    /// the pageserver and safekeepers.
    pub storage_auth_token: Option<String>,

    // Information about available remote extensions.
    pub remote_extensions: Option<RemoteExtSpec>,

    pub pgbouncer_settings: Option<IndexMap<String, String>>,

    // Stripe size for pageserver sharding, in pages.
    #[serde(default)]
    pub shard_stripe_size: Option<usize>,

    /// Local Proxy configuration used for JWT authentication.
    #[serde(default)]
    pub local_proxy_config: Option<LocalProxySpec>,

    /// Number of concurrent connections during the parallel RunInEachDatabase
    /// phase of the apply config process.
    ///
    /// We need higher concurrency during reconfiguration when there are many DBs,
    /// but the instance is already running and in use by clients, so we can easily
    /// exceed the `max_connections` limit, and the current code won't handle that.
    ///
    /// The default is 1, but we also allow the control plane to override this value
    /// for specific projects. It's also recommended to bump
    /// `superuser_reserved_connections` += `reconfigure_concurrency` for such
    /// projects to ensure that we always have enough spare connections for the
    /// reconfiguration process to succeed.
    #[serde(default = "default_reconfigure_concurrency")]
    pub reconfigure_concurrency: usize,

    /// If set to true, compute_ctl will drop all subscriptions before starting the
    /// compute. This is needed when we start an endpoint on a branch, so that the
    /// child branch does not compete with the parent branch's subscriptions
    /// over the same replication content from the publisher.
    #[serde(default)] // Default false
    pub drop_subscriptions_before_start: bool,

    /// Log level for compute audit logging.
    #[serde(default)]
    pub audit_log_level: ComputeAudit,

    /// Hostname and port of the OTel collector. Leave empty to disable Postgres log forwarding.
    /// Example: config-shy-breeze-123-collector-monitoring.neon-telemetry.svc.cluster.local:10514
    pub logs_export_host: Option<String>,

    /// Address of endpoint storage service.
    pub endpoint_storage_addr: Option<String>,
    /// JWT for authorizing requests to endpoint storage service.
    pub endpoint_storage_token: Option<String>,

    /// Download LFC state from endpoint storage and pass it to Postgres on compute startup.
    #[serde(default)]
    pub autoprewarm: bool,

    /// Upload LFC state to endpoint storage periodically. Default value (None) means "don't upload".
    #[serde(default)]
    pub offload_lfc_interval_seconds: Option<std::num::NonZeroU64>,

    /// Suspend timeout in seconds.
    ///
    /// We use this value to derive other values, such as the installed extensions metric.
    pub suspend_timeout_seconds: i64,

    // Databricks-specific options for the compute instance.
    pub databricks_settings: Option<DatabricksSettings>,
}

/// Feature flag to signal `compute_ctl` to enable certain experimental functionality.
#[derive(Serialize, Clone, Copy, Debug, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ComputeFeature {
    // XXX: Add more feature flags here.
    /// Enable the experimental activity monitor logic, which uses `pg_stat_database` to
    /// track short-lived connections as user activity.
    ActivityMonitorExperimental,

    /// Enable TLS functionality.
    TlsExperimental,

    /// This is a special feature flag that is used to represent unknown feature flags.
    /// Basically, all flags unknown to the enum are mapped to this one. See the unit
    /// test `parse_unknown_features()` for more details.
    #[serde(other)]
    UnknownFeature,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct RemoteExtSpec {
    pub public_extensions: Option<Vec<String>>,
    pub custom_extensions: Option<Vec<String>>,
    pub library_index: HashMap<String, String>,
    pub extension_data: HashMap<String, ExtensionData>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExtensionData {
    pub control_data: HashMap<String, String>,
    pub archive_path: String,
}

impl RemoteExtSpec {
    pub fn get_ext(
        &self,
        ext_name: &str,
        is_library: bool,
        build_tag: &str,
        pg_major_version: &str,
    ) -> anyhow::Result<(String, RemotePath)> {
        let mut real_ext_name = ext_name;
        if is_library {
            // Sometimes library names have a suffix like library.so or
            // library.so.3. We strip this off because library_index is keyed
            // by the name without the file extension.
            let strip_lib_suffix = Regex::new(r"\.so.*").unwrap();
            let lib_raw_name = strip_lib_suffix.replace(real_ext_name, "").to_string();

            real_ext_name = self
                .library_index
                .get(&lib_raw_name)
                .ok_or_else(|| anyhow::anyhow!("library {} is not found", lib_raw_name))?;
        }

        // Check if extension is present in public or custom.
        // If not, then it is not allowed to be used by this compute.
        if !self
            .public_extensions
            .as_ref()
            .is_some_and(|exts| exts.iter().any(|e| e == real_ext_name))
            && !self
                .custom_extensions
                .as_ref()
                .is_some_and(|exts| exts.iter().any(|e| e == real_ext_name))
        {
            return Err(anyhow::anyhow!("extension {} is not found", real_ext_name));
        }

        match self.extension_data.get(real_ext_name) {
            Some(_ext_data) => Ok((
                real_ext_name.to_string(),
                Self::build_remote_path(build_tag, pg_major_version, real_ext_name)?,
            )),
            None => Err(anyhow::anyhow!(
                "real_ext_name {} is not found",
                real_ext_name
            )),
        }
    }

    /// Get the architecture-specific portion of the remote extension path. We
    /// use the Go naming convention due to Kubernetes.
    fn get_arch() -> &'static str {
        match std::env::consts::ARCH {
            "x86_64" => "amd64",
            "aarch64" => "arm64",
            arch => arch,
        }
    }

    /// Build a [`RemotePath`] for an extension.
    fn build_remote_path(
        build_tag: &str,
        pg_major_version: &str,
        ext_name: &str,
    ) -> anyhow::Result<RemotePath> {
        let arch = Self::get_arch();

        // Construct the path to the extension archive:
        // BUILD_TAG/ARCH/PG_MAJOR_VERSION/extensions/EXTENSION_NAME.tar.zst
        //
        // Keep it in sync with the path generation in
        // https://github.com/neondatabase/build-custom-extensions/tree/main
        RemotePath::from_string(&format!(
            "{build_tag}/{arch}/{pg_major_version}/extensions/{ext_name}.tar.zst"
        ))
    }
}

#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
pub enum ComputeMode {
    /// A read-write node
    #[default]
    Primary,
    /// A read-only node, pinned at a particular LSN
    Static(Lsn),
    /// A read-only node that follows the tip of the branch in hot standby mode
    ///
    /// Future versions may want to distinguish between replicas with hot standby
    /// feedback and other kinds of replication configurations.
    Replica,
}

impl ComputeMode {
    /// Convert the compute mode to a string that can be used to identify the type of compute,
    /// which means that if it's a static compute, the LSN will not be included.
    pub fn to_type_str(&self) -> &'static str {
        match self {
            ComputeMode::Primary => "primary",
            ComputeMode::Static(_) => "static",
            ComputeMode::Replica => "replica",
        }
    }
}

/// Log level for audit logging
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
pub enum ComputeAudit {
    #[default]
    Disabled,
    // Deprecated, use Base instead
    Log,
    // (pgaudit.log = 'ddl', pgaudit.log_parameter='off'),
    // logged to the standard postgresql log stream
    Base,
    // Deprecated, use Full or Extended instead
    Hipaa,
    // (pgaudit.log = 'all, -misc', pgaudit.log_parameter='off'),
    // logged to separate files collected by rsyslog
    // into dedicated log storage with strict access
    Extended,
    // (pgaudit.log='all', pgaudit.log_parameter='on'),
    // logged to separate files collected by rsyslog
    // into dedicated log storage with strict access.
    Full,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
pub struct Cluster {
    pub cluster_id: Option<String>,
    pub name: Option<String>,
    pub state: Option<String>,
    pub roles: Vec<Role>,
    pub databases: Vec<Database>,

    /// Desired contents of 'postgresql.conf' file. (The 'compute_ctl'
    /// tool may add additional settings to the final file.)
    pub postgresql_conf: Option<String>,

    /// Additional settings that will be appended to the 'postgresql.conf' file.
    pub settings: GenericOptions,
}

/// Single cluster-state-changing operation that cannot be represented as
/// a static `Cluster` structure. For example:
/// - DROP DATABASE
/// - DROP ROLE
/// - ALTER ROLE name RENAME TO new_name
/// - ALTER DATABASE name RENAME TO new_name
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct DeltaOp {
    pub action: String,
    pub name: PgIdent,
    pub new_name: Option<PgIdent>,
}

/// Rust representation of Postgres role info with only those fields
/// that matter for us.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct Role {
    pub name: PgIdent,
    pub encrypted_password: Option<String>,
    pub options: GenericOptions,
}

/// Rust representation of Postgres database info with only those fields
/// that matter for us.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct Database {
    pub name: PgIdent,
    pub owner: PgIdent,
    pub options: GenericOptions,
    // These are derived flags, not present in the spec file.
    // They are never set by the control plane.
    #[serde(skip_deserializing, default)]
    pub restrict_conn: bool,
    #[serde(skip_deserializing, default)]
    pub invalid: bool,
}

/// Common type representing both SQL statement params, with or without a value,
/// like `LOGIN` or `OWNER username` in `CREATE/ALTER ROLE`, and config
/// options like `wal_level = logical`.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct GenericOption {
    pub name: String,
    pub value: Option<String>,
    pub vartype: String,
}

/// Postgres compute TLS settings.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct PgComputeTlsSettings {
    // Absolute path to the certificate file for server-side TLS.
    pub cert_file: String,
    // Absolute path to the private key file for server-side TLS.
    pub key_file: String,
    // Absolute path to the certificate authority file for verifying client certificates.
    pub ca_file: String,
}

/// Databricks-specific options for the compute instance.
/// This is used to store any other settings that need to be propagated to the
/// compute but should not be persisted to the ComputeSpec in the database.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct DatabricksSettings {
    pub pg_compute_tls_settings: PgComputeTlsSettings,
    // Absolute file path to the databricks_pg_hba.conf file.
    pub databricks_pg_hba: String,
    // Absolute file path to the databricks_pg_ident.conf file.
    pub databricks_pg_ident: String,
    // Hostname portion of the Databricks workspace URL of the endpoint, or an empty string if not known.
    // A valid hostname is required for the compute instance to support PAT logins.
    pub databricks_workspace_host: String,
}

/// Optional collection of `GenericOption`'s. Type alias allows us to
/// declare a `trait` on it.
pub type GenericOptions = Option<Vec<GenericOption>>;

/// Configures the local_proxy application with the relevant JWKS and roles it should
/// use for authorizing connection requests using JWT.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct LocalProxySpec {
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub jwks: Option<Vec<JwksSettings>>,
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tls: Option<TlsConfig>,
}

#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct JwksSettings {
    pub id: String,
    pub role_names: Vec<String>,
    pub jwks_url: String,
    pub provider_name: String,
    pub jwt_audience: Option<String>,
}

/// Protocol used to connect to a Pageserver. Parsed from the connstring scheme.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum PageserverProtocol {
    /// The original protocol based on libpq and COPY. Uses the postgresql:// or postgres:// scheme.
    #[default]
    Libpq,
    /// A newer, gRPC-based protocol. Uses the grpc:// scheme.
    Grpc,
}

impl PageserverProtocol {
    /// Parses the protocol from a connstring scheme. Defaults to Libpq if no scheme is given.
    /// Errors if the connstring is an invalid URL.
    pub fn from_connstring(connstring: &str) -> anyhow::Result<Self> {
        let scheme = match Url::parse(connstring) {
            Ok(url) => url.scheme().to_lowercase(),
            Err(url::ParseError::RelativeUrlWithoutBase) => return Ok(Self::default()),
            Err(err) => return Err(anyhow!("invalid connstring URL: {err}")),
        };
        match scheme.as_str() {
            "postgresql" | "postgres" => Ok(Self::Libpq),
            "grpc" => Ok(Self::Grpc),
            scheme => Err(anyhow!("invalid protocol scheme: {scheme}")),
        }
    }

    /// Returns the URL scheme for the protocol, for use in connstrings.
    pub fn scheme(&self) -> &'static str {
        match self {
            Self::Libpq => "postgresql",
            Self::Grpc => "grpc",
        }
    }
}

impl Display for PageserverProtocol {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.scheme())
    }
}

#[cfg(test)]
mod tests {
    use std::fs::File;

    use super::*;

    #[test]
    fn allow_installing_remote_extensions() {
        let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
            "public_extensions": null,
            "custom_extensions": null,
            "library_index": {},
            "extension_data": {},
        }))
        .unwrap();

        rspec
            .get_ext("ext", false, "latest", "v17")
            .expect_err("Extension should not be found");

        let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
            "public_extensions": [],
            "custom_extensions": null,
            "library_index": {},
            "extension_data": {},
        }))
        .unwrap();

        rspec
            .get_ext("ext", false, "latest", "v17")
            .expect_err("Extension should not be found");

        let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
            "public_extensions": [],
            "custom_extensions": [],
            "library_index": {
                "ext": "ext"
            },
            "extension_data": {
                "ext": {
                    "control_data": {
                        "ext.control": ""
                    },
                    "archive_path": ""
                }
            },
        }))
        .unwrap();

        rspec
            .get_ext("ext", false, "latest", "v17")
            .expect_err("Extension should not be found");

        let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
            "public_extensions": [],
            "custom_extensions": ["ext"],
            "library_index": {
                "ext": "ext"
            },
            "extension_data": {
                "ext": {
                    "control_data": {
                        "ext.control": ""
                    },
                    "archive_path": ""
                }
            },
        }))
        .unwrap();

        rspec
            .get_ext("ext", false, "latest", "v17")
            .expect("Extension should be found");

        let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
            "public_extensions": ["ext"],
            "custom_extensions": [],
            "library_index": {
                "extlib": "ext",
            },
            "extension_data": {
                "ext": {
                    "control_data": {
                        "ext.control": ""
                    },
                    "archive_path": ""
                }
            },
        }))
        .unwrap();

        rspec
            .get_ext("ext", false, "latest", "v17")
            .expect("Extension should be found");

        // Test the library index for the case when the library name
        // doesn't match the extension name.
        rspec
            .get_ext("extlib", true, "latest", "v17")
            .expect("Library should be found");
    }
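
    // A small additional sketch: `get_ext` strips versioned shared-library
    // suffixes (".so", ".so.3", ...) before consulting `library_index`, so a
    // versioned library name should resolve to the same extension. The JSON
    // shape mirrors the fixtures above.
    #[test]
    fn library_suffix_is_stripped() {
        let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
            "public_extensions": ["ext"],
            "custom_extensions": [],
            "library_index": {
                "extlib": "ext",
            },
            "extension_data": {
                "ext": {
                    "control_data": {
                        "ext.control": ""
                    },
                    "archive_path": ""
                }
            },
        }))
        .unwrap();

        // "extlib.so.3" should be normalized to "extlib", which maps to "ext".
        let (ext_name, _path) = rspec
            .get_ext("extlib.so.3", true, "latest", "v17")
            .expect("Versioned library name should resolve");
        assert_eq!(ext_name, "ext");
    }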

    #[test]
    fn remote_extension_path() {
        let rspec: RemoteExtSpec = serde_json::from_value(serde_json::json!({
            "public_extensions": ["ext"],
            "custom_extensions": [],
            "library_index": {
                "extlib": "ext",
            },
            "extension_data": {
                "ext": {
                    "control_data": {
                        "ext.control": ""
                    },
                    "archive_path": ""
                }
            },
        }))
        .unwrap();

        let (_ext_name, ext_path) = rspec
            .get_ext("ext", false, "latest", "v17")
            .expect("Extension should be found");
        // Starting with a forward slash would have consequences for the
        // Url::join() that occurs when downloading a remote extension.
        assert!(!ext_path.to_string().starts_with("/"));
        assert_eq!(
            ext_path,
            RemoteExtSpec::build_remote_path("latest", "v17", "ext").unwrap()
        );
    }

    #[test]
    fn parse_spec_file() {
        let file = File::open("tests/cluster_spec.json").unwrap();
        let spec: ComputeSpec = serde_json::from_reader(file).unwrap();

        // Features list defaults to empty vector.
        assert!(spec.features.is_empty());

        // Reconfigure concurrency defaults to 1.
        assert_eq!(spec.reconfigure_concurrency, 1);
    }
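
    // A minimal sketch showing which fields a spec document actually needs:
    // per the struct definitions above, everything else is either an `Option`
    // or carries a serde default. The field values here are arbitrary, not
    // taken from a real control plane payload.
    #[test]
    fn parse_minimal_spec() {
        let spec: ComputeSpec = serde_json::from_value(serde_json::json!({
            "format_version": 1.0,
            "cluster": {
                "roles": [],
                "databases": []
            },
            "suspend_timeout_seconds": -1,
        }))
        .unwrap();

        // Defaulted fields get their documented fallbacks.
        assert_eq!(spec.reconfigure_concurrency, 1);
        assert_eq!(spec.mode, ComputeMode::Primary);
        assert_eq!(spec.audit_log_level, ComputeAudit::Disabled);
        assert!(!spec.skip_pg_catalog_updates);
        assert!(spec.tenant_id.is_none());
    }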

    #[test]
    fn parse_unknown_fields() {
        // Forward compatibility test
        let file = File::open("tests/cluster_spec.json").unwrap();
        let mut json: serde_json::Value = serde_json::from_reader(file).unwrap();
        let ob = json.as_object_mut().unwrap();
        ob.insert("unknown_field_123123123".into(), "hello".into());
        let _spec: ComputeSpec = serde_json::from_value(json).unwrap();
    }

    #[test]
    fn parse_unknown_features() {
        // Test that unknown feature flags do not cause any errors.
        let file = File::open("tests/cluster_spec.json").unwrap();
        let mut json: serde_json::Value = serde_json::from_reader(file).unwrap();
        let ob = json.as_object_mut().unwrap();

        // Add unknown feature flags.
        let features = vec!["foo_bar_feature", "baz_feature"];
        ob.insert("features".into(), features.into());

        let spec: ComputeSpec = serde_json::from_value(json).unwrap();

        assert_eq!(spec.features.len(), 2);
        assert!(spec.features.contains(&ComputeFeature::UnknownFeature));
        assert_eq!(spec.features, vec![ComputeFeature::UnknownFeature; 2]);
    }
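
    // A small sketch of `ComputeMode::to_type_str`: the LSN of a static
    // compute is intentionally dropped, so every static compute maps to the
    // same type string. `Lsn(0)` is an arbitrary placeholder value.
    #[test]
    fn compute_mode_type_str() {
        assert_eq!(ComputeMode::Primary.to_type_str(), "primary");
        assert_eq!(ComputeMode::Static(Lsn(0)).to_type_str(), "static");
        assert_eq!(ComputeMode::Replica.to_type_str(), "replica");
    }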

    #[test]
    fn parse_known_features() {
        // Test that we can properly parse known feature flags.
        let file = File::open("tests/cluster_spec.json").unwrap();
        let mut json: serde_json::Value = serde_json::from_reader(file).unwrap();
        let ob = json.as_object_mut().unwrap();

        // Add known feature flags.
        let features = vec!["activity_monitor_experimental"];
        ob.insert("features".into(), features.into());

        let spec: ComputeSpec = serde_json::from_value(json).unwrap();

        assert_eq!(
            spec.features,
            vec![ComputeFeature::ActivityMonitorExperimental]
        );
    }
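
    // A quick sketch of `PageserverProtocol::from_connstring`: bare libpq
    // connstrings have no URL scheme and fall back to the default, while
    // explicit schemes are matched case-insensitively. The addresses are
    // made up for illustration.
    #[test]
    fn parse_pageserver_protocol() {
        // No scheme: Url::parse() fails with RelativeUrlWithoutBase, so we
        // default to libpq.
        assert_eq!(
            PageserverProtocol::from_connstring("host=127.0.0.1 port=6400").unwrap(),
            PageserverProtocol::Libpq
        );
        assert_eq!(
            PageserverProtocol::from_connstring("postgresql://127.0.0.1:6400").unwrap(),
            PageserverProtocol::Libpq
        );
        assert_eq!(
            PageserverProtocol::from_connstring("GRPC://127.0.0.1:6400").unwrap(),
            PageserverProtocol::Grpc
        );
        // Unknown schemes are rejected.
        assert!(PageserverProtocol::from_connstring("http://127.0.0.1").is_err());
    }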
}