use std::collections::{HashMap, HashSet};
use std::fmt::Display;
use std::str::FromStr;
use std::time::{Duration, Instant};

/// Request/response types for the storage controller
/// API (`/control/v1` prefix). Implemented by the server
/// in [`storage_controller::http`]
use serde::{Deserialize, Serialize};
use utils::id::{NodeId, TenantId, TimelineId};
use utils::lsn::Lsn;

use crate::models::{PageserverUtilization, ShardParameters, TenantConfig};
use crate::shard::{ShardStripeSize, TenantShardId};

#[derive(Serialize, Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct TenantCreateRequest {
    pub new_tenant_id: TenantShardId,
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub generation: Option<u32>,

    // If omitted, create a single shard with TenantShardId::unsharded()
    #[serde(default)]
    #[serde(skip_serializing_if = "ShardParameters::is_unsharded")]
    pub shard_parameters: ShardParameters,

    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub placement_policy: Option<PlacementPolicy>,

    #[serde(flatten)]
    pub config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it
}
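
// An illustrative creation body (a sketch, not taken from server docs: the tenant
// id below is made up, and every optional field is omitted so the serde defaults
// apply -- unsharded, no explicit generation or placement policy, and an empty
// flattened TenantConfig):
//
//   { "new_tenant_id": "1f2e3d4c5b6a79881f2e3d4c5b6a7988" }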

#[derive(Serialize, Deserialize)]
pub struct TenantCreateResponseShard {
    pub shard_id: TenantShardId,
    pub node_id: NodeId,
    pub generation: u32,
}

#[derive(Serialize, Deserialize)]
pub struct TenantCreateResponse {
    pub shards: Vec<TenantCreateResponseShard>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NodeRegisterRequest {
    pub node_id: NodeId,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,
    pub listen_grpc_addr: Option<String>,
    pub listen_grpc_port: Option<u16>,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,

    pub availability_zone_id: AvailabilityZone,
}

#[derive(Serialize, Deserialize)]
pub struct NodeConfigureRequest {
    pub node_id: NodeId,

    pub availability: Option<NodeAvailabilityWrapper>,
    pub scheduling: Option<NodeSchedulingPolicy>,
}

#[derive(Serialize, Deserialize)]
pub struct TenantPolicyRequest {
    pub placement: Option<PlacementPolicy>,
    pub scheduling: Option<ShardSchedulingPolicy>,
}

#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
pub struct AvailabilityZone(pub String);

impl Display for AvailabilityZone {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Serialize, Deserialize)]
pub struct ShardsPreferredAzsRequest {
    #[serde(flatten)]
    pub preferred_az_ids: HashMap<TenantShardId, Option<AvailabilityZone>>,
}
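
// Because the map is flattened, the request body is just an object mapping tenant
// shard id to preferred AZ, with null meaning no preferred AZ. A sketch with
// made-up shard ids and AZ names:
//
//   {
//     "1f2e3d4c5b6a79881f2e3d4c5b6a7988-0004": "us-east-2b",
//     "1f2e3d4c5b6a79881f2e3d4c5b6a7988-0104": null
//   }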

#[derive(Serialize, Deserialize)]
pub struct ShardsPreferredAzsResponse {
    pub updated: Vec<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantLocateResponseShard {
    pub shard_id: TenantShardId,
    pub node_id: NodeId,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,
    pub listen_grpc_addr: Option<String>,
    pub listen_grpc_port: Option<u16>,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,
}

#[derive(Serialize, Deserialize)]
pub struct TenantLocateResponse {
    pub shards: Vec<TenantLocateResponseShard>,
    pub shard_params: ShardParameters,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantDescribeResponse {
    pub tenant_id: TenantId,
    pub shards: Vec<TenantDescribeResponseShard>,
    pub stripe_size: ShardStripeSize,
    pub policy: PlacementPolicy,
    pub config: TenantConfig,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShardResponse {
    pub node_id: NodeId,
    pub shards: Vec<NodeShard>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShard {
    pub tenant_shard_id: TenantShardId,
    /// Whether the shard is observed secondary on a specific node. True = yes, False = no, None = not on this node.
    pub is_observed_secondary: Option<bool>,
    /// Whether the shard is intended to be a secondary on a specific node. True = yes, False = no, None = not on this node.
    pub is_intended_secondary: Option<bool>,
}

#[derive(Serialize, Deserialize)]
pub struct NodeDescribeResponse {
    pub id: NodeId,

    pub availability: NodeAvailabilityWrapper,
    pub scheduling: NodeSchedulingPolicy,

    pub availability_zone_id: String,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,
    pub listen_grpc_addr: Option<String>,
    pub listen_grpc_port: Option<u16>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantDescribeResponseShard {
    pub tenant_shard_id: TenantShardId,

    pub node_attached: Option<NodeId>,
    pub node_secondary: Vec<NodeId>,

    pub last_error: String,

    /// A task is currently running to reconcile this tenant's intent state with the state on pageservers
    pub is_reconciling: bool,
    /// This shard failed in sending a compute notification to the cloud control plane, and a retry is pending.
    pub is_pending_compute_notification: bool,
    /// A shard split is currently underway
    pub is_splitting: bool,
    /// A timeline is being imported into this tenant
    pub is_importing: bool,

    pub scheduling_policy: ShardSchedulingPolicy,

    pub preferred_az_id: Option<String>,
}

/// Migration request for a given tenant shard to a given node.
///
/// Explicitly migrating a particular shard is a low-level operation.
/// TODO: higher level "Reschedule tenant" operation where the request
/// specifies some constraints, e.g. asking it to get off particular node(s)
#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateRequest {
    pub node_id: NodeId,

    /// Optionally, callers may specify the node they are migrating _from_, and the server will
    /// reject the request if the shard is no longer attached there: this enables writing safer
    /// clients that don't risk fighting with some other movement of the shard.
    #[serde(default)]
    pub origin_node_id: Option<NodeId>,

    #[serde(default)]
    pub migration_config: MigrationConfig,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct MigrationConfig {
    /// If true, the migration will be executed even if it is to a location with a sub-optimal scheduling
    /// score: this is usually not what you want, and if you use this then you'll also need to set the
    /// tenant's scheduling policy to Essential or Pause to avoid the optimiser reverting your migration.
    ///
    /// Default: false
    #[serde(default)]
    pub override_scheduler: bool,

    /// If true, the migration will be done gracefully by creating a secondary location first and
    /// waiting for it to warm up before cutting over. If false, if there is no existing secondary
    /// location at the destination, the tenant will be migrated immediately. If the tenant's data
    /// can't be downloaded within [`Self::secondary_warmup_timeout`], then the migration will go
    /// ahead but run with a cold cache that can severely reduce performance until it warms up.
    ///
    /// When doing a graceful migration, the migration API returns as soon as it is started.
    ///
    /// Default: true
    #[serde(default = "default_prewarm")]
    pub prewarm: bool,

    /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
    /// overall for secondary warmup before cutting over
    #[serde(default)]
    #[serde(with = "humantime_serde")]
    pub secondary_warmup_timeout: Option<Duration>,
    /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
    /// within each secondary download poll call to pageserver.
    #[serde(default)]
    #[serde(with = "humantime_serde")]
    pub secondary_download_request_timeout: Option<Duration>,
}

fn default_prewarm() -> bool {
    true
}

impl Default for MigrationConfig {
    fn default() -> Self {
        Self {
            override_scheduler: false,
            prewarm: default_prewarm(),
            secondary_warmup_timeout: None,
            secondary_download_request_timeout: None,
        }
    }
}
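
// An illustrative migrate request combining the two types above (a sketch: the
// node ids are made up). Durations go through humantime_serde, so they can be
// written as strings such as "300s" or "5m"; omitted fields take the defaults
// documented on MigrationConfig:
//
//   {
//     "node_id": 5,
//     "origin_node_id": 3,
//     "migration_config": {
//       "prewarm": false,
//       "secondary_warmup_timeout": "300s"
//     }
//   }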

#[derive(Serialize, Clone, Debug)]
#[serde(into = "NodeAvailabilityWrapper")]
pub enum NodeAvailability {
    // Normal, happy state
    Active(PageserverUtilization),
    // Node is warming up, but we expect it to become available soon. Covers
    // the time span between the re-attach response being composed on the storage controller
    // and the first successful heartbeat after the processing of the re-attach response
    // finishes on the pageserver.
    WarmingUp(Instant),
    // Offline: Tenants shouldn't try to attach here, but they may assume that their
    // secondary locations on this node still exist. Newly added nodes are in this
    // state until we successfully contact them.
    Offline,
}

impl PartialEq for NodeAvailability {
    fn eq(&self, other: &Self) -> bool {
        use NodeAvailability::*;
        matches!(
            (self, other),
            (Active(_), Active(_)) | (Offline, Offline) | (WarmingUp(_), WarmingUp(_))
        )
    }
}

impl Eq for NodeAvailability {}

// This wrapper provides serde functionality and it should only be used to
// communicate with external callers which don't know or care about the
// utilisation score of the pageserver it is targeting.
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub enum NodeAvailabilityWrapper {
    Active,
    WarmingUp,
    Offline,
}

impl From<NodeAvailabilityWrapper> for NodeAvailability {
    fn from(val: NodeAvailabilityWrapper) -> Self {
        match val {
            // Assume the worst utilisation score to begin with. It will later be updated by
            // the heartbeats.
            NodeAvailabilityWrapper::Active => {
                NodeAvailability::Active(PageserverUtilization::full())
            }
            NodeAvailabilityWrapper::WarmingUp => NodeAvailability::WarmingUp(Instant::now()),
            NodeAvailabilityWrapper::Offline => NodeAvailability::Offline,
        }
    }
}

impl From<NodeAvailability> for NodeAvailabilityWrapper {
    fn from(val: NodeAvailability) -> Self {
        match val {
            NodeAvailability::Active(_) => NodeAvailabilityWrapper::Active,
            NodeAvailability::WarmingUp(_) => NodeAvailabilityWrapper::WarmingUp,
            NodeAvailability::Offline => NodeAvailabilityWrapper::Offline,
        }
    }
}
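
// Conversion sketch: an externally reported "Active" starts out with the most
// pessimistic utilization and is refined by later heartbeats, while converting
// back to the wrapper simply drops that detail.
//
//   let avail: NodeAvailability = NodeAvailabilityWrapper::Active.into();
//   assert!(matches!(avail, NodeAvailability::Active(_)));
//   let wrapper: NodeAvailabilityWrapper = avail.into(); // -> NodeAvailabilityWrapper::Active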

/// Scheduling policy enables us to selectively disable some automatic actions that the
/// controller performs on a tenant shard. This is only set to a non-default value by
/// human intervention, and it is reset to the default value (Active) when the tenant's
/// placement policy is modified away from Attached.
///
/// The typical use of a non-Active scheduling policy is one of:
/// - Pinning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
/// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
///
/// If you're not sure which policy to use to pin a shard to its current location, you probably
/// want Pause.
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum ShardSchedulingPolicy {
    // Normal mode: the tenant's scheduled locations may be updated at will, including
    // for non-essential optimization.
    Active,

    // Disable optimizations, but permit scheduling when necessary to fulfil the PlacementPolicy.
    // For example, this still permits a node's attachment location to change to a secondary in
    // response to a node failure, or to assign a new secondary if a node was removed.
    Essential,

    // No scheduling: leave the shard running wherever it currently is. Even if the shard is
    // unavailable, it will not be rescheduled to another node.
    Pause,

    // No reconciling: we will make no location_conf API calls to pageservers at all. If the
    // shard is unavailable, it stays that way. If a node fails, this shard doesn't get failed over.
    Stop,
}

impl Default for ShardSchedulingPolicy {
    fn default() -> Self {
        Self::Active
    }
}
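
// Hedged sketch of how a shard gets pinned using TenantPolicyRequest from above
// (the exact route is defined by the server in storage_controller::http; these
// are just plausible request bodies for that type):
//
//   { "scheduling": "Pause" }    // pin the shard where it currently runs
//   { "scheduling": "Active" }   // later: restore automatic scheduling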

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum NodeLifecycle {
    Active,
    Deleted,
}

impl FromStr for NodeLifecycle {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self::Active),
            "deleted" => Ok(Self::Deleted),
            _ => Err(anyhow::anyhow!("Unknown node lifecycle '{s}'")),
        }
    }
}

impl From<NodeLifecycle> for String {
    fn from(value: NodeLifecycle) -> String {
        use NodeLifecycle::*;
        match value {
            Active => "active",
            Deleted => "deleted",
        }
        .to_string()
    }
}

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum NodeSchedulingPolicy {
    Active,
    Filling,
    Pause,
    PauseForRestart,
    Draining,
    Deleting,
}

impl FromStr for NodeSchedulingPolicy {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self::Active),
            "filling" => Ok(Self::Filling),
            "pause" => Ok(Self::Pause),
            "pause_for_restart" => Ok(Self::PauseForRestart),
            "draining" => Ok(Self::Draining),
            "deleting" => Ok(Self::Deleting),
            _ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")),
        }
    }
}

impl From<NodeSchedulingPolicy> for String {
    fn from(value: NodeSchedulingPolicy) -> String {
        use NodeSchedulingPolicy::*;
        match value {
            Active => "active",
            Filling => "filling",
            Pause => "pause",
            PauseForRestart => "pause_for_restart",
            Draining => "draining",
            Deleting => "deleting",
        }
        .to_string()
    }
}

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum SkSchedulingPolicy {
    Active,
    Activating,
    Pause,
    Decomissioned,
}

impl FromStr for SkSchedulingPolicy {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "active" => Self::Active,
            "activating" => Self::Activating,
            "pause" => Self::Pause,
            "decomissioned" => Self::Decomissioned,
            _ => {
                return Err(anyhow::anyhow!(
                    "Unknown scheduling policy '{s}', try active, activating, pause, decomissioned"
                ));
            }
        })
    }
}

impl From<SkSchedulingPolicy> for String {
    fn from(value: SkSchedulingPolicy) -> String {
        use SkSchedulingPolicy::*;
        match value {
            Active => "active",
            Activating => "activating",
            Pause => "pause",
            Decomissioned => "decomissioned",
        }
        .to_string()
    }
}

/// Controls how tenant shards are mapped to locations on pageservers, e.g. whether
/// to create secondary locations.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum PlacementPolicy {
    /// Normal live state: one attached pageserver and zero or more secondaries.
    Attached(usize),
    /// Create one secondary-mode location. This is useful when onboarding
    /// a tenant, or for an idle tenant that we might want to bring online quickly.
    Secondary,

    /// Do not attach to any pageservers. This is appropriate for tenants that
    /// have been idle for a long time, where we do not mind some delay in making
    /// them available in future.
    Detached,
}

impl PlacementPolicy {
    pub fn want_secondaries(&self) -> usize {
        match self {
            PlacementPolicy::Attached(secondary_count) => *secondary_count,
            PlacementPolicy::Secondary => 1,
            PlacementPolicy::Detached => 0,
        }
    }
}
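
// Quick reference for what each policy asks of the scheduler (mirrors the match
// in want_secondaries above):
//
//   PlacementPolicy::Attached(1).want_secondaries() // -> 1: one attached + one secondary
//   PlacementPolicy::Secondary.want_secondaries()   // -> 1: no attachment, one warm secondary
//   PlacementPolicy::Detached.want_secondaries()    // -> 0: no locations at all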

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateResponse {}

/// Metadata health record posted from scrubber.
#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthRecord {
    pub tenant_shard_id: TenantShardId,
    pub healthy: bool,
    pub last_scrubbed_at: chrono::DateTime<chrono::Utc>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthUpdateRequest {
    pub healthy_tenant_shards: HashSet<TenantShardId>,
    pub unhealthy_tenant_shards: HashSet<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthUpdateResponse {}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListUnhealthyResponse {
    pub unhealthy_tenant_shards: Vec<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListOutdatedRequest {
    #[serde(with = "humantime_serde")]
    pub not_scrubbed_for: Duration,
}
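
// `not_scrubbed_for` is deserialized with humantime, so a request body can use a
// human-readable duration string (illustrative value):
//
//   { "not_scrubbed_for": "24h" }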

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListOutdatedResponse {
    pub health_records: Vec<MetadataHealthRecord>,
}

/// Publicly exposed safekeeper description
#[derive(Serialize, Deserialize, Clone)]
pub struct SafekeeperDescribeResponse {
    pub id: NodeId,
    pub region_id: String,
    /// 1 is special, it means just created (not currently posted to storcon).
    /// Zero or negative is not really expected.
    /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    pub version: i64,
    pub host: String,
    pub port: i32,
    pub http_port: i32,
    pub https_port: Option<i32>,
    pub availability_zone_id: String,
    pub scheduling_policy: SkSchedulingPolicy,
}

#[derive(Serialize, Deserialize, Clone)]
pub struct SafekeeperSchedulingPolicyRequest {
    pub scheduling_policy: SkSchedulingPolicy,
}

/// Import request for safekeeper timelines.
#[derive(Serialize, Deserialize, Clone)]
pub struct TimelineImportRequest {
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub start_lsn: Lsn,
    pub sk_set: Vec<NodeId>,
}

#[derive(serde::Serialize, serde::Deserialize, Clone)]
pub struct TimelineSafekeeperMigrateRequest {
    pub new_sk_set: Vec<NodeId>,
}

#[cfg(test)]
mod test {
    use serde_json;

    use super::*;

    /// Check stability of PlacementPolicy's serialization
    #[test]
    fn placement_policy_encoding() -> anyhow::Result<()> {
        let v = PlacementPolicy::Attached(1);
        let encoded = serde_json::to_string(&v)?;
        assert_eq!(encoded, "{\"Attached\":1}");
        assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);

        let v = PlacementPolicy::Detached;
        let encoded = serde_json::to_string(&v)?;
        assert_eq!(encoded, "\"Detached\"");
        assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);
        Ok(())
    }

    #[test]
    fn test_reject_unknown_field() {
        let id = TenantId::generate();
        let create_request = serde_json::json!({
            "new_tenant_id": id.to_string(),
            "unknown_field": "unknown_value".to_string(),
        });
        let err = serde_json::from_value::<TenantCreateRequest>(create_request).unwrap_err();
        assert!(
            err.to_string().contains("unknown field `unknown_field`"),
            "expect unknown field `unknown_field` error, got: {err}"
        );
    }

    /// Check that a minimal migrate request with no config results in the expected default settings
    #[test]
    fn test_migrate_request_decode_defaults() {
        let json = r#"{
            "node_id": 123
        }"#;

        let request: TenantShardMigrateRequest = serde_json::from_str(json).unwrap();
        assert_eq!(request.node_id, NodeId(123));
        assert_eq!(request.origin_node_id, None);
        assert!(!request.migration_config.override_scheduler);
        assert!(request.migration_config.prewarm);
        assert_eq!(request.migration_config.secondary_warmup_timeout, None);
        assert_eq!(
            request.migration_config.secondary_download_request_timeout,
            None
        );
    }

    /// Check that an empty migration config results in the expected default settings
    #[test]
    fn test_migration_config_decode_defaults() {
        // Specify none of the config's fields: every field should fall back to its default
        let json = r#"{
        }"#;

        let config: MigrationConfig = serde_json::from_str(json).unwrap();

        // Check each field's expected default value
        assert!(!config.override_scheduler);
        assert!(config.prewarm);
        assert_eq!(config.secondary_warmup_timeout, None);
        assert_eq!(config.secondary_download_request_timeout, None);

        // Consistency check that the Default impl agrees with our serde defaults
        assert_eq!(MigrationConfig::default(), config);
    }
}