Line data Source code
1 : use std::collections::{HashMap, HashSet};
2 : use std::fmt::Display;
3 : use std::str::FromStr;
4 : use std::time::{Duration, Instant};
5 :
6 : /// Request/response types for the storage controller
7 : /// API (`/control/v1` prefix). Implemented by the server
8 : /// in [`storage_controller::http`]
9 : use serde::{Deserialize, Serialize};
10 : use utils::id::{NodeId, TenantId, TimelineId};
11 : use utils::lsn::Lsn;
12 :
13 : use crate::models::{PageserverUtilization, ShardParameters, TenantConfig, TimelineInfo};
14 : use crate::shard::{ShardStripeSize, TenantShardId};
15 :
16 0 : #[derive(Serialize, Deserialize, Debug)]
17 : #[serde(deny_unknown_fields)]
18 : pub struct TenantCreateRequest {
19 : pub new_tenant_id: TenantShardId,
20 : #[serde(default)]
21 : #[serde(skip_serializing_if = "Option::is_none")]
22 : pub generation: Option<u32>,
23 :
24 : // If omitted, create a single shard with TenantShardId::unsharded()
25 : #[serde(default)]
26 : #[serde(skip_serializing_if = "ShardParameters::is_unsharded")]
27 : pub shard_parameters: ShardParameters,
28 :
29 : #[serde(default)]
30 : #[serde(skip_serializing_if = "Option::is_none")]
31 : pub placement_policy: Option<PlacementPolicy>,
32 :
33 : #[serde(flatten)]
34 : pub config: TenantConfig, // since `config` is flattened, unknown fields inside it must also be rejected
35 : }
36 :
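For illustration only (not part of the original source): because the optional fields above all carry serde defaults and `config` is flattened, a minimal create request needs nothing beyond `new_tenant_id`. The sketch below follows the style of the test module at the end of this file and assumes the flattened `TenantConfig` accepts an empty object, as its all-optional fields suggest.

```rust
// Illustrative sketch only: decode a minimal tenant creation body.
fn minimal_create_request_example() {
    let body = serde_json::json!({
        // An unsharded tenant: TenantShardId deserializes from a bare tenant id.
        "new_tenant_id": TenantId::generate().to_string(),
    });
    let req: TenantCreateRequest = serde_json::from_value(body).unwrap();
    // Everything else falls back to its declared default.
    assert!(req.generation.is_none());
    assert!(req.placement_policy.is_none());
}
```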
37 0 : #[derive(Serialize, Deserialize)]
38 : pub struct TenantCreateResponseShard {
39 : pub shard_id: TenantShardId,
40 : pub node_id: NodeId,
41 : pub generation: u32,
42 : }
43 :
44 0 : #[derive(Serialize, Deserialize)]
45 : pub struct TenantCreateResponse {
46 : pub shards: Vec<TenantCreateResponseShard>,
47 : }
48 :
49 0 : #[derive(Serialize, Deserialize, Debug, Clone)]
50 : pub struct NodeRegisterRequest {
51 : pub node_id: NodeId,
52 :
53 : pub listen_pg_addr: String,
54 : pub listen_pg_port: u16,
55 : pub listen_grpc_addr: Option<String>,
56 : pub listen_grpc_port: Option<u16>,
57 :
58 : pub listen_http_addr: String,
59 : pub listen_http_port: u16,
60 : pub listen_https_port: Option<u16>,
61 :
62 : pub availability_zone_id: AvailabilityZone,
63 : }
64 :
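A hypothetical registration payload might be assembled like this; the host name, ports and availability zone id are invented, and the optional gRPC/HTTPS listeners are simply left unset.

```rust
// Illustrative sketch only: register a pageserver with made-up addresses.
fn node_register_example() {
    let register = NodeRegisterRequest {
        node_id: NodeId(1),
        listen_pg_addr: "pageserver-1.local".to_string(),
        listen_pg_port: 6400,
        listen_grpc_addr: None,
        listen_grpc_port: None,
        listen_http_addr: "pageserver-1.local".to_string(),
        listen_http_port: 9898,
        listen_https_port: None,
        availability_zone_id: AvailabilityZone("az-example-1".to_string()),
    };
    // The JSON body used when registering the node with the storage controller.
    let _body = serde_json::to_string(&register).unwrap();
}
```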
65 0 : #[derive(Serialize, Deserialize)]
66 : pub struct NodeConfigureRequest {
67 : pub node_id: NodeId,
68 :
69 : pub availability: Option<NodeAvailabilityWrapper>,
70 : pub scheduling: Option<NodeSchedulingPolicy>,
71 : }
72 :
73 0 : #[derive(Serialize, Deserialize)]
74 : pub struct TenantPolicyRequest {
75 : pub placement: Option<PlacementPolicy>,
76 : pub scheduling: Option<ShardSchedulingPolicy>,
77 : }
78 :
79 0 : #[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
80 : pub struct AvailabilityZone(pub String);
81 :
82 : impl Display for AvailabilityZone {
83 300 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
84 300 : write!(f, "{}", self.0)
85 300 : }
86 : }
87 :
88 0 : #[derive(Serialize, Deserialize)]
89 : pub struct ShardsPreferredAzsRequest {
90 : #[serde(flatten)]
91 : pub preferred_az_ids: HashMap<TenantShardId, Option<AvailabilityZone>>,
92 : }
93 :
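Because the map is `#[serde(flatten)]`-ed, the request body is a bare JSON object keyed by tenant shard id. A sketch, using an invented availability zone id:

```rust
// Illustrative sketch only: set a preferred AZ for one (unsharded) tenant.
fn preferred_az_request_example() {
    let mut preferred_az_ids = HashMap::new();
    // A `None` value clears the preference instead of setting one.
    preferred_az_ids.insert(
        TenantShardId::unsharded(TenantId::generate()),
        Some(AvailabilityZone("az-example-1".to_string())),
    );
    let body = serde_json::to_value(&ShardsPreferredAzsRequest { preferred_az_ids }).unwrap();
    // `body` is a flat object of the form {"<tenant shard id>": "az-example-1"}.
    assert!(body.is_object());
}
```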
94 0 : #[derive(Serialize, Deserialize)]
95 : pub struct ShardsPreferredAzsResponse {
96 : pub updated: Vec<TenantShardId>,
97 : }
98 :
99 0 : #[derive(Serialize, Deserialize, Debug)]
100 : pub struct TenantLocateResponseShard {
101 : pub shard_id: TenantShardId,
102 : pub node_id: NodeId,
103 :
104 : pub listen_pg_addr: String,
105 : pub listen_pg_port: u16,
106 : pub listen_grpc_addr: Option<String>,
107 : pub listen_grpc_port: Option<u16>,
108 :
109 : pub listen_http_addr: String,
110 : pub listen_http_port: u16,
111 : pub listen_https_port: Option<u16>,
112 : }
113 :
114 0 : #[derive(Serialize, Deserialize)]
115 : pub struct TenantLocateResponse {
116 : pub shards: Vec<TenantLocateResponseShard>,
117 : pub shard_params: ShardParameters,
118 : }
119 :
120 0 : #[derive(Serialize, Deserialize, Debug)]
121 : pub struct TenantDescribeResponse {
122 : pub tenant_id: TenantId,
123 : pub shards: Vec<TenantDescribeResponseShard>,
124 : pub stripe_size: ShardStripeSize,
125 : pub policy: PlacementPolicy,
126 : pub config: TenantConfig,
127 : }
128 :
129 0 : #[derive(Serialize, Deserialize, Debug)]
130 : pub struct TenantTimelineDescribeResponse {
131 : pub shards: Vec<TimelineInfo>,
132 : #[serde(skip_serializing_if = "Option::is_none")]
133 : pub image_consistent_lsn: Option<Lsn>,
134 : }
135 :
136 0 : #[derive(Serialize, Deserialize, Debug)]
137 : pub struct NodeShardResponse {
138 : pub node_id: NodeId,
139 : pub shards: Vec<NodeShard>,
140 : }
141 :
142 0 : #[derive(Serialize, Deserialize, Debug)]
143 : pub struct NodeShard {
144 : pub tenant_shard_id: TenantShardId,
145 : /// Whether the shard is observed to be a secondary on this node. True = yes, False = no, None = not on this node.
146 : pub is_observed_secondary: Option<bool>,
147 : /// Whether the shard is intended to be a secondary on this node. True = yes, False = no, None = not on this node.
148 : pub is_intended_secondary: Option<bool>,
149 : }
150 :
151 0 : #[derive(Serialize, Deserialize)]
152 : pub struct NodeDescribeResponse {
153 : pub id: NodeId,
154 :
155 : pub availability: NodeAvailabilityWrapper,
156 : pub scheduling: NodeSchedulingPolicy,
157 :
158 : pub availability_zone_id: String,
159 :
160 : pub listen_http_addr: String,
161 : pub listen_http_port: u16,
162 : pub listen_https_port: Option<u16>,
163 :
164 : pub listen_pg_addr: String,
165 : pub listen_pg_port: u16,
166 : pub listen_grpc_addr: Option<String>,
167 : pub listen_grpc_port: Option<u16>,
168 : }
169 :
170 0 : #[derive(Serialize, Deserialize, Debug)]
171 : pub struct TenantDescribeResponseShard {
172 : pub tenant_shard_id: TenantShardId,
173 :
174 : pub node_attached: Option<NodeId>,
175 : pub node_secondary: Vec<NodeId>,
176 :
177 : pub last_error: String,
178 :
179 : /// A task is currently running to reconcile this tenant's intent state with the state on pageservers
180 : pub is_reconciling: bool,
181 : /// This shard failed in sending a compute notification to the cloud control plane, and a retry is pending.
182 : pub is_pending_compute_notification: bool,
183 : /// A shard split is currently underway
184 : pub is_splitting: bool,
185 : /// A timeline is being imported into this tenant
186 : pub is_importing: bool,
187 :
188 : pub scheduling_policy: ShardSchedulingPolicy,
189 :
190 : pub preferred_az_id: Option<String>,
191 : }
192 :
193 : /// Migration request for a given tenant shard to a given node.
194 : ///
195 : /// Explicitly migrating a particular shard is a low-level operation.
196 : /// TODO: add a higher-level "Reschedule tenant" operation, where the request
197 : /// specifies some constraints, e.g. asking to get the shard off particular node(s).
198 0 : #[derive(Serialize, Deserialize, Debug)]
199 : pub struct TenantShardMigrateRequest {
200 : pub node_id: NodeId,
201 :
202 : /// Optionally, callers may specify the node they are migrating _from_, and the server will
203 : /// reject the request if the shard is no longer attached there: this enables writing safer
204 : /// clients that don't risk fighting with some other movement of the shard.
205 : #[serde(default)]
206 : pub origin_node_id: Option<NodeId>,
207 :
208 : #[serde(default)]
209 : pub migration_config: MigrationConfig,
210 : }
211 :
212 : #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
213 : pub struct MigrationConfig {
214 : /// If true, the migration will be executed even if it is to a location with a sub-optimal scheduling
215 : /// score: this is usually not what you want, and if you use this then you'll also need to set the
216 : /// tenant's scheduling policy to Essential or Pause to avoid the optimiser reverting your migration.
217 : ///
218 : /// Default: false
219 : #[serde(default)]
220 : pub override_scheduler: bool,
221 :
222 : /// If true, the migration will be done gracefully by creating a secondary location first and
223 : /// waiting for it to warm up before cutting over. If false and there is no existing secondary
224 : /// location at the destination, the tenant is migrated immediately. If the tenant's data
225 : /// can't be downloaded within [`Self::secondary_warmup_timeout`], then the migration will go
226 : /// ahead but run with a cold cache that can severely reduce performance until it warms up.
227 : ///
228 : /// When doing a graceful migration, the migration API returns as soon as it is started.
229 : ///
230 : /// Default: true
231 : #[serde(default = "default_prewarm")]
232 : pub prewarm: bool,
233 :
234 : /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
235 : /// overall for secondary warmup before cutting over
236 : #[serde(default)]
237 : #[serde(with = "humantime_serde")]
238 : pub secondary_warmup_timeout: Option<Duration>,
239 : /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
240 : /// within each secondary download poll call to pageserver.
241 : #[serde(default)]
242 : #[serde(with = "humantime_serde")]
243 : pub secondary_download_request_timeout: Option<Duration>,
244 : }
245 :
246 3 : fn default_prewarm() -> bool {
247 3 : true
248 3 : }
249 :
250 : impl Default for MigrationConfig {
251 2 : fn default() -> Self {
252 2 : Self {
253 2 : override_scheduler: false,
254 2 : prewarm: default_prewarm(),
255 2 : secondary_warmup_timeout: None,
256 2 : secondary_download_request_timeout: None,
257 2 : }
258 2 : }
259 : }
260 :
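For illustration: the duration fields are humantime strings on the wire, and omitted fields fall back to the defaults exercised in the tests at the bottom of this file. A sketch overriding only the warmup timeout:

```rust
// Illustrative sketch only: decode a config that sets just one field.
fn migration_config_example() {
    let json = r#"{ "secondary_warmup_timeout": "300s" }"#;
    let config: MigrationConfig = serde_json::from_str(json).unwrap();
    assert!(config.prewarm); // default_prewarm()
    assert!(!config.override_scheduler); // bool default
    assert_eq!(config.secondary_warmup_timeout, Some(Duration::from_secs(300)));
    assert_eq!(config.secondary_download_request_timeout, None);
}
```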
261 : #[derive(Serialize, Clone, Debug)]
262 : #[serde(into = "NodeAvailabilityWrapper")]
263 : pub enum NodeAvailability {
264 : // Normal, happy state
265 : Active(PageserverUtilization),
266 : // Node is warming up, but we expect it to become available soon. Covers
267 : // the time span between the re-attach response being composed on the storage controller
268 : // and the first successful heartbeat after the processing of the re-attach response
269 : // finishes on the pageserver.
270 : WarmingUp(Instant),
271 : // Offline: Tenants shouldn't try to attach here, but they may assume that their
272 : // secondary locations on this node still exist. Newly added nodes are in this
273 : // state until we successfully contact them.
274 : Offline,
275 : }
276 :
277 : impl PartialEq for NodeAvailability {
278 0 : fn eq(&self, other: &Self) -> bool {
279 : use NodeAvailability::*;
280 0 : matches!(
281 0 : (self, other),
282 : (Active(_), Active(_)) | (Offline, Offline) | (WarmingUp(_), WarmingUp(_))
283 : )
284 0 : }
285 : }
286 :
287 : impl Eq for NodeAvailability {}
288 :
289 : // This wrapper provides serde functionality and should only be used to
290 : // communicate with external callers that don't know or care about the
291 : // utilisation score of the pageserver being targeted.
292 0 : #[derive(Serialize, Deserialize, Clone, Copy, Debug)]
293 : pub enum NodeAvailabilityWrapper {
294 : Active,
295 : WarmingUp,
296 : Offline,
297 : }
298 :
299 : impl From<NodeAvailabilityWrapper> for NodeAvailability {
300 0 : fn from(val: NodeAvailabilityWrapper) -> Self {
301 0 : match val {
302 : // Assume the worst utilisation score to begin with. It will later be updated by
303 : // the heartbeats.
304 : NodeAvailabilityWrapper::Active => {
305 0 : NodeAvailability::Active(PageserverUtilization::full())
306 : }
307 0 : NodeAvailabilityWrapper::WarmingUp => NodeAvailability::WarmingUp(Instant::now()),
308 0 : NodeAvailabilityWrapper::Offline => NodeAvailability::Offline,
309 : }
310 0 : }
311 : }
312 :
313 : impl From<NodeAvailability> for NodeAvailabilityWrapper {
314 0 : fn from(val: NodeAvailability) -> Self {
315 0 : match val {
316 0 : NodeAvailability::Active(_) => NodeAvailabilityWrapper::Active,
317 0 : NodeAvailability::WarmingUp(_) => NodeAvailabilityWrapper::WarmingUp,
318 0 : NodeAvailability::Offline => NodeAvailabilityWrapper::Offline,
319 : }
320 0 : }
321 : }
322 :
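A sketch of the round trip implemented above: converting to the wrapper erases the utilisation detail, and converting back pessimistically assumes a fully utilised node until the next heartbeat refreshes it.

```rust
// Illustrative sketch only: the wrapper round trip.
fn availability_wrapper_example() {
    let internal = NodeAvailability::Active(PageserverUtilization::full());
    // Externally visible form: no utilisation score.
    let external: NodeAvailabilityWrapper = internal.into();
    // Converting back assumes the worst-case utilisation again; note that
    // PartialEq for NodeAvailability ignores the payload.
    let back: NodeAvailability = external.into();
    assert_eq!(back, NodeAvailability::Active(PageserverUtilization::full()));
}
```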
323 : /// Scheduling policy enables us to selectively disable some automatic actions that the
324 : /// controller performs on a tenant shard. This is only set to a non-default value by
325 : /// human intervention, and it is reset to the default value (Active) when the tenant's
326 : /// placement policy is modified away from Attached.
327 : ///
328 : /// The typical use of a non-Active scheduling policy is one of:
329 : /// - Pinning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
330 : /// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
331 : ///
332 : /// If you're not sure which policy to use to pin a shard to its current location, you probably
333 : /// want Pause.
334 0 : #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
335 : pub enum ShardSchedulingPolicy {
336 : // Normal mode: the tenant's scheduled locations may be updated at will, including
337 : // for non-essential optimization.
338 : Active,
339 :
340 : // Disable optimizations, but permit scheduling when necessary to fulfil the PlacementPolicy.
341 : // For example, this still permits a node's attachment location to change to a secondary in
342 : // response to a node failure, or to assign a new secondary if a node was removed.
343 : Essential,
344 :
345 : // No scheduling: leave the shard running wherever it currently is. Even if the shard is
346 : // unavailable, it will not be rescheduled to another node.
347 : Pause,
348 :
349 : // No reconciling: we will make no location_conf API calls to pageservers at all. If the
350 : // shard is unavailable, it stays that way. If a node fails, this shard doesn't get failed over.
351 : Stop,
352 : }
353 :
354 : impl Default for ShardSchedulingPolicy {
355 12843 : fn default() -> Self {
356 12843 : Self::Active
357 12843 : }
358 : }
359 :
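The shard-pinning workflow described in the doc comment above roughly corresponds to two requests: migrate the shard, then set a non-Active scheduling policy so the optimiser leaves it alone. A sketch of the request bodies (the node id is invented, and the HTTP endpoints are not shown):

```rust
// Illustrative sketch only: request bodies for pinning a shard to node 1.
fn pin_shard_example() {
    // 1. Migrate the shard onto the node we want to pin it to.
    let migrate = TenantShardMigrateRequest {
        node_id: NodeId(1),
        origin_node_id: None,
        migration_config: MigrationConfig::default(),
    };
    // 2. Pause scheduling so the optimiser does not move it back.
    let pin = TenantPolicyRequest {
        placement: None,
        scheduling: Some(ShardSchedulingPolicy::Pause),
    };
    let _bodies = (
        serde_json::to_string(&migrate).unwrap(),
        serde_json::to_string(&pin).unwrap(),
    );
}
```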
360 0 : #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
361 : pub enum NodeLifecycle {
362 : Active,
363 : Deleted,
364 : }
365 :
366 : impl FromStr for NodeLifecycle {
367 : type Err = anyhow::Error;
368 :
369 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
370 0 : match s {
371 0 : "active" => Ok(Self::Active),
372 0 : "deleted" => Ok(Self::Deleted),
373 0 : _ => Err(anyhow::anyhow!("Unknown node lifecycle '{s}'")),
374 : }
375 0 : }
376 : }
377 :
378 : impl From<NodeLifecycle> for String {
379 0 : fn from(value: NodeLifecycle) -> String {
380 : use NodeLifecycle::*;
381 0 : match value {
382 0 : Active => "active",
383 0 : Deleted => "deleted",
384 : }
385 0 : .to_string()
386 0 : }
387 : }
388 :
389 0 : #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
390 : pub enum NodeSchedulingPolicy {
391 : Active,
392 : Filling,
393 : Pause,
394 : PauseForRestart,
395 : Draining,
396 : Deleting,
397 : }
398 :
399 : impl FromStr for NodeSchedulingPolicy {
400 : type Err = anyhow::Error;
401 :
402 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
403 0 : match s {
404 0 : "active" => Ok(Self::Active),
405 0 : "filling" => Ok(Self::Filling),
406 0 : "pause" => Ok(Self::Pause),
407 0 : "pause_for_restart" => Ok(Self::PauseForRestart),
408 0 : "draining" => Ok(Self::Draining),
409 0 : "deleting" => Ok(Self::Deleting),
410 0 : _ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")),
411 : }
412 0 : }
413 : }
414 :
415 : impl From<NodeSchedulingPolicy> for String {
416 0 : fn from(value: NodeSchedulingPolicy) -> String {
417 : use NodeSchedulingPolicy::*;
418 0 : match value {
419 0 : Active => "active",
420 0 : Filling => "filling",
421 0 : Pause => "pause",
422 0 : PauseForRestart => "pause_for_restart",
423 0 : Draining => "draining",
424 0 : Deleting => "deleting",
425 : }
426 0 : .to_string()
427 0 : }
428 : }
429 :
430 0 : #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
431 : pub enum SkSchedulingPolicy {
432 : Active,
433 : Activating,
434 : Pause,
435 : Decomissioned,
436 : }
437 :
438 : impl FromStr for SkSchedulingPolicy {
439 : type Err = anyhow::Error;
440 :
441 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
442 0 : Ok(match s {
443 0 : "active" => Self::Active,
444 0 : "activating" => Self::Activating,
445 0 : "pause" => Self::Pause,
446 0 : "decomissioned" => Self::Decomissioned,
447 : _ => {
448 0 : return Err(anyhow::anyhow!(
449 0 : "Unknown scheduling policy '{s}', try active,pause,decomissioned"
450 0 : ));
451 : }
452 : })
453 0 : }
454 : }
455 :
456 : impl From<SkSchedulingPolicy> for String {
457 0 : fn from(value: SkSchedulingPolicy) -> String {
458 : use SkSchedulingPolicy::*;
459 0 : match value {
460 0 : Active => "active",
461 0 : Activating => "activating",
462 0 : Pause => "pause",
463 0 : Decomissioned => "decomissioned",
464 : }
465 0 : .to_string()
466 0 : }
467 : }
468 :
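Note that the accepted wire string for the decommissioned state keeps the historical "decomissioned" spelling; a sketch of the string round trip:

```rust
// Illustrative sketch only: string round trip, including the legacy spelling.
fn sk_scheduling_policy_example() {
    let policy = SkSchedulingPolicy::from_str("decomissioned").unwrap();
    assert!(matches!(policy, SkSchedulingPolicy::Decomissioned));
    assert_eq!(String::from(policy), "decomissioned");
}
```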
469 : /// Controls how tenant shards are mapped to locations on pageservers, e.g. whether
470 : /// to create secondary locations.
471 0 : #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
472 : pub enum PlacementPolicy {
473 : /// Normal live state: one attached pageserver and zero or more secondaries.
474 : Attached(usize),
475 : /// Create a single secondary mode location. This is useful when onboarding
476 : /// a tenant, or for an idle tenant that we might want to bring online quickly.
477 : Secondary,
478 :
479 : /// Do not attach to any pageservers. This is appropriate for tenants that
480 : /// have been idle for a long time, where we do not mind some delay in making
481 : /// them available in future.
482 : Detached,
483 : }
484 :
485 : impl PlacementPolicy {
486 55 : pub fn want_secondaries(&self) -> usize {
487 55 : match self {
488 52 : PlacementPolicy::Attached(secondary_count) => *secondary_count,
489 3 : PlacementPolicy::Secondary => 1,
490 0 : PlacementPolicy::Detached => 0,
491 : }
492 55 : }
493 : }
494 :
495 0 : #[derive(Serialize, Deserialize, Debug)]
496 : pub struct TenantShardMigrateResponse {}
497 :
498 : /// Metadata health record posted by the scrubber.
499 0 : #[derive(Serialize, Deserialize, Debug)]
500 : pub struct MetadataHealthRecord {
501 : pub tenant_shard_id: TenantShardId,
502 : pub healthy: bool,
503 : pub last_scrubbed_at: chrono::DateTime<chrono::Utc>,
504 : }
505 :
506 0 : #[derive(Serialize, Deserialize, Debug)]
507 : pub struct MetadataHealthUpdateRequest {
508 : pub healthy_tenant_shards: HashSet<TenantShardId>,
509 : pub unhealthy_tenant_shards: HashSet<TenantShardId>,
510 : }
511 :
512 0 : #[derive(Serialize, Deserialize, Debug)]
513 : pub struct MetadataHealthUpdateResponse {}
514 :
515 0 : #[derive(Serialize, Deserialize, Debug)]
516 : pub struct MetadataHealthListUnhealthyResponse {
517 : pub unhealthy_tenant_shards: Vec<TenantShardId>,
518 : }
519 :
520 : #[derive(Serialize, Deserialize, Debug)]
521 : pub struct MetadataHealthListOutdatedRequest {
522 : #[serde(with = "humantime_serde")]
523 : pub not_scrubbed_for: Duration,
524 : }
525 :
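`not_scrubbed_for` is likewise a humantime string on the wire; a sketch:

```rust
// Illustrative sketch only: list records not scrubbed for the last 24 hours.
fn outdated_request_example() {
    let req: MetadataHealthListOutdatedRequest =
        serde_json::from_str(r#"{"not_scrubbed_for": "24h"}"#).unwrap();
    assert_eq!(req.not_scrubbed_for, Duration::from_secs(24 * 60 * 60));
}
```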
526 0 : #[derive(Serialize, Deserialize, Debug)]
527 : pub struct MetadataHealthListOutdatedResponse {
528 : pub health_records: Vec<MetadataHealthRecord>,
529 : }
530 :
531 : /// Publicly exposed safekeeper description
532 0 : #[derive(Serialize, Deserialize, Clone)]
533 : pub struct SafekeeperDescribeResponse {
534 : pub id: NodeId,
535 : pub region_id: String,
536 : /// 1 is special: it means just created (not currently posted to storcon).
537 : /// Zero or negative is not really expected.
538 : /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
539 : pub version: i64,
540 : pub host: String,
541 : pub port: i32,
542 : pub http_port: i32,
543 : pub https_port: Option<i32>,
544 : pub availability_zone_id: String,
545 : pub scheduling_policy: SkSchedulingPolicy,
546 : }
547 :
548 0 : #[derive(Serialize, Deserialize, Clone)]
549 : pub struct SafekeeperSchedulingPolicyRequest {
550 : pub scheduling_policy: SkSchedulingPolicy,
551 : }
552 :
553 : /// Import request for safekeeper timelines.
554 0 : #[derive(Serialize, Deserialize, Clone)]
555 : pub struct TimelineImportRequest {
556 : pub tenant_id: TenantId,
557 : pub timeline_id: TimelineId,
558 : pub start_lsn: Lsn,
559 : pub sk_set: Vec<NodeId>,
560 : }
561 :
562 0 : #[derive(serde::Serialize, serde::Deserialize, Clone)]
563 : pub struct TimelineSafekeeperMigrateRequest {
564 : pub new_sk_set: Vec<NodeId>,
565 : }
566 :
567 : #[cfg(test)]
568 : mod test {
569 : use serde_json;
570 :
571 : use super::*;
572 :
573 : /// Check stability of PlacementPolicy's serialization
574 : #[test]
575 1 : fn placement_policy_encoding() -> anyhow::Result<()> {
576 1 : let v = PlacementPolicy::Attached(1);
577 1 : let encoded = serde_json::to_string(&v)?;
578 1 : assert_eq!(encoded, "{\"Attached\":1}");
579 1 : assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);
580 :
581 1 : let v = PlacementPolicy::Detached;
582 1 : let encoded = serde_json::to_string(&v)?;
583 1 : assert_eq!(encoded, "\"Detached\"");
584 1 : assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);
585 1 : Ok(())
586 1 : }
587 :
588 : #[test]
589 1 : fn test_reject_unknown_field() {
590 1 : let id = TenantId::generate();
591 1 : let create_request = serde_json::json!({
592 1 : "new_tenant_id": id.to_string(),
593 1 : "unknown_field": "unknown_value".to_string(),
594 : });
595 1 : let err = serde_json::from_value::<TenantCreateRequest>(create_request).unwrap_err();
596 1 : assert!(
597 1 : err.to_string().contains("unknown field `unknown_field`"),
598 0 : "expect unknown field `unknown_field` error, got: {err}"
599 : );
600 1 : }
601 :
602 : /// Check that a minimal migrate request with no config results in the expected default settings
603 : #[test]
604 1 : fn test_migrate_request_decode_defaults() {
605 1 : let json = r#"{
606 1 : "node_id": 123
607 1 : }"#;
608 :
609 1 : let request: TenantShardMigrateRequest = serde_json::from_str(json).unwrap();
610 1 : assert_eq!(request.node_id, NodeId(123));
611 1 : assert_eq!(request.origin_node_id, None);
612 1 : assert!(!request.migration_config.override_scheduler);
613 1 : assert!(request.migration_config.prewarm);
614 1 : assert_eq!(request.migration_config.secondary_warmup_timeout, None);
615 1 : assert_eq!(
616 : request.migration_config.secondary_download_request_timeout,
617 : None
618 : );
619 1 : }
620 :
621 : /// Check that an empty migration config decodes to the expected default settings
622 : #[test]
623 1 : fn test_migration_config_decode_defaults() {
624 : // Decode an empty config and rely on the serde defaults for every field
625 1 : let json = r#"{
626 1 : }"#;
627 :
628 1 : let config: MigrationConfig = serde_json::from_str(json).unwrap();
629 :
630 : // Check each field's expected default value
631 1 : assert!(!config.override_scheduler);
632 1 : assert!(config.prewarm);
633 1 : assert_eq!(config.secondary_warmup_timeout, None);
634 1 : assert_eq!(config.secondary_download_request_timeout, None);
635 1 : assert_eq!(config.secondary_warmup_timeout, None);
636 :
637 : // Consistency check that the Default impl agrees with our serde defaults
638 1 : assert_eq!(MigrationConfig::default(), config);
639 1 : }
640 : }