use std::collections::{HashMap, HashSet};
use std::fmt::Display;
use std::str::FromStr;
use std::time::{Duration, Instant};

/// Request/response types for the storage controller
/// API (`/control/v1` prefix). Implemented by the server
/// in [`storage_controller::http`]
use serde::{Deserialize, Serialize};
use utils::id::{NodeId, TenantId, TimelineId};
use utils::lsn::Lsn;

use crate::models::{PageserverUtilization, ShardParameters, TenantConfig};
use crate::shard::{ShardStripeSize, TenantShardId};

#[derive(Serialize, Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct TenantCreateRequest {
    pub new_tenant_id: TenantShardId,
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub generation: Option<u32>,

    // If omitted, create a single shard with TenantShardId::unsharded()
    #[serde(default)]
    #[serde(skip_serializing_if = "ShardParameters::is_unsharded")]
    pub shard_parameters: ShardParameters,

    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub placement_policy: Option<PlacementPolicy>,

    #[serde(flatten)]
    pub config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it
}
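
// Illustrative sketch (editor addition, not part of the upstream API surface): a minimal
// create request only needs `new_tenant_id`; the remaining fields fall back to their serde
// defaults, and tenant config overrides travel as flattened top-level keys. This assumes
// the flattened `TenantConfig` deserializes from an empty set of overrides.
#[cfg(test)]
#[test]
fn tenant_create_request_minimal_decode_sketch() {
    let id = TenantId::generate();
    let request = serde_json::json!({ "new_tenant_id": id.to_string() });
    let decoded = serde_json::from_value::<TenantCreateRequest>(request).unwrap();
    assert_eq!(decoded.new_tenant_id.tenant_id, id);
    assert_eq!(decoded.generation, None);
    assert!(decoded.placement_policy.is_none());
}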

#[derive(Serialize, Deserialize)]
pub struct TenantCreateResponseShard {
    pub shard_id: TenantShardId,
    pub node_id: NodeId,
    pub generation: u32,
}

#[derive(Serialize, Deserialize)]
pub struct TenantCreateResponse {
    pub shards: Vec<TenantCreateResponseShard>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NodeRegisterRequest {
    pub node_id: NodeId,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,

    pub availability_zone_id: AvailabilityZone,
}

#[derive(Serialize, Deserialize)]
pub struct NodeConfigureRequest {
    pub node_id: NodeId,

    pub availability: Option<NodeAvailabilityWrapper>,
    pub scheduling: Option<NodeSchedulingPolicy>,
}

#[derive(Serialize, Deserialize)]
pub struct TenantPolicyRequest {
    pub placement: Option<PlacementPolicy>,
    pub scheduling: Option<ShardSchedulingPolicy>,
}

#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
pub struct AvailabilityZone(pub String);

impl Display for AvailabilityZone {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Serialize, Deserialize)]
pub struct ShardsPreferredAzsRequest {
    #[serde(flatten)]
    pub preferred_az_ids: HashMap<TenantShardId, Option<AvailabilityZone>>,
}
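
// Illustrative sketch (editor addition): because `preferred_az_ids` is flattened, the request
// body is a bare JSON object mapping tenant shard ids to AZ names (or null to clear a
// preference), rather than nesting the map under a field name. The AZ value is hypothetical.
#[cfg(test)]
#[test]
fn shards_preferred_azs_request_decode_sketch() {
    let shard = TenantShardId::unsharded(TenantId::generate());
    let body = format!(r#"{{"{shard}": "us-east-2a"}}"#);
    let request: ShardsPreferredAzsRequest = serde_json::from_str(&body).unwrap();
    assert_eq!(
        request.preferred_az_ids.get(&shard),
        Some(&Some(AvailabilityZone("us-east-2a".to_string())))
    );
}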

#[derive(Serialize, Deserialize)]
pub struct ShardsPreferredAzsResponse {
    pub updated: Vec<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantLocateResponseShard {
    pub shard_id: TenantShardId,
    pub node_id: NodeId,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,
}

#[derive(Serialize, Deserialize)]
pub struct TenantLocateResponse {
    pub shards: Vec<TenantLocateResponseShard>,
    pub shard_params: ShardParameters,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantDescribeResponse {
    pub tenant_id: TenantId,
    pub shards: Vec<TenantDescribeResponseShard>,
    pub stripe_size: ShardStripeSize,
    pub policy: PlacementPolicy,
    pub config: TenantConfig,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShardResponse {
    pub node_id: NodeId,
    pub shards: Vec<NodeShard>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShard {
    pub tenant_shard_id: TenantShardId,
    /// Whether the shard is observed as a secondary on this node. True = yes, False = no, None = not on this node.
    pub is_observed_secondary: Option<bool>,
    /// Whether the shard is intended to be a secondary on this node. True = yes, False = no, None = not on this node.
    pub is_intended_secondary: Option<bool>,
}

#[derive(Serialize, Deserialize)]
pub struct NodeDescribeResponse {
    pub id: NodeId,

    pub availability: NodeAvailabilityWrapper,
    pub scheduling: NodeSchedulingPolicy,

    pub availability_zone_id: String,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantDescribeResponseShard {
    pub tenant_shard_id: TenantShardId,

    pub node_attached: Option<NodeId>,
    pub node_secondary: Vec<NodeId>,

    pub last_error: String,

    /// A task is currently running to reconcile this tenant's intent state with the state on pageservers
    pub is_reconciling: bool,
    /// This shard failed to send a compute notification to the cloud control plane, and a retry is pending.
    pub is_pending_compute_notification: bool,
    /// A shard split is currently underway
    pub is_splitting: bool,
    /// A timeline is being imported into this tenant
    pub is_importing: bool,

    pub scheduling_policy: ShardSchedulingPolicy,

    pub preferred_az_id: Option<String>,
}

/// Migration request for a given tenant shard to a given node.
///
/// Explicitly migrating a particular shard is a low-level operation.
/// TODO: higher-level "Reschedule tenant" operation where the request
/// specifies some constraints, e.g. asking it to get off particular node(s)
#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateRequest {
    pub node_id: NodeId,

    /// Optionally, callers may specify the node they are migrating _from_, and the server will
    /// reject the request if the shard is no longer attached there: this enables writing safer
    /// clients that don't risk fighting with some other movement of the shard.
    #[serde(default)]
    pub origin_node_id: Option<NodeId>,

    #[serde(default)]
    pub migration_config: MigrationConfig,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct MigrationConfig {
    /// If true, the migration will be executed even if it is to a location with a sub-optimal scheduling
    /// score: this is usually not what you want, and if you use this then you'll also need to set the
    /// tenant's scheduling policy to Essential or Pause to avoid the optimiser reverting your migration.
    ///
    /// Default: false
    #[serde(default)]
    pub override_scheduler: bool,

    /// If true, the migration will be done gracefully by creating a secondary location first and
    /// waiting for it to warm up before cutting over. If false, and there is no existing secondary
    /// location at the destination, the tenant will be migrated immediately. If the tenant's data
    /// can't be downloaded within [`Self::secondary_warmup_timeout`], then the migration will go
    /// ahead but run with a cold cache that can severely reduce performance until it warms up.
    ///
    /// When doing a graceful migration, the migration API returns as soon as it is started.
    ///
    /// Default: true
    #[serde(default = "default_prewarm")]
    pub prewarm: bool,

    /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
    /// overall for secondary warmup before cutting over
    #[serde(default)]
    #[serde(with = "humantime_serde")]
    pub secondary_warmup_timeout: Option<Duration>,
    /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
    /// within each secondary download poll call to pageserver.
    #[serde(default)]
    #[serde(with = "humantime_serde")]
    pub secondary_download_request_timeout: Option<Duration>,
}

fn default_prewarm() -> bool {
    true
}

impl Default for MigrationConfig {
    fn default() -> Self {
        Self {
            override_scheduler: false,
            prewarm: default_prewarm(),
            secondary_warmup_timeout: None,
            secondary_download_request_timeout: None,
        }
    }
}
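
// Illustrative sketch (editor addition): decoding a partially specified migration config.
// Any omitted field falls back to the serde defaults declared above, so `prewarm` stays
// true even when only `override_scheduler` is supplied.
#[cfg(test)]
#[test]
fn migration_config_partial_decode_sketch() {
    let config: MigrationConfig = serde_json::from_str(r#"{"override_scheduler": true}"#).unwrap();
    assert!(config.override_scheduler);
    assert!(config.prewarm);
    assert_eq!(config.secondary_warmup_timeout, None);
}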

#[derive(Serialize, Clone, Debug)]
#[serde(into = "NodeAvailabilityWrapper")]
pub enum NodeAvailability {
    // Normal, happy state
    Active(PageserverUtilization),
    // Node is warming up, but we expect it to become available soon. Covers
    // the time span between the re-attach response being composed on the storage controller
    // and the first successful heartbeat after the processing of the re-attach response
    // finishes on the pageserver.
    WarmingUp(Instant),
    // Offline: Tenants shouldn't try to attach here, but they may assume that their
    // secondary locations on this node still exist. Newly added nodes are in this
    // state until we successfully contact them.
    Offline,
}

impl PartialEq for NodeAvailability {
    fn eq(&self, other: &Self) -> bool {
        use NodeAvailability::*;
        matches!(
            (self, other),
            (Active(_), Active(_)) | (Offline, Offline) | (WarmingUp(_), WarmingUp(_))
        )
    }
}

impl Eq for NodeAvailability {}

// This wrapper provides serde functionality and it should only be used to
// communicate with external callers which don't know or care about the
// utilisation score of the pageserver it is targeting.
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub enum NodeAvailabilityWrapper {
    Active,
    WarmingUp,
    Offline,
}

impl From<NodeAvailabilityWrapper> for NodeAvailability {
    fn from(val: NodeAvailabilityWrapper) -> Self {
        match val {
            // Assume the worst utilisation score to begin with. It will later be updated by
            // the heartbeats.
            NodeAvailabilityWrapper::Active => {
                NodeAvailability::Active(PageserverUtilization::full())
            }
            NodeAvailabilityWrapper::WarmingUp => NodeAvailability::WarmingUp(Instant::now()),
            NodeAvailabilityWrapper::Offline => NodeAvailability::Offline,
        }
    }
}

impl From<NodeAvailability> for NodeAvailabilityWrapper {
    fn from(val: NodeAvailability) -> Self {
        match val {
            NodeAvailability::Active(_) => NodeAvailabilityWrapper::Active,
            NodeAvailability::WarmingUp(_) => NodeAvailabilityWrapper::WarmingUp,
            NodeAvailability::Offline => NodeAvailabilityWrapper::Offline,
        }
    }
}
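
// Illustrative sketch (editor addition): round-tripping through the wrapper. The wrapper
// deliberately drops the utilization payload, and `PartialEq` on `NodeAvailability` ignores
// it, so an Active node compares equal regardless of its current score.
#[cfg(test)]
#[test]
fn node_availability_wrapper_round_trip_sketch() {
    let offline = NodeAvailability::from(NodeAvailabilityWrapper::Offline);
    assert_eq!(offline, NodeAvailability::Offline);

    let wrapper = NodeAvailabilityWrapper::from(NodeAvailability::Active(PageserverUtilization::full()));
    assert!(matches!(wrapper, NodeAvailabilityWrapper::Active));
}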

/// Scheduling policy enables us to selectively disable some automatic actions that the
/// controller performs on a tenant shard. This is only set to a non-default value by
/// human intervention, and it is reset to the default value (Active) when the tenant's
/// placement policy is modified away from Attached.
///
/// The typical use of a non-Active scheduling policy is one of:
/// - Pinning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
/// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
///
/// If you're not sure which policy to use to pin a shard to its current location, you probably
/// want Pause.
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum ShardSchedulingPolicy {
    // Normal mode: the tenant's scheduled locations may be updated at will, including
    // for non-essential optimization.
    Active,

    // Disable optimizations, but permit scheduling when necessary to fulfil the PlacementPolicy.
    // For example, this still permits a node's attachment location to change to a secondary in
    // response to a node failure, or to assign a new secondary if a node was removed.
    Essential,

    // No scheduling: leave the shard running wherever it currently is. Even if the shard is
    // unavailable, it will not be rescheduled to another node.
    Pause,

    // No reconciling: we will make no location_conf API calls to pageservers at all. If the
    // shard is unavailable, it stays that way. If a node fails, this shard doesn't get failed over.
    Stop,
}

impl Default for ShardSchedulingPolicy {
    fn default() -> Self {
        Self::Active
    }
}
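
// Illustrative sketch (editor addition): ShardSchedulingPolicy uses the plain externally
// tagged serde encoding, so it travels over the API as the bare variant name, and the
// default is Active.
#[cfg(test)]
#[test]
fn shard_scheduling_policy_encoding_sketch() {
    assert_eq!(ShardSchedulingPolicy::default(), ShardSchedulingPolicy::Active);
    assert_eq!(
        serde_json::to_string(&ShardSchedulingPolicy::Pause).unwrap(),
        "\"Pause\""
    );
}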

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum NodeLifecycle {
    Active,
    Deleted,
}

impl FromStr for NodeLifecycle {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self::Active),
            "deleted" => Ok(Self::Deleted),
            _ => Err(anyhow::anyhow!("Unknown node lifecycle '{s}'")),
        }
    }
}

impl From<NodeLifecycle> for String {
    fn from(value: NodeLifecycle) -> String {
        use NodeLifecycle::*;
        match value {
            Active => "active",
            Deleted => "deleted",
        }
        .to_string()
    }
}

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum NodeSchedulingPolicy {
    Active,
    Filling,
    Pause,
    PauseForRestart,
    Draining,
}

impl FromStr for NodeSchedulingPolicy {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self::Active),
            "filling" => Ok(Self::Filling),
            "pause" => Ok(Self::Pause),
            "pause_for_restart" => Ok(Self::PauseForRestart),
            "draining" => Ok(Self::Draining),
            _ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")),
        }
    }
}

impl From<NodeSchedulingPolicy> for String {
    fn from(value: NodeSchedulingPolicy) -> String {
        use NodeSchedulingPolicy::*;
        match value {
            Active => "active",
            Filling => "filling",
            Pause => "pause",
            PauseForRestart => "pause_for_restart",
            Draining => "draining",
        }
        .to_string()
    }
}
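
// Illustrative sketch (editor addition): the string form used by the FromStr/From<String>
// pair above is snake_case and round-trips, unlike the serde encoding, which uses the
// CamelCase variant names.
#[cfg(test)]
#[test]
fn node_scheduling_policy_string_round_trip_sketch() {
    let policy = NodeSchedulingPolicy::from_str("pause_for_restart").unwrap();
    assert_eq!(policy, NodeSchedulingPolicy::PauseForRestart);
    assert_eq!(String::from(policy), "pause_for_restart");
}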

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum SkSchedulingPolicy {
    Active,
    Pause,
    Decomissioned,
}

impl FromStr for SkSchedulingPolicy {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "active" => Self::Active,
            "pause" => Self::Pause,
            "decomissioned" => Self::Decomissioned,
            _ => {
                return Err(anyhow::anyhow!(
                    "Unknown scheduling policy '{s}', try active,pause,decomissioned"
                ));
            }
        })
    }
}

impl From<SkSchedulingPolicy> for String {
    fn from(value: SkSchedulingPolicy) -> String {
        use SkSchedulingPolicy::*;
        match value {
            Active => "active",
            Pause => "pause",
            Decomissioned => "decomissioned",
        }
        .to_string()
    }
}
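
// Illustrative sketch (editor addition): note that both the variant and its persisted string
// are spelled "decomissioned" (sic); callers must match that spelling, not "decommissioned".
#[cfg(test)]
#[test]
fn sk_scheduling_policy_string_round_trip_sketch() {
    let policy = SkSchedulingPolicy::from_str("decomissioned").unwrap();
    assert_eq!(policy, SkSchedulingPolicy::Decomissioned);
    assert_eq!(String::from(policy), "decomissioned");
}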

/// Controls how tenant shards are mapped to locations on pageservers, e.g. whether
/// to create secondary locations.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum PlacementPolicy {
    /// Normal live state: one attached pageserver and zero or more secondaries.
    Attached(usize),
    /// Create one secondary-mode location. This is useful when onboarding
    /// a tenant, or for an idle tenant that we might want to bring online quickly.
    Secondary,

    /// Do not attach to any pageservers. This is appropriate for tenants that
    /// have been idle for a long time, where we do not mind some delay in making
    /// them available in future.
    Detached,
}

impl PlacementPolicy {
    pub fn want_secondaries(&self) -> usize {
        match self {
            PlacementPolicy::Attached(secondary_count) => *secondary_count,
            PlacementPolicy::Secondary => 1,
            PlacementPolicy::Detached => 0,
        }
    }
}
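
// Illustrative sketch (editor addition): how each placement policy maps to the number of
// secondary locations the controller will try to schedule for a shard.
#[cfg(test)]
#[test]
fn placement_policy_want_secondaries_sketch() {
    assert_eq!(PlacementPolicy::Attached(2).want_secondaries(), 2);
    assert_eq!(PlacementPolicy::Secondary.want_secondaries(), 1);
    assert_eq!(PlacementPolicy::Detached.want_secondaries(), 0);
}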

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateResponse {}

/// Metadata health record posted from scrubber.
#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthRecord {
    pub tenant_shard_id: TenantShardId,
    pub healthy: bool,
    pub last_scrubbed_at: chrono::DateTime<chrono::Utc>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthUpdateRequest {
    pub healthy_tenant_shards: HashSet<TenantShardId>,
    pub unhealthy_tenant_shards: HashSet<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthUpdateResponse {}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListUnhealthyResponse {
    pub unhealthy_tenant_shards: Vec<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListOutdatedRequest {
    #[serde(with = "humantime_serde")]
    pub not_scrubbed_for: Duration,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListOutdatedResponse {
    pub health_records: Vec<MetadataHealthRecord>,
}

/// Publicly exposed safekeeper description
#[derive(Serialize, Deserialize, Clone)]
pub struct SafekeeperDescribeResponse {
    pub id: NodeId,
    pub region_id: String,
    /// 1 is special: it means just created (not currently posted to storcon).
    /// Zero or negative is not really expected.
    /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    pub version: i64,
    pub host: String,
    pub port: i32,
    pub http_port: i32,
    pub https_port: Option<i32>,
    pub availability_zone_id: String,
    pub scheduling_policy: SkSchedulingPolicy,
}

#[derive(Serialize, Deserialize, Clone)]
pub struct SafekeeperSchedulingPolicyRequest {
    pub scheduling_policy: SkSchedulingPolicy,
}

/// Import request for safekeeper timelines.
#[derive(Serialize, Deserialize, Clone)]
pub struct TimelineImportRequest {
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub start_lsn: Lsn,
    pub sk_set: Vec<NodeId>,
}

#[cfg(test)]
mod test {
    use serde_json;

    use super::*;

    /// Check stability of PlacementPolicy's serialization
    #[test]
    fn placement_policy_encoding() -> anyhow::Result<()> {
        let v = PlacementPolicy::Attached(1);
        let encoded = serde_json::to_string(&v)?;
        assert_eq!(encoded, "{\"Attached\":1}");
        assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);

        let v = PlacementPolicy::Detached;
        let encoded = serde_json::to_string(&v)?;
        assert_eq!(encoded, "\"Detached\"");
        assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);
        Ok(())
    }

    #[test]
    fn test_reject_unknown_field() {
        let id = TenantId::generate();
        let create_request = serde_json::json!({
            "new_tenant_id": id.to_string(),
            "unknown_field": "unknown_value".to_string(),
        });
        let err = serde_json::from_value::<TenantCreateRequest>(create_request).unwrap_err();
        assert!(
            err.to_string().contains("unknown field `unknown_field`"),
            "expect unknown field `unknown_field` error, got: {}",
            err
        );
    }

    /// Check that a minimal migrate request with no config results in the expected default settings
    #[test]
    fn test_migrate_request_decode_defaults() {
        let json = r#"{
            "node_id": 123
        }"#;

        let request: TenantShardMigrateRequest = serde_json::from_str(json).unwrap();
        assert_eq!(request.node_id, NodeId(123));
        assert_eq!(request.origin_node_id, None);
        assert!(!request.migration_config.override_scheduler);
        assert!(request.migration_config.prewarm);
        assert_eq!(request.migration_config.secondary_warmup_timeout, None);
        assert_eq!(
            request.migration_config.secondary_download_request_timeout,
            None
        );
    }

    /// Check that an empty migration config decodes to the expected default settings
    #[test]
    fn test_migration_config_decode_defaults() {
        // Decode a config with no fields specified at all
        let json = r#"{
        }"#;

        let config: MigrationConfig = serde_json::from_str(json).unwrap();

        // Check each field's expected default value
        assert!(!config.override_scheduler);
        assert!(config.prewarm);
        assert_eq!(config.secondary_warmup_timeout, None);
        assert_eq!(config.secondary_download_request_timeout, None);

        // Consistency check that the Default impl agrees with our serde defaults
        assert_eq!(MigrationConfig::default(), config);
    }
}