use std::collections::{HashMap, HashSet};
use std::fmt::Display;
use std::net::IpAddr;
use std::str::FromStr;
use std::time::{Duration, Instant};

/// Request/response types for the storage controller
/// API (`/control/v1` prefix). Implemented by the server
/// in [`storage_controller::http`]
use serde::{Deserialize, Serialize};
use utils::id::{NodeId, TenantId, TimelineId};
use utils::lsn::Lsn;

use crate::models::{PageserverUtilization, ShardParameters, TenantConfig, TimelineInfo};
use crate::shard::{ShardStripeSize, TenantShardId};

#[derive(Serialize, Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct TenantCreateRequest {
    pub new_tenant_id: TenantShardId,
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub generation: Option<u32>,

    // If omitted, create a single shard with TenantShardId::unsharded()
    #[serde(default)]
    #[serde(skip_serializing_if = "ShardParameters::is_unsharded")]
    pub shard_parameters: ShardParameters,

    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub placement_policy: Option<PlacementPolicy>,

    #[serde(flatten)]
    pub config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it
}

#[derive(Serialize, Deserialize)]
pub struct TenantCreateResponseShard {
    pub shard_id: TenantShardId,
    pub node_id: NodeId,
    pub generation: u32,
}

#[derive(Serialize, Deserialize)]
pub struct TenantCreateResponse {
    pub shards: Vec<TenantCreateResponseShard>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NodeRegisterRequest {
    pub node_id: NodeId,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,
    pub listen_grpc_addr: Option<String>,
    pub listen_grpc_port: Option<u16>,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,

    pub availability_zone_id: AvailabilityZone,

    // Reachable IP address of the PS/SK registering, if known.
    // The Hadron Cluster Coordinator will update the DNS record of the registering node
    // with this IP address.
    pub node_ip_addr: Option<IpAddr>,
}

#[derive(Serialize, Deserialize)]
pub struct NodeConfigureRequest {
    pub node_id: NodeId,

    pub availability: Option<NodeAvailabilityWrapper>,
    pub scheduling: Option<NodeSchedulingPolicy>,
}

#[derive(Serialize, Deserialize)]
pub struct TenantPolicyRequest {
    pub placement: Option<PlacementPolicy>,
    pub scheduling: Option<ShardSchedulingPolicy>,
}

#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
pub struct AvailabilityZone(pub String);

impl Display for AvailabilityZone {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Serialize, Deserialize)]
pub struct ShardsPreferredAzsRequest {
    #[serde(flatten)]
    pub preferred_az_ids: HashMap<TenantShardId, Option<AvailabilityZone>>,
}

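// A sketch (not part of the original test suite) of how the flattened map above appears
// on the wire: the request body is a bare JSON object mapping tenant shard ids to
// optional AZ names, with no wrapper key. The AZ name "us-east-1a" is purely illustrative.
#[cfg(test)]
#[test]
fn shards_preferred_azs_request_flatten_example() {
    let shard_id = TenantShardId::unsharded(TenantId::generate());
    let body = format!(r#"{{"{shard_id}": "us-east-1a"}}"#);
    let request: ShardsPreferredAzsRequest = serde_json::from_str(&body).unwrap();
    assert_eq!(
        request.preferred_az_ids.get(&shard_id),
        Some(&Some(AvailabilityZone("us-east-1a".to_string())))
    );
}
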
#[derive(Serialize, Deserialize)]
pub struct ShardsPreferredAzsResponse {
    pub updated: Vec<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantLocateResponseShard {
    pub shard_id: TenantShardId,
    pub node_id: NodeId,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,
    pub listen_grpc_addr: Option<String>,
    pub listen_grpc_port: Option<u16>,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,
}

#[derive(Serialize, Deserialize)]
pub struct TenantLocateResponse {
    pub shards: Vec<TenantLocateResponseShard>,
    pub shard_params: ShardParameters,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantDescribeResponse {
    pub tenant_id: TenantId,
    pub shards: Vec<TenantDescribeResponseShard>,
    pub stripe_size: ShardStripeSize,
    pub policy: PlacementPolicy,
    pub config: TenantConfig,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantTimelineDescribeResponse {
    pub shards: Vec<TimelineInfo>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image_consistent_lsn: Option<Lsn>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShardResponse {
    pub node_id: NodeId,
    pub shards: Vec<NodeShard>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShard {
    pub tenant_shard_id: TenantShardId,
    /// Whether the shard is observed secondary on a specific node. True = yes, False = no, None = not on this node.
    pub is_observed_secondary: Option<bool>,
    /// Whether the shard is intended to be a secondary on a specific node. True = yes, False = no, None = not on this node.
    pub is_intended_secondary: Option<bool>,
}

#[derive(Serialize, Deserialize)]
pub struct NodeDescribeResponse {
    pub id: NodeId,

    pub availability: NodeAvailabilityWrapper,
    pub scheduling: NodeSchedulingPolicy,

    pub availability_zone_id: String,

    pub listen_http_addr: String,
    pub listen_http_port: u16,
    pub listen_https_port: Option<u16>,

    pub listen_pg_addr: String,
    pub listen_pg_port: u16,
    pub listen_grpc_addr: Option<String>,
    pub listen_grpc_port: Option<u16>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TenantDescribeResponseShard {
    pub tenant_shard_id: TenantShardId,

    pub node_attached: Option<NodeId>,
    pub node_secondary: Vec<NodeId>,

    pub last_error: String,

    /// A task is currently running to reconcile this tenant's intent state with the state on pageservers
    pub is_reconciling: bool,
    /// This shard failed in sending a compute notification to the cloud control plane, and a retry is pending.
    pub is_pending_compute_notification: bool,
    /// A shard split is currently underway
    pub is_splitting: bool,
    /// A timeline is being imported into this tenant
    pub is_importing: bool,

    pub scheduling_policy: ShardSchedulingPolicy,

    pub preferred_az_id: Option<String>,
}

/// Migration request for a given tenant shard to a given node.
///
/// Explicitly migrating a particular shard is a low-level operation.
/// TODO: higher level "Reschedule tenant" operation where the request
/// specifies some constraints, e.g. asking it to get off particular node(s)
#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateRequest {
    pub node_id: NodeId,

    /// Optionally, callers may specify the node they are migrating _from_, and the server will
    /// reject the request if the shard is no longer attached there: this enables writing safer
    /// clients that don't risk fighting with some other movement of the shard.
    #[serde(default)]
    pub origin_node_id: Option<NodeId>,

    #[serde(default)]
    pub migration_config: MigrationConfig,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct MigrationConfig {
    /// If true, the migration will be executed even if it is to a location with a sub-optimal scheduling
    /// score: this is usually not what you want, and if you use this then you'll also need to set the
    /// tenant's scheduling policy to Essential or Pause to avoid the optimiser reverting your migration.
    ///
    /// Default: false
    #[serde(default)]
    pub override_scheduler: bool,

    /// If true, the migration will be done gracefully by creating a secondary location first and
    /// waiting for it to warm up before cutting over. If false, if there is no existing secondary
    /// location at the destination, the tenant will be migrated immediately. If the tenant's data
    /// can't be downloaded within [`Self::secondary_warmup_timeout`], then the migration will go
    /// ahead but run with a cold cache that can severely reduce performance until it warms up.
    ///
    /// When doing a graceful migration, the migration API returns as soon as it is started.
    ///
    /// Default: true
    #[serde(default = "default_prewarm")]
    pub prewarm: bool,

    /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
    /// overall for secondary warmup before cutting over
    #[serde(default)]
    #[serde(with = "humantime_serde")]
    pub secondary_warmup_timeout: Option<Duration>,
    /// For non-prewarm migrations which will immediately enter a cutover to the new node: how long to wait
    /// within each secondary download poll call to pageserver.
    #[serde(default)]
    #[serde(with = "humantime_serde")]
    pub secondary_download_request_timeout: Option<Duration>,
}

fn default_prewarm() -> bool {
    true
}

impl Default for MigrationConfig {
    fn default() -> Self {
        Self {
            override_scheduler: false,
            prewarm: default_prewarm(),
            secondary_warmup_timeout: None,
            secondary_download_request_timeout: None,
        }
    }
}

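// A sketch (illustrative values, not a prescribed configuration) of how the timeout fields
// above are encoded on the wire: because of `#[serde(with = "humantime_serde")]`, callers
// pass human-readable durations such as "5m" or "30s" rather than integer seconds.
#[cfg(test)]
#[test]
fn migration_config_humantime_example() {
    let json = r#"{
        "prewarm": false,
        "secondary_warmup_timeout": "5m",
        "secondary_download_request_timeout": "30s"
    }"#;
    let config: MigrationConfig = serde_json::from_str(json).unwrap();
    assert!(!config.prewarm);
    assert_eq!(config.secondary_warmup_timeout, Some(Duration::from_secs(300)));
    assert_eq!(
        config.secondary_download_request_timeout,
        Some(Duration::from_secs(30))
    );
}
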
#[derive(Serialize, Clone, Debug)]
#[serde(into = "NodeAvailabilityWrapper")]
pub enum NodeAvailability {
    // Normal, happy state
    Active(PageserverUtilization),
    // Node is warming up, but we expect it to become available soon. Covers
    // the time span between the re-attach response being composed on the storage controller
    // and the first successful heartbeat after the processing of the re-attach response
    // finishes on the pageserver.
    WarmingUp(Instant),
    // Offline: Tenants shouldn't try to attach here, but they may assume that their
    // secondary locations on this node still exist. Newly added nodes are in this
    // state until we successfully contact them.
    Offline,
}

impl PartialEq for NodeAvailability {
    fn eq(&self, other: &Self) -> bool {
        use NodeAvailability::*;
        matches!(
            (self, other),
            (Active(_), Active(_)) | (Offline, Offline) | (WarmingUp(_), WarmingUp(_))
        )
    }
}

impl Eq for NodeAvailability {}

// This wrapper provides serde functionality and it should only be used to
// communicate with external callers which don't know or care about the
// utilisation score of the pageserver it is targeting.
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub enum NodeAvailabilityWrapper {
    Active,
    WarmingUp,
    Offline,
}

impl From<NodeAvailabilityWrapper> for NodeAvailability {
    fn from(val: NodeAvailabilityWrapper) -> Self {
        match val {
            // Assume the worst utilisation score to begin with. It will later be updated by
            // the heartbeats.
            NodeAvailabilityWrapper::Active => {
                NodeAvailability::Active(PageserverUtilization::full())
            }
            NodeAvailabilityWrapper::WarmingUp => NodeAvailability::WarmingUp(Instant::now()),
            NodeAvailabilityWrapper::Offline => NodeAvailability::Offline,
        }
    }
}

impl From<NodeAvailability> for NodeAvailabilityWrapper {
    fn from(val: NodeAvailability) -> Self {
        match val {
            NodeAvailability::Active(_) => NodeAvailabilityWrapper::Active,
            NodeAvailability::WarmingUp(_) => NodeAvailabilityWrapper::WarmingUp,
            NodeAvailability::Offline => NodeAvailabilityWrapper::Offline,
        }
    }
}

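// A sketch of the round-trip semantics implied by the two conversions above: converting to the
// wrapper drops the utilization payload, and `PartialEq` on `NodeAvailability` compares only the
// variant, so a round-tripped value still compares equal to the original `Active` value.
#[cfg(test)]
#[test]
fn node_availability_wrapper_round_trip_example() {
    let original = NodeAvailability::Active(PageserverUtilization::full());
    let wrapper: NodeAvailabilityWrapper = original.clone().into();
    let restored: NodeAvailability = wrapper.into();
    assert_eq!(restored, original);
}
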
/// Scheduling policy enables us to selectively disable some automatic actions that the
/// controller performs on a tenant shard. This is only set to a non-default value by
/// human intervention, and it is reset to the default value (Active) when the tenant's
/// placement policy is modified away from Attached.
///
/// The typical use of a non-Active scheduling policy is one of:
/// - Pinning a shard to a node (i.e. migrating it there & setting a non-Active scheduling policy)
/// - Working around a bug (e.g. if something is flapping and we need to stop it until the bug is fixed)
///
/// If you're not sure which policy to use to pin a shard to its current location, you probably
/// want Pause.
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum ShardSchedulingPolicy {
    // Normal mode: the tenant's scheduled locations may be updated at will, including
    // for non-essential optimization.
    Active,

    // Disable optimizations, but permit scheduling when necessary to fulfil the PlacementPolicy.
    // For example, this still permits a node's attachment location to change to a secondary in
    // response to a node failure, or to assign a new secondary if a node was removed.
    Essential,

    // No scheduling: leave the shard running wherever it currently is. Even if the shard is
    // unavailable, it will not be rescheduled to another node.
    Pause,

    // No reconciling: we will make no location_conf API calls to pageservers at all. If the
    // shard is unavailable, it stays that way. If a node fails, this shard doesn't get failed over.
    Stop,
}

impl Default for ShardSchedulingPolicy {
    fn default() -> Self {
        Self::Active
    }
}

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum NodeLifecycle {
    Active,
    Deleted,
}

impl FromStr for NodeLifecycle {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self::Active),
            "deleted" => Ok(Self::Deleted),
            _ => Err(anyhow::anyhow!("Unknown node lifecycle '{s}'")),
        }
    }
}

impl From<NodeLifecycle> for String {
    fn from(value: NodeLifecycle) -> String {
        use NodeLifecycle::*;
        match value {
            Active => "active",
            Deleted => "deleted",
        }
        .to_string()
    }
}

#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum NodeSchedulingPolicy {
    Active,
    Filling,
    Pause,
    PauseForRestart,
    Draining,
    Deleting,
}

impl FromStr for NodeSchedulingPolicy {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "active" => Ok(Self::Active),
            "filling" => Ok(Self::Filling),
            "pause" => Ok(Self::Pause),
            "pause_for_restart" => Ok(Self::PauseForRestart),
            "draining" => Ok(Self::Draining),
            "deleting" => Ok(Self::Deleting),
            _ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")),
        }
    }
}

impl From<NodeSchedulingPolicy> for String {
    fn from(value: NodeSchedulingPolicy) -> String {
        use NodeSchedulingPolicy::*;
        match value {
            Active => "active",
            Filling => "filling",
            Pause => "pause",
            PauseForRestart => "pause_for_restart",
            Draining => "draining",
            Deleting => "deleting",
        }
        .to_string()
    }
}

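// A sketch checking an invariant implied by the two impls above: the strings produced by
// `From<NodeSchedulingPolicy> for String` are exactly what `FromStr` accepts, so a policy
// survives a round trip through its string representation.
#[cfg(test)]
#[test]
fn node_scheduling_policy_string_round_trip() {
    use NodeSchedulingPolicy::*;
    for policy in [Active, Filling, Pause, PauseForRestart, Draining, Deleting] {
        let s = String::from(policy);
        assert_eq!(NodeSchedulingPolicy::from_str(&s).unwrap(), policy);
    }
}
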
#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)]
pub enum SkSchedulingPolicy {
    Active,
    Activating,
    Pause,
    Decomissioned,
}

impl FromStr for SkSchedulingPolicy {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "active" => Self::Active,
            "activating" => Self::Activating,
            "pause" => Self::Pause,
            "decomissioned" => Self::Decomissioned,
            _ => {
                return Err(anyhow::anyhow!(
                    "Unknown scheduling policy '{s}', try active,pause,decomissioned"
                ));
            }
        })
    }
}

impl From<SkSchedulingPolicy> for String {
    fn from(value: SkSchedulingPolicy) -> String {
        use SkSchedulingPolicy::*;
        match value {
            Active => "active",
            Activating => "activating",
            Pause => "pause",
            Decomissioned => "decomissioned",
        }
        .to_string()
    }
}

/// Controls how tenant shards are mapped to locations on pageservers, e.g. whether
/// to create secondary locations.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum PlacementPolicy {
    /// Normal live state: one attached pageserver and zero or more secondaries.
    Attached(usize),
    /// Create one secondary-mode location. This is useful when onboarding
    /// a tenant, or for an idle tenant that we might want to bring online quickly.
    Secondary,

    /// Do not attach to any pageservers. This is appropriate for tenants that
    /// have been idle for a long time, where we do not mind some delay in making
    /// them available in future.
    Detached,
}

impl PlacementPolicy {
    pub fn want_secondaries(&self) -> usize {
        match self {
            PlacementPolicy::Attached(secondary_count) => *secondary_count,
            PlacementPolicy::Secondary => 1,
            PlacementPolicy::Detached => 0,
        }
    }
}

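// A sketch of what `want_secondaries` means for each policy: `Attached(n)` asks for `n`
// secondaries in addition to the attached location, `Secondary` keeps exactly one warm
// secondary, and `Detached` keeps nothing.
#[cfg(test)]
#[test]
fn placement_policy_want_secondaries_example() {
    assert_eq!(PlacementPolicy::Attached(0).want_secondaries(), 0);
    assert_eq!(PlacementPolicy::Attached(2).want_secondaries(), 2);
    assert_eq!(PlacementPolicy::Secondary.want_secondaries(), 1);
    assert_eq!(PlacementPolicy::Detached.want_secondaries(), 0);
}
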
#[derive(Serialize, Deserialize, Debug)]
pub struct TenantShardMigrateResponse {}

/// Metadata health record posted from scrubber.
#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthRecord {
    pub tenant_shard_id: TenantShardId,
    pub healthy: bool,
    pub last_scrubbed_at: chrono::DateTime<chrono::Utc>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthUpdateRequest {
    pub healthy_tenant_shards: HashSet<TenantShardId>,
    pub unhealthy_tenant_shards: HashSet<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthUpdateResponse {}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListUnhealthyResponse {
    pub unhealthy_tenant_shards: Vec<TenantShardId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListOutdatedRequest {
    #[serde(with = "humantime_serde")]
    pub not_scrubbed_for: Duration,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetadataHealthListOutdatedResponse {
    pub health_records: Vec<MetadataHealthRecord>,
}

/// Publicly exposed safekeeper description
#[derive(Serialize, Deserialize, Clone)]
pub struct SafekeeperDescribeResponse {
    pub id: NodeId,
    pub region_id: String,
    /// 1 is special, it means just created (not currently posted to storcon).
    /// Zero or negative is not really expected.
    /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    pub version: i64,
    pub host: String,
    pub port: i32,
    pub http_port: i32,
    pub https_port: Option<i32>,
    pub availability_zone_id: String,
    pub scheduling_policy: SkSchedulingPolicy,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TimelineSafekeeperPeer {
    pub node_id: NodeId,
    pub listen_http_addr: String,
    pub http_port: i32,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SCSafekeeperTimeline {
    // SC does not know the tenant id.
    pub timeline_id: TimelineId,
    pub peers: Vec<NodeId>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SCSafekeeperTimelinesResponse {
    pub timelines: Vec<SCSafekeeperTimeline>,
    pub safekeeper_peers: Vec<TimelineSafekeeperPeer>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SafekeeperTimeline {
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub peers: Vec<NodeId>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SafekeeperTimelinesResponse {
    pub timelines: Vec<SafekeeperTimeline>,
    pub safekeeper_peers: Vec<TimelineSafekeeperPeer>,
}

#[derive(Serialize, Deserialize, Clone)]
pub struct SafekeeperSchedulingPolicyRequest {
    pub scheduling_policy: SkSchedulingPolicy,
}

/// Import request for safekeeper timelines.
#[derive(Serialize, Deserialize, Clone)]
pub struct TimelineImportRequest {
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    pub start_lsn: Lsn,
    pub sk_set: Vec<NodeId>,
}

#[derive(serde::Serialize, serde::Deserialize, Clone)]
pub struct TimelineSafekeeperMigrateRequest {
    pub new_sk_set: Vec<NodeId>,
}

#[cfg(test)]
mod test {
    use serde_json;

    use super::*;

    /// Check stability of PlacementPolicy's serialization
    #[test]
    fn placement_policy_encoding() -> anyhow::Result<()> {
        let v = PlacementPolicy::Attached(1);
        let encoded = serde_json::to_string(&v)?;
        assert_eq!(encoded, "{\"Attached\":1}");
        assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);

        let v = PlacementPolicy::Detached;
        let encoded = serde_json::to_string(&v)?;
        assert_eq!(encoded, "\"Detached\"");
        assert_eq!(serde_json::from_str::<PlacementPolicy>(&encoded)?, v);
        Ok(())
    }

    #[test]
    fn test_reject_unknown_field() {
        let id = TenantId::generate();
        let create_request = serde_json::json!({
            "new_tenant_id": id.to_string(),
            "unknown_field": "unknown_value".to_string(),
        });
        let err = serde_json::from_value::<TenantCreateRequest>(create_request).unwrap_err();
        assert!(
            err.to_string().contains("unknown field `unknown_field`"),
            "expect unknown field `unknown_field` error, got: {err}"
        );
    }

    /// Check that a minimal migrate request with no config results in the expected default settings
    #[test]
    fn test_migrate_request_decode_defaults() {
        let json = r#"{
            "node_id": 123
        }"#;

        let request: TenantShardMigrateRequest = serde_json::from_str(json).unwrap();
        assert_eq!(request.node_id, NodeId(123));
        assert_eq!(request.origin_node_id, None);
        assert!(!request.migration_config.override_scheduler);
        assert!(request.migration_config.prewarm);
        assert_eq!(request.migration_config.secondary_warmup_timeout, None);
        assert_eq!(
            request.migration_config.secondary_download_request_timeout,
            None
        );
    }

    /// Check that an empty migration config results in the expected default settings
    #[test]
    fn test_migration_config_decode_defaults() {
        // Specify none of the config's fields
        let json = r#"{
        }"#;

        let config: MigrationConfig = serde_json::from_str(json).unwrap();

        // Check each field's expected default value
        assert!(!config.override_scheduler);
        assert!(config.prewarm);
        assert_eq!(config.secondary_warmup_timeout, None);
        assert_eq!(config.secondary_download_request_timeout, None);

        // Consistency check that the Default impl agrees with our serde defaults
        assert_eq!(MigrationConfig::default(), config);
    }
}