From w.bumiller at proxmox.com Tue May 6 13:57:08 2025 From: w.bumiller at proxmox.com (Wolfgang Bumiller) Date: Tue, 6 May 2025 13:57:08 +0200 Subject: [pbs-devel] applied: [PATCH proxmox v2] proxmox-client: add query builder In-Reply-To: <20250416113601.256829-1-m.sandoval@proxmox.com> References: <20250416113601.256829-1-m.sandoval@proxmox.com> Message-ID: applied, thanks From c.ebner at proxmox.com Wed May 7 17:38:34 2025 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 7 May 2025 17:38:34 +0200 Subject: [pbs-devel] [PATCH proxmox-backup 2/2] fix #6358: remove group note file if present on group destroy In-Reply-To: <20250507153834.758840-1-c.ebner@proxmox.com> References: <20250507153834.758840-1-c.ebner@proxmox.com> Message-ID: <20250507153834.758840-3-c.ebner@proxmox.com> Removing the group directory when forgetting a backup group or removing the final backup snapshot of a group did not take into consideration a potentially present group note file, leading for it to fail. Further, since the owner file is removed before trying to remove the (not empty) group directory, the group will not be usable anymore as the owner check will fail as well. To fix this, remove the backup group's note file first, if present and only after that try to cleanup the rest. Fixes: https://bugzilla.proxmox.com/show_bug.cgi?id=6358 Signed-off-by: Christian Ebner --- pbs-datastore/src/backup_info.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index d4732fdd9..22b4eddf3 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -244,6 +244,13 @@ impl BackupGroup { /// Helper function, assumes that no more snapshots are present in the group. 
fn remove_group_dir(&self) -> Result<(), Error> { + let note_path = self.store.group_notes_path(&self.ns, &self.group); + if let Err(err) = std::fs::remove_file(&note_path) { + if err.kind() != std::io::ErrorKind::NotFound { + bail!("removing the note file '{note_path:?}' failed - {err}") + } + } + let owner_path = self.store.owner_path(&self.ns, &self.group); std::fs::remove_file(&owner_path).map_err(|err| { -- 2.39.5 From c.ebner at proxmox.com Wed May 7 17:38:33 2025 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 7 May 2025 17:38:33 +0200 Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api: datastore: make group notes path helper a DataStore method In-Reply-To: <20250507153834.758840-1-c.ebner@proxmox.com> References: <20250507153834.758840-1-c.ebner@proxmox.com> Message-ID: <20250507153834.758840-2-c.ebner@proxmox.com> Move and make the helper function to get a backup groups notes file path a `DataStore` method instead. This allows it to be reused when access to the notes path is required from the datastore itself. Further, use the plural `notes` wording also in the helper to be consistent with the rest of the codebase. In preparation for correctly removing the notes file from the backup group on destruction. No functional changes intended. 
Signed-off-by: Christian Ebner --- pbs-datastore/src/datastore.rs | 11 +++++++++++ src/api2/admin/datastore.rs | 26 +++++++------------------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index cbf78ecb6..91c7e76be 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -40,6 +40,8 @@ use crate::DataBlob; static DATASTORE_MAP: LazyLock>>> = LazyLock::new(|| Mutex::new(HashMap::new())); +const GROUP_NOTES_FILE_NAME: &str = "notes"; + /// checks if auth_id is owner, or, if owner is a token, if /// auth_id is the user of the token pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> { @@ -524,6 +526,15 @@ impl DataStore { full_path } + /// Returns the absolute path of a backup groups notes file + pub fn group_notes_path( + &self, + ns: &BackupNamespace, + group: &pbs_api_types::BackupGroup, + ) -> PathBuf { + self.group_path(ns, group).join(GROUP_NOTES_FILE_NAME) + } + /// Returns the absolute path for backup_dir pub fn snapshot_path( &self, diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 392494488..cc7e17a29 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -4,7 +4,7 @@ use std::collections::HashSet; use std::ffi::OsStr; use std::ops::Deref; use std::os::unix::ffi::OsStrExt; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::sync::Arc; use anyhow::{bail, format_err, Context, Error}; @@ -77,18 +77,6 @@ use crate::backup::{ use crate::server::jobstate::{compute_schedule_status, Job, JobState}; -const GROUP_NOTES_FILE_NAME: &str = "notes"; - -fn get_group_note_path( - store: &DataStore, - ns: &BackupNamespace, - group: &pbs_api_types::BackupGroup, -) -> PathBuf { - let mut note_path = store.group_path(ns, group); - note_path.push(GROUP_NOTES_FILE_NAME); - note_path -} - // helper to unify common sequence of checks: // 1. 
check privs on NS (full or limited access) // 2. load datastore @@ -244,8 +232,8 @@ pub fn list_groups( }) .to_owned(); - let note_path = get_group_note_path(&datastore, &ns, group.as_ref()); - let comment = file_read_firstline(note_path).ok(); + let notes_path = datastore.group_notes_path(&ns, group.as_ref()); + let comment = file_read_firstline(notes_path).ok(); group_info.push(GroupListItem { backup: group.into(), @@ -2053,8 +2041,8 @@ pub fn get_group_notes( &backup_group, )?; - let note_path = get_group_note_path(&datastore, &ns, &backup_group); - Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned())) + let notes_path = datastore.group_notes_path(&ns, &backup_group); + Ok(file_read_optional_string(notes_path)?.unwrap_or_else(|| "".to_owned())) } #[api( @@ -2101,8 +2089,8 @@ pub fn set_group_notes( &backup_group, )?; - let note_path = get_group_note_path(&datastore, &ns, &backup_group); - replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?; + let notes_path = datastore.group_notes_path(&ns, &backup_group); + replace_file(notes_path, notes.as_bytes(), CreateOptions::new(), false)?; Ok(()) } -- 2.39.5 From c.ebner at proxmox.com Wed May 7 17:38:32 2025 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 7 May 2025 17:38:32 +0200 Subject: [pbs-devel] [PATCH proxmox-backup 0/2] fix #6358: group removal fails if group notes exist Message-ID: <20250507153834.758840-1-c.ebner@proxmox.com> These patches fix an issue with the backup group removal failing and leaving behind an unusable backup group, due to not taking a possibly present backup group's notes file into account. The series consists of 2 patches, the first being preparatory, allowing to reuse the group notes filepath helper, the second fixing the actual issue by conditionally removing the notes file before further group directory cleanup. 
Link to the bugtracker issue: https://bugzilla.proxmox.com/show_bug.cgi?id=6358 Christian Ebner (2): api: datastore: make group notes path helper a DataStore method fix #6358: remove group note file if present on group destroy pbs-datastore/src/backup_info.rs | 7 +++++++ pbs-datastore/src/datastore.rs | 11 +++++++++++ src/api2/admin/datastore.rs | 26 +++++++------------------- 3 files changed, 25 insertions(+), 19 deletions(-) -- 2.39.5 From l.leahu-vladucu at proxmox.com Wed May 7 17:36:39 2025 From: l.leahu-vladucu at proxmox.com (=?UTF-8?q?Lauren=C8=9Biu=20Leahu-Vl=C4=83ducu?=) Date: Wed, 7 May 2025 17:36:39 +0200 Subject: [pbs-devel] [PATCH proxmox] proxmox-product-config: fix code documentation on permissions Message-ID: <20250507153639.46774-1-l.leahu-vladucu@proxmox.com> This patch fixes the documentation of some functions being inconsistent with the actual code. While such inconsistencies are never good, when it comes to permissions, they might have even worse consequences. To be precise, this patch fixes the following: - replace_config() actually uses permissions 0640 (docs stated 0660) - although the possibility of setting a privileged user (usually root, but possibly different) has been added in the past, the docs still stated "root" or "superuser". However, some functions also explicitly use "root", which made it even more confusing. It is now clear which functions use the API user, which use the privileged user, and which explicitly use root. - fixed some small style inconsistencies (e.g. 
priv-user instead of priv_user) Signed-off-by: Laurențiu Leahu-Vlăducu --- .../src/filesystem_helpers.rs | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/proxmox-product-config/src/filesystem_helpers.rs b/proxmox-product-config/src/filesystem_helpers.rs index 9aa8b1a4..d9f0e827 100644 --- a/proxmox-product-config/src/filesystem_helpers.rs +++ b/proxmox-product-config/src/filesystem_helpers.rs @@ -17,9 +17,9 @@ pub fn default_create_options() -> CreateOptions { .group(api_user.gid) } -/// Return [CreateOptions] for files owned by `priv_user.uid:api-user.gid` with permission `0640`. +/// Return [CreateOptions] for files owned by `priv_user.uid:api_user.gid` with permission `0640`. /// -/// Only the superuser can write those files, but group `api-user.gid` can read them. +/// Only `priv_user` can write those files, but group `api_user.gid` can read them. pub fn privileged_create_options() -> CreateOptions { let api_user = get_api_user(); let priv_user = get_priv_user(); @@ -30,9 +30,9 @@ pub fn privileged_create_options() -> CreateOptions { .group(api_user.gid) } -/// Return [CreateOptions] for files owned by `priv_user.uid: priv_user.gid` with permission `0600`. +/// Return [CreateOptions] for files owned by `priv_user.uid:priv_user.gid` with permission `0600`. /// -/// Only the superuser can read and write those files. +/// Only `priv_user` can read and write those files. pub fn secret_create_options() -> CreateOptions { let priv_user = get_priv_user(); let mode = Mode::from_bits_truncate(0o0600); @@ -63,16 +63,16 @@ pub fn lockfile_create_options() -> CreateOptions { .group(api_user.gid) } -/// Atomically write data to file owned by `priv_user.uid:api-user.gid` with permission `0640` +/// Atomically write data to file owned by `priv_user.uid:api_user.gid` with permission `0640` /// -/// Only the superuser can write those files, but group 'api-user' can read them. 
+/// Only `priv_user` can write those files, but group 'api_user' can read them. pub fn replace_privileged_config>(path: P, data: &[u8]) -> Result<(), Error> { let options = privileged_create_options(); proxmox_sys::fs::replace_file(path, data, options, true)?; Ok(()) } -/// Atomically write data to file owned by `api-user.uid:api-user.gid` with permission `0660`. +/// Atomically write data to file owned by `api_user.uid:api_user.gid` with permission `0640`. pub fn replace_config>(path: P, data: &[u8]) -> Result<(), Error> { let options = default_create_options(); proxmox_sys::fs::replace_file(path, data, options, true)?; @@ -81,7 +81,7 @@ pub fn replace_config>(path: P, data: &[u8]) -> Result<(), Error> /// Atomically write data to file owned by `priv_user.uid:priv_user.gid` with permission `0600`. /// -/// Only the superuser can read and write those files. +/// Only `priv_user` can read and write those files. pub fn replace_secret_config>(path: P, data: &[u8]) -> Result<(), Error> { let options = secret_create_options(); proxmox_sys::fs::replace_file(path, data, options, true)?; @@ -119,15 +119,15 @@ pub unsafe fn create_mocked_lock() -> ApiLockGuard { ApiLockGuard(None) } -/// Open or create a lock file owned by user `api-user` and lock it. +/// Open or create a lock file owned by user `api_user` and lock it. /// -/// Owner/Group of the file is set to `api-user.uid/api-user.gid`. +/// Owner/Group of the file is set to `api_user.uid/api_user.gid`. /// File mode is `0660`. /// Default timeout is 10 seconds. /// /// The lock is released as soon as you drop the returned lock guard. /// -/// Note: This method needs to be called by user `root` or `api-user`. +/// Note: This method needs to be called by `priv_user` or `api_user`. pub fn open_api_lockfile>( path: P, timeout: Option, @@ -139,14 +139,14 @@ pub fn open_api_lockfile>( Ok(ApiLockGuard(Some(file))) } /// -/// Open or create a lock file owned by root and lock it. 
+/// Open or create a lock file owned by `priv_user` and lock it. /// /// File mode is `0600`. /// Default timeout is 10 seconds. /// /// The lock is released as soon as you drop the returned lock guard. /// -/// Note: This method needs to be called by user `root`. +/// Note: This method needs to be called by user `priv_user`. pub fn open_secret_lockfile>( path: P, timeout: Option, -- 2.39.5 From g.goller at proxmox.com Wed May 7 18:31:14 2025 From: g.goller at proxmox.com (Gabriel Goller) Date: Wed, 7 May 2025 18:31:14 +0200 Subject: [pbs-devel] [RFC PATCH] schema: allow serializing rust Schema to perl JsonSchema Message-ID: <20250507163114.1162300-1-g.goller@proxmox.com> Implement serde::Serialize on the rust Schema, so that we can serialize it and use it as a JsonSchema in perl. This allows us to write a single Schema in rust and reuse it in perl for the api properties. The interesting bits (custom impls) are: * Recursive oneOf type-property resolver * oneOf and allOf implementation * ApiStringFormat skip of ApiStringVerifyFn (which won't work obviously) Signed-off-by: Gabriel Goller --- This is kinda hard to test, because nothing actually fails when the properties are wrong and the whole allOf, oneOf and ApiStringFormat is a bit untransparent. So some properties could be wrongly serialized, but I think I got everything right. Looking over all the properties would be appreciated! 
Cargo.toml | 1 + proxmox-schema/Cargo.toml | 4 +- proxmox-schema/src/const_regex.rs | 12 ++ proxmox-schema/src/schema.rs | 242 ++++++++++++++++++++++++++++-- 4 files changed, 246 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2ca0ea618707..a0d760ae8fc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,6 +104,7 @@ regex = "1.5" serde = "1.0" serde_cbor = "0.11.1" serde_json = "1.0" +serde_with = "3.8.1" serde_plain = "1.0" syn = { version = "2", features = [ "full", "visit-mut" ] } tar = "0.4" diff --git a/proxmox-schema/Cargo.toml b/proxmox-schema/Cargo.toml index c8028aa52bd0..48ebf3a9005e 100644 --- a/proxmox-schema/Cargo.toml +++ b/proxmox-schema/Cargo.toml @@ -15,8 +15,9 @@ rust-version.workspace = true anyhow.workspace = true const_format = { workspace = true, optional = true } regex.workspace = true -serde.workspace = true +serde = { workspace = true, features = ["derive"] } serde_json.workspace = true +serde_with.workspace = true textwrap = "0.16" # the upid type needs this for 'getpid' @@ -27,7 +28,6 @@ proxmox-api-macro = { workspace = true, optional = true } [dev-dependencies] url.workspace = true -serde = { workspace = true, features = [ "derive" ] } proxmox-api-macro.workspace = true [features] diff --git a/proxmox-schema/src/const_regex.rs b/proxmox-schema/src/const_regex.rs index 8ddc41abedeb..56f6c27fa1de 100644 --- a/proxmox-schema/src/const_regex.rs +++ b/proxmox-schema/src/const_regex.rs @@ -1,5 +1,7 @@ use std::fmt; +use serde::Serialize; + /// Helper to represent const regular expressions /// /// The current Regex::new() function is not `const_fn`. 
Unless that @@ -13,6 +15,16 @@ pub struct ConstRegexPattern { pub regex_obj: fn() -> &'static regex::Regex, } +impl Serialize for ConstRegexPattern { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + // Get the compiled regex and serialize its pattern as a string + serializer.serialize_str((self.regex_obj)().as_str()) + } +} + impl fmt::Debug for ConstRegexPattern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.regex_string) diff --git a/proxmox-schema/src/schema.rs b/proxmox-schema/src/schema.rs index ddbbacd462a4..11461eaf6ace 100644 --- a/proxmox-schema/src/schema.rs +++ b/proxmox-schema/src/schema.rs @@ -4,10 +4,12 @@ //! completely static API definitions that can be included within the programs read-only text //! segment. -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::fmt; use anyhow::{bail, format_err, Error}; +use serde::ser::{SerializeMap, SerializeStruct}; +use serde::{Serialize, Serializer}; use serde_json::{json, Value}; use crate::ConstRegexPattern; @@ -181,7 +183,8 @@ impl<'a> FromIterator<(&'a str, Error)> for ParameterError { } /// Data type to describe boolean values -#[derive(Debug)] +#[serde_with::skip_serializing_none] +#[derive(Debug, Serialize)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] #[non_exhaustive] pub struct BooleanSchema { @@ -222,7 +225,8 @@ impl BooleanSchema { } /// Data type to describe integer values. 
-#[derive(Debug)] +#[serde_with::skip_serializing_none] +#[derive(Debug, Serialize)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] #[non_exhaustive] pub struct IntegerSchema { @@ -304,7 +308,8 @@ impl IntegerSchema { } /// Data type to describe (JSON like) number value -#[derive(Debug)] +#[serde_with::skip_serializing_none] +#[derive(Debug, Serialize)] #[non_exhaustive] pub struct NumberSchema { pub description: &'static str, @@ -406,7 +411,8 @@ impl PartialEq for NumberSchema { } /// Data type to describe string values. -#[derive(Debug)] +#[serde_with::skip_serializing_none] +#[derive(Debug, Serialize)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] #[non_exhaustive] pub struct StringSchema { @@ -418,6 +424,7 @@ pub struct StringSchema { /// Optional maximal length. pub max_length: Option, /// Optional microformat. + #[serde(flatten)] pub format: Option<&'static ApiStringFormat>, /// A text representation of the format/type (used to generate documentation). pub type_text: Option<&'static str>, @@ -534,7 +541,8 @@ impl StringSchema { /// /// All array elements are of the same type, as defined in the `items` /// schema. -#[derive(Debug)] +#[serde_with::skip_serializing_none] +#[derive(Debug, Serialize)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] #[non_exhaustive] pub struct ArraySchema { @@ -634,6 +642,43 @@ pub type SchemaPropertyEntry = (&'static str, bool, &'static Schema); /// This is a workaround unless RUST can const_fn `Hash::new()` pub type SchemaPropertyMap = &'static [SchemaPropertyEntry]; +/// A wrapper struct to hold the [`SchemaPropertyMap`] and serialize it nicely. +/// +/// [`SchemaPropertyMap`] holds [`SchemaPropertyEntry`]s which are tuples. Tuples are serialized to +/// arrays, but we need a Map with the name (first item in the tuple) as a key and the optional +/// (second item in the tuple) as a property of the value. 
+pub struct SerializableSchemaProperties<'a>(&'a [SchemaPropertyEntry]); + +impl Serialize for SerializableSchemaProperties<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut seq = serializer.serialize_map(Some(self.0.len()))?; + + for (name, optional, schema) in self.0 { + let schema_with_metadata = OptionalSchema { + optional: *optional, + schema, + }; + + seq.serialize_entry(&name, &schema_with_metadata)?; + } + + seq.end() + } +} + +/// A schema with a optional bool property. +/// +/// The schema gets flattened, so it looks just like a normal Schema but with a optional property. +#[derive(Serialize)] +struct OptionalSchema<'a> { + optional: bool, + #[serde(flatten)] + schema: &'a Schema, +} + const fn assert_properties_sorted(properties: SchemaPropertyMap) { use std::cmp::Ordering; @@ -656,7 +701,7 @@ const fn assert_properties_sorted(properties: SchemaPropertyMap) { /// Legacy property strings may contain shortcuts where the *value* of a specific key is used as a /// *key* for yet another option. Most notably, PVE's `netX` properties use `=` /// instead of `model=,macaddr=`. 
-#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Serialize)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] pub struct KeyAliasInfo { pub key_alias: &'static str, @@ -700,6 +745,77 @@ pub struct ObjectSchema { pub key_alias_info: Option, } +impl Serialize for ObjectSchema { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut s = serializer.serialize_struct("ObjectSchema", 5)?; + + s.serialize_field("description", self.description)?; + s.serialize_field("additional_properties", &self.additional_properties)?; + + // Collect all OneOf type properties recursively + let mut oneofs: Vec = Vec::new(); + for (_, _, schema) in self.properties { + collect_oneof_type_properties(schema, &mut oneofs); + } + + if !oneofs.is_empty() { + // Extend the oneOf type-properties with the actual properties + oneofs.extend_from_slice(self.properties); + s.serialize_field("properties", &SerializableSchemaProperties(&oneofs))?; + } else { + s.serialize_field("properties", &SerializableSchemaProperties(self.properties))?; + } + + if let Some(default_key) = self.default_key { + s.serialize_field("default_key", default_key)?; + } else { + s.skip_field("default_key")?; + } + if let Some(key_alias_info) = self.key_alias_info { + s.serialize_field("key_alias_info", &key_alias_info)?; + } else { + s.skip_field("key_alias_info")?; + } + + s.end() + } +} + +// Recursive function to find all OneOf type properties in a schema +fn collect_oneof_type_properties(schema: &Schema, result: &mut Vec) { + match schema { + Schema::OneOf(oneof) => { + result.push(*oneof.type_property_entry); + } + Schema::Array(array) => { + // Recursively check the array schema + collect_oneof_type_properties(array.items, result); + } + Schema::String(string) => { + // Check the PropertyString Schema + if let Some(ApiStringFormat::PropertyString(schema)) = string.format { + collect_oneof_type_properties(schema, result); + } + } + Schema::Object(obj) => { + // Check 
all properties in the object + for (_, _, prop_schema) in obj.properties { + collect_oneof_type_properties(prop_schema, result); + } + } + Schema::AllOf(all_of) => { + // Check all schemas in the allOf list + for &schema in all_of.list { + collect_oneof_type_properties(schema, result); + } + } + _ => {} + } +} + impl ObjectSchema { /// Create a new `object` schema. /// @@ -811,7 +927,7 @@ impl ObjectSchema { /// /// Technically this could also contain an `additional_properties` flag, however, in the JSON /// Schema, this is not supported, so here we simply assume additional properties to be allowed. -#[derive(Debug)] +#[derive(Debug, Serialize)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] #[non_exhaustive] pub struct AllOfSchema { @@ -864,7 +980,7 @@ impl AllOfSchema { /// In serde-language, we use an internally tagged enum representation. /// /// Note that these are limited to object schemas. Other schemas will produce errors. -#[derive(Debug)] +#[derive(Debug, Serialize)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] #[non_exhaustive] pub struct OneOfSchema { @@ -880,6 +996,30 @@ pub struct OneOfSchema { pub list: &'static [(&'static str, &'static Schema)], } +fn serialize_oneof_schema(one_of: &OneOfSchema, serializer: S) -> Result +where + S: Serializer, +{ + use serde::ser::SerializeMap; + + let mut map = serializer.serialize_map(Some(3))?; + + map.serialize_entry("description", &one_of.description)?; + + let variants = one_of + .list + .iter() + .map(|(_, schema)| schema) + .collect::>(); + + map.serialize_entry("oneOf", &variants)?; + + // The schema gets inserted into the parent properties + map.serialize_entry("type-property", &one_of.type_property_entry.0)?; + + map.end() +} + const fn assert_one_of_list_is_sorted(list: &[(&str, &Schema)]) { use std::cmp::Ordering; @@ -1360,7 +1500,8 @@ impl Iterator for OneOfPropertyIterator { /// ], /// ).schema(); /// ``` -#[derive(Debug)] +#[derive(Debug, Serialize)] +#[serde(tag = 
"type", rename_all = "lowercase")] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] pub enum Schema { Null, @@ -1370,10 +1511,81 @@ pub enum Schema { String(StringSchema), Object(ObjectSchema), Array(ArraySchema), + #[serde(serialize_with = "serialize_allof_schema")] AllOf(AllOfSchema), + #[serde(untagged)] + #[serde(serialize_with = "serialize_oneof_schema", rename = "oneOf")] OneOf(OneOfSchema), } +/// Serialize the AllOf Schema +/// +/// This will create one ObjectSchema and merge the properties of all the children. +fn serialize_allof_schema(all_of: &AllOfSchema, serializer: S) -> Result +where + S: Serializer, +{ + use serde::ser::SerializeMap; + + let mut map = serializer.serialize_map(Some(4))?; + + // Add the top-level description + map.serialize_entry("description", &all_of.description)?; + + // The type is always object + map.serialize_entry("type", "object")?; + + let mut all_properties = HashMap::new(); + let mut additional_properties = false; + + for &schema in all_of.list { + if let Some(object_schema) = schema.object() { + // If any schema allows additional properties, the merged schema will too + if object_schema.additional_properties { + additional_properties = true; + } + + // Add all properties from this schema + for (name, optional, prop_schema) in object_schema.properties { + all_properties.insert(*name, (*optional, *prop_schema)); + } + } else if let Some(nested_all_of) = schema.all_of() { + // For nested AllOf schemas go through recursively + for &nested_schema in nested_all_of.list { + if let Some(object_schema) = nested_schema.object() { + if object_schema.additional_properties { + additional_properties = true; + } + + for (name, optional, prop_schema) in object_schema.properties { + all_properties.insert(*name, (*optional, *prop_schema)); + } + } + } + } + } + + // Add the merged properties + let properties_entry = all_properties + .iter() + .map(|(name, (optional, schema))| { + ( + *name, + OptionalSchema { + optional: 
*optional, + schema, + }, + ) + }) + .collect::>(); + + map.serialize_entry("properties", &properties_entry)?; + + map.serialize_entry("additional_properties", &additional_properties)?; + + map.end() +} + impl Schema { /// Verify JSON value with `schema`. pub fn verify_json(&self, data: &Value) -> Result<(), Error> { @@ -1694,10 +1906,12 @@ impl Schema { } /// A string enum entry. An enum entry must have a value and a description. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize)] +#[serde(transparent)] #[cfg_attr(feature = "test-harness", derive(Eq, PartialEq))] pub struct EnumEntry { pub value: &'static str, + #[serde(skip)] pub description: &'static str, } @@ -1776,14 +1990,20 @@ impl EnumEntry { /// let data = PRODUCT_LIST_SCHEMA.parse_property_string("1,2"); // parse as Array /// assert!(data.is_ok()); /// ``` +#[derive(Serialize)] pub enum ApiStringFormat { /// Enumerate all valid strings + #[serde(rename = "enum")] Enum(&'static [EnumEntry]), /// Use a regular expression to describe valid strings. + #[serde(rename = "pattern")] Pattern(&'static ConstRegexPattern), /// Use a schema to describe complex types encoded as string. + #[serde(rename = "format")] PropertyString(&'static Schema), /// Use a verification function. + /// Note: we can't serialize this, panic if we encounter this. + #[serde(skip)] VerifyFn(ApiStringVerifyFn), } -- 2.39.5