pull/sync: treat unset max-depth as full recursion
This makes the behaviour consistent with tape backup and verification jobs. Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
This commit is contained in:
		| @ -473,8 +473,8 @@ pub struct SyncJobConfig { | ||||
|     pub remote_ns: Option<BackupNamespace>, | ||||
|     #[serde(skip_serializing_if = "Option::is_none")] | ||||
|     pub remove_vanished: Option<bool>, | ||||
|     #[serde(default)] | ||||
|     pub max_depth: usize, | ||||
|     #[serde(skip_serializing_if = "Option::is_none")] | ||||
|     pub max_depth: Option<usize>, | ||||
|     #[serde(skip_serializing_if = "Option::is_none")] | ||||
|     pub comment: Option<String>, | ||||
|     #[serde(skip_serializing_if = "Option::is_none")] | ||||
|  | ||||
| @ -221,6 +221,8 @@ pub enum DeletableProperty { | ||||
|     ns, | ||||
|     /// Delete the remote_ns property, | ||||
|     remote_ns, | ||||
|     /// Delete the max_depth property, | ||||
|     max_depth, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
| @ -312,6 +314,9 @@ pub fn update_sync_job( | ||||
|                 DeletableProperty::remote_ns => { | ||||
|                     data.remote_ns = None; | ||||
|                 } | ||||
|                 DeletableProperty::max_depth => { | ||||
|                     data.max_depth = None; | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| @ -341,7 +346,9 @@ pub fn update_sync_job( | ||||
|         data.store = store; | ||||
|     } | ||||
|     if let Some(ns) = update.ns { | ||||
|         check_max_depth(&ns, update.max_depth.unwrap_or(data.max_depth))?; | ||||
|         if let Some(explicit_depth) = update.max_depth.or(data.max_depth) { | ||||
|             check_max_depth(&ns, explicit_depth)?; | ||||
|         } | ||||
|         data.ns = Some(ns); | ||||
|     } | ||||
|     if let Some(remote) = update.remote { | ||||
| @ -351,7 +358,9 @@ pub fn update_sync_job( | ||||
|         data.remote_store = remote_store; | ||||
|     } | ||||
|     if let Some(remote_ns) = update.remote_ns { | ||||
|         check_max_depth(&remote_ns, update.max_depth.unwrap_or(data.max_depth))?; | ||||
|         if let Some(explicit_depth) = update.max_depth.or(data.max_depth) { | ||||
|             check_max_depth(&remote_ns, explicit_depth)?; | ||||
|         } | ||||
|         data.remote_ns = Some(remote_ns); | ||||
|     } | ||||
|     if let Some(owner) = update.owner { | ||||
| @ -391,7 +400,7 @@ pub fn update_sync_job( | ||||
|         if let Some(ref ns) = data.remote_ns { | ||||
|             check_max_depth(ns, max_depth)?; | ||||
|         } | ||||
|         data.max_depth = max_depth; | ||||
|         data.max_depth = Some(max_depth); | ||||
|     } | ||||
|  | ||||
|     if !check_sync_job_modify_access(&user_info, &auth_id, &data) { | ||||
| @ -517,7 +526,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator | ||||
|         owner: Some(write_auth_id.clone()), | ||||
|         comment: None, | ||||
|         remove_vanished: None, | ||||
|         max_depth: 0, | ||||
|         max_depth: None, | ||||
|         group_filter: None, | ||||
|         schedule: None, | ||||
|         limit: pbs_api_types::RateLimitConfig::default(), // no limit | ||||
|  | ||||
| @ -233,7 +233,6 @@ async fn pull( | ||||
|     let delete = remove_vanished.unwrap_or(false); | ||||
|  | ||||
|     let ns = ns.unwrap_or_default(); | ||||
|     let max_depth = max_depth.unwrap_or(0); | ||||
|     let ns_str = if ns.is_root() { | ||||
|         None | ||||
|     } else { | ||||
|  | ||||
| @ -54,8 +54,8 @@ pub struct PullParameters { | ||||
|     owner: Authid, | ||||
|     /// Whether to remove groups which exist locally, but not on the remote end | ||||
|     remove_vanished: bool, | ||||
|     /// How many levels of sub-namespaces to pull (0 == no recursion) | ||||
|     max_depth: usize, | ||||
|     /// How many levels of sub-namespaces to pull (0 == no recursion, None == maximum recursion) | ||||
|     max_depth: Option<usize>, | ||||
|     /// Filters for reducing the pull scope | ||||
|     group_filter: Option<Vec<GroupFilter>>, | ||||
|     /// Rate limits for all transfers from `remote` | ||||
| @ -75,13 +75,14 @@ impl PullParameters { | ||||
|         remote_ns: BackupNamespace, | ||||
|         owner: Authid, | ||||
|         remove_vanished: Option<bool>, | ||||
|         max_depth: usize, | ||||
|         max_depth: Option<usize>, | ||||
|         group_filter: Option<Vec<GroupFilter>>, | ||||
|         limit: RateLimitConfig, | ||||
|     ) -> Result<Self, Error> { | ||||
|         let store = DataStore::lookup_datastore(store, Some(Operation::Write))?; | ||||
|  | ||||
|         let max_depth = min(max_depth, MAX_NAMESPACE_DEPTH - remote_ns.depth()); | ||||
|         let max_depth = | ||||
|             max_depth.map(|max_depth| min(max_depth, MAX_NAMESPACE_DEPTH - remote_ns.depth())); | ||||
|  | ||||
|         let (remote_config, _digest) = pbs_config::remote::config()?; | ||||
|         let remote: Remote = remote_config.lookup("remote", remote)?; | ||||
| @ -749,11 +750,11 @@ async fn query_namespaces( | ||||
|         "api2/json/admin/datastore/{}/namespace", | ||||
|         params.source.store() | ||||
|     ); | ||||
|     let data = json!({ | ||||
|         "max-depth": params.max_depth, | ||||
|     }); | ||||
|     let data = params | ||||
|         .max_depth | ||||
|         .map(|max_depth| json!({ "max-depth": max_depth })); | ||||
|     let mut result = client | ||||
|         .get(&path, Some(data)) | ||||
|         .get(&path, data) | ||||
|         .await | ||||
|         .map_err(|err| format_err!("Failed to retrieve namespaces from remote - {}", err))?; | ||||
|     let mut list: Vec<NamespaceListItem> = serde_json::from_value(result["data"].take())?; | ||||
| @ -846,7 +847,7 @@ fn check_and_remove_vanished_ns( | ||||
|  | ||||
|     let mut local_ns_list: Vec<BackupNamespace> = params | ||||
|         .store | ||||
|         .recursive_iter_backup_ns_ok(params.ns.clone(), Some(params.max_depth))? | ||||
|         .recursive_iter_backup_ns_ok(params.ns.clone(), params.max_depth)? | ||||
|         .filter(|ns| { | ||||
|             let store_with_ns = params.store_with_ns(ns.clone()); | ||||
|             let user_privs = user_info.lookup_privs(¶ms.owner, &store_with_ns.acl_path()); | ||||
| @ -911,7 +912,7 @@ pub async fn pull_store( | ||||
|     // explicit create shared lock to prevent GC on newly created chunks | ||||
|     let _shared_store_lock = params.store.try_shared_chunk_store_lock()?; | ||||
|  | ||||
|     let namespaces = if params.remote_ns.is_root() && params.max_depth == 0 { | ||||
|     let namespaces = if params.remote_ns.is_root() && params.max_depth == Some(0) { | ||||
|         vec![params.remote_ns.clone()] // backwards compat - don't query remote namespaces! | ||||
|     } else { | ||||
|         query_namespaces(client, ¶ms).await? | ||||
| @ -959,7 +960,7 @@ pub async fn pull_store( | ||||
|             Ok((ns_progress, ns_errors)) => { | ||||
|                 errors |= ns_errors; | ||||
|  | ||||
|                 if params.max_depth > 0 { | ||||
|                 if params.max_depth != Some(0) { | ||||
|                     groups += ns_progress.done_groups; | ||||
|                     snapshots += ns_progress.done_snapshots; | ||||
|                     task_log!( | ||||
|  | ||||
		Reference in New Issue
	
	Block a user