pull/sync: treat unset max-depth as full recursion

to be consistent with tape backup and verification jobs.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Author: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Date: 2022-05-12 16:44:52 +02:00
commit b9310489cf
parent d9aad37f2f
4 changed files with 27 additions and 18 deletions
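
In short: `max_depth` changes from `usize` (where `0` doubled as both "no recursion" and the serde default) to `Option<usize>`, where `None` now means "recurse as deeply as the namespace hierarchy allows" and `Some(0)` keeps the old "no recursion" meaning. A minimal standalone sketch of the intended semantics; the `effective_depth` helper is illustrative only, and the depth limit of 7 is assumed to match `MAX_NAMESPACE_DEPTH`:

    // Illustrative stand-in for pbs_api_types::MAX_NAMESPACE_DEPTH (assumed to be 7).
    const MAX_NAMESPACE_DEPTH: usize = 7;

    /// Hypothetical helper mirroring the new semantics: `None` recurses as far as
    /// the namespace hierarchy allows, `Some(n)` descends at most `n` levels.
    fn effective_depth(max_depth: Option<usize>, ns_depth: usize) -> usize {
        let remaining = MAX_NAMESPACE_DEPTH - ns_depth;
        match max_depth {
            None => remaining,           // unset: full recursion
            Some(n) => n.min(remaining), // explicit: clamped, as in PullParameters::new below
        }
    }

    fn main() {
        assert_eq!(effective_depth(None, 2), 5);    // unset -> everything below the namespace
        assert_eq!(effective_depth(Some(0), 2), 0); // 0 still means no recursion
        assert_eq!(effective_depth(Some(9), 2), 5); // larger values are clamped
    }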


@@ -473,8 +473,8 @@ pub struct SyncJobConfig {
     pub remote_ns: Option<BackupNamespace>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub remove_vanished: Option<bool>,
-    #[serde(default)]
-    pub max_depth: usize,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_depth: Option<usize>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]


@@ -221,6 +221,8 @@ pub enum DeletableProperty {
     ns,
     /// Delete the remote_ns property,
     remote_ns,
+    /// Delete the max_depth property,
+    max_depth,
 }

 #[api(
@@ -312,6 +314,9 @@ pub fn update_sync_job(
                 DeletableProperty::remote_ns => {
                     data.remote_ns = None;
                 }
+                DeletableProperty::max_depth => {
+                    data.max_depth = None;
+                }
             }
         }
     }
@@ -341,7 +346,9 @@ pub fn update_sync_job(
         data.store = store;
     }
     if let Some(ns) = update.ns {
-        check_max_depth(&ns, update.max_depth.unwrap_or(data.max_depth))?;
+        if let Some(explicit_depth) = update.max_depth.or(data.max_depth) {
+            check_max_depth(&ns, explicit_depth)?;
+        }
         data.ns = Some(ns);
     }
     if let Some(remote) = update.remote {
@@ -351,7 +358,9 @@ pub fn update_sync_job(
         data.remote_store = remote_store;
     }
     if let Some(remote_ns) = update.remote_ns {
-        check_max_depth(&remote_ns, update.max_depth.unwrap_or(data.max_depth))?;
+        if let Some(explicit_depth) = update.max_depth.or(data.max_depth) {
+            check_max_depth(&remote_ns, explicit_depth)?;
+        }
         data.remote_ns = Some(remote_ns);
     }
     if let Some(owner) = update.owner {
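
With both the update and the stored value now optional, the depth check only runs when at least one of them is actually set; `Option::or` gives the freshly supplied value precedence over the configured one. A standalone sketch of that precedence rule (not the API code itself):

    /// Hypothetical helper showing how update_sync_job picks the depth to validate.
    fn depth_to_check(update: Option<usize>, stored: Option<usize>) -> Option<usize> {
        // `Option::or` keeps the first `Some`, so an updated value wins over the stored one.
        update.or(stored)
    }

    fn main() {
        assert_eq!(depth_to_check(Some(3), Some(1)), Some(3)); // update wins
        assert_eq!(depth_to_check(None, Some(1)), Some(1));    // fall back to the stored value
        assert_eq!(depth_to_check(None, None), None);          // nothing set: check_max_depth is skipped
    }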
@@ -391,7 +400,7 @@ pub fn update_sync_job(
         if let Some(ref ns) = data.remote_ns {
             check_max_depth(ns, max_depth)?;
         }
-        data.max_depth = max_depth;
+        data.max_depth = Some(max_depth);
     }

     if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
@@ -517,7 +526,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
         owner: Some(write_auth_id.clone()),
         comment: None,
         remove_vanished: None,
-        max_depth: 0,
+        max_depth: None,
         group_filter: None,
         schedule: None,
         limit: pbs_api_types::RateLimitConfig::default(), // no limit


@@ -233,7 +233,6 @@ async fn pull(
     let delete = remove_vanished.unwrap_or(false);
     let ns = ns.unwrap_or_default();
-    let max_depth = max_depth.unwrap_or(0);

     let ns_str = if ns.is_root() {
         None
     } else {


@@ -54,8 +54,8 @@ pub struct PullParameters {
     owner: Authid,
     /// Whether to remove groups which exist locally, but not on the remote end
     remove_vanished: bool,
-    /// How many levels of sub-namespaces to pull (0 == no recursion)
-    max_depth: usize,
+    /// How many levels of sub-namespaces to pull (0 == no recursion, None == maximum recursion)
+    max_depth: Option<usize>,
     /// Filters for reducing the pull scope
     group_filter: Option<Vec<GroupFilter>>,
     /// Rate limits for all transfers from `remote`
@@ -75,13 +75,14 @@ impl PullParameters {
         remote_ns: BackupNamespace,
         owner: Authid,
         remove_vanished: Option<bool>,
-        max_depth: usize,
+        max_depth: Option<usize>,
         group_filter: Option<Vec<GroupFilter>>,
         limit: RateLimitConfig,
     ) -> Result<Self, Error> {
         let store = DataStore::lookup_datastore(store, Some(Operation::Write))?;

-        let max_depth = min(max_depth, MAX_NAMESPACE_DEPTH - remote_ns.depth());
+        let max_depth =
+            max_depth.map(|max_depth| min(max_depth, MAX_NAMESPACE_DEPTH - remote_ns.depth()));

         let (remote_config, _digest) = pbs_config::remote::config()?;
         let remote: Remote = remote_config.lookup("remote", remote)?;
@@ -749,11 +750,11 @@ async fn query_namespaces(
         "api2/json/admin/datastore/{}/namespace",
         params.source.store()
     );
-    let data = json!({
-        "max-depth": params.max_depth,
-    });
+    let data = params
+        .max_depth
+        .map(|max_depth| json!({ "max-depth": max_depth }));
     let mut result = client
-        .get(&path, Some(data))
+        .get(&path, data)
         .await
         .map_err(|err| format_err!("Failed to retrieve namespaces from remote - {}", err))?;
     let mut list: Vec<NamespaceListItem> = serde_json::from_value(result["data"].take())?;
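
On the wire, an unset depth is now expressed by omitting the `max-depth` query parameter entirely instead of sending `0`, which is what lets the remote's namespace listing recurse fully. Roughly, assuming a serde_json dependency (the helper name is made up; `client.get` already takes optional request data, so the `None` case needs no special handling):

    use serde_json::{json, Value};

    /// Hypothetical helper mirroring how query_namespaces builds its request data.
    fn namespace_request_data(max_depth: Option<usize>) -> Option<Value> {
        // Only an explicit depth produces a "max-depth" parameter; `None` sends nothing.
        max_depth.map(|max_depth| json!({ "max-depth": max_depth }))
    }

    fn main() {
        assert_eq!(
            namespace_request_data(Some(2)),
            Some(json!({ "max-depth": 2 }))
        );
        assert_eq!(namespace_request_data(None), None); // unset: remote recurses fully
    }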
@@ -846,7 +847,7 @@ fn check_and_remove_vanished_ns(
     let mut local_ns_list: Vec<BackupNamespace> = params
         .store
-        .recursive_iter_backup_ns_ok(params.ns.clone(), Some(params.max_depth))?
+        .recursive_iter_backup_ns_ok(params.ns.clone(), params.max_depth)?
         .filter(|ns| {
             let store_with_ns = params.store_with_ns(ns.clone());
             let user_privs = user_info.lookup_privs(&params.owner, &store_with_ns.acl_path());
@@ -911,7 +912,7 @@ pub async fn pull_store(
     // explicit create shared lock to prevent GC on newly created chunks
     let _shared_store_lock = params.store.try_shared_chunk_store_lock()?;

-    let namespaces = if params.remote_ns.is_root() && params.max_depth == 0 {
+    let namespaces = if params.remote_ns.is_root() && params.max_depth == Some(0) {
         vec![params.remote_ns.clone()] // backwards compat - don't query remote namespaces!
     } else {
         query_namespaces(client, &params).await?
@@ -959,7 +960,7 @@ pub async fn pull_store(
             Ok((ns_progress, ns_errors)) => {
                 errors |= ns_errors;

-                if params.max_depth > 0 {
+                if params.max_depth != Some(0) {
                     groups += ns_progress.done_groups;
                     snapshots += ns_progress.done_snapshots;
                     task_log!(
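
The two remaining comparisons encode the same rule from opposite ends: the remote namespace listing is skipped only for the backwards-compatible "root namespace with an explicit depth of 0" case, and per-namespace progress totals are accumulated for any depth except an explicit 0. A condensed sketch of both conditions (function names are made up):

    /// Skip the remote namespace listing only in the pre-namespace compat case.
    fn query_remote_namespaces(remote_ns_is_root: bool, max_depth: Option<usize>) -> bool {
        !(remote_ns_is_root && max_depth == Some(0))
    }

    /// Per-namespace progress counts for any depth except an explicit 0.
    fn counts_namespace_progress(max_depth: Option<usize>) -> bool {
        max_depth != Some(0)
    }

    fn main() {
        assert!(!query_remote_namespaces(true, Some(0))); // old behaviour preserved
        assert!(query_remote_namespaces(true, None));     // unset now queries and recurses fully
        assert!(counts_namespace_progress(None));
        assert!(!counts_namespace_progress(Some(0)));
    }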