pull: allow pulling groups selectively

without requiring workarounds based on ownership and limited
visibility/access.

if a group filter is set, remove_vanished will only consider filtered
groups for removal, to prevent concurrently running jobs with disjoint
filters from trashing each other's synced groups.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Reviewed-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
Fabian Grünbichler 2021-10-28 15:00:53 +02:00 committed by Thomas Lamprecht
parent 6e9e6c7a54
commit 71e534631f
3 changed files with 53 additions and 8 deletions

View File

@ -8,7 +8,7 @@ use proxmox_schema::api;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission}; use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use pbs_api_types::{ use pbs_api_types::{
Authid, SyncJobConfig, Authid, SyncJobConfig, GroupFilter, GROUP_FILTER_LIST_SCHEMA,
DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
}; };
@ -50,6 +50,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
&sync_job.remote_store, &sync_job.remote_store,
sync_job.owner.as_ref().unwrap_or_else(|| Authid::root_auth_id()).clone(), sync_job.owner.as_ref().unwrap_or_else(|| Authid::root_auth_id()).clone(),
sync_job.remove_vanished, sync_job.remove_vanished,
None,
) )
} }
} }
@ -151,6 +152,10 @@ pub fn do_sync_job(
schema: REMOVE_VANISHED_BACKUPS_SCHEMA, schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true, optional: true,
}, },
"groups": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
}, },
}, },
access: { access: {
@ -168,6 +173,7 @@ async fn pull (
remote: String, remote: String,
remote_store: String, remote_store: String,
remove_vanished: Option<bool>, remove_vanished: Option<bool>,
groups: Option<Vec<GroupFilter>>,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> { ) -> Result<String, Error> {
@ -183,6 +189,7 @@ async fn pull (
&remote_store, &remote_store,
auth_id.clone(), auth_id.clone(),
remove_vanished, remove_vanished,
groups,
)?; )?;
let client = pull_params.client().await?; let client = pull_params.client().await?;

View File

@ -12,8 +12,9 @@ use pbs_client::{display_task_log, view_task_result};
use pbs_tools::percent_encoding::percent_encode_component; use pbs_tools::percent_encoding::percent_encode_component;
use pbs_tools::json::required_string_param; use pbs_tools::json::required_string_param;
use pbs_api_types::{ use pbs_api_types::{
DATASTORE_SCHEMA, UPID_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, GroupFilter,
IGNORE_VERIFIED_BACKUPS_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, REMOTE_ID_SCHEMA,
REMOVE_VANISHED_BACKUPS_SCHEMA, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
}; };
use proxmox_rest_server::wait_for_local_worker; use proxmox_rest_server::wait_for_local_worker;
@ -238,6 +239,10 @@ fn task_mgmt_cli() -> CommandLineInterface {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA, schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true, optional: true,
}, },
"groups": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
"output-format": { "output-format": {
schema: OUTPUT_FORMAT, schema: OUTPUT_FORMAT,
optional: true, optional: true,
@ -251,6 +256,7 @@ async fn pull_datastore(
remote_store: String, remote_store: String,
local_store: String, local_store: String,
remove_vanished: Option<bool>, remove_vanished: Option<bool>,
groups: Option<Vec<GroupFilter>>,
param: Value, param: Value,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
@ -264,6 +270,10 @@ async fn pull_datastore(
"remote-store": remote_store, "remote-store": remote_store,
}); });
if groups.is_some() {
args["groups"] = json!(groups);
}
if let Some(remove_vanished) = remove_vanished { if let Some(remove_vanished) = remove_vanished {
args["remove-vanished"] = Value::from(remove_vanished); args["remove-vanished"] = Value::from(remove_vanished);
} }

View File

@ -13,8 +13,9 @@ use http::StatusCode;
use proxmox_router::HttpError; use proxmox_router::HttpError;
use pbs_api_types::{Authid, GroupListItem, Remote, SnapshotListItem}; use pbs_api_types::{Authid, GroupFilter, GroupListItem, Remote, SnapshotListItem};
use pbs_datastore::{DataStore, BackupInfo, BackupDir, BackupGroup, StoreProgress};
use pbs_datastore::{BackupDir, BackupInfo, BackupGroup, DataStore, StoreProgress};
use pbs_datastore::data_blob::DataBlob; use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::fixed_index::FixedIndexReader;
@ -39,6 +40,7 @@ pub struct PullParameters {
store: Arc<DataStore>, store: Arc<DataStore>,
owner: Authid, owner: Authid,
remove_vanished: bool, remove_vanished: bool,
group_filter: Option<Vec<GroupFilter>>,
} }
impl PullParameters { impl PullParameters {
@ -48,6 +50,7 @@ impl PullParameters {
remote_store: &str, remote_store: &str,
owner: Authid, owner: Authid,
remove_vanished: Option<bool>, remove_vanished: Option<bool>,
group_filter: Option<Vec<GroupFilter>>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let store = DataStore::lookup_datastore(store)?; let store = DataStore::lookup_datastore(store)?;
@ -63,7 +66,7 @@ impl PullParameters {
remote_store.to_string(), remote_store.to_string(),
); );
Ok(Self { remote, source, store, owner, remove_vanished }) Ok(Self { remote, source, store, owner, remove_vanished, group_filter })
} }
pub async fn client(&self) -> Result<HttpClient, Error> { pub async fn client(&self) -> Result<HttpClient, Error> {
@ -678,8 +681,7 @@ pub async fn pull_store(
let mut list: Vec<GroupListItem> = serde_json::from_value(result["data"].take())?; let mut list: Vec<GroupListItem> = serde_json::from_value(result["data"].take())?;
task_log!(worker, "found {} groups to sync", list.len()); let total_count = list.len();
list.sort_unstable_by(|a, b| { list.sort_unstable_by(|a, b| {
let type_order = a.backup_type.cmp(&b.backup_type); let type_order = a.backup_type.cmp(&b.backup_type);
if type_order == std::cmp::Ordering::Equal { if type_order == std::cmp::Ordering::Equal {
@ -689,11 +691,32 @@ pub async fn pull_store(
} }
}); });
let apply_filters = |group: &BackupGroup, filters: &[GroupFilter]| -> bool {
filters
.iter()
.any(|filter| group.matches(filter))
};
let list:Vec<BackupGroup> = list let list:Vec<BackupGroup> = list
.into_iter() .into_iter()
.map(|item| BackupGroup::new(item.backup_type, item.backup_id)) .map(|item| BackupGroup::new(item.backup_type, item.backup_id))
.collect(); .collect();
let list = if let Some(ref group_filter) = &params.group_filter {
let unfiltered_count = list.len();
let list:Vec<BackupGroup> = list
.into_iter()
.filter(|group| {
apply_filters(&group, group_filter)
})
.collect();
task_log!(worker, "found {} groups to sync (out of {} total)", list.len(), unfiltered_count);
list
} else {
task_log!(worker, "found {} groups to sync", total_count);
list
};
let mut errors = false; let mut errors = false;
let mut new_groups = std::collections::HashSet::new(); let mut new_groups = std::collections::HashSet::new();
@ -755,6 +778,11 @@ pub async fn pull_store(
if new_groups.contains(&local_group) { if new_groups.contains(&local_group) {
continue; continue;
} }
if let Some(ref group_filter) = &params.group_filter {
if !apply_filters(&local_group, group_filter) {
continue;
}
}
task_log!( task_log!(
worker, worker,
"delete vanished group '{}/{}'", "delete vanished group '{}/{}'",