api: tape/restore: add namespace mapping

by adding a new parameter 'namespaces', which contains a mapping
for a namespace like this:

store=datastore,source=foo,target=bar,max-depth=2

if source or target is omitted, the root namespace is used for that value

this mapping can be given several times (on the CLI) or as an array (via the
API) to provide mappings for multiple datastores

if a specific snapshot list is given simultaneously, the given snapshots
will be restored according to this mapping, or to the source namespace
if no mapping was found.

to do this, we reuse the restore_list_worker, but change it so that
it does not hold a lock for the duration of the restore, and instead fails
at the end if the snapshot already exists. also, the snapshot will now
be temporarily restored into the '.tmp/<media-set-uuid>' folder of the
target datastore.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
This commit is contained in:
Dominik Csapak 2022-05-05 15:59:36 +02:00
parent fc99c2791b
commit 07ffb86451
5 changed files with 415 additions and 160 deletions

View File

@ -36,6 +36,14 @@ macro_rules! BACKUP_NS_RE {
); );
} }
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_NS_PATH_RE {
() => (
concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/")
);
}
#[rustfmt::skip] #[rustfmt::skip]
#[macro_export] #[macro_export]
macro_rules! SNAPSHOT_PATH_REGEX_STR { macro_rules! SNAPSHOT_PATH_REGEX_STR {

View File

@ -30,7 +30,7 @@ use proxmox_uuid::Uuid;
use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT}; use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT};
const_regex! { const_regex! {
pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$"); pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":(:?", BACKUP_NS_PATH_RE!(),")?", SNAPSHOT_PATH_REGEX_STR!(), r"$");
} }
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat = pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
@ -42,9 +42,9 @@ pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
.schema(); .schema();
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
StringSchema::new("A snapshot in the format: 'store:type/id/time") StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time")
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT) .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
.type_text("store:type/id/time") .type_text("store:[ns/namespace/...]type/id/time")
.schema(); .schema();
#[api( #[api(

View File

@ -17,10 +17,10 @@ use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
use pbs_api_types::{ use pbs_api_types::{
Authid, BackupNamespace, CryptMode, Operation, TapeRestoreNamespace, Userid, parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode,
DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_SNAPSHOT_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
UPID_SCHEMA, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
}; };
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::dynamic_index::DynamicIndexReader;
@ -50,8 +50,6 @@ use crate::{
tools::parallel_handler::ParallelHandler, tools::parallel_handler::ParallelHandler,
}; };
const RESTORE_TMP_DIR: &str = "/var/tmp/proxmox-backup";
pub struct DataStoreMap { pub struct DataStoreMap {
map: HashMap<String, Arc<DataStore>>, map: HashMap<String, Arc<DataStore>>,
default: Option<Arc<DataStore>>, default: Option<Arc<DataStore>>,
@ -275,6 +273,14 @@ pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
store: { store: {
schema: DATASTORE_MAP_LIST_SCHEMA, schema: DATASTORE_MAP_LIST_SCHEMA,
}, },
"namespaces": {
description: "List of namespace to restore.",
type: Array,
optional: true,
items: {
schema: TAPE_RESTORE_NAMESPACE_SCHEMA,
},
},
drive: { drive: {
schema: DRIVE_NAME_SCHEMA, schema: DRIVE_NAME_SCHEMA,
}, },
@ -315,6 +321,7 @@ pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
pub fn restore( pub fn restore(
store: String, store: String,
drive: String, drive: String,
namespaces: Option<Vec<String>>,
media_set: String, media_set: String,
notify_user: Option<Userid>, notify_user: Option<Userid>,
snapshots: Option<Vec<String>>, snapshots: Option<Vec<String>>,
@ -324,14 +331,22 @@ pub fn restore(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let store_map = DataStoreMap::try_from(store) let mut store_map = DataStoreMap::try_from(store)
.map_err(|err| format_err!("cannot parse store mapping: {}", err))?; .map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
let namespaces = if let Some(maps) = namespaces {
store_map
.add_namespaces_maps(maps)
.map_err(|err| format_err!("cannot parse namespace mapping: {}", err))?
} else {
false
};
let used_datastores = store_map.used_datastores(); let used_datastores = store_map.used_datastores();
if used_datastores.is_empty() { if used_datastores.is_empty() {
bail!("no datastores given"); bail!("no datastores given");
} }
for (_, (target, namespaces)) in used_datastores.iter() { for (target, namespaces) in used_datastores.values() {
check_datastore_privs( check_datastore_privs(
&user_info, &user_info,
target.name(), target.name(),
@ -375,8 +390,8 @@ pub fn restore(
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let taskid = used_datastores let taskid = used_datastores
.iter() .values()
.map(|(_, (t, _))| t.name().to_string()) .map(|(t, _)| t.name().to_string())
.collect::<Vec<String>>() .collect::<Vec<String>>()
.join(", "); .join(", ");
@ -400,10 +415,10 @@ pub fn restore(
task_log!(worker, "Mediaset '{}'", media_set); task_log!(worker, "Mediaset '{}'", media_set);
task_log!(worker, "Pool: {}", pool); task_log!(worker, "Pool: {}", pool);
let res = if let Some(snapshots) = snapshots { let res = if snapshots.is_some() || namespaces {
restore_list_worker( restore_list_worker(
worker.clone(), worker.clone(),
snapshots, snapshots.unwrap_or_else(Vec::new),
inventory, inventory,
media_set_uuid, media_set_uuid,
drive_config, drive_config,
@ -490,13 +505,13 @@ fn restore_full_worker(
task_log!(worker, "Encryption key fingerprint: {}", fingerprint); task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
} }
let used_datastores = store_map.used_datastores();
task_log!( task_log!(
worker, worker,
"Datastore(s): {}", "Datastore(s): {}",
store_map used_datastores
.used_datastores() .values()
.into_iter() .map(|(t, _)| String::from(t.name()))
.map(|(_, (t, _))| String::from(t.name()))
.collect::<Vec<String>>() .collect::<Vec<String>>()
.join(", "), .join(", "),
); );
@ -513,7 +528,7 @@ fn restore_full_worker(
); );
let mut datastore_locks = Vec::new(); let mut datastore_locks = Vec::new();
for (_, (target, _)) in store_map.used_datastores() { for (target, _) in used_datastores.values() {
// explicit create shared lock to prevent GC on newly created chunks // explicit create shared lock to prevent GC on newly created chunks
let shared_store_lock = target.try_shared_chunk_store_lock()?; let shared_store_lock = target.try_shared_chunk_store_lock()?;
datastore_locks.push(shared_store_lock); datastore_locks.push(shared_store_lock);
@ -538,6 +553,97 @@ fn restore_full_worker(
Ok(()) Ok(())
} }
fn check_snapshot_restorable(
worker: &WorkerTask,
store_map: &DataStoreMap,
store: &str,
snapshot: &str,
ns: &BackupNamespace,
dir: &BackupDir,
required: bool,
user_info: &CachedUserInfo,
auth_id: &Authid,
restore_owner: &Authid,
) -> Result<bool, Error> {
let (datastore, namespaces) = if required {
let (datastore, namespaces) = match store_map.get_targets(store, ns) {
Some((target_ds, target_ns)) => {
(target_ds, target_ns.unwrap_or_else(|| vec![ns.clone()]))
}
None => bail!("could not find target datastore for {store}:{snapshot}"),
};
if namespaces.is_empty() {
bail!("could not find target namespace for {store}:{snapshot}");
}
(datastore, namespaces)
} else {
match store_map.get_targets(store, ns) {
Some((ds, Some(ns))) => {
if ns.is_empty() {
return Ok(false);
}
(ds, ns)
}
Some((_, None)) => return Ok(false),
None => return Ok(false),
}
};
let mut have_some_permissions = false;
let mut can_restore_some = false;
for ns in namespaces {
// only simple check, ns creation comes later
if let Err(err) = check_datastore_privs(
user_info,
datastore.name(),
&ns,
auth_id,
Some(restore_owner),
) {
task_warn!(worker, "cannot restore {store}:{snapshot} to {ns}: '{err}'");
continue;
}
// rechecked when we create the group!
if let Ok(owner) = datastore.get_owner(&ns, dir.as_ref()) {
if restore_owner != &owner {
// only the owner is allowed to create additional snapshots
task_warn!(
worker,
"restore '{}' to {} failed - owner check failed ({} != {})",
&snapshot,
ns,
restore_owner,
owner,
);
continue;
}
}
have_some_permissions = true;
if datastore.snapshot_path(&ns, &dir).exists() {
task_warn!(
worker,
"found snapshot {} on target datastore/namespace, skipping...",
&snapshot,
);
continue;
}
can_restore_some = true;
}
if !have_some_permissions {
bail!(
"cannot restore {} to any target namespace due to permissions",
&snapshot
);
}
return Ok(can_restore_some);
}
fn restore_list_worker( fn restore_list_worker(
worker: Arc<WorkerTask>, worker: Arc<WorkerTask>,
snapshots: Vec<String>, snapshots: Vec<String>,
@ -551,101 +657,104 @@ fn restore_list_worker(
user_info: Arc<CachedUserInfo>, user_info: Arc<CachedUserInfo>,
auth_id: &Authid, auth_id: &Authid,
) -> Result<(), Error> { ) -> Result<(), Error> {
// FIXME: Namespace needs to come from somewhere, `snapshots` is just a snapshot string list
// here.
let ns = BackupNamespace::root();
let base_path: PathBuf = format!("{}/{}", RESTORE_TMP_DIR, media_set_uuid).into();
std::fs::create_dir_all(&base_path)?;
let catalog = get_media_set_catalog(&inventory, &media_set_uuid)?; let catalog = get_media_set_catalog(&inventory, &media_set_uuid)?;
let mut datastore_locks = Vec::new(); let mut datastore_locks = Vec::new();
let mut snapshot_file_hash: BTreeMap<Uuid, Vec<u64>> = BTreeMap::new(); let mut snapshot_file_hash: BTreeMap<Uuid, Vec<u64>> = BTreeMap::new();
let mut snapshot_locks = HashMap::new(); let mut skipped = Vec::new();
let res = proxmox_lang::try_block!({ let res = proxmox_lang::try_block!({
// assemble snapshot files/locks // phase 0
for store_snapshot in snapshots.iter() { let snapshots = if snapshots.is_empty() {
let mut split = store_snapshot.splitn(2, ':'); let mut restorable = Vec::new();
let source_datastore = split // restore source namespaces
.next() for (store, snapshot) in catalog.list_snapshots() {
.ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?; if let Ok((ns, dir)) = parse_ns_and_snapshot(&snapshot) {
let snapshot = split if let Some((_, Some(_))) = store_map.get_targets(store, &ns) {
.next() let snapshot = print_ns_and_snapshot(&ns, &dir);
.ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?; match check_snapshot_restorable(
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?; &worker,
&store_map,
// FIXME ns store,
let (datastore, _) = store_map &snapshot,
.get_targets(source_datastore, &ns) &ns,
.ok_or_else(|| { &dir,
format_err!( false,
"could not find mapping for source datastore: {}", &user_info,
source_datastore auth_id,
) restore_owner,
})?; ) {
Ok(true) => {
// only simple check, ns creation comes later restorable.push((store.to_string(), snapshot.to_string(), ns, dir))
if let Err(err) = check_datastore_privs( }
&user_info, Ok(false) => {}
datastore.name(), Err(err) => {
&ns, task_warn!(worker, "{err}");
auth_id, skipped.push(format!("{store}:{snapshot}"));
Some(restore_owner), }
) { }
task_warn!( }
worker, }
"could not restore {}:{}: '{}'",
source_datastore,
snapshot,
err
);
continue;
} }
restorable
} else {
snapshots
.into_iter()
.filter_map(|store_snapshot| {
// we can unwrap here because of the api format
let idx = store_snapshot.find(':').unwrap();
let (store, snapshot) = store_snapshot.split_at(idx + 1);
let store = &store[..idx]; // remove ':'
let (owner, _group_lock) = match parse_ns_and_snapshot(&snapshot) {
datastore.create_locked_backup_group(&ns, backup_dir.as_ref(), restore_owner)?; Ok((ns, dir)) => {
if restore_owner != &owner { match check_snapshot_restorable(
// only the owner is allowed to create additional snapshots &worker,
task_warn!( &store_map,
worker, &store,
"restore '{}' failed - owner check failed ({} != {})", &snapshot,
snapshot, &ns,
restore_owner, &dir,
owner true,
); &user_info,
continue; auth_id,
} restore_owner,
) {
let (media_id, file_num) = if let Some((media_uuid, file_num)) = Ok(true) => {
catalog.lookup_snapshot(source_datastore, snapshot) Some((store.to_string(), snapshot.to_string(), ns, dir))
{ }
let media_id = inventory.lookup_media(media_uuid).unwrap(); Ok(false) => None,
(media_id, file_num) Err(err) => {
} else { task_warn!(worker, "{err}");
task_warn!( skipped.push(format!("{store}:{snapshot}"));
worker, None
"did not find snapshot '{}' in media set {}", }
snapshot, }
media_set_uuid }
); Err(err) => {
continue; task_warn!(worker, "could not restore {store_snapshot}: {err}");
}; skipped.push(store_snapshot);
None
let (_rel_path, is_new, snap_lock) = }
datastore.create_locked_backup_dir(&ns, &backup_dir)?; }
})
if !is_new { .collect()
task_log!( };
worker, for (store, snapshot, ns, _) in snapshots.iter() {
"found snapshot {} on target datastore, skipping...", // unwrap ok, we already checked those snapshots
snapshot let (datastore, _) = store_map.get_targets(store, &ns).unwrap();
); let (media_id, file_num) =
continue; if let Some((media_uuid, file_num)) = catalog.lookup_snapshot(store, &snapshot) {
} let media_id = inventory.lookup_media(media_uuid).unwrap();
(media_id, file_num)
snapshot_locks.insert(store_snapshot.to_string(), snap_lock); } else {
task_warn!(
worker,
"did not find snapshot '{store}:{snapshot}' in media set",
);
skipped.push(format!("{store}:{snapshot}"));
continue;
};
let shared_store_lock = datastore.try_shared_chunk_store_lock()?; let shared_store_lock = datastore.try_shared_chunk_store_lock()?;
datastore_locks.push(shared_store_lock); datastore_locks.push(shared_store_lock);
@ -658,7 +767,7 @@ fn restore_list_worker(
task_log!( task_log!(
worker, worker,
"found snapshot {} on {}: file {}", "found snapshot {} on {}: file {}",
snapshot, &snapshot,
media_id.label.label_text, media_id.label.label_text,
file_num file_num
); );
@ -666,11 +775,18 @@ fn restore_list_worker(
if snapshot_file_hash.is_empty() { if snapshot_file_hash.is_empty() {
task_log!(worker, "nothing to restore, skipping remaining phases..."); task_log!(worker, "nothing to restore, skipping remaining phases...");
if !skipped.is_empty() {
task_log!(worker, "skipped the following snapshots:");
for snap in skipped {
task_log!(worker, " {snap}");
}
}
return Ok(()); return Ok(());
} }
task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir"); task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir");
let mut datastore_chunk_map: HashMap<String, HashSet<[u8; 32]>> = HashMap::new(); let mut datastore_chunk_map: HashMap<String, HashSet<[u8; 32]>> = HashMap::new();
let mut tmp_paths = Vec::new();
for (media_uuid, file_list) in snapshot_file_hash.iter_mut() { for (media_uuid, file_list) in snapshot_file_hash.iter_mut() {
let media_id = inventory.lookup_media(media_uuid).unwrap(); let media_id = inventory.lookup_media(media_uuid).unwrap();
let (drive, info) = request_and_load_media( let (drive, info) = request_and_load_media(
@ -681,9 +797,10 @@ fn restore_list_worker(
&email, &email,
)?; )?;
file_list.sort_unstable(); file_list.sort_unstable();
restore_snapshots_to_tmpdir(
let tmp_path = restore_snapshots_to_tmpdir(
worker.clone(), worker.clone(),
&base_path, &store_map,
file_list, file_list,
drive, drive,
&info, &info,
@ -691,6 +808,7 @@ fn restore_list_worker(
&mut datastore_chunk_map, &mut datastore_chunk_map,
) )
.map_err(|err| format_err!("could not restore snapshots to tmpdir: {}", err))?; .map_err(|err| format_err!("could not restore snapshots to tmpdir: {}", err))?;
tmp_paths.extend(tmp_path);
} }
// sorted media_uuid => (sorted file_num => (set of digests))) // sorted media_uuid => (sorted file_num => (set of digests)))
@ -745,51 +863,110 @@ fn restore_list_worker(
worker, worker,
"Phase 3: copy snapshots from temp dir to datastores" "Phase 3: copy snapshots from temp dir to datastores"
); );
for (store_snapshot, _lock) in snapshot_locks.into_iter() { let mut errors = false;
proxmox_lang::try_block!({ for (source_datastore, snapshot, source_ns, backup_dir) in snapshots.into_iter() {
let mut split = store_snapshot.splitn(2, ':'); if let Err(err) = proxmox_lang::try_block!({
let source_datastore = split let (datastore, target_ns) = store_map
.next() .get_targets(&source_datastore, &source_ns)
.ok_or_else(|| format_err!("invalid snapshot: {}", store_snapshot))?; .ok_or_else(|| {
let snapshot = split format_err!("unexpected source datastore: {}", source_datastore)
.next() })?;
.ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
// FIXME ns let namespaces = target_ns.unwrap_or_else(|| vec![source_ns.clone()]);
let (datastore, _) =
store_map
.get_targets(source_datastore, &ns)
.ok_or_else(|| {
format_err!("unexpected source datastore: {}", source_datastore)
})?;
let ns = BackupNamespace::root(); for ns in namespaces {
if let Err(err) = proxmox_lang::try_block!({
check_and_create_namespaces(
&user_info,
&datastore,
&ns,
auth_id,
Some(restore_owner),
)?;
let mut tmp_path = base_path.clone(); let (owner, _group_lock) = datastore.create_locked_backup_group(
tmp_path.push(&source_datastore); &ns,
tmp_path.push(snapshot); backup_dir.as_ref(),
restore_owner,
)?;
if restore_owner != &owner {
bail!(
"cannot restore snapshot '{}' into group '{}', owner check failed ({} != {})",
snapshot,
backup_dir.group,
restore_owner,
owner,
);
}
check_and_create_namespaces( let (_rel_path, is_new, _snap_lock) =
&user_info, datastore.create_locked_backup_dir(&ns, backup_dir.as_ref())?;
&datastore,
&ns,
auth_id,
Some(restore_owner),
)?;
let path = datastore.snapshot_path(&ns, &backup_dir); if !is_new {
bail!("snapshot {}/{} already exists", datastore.name(), &snapshot);
}
for entry in std::fs::read_dir(tmp_path)? { let path = datastore.snapshot_path(&ns, &backup_dir);
let entry = entry?; let tmp_path = snapshot_tmpdir(
let mut new_path = path.clone(); &source_datastore,
new_path.push(entry.file_name()); &datastore,
std::fs::copy(entry.path(), new_path)?; &snapshot,
&media_set_uuid,
);
for entry in std::fs::read_dir(tmp_path)? {
let entry = entry?;
let mut new_path = path.clone();
new_path.push(entry.file_name());
std::fs::copy(entry.path(), new_path)?;
}
Ok(())
}) {
task_warn!(
worker,
"could not restore {source_datastore}:{snapshot}: '{err}'"
);
skipped.push(format!("{source_datastore}:{snapshot}"));
}
} }
task_log!(worker, "Restore snapshot '{}' done", snapshot); task_log!(worker, "Restore snapshot '{}' done", snapshot);
Ok(()) Ok::<_, Error>(())
}) }) {
.map_err(|err: Error| format_err!("could not copy {}: {}", store_snapshot, err))?; task_warn!(
worker,
"could not copy {}:{}: {}",
source_datastore,
snapshot,
err,
);
errors = true;
}
}
for tmp_path in tmp_paths {
if let Err(err) = proxmox_lang::try_block!({
std::fs::remove_dir_all(&tmp_path)
.map_err(|err| format_err!("remove_dir_all failed - {err}"))
}) {
task_warn!(
worker,
"could not clean up temp dir {:?}: {}",
tmp_path,
err,
);
errors = true;
};
}
if errors {
bail!("errors during copy occurred");
}
if !skipped.is_empty() {
task_log!(worker, "(partially) skipped the following snapshots:");
for snap in skipped {
task_log!(worker, " {snap}");
}
} }
Ok(()) Ok(())
}); });
@ -801,9 +978,13 @@ fn restore_list_worker(
); );
} }
match std::fs::remove_dir_all(&base_path) { for (datastore, _) in store_map.used_datastores().values() {
Ok(()) => {} let tmp_path = media_set_tmpdir(&datastore, &media_set_uuid);
Err(err) => task_warn!(worker, "error cleaning up: {}", err), match std::fs::remove_dir_all(&tmp_path) {
Ok(()) => {}
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {}
Err(err) => task_warn!(worker, "error cleaning up: {}", err),
}
} }
res res
@ -839,15 +1020,35 @@ fn get_media_set_catalog(
Ok(catalog) Ok(catalog)
} }
fn media_set_tmpdir(datastore: &DataStore, media_set_uuid: &Uuid) -> PathBuf {
let mut path = datastore.base_path();
path.push(".tmp");
path.push(media_set_uuid.to_string());
path
}
fn snapshot_tmpdir(
source_datastore: &str,
datastore: &DataStore,
snapshot: &str,
media_set_uuid: &Uuid,
) -> PathBuf {
let mut path = media_set_tmpdir(datastore, media_set_uuid);
path.push(source_datastore);
path.push(snapshot);
path
}
fn restore_snapshots_to_tmpdir( fn restore_snapshots_to_tmpdir(
worker: Arc<WorkerTask>, worker: Arc<WorkerTask>,
path: &PathBuf, store_map: &DataStoreMap,
file_list: &[u64], file_list: &[u64],
mut drive: Box<dyn TapeDriver>, mut drive: Box<dyn TapeDriver>,
media_id: &MediaId, media_id: &MediaId,
media_set_uuid: &Uuid, media_set_uuid: &Uuid,
chunks_list: &mut HashMap<String, HashSet<[u8; 32]>>, chunks_list: &mut HashMap<String, HashSet<[u8; 32]>>,
) -> Result<(), Error> { ) -> Result<Vec<PathBuf>, Error> {
let mut tmp_paths = Vec::new();
match media_id.media_set_label { match media_id.media_set_label {
None => { None => {
bail!( bail!(
@ -916,9 +1117,26 @@ fn restore_snapshots_to_tmpdir(
let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
let mut tmp_path = path.clone(); let target_datastore =
tmp_path.push(&source_datastore); match store_map.get_targets(&source_datastore, &Default::default()) {
tmp_path.push(snapshot); Some((datastore, _)) => datastore,
None => {
task_warn!(
worker,
"could not find target datastore for {}:{}",
source_datastore,
snapshot
);
continue;
}
};
let tmp_path = snapshot_tmpdir(
&source_datastore,
&target_datastore,
&snapshot,
media_set_uuid,
);
std::fs::create_dir_all(&tmp_path)?; std::fs::create_dir_all(&tmp_path)?;
let chunks = chunks_list let chunks = chunks_list
@ -926,6 +1144,7 @@ fn restore_snapshots_to_tmpdir(
.or_insert_with(HashSet::new); .or_insert_with(HashSet::new);
let manifest = let manifest =
try_restore_snapshot_archive(worker.clone(), &mut decoder, &tmp_path)?; try_restore_snapshot_archive(worker.clone(), &mut decoder, &tmp_path)?;
for item in manifest.files() { for item in manifest.files() {
let mut archive_path = tmp_path.to_owned(); let mut archive_path = tmp_path.to_owned();
archive_path.push(&item.filename); archive_path.push(&item.filename);
@ -943,12 +1162,13 @@ fn restore_snapshots_to_tmpdir(
} }
} }
} }
tmp_paths.push(tmp_path);
} }
other => bail!("unexpected file type: {:?}", other), other => bail!("unexpected file type: {:?}", other),
} }
} }
Ok(()) Ok(tmp_paths)
} }
fn restore_file_chunk_map( fn restore_file_chunk_map(
@ -1237,9 +1457,7 @@ fn restore_archive<'a>(
snapshot snapshot
); );
// FIXME: Namespace let (backup_ns, backup_dir) = parse_ns_and_snapshot(&snapshot)?;
let backup_ns = BackupNamespace::root();
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
if let Some((store_map, restore_owner)) = target.as_ref() { if let Some((store_map, restore_owner)) = target.as_ref() {
if let Some((datastore, _)) = store_map.get_targets(&datastore_name, &backup_ns) { if let Some((datastore, _)) = store_map.get_targets(&datastore_name, &backup_ns) {

View File

@ -20,7 +20,7 @@ use pbs_config::media_pool::complete_pool_name;
use pbs_api_types::{ use pbs_api_types::{
Authid, BackupNamespace, GroupListItem, HumanByte, Userid, DATASTORE_MAP_LIST_SCHEMA, Authid, BackupNamespace, GroupListItem, HumanByte, Userid, DATASTORE_MAP_LIST_SCHEMA,
DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, GROUP_FILTER_LIST_SCHEMA, MEDIA_LABEL_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, GROUP_FILTER_LIST_SCHEMA, MEDIA_LABEL_SCHEMA,
MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_SCHEMA, TAPE_RESTORE_NAMESPACE_SCHEMA,
TAPE_RESTORE_SNAPSHOT_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA,
}; };
use pbs_tape::{BlockReadError, MediaContentHeader, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0}; use pbs_tape::{BlockReadError, MediaContentHeader, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0};
@ -885,6 +885,14 @@ async fn backup(mut param: Value) -> Result<(), Error> {
store: { store: {
schema: DATASTORE_MAP_LIST_SCHEMA, schema: DATASTORE_MAP_LIST_SCHEMA,
}, },
"namespaces": {
description: "List of namespace to restore.",
type: Array,
optional: true,
items: {
schema: TAPE_RESTORE_NAMESPACE_SCHEMA,
},
},
drive: { drive: {
schema: DRIVE_NAME_SCHEMA, schema: DRIVE_NAME_SCHEMA,
optional: true, optional: true,

View File

@ -995,6 +995,27 @@ impl MediaSetCatalog {
} }
None None
} }
/// Returns an iterator over all registered snapshots per datastore
/// as (datastore, snapshot).
/// The snapshot contains namespaces in the format 'ns/namespace'.
pub fn list_snapshots(&self) -> impl Iterator<Item = (&str, &str)> {
self.catalog_list
.values()
.map(|catalog| {
catalog
.content
.iter()
.map(|(store, content)| {
content
.snapshot_index
.keys()
.map(move |key| (store.as_str(), key.as_str()))
})
.flatten()
})
.flatten()
}
} }
// Type definitions for internal binary catalog encoding // Type definitions for internal binary catalog encoding