api-types: introduce BackupType enum and Group/Dir api types
BackupType is now a real enum rather than a plain string. All of these
are API types and implement Display and FromStr. The ordering is the
same as it is in pbs-datastore. Also, they are now flattened into a few
structs via #[serde(flatten)] instead of being copied manually.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent 33eb23d57e
commit 988d575dbb
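The central behavioral point in the message is the #[serde(flatten)] change: the new nested Group/Dir structs must keep the old flat wire format. A minimal sketch of that round-trip with plain serde (the real types additionally carry the #[api] schema macro; serde/serde_json as dependencies and all values here are assumptions for illustration):

use serde::{Deserialize, Serialize};

#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
enum BackupType {
    Vm,
    Ct,
    Host,
}

#[derive(Debug, Deserialize, Serialize)]
struct BackupGroup {
    #[serde(rename = "backup-type")]
    ty: BackupType,
    #[serde(rename = "backup-id")]
    id: String,
}

#[derive(Debug, Deserialize, Serialize)]
struct BackupDir {
    // Flattening inlines the group fields into the parent object.
    #[serde(flatten)]
    group: BackupGroup,
    #[serde(rename = "backup-time")]
    time: i64,
}

fn main() {
    let dir = BackupDir {
        group: BackupGroup {
            ty: BackupType::Vm,
            id: "100".to_string(),
        },
        time: 1561718988,
    };
    // The serialized form is identical to the old three hand-copied
    // top-level fields, so existing API clients keep working.
    let value = serde_json::to_value(&dir).unwrap();
    assert_eq!(
        value,
        serde_json::json!({
            "backup-type": "vm",
            "backup-id": "100",
            "backup-time": 1561718988,
        })
    );
}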
@@ -2,7 +2,7 @@ use std::io::Write;
 
 use anyhow::Error;
 
-use pbs_api_types::Authid;
+use pbs_api_types::{Authid, BackupType};
 use pbs_client::{BackupReader, HttpClient, HttpClientOptions};
 
 pub struct DummyWriter {
@@ -33,8 +33,16 @@ async fn run() -> Result<(), Error> {
 
     let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
 
-    let client =
-        BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true).await?;
+    let client = BackupReader::start(
+        client,
+        None,
+        "store2",
+        BackupType::Host,
+        "elsa",
+        backup_time,
+        true,
+    )
+    .await?;
 
     let start = std::time::SystemTime::now();
 
@@ -1,6 +1,6 @@
 use anyhow::Error;
 
-use pbs_api_types::Authid;
+use pbs_api_types::{Authid, BackupType};
 use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};
 
 async fn upload_speed() -> Result<f64, Error> {
@@ -21,7 +21,7 @@ async fn upload_speed() -> Result<f64, Error> {
         client,
         None,
         datastore,
-        "host",
+        BackupType::Host,
         "speedtest",
         backup_time,
         false,
@@ -1,3 +1,6 @@
+use std::fmt;
+
+use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 
 use proxmox_schema::{
@@ -394,17 +397,244 @@ pub struct SnapshotVerifyState
     pub state: VerifyState,
 }
 
+#[api]
+/// Backup types.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "lowercase")]
+pub enum BackupType {
+    /// Virtual machines.
+    Vm,
+
+    /// Containers.
+    Ct,
+
+    /// "Host" backups.
+    Host,
+}
+
+impl BackupType {
+    pub const fn as_str(&self) -> &'static str {
+        match self {
+            BackupType::Vm => "vm",
+            BackupType::Ct => "ct",
+            BackupType::Host => "host",
+        }
+    }
+
+    /// We used to have alphabetical ordering here when this was a string.
+    const fn order(self) -> u8 {
+        match self {
+            BackupType::Ct => 0,
+            BackupType::Host => 1,
+            BackupType::Vm => 2,
+        }
+    }
+}
+
+impl fmt::Display for BackupType {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self.as_str(), f)
+    }
+}
+
+impl std::str::FromStr for BackupType {
+    type Err = Error;
+
+    /// Parse a backup type.
+    fn from_str(ty: &str) -> Result<Self, Error> {
+        Ok(match ty {
+            "ct" => BackupType::Ct,
+            "host" => BackupType::Host,
+            "vm" => BackupType::Vm,
+            _ => bail!("invalid backup type {ty:?}"),
+        })
+    }
+}
+
+impl std::cmp::Ord for BackupType {
+    #[inline]
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.order().cmp(&other.order())
+    }
+}
+
+impl std::cmp::PartialOrd for BackupType {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
 #[api(
     properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "backup-time": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
+        "backup-type": { type: BackupType },
+        "backup-id": { schema: BACKUP_ID_SCHEMA },
+    },
+)]
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// A backup group (without a data store).
+pub struct BackupGroup {
+    /// Backup type.
+    #[serde(rename = "backup-type")]
+    pub ty: BackupType,
+
+    /// Backup id.
+    #[serde(rename = "backup-id")]
+    pub id: String,
+}
+
+impl BackupGroup {
+    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
+        Self { ty, id: id.into() }
+    }
+}
+
+impl From<(BackupType, String)> for BackupGroup {
+    fn from(data: (BackupType, String)) -> Self {
+        Self {
+            ty: data.0,
+            id: data.1,
+        }
+    }
+}
+
+impl std::cmp::Ord for BackupGroup {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        let type_order = self.ty.cmp(&other.ty);
+        if type_order != std::cmp::Ordering::Equal {
+            return type_order;
+        }
+        // try to compare IDs numerically
+        let id_self = self.id.parse::<u64>();
+        let id_other = other.id.parse::<u64>();
+        match (id_self, id_other) {
+            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
+            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
+            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
+            _ => self.id.cmp(&other.id),
+        }
+    }
+}
+
+impl std::cmp::PartialOrd for BackupGroup {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl fmt::Display for BackupGroup {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}/{}", self.ty, self.id)
+    }
+}
+
+impl std::str::FromStr for BackupGroup {
+    type Err = Error;
+
+    /// Parse a backup group.
+    ///
+    /// This parses strings like `vm/100`.
+    fn from_str(path: &str) -> Result<Self, Error> {
+        let cap = GROUP_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
+
+        Ok(Self {
+            ty: cap.get(1).unwrap().as_str().parse()?,
+            id: cap.get(2).unwrap().as_str().to_owned(),
+        })
+    }
+}
+
+#[api(
+    properties: {
+        "group": { type: BackupGroup },
+        "backup-time": { schema: BACKUP_TIME_SCHEMA },
+    },
+)]
+/// Uniquely identify a Backup (relative to data store)
+///
+/// We also call this a backup snapshot.
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct BackupDir {
+    /// Backup group.
+    #[serde(flatten)]
+    pub group: BackupGroup,
+
+    /// Backup timestamp unix epoch.
+    #[serde(rename = "backup-time")]
+    pub time: i64,
+}
+
+impl From<(BackupGroup, i64)> for BackupDir {
+    fn from(data: (BackupGroup, i64)) -> Self {
+        Self {
+            group: data.0,
+            time: data.1,
+        }
+    }
+}
+
+impl From<(BackupType, String, i64)> for BackupDir {
+    fn from(data: (BackupType, String, i64)) -> Self {
+        Self {
+            group: (data.0, data.1).into(),
+            time: data.2,
+        }
+    }
+}
+
+impl BackupDir {
+    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
+    where
+        T: Into<String>,
+    {
+        let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
+        let group = BackupGroup::new(ty, id.into());
+        Ok(Self { group, time })
+    }
+
+    pub fn ty(&self) -> BackupType {
+        self.group.ty
+    }
+
+    pub fn id(&self) -> &str {
+        &self.group.id
+    }
+}
+
+impl std::str::FromStr for BackupDir {
+    type Err = Error;
+
+    /// Parse a snapshot path.
+    ///
+    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = SNAPSHOT_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+        BackupDir::with_rfc3339(
+            cap.get(1).unwrap().as_str().parse()?,
+            cap.get(2).unwrap().as_str(),
+            cap.get(3).unwrap().as_str(),
+        )
+    }
+}
+
+impl std::fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // FIXME: log error?
+        let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
+        write!(f, "{}/{}", self.group, time)
+    }
+}
+
+#[api(
+    properties: {
+        "backup": { type: BackupDir },
         comment: {
             schema: SINGLE_LINE_COMMENT_SCHEMA,
             optional: true,
@@ -432,9 +662,8 @@ pub struct SnapshotVerifyState
 #[serde(rename_all = "kebab-case")]
 /// Basic information about backup snapshot.
 pub struct SnapshotListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
-    pub backup_time: i64,
+    #[serde(flatten)]
+    pub backup: BackupDir,
+
     /// The first line from manifest "notes"
     #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
@@ -459,15 +688,8 @@ pub struct SnapshotListItem
 
 #[api(
     properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "last-backup": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
+        "backup": { type: BackupGroup },
+        "last-backup": { schema: BACKUP_TIME_SCHEMA },
         "backup-count": {
             type: Integer,
         },
@@ -486,8 +708,9 @@ pub struct SnapshotListItem
 #[serde(rename_all = "kebab-case")]
 /// Basic information about a backup group.
 pub struct GroupListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
+    #[serde(flatten)]
+    pub backup: BackupGroup,
+
     pub last_backup: i64,
     /// Number of contained snapshots
     pub backup_count: u64,
@@ -503,24 +726,16 @@ pub struct GroupListItem
 
 #[api(
     properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "backup-time": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
+        "backup": { type: BackupDir },
     },
 )]
 #[derive(Serialize, Deserialize)]
 #[serde(rename_all = "kebab-case")]
 /// Prune result.
 pub struct PruneListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
-    pub backup_time: i64,
+    #[serde(flatten)]
+    pub backup: BackupDir,
+
     /// Keep snapshot
     pub keep: bool,
 }
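A short usage sketch of the API types introduced above, as seen from a crate depending on pbs-api-types (anyhow assumed as a dependency, matching the error type the impls use):

use pbs_api_types::{BackupGroup, BackupType};

fn main() -> Result<(), anyhow::Error> {
    // FromStr parses the canonical "<type>/<id>" path form...
    let group: BackupGroup = "vm/100".parse()?;
    assert_eq!(group.ty, BackupType::Vm);
    assert_eq!(group.id, "100");

    // ...and Display renders it back unchanged.
    assert_eq!(group.to_string(), "vm/100");

    // BackupType keeps the old alphabetical string order (ct < host < vm),
    // and group IDs compare numerically when both parse as integers.
    assert!(BackupType::Ct < BackupType::Host);
    let a: BackupGroup = "vm/9".parse()?;
    let b: BackupGroup = "vm/10".parse()?;
    assert!(a < b);
    Ok(())
}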
@@ -27,7 +27,7 @@ use serde::{Deserialize, Serialize};
 use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
 use proxmox_uuid::Uuid;
 
-use crate::{BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, FINGERPRINT_SHA256_FORMAT};
+use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT};
 
 const_regex! {
     pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
@@ -66,7 +66,7 @@ pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
             optional: true,
         },
         "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
+            type: BackupType,
             optional: true,
         },
         "backup-id": {
@@ -83,6 +83,6 @@ pub struct MediaContentListFilter
     pub label_text: Option<String>,
     pub media: Option<Uuid>,
     pub media_set: Option<Uuid>,
-    pub backup_type: Option<String>,
+    pub backup_type: Option<BackupType>,
     pub backup_id: Option<String>,
 }
@@ -7,6 +7,7 @@ use std::sync::Arc;
 use futures::future::AbortHandle;
 use serde_json::{json, Value};
 
+use pbs_api_types::BackupType;
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
@@ -46,7 +47,7 @@ impl BackupReader {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: &str,
+        backup_type: BackupType,
         backup_id: &str,
         backup_time: i64,
         debug: bool,
@@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;
 
-use pbs_api_types::HumanByte;
+use pbs_api_types::{BackupType, HumanByte};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
@@ -86,7 +86,7 @@ impl BackupWriter {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: &str,
+        backup_type: BackupType,
         backup_id: &str,
         backup_time: i64,
         debug: bool,
@@ -265,6 +265,13 @@ pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec
             item["backup-type"].as_str(),
             item["backup-time"].as_i64(),
         ) {
+            let backup_type = match backup_type.parse() {
+                Ok(ty) => ty,
+                Err(_) => {
+                    // FIXME: print error in completion?
+                    continue;
+                }
+            };
             if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
                 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
             }
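Since the JSON completion item carries the type only as a string, the helper above has to parse it back and skip entries that fail. The same defensive pattern as a standalone sketch (the candidate list is made up):

use pbs_api_types::BackupType;

fn main() {
    // "lxc" is not a valid backup type and gets silently dropped,
    // mirroring the `continue` in the completion helper.
    let candidates = ["vm", "ct", "host", "lxc"];
    let parsed: Vec<BackupType> = candidates
        .iter()
        .filter_map(|name| name.parse().ok())
        .collect();
    assert_eq!(parsed, [BackupType::Vm, BackupType::Ct, BackupType::Host]);
}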
@@ -5,7 +5,8 @@ use std::str::FromStr;
 use anyhow::{bail, format_err, Error};
 
 use pbs_api_types::{
-    GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, GROUP_PATH_REGEX, SNAPSHOT_PATH_REGEX,
+    BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, GROUP_PATH_REGEX,
+    SNAPSHOT_PATH_REGEX,
 };
 
 use super::manifest::MANIFEST_BLOB_NAME;
@@ -14,7 +15,7 @@ use super::manifest::MANIFEST_BLOB_NAME;
 #[derive(Debug, Eq, PartialEq, Hash, Clone)]
 pub struct BackupGroup {
     /// Type of backup
-    backup_type: String,
+    backup_type: BackupType,
     /// Unique (for this type) ID
     backup_id: String,
 }
@@ -44,15 +45,15 @@ impl std::cmp::PartialOrd for BackupGroup {
 }
 
 impl BackupGroup {
-    pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
+    pub fn new<T: Into<String>>(backup_type: BackupType, backup_id: T) -> Self {
         Self {
-            backup_type: backup_type.into(),
+            backup_type,
             backup_id: backup_id.into(),
         }
     }
 
-    pub fn backup_type(&self) -> &str {
-        &self.backup_type
+    pub fn backup_type(&self) -> BackupType {
+        self.backup_type
     }
 
     pub fn backup_id(&self) -> &str {
@@ -62,7 +63,7 @@ impl BackupGroup {
     pub fn group_path(&self) -> PathBuf {
         let mut relative_path = PathBuf::new();
 
-        relative_path.push(&self.backup_type);
+        relative_path.push(self.backup_type.as_str());
 
         relative_path.push(&self.backup_id);
 
@@ -85,7 +86,7 @@ impl BackupGroup {
                 }
 
                 let backup_dir =
-                    BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
+                    BackupDir::with_rfc3339(self.backup_type, &self.backup_id, backup_time)?;
                 let files = list_backup_files(l2_fd, backup_time)?;
 
                 let protected = backup_dir.is_protected(base_path.to_owned());
@@ -162,12 +163,24 @@ impl BackupGroup {
                 Ok(group) => &group == self,
                 Err(_) => false, // shouldn't happen if value is schema-checked
             },
-            GroupFilter::BackupType(backup_type) => self.backup_type() == backup_type,
+            GroupFilter::BackupType(backup_type) => self.backup_type().as_str() == backup_type,
            GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
         }
     }
 }
 
+impl From<&BackupGroup> for pbs_api_types::BackupGroup {
+    fn from(group: &BackupGroup) -> pbs_api_types::BackupGroup {
+        (group.backup_type, group.backup_id.clone()).into()
+    }
+}
+
+impl From<BackupGroup> for pbs_api_types::BackupGroup {
+    fn from(group: BackupGroup) -> pbs_api_types::BackupGroup {
+        (group.backup_type, group.backup_id).into()
+    }
+}
+
 impl std::fmt::Display for BackupGroup {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let backup_type = self.backup_type();
@@ -188,7 +201,7 @@ impl std::str::FromStr for BackupGroup {
             .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
 
         Ok(Self {
-            backup_type: cap.get(1).unwrap().as_str().to_owned(),
+            backup_type: cap.get(1).unwrap().as_str().parse()?,
             backup_id: cap.get(2).unwrap().as_str().to_owned(),
         })
     }
@@ -208,28 +221,26 @@ pub struct BackupDir {
 }
 
 impl BackupDir {
-    pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
+    pub fn new<T>(backup_type: BackupType, backup_id: T, backup_time: i64) -> Result<Self, Error>
     where
         T: Into<String>,
-        U: Into<String>,
     {
-        let group = BackupGroup::new(backup_type.into(), backup_id.into());
+        let group = BackupGroup::new(backup_type, backup_id.into());
         BackupDir::with_group(group, backup_time)
     }
 
-    pub fn with_rfc3339<T, U, V>(
-        backup_type: T,
-        backup_id: U,
-        backup_time_string: V,
+    pub fn with_rfc3339<T, U>(
+        backup_type: BackupType,
+        backup_id: T,
+        backup_time_string: U,
     ) -> Result<Self, Error>
     where
         T: Into<String>,
         U: Into<String>,
-        V: Into<String>,
     {
         let backup_time_string = backup_time_string.into();
         let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
-        let group = BackupGroup::new(backup_type.into(), backup_id.into());
+        let group = BackupGroup::new(backup_type, backup_id.into());
         Ok(Self {
             group,
             backup_time,
@@ -283,6 +294,22 @@ impl BackupDir {
     }
 }
 
+impl From<&BackupDir> for pbs_api_types::BackupDir {
+    fn from(dir: &BackupDir) -> pbs_api_types::BackupDir {
+        (
+            pbs_api_types::BackupGroup::from(dir.group.clone()),
+            dir.backup_time,
+        )
+            .into()
+    }
+}
+
+impl From<BackupDir> for pbs_api_types::BackupDir {
+    fn from(dir: BackupDir) -> pbs_api_types::BackupDir {
+        (pbs_api_types::BackupGroup::from(dir.group), dir.backup_time).into()
+    }
+}
+
 impl std::str::FromStr for BackupDir {
     type Err = Error;
 
@@ -295,7 +322,7 @@ impl std::str::FromStr for BackupDir {
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
 
         BackupDir::with_rfc3339(
-            cap.get(1).unwrap().as_str(),
+            cap.get(1).unwrap().as_str().parse()?,
             cap.get(2).unwrap().as_str(),
             cap.get(3).unwrap().as_str(),
         )
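With the From impls above, a datastore-side group or snapshot converts into its API counterpart with a single .into() instead of per-field copying. A sketch of such boundary helpers (module paths are assumptions based on the file layout):

use pbs_datastore::backup_info::{BackupDir, BackupGroup};

// Hypothetical helpers marking the datastore/API boundary; the
// conversions themselves are the `From` impls added in this commit.
fn to_api_group(group: &BackupGroup) -> pbs_api_types::BackupGroup {
    group.into()
}

fn to_api_dir(dir: &BackupDir) -> pbs_api_types::BackupDir {
    dir.into()
}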
@@ -18,8 +18,8 @@ use proxmox_sys::WorkerTaskContext;
 use proxmox_sys::{task_log, task_warn};
 
 use pbs_api_types::{
-    Authid, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus, HumanByte,
-    Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, BACKUP_TYPE_REGEX, UPID,
+    Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
+    HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
 };
 use pbs_config::{open_backup_lockfile, BackupLockGuard, ConfigVersionCache};
 
@@ -494,7 +494,7 @@ impl DataStore {
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
         let mut full_path = self.base_path();
-        full_path.push(backup_group.backup_type());
+        full_path.push(backup_group.backup_type().as_str());
         std::fs::create_dir_all(&full_path)?;
 
         full_path.push(backup_group.backup_id());
@@ -1113,7 +1113,7 @@ impl Iterator for ListSnapshots {
 /// An iterator for a (single) level of Backup Groups
 pub struct ListGroups {
     type_fd: proxmox_sys::fs::ReadDir,
-    id_state: Option<(String, proxmox_sys::fs::ReadDir)>,
+    id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
 }
 
 impl ListGroups {
@@ -1130,7 +1130,7 @@ impl Iterator for ListGroups {
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
-            if let Some((ref group_type, ref mut id_fd)) = self.id_state {
+            if let Some((group_type, ref mut id_fd)) = self.id_state {
                 let item = match id_fd.next() {
                     Some(item) => item,
                     None => {
@@ -1162,7 +1162,7 @@
                     Some(nix::dir::Type::Directory) => {} // OK
                     _ => continue,
                 }
-                if BACKUP_TYPE_REGEX.is_match(name) {
+                if let Ok(group_type) = BackupType::from_str(name) {
                     // found a backup group type, descend into it to scan all IDs in it
                     // by switching to the id-state branch
                     let base_fd = entry.parent_fd();
@@ -1170,7 +1170,7 @@
                         Ok(dirfd) => dirfd,
                         Err(err) => return Some(Err(err.into())),
                     };
-                    self.id_state = Some((name.to_owned(), id_dirfd));
+                    self.id_state = Some((group_type, id_dirfd));
                 }
             }
             continue; // file did not match regex or isn't valid utf-8
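ListGroups previously matched type directory names against BACKUP_TYPE_REGEX; now the parse itself is the filter, and the parsed enum rides along in id_state. A standalone sketch of the filtering behavior (the directory names are made up):

use std::str::FromStr;

use pbs_api_types::BackupType;

fn main() {
    // Names under the datastore root that do not parse as a known
    // backup type are simply skipped, just like non-matching regex
    // entries were before.
    for name in ["vm", "images", "host"] {
        match BackupType::from_str(name) {
            Ok(ty) => println!("descending into '{ty}' group directory"),
            Err(_) => println!("skipping unrelated entry '{name}'"),
        }
    }
}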
@@ -6,7 +6,7 @@ use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
 
-use pbs_api_types::{CryptMode, Fingerprint};
+use pbs_api_types::{BackupType, CryptMode, Fingerprint};
 use pbs_tools::crypt_config::CryptConfig;
 
 use crate::BackupDir;
@@ -50,7 +50,7 @@ impl FileInfo {
 #[derive(Serialize, Deserialize)]
 #[serde(rename_all = "kebab-case")]
 pub struct BackupManifest {
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     files: Vec<FileInfo>,
@@ -87,7 +87,7 @@ pub fn archive_type<P: AsRef<Path>>(archive_name: P) -> Result<ArchiveType, Erro
 impl BackupManifest {
     pub fn new(snapshot: BackupDir) -> Self {
         Self {
-            backup_type: snapshot.group().backup_type().into(),
+            backup_type: snapshot.group().backup_type(),
             backup_id: snapshot.group().backup_id().into(),
             backup_time: snapshot.backup_time(),
             files: Vec::new(),
@@ -14,6 +14,7 @@ use proxmox_router::{
 };
 use proxmox_schema::{api, ApiType, ReturnType};
 
+use pbs_api_types::BackupType;
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupRepository, BackupWriter};
 use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
@@ -241,7 +242,7 @@ async fn test_upload_speed(
         client,
         crypt_config.clone(),
         repo.store(),
-        "host",
+        BackupType::Host,
         "benchmark",
         backup_time,
         false,
@@ -190,7 +190,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        &backup_type,
+        backup_type,
         &backup_id,
         backup_time,
         true,
@@ -22,9 +22,10 @@ use proxmox_time::{epoch_i64, strftime_local};
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use pbs_api_types::{
-    Authid, CryptMode, Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions,
-    RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
+    Authid, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte, PruneListItem,
+    PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
+    TRAFFIC_CONTROL_RATE_SCHEMA,
 };
 use pbs_client::catalog_shell::Shell;
 use pbs_client::tools::{
@@ -135,7 +136,7 @@ async fn api_datastore_list_snapshots(
 
     let mut args = json!({});
     if let Some(group) = group {
-        args["backup-type"] = group.backup_type().into();
+        args["backup-type"] = group.backup_type().to_string().into();
         args["backup-id"] = group.backup_id().into();
     }
 
@@ -148,7 +149,7 @@ pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
     group: BackupGroup,
-) -> Result<(String, String, i64), Error> {
+) -> Result<(BackupType, String, i64), Error> {
     let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
 
@@ -159,9 +160,9 @@ pub async fn api_datastore_latest_snapshot(
         );
     }
 
-    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
+    list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));
 
-    let backup_time = list[0].backup_time;
+    let backup_time = list[0].backup.time;
 
     Ok((
         group.backup_type().to_owned(),
@@ -261,13 +262,13 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
 
     let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let group = BackupGroup::new(item.backup_type, item.backup_id);
+        let group = BackupGroup::new(item.backup.ty, item.backup.id);
         Ok(group.group_path().to_str().unwrap().to_owned())
     };
 
     let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
+        let snapshot = BackupDir::new(item.backup.ty, item.backup.id, item.last_backup)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
 
@@ -329,7 +330,7 @@ async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Erro
 
     let group: BackupGroup = group.parse()?;
 
-    param["backup-type"] = group.backup_type().into();
+    param["backup-type"] = group.backup_type().to_string().into();
     param["backup-id"] = group.backup_id().into();
 
     let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
@@ -659,7 +660,7 @@ async fn create_backup(
         .as_str()
         .unwrap_or(proxmox_sys::nodename());
 
-    let backup_type = param["backup-type"].as_str().unwrap_or("host");
+    let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;
 
     let include_dev = param["include-dev"].as_array();
 
@@ -1221,7 +1222,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        &backup_type,
+        backup_type,
         &backup_id,
         backup_time,
         true,
@@ -1414,7 +1415,7 @@ async fn prune(
     if let Some(dry_run) = dry_run {
         api_param["dry-run"] = dry_run.into();
     }
-    api_param["backup-type"] = group.backup_type().into();
+    api_param["backup-type"] = group.backup_type().to_string().into();
     api_param["backup-id"] = group.backup_id().into();
 
     let mut result = client.post(&path, Some(api_param)).await?;
@@ -1423,7 +1424,7 @@ async fn prune(
 
     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: PruneListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
 
@@ -240,7 +240,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        &backup_type,
+        backup_type,
         &backup_id,
         backup_time,
         true,
@@ -59,7 +59,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
 
     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
 
@ -30,7 +30,7 @@ use pxar::accessor::aio::Accessor;
|
||||||
use pxar::EntryKind;
|
use pxar::EntryKind;
|
||||||
|
|
||||||
use pbs_api_types::{
|
use pbs_api_types::{
|
||||||
Authid, BackupContent, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
|
Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
|
||||||
GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
|
GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
|
||||||
SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
|
SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
|
||||||
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
|
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
|
||||||
|
@ -207,8 +207,7 @@ pub fn list_groups(
|
||||||
let comment = file_read_firstline(¬e_path).ok();
|
let comment = file_read_firstline(¬e_path).ok();
|
||||||
|
|
||||||
group_info.push(GroupListItem {
|
group_info.push(GroupListItem {
|
||||||
backup_type: group.backup_type().to_string(),
|
backup: group.into(),
|
||||||
backup_id: group.backup_id().to_string(),
|
|
||||||
last_backup: last_backup.backup_dir.backup_time(),
|
last_backup: last_backup.backup_dir.backup_time(),
|
||||||
owner: Some(owner),
|
owner: Some(owner),
|
||||||
backup_count,
|
backup_count,
|
||||||
|
@ -223,15 +222,9 @@ pub fn list_groups(
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
store: { schema: DATASTORE_SCHEMA },
|
||||||
schema: DATASTORE_SCHEMA,
|
"backup-type": { type: BackupType },
|
||||||
},
|
"backup-id": { schema: BACKUP_ID_SCHEMA },
|
||||||
"backup-type": {
|
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-id": {
|
|
||||||
schema: BACKUP_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
|
@ -244,7 +237,7 @@ pub fn list_groups(
|
||||||
/// Delete backup group including all snapshots.
|
/// Delete backup group including all snapshots.
|
||||||
pub fn delete_group(
|
pub fn delete_group(
|
||||||
store: String,
|
store: String,
|
||||||
backup_type: String,
|
backup_type: BackupType,
|
||||||
backup_id: String,
|
backup_id: String,
|
||||||
_info: &ApiMethod,
|
_info: &ApiMethod,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
@ -266,18 +259,10 @@ pub fn delete_group(
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
store: { schema: DATASTORE_SCHEMA },
|
||||||
schema: DATASTORE_SCHEMA,
|
"backup-type": { type: BackupType },
|
||||||
},
|
"backup-id": { schema: BACKUP_ID_SCHEMA },
|
||||||
"backup-type": {
|
"backup-time": { schema: BACKUP_TIME_SCHEMA },
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-id": {
|
|
||||||
schema: BACKUP_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-time": {
|
|
||||||
schema: BACKUP_TIME_SCHEMA,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
|
returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
|
||||||
|
@ -291,7 +276,7 @@ pub fn delete_group(
|
||||||
/// List snapshot files.
|
/// List snapshot files.
|
||||||
pub fn list_snapshot_files(
|
pub fn list_snapshot_files(
|
||||||
store: String,
|
store: String,
|
||||||
backup_type: String,
|
backup_type: BackupType,
|
||||||
backup_id: String,
|
backup_id: String,
|
||||||
backup_time: i64,
|
backup_time: i64,
|
||||||
_info: &ApiMethod,
|
_info: &ApiMethod,
|
||||||
|
@ -319,18 +304,10 @@ pub fn list_snapshot_files(
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
store: { schema: DATASTORE_SCHEMA },
|
||||||
schema: DATASTORE_SCHEMA,
|
"backup-type": { type: BackupType },
|
||||||
},
|
"backup-id": { schema: BACKUP_ID_SCHEMA },
|
||||||
"backup-type": {
|
"backup-time": { schema: BACKUP_TIME_SCHEMA },
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-id": {
|
|
||||||
schema: BACKUP_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-time": {
|
|
||||||
schema: BACKUP_TIME_SCHEMA,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
|
@ -343,7 +320,7 @@ pub fn list_snapshot_files(
|
||||||
/// Delete backup snapshot.
|
/// Delete backup snapshot.
|
||||||
pub fn delete_snapshot(
|
pub fn delete_snapshot(
|
||||||
store: String,
|
store: String,
|
||||||
backup_type: String,
|
backup_type: BackupType,
|
||||||
backup_id: String,
|
backup_id: String,
|
||||||
backup_time: i64,
|
backup_time: i64,
|
||||||
_info: &ApiMethod,
|
_info: &ApiMethod,
|
||||||
|
@ -370,12 +347,10 @@ pub fn delete_snapshot(
|
||||||
streaming: true,
|
streaming: true,
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
store: { schema: DATASTORE_SCHEMA },
|
||||||
schema: DATASTORE_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-type": {
|
"backup-type": {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
type: BackupType,
|
||||||
},
|
},
|
||||||
"backup-id": {
|
"backup-id": {
|
||||||
optional: true,
|
optional: true,
|
||||||
|
@ -394,7 +369,7 @@ pub fn delete_snapshot(
|
||||||
/// List backup snapshots.
|
/// List backup snapshots.
|
||||||
pub fn list_snapshots(
|
pub fn list_snapshots(
|
||||||
store: String,
|
store: String,
|
||||||
backup_type: Option<String>,
|
backup_type: Option<BackupType>,
|
||||||
backup_id: Option<String>,
|
backup_id: Option<String>,
|
||||||
_param: Value,
|
_param: Value,
|
||||||
_info: &ApiMethod,
|
_info: &ApiMethod,
|
||||||
|
@ -424,9 +399,10 @@ pub fn list_snapshots(
|
||||||
};
|
};
|
||||||
|
|
||||||
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
|
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
|
||||||
let backup_type = group.backup_type().to_string();
|
let backup = pbs_api_types::BackupDir {
|
||||||
let backup_id = group.backup_id().to_string();
|
group: group.into(),
|
||||||
let backup_time = info.backup_dir.backup_time();
|
time: info.backup_dir.backup_time(),
|
||||||
|
};
|
||||||
let protected = info.backup_dir.is_protected(datastore.base_path());
|
let protected = info.backup_dir.is_protected(datastore.base_path());
|
||||||
|
|
||||||
match get_all_snapshot_files(&datastore, &info) {
|
match get_all_snapshot_files(&datastore, &info) {
|
||||||
|
@ -458,9 +434,7 @@ pub fn list_snapshots(
|
||||||
let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
|
let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
|
||||||
|
|
||||||
SnapshotListItem {
|
SnapshotListItem {
|
||||||
backup_type,
|
backup,
|
||||||
backup_id,
|
|
||||||
backup_time,
|
|
||||||
comment,
|
comment,
|
||||||
verification,
|
verification,
|
||||||
fingerprint,
|
fingerprint,
|
||||||
|
@ -483,9 +457,7 @@ pub fn list_snapshots(
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
SnapshotListItem {
|
SnapshotListItem {
|
||||||
backup_type,
|
backup,
|
||||||
backup_id,
|
|
||||||
backup_time,
|
|
||||||
comment: None,
|
comment: None,
|
||||||
verification: None,
|
verification: None,
|
||||||
fingerprint: None,
|
fingerprint: None,
|
||||||
|
@ -550,10 +522,9 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
|
||||||
// only include groups with snapshots, counting/displaying emtpy groups can confuse
|
// only include groups with snapshots, counting/displaying emtpy groups can confuse
|
||||||
if snapshot_count > 0 {
|
if snapshot_count > 0 {
|
||||||
let type_count = match group.backup_type() {
|
let type_count = match group.backup_type() {
|
||||||
"ct" => counts.ct.get_or_insert(Default::default()),
|
BackupType::Ct => counts.ct.get_or_insert(Default::default()),
|
||||||
"vm" => counts.vm.get_or_insert(Default::default()),
|
BackupType::Vm => counts.vm.get_or_insert(Default::default()),
|
||||||
"host" => counts.host.get_or_insert(Default::default()),
|
BackupType::Host => counts.host.get_or_insert(Default::default()),
|
||||||
_ => counts.other.get_or_insert(Default::default()),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
type_count.groups += 1;
|
type_count.groups += 1;
|
||||||
|
@ -630,7 +601,7 @@ pub fn status(
|
||||||
schema: DATASTORE_SCHEMA,
|
schema: DATASTORE_SCHEMA,
|
||||||
},
|
},
|
||||||
"backup-type": {
|
"backup-type": {
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
type: BackupType,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
"backup-id": {
|
"backup-id": {
|
||||||
|
@ -664,7 +635,7 @@ pub fn status(
|
||||||
/// or all backups in the datastore.
|
/// or all backups in the datastore.
|
||||||
pub fn verify(
|
pub fn verify(
|
||||||
store: String,
|
store: String,
|
||||||
backup_type: Option<String>,
|
backup_type: Option<BackupType>,
|
||||||
backup_id: Option<String>,
|
backup_id: Option<String>,
|
||||||
backup_time: Option<i64>,
|
backup_time: Option<i64>,
|
||||||
ignore_verified: Option<bool>,
|
ignore_verified: Option<bool>,
|
||||||
|
@ -771,12 +742,8 @@ pub fn verify(
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
"backup-id": {
|
"backup-id": { schema: BACKUP_ID_SCHEMA },
|
||||||
schema: BACKUP_ID_SCHEMA,
|
"backup-type": { type: BackupType },
|
||||||
},
|
|
||||||
"backup-type": {
|
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
"dry-run": {
|
"dry-run": {
|
||||||
optional: true,
|
optional: true,
|
||||||
type: bool,
|
type: bool,
|
||||||
|
@ -800,7 +767,7 @@ pub fn verify(
|
||||||
/// Prune a group on the datastore
|
/// Prune a group on the datastore
|
||||||
pub fn prune(
|
pub fn prune(
|
||||||
backup_id: String,
|
backup_id: String,
|
||||||
backup_type: String,
|
backup_type: BackupType,
|
||||||
dry_run: bool,
|
dry_run: bool,
|
||||||
prune_options: PruneOptions,
|
prune_options: PruneOptions,
|
||||||
store: String,
|
store: String,
|
||||||
|
@ -809,13 +776,13 @@ pub fn prune(
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
let group = BackupGroup::new(&backup_type, &backup_id);
|
let group = BackupGroup::new(backup_type, &backup_id);
|
||||||
|
|
||||||
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
|
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
|
||||||
|
|
||||||
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
|
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
|
||||||
|
|
||||||
let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);
|
let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
|
||||||
|
|
||||||
let mut prune_result = Vec::new();
|
let mut prune_result = Vec::new();
|
||||||
|
|
||||||
|
@ -1111,7 +1078,7 @@ pub fn download_file(
|
||||||
|
|
||||||
let file_name = required_string_param(¶m, "file-name")?.to_owned();
|
let file_name = required_string_param(¶m, "file-name")?.to_owned();
|
||||||
|
|
||||||
let backup_type = required_string_param(¶m, "backup-type")?;
|
let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?;
|
||||||
let backup_id = required_string_param(¶m, "backup-id")?;
|
let backup_id = required_string_param(¶m, "backup-id")?;
|
||||||
let backup_time = required_integer_param(¶m, "backup-time")?;
|
let backup_time = required_integer_param(¶m, "backup-time")?;
|
||||||
|
|
||||||
|
@ -1194,7 +1161,7 @@ pub fn download_file_decoded(
|
||||||
|
|
||||||
let file_name = required_string_param(¶m, "file-name")?.to_owned();
|
let file_name = required_string_param(¶m, "file-name")?.to_owned();
|
||||||
|
|
||||||
let backup_type = required_string_param(¶m, "backup-type")?;
|
let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?;
|
||||||
let backup_id = required_string_param(¶m, "backup-id")?;
|
let backup_id = required_string_param(¶m, "backup-id")?;
|
||||||
let backup_time = required_integer_param(¶m, "backup-time")?;
|
let backup_time = required_integer_param(¶m, "backup-time")?;
|
||||||
|
|
||||||
|
@ -1320,7 +1287,7 @@ pub fn upload_backup_log(
|
||||||
|
|
||||||
let file_name = CLIENT_LOG_BLOB_NAME;
|
let file_name = CLIENT_LOG_BLOB_NAME;
|
||||||
|
|
||||||
let backup_type = required_string_param(¶m, "backup-type")?;
|
let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?;
|
||||||
let backup_id = required_string_param(¶m, "backup-id")?;
|
let backup_id = required_string_param(¶m, "backup-id")?;
|
||||||
let backup_time = required_integer_param(¶m, "backup-time")?;
|
let backup_time = required_integer_param(¶m, "backup-time")?;
|
||||||
|
|
||||||
|
@ -1369,18 +1336,10 @@ pub fn upload_backup_log(
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
store: { schema: DATASTORE_SCHEMA },
|
||||||
schema: DATASTORE_SCHEMA,
|
"backup-type": { type: BackupType },
|
||||||
},
|
"backup-id": { schema: BACKUP_ID_SCHEMA },
|
||||||
"backup-type": {
|
"backup-time": { schema: BACKUP_TIME_SCHEMA },
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-id": {
|
|
||||||
schema: BACKUP_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-time": {
|
|
||||||
schema: BACKUP_TIME_SCHEMA,
|
|
||||||
},
|
|
||||||
"filepath": {
|
"filepath": {
|
||||||
description: "Base64 encoded path.",
|
description: "Base64 encoded path.",
|
||||||
type: String,
|
type: String,
|
||||||
|
@ -1394,7 +1353,7 @@ pub fn upload_backup_log(
|
||||||
/// Get the entries of the given path of the catalog
|
/// Get the entries of the given path of the catalog
|
||||||
pub fn catalog(
|
pub fn catalog(
|
||||||
store: String,
|
store: String,
|
||||||
backup_type: String,
|
backup_type: BackupType,
|
||||||
backup_id: String,
|
backup_id: String,
|
||||||
backup_time: i64,
|
backup_time: i64,
|
||||||
filepath: String,
|
filepath: String,
|
||||||
|
@ -1481,7 +1440,7 @@ pub fn pxar_file_download(
|
||||||
|
|
||||||
let filepath = required_string_param(¶m, "filepath")?.to_owned();
|
let filepath = required_string_param(¶m, "filepath")?.to_owned();
|
||||||
|
|
||||||
let backup_type = required_string_param(¶m, "backup-type")?;
|
let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?;
|
||||||
let backup_id = required_string_param(¶m, "backup-id")?;
|
let backup_id = required_string_param(¶m, "backup-id")?;
|
||||||
let backup_time = required_integer_param(¶m, "backup-time")?;
|
let backup_time = required_integer_param(¶m, "backup-time")?;
|
||||||
|
|
||||||
|
@ -1659,15 +1618,9 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
store: { schema: DATASTORE_SCHEMA },
|
||||||
schema: DATASTORE_SCHEMA,
|
"backup-type": { type: BackupType },
|
||||||
},
|
"backup-id": { schema: BACKUP_ID_SCHEMA },
|
||||||
"backup-type": {
|
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-id": {
|
|
||||||
schema: BACKUP_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
|
@ -1677,7 +1630,7 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
|
||||||
/// Get "notes" for a backup group
|
/// Get "notes" for a backup group
|
||||||
pub fn get_group_notes(
|
pub fn get_group_notes(
|
||||||
store: String,
|
store: String,
|
||||||
backup_type: String,
|
backup_type: BackupType,
|
||||||
backup_id: String,
|
backup_id: String,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<String, Error> {
|
) -> Result<String, Error> {
|
||||||
|
@ -1695,15 +1648,9 @@ pub fn get_group_notes(
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
store: {
|
store: { schema: DATASTORE_SCHEMA },
|
||||||
schema: DATASTORE_SCHEMA,
|
"backup-type": { type: BackupType },
|
||||||
},
|
"backup-id": { schema: BACKUP_ID_SCHEMA },
|
||||||
"backup-type": {
|
|
||||||
schema: BACKUP_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
"backup-id": {
|
|
||||||
schema: BACKUP_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
notes: {
|
notes: {
|
||||||
description: "A multiline text.",
|
description: "A multiline text.",
|
||||||
},
|
},
|
||||||
|
@@ -1718,7 +1665,7 @@ pub fn get_group_notes(
 /// Set "notes" for a backup group
 pub fn set_group_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     notes: String,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1739,18 +1686,10 @@ pub fn set_group_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -1760,7 +1699,7 @@ pub fn set_group_notes(
 /// Get "notes" for a specific backup
 pub fn get_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1787,18 +1726,10 @@ pub fn get_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             notes: {
                 description: "A multiline text.",
             },
@@ -1813,7 +1744,7 @@ pub fn get_notes(
 /// Set "notes" for a specific backup
 pub fn set_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     notes: String,
@@ -1843,18 +1774,10 @@ pub fn set_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -1864,7 +1787,7 @@ pub fn set_notes(
 /// Query protection for a specific backup
 pub fn get_protection(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1887,18 +1810,10 @@ pub fn get_protection(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             protected: {
                 description: "Enable/disable protection.",
             },
@@ -1913,7 +1828,7 @@ pub fn get_protection(
 /// En- or disable protection for a specific backup
 pub fn set_protection(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     protected: bool,
@@ -1937,15 +1852,9 @@ pub fn set_protection(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
             "new-owner": {
                 type: Authid,
             },
@@ -1959,7 +1868,7 @@ pub fn set_protection(
 /// Change owner of a backup group
 pub fn set_backup_owner(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     new_owner: Authid,
     rpcenv: &mut dyn RpcEnvironment,

@@ -16,7 +16,7 @@ use proxmox_schema::*;
 use proxmox_sys::sortable;

 use pbs_api_types::{
-    Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+    Authid, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
     BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
     DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
 };
@ -82,7 +82,7 @@ fn upgrade_to_backup_protocol(
|
||||||
|
|
||||||
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
|
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
|
||||||
|
|
||||||
let backup_type = required_string_param(¶m, "backup-type")?;
|
let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?;
|
||||||
let backup_id = required_string_param(¶m, "backup-id")?;
|
let backup_id = required_string_param(¶m, "backup-id")?;
|
||||||
let backup_time = required_integer_param(¶m, "backup-time")?;
|
let backup_time = required_integer_param(¶m, "backup-time")?;
|
||||||
|
|
||||||
|
@ -109,7 +109,7 @@ fn upgrade_to_backup_protocol(
|
||||||
|
|
||||||
let backup_group = BackupGroup::new(backup_type, backup_id);
|
let backup_group = BackupGroup::new(backup_type, backup_id);
|
||||||
|
|
||||||
let worker_type = if backup_type == "host" && backup_id == "benchmark" {
|
let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
|
||||||
if !benchmark {
|
if !benchmark {
|
||||||
bail!("unable to run benchmark without --benchmark flags");
|
bail!("unable to run benchmark without --benchmark flags");
|
||||||
}
|
}
|
||||||
|
|
|
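
Comparing against `BackupType::Host` instead of the literal `"host"` means a typo in the type name is now a compile error rather than a silently-false condition. A small self-contained sketch of the same check; the `is_benchmark` helper is illustrative, not part of the commit:

use pbs_api_types::BackupType;

// Illustrative helper mirroring the check above: the benchmark
// pseudo-backup is a host backup with the fixed id "benchmark".
fn is_benchmark(backup_type: BackupType, backup_id: &str) -> bool {
    backup_type == BackupType::Host && backup_id == "benchmark"
}

fn main() {
    assert!(is_benchmark(BackupType::Host, "benchmark"));
    assert!(!is_benchmark(BackupType::Ct, "benchmark"));
    assert!(!is_benchmark(BackupType::Host, "other-id"));
}
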
@@ -16,9 +16,9 @@ use proxmox_schema::{BooleanSchema, ObjectSchema};
 use proxmox_sys::sortable;

 use pbs_api_types::{
-    Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_READ,
+    Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
+    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
 };
 use pbs_config::CachedUserInfo;
 use pbs_datastore::backup_info::BackupDir;
@ -90,7 +90,7 @@ fn upgrade_to_backup_reader_protocol(
|
||||||
|
|
||||||
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
|
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
|
||||||
|
|
||||||
let backup_type = required_string_param(¶m, "backup-type")?;
|
let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?;
|
||||||
let backup_id = required_string_param(¶m, "backup-id")?;
|
let backup_id = required_string_param(¶m, "backup-id")?;
|
||||||
let backup_time = required_integer_param(¶m, "backup-time")?;
|
let backup_time = required_integer_param(¶m, "backup-time")?;
|
||||||
|
|
||||||
|
|
|

@@ -441,7 +441,7 @@ pub fn list_content(
     for (store, snapshot) in media_catalog_snapshot_list(status_path, &media_id)? {
         let backup_dir: BackupDir = snapshot.parse()?;

-        if let Some(ref backup_type) = filter.backup_type {
+        if let Some(backup_type) = filter.backup_type {
             if backup_dir.group().backup_type() != backup_type {
                 continue;
             }
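
Dropping the `ref` works because `BackupType` is `Copy`: the `if let` binds a copy of the enum, leaving the `Option` in the filter untouched. A minimal sketch:

use pbs_api_types::BackupType;

fn main() {
    let filter: Option<BackupType> = Some(BackupType::Ct);

    // No `ref` needed: the pattern copies the enum out of the Option.
    if let Some(backup_type) = filter {
        assert_eq!(backup_type, BackupType::Ct);
    }

    // The Option is still usable afterwards because nothing was moved.
    assert!(filter.is_some());
}
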
@@ -8,7 +8,7 @@ use anyhow::{bail, format_err, Error};

 use proxmox_sys::{task_log, WorkerTaskContext};

-use pbs_api_types::{Authid, CryptMode, SnapshotVerifyState, VerifyState, UPID};
+use pbs_api_types::{Authid, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID};
 use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
@@ -539,7 +539,9 @@ pub fn verify_all_backups(

     let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
         Ok(list) => list
-            .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+            .filter(|group| {
+                !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
+            })
             .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {

@@ -523,7 +523,7 @@ pub fn complete_remote_datastore_group(_arg: &str, param: &HashMap<String, String>)
         .await
     }) {
         for item in data {
-            list.push(format!("{}/{}", item.backup_type, item.backup_id));
+            list.push(format!("{}/{}", item.backup.ty, item.backup.id));
         }
     }
 }

@@ -66,7 +66,7 @@ pub fn complete_datastore_group_filter(_arg: &str, param: &HashMap<String, String>)
         list.extend(
             groups
                 .iter()
-                .map(|group| format!("group:{}/{}", group.backup_type, group.backup_id)),
+                .map(|group| format!("group:{}/{}", group.backup.ty, group.backup.id)),
         );
     }
 }
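
The separate `backup_type`/`backup_id` fields on list items give way to a nested group object, which is why call sites now read `item.backup.ty` and `item.backup.id`. A rough sketch of the shape this implies; the struct and field definitions below are inferred only from the accesses visible in this diff, so the real types may differ:

use serde::{Deserialize, Serialize};

use pbs_api_types::BackupType;

// Inferred shape of the flattened group part of a list item.
#[derive(Clone, Debug, Deserialize, Serialize)]
struct GroupSketch {
    ty: BackupType,
    id: String,
}

// Inferred list item: the group is embedded and flattened, so the wire
// format can keep flat keys while Rust code gets one structured field.
#[derive(Clone, Debug, Deserialize, Serialize)]
struct ListItemSketch {
    #[serde(flatten)]
    backup: GroupSketch,
}

fn format_group(item: &ListItemSketch) -> String {
    // Same formatting as the shell-completion helpers above.
    format!("group:{}/{}", item.backup.ty, item.backup.id)
}

Flattening keeps the serialized format unchanged while letting Rust code pass the group around as a single value, which appears to be the motivation for the `item.backup.*` accesses throughout these hunks.
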
@@ -577,7 +577,7 @@ pub async fn pull_group(
     let mut result = client.get(&path, Some(args)).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;

-    list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
+    list.sort_unstable_by(|a, b| a.backup.time.cmp(&b.backup.time));

     client.login().await?; // make sure auth is complete

@@ -599,7 +599,7 @@ pub async fn pull_group(
     };

     for (pos, item) in list.into_iter().enumerate() {
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;

         // in-progress backups can't be synced
         if item.size.is_none() {
@ -712,9 +712,9 @@ pub async fn pull_store(
|
||||||
|
|
||||||
let total_count = list.len();
|
let total_count = list.len();
|
||||||
list.sort_unstable_by(|a, b| {
|
list.sort_unstable_by(|a, b| {
|
||||||
let type_order = a.backup_type.cmp(&b.backup_type);
|
let type_order = a.backup.ty.cmp(&b.backup.ty);
|
||||||
if type_order == std::cmp::Ordering::Equal {
|
if type_order == std::cmp::Ordering::Equal {
|
||||||
a.backup_id.cmp(&b.backup_id)
|
a.backup.id.cmp(&b.backup.id)
|
||||||
} else {
|
} else {
|
||||||
type_order
|
type_order
|
||||||
}
|
}
|
||||||
|
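
The sort now uses the enum's `Ord` implementation as the primary key instead of comparing the former string fields. A sketch of the same two-level ordering over a stand-in item type; the `Item` struct is an assumption inferred from the `a.backup.ty` and `a.backup.id` accesses, and real list items carry more fields:

use std::cmp::Ordering;

use pbs_api_types::BackupType;

// Stand-in for a list item, reduced to the two sort keys used above.
struct Item {
    ty: BackupType,
    id: String,
}

fn sort_items(list: &mut [Item]) {
    list.sort_unstable_by(|a, b| {
        // Primary key: the enum's own ordering; secondary key: the id.
        match a.ty.cmp(&b.ty) {
            Ordering::Equal => a.id.cmp(&b.id),
            other => other,
        }
    });
}

fn main() {
    let mut list = vec![
        Item { ty: BackupType::Vm, id: "100".into() },
        Item { ty: BackupType::Ct, id: "101".into() },
        Item { ty: BackupType::Ct, id: "100".into() },
    ];
    sort_items(&mut list);
    // Every adjacent pair respects the (type, id) sort key.
    assert!(list
        .windows(2)
        .all(|w| w[0].ty.cmp(&w[1].ty).then(w[0].id.cmp(&w[1].id)) != Ordering::Greater));
}
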
@ -726,7 +726,7 @@ pub async fn pull_store(
|
||||||
|
|
||||||
let list: Vec<BackupGroup> = list
|
let list: Vec<BackupGroup> = list
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|item| BackupGroup::new(item.backup_type, item.backup_id))
|
.map(|item| BackupGroup::new(item.backup.ty, item.backup.id))
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let list = if let Some(ref group_filter) = ¶ms.group_filter {
|
let list = if let Some(ref group_filter) = ¶ms.group_filter {
|
||||||
|
|