Compare commits


No commits in common. "master" and "v2.2.1" have entirely different histories.

130 changed files with 1471 additions and 3168 deletions


@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "2.2.3"
version = "2.2.1"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@ -61,7 +61,7 @@ hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.24"
nix = "0.19.1"
num-traits = "0.2"
once_cell = "1.3.1"
openssl = "0.10.38" # currently patched!
@ -69,7 +69,7 @@ pam = "0.7"
pam-sys = "0.5"
percent-encoding = "2.1"
regex = "1.5.5"
rustyline = "9"
rustyline = "7"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
siphasher = "0.3"
@ -77,7 +77,7 @@ syslog = "4.0"
tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.7", features = [ "codec", "io" ] }
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
tower-service = "0.3.0"
udev = "0.4"
url = "2.1"
@ -104,7 +104,7 @@ proxmox-time = "1.1.2"
proxmox-uuid = "1"
proxmox-serde = "0.1"
proxmox-shared-memory = "0.2"
proxmox-sys = { version = "0.3", features = [ "sortable-macro" ] }
proxmox-sys = { version = "0.2", features = [ "sortable-macro" ] }
proxmox-compression = "0.1"
@ -126,22 +126,18 @@ pbs-tape = { path = "pbs-tape" }
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]
#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
#proxmox-apt = { path = "../proxmox-apt" }
#proxmox = { path = "../proxmox/proxmox" }
#proxmox-async = { path = "../proxmox/proxmox-async" }
#proxmox-compression = { path = "../proxmox/proxmox-compression" }
#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
#proxmox-fuse = { path = "../proxmox-fuse" }
#proxmox-http = { path = "../proxmox/proxmox-http" }
#proxmox-io = { path = "../proxmox/proxmox-io" }
#proxmox-lang = { path = "../proxmox/proxmox-lang" }
#proxmox-openid = { path = "../proxmox-openid-rs" }
#proxmox-router = { path = "../proxmox/proxmox-router" }
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
#proxmox-shared-memory = { path = "../proxmox/proxmox-shared-memory" }
#proxmox-sys = { path = "../proxmox/proxmox-sys" }
#proxmox-serde = { path = "../proxmox/proxmox-serde" }
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
#proxmox-time = { path = "../proxmox/proxmox-time" }
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }

debian/changelog

@ -1,78 +1,4 @@
-rust-proxmox-backup (2.2.3-1) bullseye; urgency=medium
-  * datastore: swap dirtying the datastore cache every 60s by just using the
-    available config digest to detect any changes accurately when they actually
-    happen
-  * api: datastore list and datastore status: avoid opening datastore and
-    possibly iterating over namespace (for lesser privileged users), but
-    rather use the in-memory ACL tree directly to check if there's access to
-    any namespace below.
- -- Proxmox Support Team <support@proxmox.com>  Sat, 04 Jun 2022 16:30:05 +0200
-rust-proxmox-backup (2.2.2-3) bullseye; urgency=medium
-  * datastore: lookup: reuse ChunkStore on stale datastore re-open
-  * bump tokio (async framework) dependency
- -- Proxmox Support Team <support@proxmox.com>  Thu, 02 Jun 2022 17:25:01 +0200
-rust-proxmox-backup (2.2.2-2) bullseye; urgency=medium
-  * improvement of error handling when removing status files and locks from
-    jobs that were never executed.
- -- Proxmox Support Team <support@proxmox.com>  Wed, 01 Jun 2022 16:22:22 +0200
-rust-proxmox-backup (2.2.2-1) bullseye; urgency=medium
-  * Revert "verify: allow '0' days for reverification", was already possible
-    by setting "ignore-verified" to false
-  * ui: datastore permissions: allow ACL path edit & query namespaces
-  * accessible group iter: allow NS descending with DATASTORE_READ privilege
-  * prune datastore: rework worker task log
-  * prune datastore: support max-depth and improve priv checks
-  * ui: prune input: support opt-in recursive/max-depth field
-  * add prune job config and api, allowing one to setup a scheduled pruning
-    for a specific namespace only
-  * ui: add ui for prune jobs
-  * api: disable setting prune options in datastore.cfg and transform any
-    existing prune tasks from datastore config to new prune job config in a
-    post installation hook
-  * proxmox-tape: use correct api call for 'load-media-from-slot'
-  * avoid overly strict privilege restrictions for some API endpoints and
-    actions when using namespaces. Better support navigating the user
-    interface when only having Datastore.Admin on a (sub) namespace.
-  * include required privilege names in some permission errors
-  * docs: fix some typos
-  * api: status: include empty entry for stores with ns-only privs
-  * ui: datastore options: avoid breakage if rrd store or active-ops cannot
-    be queried
-  * ui: datastore content: only mask the inner treeview, not the top bar on
-    error to allow a user to trigger a manual reload
-  * ui: system config: improve bottom margins and scroll behavior
- -- Proxmox Support Team <support@proxmox.com>  Wed, 01 Jun 2022 15:09:36 +0200
-rust-proxmox-backup (2.2.1-1) bullseye; urgency=medium
+rust-proxmox-backup (2.2.1-1) UNRELEASED; urgency=medium
* docs: update some screenshots and add new ones

debian/control

@ -31,7 +31,7 @@ Build-Depends: debhelper (>= 12),
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev (>= 0.4.17-~~) <!nocheck>,
-librust-nix-0.24+default-dev,
+librust-nix-0.19+default-dev (>= 0.19.1-~~),
librust-nom-5+default-dev (>= 5.1-~~),
librust-num-traits-0.2+default-dev,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
@ -47,10 +47,10 @@ Build-Depends: debhelper (>= 12),
librust-proxmox-borrow-1+default-dev,
librust-proxmox-compression-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
-librust-proxmox-http-0.6+client-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+default-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+http-helpers-dev (>= 0.6.1-~~),
-librust-proxmox-http-0.6+websocket-dev (>= 0.6.1-~~),
+librust-proxmox-http-0.6.1+client-dev,
+librust-proxmox-http-0.6.1+default-dev,
+librust-proxmox-http-0.6.1+http-helpers-dev,
+librust-proxmox-http-0.6.1+websocket-dev,
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
@ -63,9 +63,8 @@ Build-Depends: debhelper (>= 12),
librust-proxmox-section-config-1+default-dev,
librust-proxmox-serde-0.1+default-dev,
librust-proxmox-shared-memory-0.2+default-dev,
-librust-proxmox-sys-0.3+default-dev,
-librust-proxmox-sys-0.3+logrotate-dev,
-librust-proxmox-sys-0.3+sortable-macro-dev,
+librust-proxmox-sys-0.2+default-dev (>= 0.2.1-~~),
+librust-proxmox-sys-0.2+sortable-macro-dev (>= 0.2.1-~~),
librust-proxmox-tfa-2+api-dev,
librust-proxmox-tfa-2+api-types-dev,
librust-proxmox-tfa-2+default-dev,
@ -75,7 +74,7 @@ Build-Depends: debhelper (>= 12),
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
librust-regex-1+default-dev (>= 1.5.5-~~),
-librust-rustyline-9+default-dev,
+librust-rustyline-7+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
@ -98,9 +97,9 @@ Build-Depends: debhelper (>= 12),
librust-tokio-1+time-dev (>= 1.6-~~),
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
librust-tokio-stream-0.1+default-dev,
-librust-tokio-util-0.7+codec-dev,
-librust-tokio-util-0.7+default-dev,
-librust-tokio-util-0.7+io-dev,
+librust-tokio-util-0.6+codec-dev,
+librust-tokio-util-0.6+default-dev,
+librust-tokio-util-0.6+io-dev,
librust-tower-service-0.3+default-dev,
librust-udev-0.4+default-dev,
librust-url-2+default-dev (>= 2.1-~~),

debian/postinst

@ -41,14 +41,7 @@ case "$1" in
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
fi
if dpkg --compare-versions "$2" 'lt' '2.2.2~'; then
echo "moving prune schedule from datacenter config to new prune job config"
proxmox-backup-manager update-to-prune-jobs-config \
|| echo "Failed to move prune jobs, please check manually"
true
fi
if dpkg --compare-versions "$2" 'lt' '2.1.3~' && test -e /etc/proxmox-backup/sync.cfg; then
if dpkg --compare-versions "$2" 'lt' '7.1-1' && test -e /etc/proxmox-backup/sync.cfg; then
prev_job=""
# read from HERE doc because POSIX sh limitations


@ -29,7 +29,7 @@ How long will my Proxmox Backup Server version be supported?
+=======================+======================+===============+============+====================+
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
+-----------------------+----------------------+---------------+------------+--------------------+
-|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | 2022-08 | 2022-07 |
+|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | ~Q2/2022 | Q2-Q3/2022 |
+-----------------------+----------------------+---------------+------------+--------------------+


@ -217,7 +217,7 @@ errors. Newer ZFS packages ship the daemon in a separate package ``zfs-zed``,
which should already be installed by default in `Proxmox Backup`_.
You can configure the daemon via the file ``/etc/zfs/zed.d/zed.rc`` with your
-favorite editor. The required setting for email notification is
+favorite editor. The required setting for email notfication is
``ZED_EMAIL_ADDR``, which is set to ``root`` by default.
.. code-block:: console


@ -125,7 +125,7 @@ execution:
- ``remote-ns``: the remote namespace anchor (default: the root namespace)
-- ``ns``: the local namespace anchor (default: the root namespace)
+- ``ns``: the local namespace anchor (default: the root naemspace)
- ``max-depth``: whether to recursively iterate over sub-namespaces of the remote
namespace anchor (default: `None`)


@ -51,7 +51,7 @@ ENVIRONMENT
:CHANGER: If set, replaces the `--device` option
:PROXMOX_TAPE_DRIVE: If set, use the Proxmox Backup Server
-configuration to find the associated changer device.
+configuration to find the associcated changer device.
.. include:: ../pbs-copyright.rst


@ -262,7 +262,7 @@ categorized by checksum, after a backup operation has been executed.
Once you uploaded some backups, or created namespaces, you may see the Backup
-Type (`ct`, `vm`, `host`) and the start of the namespace hierarchy (`ns`).
+Type (`ct`, `vm`, `host`) and the start of the namespace hierachy (`ns`).
.. _storage_namespaces:


@ -682,7 +682,7 @@ To remove a job, please use:
# proxmox-tape backup-job remove job2
By default, all (recursive) namespaces of the datastore are included in a tape
-backup. You can specify a single namespace with ``ns`` and a depth with
+backup. You can specify a single namespace wth ``ns`` and a depth with
``max-depth``. For example:
.. code-block:: console


@ -95,7 +95,7 @@ The backup server groups backups by *type*, where *type* is one of:
Backup ID
---------
-A unique ID for a specific Backup Type and Backup Namespace. Usually the
+A unique ID for a specific Backup Type and Backup Namesapce. Usually the
virtual machine or container ID. ``host`` type backups normally use the
hostname.


@ -73,17 +73,6 @@ constnamedbitmap! {
}
}
-pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
-PRIVILEGES
-.iter()
-.fold(Vec::new(), |mut priv_names, (name, value)| {
-if value & privs != 0 {
-priv_names.push(name);
-}
-priv_names
-})
-}
/// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the 'root@pam` superuser
pub const ROLE_ADMIN: u64 = u64::MAX;


@ -157,6 +157,52 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
.minimum(1)
.schema();
+#[api(
+properties: {
+"keep-last": {
+schema: PRUNE_SCHEMA_KEEP_LAST,
+optional: true,
+},
+"keep-hourly": {
+schema: PRUNE_SCHEMA_KEEP_HOURLY,
+optional: true,
+},
+"keep-daily": {
+schema: PRUNE_SCHEMA_KEEP_DAILY,
+optional: true,
+},
+"keep-weekly": {
+schema: PRUNE_SCHEMA_KEEP_WEEKLY,
+optional: true,
+},
+"keep-monthly": {
+schema: PRUNE_SCHEMA_KEEP_MONTHLY,
+optional: true,
+},
+"keep-yearly": {
+schema: PRUNE_SCHEMA_KEEP_YEARLY,
+optional: true,
+},
+}
+)]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Common pruning options
+pub struct PruneOptions {
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_last: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_hourly: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_daily: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_weekly: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_monthly: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_yearly: Option<u64>,
+}
#[api]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
@ -218,8 +264,29 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore
optional: true,
schema: PRUNE_SCHEDULE_SCHEMA,
},
-keep: {
-type: crate::KeepOptions,
+"keep-last": {
+optional: true,
+schema: PRUNE_SCHEMA_KEEP_LAST,
+},
+"keep-hourly": {
+optional: true,
+schema: PRUNE_SCHEMA_KEEP_HOURLY,
+},
+"keep-daily": {
+optional: true,
+schema: PRUNE_SCHEMA_KEEP_DAILY,
+},
+"keep-weekly": {
+optional: true,
+schema: PRUNE_SCHEMA_KEEP_WEEKLY,
+},
+"keep-monthly": {
+optional: true,
+schema: PRUNE_SCHEMA_KEEP_MONTHLY,
+},
+"keep-yearly": {
+optional: true,
+schema: PRUNE_SCHEMA_KEEP_YEARLY,
+},
"verify-new": {
description: "If enabled, all new backups will be verified right after completion.",
@ -243,38 +310,38 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore
pub struct DataStoreConfig {
#[updater(skip)]
pub name: String,
#[updater(skip)]
pub path: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub gc_schedule: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub prune_schedule: Option<String>,
-#[serde(flatten)]
-pub keep: crate::KeepOptions,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_last: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_hourly: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_daily: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_weekly: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_monthly: Option<u64>,
+#[serde(skip_serializing_if = "Option::is_none")]
+pub keep_yearly: Option<u64>,
/// If enabled, all backups will be verified right after completion.
#[serde(skip_serializing_if = "Option::is_none")]
pub verify_new: Option<bool>,
/// Send job email notification to this user
#[serde(skip_serializing_if = "Option::is_none")]
pub notify_user: Option<Userid>,
/// Send notification only for job errors
#[serde(skip_serializing_if = "Option::is_none")]
pub notify: Option<String>,
/// Datastore tuning options
#[serde(skip_serializing_if = "Option::is_none")]
pub tuning: Option<String>,
/// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in "
#[serde(skip_serializing_if = "Option::is_none")]
pub maintenance_mode: Option<String>,
@ -288,7 +355,12 @@ impl DataStoreConfig {
comment: None,
gc_schedule: None,
prune_schedule: None,
-keep: Default::default(),
+keep_last: None,
+keep_hourly: None,
+keep_daily: None,
+keep_weekly: None,
+keep_monthly: None,
+keep_yearly: None,
verify_new: None,
notify_user: None,
notify: None,
@ -622,39 +694,6 @@ impl BackupNamespace {
}
Ok(())
}
-pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
-let mut path: Vec<&str> = vec!["datastore", store];
-if self.is_root() {
-path
-} else {
-path.extend(self.inner.iter().map(|comp| comp.as_str()));
-path
-}
-}
-/// Check whether this namespace contains another namespace.
-///
-/// If so, the depth is returned.
-///
-/// Example:
-/// ```
-/// # use pbs_api_types::BackupNamespace;
-/// let main: BackupNamespace = "a/b".parse().unwrap();
-/// let sub: BackupNamespace = "a/b/c/d".parse().unwrap();
-/// let other: BackupNamespace = "x/y".parse().unwrap();
-/// assert_eq!(main.contains(&main), Some(0));
-/// assert_eq!(main.contains(&sub), Some(2));
-/// assert_eq!(sub.contains(&main), None);
-/// assert_eq!(main.contains(&other), None);
-/// ```
-pub fn contains(&self, other: &BackupNamespace) -> Option<usize> {
-other
-.inner
-.strip_prefix(&self.inner[..])
-.map(|suffix| suffix.len())
-}
}
impl fmt::Display for BackupNamespace {
@ -944,7 +983,7 @@ impl BackupDir {
where
T: Into<String>,
{
-let time = proxmox_time::parse_rfc3339(backup_time_string)?;
+let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
let group = BackupGroup::new(ty, id.into());
Ok(Self { group, time })
}
@ -987,6 +1026,35 @@ impl fmt::Display for BackupDir {
}
}
+/// Helper struct for places where sensible formatting of store+NS combo is required
+pub struct DatastoreWithNamespace {
+pub store: String,
+pub ns: BackupNamespace,
+}
+impl fmt::Display for DatastoreWithNamespace {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+if self.ns.is_root() {
+write!(f, "datastore {}, root namespace", self.store)
+} else {
+write!(f, "datastore '{}', namespace '{}'", self.store, self.ns)
+}
+}
+}
+impl DatastoreWithNamespace {
+pub fn acl_path(&self) -> Vec<&str> {
+let mut path: Vec<&str> = vec!["datastore", &self.store];
+if self.ns.is_root() {
+path
+} else {
+path.extend(self.ns.inner.iter().map(|comp| comp.as_str()));
+path
+}
+}
+}
/// Used when both a backup group or a directory can be valid.
pub enum BackupPart {
Group(BackupGroup),
@ -1311,23 +1379,6 @@ pub struct DataStoreStatusListItem {
pub gc_status: Option<GarbageCollectionStatus>,
}
-impl DataStoreStatusListItem {
-pub fn empty(store: &str, err: Option<String>) -> Self {
-DataStoreStatusListItem {
-store: store.to_owned(),
-total: -1,
-used: -1,
-avail: -1,
-history: None,
-history_start: None,
-history_delta: None,
-estimated_full_date: None,
-error: err,
-gc_status: None,
-}
-}
-}
pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new(
@ -1428,12 +1479,3 @@ pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String {
format!("{}/{}", ns.display_as_path(), dir)
}
}
-/// Prints a Datastore name and [`BackupNamespace`] for logs/errors.
-pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
-if ns.is_root() {
-format!("datastore '{}', root namespace", store)
-} else {
-format!("datastore '{}', namespace '{}'", store, ns)
-}
-}


@ -18,7 +18,7 @@ const_regex! {
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
-pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:");
+pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(?:", BACKUP_NS_RE!(), r"):");
}
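Read together, the change makes the namespace part of a sync-job worker ID optional: the master-side pattern appends `(?::(NS))?:` where the v2.2.1 pattern required a `:NS:` segment. A simplified, self-contained illustration of the two behaviors, using plain `regex` stand-ins rather than the real `PROXMOX_SAFE_ID_REGEX_STR!`/`BACKUP_NS_RE!` macros (the `\w`-based sub-patterns are assumptions for this sketch):

```rust
use regex::Regex;

fn main() {
    // master: the namespace segment is optional
    let new_style = Regex::new(r"^(\w+):(\w+):(\w+)(?::([\w/]*))?:(\w+)$").unwrap();
    // v2.2.1: the namespace segment is required
    let old_style = Regex::new(r"^(\w+):(\w+):(\w+):(?:[\w/]*):(\w+)$").unwrap();

    // A job anchored at the root namespace has no NS component in its worker ID:
    assert!(new_style.is_match("remote:rstore:lstore:job1"));
    assert!(!old_style.is_match("remote:rstore:lstore:job1"));

    // Both accept a worker ID that carries a namespace anchor:
    assert!(new_style.is_match("remote:rstore:lstore:a/b:job1"));
    assert!(old_style.is_match("remote:rstore:lstore:a/b:job1"));
}
```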
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
@ -155,7 +155,7 @@ pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
.schema();
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'")
IntegerSchema::new("Days after that a verification becomes outdated. (0 means always)")
.minimum(0)
.schema();
@ -200,7 +200,7 @@ pub struct VerificationJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
-/// the datastore ID this verification job affects
+/// the datastore ID this verificaiton job affects
pub store: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// if not set to false, check the age of the last snapshot verification to filter
@ -223,15 +223,6 @@ pub struct VerificationJobConfig {
pub max_depth: Option<usize>,
}
-impl VerificationJobConfig {
-pub fn acl_path(&self) -> Vec<&str> {
-match self.ns.as_ref() {
-Some(ns) => ns.acl_path(&self.store),
-None => vec!["datastore", &self.store],
-}
-}
-}
#[api(
properties: {
config: {
@ -390,7 +381,7 @@ impl std::str::FromStr for GroupFilter {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
-match s.split_once(':') {
+match s.split_once(":") {
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())),
Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)),
Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
@ -507,15 +498,6 @@ pub struct SyncJobConfig {
pub limit: RateLimitConfig,
}
-impl SyncJobConfig {
-pub fn acl_path(&self) -> Vec<&str> {
-match self.ns.as_ref() {
-Some(ns) => ns.acl_path(&self.store),
-None => vec!["datastore", &self.store],
-}
-}
-}
#[api(
properties: {
config: {
@ -535,186 +517,3 @@ pub struct SyncJobStatus {
#[serde(flatten)]
pub status: JobScheduleStatus,
}
-/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API
-/// call to prune a specific group, where `max-depth` makes no sense.
-#[api(
-properties: {
-"keep-last": {
-schema: crate::PRUNE_SCHEMA_KEEP_LAST,
-optional: true,
-},
-"keep-hourly": {
-schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
-optional: true,
-},
-"keep-daily": {
-schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
-optional: true,
-},
-"keep-weekly": {
-schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
-optional: true,
-},
-"keep-monthly": {
-schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
-optional: true,
-},
-"keep-yearly": {
-schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
-optional: true,
-},
-}
-)]
-#[derive(Serialize, Deserialize, Default, Updater)]
-#[serde(rename_all = "kebab-case")]
-/// Common pruning options
-pub struct KeepOptions {
-#[serde(skip_serializing_if = "Option::is_none")]
-pub keep_last: Option<u64>,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub keep_hourly: Option<u64>,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub keep_daily: Option<u64>,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub keep_weekly: Option<u64>,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub keep_monthly: Option<u64>,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub keep_yearly: Option<u64>,
-}
-impl KeepOptions {
-pub fn keeps_something(&self) -> bool {
-self.keep_last.unwrap_or(0)
-+ self.keep_hourly.unwrap_or(0)
-+ self.keep_daily.unwrap_or(0)
-+ self.keep_weekly.unwrap_or(0)
-+ self.keep_monthly.unwrap_or(0)
-+ self.keep_yearly.unwrap_or(0)
-> 0
-}
-}
-#[api(
-properties: {
-keep: {
-type: KeepOptions,
-},
-ns: {
-type: BackupNamespace,
-optional: true,
-},
-"max-depth": {
-schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
-optional: true,
-},
-}
-)]
-#[derive(Serialize, Deserialize, Default, Updater)]
-#[serde(rename_all = "kebab-case")]
-/// Common pruning options
-pub struct PruneJobOptions {
-#[serde(flatten)]
-pub keep: KeepOptions,
-/// The (optional) recursion depth
-#[serde(skip_serializing_if = "Option::is_none")]
-pub max_depth: Option<usize>,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub ns: Option<BackupNamespace>,
-}
-impl PruneJobOptions {
-pub fn keeps_something(&self) -> bool {
-self.keep.keeps_something()
-}
-pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
-match &self.ns {
-Some(ns) => ns.acl_path(store),
-None => vec!["datastore", store],
-}
-}
-}
-#[api(
-properties: {
-disable: {
-type: Boolean,
-optional: true,
-default: false,
-},
-id: {
-schema: JOB_ID_SCHEMA,
-},
-store: {
-schema: DATASTORE_SCHEMA,
-},
-schedule: {
-schema: PRUNE_SCHEDULE_SCHEMA,
-optional: true,
-},
-comment: {
-optional: true,
-schema: SINGLE_LINE_COMMENT_SCHEMA,
-},
-options: {
-type: PruneJobOptions,
-},
-},
-)]
-#[derive(Deserialize, Serialize, Updater)]
-#[serde(rename_all = "kebab-case")]
-/// Prune configuration.
-pub struct PruneJobConfig {
-/// unique ID to address this job
-#[updater(skip)]
-pub id: String,
-pub store: String,
-/// Disable this job.
-#[serde(default, skip_serializing_if = "is_false")]
-#[updater(serde(skip_serializing_if = "Option::is_none"))]
-pub disable: bool,
-pub schedule: String,
-#[serde(skip_serializing_if = "Option::is_none")]
-pub comment: Option<String>,
-#[serde(flatten)]
-pub options: PruneJobOptions,
-}
-impl PruneJobConfig {
-pub fn acl_path(&self) -> Vec<&str> {
-self.options.acl_path(&self.store)
-}
-}
-fn is_false(b: &bool) -> bool {
-!b
-}
-#[api(
-properties: {
-config: {
-type: PruneJobConfig,
-},
-status: {
-type: JobScheduleStatus,
-},
-},
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Status of prune job
-pub struct PruneJobStatus {
-#[serde(flatten)]
-pub config: PruneJobConfig,
-#[serde(flatten)]
-pub status: JobScheduleStatus,
-}
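The refactor on the master side replaces six inline `keep_*` fields with a nested `KeepOptions` marked `#[serde(flatten)]`, so the serialized form (config files and API JSON) stays identical while the keep logic becomes reusable across `PruneJobOptions`, `DataStoreConfig`, and the prune API. A minimal sketch of that pattern with plain serde (no `#[api]` macro; `serde_json` used only for the demonstration):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
struct KeepOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    keep_last: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    keep_daily: Option<u64>,
}

#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
struct PruneJobOptions {
    // Flattened: `keep-last`/`keep-daily` serialize at this level, exactly as
    // they did when they were inline fields of the outer struct.
    #[serde(flatten)]
    keep: KeepOptions,
    #[serde(skip_serializing_if = "Option::is_none")]
    max_depth: Option<usize>,
}

fn main() {
    let opts = PruneJobOptions {
        keep: KeepOptions { keep_last: Some(3), keep_daily: None },
        max_depth: Some(2),
    };
    // Prints: {"keep-last":3,"max-depth":2}
    println!("{}", serde_json::to_string(&opts).unwrap());
}
```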


@ -1,6 +1,6 @@
[package]
name = "pbs-buildcfg"
version = "2.2.3"
version = "2.2.1"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
description = "macros used for pbs related paths such as configdir and rundir"


@ -16,12 +16,12 @@ http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
nix = "0.24"
nix = "0.19.1"
openssl = "0.10"
percent-encoding = "2.1"
pin-project-lite = "0.2"
regex = "1.5"
rustyline = "9"
rustyline = "7"
serde = "1.0"
serde_json = "1.0"
tokio = { version = "1.6", features = [ "fs", "signal" ] }
@ -41,7 +41,7 @@ proxmox-lang = "1.1"
proxmox-router = { version = "1.2", features = [ "cli" ] }
proxmox-schema = "1.3.1"
proxmox-time = "1"
proxmox-sys = "0.3"
proxmox-sys = "0.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }


@ -329,13 +329,13 @@ impl Archiver {
Mode::empty(),
) {
Ok(fd) => Ok(Some(fd)),
-Err(Errno::ENOENT) => {
+Err(nix::Error::Sys(Errno::ENOENT)) => {
if existed {
self.report_vanished_file()?;
}
Ok(None)
}
-Err(Errno::EACCES) => {
+Err(nix::Error::Sys(Errno::EACCES)) => {
writeln!(
self.errors,
"failed to open file: {:?}: access denied",
@ -343,7 +343,7 @@ impl Archiver {
)?;
Ok(None)
}
-Err(Errno::EPERM) if !noatime.is_empty() => {
+Err(nix::Error::Sys(Errno::EPERM)) if !noatime.is_empty() => {
// Retry without O_NOATIME:
noatime = OFlag::empty();
continue;
@ -899,7 +899,7 @@ fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
match unsafe { fs::read_attr_fd(fd, &mut attr) } {
Ok(_) => (),
-Err(errno) if errno_is_unsupported(errno) => {
+Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read file attributes: {}", err),
@ -921,7 +921,7 @@ fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(),
match unsafe { fs::read_fat_attr_fd(fd, &mut attr) } {
Ok(_) => (),
-Err(errno) if errno_is_unsupported(errno) => {
+Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read fat attributes: {}", err),
@ -959,7 +959,10 @@ fn get_quota_project_id(
// On some FUSE filesystems it can happen that ioctl is not supported.
// For these cases projid is set to 0 while the error is ignored.
-if let Err(errno) = res {
+if let Err(err) = res {
+let errno = err
+.as_errno()
+.ok_or_else(|| format_err!("error while reading quota project id"))?;
if errno_is_unsupported(errno) {
return Ok(());
} else {
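The error-handling churn in this file comes from the nix 0.19 → 0.24 bump on the master side: nix 0.19 wrapped OS errors in an enum (`nix::Error::Sys(Errno)`, unwrapped via `as_errno()`), while from nix 0.23 on `nix::Error` is simply `Errno`, so call sites match the errno directly. A minimal sketch of the two styles (`do_syscall` is a hypothetical stand-in, not a function from this codebase):

```rust
use nix::errno::Errno;

// Hypothetical wrapper returning nix's Result type (nix 0.24: Result<T, Errno>).
fn do_syscall() -> nix::Result<()> {
    Err(Errno::ENOENT)
}

fn main() {
    match do_syscall() {
        Ok(()) => println!("ok"),
        // nix 0.24: `nix::Error` *is* `Errno`, so match the variant directly.
        Err(Errno::ENOENT) => println!("not found"),
        // nix 0.19 required: Err(nix::Error::Sys(Errno::ENOENT)) => ...
        Err(err) => println!("other error: {err}"),
    }
}
```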


@ -428,7 +428,7 @@ impl Extractor {
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
-Err(errno) if errno == nix::errno::Errno::EINTR => true,
+Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
} {}
}
@ -485,7 +485,7 @@ impl Extractor {
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
-Err(errno) if errno == nix::errno::Errno::EINTR => true,
+Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
} {}
}
@ -584,7 +584,8 @@ where
match entry.kind() {
EntryKind::File { .. } => {
let size = decoder.content_size().unwrap_or(0);
-tar_add_file(&mut tarencoder, decoder.contents(), size, metadata, path).await?
+tar_add_file(&mut tarencoder, decoder.contents(), size, &metadata, &path)
+.await?
}
EntryKind::Hardlink(link) => {
if !link.data.is_empty() {
@ -613,7 +614,7 @@ where
decoder.contents(),
size,
metadata,
-path,
+&path,
)
.await?;
hardlinks.insert(realpath.to_owned(), path.to_owned());


@ -372,7 +372,7 @@ fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(
let mut fattr: libc::c_long = 0;
match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
Ok(_) => (),
-Err(errno) if errno_is_unsupported(errno) => {
+Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read file attributes: {}", err),
@ -386,7 +386,7 @@ fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(
match unsafe { fs::write_attr_fd(fd, &attr) } {
Ok(_) => Ok(()),
-Err(errno) if errno_is_unsupported(errno) => Ok(()),
+Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
Err(err) => bail!("failed to set file attributes: {}", err),
}
}
@ -400,7 +400,7 @@ fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
if fatattr != 0 {
match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
Ok(_) => (),
-Err(errno) if errno_is_unsupported(errno) => (),
+Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
Err(err) => bail!("failed to set file FAT attributes: {}", err),
}
}


@ -72,7 +72,7 @@ impl tower_service::Service<Uri> for VsockConnector {
)?;
let sock_addr = VsockAddr::new(cid, port as u32);
-connect(sock_fd, &sock_addr)?;
+connect(sock_fd, &SockAddr::Vsock(sock_addr))?;
// connect sync, but set nonblock after (tokio requires it)
let std_stream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(sock_fd) };


@ -10,7 +10,7 @@ anyhow = "1.0"
hex = "0.4.3"
lazy_static = "1.4"
libc = "0.2"
nix = "0.24"
nix = "0.19.1"
once_cell = "1.3.1"
openssl = "0.10"
regex = "1.5"
@ -24,7 +24,7 @@ proxmox-section-config = "1"
proxmox-time = "1"
proxmox-serde = "0.1"
proxmox-shared-memory = "0.2"
proxmox-sys = "0.3"
proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }


@ -4,7 +4,7 @@ use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, RwLock};
-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, Error};
use lazy_static::lazy_static;
@ -301,45 +301,6 @@ impl AclTreeNode {
map.insert(role, propagate);
}
}
-/// Check if auth_id has any of the provided privileges on the current node.
-///
-/// If `only_propagated` is set to true only propagating privileges will be checked.
-fn check_any_privs(
-&self,
-auth_id: &Authid,
-privs: u64,
-only_propagated: bool,
-) -> Result<bool, Error> {
-for role in self.extract_roles(&auth_id, !only_propagated).into_keys() {
-let current_privs = Role::from_str(&role)
-.map_err(|e| format_err!("invalid role in current node: {role} - {e}"))?
-as u64;
-if privs & current_privs != 0 {
-return Ok(true);
-}
-}
-return Ok(false);
-}
-/// Checks if the given auth_id has any of the privileges specified by `privs` on the sub-tree
-/// below the current node.
-fn any_privs_below(&self, auth_id: &Authid, privs: u64) -> Result<bool, Error> {
-// set only_propagated to false to check all roles on the current node
-if self.check_any_privs(auth_id, privs, false)? {
-return Ok(true);
-}
-for (_comp, child) in self.children.iter() {
-if child.any_privs_below(auth_id, privs)? {
-return Ok(true);
-}
-}
-return Ok(false);
-}
}
impl AclTree {
@ -642,22 +603,15 @@ impl AclTree {
let mut node = &self.root;
let mut role_map = node.extract_roles(auth_id, path.is_empty());
-let mut comp_iter = path.iter().peekable();
-while let Some(comp) = comp_iter.next() {
-let last_comp = comp_iter.peek().is_none();
-let mut sub_comp_iter = comp.split('/').peekable();
-while let Some(sub_comp) = sub_comp_iter.next() {
-let last_sub_comp = last_comp && sub_comp_iter.peek().is_none();
-node = match node.children.get(sub_comp) {
+for (pos, comp) in path.iter().enumerate() {
+let last_comp = (pos + 1) == path.len();
+for scomp in comp.split('/') {
+node = match node.children.get(scomp) {
Some(n) => n,
None => return role_map, // path not found
};
-let new_map = node.extract_roles(auth_id, last_sub_comp);
+let new_map = node.extract_roles(auth_id, last_comp);
if !new_map.is_empty() {
// overwrite previous mappings
role_map = new_map;
@ -667,44 +621,6 @@ impl AclTree {
role_map
}
-/// Checks whether the `auth_id` has any of the privileges `privs` on any object below `path`.
-pub fn any_privs_below(
-&self,
-auth_id: &Authid,
-path: &[&str],
-privs: u64,
-) -> Result<bool, Error> {
-let mut node = &self.root;
-// check first if there's any propagated priv we need to be aware of
-for outer in path {
-for c in outer.split('/') {
-if node.check_any_privs(auth_id, privs, true)? {
-return Ok(true);
-}
-// check next component
-node = node.children.get(&c.to_string()).ok_or(format_err!(
-"component '{c}' of path '{path:?}' does not exist in current acl tree"
-))?;
-}
-}
-// check last node in the path too
-if node.check_any_privs(auth_id, privs, true)? {
-return Ok(true);
-}
-// now search through the sub-tree
-for (_comp, child) in node.children.iter() {
-if child.any_privs_below(auth_id, privs)? {
-return Ok(true);
-}
-}
-// we could not find any privileges, return false
-return Ok(false);
-}
}
/// Filename where [AclTree] is stored.
@ -744,7 +660,7 @@ pub fn cached_config() -> Result<Arc<AclTree>, Error> {
let stat = match nix::sys::stat::stat(ACL_CFG_FILENAME) {
Ok(stat) => Some(stat),
-Err(nix::errno::Errno::ENOENT) => None,
+Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => None,
Err(err) => bail!("unable to stat '{}' - {}", ACL_CFG_FILENAME, err),
};
@ -791,7 +707,7 @@ mod test {
use super::AclTree;
use anyhow::Error;
-use pbs_api_types::{Authid, ROLE_ADMIN, ROLE_DATASTORE_READER, ROLE_TAPE_READER};
+use pbs_api_types::Authid;
fn check_roles(tree: &AclTree, auth_id: &Authid, path: &str, expected_roles: &str) {
let path_vec = super::split_acl_path(path);
@ -933,45 +849,4 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup
Ok(())
}
-#[test]
-fn test_any_privs_below() -> Result<(), Error> {
-let tree = AclTree::from_raw(
-"\
-acl:0:/store/store2:user1@pbs:Admin\n\
-acl:1:/store/store2/store31/store4/store6:user2@pbs:DatastoreReader\n\
-acl:0:/store/store2/store3:user1@pbs:Admin\n\
-",
-)
-.expect("failed to parse acl tree");
-let user1: Authid = "user1@pbs".parse()?;
-let user2: Authid = "user2@pbs".parse()?;
-// user1 has admin on "/store/store2/store3" -> return true
-assert!(tree.any_privs_below(&user1, &["store"], ROLE_ADMIN)?);
-// user2 has no privileges under "/store/store2/store3" --> return false
-assert!(!tree.any_privs_below(
-&user2,
-&["store", "store2", "store3"],
-ROLE_DATASTORE_READER
-)?);
-// user2 has DatastoreReader privileges under "/store/store2/store31" --> return true
-assert!(tree.any_privs_below(&user2, &["store/store2/store31"], ROLE_DATASTORE_READER)?);
-// user2 has no TapeReader privileges under "/store/store2/store31" --> return false
-assert!(!tree.any_privs_below(&user2, &["store/store2/store31"], ROLE_TAPE_READER)?);
-// user2 has no DatastoreReader propagating privileges on
-// "/store/store2/store31/store4/store6" --> return true
-assert!(tree.any_privs_below(
-&user2,
-&["store/store2/store31/store4/store6"],
-ROLE_DATASTORE_READER
-)?);
-Ok(())
-}
}
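Per the 2.2.3-1 changelog entry above, this master-side addition lets the datastore list/status API consult the in-memory ACL tree instead of opening each datastore. A rough sketch of the intended call pattern, using the `CachedUserInfo::any_privs_below` wrapper shown in the next file's diff (the helper function and privilege choice here are assumptions, not code from this diff):

```rust
use anyhow::Error;
use pbs_api_types::{Authid, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP};
use pbs_config::CachedUserInfo;

// Should a datastore show up in the list for this user? True even when the
// only privileges sit on a namespace *below* the datastore root.
fn can_see_datastore(
    user_info: &CachedUserInfo,
    auth_id: &Authid,
    store: &str,
) -> Result<bool, Error> {
    let privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP;
    user_info.any_privs_below(auth_id, &["datastore", store], privs)
}
```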


@ -9,7 +9,7 @@ use proxmox_router::UserInformation;
use proxmox_section_config::SectionConfigData;
use proxmox_time::epoch_i64;
-use pbs_api_types::{privs_to_priv_names, ApiToken, Authid, User, Userid, ROLE_ADMIN};
+use pbs_api_types::{ApiToken, Authid, User, Userid, ROLE_ADMIN};
use crate::acl::{AclTree, ROLE_NAMES};
use crate::ConfigVersionCache;
@ -123,16 +123,7 @@ impl CachedUserInfo {
if !allowed {
// printing the path doesn't leak any information as long as we
// always check privilege before resource existence
-let priv_names = privs_to_priv_names(required_privs);
-let priv_names = if partial {
-priv_names.join("|")
-} else {
-priv_names.join("&")
-};
-bail!(
-"missing permissions '{priv_names}' on '/{}'",
-path.join("/")
-);
+bail!("no permissions on '/{}'", path.join("/"));
}
Ok(())
}
@ -179,16 +170,6 @@ impl CachedUserInfo {
(privs, propagated_privs)
}
-/// Checks whether the `auth_id` has any of the privileges `privs` on any object below `path`.
-pub fn any_privs_below(
-&self,
-auth_id: &Authid,
-path: &[&str],
-privs: u64,
-) -> Result<bool, Error> {
-self.acl_tree.any_privs_below(auth_id, path, privs)
-}
}
impl UserInformation for CachedUserInfo {


@ -26,7 +26,6 @@ struct ConfigVersionCacheDataInner {
// Traffic control (traffic-control.cfg) generation/version.
traffic_control_generation: AtomicUsize,
// datastore (datastore.cfg) generation/version
-// FIXME: remove with PBS 3.0
datastore_generation: AtomicUsize,
// Add further atomics here
}
@ -145,12 +144,19 @@ impl ConfigVersionCache {
.fetch_add(1, Ordering::AcqRel);
}
+/// Returns the datastore generation number.
+pub fn datastore_generation(&self) -> usize {
+self.shmem
+.data()
+.datastore_generation
+.load(Ordering::Acquire)
+}
/// Increase the datastore generation number.
-// FIXME: remove with PBS 3.0 or make actually useful again in datastore lookup
pub fn increase_datastore_generation(&self) -> usize {
self.shmem
.data()
.datastore_generation
-.fetch_add(1, Ordering::AcqRel)
+.fetch_add(1, Ordering::Acquire)
}
}


@ -2,7 +2,7 @@ use anyhow::Error;
use lazy_static::lazy_static;
use std::collections::HashMap;
-use proxmox_schema::{AllOfSchema, ApiType};
+use proxmox_schema::{ApiType, Schema};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};
@ -14,12 +14,15 @@ lazy_static! {
}
fn init() -> SectionConfig {
-const OBJ_SCHEMA: &AllOfSchema = DataStoreConfig::API_SCHEMA.unwrap_all_of_schema();
+let obj_schema = match DataStoreConfig::API_SCHEMA {
+Schema::Object(ref obj_schema) => obj_schema,
+_ => unreachable!(),
+};
let plugin = SectionConfigPlugin::new(
"datastore".to_string(),
Some(String::from("name")),
-OBJ_SCHEMA,
+obj_schema,
);
let mut config = SectionConfig::new(&DATASTORE_SCHEMA);
config.register_plugin(plugin);
@ -64,11 +67,11 @@ pub fn complete_datastore_name(_arg: &str, _param: &HashMap<String, String>) ->
}
pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
-let mut list = vec![
-String::from("/"),
-String::from("/datastore"),
-String::from("/datastore/"),
-];
+let mut list = Vec::new();
+list.push(String::from("/"));
+list.push(String::from("/datastore"));
+list.push(String::from("/datastore/"));
if let Ok((data, _digest)) = config() {
for id in data.sections.keys() {


@ -370,8 +370,8 @@ fn fingerprint_checks() -> Result<(), Error> {
131, 185, 101, 156, 10, 87, 174, 25, 144, 144, 21, 155,
]);
-let data = serde_json::to_vec(&key).expect("encoding KeyConfig failed");
-decrypt_key(&data, &{ || Ok(Vec::new()) })
+let mut data = serde_json::to_vec(&key).expect("encoding KeyConfig failed");
+decrypt_key(&mut data, &{ || Ok(Vec::new()) })
.expect_err("decoding KeyConfig with wrong fingerprint worked");
let key = KeyConfig {
@ -383,8 +383,8 @@ fn fingerprint_checks() -> Result<(), Error> {
hint: None,
};
-let data = serde_json::to_vec(&key).expect("encoding KeyConfig failed");
-let (key_data, created, fingerprint) = decrypt_key(&data, &{ || Ok(Vec::new()) })
+let mut data = serde_json::to_vec(&key).expect("encoding KeyConfig failed");
+let (key_data, created, fingerprint) = decrypt_key(&mut data, &{ || Ok(Vec::new()) })
.expect("decoding KeyConfig without fingerprint failed");
assert_eq!(key.data, key_data);


@ -7,7 +7,6 @@ pub mod drive;
pub mod key_config;
pub mod media_pool;
pub mod network;
-pub mod prune;
pub mod remote;
pub mod sync;
pub mod tape_encryption_keys;


@ -101,7 +101,7 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
if let Some(caps) = CIDR_V4_REGEX.captures(cidr) {
let address = &caps[1];
if let Some(mask) = caps.get(2) {
-let mask: u8 = mask.as_str().parse()?;
+let mask = u8::from_str_radix(mask.as_str(), 10)?;
check_netmask(mask, false)?;
Ok((address.to_string(), Some(mask), false))
} else {
@ -110,7 +110,7 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
} else if let Some(caps) = CIDR_V6_REGEX.captures(cidr) {
let address = &caps[1];
if let Some(mask) = caps.get(2) {
-let mask: u8 = mask.as_str().parse()?;
+let mask = u8::from_str_radix(mask.as_str(), 10)?;
check_netmask(mask, true)?;
Ok((address.to_string(), Some(mask), true))
} else {


@ -164,7 +164,7 @@ impl<R: BufRead> NetworkParser<R> {
let mask = if let Some(mask) = IPV4_MASK_HASH_LOCALNET.get(netmask.as_str()) {
*mask
} else {
-match netmask.as_str().parse::<u8>() {
+match u8::from_str_radix(netmask.as_str(), 10) {
Ok(mask) => mask,
Err(err) => {
bail!("unable to parse netmask '{}' - {}", netmask, err);
@ -211,7 +211,7 @@ impl<R: BufRead> NetworkParser<R> {
self.eat(Token::MTU)?;
let mtu = self.next_text()?;
-let mtu = match mtu.parse::<u64>() {
+let mtu = match u64::from_str_radix(&mtu, 10) {
Ok(mtu) => mtu,
Err(err) => {
bail!("unable to parse mtu value '{}' - {}", mtu, err);


@ -1,57 +0,0 @@
-use std::collections::HashMap;
-use anyhow::Error;
-use lazy_static::lazy_static;
-use proxmox_schema::*;
-use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
-use pbs_api_types::{PruneJobConfig, JOB_ID_SCHEMA};
-use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
-lazy_static! {
-pub static ref CONFIG: SectionConfig = init();
-}
-fn init() -> SectionConfig {
-const OBJ_SCHEMA: &AllOfSchema = PruneJobConfig::API_SCHEMA.unwrap_all_of_schema();
-let plugin =
-SectionConfigPlugin::new("prune".to_string(), Some(String::from("id")), OBJ_SCHEMA);
-let mut config = SectionConfig::new(&JOB_ID_SCHEMA);
-config.register_plugin(plugin);
-config
-}
-pub const PRUNE_CFG_FILENAME: &str = "/etc/proxmox-backup/prune.cfg";
-pub const PRUNE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.prune.lck";
-/// Get exclusive lock
-pub fn lock_config() -> Result<BackupLockGuard, Error> {
-open_backup_lockfile(PRUNE_CFG_LOCKFILE, None, true)
-}
-pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
-let content = proxmox_sys::fs::file_read_optional_string(PRUNE_CFG_FILENAME)?;
-let content = content.unwrap_or_default();
-let digest = openssl::sha::sha256(content.as_bytes());
-let data = CONFIG.parse(PRUNE_CFG_FILENAME, &content)?;
-Ok((data, digest))
-}
-pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-let raw = CONFIG.write(PRUNE_CFG_FILENAME, config)?;
-replace_backup_config(PRUNE_CFG_FILENAME, raw.as_bytes())
-}
-// shell completion helper
-pub fn complete_prune_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
-match config() {
-Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
-Err(_) => return vec![],
-}
-}
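This module (master-side only) stores prune jobs in /etc/proxmox-backup/prune.cfg using the standard PBS section-config layout, with `prune` as the section type and the job ID as the section name. Based on the `PruneJobConfig` fields shown earlier, an entry would plausibly look like the following (illustrative values, not taken from this diff):

```
prune: daily-ns1
	store datastore1
	schedule daily
	ns ns1
	keep-last 3
	keep-weekly 4
	max-depth 2
	comment Keep a handful of recent snapshots
```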


@ -90,7 +90,7 @@ pub fn cached_config() -> Result<Arc<SectionConfigData>, Error> {
let stat = match nix::sys::stat::stat(USER_CFG_FILENAME) {
Ok(stat) => Some(stat),
-Err(nix::errno::Errno::ENOENT) => None,
+Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => None,
Err(err) => bail!("unable to stat '{}' - {}", USER_CFG_FILENAME, err),
};


@ -41,7 +41,7 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(VERIFICATION_CFG_FILENAME)?;
-let content = content.unwrap_or_default();
+let content = content.unwrap_or_else(String::new);
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(VERIFICATION_CFG_FILENAME, &content)?;


@ -15,7 +15,7 @@ hex = { version = "0.4.3", features = [ "serde" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.24"
nix = "0.19.1"
openssl = "0.10"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@ -32,7 +32,7 @@ proxmox-lang = "1.1"
proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
proxmox-time = "1"
proxmox-uuid = "1"
proxmox-sys = "0.3"
proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }


@ -147,7 +147,7 @@ impl BackupGroup {
/* close else this leaks! */
nix::unistd::close(rawfd)?;
}
-Err(nix::errno::Errno::ENOENT) => {
+Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
return Ok(());
}
Err(err) => {


@ -153,12 +153,7 @@ impl ChunkStore {
lockfile_path
}
-/// Opens the chunk store with a new process locker.
-///
-/// Note that this must be used with care, as it's dangerous to create two instances on the
-/// same base path, as closing the underlying ProcessLocker drops all locks from this process
-/// on the lockfile (even if separate FDs)
-pub(crate) fn open<P: Into<PathBuf>>(name: &str, base: P) -> Result<Self, Error> {
+pub fn open<P: Into<PathBuf>>(name: &str, base: P) -> Result<Self, Error> {
let base: PathBuf = base.into();
if !base.is_absolute() {
@ -226,7 +221,7 @@ impl ChunkStore {
})?;
if let Err(err) = res {
-if !assert_exists && err == nix::errno::Errno::ENOENT {
+if !assert_exists && err.as_errno() == Some(nix::errno::Errno::ENOENT) {
return Ok(false);
}
bail!("update atime failed for chunk/file {path:?} - {err}");
@ -309,7 +304,7 @@ impl ChunkStore {
// start reading:
continue;
}
-Err(ref err) if err == &nix::errno::Errno::ENOENT => {
+Err(ref err) if err.as_errno() == Some(nix::errno::Errno::ENOENT) => {
// non-existing directories are okay, just keep going:
continue;
}


@ -21,6 +21,7 @@ use pbs_api_types::{
Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning,
GarbageCollectionStatus, HumanByte, Operation, UPID,
};
+use pbs_config::ConfigVersionCache;
use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore;
@ -58,20 +59,22 @@ pub struct DataStoreImpl {
last_gc_status: Mutex<GarbageCollectionStatus>,
verify_new: bool,
chunk_order: ChunkOrder,
-last_digest: Option<[u8; 32]>,
+last_generation: usize,
+last_update: i64,
}
impl DataStoreImpl {
// This one just panics on everything
#[doc(hidden)]
-pub(crate) unsafe fn new_test() -> Arc<Self> {
+pub unsafe fn new_test() -> Arc<Self> {
Arc::new(Self {
chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }),
gc_mutex: Mutex::new(()),
last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
verify_new: false,
chunk_order: ChunkOrder::None,
-last_digest: None,
+last_generation: 0,
+last_update: 0,
})
}
}
@ -111,7 +114,7 @@ impl Drop for DataStore {
impl DataStore {
// This one just panics on everything
#[doc(hidden)]
-pub(crate) unsafe fn new_test() -> Arc<Self> {
+pub unsafe fn new_test() -> Arc<Self> {
Arc::new(Self {
inner: unsafe { DataStoreImpl::new_test() },
operation: None,
@ -122,9 +125,11 @@ impl DataStore {
name: &str,
operation: Option<Operation>,
) -> Result<Arc<DataStore>, Error> {
-// we could use the ConfigVersionCache's generation for staleness detection, but we load
-// the config anyway -> just use digest, additional benefit: manual changes get detected
-let (config, digest) = pbs_config::datastore::config()?;
+let version_cache = ConfigVersionCache::new()?;
+let generation = version_cache.datastore_generation();
+let now = proxmox_time::epoch_i64();
+let (config, _digest) = pbs_config::datastore::config()?;
let config: DataStoreConfig = config.lookup("datastore", name)?;
if let Some(maintenance_mode) = config.get_maintenance_mode() {
@ -137,27 +142,23 @@ impl DataStore {
update_active_operations(name, operation, 1)?;
}
-let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
-let entry = datastore_cache.get(name);
+let mut map = DATASTORE_MAP.lock().unwrap();
+let entry = map.get(name);
-// reuse chunk store so that we keep using the same process locker instance!
-let chunk_store = if let Some(datastore) = &entry {
-let last_digest = datastore.last_digest.as_ref();
-if let Some(true) = last_digest.map(|last_digest| last_digest == &digest) {
+if let Some(datastore) = &entry {
+if datastore.last_generation == generation && now < (datastore.last_update + 60) {
return Ok(Arc::new(Self {
inner: Arc::clone(datastore),
operation,
}));
}
-Arc::clone(&datastore.chunk_store)
-} else {
-Arc::new(ChunkStore::open(name, &config.path)?)
-};
+}
-let datastore = DataStore::with_store_and_config(chunk_store, config, Some(digest))?;
+let chunk_store = ChunkStore::open(name, &config.path)?;
+let datastore = DataStore::with_store_and_config(chunk_store, config, generation, now)?;
let datastore = Arc::new(datastore);
-datastore_cache.insert(name.to_string(), datastore.clone());
+map.insert(name.to_string(), datastore.clone());
Ok(Arc::new(Self {
inner: datastore,
@ -176,9 +177,6 @@ impl DataStore {
}
/// Open a raw database given a name and a path.
-///
-/// # Safety
-/// See the safety section in `open_from_config`
pub unsafe fn open_path(
name: &str,
path: impl AsRef<Path>,
@ -193,26 +191,14 @@ impl DataStore {
}
/// Open a datastore given a raw configuration.
-///
-/// # Safety
-/// There's no memory safety implication, but as this is opening a new ChunkStore it will
-/// create a new process locker instance, potentially on the same path as existing safely
-/// created ones. This is dangerous as dropping the reference of this and thus the underlying
-/// chunkstore's process locker will close all locks from our process on the config.path,
-/// breaking guarantees we need to uphold for safe long backup + GC interaction on newer/older
-/// process instances (from package update).
-unsafe fn open_from_config(
+pub unsafe fn open_from_config(
config: DataStoreConfig,
operation: Option<Operation>,
) -> Result<Arc<Self>, Error> {
let name = config.name.clone();
let chunk_store = ChunkStore::open(&name, &config.path)?;
-let inner = Arc::new(Self::with_store_and_config(
-Arc::new(chunk_store),
-config,
-None,
-)?);
+let inner = Arc::new(Self::with_store_and_config(chunk_store, config, 0, 0)?);
if let Some(operation) = operation {
update_active_operations(&name, operation, 1)?;
@ -222,9 +208,10 @@ impl DataStore {
}
fn with_store_and_config(
-chunk_store: Arc<ChunkStore>,
+chunk_store: ChunkStore,
config: DataStoreConfig,
-last_digest: Option<[u8; 32]>,
+last_generation: usize,
+last_update: i64,
) -> Result<DataStoreImpl, Error> {
let mut gc_status_path = chunk_store.base_path();
gc_status_path.push(".gc-status");
@ -248,12 +235,13 @@ impl DataStore {
let chunk_order = tuning.chunk_order.unwrap_or(ChunkOrder::Inode);
Ok(DataStoreImpl {
-chunk_store,
+chunk_store: Arc::new(chunk_store),
gc_mutex: Mutex::new(()),
last_gc_status: Mutex::new(gc_status),
verify_new: config.verify_new.unwrap_or(false),
chunk_order,
-last_digest,
+last_generation,
+last_update,
})
}
@ -445,7 +433,7 @@ impl DataStore {
ty_dir.push(ty.to_string());
// best effort only, but we probably should log the error
if let Err(err) = unlinkat(Some(base_fd), &ty_dir, UnlinkatFlags::RemoveDir) {
-if err != nix::errno::Errno::ENOENT {
+if err.as_errno() != Some(nix::errno::Errno::ENOENT) {
log::error!("failed to remove backup type {ty} in {ns} - {err}");
}
}
@ -482,7 +470,7 @@ impl DataStore {
.recursive_iter_backup_ns(ns.to_owned())?
.collect::<Result<Vec<BackupNamespace>, Error>>()?;
-children.sort_by_key(|b| std::cmp::Reverse(b.depth()));
+children.sort_by(|a, b| b.depth().cmp(&a.depth()));
let base_file = std::fs::File::open(self.base_path())?;
let base_fd = base_file.as_raw_fd();
@ -495,10 +483,10 @@ impl DataStore {
if !ns.is_root() {
match unlinkat(Some(base_fd), &ns.path(), UnlinkatFlags::RemoveDir) {
Ok(()) => log::debug!("removed namespace {ns}"),
-Err(nix::errno::Errno::ENOENT) => {
+Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
log::debug!("namespace {ns} already removed")
}
-Err(nix::errno::Errno::ENOTEMPTY) if !delete_groups => {
+Err(nix::Error::Sys(nix::errno::Errno::ENOTEMPTY)) if !delete_groups => {
removed_all_requested = false;
log::debug!("skip removal of non-empty namespace {ns}")
}
@ -994,10 +982,8 @@ impl DataStore {
.oldest_writer()
.unwrap_or(phase1_start_time);
-let mut gc_status = GarbageCollectionStatus {
-upid: Some(upid.to_string()),
-..Default::default()
-};
+let mut gc_status = GarbageCollectionStatus::default();
+gc_status.upid = Some(upid.to_string());
task_log!(worker, "Start GC phase1 (mark used chunks)");
@ -1154,8 +1140,8 @@ impl DataStore {
self.inner.verify_new
}
-/// returns a list of chunks sorted by their inode number on disk chunks that couldn't get
-/// stat'ed are placed at the end of the list
+/// returns a list of chunks sorted by their inode number on disk
+/// chunks that could not be stat'ed are at the end of the list
pub fn get_chunks_in_order<F, A>(
&self,
index: &Box<dyn IndexFile + Send>,


@ -373,14 +373,14 @@ impl DynamicIndexWriter {
);
}
-let offset_le: [u8; 8] = offset.to_le().to_ne_bytes();
+let offset_le: &[u8; 8] = unsafe { &std::mem::transmute::<u64, [u8; 8]>(offset.to_le()) };
if let Some(ref mut csum) = self.csum {
-csum.update(&offset_le);
+csum.update(offset_le);
csum.update(digest);
}
-self.writer.write_all(&offset_le)?;
+self.writer.write_all(offset_le)?;
self.writer.write_all(digest)?;
Ok(())
}


@ -196,7 +196,7 @@ impl Iterator for ListNamespaces {
let ns_dirfd = match proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path) {
Ok(dirfd) => dirfd,
-Err(nix::errno::Errno::ENOENT) => return None,
+Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => return None,
Err(err) => return Some(Err(err.into())),
};
// found a ns directory, descend into it to scan all its namespaces


@ -3,7 +3,7 @@ use std::path::PathBuf;
use anyhow::Error;
-use pbs_api_types::KeepOptions;
+use pbs_api_types::PruneOptions;
use super::BackupInfo;
@ -103,10 +103,81 @@ fn remove_incomplete_snapshots(mark: &mut HashMap<PathBuf, PruneMark>, list: &[B
}
}
+/// This filters incomplete and kept backups.
+pub fn keeps_something(options: &PruneOptions) -> bool {
+let mut keep_something = false;
+if let Some(count) = options.keep_last {
+if count > 0 {
+keep_something = true;
+}
+}
+if let Some(count) = options.keep_hourly {
+if count > 0 {
+keep_something = true;
+}
+}
+if let Some(count) = options.keep_daily {
+if count > 0 {
+keep_something = true;
+}
+}
+if let Some(count) = options.keep_weekly {
+if count > 0 {
+keep_something = true;
+}
+}
+if let Some(count) = options.keep_monthly {
+if count > 0 {
+keep_something = true;
+}
+}
+if let Some(count) = options.keep_yearly {
+if count > 0 {
+keep_something = true;
+}
+}
+keep_something
+}
+pub fn cli_options_string(options: &PruneOptions) -> String {
+let mut opts = Vec::new();
+if let Some(count) = options.keep_last {
+if count > 0 {
+opts.push(format!("--keep-last {}", count));
+}
+}
+if let Some(count) = options.keep_hourly {
+if count > 0 {
+opts.push(format!("--keep-hourly {}", count));
+}
+}
+if let Some(count) = options.keep_daily {
+if count > 0 {
+opts.push(format!("--keep-daily {}", count));
+}
+}
+if let Some(count) = options.keep_weekly {
+if count > 0 {
+opts.push(format!("--keep-weekly {}", count));
+}
+}
+if let Some(count) = options.keep_monthly {
+if count > 0 {
+opts.push(format!("--keep-monthly {}", count));
+}
+}
+if let Some(count) = options.keep_yearly {
+if count > 0 {
+opts.push(format!("--keep-yearly {}", count));
+}
+}
+opts.join(" ")
+}
pub fn compute_prune_info(
mut list: Vec<BackupInfo>,
-options: &KeepOptions,
+options: &PruneOptions,
) -> Result<Vec<(BackupInfo, PruneMark)>, Error> {
let mut mark = HashMap::new();


@ -8,7 +8,7 @@ use nix::dir::Dir;
use proxmox_sys::fs::lock_dir_noblock_shared;
-use pbs_api_types::{print_store_and_ns, BackupNamespace, Operation};
+use pbs_api_types::{BackupNamespace, DatastoreWithNamespace, Operation};
use crate::backup_info::BackupDir;
use crate::dynamic_index::DynamicIndexReader;
@ -39,6 +39,10 @@ impl SnapshotReader {
pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
let datastore = snapshot.datastore();
+let store_with_ns = DatastoreWithNamespace {
+store: datastore.name().to_owned(),
+ns: snapshot.backup_ns().clone(),
+};
let snapshot_path = snapshot.full_path();
let locked_dir =
@ -50,7 +54,7 @@ impl SnapshotReader {
Err(err) => {
bail!(
"manifest load error on {}, snapshot '{}' - {}",
-print_store_and_ns(datastore.name(), snapshot.backup_ns()),
+store_with_ns,
snapshot.dir(),
err
);
@ -60,7 +64,8 @@ impl SnapshotReader {
let mut client_log_path = snapshot_path;
client_log_path.push(CLIENT_LOG_BLOB_NAME);
-let mut file_list = vec![MANIFEST_BLOB_NAME.to_string()];
+let mut file_list = Vec::new();
+file_list.push(MANIFEST_BLOB_NAME.to_string());
for item in manifest.files() {
file_list.push(item.filename.clone());
}

View File

@ -10,10 +10,10 @@ anyhow = "1.0"
futures = "0.3"
lazy_static = "1.4"
libc = "0.2"
nix = "0.24"
nix = "0.19.1"
regex = "1.5"
tokio = { version = "1.6", features = [] }
proxmox-time = "1"
proxmox-fuse = "0.1.1"
proxmox-sys = "0.3"
proxmox-sys = "0.2"

View File

@ -330,7 +330,7 @@ fn unmap_from_backing(backing_file: &Path, loopdev: Option<&str>) -> Result<(),
// send SIGINT to trigger cleanup and exit in target process
match signal::kill(pid, Signal::SIGINT) {
Ok(()) => {}
Err(nix::errno::Errno::ESRCH) => {
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
emerg_cleanup(loopdev, backing_file.to_owned());
return Ok(());
}
@ -348,7 +348,7 @@ fn unmap_from_backing(backing_file: &Path, loopdev: Option<&str>) -> Result<(),
}
std::thread::sleep(std::time::Duration::from_millis(100));
}
Err(nix::errno::Errno::ESRCH) => {
Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
break;
}
Err(e) => return Err(e.into()),
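The same nix error-type change, applied to signal handling: `ESRCH` ("no such process") means the target already exited and is treated as success, not failure. A sketch assuming nix >= 0.23, where `kill()` returns `Result<(), Errno>`:

```rust
use nix::errno::Errno;
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;

// Ask a process to clean up and exit; Ok(true) = signal delivered,
// Ok(false) = process was already gone.
fn request_shutdown(pid: Pid) -> Result<bool, Errno> {
    match kill(pid, Signal::SIGINT) {
        Ok(()) => Ok(true),
        Err(Errno::ESRCH) => Ok(false), // target exited already
        Err(other) => Err(other),
    }
}
```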

View File

@ -12,7 +12,7 @@ anyhow = "1.0"
thiserror = "1.0"
endian_trait = { version = "0.6", features = ["arrays"] }
hex = "0.4.3"
nix = "0.24"
nix = "0.19.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bitflags = "1.2.1"
@ -28,7 +28,7 @@ proxmox-uuid = "1"
# router::cli is only used by binaries, so maybe we should split them out
proxmox-router = "1.2"
proxmox-sys = "0.3"
proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-config = { path = "../pbs-config" }

View File

@ -442,7 +442,7 @@ impl<'a, F: AsRawFd> SgRaw<'a, F> {
SCSI_PT_DO_TIMEOUT => return Err(format_err!("do_scsi_pt failed - timeout").into()),
code if code < 0 => {
let errno = unsafe { get_scsi_pt_os_err(ptvp.as_ptr()) };
let err = nix::errno::Errno::from_i32(errno);
let err = nix::Error::from_errno(nix::errno::Errno::from_i32(errno));
return Err(format_err!("do_scsi_pt failed with err {}", err).into());
}
unknown => {
@ -524,7 +524,7 @@ impl<'a, F: AsRawFd> SgRaw<'a, F> {
}
SCSI_PT_RESULT_OS_ERR => {
let errno = unsafe { get_scsi_pt_os_err(ptvp.as_ptr()) };
let err = nix::errno::Errno::from_i32(errno);
let err = nix::Error::from_errno(nix::errno::Errno::from_i32(errno));
return Err(format_err!("scsi command failed with err {}", err).into());
}
unknown => {
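The change here drops the `nix::Error::from_errno` wrapper because `Errno` itself is now the error type. For illustration only, std's `io::Error` renders the same strerror()-style message from a raw OS errno without any external crate:

```rust
// Convert a raw errno (as returned by a C helper) into a printable error.
fn scsi_os_error(errno: i32) -> std::io::Error {
    std::io::Error::from_raw_os_error(errno)
}

fn main() {
    // e.g. "scsi command failed with err Input/output error (os error 5)"
    println!("scsi command failed with err {}", scsi_os_error(5));
}
```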

View File

@ -19,7 +19,7 @@ hex = "0.4.3"
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.24"
nix = "0.19.1"
nom = "5.1"
openssl = "0.10"
percent-encoding = "2.1"
@ -38,7 +38,7 @@ proxmox-borrow = "1"
proxmox-io = { version = "1", features = [ "tokio" ] }
proxmox-lang = { version = "1.1" }
proxmox-time = { version = "1" }
proxmox-sys = "0.3"
proxmox-sys = "0.2"
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-api-types = { path = "../pbs-api-types" }

View File

@ -19,7 +19,7 @@ pub fn render_backup_file_list<S: Borrow<str>>(files: &[S]) -> String {
.map(|v| strip_server_file_extension(v.borrow()))
.collect();
files.sort_unstable();
files.sort();
files.join(" ")
}
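`sort_unstable` skips the allocation that the stable merge sort needs, and is the usual choice when equal elements need no order guarantee, as with a list of distinct file names. A tiny sketch of the same render-and-join pattern:

```rust
fn render_list(mut files: Vec<&str>) -> String {
    files.sort_unstable(); // no stability needed for unique file names
    files.join(" ")
}

fn main() {
    assert_eq!(render_list(vec!["b.pxar", "a.blob"]), "a.blob b.pxar");
}
```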

View File

@ -5,5 +5,4 @@ authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
[dependencies]
anyhow = "1"
nix = "0.24"
nix = "0.19.1"

View File

@ -1,29 +1,12 @@
use anyhow::{format_err, Error};
use std::fmt::Write;
use std::fs;
use std::net::ToSocketAddrs;
use std::os::unix::prelude::OsStrExt;
use nix::sys::utsname::uname;
fn nodename() -> Result<String, Error> {
let uname = uname().map_err(|err| format_err!("uname() failed - {err}"))?; // save on stack to avoid to_owned() allocation below
std::str::from_utf8(uname.nodename().as_bytes())?
.split('.')
.next()
.ok_or_else(|| format_err!("Failed to split FQDN to get hostname"))
.map(|s| s.to_owned())
}
fn main() {
let nodename = match nodename() {
Ok(value) => value,
Err(err) => {
eprintln!("Failed to retrieve hostname: {err}");
"INVALID".to_string()
}
};
let uname = uname(); // save on stack to avoid to_owned() allocation below
let nodename = uname.nodename().split('.').next().unwrap();
let addr = format!("{}:8007", nodename);
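The master side makes the hostname lookup fallible instead of calling `unwrap()` on the split. A std-only sketch of the FQDN-shortening step itself (`short_hostname` is an illustrative helper, not the project's API):

```rust
// Take everything before the first dot of the node's FQDN.
fn short_hostname(fqdn: &str) -> Option<&str> {
    fqdn.split('.').next()
}

fn main() {
    let node = short_hostname("pbs.example.com").unwrap_or("INVALID");
    println!("{}:8007", node); // -> pbs:8007
}
```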

View File

@ -9,13 +9,13 @@ anyhow = "1.0"
futures = "0.3"
hyper = { version = "0.14", features = [ "full" ] }
libc = "0.2"
nix = "0.24"
nix = "0.19.1"
openssl = "0.10"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.6", features = [ "rt", "rt-multi-thread" ] }
tokio-stream = "0.1.0"
tokio-util = { version = "0.7", features = [ "codec", "io" ] }
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
xdg = "2.2"
zstd = { version = "0.6", features = [ "bindgen" ] }
@ -27,7 +27,7 @@ proxmox-io = "1.0.1"
proxmox-router = { version = "1.2", features = [ "cli" ] }
proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
proxmox-time = "1"
proxmox-sys = { version = "0.3", features = [ "sortable-macro" ] }
proxmox-sys = { version = "0.2.1", features = [ "sortable-macro" ] }
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }

View File

@ -163,7 +163,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
let path = required_string_param(&param, "snapshot")?;
let archive_name = required_string_param(&param, "archive-name")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;
let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
let crypto = crypto_parameters(&param)?;

View File

@ -24,7 +24,7 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_api_types::{
Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
Fingerprint, GroupListItem, HumanByte, PruneJobOptions, PruneListItem, RateLimitConfig,
Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions, RateLimitConfig,
SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
};
@ -176,7 +176,7 @@ pub async fn dir_or_last_from_group(
match path.parse::<BackupPart>()? {
BackupPart::Dir(dir) => Ok(dir),
BackupPart::Group(group) => {
api_datastore_latest_snapshot(client, repo.store(), ns, group).await
api_datastore_latest_snapshot(&client, repo.store(), ns, group).await
}
}
}
@ -1245,7 +1245,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
let ns = optional_ns_param(&param)?;
let path = json::required_string_param(&param, "snapshot")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &ns, path).await?;
let backup_dir = dir_or_last_from_group(&client, &repo, &ns, &path).await?;
let target = json::required_string_param(&param, "target")?;
let target = if target == "-" { None } else { Some(target) };
@ -1417,8 +1417,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
type: String,
description: "Backup group",
},
ns: {
type: BackupNamespace,
optional: true,
},
"prune-options": {
type: PruneJobOptions,
type: PruneOptions,
flatten: true,
},
"output-format": {
@ -1442,11 +1446,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
async fn prune(
dry_run: Option<bool>,
group: String,
prune_options: PruneJobOptions,
prune_options: PruneOptions,
quiet: bool,
mut param: Value,
) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let ns = optional_ns_param(&param)?;
let client = connect(&repo)?;
@ -1461,6 +1466,9 @@ async fn prune(
api_param["dry-run"] = dry_run.into();
}
merge_group_into(api_param.as_object_mut().unwrap(), group);
if !ns.is_root() {
api_param["ns"] = serde_json::to_value(ns)?;
}
let mut result = client.post(&path, Some(api_param)).await?;

View File

@ -205,7 +205,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
let backup_ns = optional_ns_param(&param)?;
let path = required_string_param(&param, "snapshot")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;
let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let crypt_config = match keyfile {

View File

@ -9,7 +9,7 @@ anyhow = "1.0"
base64 = "0.13"
futures = "0.3"
libc = "0.2"
nix = "0.24"
nix = "0.19.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.6", features = [ "io-std", "rt", "rt-multi-thread", "time" ] }
@ -23,7 +23,7 @@ proxmox-router = { version = "1.2", features = [ "cli" ] }
proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
proxmox-time = "1"
proxmox-uuid = "1"
proxmox-sys = "0.3"
proxmox-sys = "0.2"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }

View File

@ -204,6 +204,7 @@ pub fn complete_block_driver_ids<S: BuildHasher>(
ALL_DRIVERS
.iter()
.map(BlockDriverType::resolve)
.flat_map(|d| d.list())
.map(|d| d.list())
.flatten()
.collect()
}
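This is the clippy-suggested `flat_map` rewrite; both spellings produce the same sequence, as the sketch below demonstrates:

```rust
fn main() {
    let drivers = vec![vec!["qemu"], vec!["vm", "ct"]];
    // flat_map(f) is equivalent to map(f).flatten()
    let a: Vec<_> = drivers.iter().flat_map(|d| d.iter()).collect();
    let b: Vec<_> = drivers.iter().map(|d| d.iter()).flatten().collect();
    assert_eq!(a, b);
}
```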

View File

@ -19,7 +19,7 @@ hyper = { version = "0.14.5", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.24"
nix = "0.19.1"
once_cell = "1.3.1"
percent-encoding = "2.1"
regex = "1.5"
@ -40,4 +40,4 @@ proxmox-http = { version = "0.6", features = [ "client" ] }
proxmox-router = "1.2"
proxmox-schema = { version = "1.3.1", features = [ "api-macro", "upid-api-impl" ] }
proxmox-time = "1"
proxmox-sys = { version = "0.3", features = [ "logrotate" ] }
proxmox-sys = "0.2"

View File

@ -262,11 +262,13 @@ pub fn rotate_task_log_archive(
}
}
}
} else if let Err(err) = std::fs::remove_file(&file_name) {
} else {
if let Err(err) = std::fs::remove_file(&file_name) {
log::error!("could not remove {:?}: {}", file_name, err);
}
}
}
}
Ok(rotated)
}
@ -964,7 +966,7 @@ impl WorkerTask {
/// Set progress indicator
pub fn progress(&self, progress: f64) {
if (0.0..=1.0).contains(&progress) {
if progress >= 0.0 && progress <= 1.0 {
let mut data = self.data.lock().unwrap();
data.progress = progress;
} else {
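The progress check is rewritten with `RangeInclusive::contains`, which states the bounds once instead of repeating the variable. A quick equivalence check:

```rust
fn main() {
    for progress in [-0.1_f64, 0.4, 1.0, 1.5] {
        assert_eq!(
            (0.0..=1.0).contains(&progress),
            progress >= 0.0 && progress <= 1.0
        );
    }
}
```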

View File

@ -15,13 +15,13 @@ hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.24"
nix = "0.19.1"
regex = "1.5"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.6", features = ["parking_lot", "sync"] }
tokio-stream = "0.1.0"
tokio-util = { version = "0.7", features = [ "codec", "io" ] }
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
@ -31,7 +31,7 @@ proxmox-compression = "0.1.1"
proxmox-router = { version = "1.2", features = [ "cli" ] }
proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
proxmox-time = "1"
proxmox-sys = { version = "0.3", features = [ "sortable-macro" ] }
proxmox-sys = { version = "0.2", features = [ "sortable-macro" ] }
pbs-api-types = { path = "../pbs-api-types" }
pbs-tools = { path = "../pbs-tools" }

View File

@ -171,7 +171,7 @@ fn get_vsock_fd() -> Result<RawFd, Error> {
None,
)?;
let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
bind(sock_fd, &sock_addr)?;
bind(sock_fd, &SockAddr::Vsock(sock_addr))?;
listen(sock_fd, MAX_PENDING)?;
Ok(sock_fd)
}

View File

@ -107,14 +107,14 @@ impl Bucket {
Bucket::RawFs(_) => ty == "raw",
Bucket::ZPool(data) => {
if let Some(ref comp) = comp.get(0) {
ty == "zpool" && comp.as_ref() == data.name
ty == "zpool" && comp.as_ref() == &data.name
} else {
false
}
}
Bucket::LVM(data) => {
if let (Some(ref vg), Some(ref lv)) = (comp.get(0), comp.get(1)) {
ty == "lvm" && vg.as_ref() == data.vg_name && lv.as_ref() == data.lv_name
ty == "lvm" && vg.as_ref() == &data.vg_name && lv.as_ref() == &data.lv_name
} else {
false
}
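The borrow removals here rely on std's cross-type `PartialEq` impls between `String` and `&str`, so `comp.as_ref() == data.name` compiles without the explicit `&`:

```rust
fn main() {
    let name = String::from("tank");
    let comp: &str = "tank";
    assert!(comp == name); // &str == String via PartialEq<String> for &str
    assert!(name == comp); // String == &str works the same way
}
```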
@ -336,8 +336,8 @@ impl Filesystems {
info!("mounting '{}' succeeded, fstype: '{}'", source, fs);
return Ok(());
}
Err(nix::errno::Errno::EINVAL) => {}
Err(nix::errno::Errno::EBUSY) => return Ok(()),
Err(nix::Error::Sys(nix::errno::Errno::EINVAL)) => {}
Err(nix::Error::Sys(nix::errno::Errno::EBUSY)) => return Ok(()),
Err(err) => {
warn!("mount error on '{}' ({}) - {}", source, fs, err);
}
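The mount loop treats `EINVAL` as "wrong filesystem type, try the next candidate" and `EBUSY` as "already mounted, done". A hedged sketch of that control flow; `try_mount` is a hypothetical stand-in for the real `nix::mount::mount()` call:

```rust
use nix::errno::Errno;

// Probe a list of candidate fstypes until one mounts successfully.
fn probe_fs(source: &str, candidates: &[&str]) -> Result<(), Errno> {
    for fs in candidates {
        match try_mount(source, fs) {
            Ok(()) => return Ok(()),
            Err(Errno::EINVAL) => continue,     // wrong fstype, keep probing
            Err(Errno::EBUSY) => return Ok(()), // already mounted
            Err(other) => return Err(other),
        }
    }
    Err(Errno::EINVAL)
}

// Placeholder so the sketch compiles; the real code calls nix::mount::mount().
fn try_mount(_source: &str, _fs: &str) -> Result<(), Errno> {
    Err(Errno::EINVAL)
}
```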

View File

@ -14,7 +14,7 @@ bitflags = "1.2.1"
crossbeam-channel = "0.5"
libc = "0.2"
log = "0.4.17"
nix = "0.24"
nix = "0.19.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_cbor = "0.11.1"
@ -22,4 +22,4 @@ serde_cbor = "0.11.1"
#proxmox = { version = "0.15.3" }
proxmox-time = "1"
proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
proxmox-sys = "0.3"
proxmox-sys = "0.2"

View File

@ -11,7 +11,7 @@ path = "src/main.rs"
[dependencies]
anyhow = "1.0"
futures = "0.3"
nix = "0.24"
nix = "0.19.1"
serde_json = "1.0"
tokio = { version = "1.6", features = [ "rt", "rt-multi-thread" ] }
@ -20,7 +20,7 @@ pathpatterns = "0.1.2"
proxmox-async = "0.4"
proxmox-schema = { version = "1.3.1", features = [ "api-macro" ] }
proxmox-router = "1.2"
proxmox-sys = "0.3"
proxmox-sys = "0.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
pbs-client = { path = "../pbs-client" }

View File

@ -147,7 +147,7 @@ fn extract_archive(
feature_flags.remove(Flags::WITH_SOCKETS);
}
let pattern = pattern.unwrap_or_default();
let pattern = pattern.unwrap_or_else(Vec::new);
let target = target.as_ref().map_or_else(|| ".", String::as_str);
let mut match_list = Vec::new();
@ -297,7 +297,7 @@ async fn create_archive(
entries_max: isize,
) -> Result<(), Error> {
let patterns = {
let input = exclude.unwrap_or_default();
let input = exclude.unwrap_or_else(Vec::new);
let mut patterns = Vec::with_capacity(input.len());
for entry in input {
patterns.push(
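`unwrap_or_default()` is the idiomatic spelling of `unwrap_or_else(Vec::new)` for any `Default` type; the two are interchangeable:

```rust
fn main() {
    let exclude: Option<Vec<String>> = None;
    let a = exclude.clone().unwrap_or_default();
    let b = exclude.unwrap_or_else(Vec::new);
    assert_eq!(a, b);
}
```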

View File

@ -161,7 +161,7 @@ impl AcmeClient {
let mut data = Vec::<u8>::new();
self.write_to(&mut data)?;
let account_path = self.account_path.as_ref().ok_or_else(|| {
format_err!("no account path set, cannot save updated account information")
format_err!("no account path set, cannot save upated account information")
})?;
crate::config::acme::make_acme_account_dir()?;
replace_file(

View File

@ -32,9 +32,9 @@ use pxar::accessor::aio::Accessor;
use pxar::EntryKind;
use pbs_api_types::{
print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
print_ns_and_snapshot, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode,
DataStoreListItem, DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus,
GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
@ -63,8 +63,8 @@ use proxmox_rest_server::{formatter, WorkerTask};
use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
ListAccessibleBackupGroups, NS_PRIVS_OK,
verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
ListAccessibleBackupGroups,
};
use crate::server::jobstate::Job;
@ -81,6 +81,38 @@ fn get_group_note_path(
note_path
}
// TODO: move somewhere we can reuse it from (namespace has its own copy atm.)
fn get_ns_privs(store: &str, ns: &BackupNamespace, auth_id: &Authid) -> Result<u64, Error> {
let user_info = CachedUserInfo::new()?;
Ok(if ns.is_root() {
user_info.lookup_privs(auth_id, &["datastore", store])
} else {
user_info.lookup_privs(auth_id, &["datastore", store, &ns.to_string()])
})
}
// asserts that either `full_access_privs` or `partial_access_privs` is fulfilled;
// the returned value indicates whether further checks like group ownership are required
fn check_ns_privs(
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
full_access_privs: u64,
partial_access_privs: u64,
) -> Result<bool, Error> {
let privs = get_ns_privs(store, ns, auth_id)?;
if full_access_privs != 0 && (privs & full_access_privs) != 0 {
return Ok(false);
}
if partial_access_privs != 0 && (privs & partial_access_privs) != 0 {
return Ok(true);
}
proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}
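The two-tier check above distinguishes full access (no further checks needed) from partial access (owner checks still required). A self-contained sketch with made-up privilege bits; the real constants live in `pbs_api_types`:

```rust
// Hypothetical bit values for illustration only.
const PRIV_DATASTORE_MODIFY: u64 = 1 << 0;
const PRIV_DATASTORE_BACKUP: u64 = 1 << 1;

/// Ok(false) = full access, Ok(true) = limited access (check ownership too).
fn check_privs(privs: u64, full: u64, partial: u64) -> Result<bool, &'static str> {
    if full != 0 && privs & full != 0 {
        return Ok(false);
    }
    if partial != 0 && privs & partial != 0 {
        return Ok(true);
    }
    Err("permission check failed")
}

fn main() {
    assert_eq!(
        check_privs(PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_BACKUP),
        Ok(false)
    );
    assert_eq!(
        check_privs(PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_BACKUP),
        Ok(true)
    );
}
```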
// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
@ -94,12 +126,12 @@ fn check_privs_and_load_store(
operation: Option<Operation>,
backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;
let limited = check_ns_privs(store, ns, auth_id, full_access_privs, partial_access_privs)?;
let datastore = DataStore::lookup_datastore(store, operation)?;
let datastore = DataStore::lookup_datastore(&store, operation)?;
if limited {
let owner = datastore.get_owner(ns, backup_group)?;
let owner = datastore.get_owner(&ns, backup_group)?;
check_backup_owner(&owner, &auth_id)?;
}
@ -182,9 +214,9 @@ pub fn list_groups(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let list_all = !check_ns_privs_full(
let ns = ns.unwrap_or_default();
let list_all = !check_ns_privs(
&store,
&ns,
&auth_id,
@ -193,6 +225,10 @@ pub fn list_groups(
)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: ns.clone(),
};
datastore
.iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
@ -205,7 +241,7 @@ pub fn list_groups(
eprintln!(
"Failed to get owner of group '{}' in {} - {}",
group.group(),
print_store_and_ns(&store, &ns),
store_with_ns,
err
);
return Ok(group_info);
@ -281,6 +317,7 @@ pub fn delete_group(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
@ -330,6 +367,7 @@ pub fn list_snapshot_files(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
@ -380,8 +418,8 @@ pub fn delete_snapshot(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store,
&ns,
@ -439,7 +477,7 @@ pub fn list_snapshots(
let ns = ns.unwrap_or_default();
let list_all = !check_ns_privs_full(
let list_all = !check_ns_privs(
&store,
&ns,
&auth_id,
@ -448,25 +486,29 @@ pub fn list_snapshots(
)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: ns.clone(),
};
// FIXME: filter also owner before collecting, for doing that nicely the owner should move into
// backup group and provide an error free (Err -> None) accessor
let groups = match (backup_type, backup_id) {
(Some(backup_type), Some(backup_id)) => {
vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
vec![datastore.backup_group_from_parts(ns, backup_type, backup_id)]
}
// FIXME: Recursion
(Some(backup_type), None) => datastore
.iter_backup_groups_ok(ns.clone())?
.iter_backup_groups_ok(ns)?
.filter(|group| group.backup_type() == backup_type)
.collect(),
// FIXME: Recursion
(None, Some(backup_id)) => datastore
.iter_backup_groups_ok(ns.clone())?
.iter_backup_groups_ok(ns)?
.filter(|group| group.backup_id() == backup_id)
.collect(),
// FIXME: Recursion
(None, None) => datastore.list_backup_groups(ns.clone())?,
(None, None) => datastore.list_backup_groups(ns)?,
};
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
@ -547,8 +589,8 @@ pub fn list_snapshots(
Err(err) => {
eprintln!(
"Failed to get owner of group '{}' in {} - {}",
&store_with_ns,
group.group(),
print_store_and_ns(&store, &ns),
err
);
return Ok(snapshots);
@ -573,22 +615,16 @@ pub fn list_snapshots(
fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result<Counts, Error> {
let root_ns = Default::default();
ListAccessibleBackupGroups::new_with_privs(
store,
root_ns,
MAX_NAMESPACE_DEPTH,
Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
None,
owner,
)?
.try_fold(Counts::default(), |mut counts, group| {
ListAccessibleBackupGroups::new(store, root_ns, MAX_NAMESPACE_DEPTH, owner)?.try_fold(
Counts::default(),
|mut counts, group| {
let group = match group {
Ok(group) => group,
Err(_) => return Ok(counts), // TODO: add this as error counts?
};
let snapshot_count = group.list_backups()?.len() as u64;
// only include groups with snapshots, counting/displaying empty groups can confuse
// only include groups with snapshots, counting/displaying emtpy groups can confuse
if snapshot_count > 0 {
let type_count = match group.backup_type() {
BackupType::Ct => counts.ct.get_or_insert(Default::default()),
@ -601,7 +637,8 @@ fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result
}
Ok(counts)
})
},
)
}
#[api(
@ -623,9 +660,8 @@ fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result
type: DataStoreStatus,
},
access: {
permission: &Permission::Anybody,
description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
the full statistics. Counts of accessible groups are always returned, if any",
permission: &Permission::Privilege(
&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
},
)]
/// Get datastore status.
@ -635,26 +671,13 @@ pub fn status(
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
let (counts, gc_status) = if verbose {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));
let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
true
} else if store_privs & PRIV_DATASTORE_READ != 0 {
false // allow at least counts, user can read groups anyway..
} else {
match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
// avoid leaking existence info if the user has no privileges at all below
Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
_ => false,
}
};
let datastore = datastore?; // only unwrap now to avoid leaking existence info
let (counts, gc_status) = if verbose {
let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
None
} else {
@ -662,34 +685,19 @@ pub fn status(
};
let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
let gc_status = if store_stats {
Some(datastore.last_gc_status())
} else {
None
};
let gc_status = Some(datastore.last_gc_status());
(counts, gc_status)
} else {
(None, None)
};
Ok(if store_stats {
let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
DataStoreStatus {
Ok(DataStoreStatus {
total: storage.total,
used: storage.used,
avail: storage.avail,
gc_status,
counts,
}
} else {
DataStoreStatus {
total: 0,
used: 0,
avail: 0,
gc_status,
counts,
}
})
}
@ -755,8 +763,7 @@ pub fn verify(
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let owner_check_required = check_ns_privs_full(
let owner_check_required = check_ns_privs(
&store,
&ns,
&auth_id,
@ -814,9 +821,9 @@ pub fn verify(
}
(None, None, None) => {
worker_id = if ns.is_root() {
store
store.clone()
} else {
format!("{}:{}", store, ns.display_as_path())
format!("{store}:{}", ns.display_as_path())
};
}
_ => bail!("parameters do not specify a backup group or snapshot"),
@ -887,6 +894,10 @@ pub fn verify(
#[api(
input: {
properties: {
ns: {
type: BackupNamespace,
optional: true,
},
group: {
type: pbs_api_types::BackupGroup,
flatten: true,
@ -897,17 +908,13 @@ pub fn verify(
default: false,
description: "Just show what prune would do, but do not delete anything.",
},
"keep-options": {
type: KeepOptions,
"prune-options": {
type: PruneOptions,
flatten: true,
},
store: {
schema: DATASTORE_SCHEMA,
},
ns: {
type: BackupNamespace,
optional: true,
},
},
},
returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
@ -919,11 +926,11 @@ pub fn verify(
)]
/// Prune a group on the datastore
pub fn prune(
ns: Option<BackupNamespace>,
group: pbs_api_types::BackupGroup,
dry_run: bool,
keep_options: KeepOptions,
prune_options: PruneOptions,
store: String,
ns: Option<BackupNamespace>,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
@ -938,19 +945,23 @@ pub fn prune(
Some(Operation::Write),
&group,
)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: ns.clone(),
};
let worker_id = format!("{}:{}:{}", store, ns, group);
let group = datastore.backup_group(ns.clone(), group);
let group = datastore.backup_group(ns, group);
let mut prune_result = Vec::new();
let list = group.list_backups()?;
let mut prune_info = compute_prune_info(list, &keep_options)?;
let mut prune_info = compute_prune_info(list, &prune_options)?;
prune_info.reverse(); // delete older snapshots first
let keep_all = !keep_options.keeps_something();
let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
if dry_run {
for (info, mark) in prune_info {
@ -978,17 +989,15 @@ pub fn prune(
if keep_all {
task_log!(worker, "No prune selection - keeping all files.");
} else {
let mut opts = Vec::new();
if !ns.is_root() {
opts.push(format!("--ns {ns}"));
}
crate::server::cli_keep_options(&mut opts, &keep_options);
task_log!(worker, "retention options: {}", opts.join(" "));
task_log!(
worker,
"retention options: {}",
pbs_datastore::prune::cli_options_string(&prune_options)
);
task_log!(
worker,
"Starting prune on {} group \"{}\"",
print_store_and_ns(&store, &ns),
store_with_ns,
group.group(),
);
}
@ -1039,54 +1048,52 @@ pub fn prune(
description: "Just show what prune would do, but do not delete anything.",
},
"prune-options": {
type: PruneJobOptions,
type: PruneOptions,
flatten: true,
},
store: {
schema: DATASTORE_SCHEMA,
},
ns: {
type: BackupNamespace,
optional: true,
},
},
},
returns: {
schema: UPID_SCHEMA,
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
permission: &Permission::Privilege(
&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
},
)]
/// Prune the datastore
pub fn prune_datastore(
dry_run: bool,
prune_options: PruneJobOptions,
prune_options: PruneOptions,
store: String,
ns: Option<BackupNamespace>,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let user_info = CachedUserInfo::new()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
user_info.check_privs(
&auth_id,
&prune_options.acl_path(&store),
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
true,
)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let ns = prune_options.ns.clone().unwrap_or_default();
let ns = ns.unwrap_or_default();
let worker_id = format!("{}:{}", store, ns);
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
// FIXME: add max-depth
let upid_str = WorkerTask::new_thread(
"prune",
Some(worker_id),
auth_id.to_string(),
to_stdout,
move |worker| {
crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
crate::server::prune_datastore(worker, auth_id, prune_options, datastore, ns, dry_run)
},
)?;
@ -1163,6 +1170,24 @@ pub fn garbage_collection_status(
Ok(status)
}
fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &CachedUserInfo) -> bool {
// NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
// below /datastore/{store}" helper
let mut iter =
if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
iter
} else {
return false;
};
let wanted =
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
let name = store.name();
iter.any(|ns| -> bool {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]);
user_privs & wanted != 0
})
}
#[api(
returns: {
description: "List the accessible datastores.",
@ -1187,14 +1212,15 @@ pub fn get_datastore_list(
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
let acl_path = &["datastore", store];
let user_privs = user_info.lookup_privs(&auth_id, acl_path);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
let mut allow_id = false;
if !allowed {
if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
allow_id = any_privs;
let scfg: pbs_api_types::DataStoreConfig = serde_json::from_value(data.to_owned())?;
// safety: we just cannot go through lookup as we must avoid an operation check
if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
}
}
@ -1249,6 +1275,10 @@ pub fn download_file(
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: backup_ns.clone(),
};
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store,
@ -1264,10 +1294,7 @@ pub fn download_file(
println!(
"Download {} from {} ({}/{})",
file_name,
print_store_and_ns(&store, &backup_ns),
backup_dir,
file_name
file_name, store_with_ns, backup_dir, file_name
);
let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
@ -1333,7 +1360,10 @@ pub fn download_file_decoded(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: backup_ns.clone(),
};
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store,
@ -1346,7 +1376,7 @@ pub fn download_file_decoded(
)?;
let file_name = required_string_param(&param, "file-name")?.to_owned();
let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
let (manifest, files) = read_backup_index(&backup_dir)?;
for file in files {
@ -1357,10 +1387,7 @@ pub fn download_file_decoded(
println!(
"Download {} from {} ({}/{})",
file_name,
print_store_and_ns(&store, &backup_ns),
backup_dir_api,
file_name
file_name, store_with_ns, backup_dir_api, file_name
);
let mut path = datastore.base_path();
@ -1463,7 +1490,10 @@ pub fn upload_backup_log(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: backup_ns.clone(),
};
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
@ -1475,7 +1505,7 @@ pub fn upload_backup_log(
Some(Operation::Write),
&backup_dir_api.group,
)?;
let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
let file_name = CLIENT_LOG_BLOB_NAME;
@ -1486,10 +1516,7 @@ pub fn upload_backup_log(
bail!("backup already contains a log.");
}
println!(
"Upload backup log to {} {backup_dir_api}/{file_name}",
print_store_and_ns(&store, &backup_ns),
);
println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");
let data = req_body
.map_err(Error::from)
@ -1544,7 +1571,6 @@ pub fn catalog(
) -> Result<Vec<ArchiveEntry>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store,
&ns,
@ -1624,7 +1650,6 @@ pub fn pxar_file_download(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let ns = optional_ns_param(&param)?;
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store,
@ -1832,7 +1857,6 @@ pub fn get_group_notes(
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store,
&ns,
@ -1880,7 +1904,6 @@ pub fn set_group_notes(
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store,
&ns,
@ -1926,7 +1949,6 @@ pub fn get_notes(
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store,
&ns,
@ -1979,7 +2001,6 @@ pub fn set_notes(
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store,
&ns,
@ -2126,7 +2147,7 @@ pub fn set_backup_owner(
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let owner_check_required = check_ns_privs_full(
let owner_check_required = check_ns_privs(
&store,
&ns,
&auth_id,

View File

@ -2,23 +2,19 @@
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{Router, SubdirMap};
use proxmox_sys::sortable;
pub mod datastore;
pub mod namespace;
pub mod prune;
pub mod sync;
pub mod traffic_control;
pub mod verify;
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER),
("prune", &prune::ROUTER),
("sync", &sync::ROUTER),
("traffic-control", &traffic_control::ROUTER),
("verify", &verify::ROUTER),
]);
];
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))

View File

@ -7,12 +7,21 @@ use proxmox_schema::*;
use pbs_api_types::{
Authid, BackupNamespace, NamespaceListItem, Operation, DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA,
PROXMOX_SAFE_ID_FORMAT,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PROXMOX_SAFE_ID_FORMAT,
};
use pbs_datastore::DataStore;
use crate::backup::{check_ns_modification_privs, check_ns_privs, NS_PRIVS_OK};
// TODO: move somewhere we can reuse it from (datastore has its own copy atm.)
fn get_ns_privs(store: &str, ns: &BackupNamespace, auth_id: &Authid) -> Result<u64, Error> {
let user_info = CachedUserInfo::new()?;
Ok(if ns.is_root() {
user_info.lookup_privs(auth_id, &["datastore", store])
} else {
user_info.lookup_privs(auth_id, &["datastore", store, &ns.to_string()])
})
}
#[api(
input: {
@ -50,10 +59,9 @@ pub fn create_namespace(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let parent = parent.unwrap_or_default();
let mut ns = parent.clone();
ns.push(name.clone())?;
check_ns_modification_privs(&store, &ns, &auth_id)?;
if get_ns_privs(&store, &parent, &auth_id)? & PRIV_DATASTORE_MODIFY == 0 {
proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
@ -94,34 +102,29 @@ pub fn list_namespaces(
) -> Result<Vec<NamespaceListItem>, Error> {
let parent = parent.unwrap_or_default();
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
const PRIVS_OK: u64 = PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT;
// first do a base check to avoid leaking if a NS exists or not
if get_ns_privs(&store, &parent, &auth_id)? & PRIVS_OK == 0 {
proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}
let user_info = CachedUserInfo::new()?;
// get result up-front to avoid cloning NS, it's relatively cheap anyway (no IO normally)
let parent_access = check_ns_privs(&store, &parent, &auth_id, NS_PRIVS_OK);
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let iter = match datastore.recursive_iter_backup_ns_ok(parent, max_depth) {
Ok(iter) => iter,
// parent NS doesn't exist and user has no privs on it, avoid info leakage.
Err(_) if parent_access.is_err() => http_bail!(FORBIDDEN, "permission check failed"),
Err(err) => return Err(err),
};
let ns_to_item =
|ns: BackupNamespace| -> NamespaceListItem { NamespaceListItem { ns, comment: None } };
let namespace_list: Vec<NamespaceListItem> = iter
Ok(datastore
.recursive_iter_backup_ns_ok(parent, max_depth)?
.filter(|ns| {
let privs = user_info.lookup_privs(&auth_id, &ns.acl_path(&store));
privs & NS_PRIVS_OK != 0
if ns.is_root() {
return true; // already covered by access permission above
}
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store, &ns.to_string()]);
privs & PRIVS_OK != 0
})
.map(ns_to_item)
.collect();
if namespace_list.is_empty() && parent_access.is_err() {
http_bail!(FORBIDDEN, "permission check failed"); // avoid leakage
}
Ok(namespace_list)
.collect())
}
#[api(
@ -133,7 +136,7 @@ pub fn list_namespaces(
},
"delete-groups": {
type: bool,
description: "If set, all groups will be destroyed in the whole hierarchy below and\
description: "If set, all groups will be destroyed in the whole hierachy below and\
including `ns`. If not set, only empty namespaces will be pruned.",
optional: true,
default: false,
@ -152,9 +155,15 @@ pub fn delete_namespace(
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
// we could allow it as an easy purge-whole-datastore operation, but let's be more restrictive for now
if ns.is_root() {
bail!("cannot delete root namespace!");
};
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
check_ns_modification_privs(&store, &ns, &auth_id)?;
let parent = ns.parent(); // must have MODIFY permission on parent to allow deletion
if get_ns_privs(&store, &parent, &auth_id)? & PRIV_DATASTORE_MODIFY == 0 {
http_bail!(FORBIDDEN, "permission check failed");
}
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

View File

@ -1,138 +0,0 @@
//! Datastore Prune Job Management
use anyhow::{format_err, Error};
use serde_json::Value;
use proxmox_router::{
list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::api;
use proxmox_sys::sortable;
use pbs_api_types::{
Authid, PruneJobConfig, PruneJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_MODIFY,
};
use pbs_config::prune;
use pbs_config::CachedUserInfo;
use crate::server::{
do_prune_job,
jobstate::{compute_schedule_status, Job, JobState},
};
#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
optional: true,
},
},
},
returns: {
description: "List configured jobs and their status (filtered by access)",
type: Array,
items: { type: PruneJobStatus },
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Audit or Datastore.Modify on datastore.",
},
)]
/// List all prune jobs
pub fn list_prune_jobs(
store: Option<String>,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<PruneJobStatus>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY;
let (config, digest) = prune::config()?;
let job_config_iter =
config
.convert_to_typed_array("prune")?
.into_iter()
.filter(|job: &PruneJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
if privs & required_privs == 0 {
return false;
}
if let Some(store) = &store {
&job.store == store
} else {
true
}
});
let mut list = Vec::new();
for job in job_config_iter {
let last_state = JobState::load("prunejob", &job.id)
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
let mut status = compute_schedule_status(&last_state, Some(&job.schedule))?;
if job.disable {
status.next_run = None;
}
list.push(PruneJobStatus {
config: job,
status,
});
}
rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
}
}
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Modify on job's datastore.",
},
)]
/// Runs a prune job manually.
pub fn run_prune_job(
id: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, _digest) = prune::config()?;
let prune_job: PruneJobConfig = config.lookup("prune", &id)?;
user_info.check_privs(&auth_id, &prune_job.acl_path(), PRIV_DATASTORE_MODIFY, true)?;
let job = Job::new("prunejob", &id)?;
let upid_str = do_prune_job(job, prune_job.options, prune_job.store, &auth_id, None)?;
Ok(upid_str)
}
#[sortable]
const PRUNE_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_PRUNE_JOB))];
const PRUNE_INFO_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(PRUNE_INFO_SUBDIRS))
.subdirs(PRUNE_INFO_SUBDIRS);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_PRUNE_JOBS)
.match_all("id", &PRUNE_INFO_ROUTER);

View File

@ -58,7 +58,7 @@ pub fn list_verification_jobs(
.convert_to_typed_array("verification")?
.into_iter()
.filter(|job: &VerificationJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
if privs & required_privs == 0 {
return false;
}
@ -116,7 +116,7 @@ pub fn run_verification_job(
user_info.check_privs(
&auth_id,
&verification_job.acl_path(),
&["datastore", &verification_job.store],
PRIV_DATASTORE_VERIFY,
true,
)?;

View File

@ -9,7 +9,7 @@ use hyper::{Body, Request, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use proxmox_router::{http_err, list_subdirs_api_method};
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
@ -85,14 +85,14 @@ fn upgrade_to_backup_protocol(
let user_info = CachedUserInfo::new()?;
user_info
.check_privs(
&auth_id,
&backup_ns.acl_path(&store),
PRIV_DATASTORE_BACKUP,
false,
)
.map_err(|err| http_err!(FORBIDDEN, "{err}"))?;
let privs = if backup_ns.is_root() {
user_info.lookup_privs(&auth_id, &["datastore", &store])
} else {
user_info.lookup_privs(&auth_id, &["datastore", &store, &backup_ns.to_string()])
};
if privs & PRIV_DATASTORE_BACKUP == 0 {
proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
@ -117,7 +117,6 @@ fn upgrade_to_backup_protocol(
proxmox_router::http_bail!(NOT_FOUND, "namespace not found");
}
// FIXME: include namespace here?
let worker_id = format!("{}:{}/{}", store, backup_dir_arg.ty(), backup_dir_arg.id());
let env_type = rpcenv.env_type();

View File

@ -121,7 +121,7 @@ pub fn update_webauthn_config(
} else {
let rp = webauthn
.rp
.ok_or_else(|| format_err!("missing property: 'rp'"))?;
.ok_or_else(|| format_err!("missing proeprty: 'rp'"))?;
let origin = webauthn.origin;
let id = webauthn
.id

View File

@ -251,22 +251,22 @@ pub fn update_datastore(
data.prune_schedule = None;
}
DeletableProperty::keep_last => {
data.keep.keep_last = None;
data.keep_last = None;
}
DeletableProperty::keep_hourly => {
data.keep.keep_hourly = None;
data.keep_hourly = None;
}
DeletableProperty::keep_daily => {
data.keep.keep_daily = None;
data.keep_daily = None;
}
DeletableProperty::keep_weekly => {
data.keep.keep_weekly = None;
data.keep_weekly = None;
}
DeletableProperty::keep_monthly => {
data.keep.keep_monthly = None;
data.keep_monthly = None;
}
DeletableProperty::keep_yearly => {
data.keep.keep_yearly = None;
data.keep_yearly = None;
}
DeletableProperty::verify_new => {
data.verify_new = None;
@ -302,26 +302,29 @@ pub fn update_datastore(
data.gc_schedule = update.gc_schedule;
}
macro_rules! prune_disabled {
($(($param:literal, $($member:tt)+)),+) => {
$(
if update.$($member)+.is_some() {
param_bail!(
$param,
"datastore prune settings have been replaced by prune jobs",
);
let mut prune_schedule_changed = false;
if update.prune_schedule.is_some() {
prune_schedule_changed = data.prune_schedule != update.prune_schedule;
data.prune_schedule = update.prune_schedule;
}
)+
};
if update.keep_last.is_some() {
data.keep_last = update.keep_last;
}
prune_disabled! {
("keep-last", keep.keep_last),
("keep-hourly", keep.keep_hourly),
("keep-daily", keep.keep_daily),
("keep-weekly", keep.keep_weekly),
("keep-monthly", keep.keep_monthly),
("keep-yearly", keep.keep_yearly),
("prune-schedule", prune_schedule)
if update.keep_hourly.is_some() {
data.keep_hourly = update.keep_hourly;
}
if update.keep_daily.is_some() {
data.keep_daily = update.keep_daily;
}
if update.keep_weekly.is_some() {
data.keep_weekly = update.keep_weekly;
}
if update.keep_monthly.is_some() {
data.keep_monthly = update.keep_monthly;
}
if update.keep_yearly.is_some() {
data.keep_yearly = update.keep_yearly;
}
if let Some(notify_str) = update.notify {
@ -364,6 +367,10 @@ pub fn update_datastore(
jobstate::update_job_last_run_time("garbage_collection", &name)?;
}
if prune_schedule_changed {
jobstate::update_job_last_run_time("prune", &name)?;
}
Ok(())
}

View File

@ -2,7 +2,6 @@
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{Router, SubdirMap};
use proxmox_sys::sortable;
pub mod access;
pub mod acme;
@ -10,7 +9,6 @@ pub mod changer;
pub mod datastore;
pub mod drive;
pub mod media_pool;
pub mod prune;
pub mod remote;
pub mod sync;
pub mod tape_backup_job;
@ -18,22 +16,20 @@ pub mod tape_encryption_keys;
pub mod traffic_control;
pub mod verify;
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
const SUBDIRS: SubdirMap = &[
("access", &access::ROUTER),
("acme", &acme::ROUTER),
("changer", &changer::ROUTER),
("datastore", &datastore::ROUTER),
("drive", &drive::ROUTER),
("media-pool", &media_pool::ROUTER),
("prune", &prune::ROUTER),
("remote", &remote::ROUTER),
("sync", &sync::ROUTER),
("tape-backup-job", &tape_backup_job::ROUTER),
("tape-encryption-keys", &tape_encryption_keys::ROUTER),
("traffic-control", &traffic_control::ROUTER),
("verify", &verify::ROUTER),
]);
];
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))

View File

@ -1,378 +0,0 @@
use anyhow::Error;
use hex::FromHex;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, PruneJobConfig, PruneJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::prune;
use pbs_config::CachedUserInfo;
#[api(
input: {
properties: {},
},
returns: {
description: "List configured prune schedules.",
type: Array,
items: { type: PruneJobConfig },
},
access: {
permission: &Permission::Anybody,
// FIXME: Audit on namespaces
description: "Requires Datastore.Audit.",
},
)]
/// List all scheduled prune jobs.
pub fn list_prune_jobs(
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<PruneJobConfig>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY;
let (config, digest) = prune::config()?;
let list = config.convert_to_typed_array("prune")?;
let list = list
.into_iter()
.filter(|job: &PruneJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
privs & required_privs != 0
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
#[api(
protected: true,
input: {
properties: {
config: {
type: PruneJobConfig,
flatten: true,
},
},
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Modify on job's datastore.",
},
)]
/// Create a new prune job.
pub fn create_prune_job(
config: PruneJobConfig,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_MODIFY, true)?;
let _lock = prune::lock_config()?;
let (mut section_config, _digest) = prune::config()?;
if section_config.sections.get(&config.id).is_some() {
param_bail!("id", "job '{}' already exists.", config.id);
}
section_config.set_data(&config.id, "prune", &config)?;
prune::save_config(&section_config)?;
crate::server::jobstate::create_state_file("prunejob", &config.id)?;
Ok(())
}
#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
},
},
returns: { type: PruneJobConfig },
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Audit or Datastore.Verify on job's datastore.",
},
)]
/// Read a prune job configuration.
pub fn read_prune_job(
id: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<PruneJobConfig, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let (config, digest) = prune::config()?;
let prune_job: PruneJobConfig = config.lookup("prune", &id)?;
let required_privs = PRIV_DATASTORE_AUDIT;
user_info.check_privs(&auth_id, &prune_job.acl_path(), required_privs, true)?;
rpcenv["digest"] = hex::encode(&digest).into();
Ok(prune_job)
}
#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the comment.
Comment,
/// Unset the disable flag.
Disable,
/// Reset the namespace to the root namespace.
Ns,
/// Reset the maximum depth to full recursion.
MaxDepth,
/// Delete number of last backups to keep.
KeepLast,
/// Delete number of hourly backups to keep.
KeepHourly,
/// Delete number of daily backups to keep.
KeepDaily,
/// Delete number of weekly backups to keep.
KeepWeekly,
/// Delete number of monthly backups to keep.
KeepMonthly,
/// Delete number of yearly backups to keep.
KeepYearly,
}
#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
update: {
type: PruneJobConfigUpdater,
flatten: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Modify on job's datastore.",
},
)]
/// Update prune job config.
#[allow(clippy::too_many_arguments)]
pub fn update_prune_job(
id: String,
update: PruneJobConfigUpdater,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let _lock = prune::lock_config()?;
// pass/compare digest
let (mut config, expected_digest) = prune::config()?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: PruneJobConfig = config.lookup("prune", &id)?;
user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_MODIFY, true)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::Comment => {
data.comment = None;
}
DeletableProperty::Disable => {
data.disable = false;
}
DeletableProperty::Ns => {
data.options.ns = None;
}
DeletableProperty::MaxDepth => {
data.options.max_depth = None;
}
DeletableProperty::KeepLast => {
data.options.keep.keep_last = None;
}
DeletableProperty::KeepHourly => {
data.options.keep.keep_hourly = None;
}
DeletableProperty::KeepDaily => {
data.options.keep.keep_daily = None;
}
DeletableProperty::KeepWeekly => {
data.options.keep.keep_weekly = None;
}
DeletableProperty::KeepMonthly => {
data.options.keep.keep_monthly = None;
}
DeletableProperty::KeepYearly => {
data.options.keep.keep_yearly = None;
}
}
}
}
let mut recheck_privs = false;
if let Some(store) = update.store {
// check new store with possibly new ns:
recheck_privs = true;
data.store = store;
}
if let Some(ns) = update.options.ns {
recheck_privs = true;
data.options.ns = if ns.is_root() { None } else { Some(ns) };
}
if recheck_privs {
user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_MODIFY, true)?;
}
let mut schedule_changed = false;
if let Some(schedule) = update.schedule {
schedule_changed = data.schedule != schedule;
data.schedule = schedule;
}
if let Some(max_depth) = update.options.max_depth {
if max_depth <= pbs_api_types::MAX_NAMESPACE_DEPTH {
data.options.max_depth = Some(max_depth);
}
}
if let Some(value) = update.disable {
data.disable = value;
}
if let Some(value) = update.comment {
data.comment = Some(value);
}
if let Some(value) = update.options.keep.keep_last {
data.options.keep.keep_last = Some(value);
}
if let Some(value) = update.options.keep.keep_hourly {
data.options.keep.keep_hourly = Some(value);
}
if let Some(value) = update.options.keep.keep_daily {
data.options.keep.keep_daily = Some(value);
}
if let Some(value) = update.options.keep.keep_weekly {
data.options.keep.keep_weekly = Some(value);
}
if let Some(value) = update.options.keep.keep_monthly {
data.options.keep.keep_monthly = Some(value);
}
if let Some(value) = update.options.keep.keep_yearly {
data.options.keep.keep_yearly = Some(value);
}
config.set_data(&id, "prune", &data)?;
prune::save_config(&config)?;
if schedule_changed {
crate::server::jobstate::update_job_last_run_time("prunejob", &id)?;
}
Ok(())
}
#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Anybody,
description: "Requires Datastore.Verify on job's datastore.",
},
)]
/// Remove a prune job configuration
pub fn delete_prune_job(
id: String,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let _lock = prune::lock_config()?;
let (mut config, expected_digest) = prune::config()?;
let job: PruneJobConfig = config.lookup("prune", &id)?;
user_info.check_privs(&auth_id, &job.acl_path(), PRIV_DATASTORE_MODIFY, true)?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
if config.sections.remove(&id).is_none() {
http_bail!(NOT_FOUND, "job '{}' does not exist.", id);
}
prune::save_config(&config)?;
crate::server::jobstate::remove_state_file("prunejob", &id)?;
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_PRUNE_JOB)
.put(&API_METHOD_UPDATE_PRUNE_JOB)
.delete(&API_METHOD_DELETE_PRUNE_JOB);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_PRUNE_JOBS)
.post(&API_METHOD_CREATE_PRUNE_JOB)
.match_all("id", &ITEM_ROUTER);

View File

@ -503,13 +503,13 @@ pub async fn scan_remote_groups(
}
#[sortable]
const DATASTORE_SCAN_SUBDIRS: SubdirMap = &sorted!([
const DATASTORE_SCAN_SUBDIRS: SubdirMap = &[
("groups", &Router::new().get(&API_METHOD_SCAN_REMOTE_GROUPS)),
(
"namespaces",
&Router::new().get(&API_METHOD_SCAN_REMOTE_NAMESPACES),
),
]);
];
const DATASTORE_SCAN_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(DATASTORE_SCAN_SUBDIRS))

View File

@ -20,11 +20,18 @@ pub fn check_sync_job_read_access(
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
if datastore_privs & PRIV_DATASTORE_AUDIT == 0 {
return false;
}
if let Some(ref ns) = job.ns {
let ns_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store, &ns.to_string()]);
if ns_privs & PRIV_DATASTORE_AUDIT == 0 {
return false;
}
}
let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
remote_privs & PRIV_REMOTE_AUDIT != 0
}
@ -38,13 +45,20 @@ pub fn check_sync_job_modify_access(
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 {
let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
if datastore_privs & PRIV_DATASTORE_BACKUP == 0 {
return false;
}
if let Some(ref ns) = job.ns {
let ns_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store, &ns.to_string()]);
if ns_privs & PRIV_DATASTORE_BACKUP == 0 {
return false;
}
}
if let Some(true) = job.remove_vanished {
if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 {
if datastore_privs & PRIV_DATASTORE_PRUNE == 0 {
return false;
}
}
@ -59,7 +73,7 @@ pub fn check_sync_job_modify_access(
};
// same permission as changing ownership after syncing
if !correct_owner && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 {
if !correct_owner && datastore_privs & PRIV_DATASTORE_MODIFY == 0 {
return false;
}

View File

@ -45,7 +45,7 @@ pub fn list_verification_jobs(
let list = list
.into_iter()
.filter(|job: &VerificationJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
privs & required_privs != 00
})
@ -79,7 +79,12 @@ pub fn create_verification_job(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_VERIFY, false)?;
user_info.check_privs(
&auth_id,
&["datastore", &config.store],
PRIV_DATASTORE_VERIFY,
false,
)?;
let _lock = verify::lock_config()?;
@ -125,7 +130,12 @@ pub fn read_verification_job(
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
user_info.check_privs(&auth_id, &verification_job.acl_path(), required_privs, true)?;
user_info.check_privs(
&auth_id,
&["datastore", &verification_job.store],
required_privs,
true,
)?;
rpcenv["digest"] = hex::encode(&digest).into();
@ -205,8 +215,13 @@ pub fn update_verification_job(
let mut data: VerificationJobConfig = config.lookup("verification", &id)?;
// check existing store and NS
user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
// check existing store
user_info.check_privs(
&auth_id,
&["datastore", &data.store],
PRIV_DATASTORE_VERIFY,
true,
)?;
if let Some(delete) = delete {
for delete_prop in delete {
@ -243,6 +258,13 @@ pub fn update_verification_job(
}
if let Some(store) = update.store {
// check new store
user_info.check_privs(
&auth_id,
&["datastore", &store],
PRIV_DATASTORE_VERIFY,
true,
)?;
data.store = store;
}
@ -267,9 +289,6 @@ pub fn update_verification_job(
}
}
// check new store and NS
user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
config.set_data(&id, "verification", &data)?;
verify::save_config(&config)?;
@ -313,7 +332,12 @@ pub fn delete_verification_job(
let (mut config, expected_digest) = verify::config()?;
let job: VerificationJobConfig = config.lookup("verification", &id)?;
user_info.check_privs(&auth_id, &job.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
user_info.check_privs(
&auth_id,
&["datastore", &job.store],
PRIV_DATASTORE_VERIFY,
true,
)?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;

View File

@ -1,7 +1,5 @@
//! The Proxmox Backup Server API
use proxmox_sys::sortable;
pub mod access;
pub mod admin;
pub mod backup;
@ -18,8 +16,7 @@ pub mod version;
use proxmox_router::{list_subdirs_api_method, Router, SubdirMap};
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
const SUBDIRS: SubdirMap = &[
("access", &access::ROUTER),
("admin", &admin::ROUTER),
("backup", &backup::ROUTER),
@ -31,7 +28,7 @@ const SUBDIRS: SubdirMap = &sorted!([
("status", &status::ROUTER),
("tape", &tape::ROUTER),
("version", &version::ROUTER),
]);
];
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))

View File

@ -1,7 +1,6 @@
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::os::unix::prelude::OsStrExt;
use proxmox_router::{
list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
@ -361,7 +360,7 @@ pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
let running_kernel = format!(
"running kernel: {}",
std::str::from_utf8(nix::sys::utsname::uname()?.release().as_bytes())?.to_owned()
nix::sys::utsname::uname().release().to_owned()
);
if let Some(proxmox_backup) = pbs_packages
.iter()

View File

@ -317,7 +317,7 @@ fn upgrade_to_websocket(
}
#[api]
/// List Nodes (only for compatibility)
/// List Nodes (only for compatibility)
fn list_nodes() -> Result<Value, Error> {
Ok(json!([ { "node": proxmox_sys::nodename().to_string() } ]))
}

View File

@ -1,5 +1,5 @@
use std::path::Path;
use std::process::Command;
use std::{os::unix::prelude::OsStrExt, path::Path};
use anyhow::{bail, format_err, Error};
use serde_json::Value;
@ -69,12 +69,12 @@ fn get_status(
let cpuinfo = procfs::read_cpuinfo()?;
let cpuinfo = cpuinfo.into();
let uname = nix::sys::utsname::uname()?;
let uname = nix::sys::utsname::uname();
let kversion = format!(
"{} {} {}",
std::str::from_utf8(uname.sysname().as_bytes())?,
std::str::from_utf8(uname.release().as_bytes())?,
std::str::from_utf8(uname.version().as_bytes())?
uname.sysname(),
uname.release(),
uname.version()
);
Ok(NodeStatus {

View File

@ -22,7 +22,6 @@ use proxmox_rest_server::{upid_log_path, upid_read_status, TaskListInfoIterator,
// matches respective job execution privileges
fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
match (upid.worker_type.as_str(), &upid.worker_id) {
// FIXME: parse namespace here?
("verificationjob", Some(workerid)) => {
if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
if let Some(store) = captures.get(1) {

View File

@ -263,7 +263,6 @@ async fn pull(
let client = pull_params.client().await?;
// fixme: set to_stdout to false?
// FIXME: add namespace to worker id?
let upid_str = WorkerTask::spawn(
"sync",
Some(store.clone()),

View File

@ -78,22 +78,21 @@ fn upgrade_to_backup_reader_protocol(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?.to_owned();
let backup_ns = optional_ns_param(&param)?;
let user_info = CachedUserInfo::new()?;
let acl_path = backup_ns.acl_path(&store);
let privs = user_info.lookup_privs(&auth_id, &acl_path);
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
// priv_backup needs owner check further below!
if !priv_read && !priv_backup {
bail!("no permissions on /{}", acl_path.join("/"));
bail!("no permissions on /datastore/{}", store);
}
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_ns = optional_ns_param(&param)?;
let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?;
let protocols = parts
@ -135,7 +134,6 @@ fn upgrade_to_backup_reader_protocol(
//let files = BackupInfo::list_files(&path, &backup_dir)?;
// FIXME: include namespace here?
let worker_id = format!(
"{}:{}/{}/{:08X}",
store,

View File

@ -18,8 +18,6 @@ use pbs_datastore::DataStore;
use crate::rrd_cache::extract_rrd_data;
use crate::tools::statistics::linear_regression;
use crate::backup::can_access_any_namespace;
#[api(
returns: {
description: "Lists the Status of the Datastores.",
@ -49,18 +47,24 @@ pub fn datastore_status(
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
if let Ok(datastore) = DataStore::lookup_datastore(&store, Some(Operation::Lookup)) {
if can_access_any_namespace(datastore, &auth_id, &user_info) {
list.push(DataStoreStatusListItem::empty(store, None));
}
}
continue;
}
let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Read)) {
Ok(datastore) => datastore,
Err(err) => {
list.push(DataStoreStatusListItem::empty(store, Some(err.to_string())));
list.push(DataStoreStatusListItem {
store: store.clone(),
total: -1,
used: -1,
avail: -1,
history: None,
history_start: None,
history_delta: None,
estimated_full_date: None,
error: Some(err.to_string()),
gc_status: None,
});
continue;
}
};
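Master folds the verbose struct literal above into a DataStoreStatusListItem::empty() constructor. A reduced sketch of that helper pattern (field set abbreviated, with -1 marking "unknown" as in the hunk):

```rust
// Reduced sketch of the empty() constructor master introduces in place of
// the inline literal; the real item has more fields than shown here.
#[derive(Debug)]
struct DataStoreStatusListItem {
    store: String,
    total: i64,
    used: i64,
    avail: i64,
    error: Option<String>,
}

impl DataStoreStatusListItem {
    fn empty(store: &str, error: Option<String>) -> Self {
        Self {
            store: store.to_owned(),
            total: -1,
            used: -1,
            avail: -1,
            error,
        }
    }
}

fn main() {
    let item = DataStoreStatusListItem::empty("store2", Some("lookup failed".into()));
    println!("{item:?}");
}
```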

View File

@ -10,7 +10,7 @@ use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use pbs_api_types::{
print_ns_and_snapshot, print_store_and_ns, Authid, GroupFilter, MediaPoolConfig, Operation,
print_ns_and_snapshot, Authid, DatastoreWithNamespace, GroupFilter, MediaPoolConfig, Operation,
TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA,
PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
};
@ -47,11 +47,20 @@ fn check_backup_permission(
) -> Result<(), Error> {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_READ, false)?;
let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
if (privs & PRIV_DATASTORE_READ) == 0 {
bail!("no permissions on /datastore/{}", store);
}
user_info.check_privs(auth_id, &["tape", "drive", drive], PRIV_TAPE_WRITE, false)?;
let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
if (privs & PRIV_TAPE_WRITE) == 0 {
bail!("no permissions on /tape/drive/{}", drive);
}
user_info.check_privs(auth_id, &["tape", "pool", pool], PRIV_TAPE_WRITE, false)?;
let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
if (privs & PRIV_TAPE_WRITE) == 0 {
bail!("no permissions on /tape/pool/{}", pool);
}
Ok(())
}
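The same refactor repeats here: master's check_privs() bails internally, while v2.2.1 looks the bitmask up and bails by hand at each call site. A compact sketch of the consolidated form (stubbed lookup, illustrative constant value):

```rust
// Compact sketch of the consolidated check: lookup and bail live in one
// place instead of at every call site. The privilege lookup is stubbed.
use anyhow::{bail, Error};

const PRIV_TAPE_WRITE: u64 = 1 << 4;

fn lookup_privs(_auth_id: &str, _path: &[&str]) -> u64 {
    PRIV_TAPE_WRITE // stub: pretend the user holds the privilege
}

fn check_privs(auth_id: &str, path: &[&str], required: u64) -> Result<(), Error> {
    if lookup_privs(auth_id, path) & required == 0 {
        bail!("no permissions on /{}", path.join("/"));
    }
    Ok(())
}

fn main() -> Result<(), Error> {
    check_privs("backup@pam", &["tape", "drive", "drive0"], PRIV_TAPE_WRITE)
}
```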
@ -228,7 +237,11 @@ pub fn do_tape_backup_job(
}
if let Err(err) = job.finish(status) {
eprintln!("could not finish job state for {}: {}", job.jobtype(), err);
eprintln!(
"could not finish job state for {}: {}",
job.jobtype().to_string(),
err
);
}
if let Err(err) = set_tape_device_state(&setup.drive, "") {
@ -449,6 +462,11 @@ fn backup_worker(
let mut need_catalog = false; // avoid writing catalog for empty jobs
for (group_number, group) in group_list.into_iter().enumerate() {
let store_with_ns = DatastoreWithNamespace {
store: datastore_name.to_owned(),
ns: group.backup_ns().clone(),
};
progress.done_groups = group_number as u64;
progress.done_snapshots = 0;
progress.group_snapshots = 0;
@ -465,7 +483,7 @@ fn backup_worker(
task_log!(
worker,
"{}, group {} was empty",
print_store_and_ns(datastore_name, group.backup_ns()),
store_with_ns,
group.group()
);
continue;
@ -478,11 +496,7 @@ fn backup_worker(
if let Some(info) = snapshot_list.pop() {
let rel_path =
print_ns_and_snapshot(info.backup_dir.backup_ns(), info.backup_dir.as_ref());
if pool_writer.contains_snapshot(
datastore_name,
&info.backup_dir.backup_ns(),
info.backup_dir.as_ref(),
) {
if pool_writer.contains_snapshot(datastore_name, &rel_path) {
task_log!(worker, "skip snapshot {}", rel_path);
continue;
}
@ -503,11 +517,7 @@ fn backup_worker(
let rel_path =
print_ns_and_snapshot(info.backup_dir.backup_ns(), info.backup_dir.as_ref());
if pool_writer.contains_snapshot(
datastore_name,
&info.backup_dir.backup_ns(),
info.backup_dir.as_ref(),
) {
if pool_writer.contains_snapshot(datastore_name, &rel_path) {
task_log!(worker, "skip snapshot {}", rel_path);
continue;
}

View File

@ -18,10 +18,9 @@ use proxmox_uuid::Uuid;
use pbs_api_types::{
parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode,
HumanByte, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA,
DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA,
TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader;
@ -34,7 +33,6 @@ use pbs_tape::{
};
use proxmox_rest_server::WorkerTask;
use crate::backup::check_ns_modification_privs;
use crate::{
server::lookup_user_email,
tape::{
@ -53,6 +51,12 @@ use crate::{
tools::parallel_handler::ParallelHandler,
};
pub struct DataStoreMap {
map: HashMap<String, Arc<DataStore>>,
default: Option<Arc<DataStore>>,
ns_map: Option<NamespaceMap>,
}
struct NamespaceMap {
map: HashMap<String, HashMap<BackupNamespace, (BackupNamespace, usize)>>,
}
@ -118,12 +122,6 @@ impl NamespaceMap {
}
}
pub struct DataStoreMap {
map: HashMap<String, Arc<DataStore>>,
default: Option<Arc<DataStore>>,
ns_map: Option<NamespaceMap>,
}
impl TryFrom<String> for DataStoreMap {
type Error = Error;
@ -181,26 +179,20 @@ impl DataStoreMap {
map
}
fn target_ns(&self, datastore: &str, ns: &BackupNamespace) -> Option<Vec<BackupNamespace>> {
self.ns_map
.as_ref()
.map(|mapping| mapping.get_namespaces(datastore, ns))
}
fn target_store(&self, source_datastore: &str) -> Option<Arc<DataStore>> {
self.map
.get(source_datastore)
.or_else(|| self.default.as_ref())
.map(|store| Arc::clone(store))
}
fn get_targets(
&self,
source_datastore: &str,
source_ds: &str,
source_ns: &BackupNamespace,
) -> Option<(Arc<DataStore>, Option<Vec<BackupNamespace>>)> {
self.target_store(source_datastore)
.map(|store| (store, self.target_ns(source_datastore, source_ns)))
if let Some(store) = self.map.get(source_ds).or(self.default.as_ref()) {
let ns = self
.ns_map
.as_ref()
.map(|map| map.get_namespaces(source_ds, source_ns));
return Some((Arc::clone(store), ns));
}
None
}
}
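get_targets()/target_store() resolve a source datastore to its mapped target, falling back to the optional default mapping when no explicit entry exists. A reduced sketch of that fallback resolution (datastores modeled as plain strings):

```rust
// Reduced sketch of the source-to-target resolution above: an explicit
// mapping wins, otherwise the optional default target is used.
use std::collections::HashMap;

struct DataStoreMap {
    map: HashMap<String, String>,
    default: Option<String>,
}

impl DataStoreMap {
    fn target_store(&self, source: &str) -> Option<&String> {
        self.map.get(source).or(self.default.as_ref())
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert("store-a".to_string(), "target-a".to_string());
    let ds_map = DataStoreMap { map, default: Some("fallback".to_string()) };
    assert_eq!(ds_map.target_store("store-a").unwrap(), "target-a");
    assert_eq!(ds_map.target_store("unknown").unwrap(), "fallback");
}
```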
@ -211,10 +203,17 @@ fn check_datastore_privs(
auth_id: &Authid,
owner: Option<&Authid>,
) -> Result<(), Error> {
let acl_path = ns.acl_path(store);
let privs = user_info.lookup_privs(auth_id, &acl_path);
let privs = if ns.is_root() {
user_info.lookup_privs(auth_id, &["datastore", store])
} else {
user_info.lookup_privs(auth_id, &["datastore", store, &ns.to_string()])
};
if (privs & PRIV_DATASTORE_BACKUP) == 0 {
bail!("no permissions on /{}", acl_path.join("/"));
if ns.is_root() {
bail!("no permissions on /datastore/{}", store);
} else {
bail!("no permissions on /datastore/{}/{}", store, &ns.to_string());
}
}
if let Some(ref owner) = owner {
@ -242,16 +241,25 @@ fn check_and_create_namespaces(
// try create recursively if it does not exist
if !store.namespace_exists(ns) {
let mut tmp_ns = BackupNamespace::root();
let mut tmp_ns: BackupNamespace = Default::default();
let has_datastore_priv = user_info.lookup_privs(auth_id, &["datastore", store.name()])
& PRIV_DATASTORE_MODIFY
!= 0;
for comp in ns.components() {
tmp_ns.push(comp.to_string())?;
if !store.namespace_exists(&tmp_ns) {
check_ns_modification_privs(store.name(), &tmp_ns, auth_id).map_err(|_err| {
format_err!("no permission to create namespace '{}'", tmp_ns)
})?;
if has_datastore_priv
|| user_info.lookup_privs(
auth_id,
&["datastore", store.name(), &tmp_ns.parent().to_string()],
) & PRIV_DATASTORE_MODIFY
!= 0
{
store.create_namespace(&tmp_ns.parent(), comp.to_string())?;
} else {
bail!("no permissions to create '{}'", tmp_ns);
}
}
}
}
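check_and_create_namespaces() walks the namespace components top-down and creates each missing level once the permission check passes. A simplified model of that walk (namespaces as slash-separated strings, the permission check reduced to a boolean):

```rust
// Simplified model of the walk above: visit the components top-down and
// create each missing level after a permission check.
use anyhow::{bail, Error};
use std::collections::HashSet;

fn check_and_create(
    existing: &mut HashSet<String>,
    ns: &str,
    may_create: bool,
) -> Result<(), Error> {
    let mut current = String::new();
    for comp in ns.split('/') {
        if !current.is_empty() {
            current.push('/');
        }
        current.push_str(comp);
        if !existing.contains(&current) {
            if !may_create {
                bail!("no permission to create namespace '{current}'");
            }
            existing.insert(current.clone()); // create the missing level
        }
    }
    Ok(())
}

fn main() -> Result<(), Error> {
    let mut existing = HashSet::from(["a".to_string()]);
    check_and_create(&mut existing, "a/b/c", true)?;
    assert!(existing.contains("a/b/c"));
    Ok(())
}
```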
@ -304,8 +312,8 @@ pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
},
access: {
// Note: parameters are not URI parameters, so we need to test inside the function body
description: "The user needs Tape.Read privilege on /tape/pool/{pool} and \
/tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}/[{namespace}], \
description: "The user needs Tape.Read privilege on /tape/pool/{pool} \
and /tape/drive/{drive}, Datastore.Backup privilege on /datastore/{store}/[{namespace}],\
Datastore.Modify privileges to create namespaces (if they don't exist).",
permission: &Permission::Anybody,
},
@ -325,11 +333,11 @@ pub fn restore(
let user_info = CachedUserInfo::new()?;
let mut store_map = DataStoreMap::try_from(store)
.map_err(|err| format_err!("cannot parse store mapping: {err}"))?;
.map_err(|err| format_err!("cannot parse store mapping: {}", err))?;
let namespaces = if let Some(maps) = namespaces {
store_map
.add_namespaces_maps(maps)
.map_err(|err| format_err!("cannot parse namespace mapping: {err}"))?
.map_err(|err| format_err!("cannot parse namespace mapping: {}", err))?
} else {
false
};
@ -343,19 +351,25 @@ pub fn restore(
check_datastore_privs(
&user_info,
target.name(),
&BackupNamespace::root(),
&Default::default(),
&auth_id,
owner.as_ref(),
)?;
if let Some(namespaces) = namespaces {
for ns in namespaces {
check_and_create_namespaces(&user_info, target, ns, &auth_id, owner.as_ref())?;
}
}
}
user_info.check_privs(&auth_id, &["tape", "drive", &drive], PRIV_TAPE_READ, false)?;
let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
if (privs & PRIV_TAPE_READ) == 0 {
bail!("no permissions on /tape/drive/{}", drive);
}
let media_set_uuid = media_set.parse()?;
let status_path = Path::new(TAPE_STATUS_DIR);
let _lock = lock_media_set(status_path, &media_set_uuid, None)?;
@ -363,7 +377,11 @@ pub fn restore(
let inventory = Inventory::load(status_path)?;
let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;
user_info.check_privs(&auth_id, &["tape", "pool", &pool], PRIV_TAPE_READ, false)?;
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool]);
if (privs & PRIV_TAPE_READ) == 0 {
bail!("no permissions on /tape/pool/{}", pool);
}
let (drive_config, _digest) = pbs_config::drive::config()?;
@ -395,8 +413,8 @@ pub fn restore(
.and_then(|userid| lookup_user_email(userid))
.or_else(|| lookup_user_email(&auth_id.clone().into()));
task_log!(worker, "Mediaset '{media_set}'");
task_log!(worker, "Pool: {pool}");
task_log!(worker, "Mediaset '{}'", media_set);
task_log!(worker, "Pool: {}", pool);
let res = if snapshots.is_some() || namespaces {
restore_list_worker(
@ -425,11 +443,13 @@ pub fn restore(
&auth_id,
)
};
if res.is_ok() {
task_log!(worker, "Restore mediaset '{media_set}' done");
task_log!(worker, "Restore mediaset '{}' done", media_set);
}
if let Err(err) = set_tape_device_state(&drive, "") {
task_log!(worker, "could not unset drive state for {drive}: {err}");
task_log!(worker, "could not unset drive state for {}: {}", drive, err);
}
res
@ -461,7 +481,11 @@ fn restore_full_worker(
for (seq_nr, media_uuid) in media_list.iter().enumerate() {
match media_uuid {
None => {
bail!("media set {media_set_uuid} is incomplete (missing member {seq_nr}).");
bail!(
"media set {} is incomplete (missing member {}).",
media_set_uuid,
seq_nr
);
}
Some(media_uuid) => {
let media_id = inventory.lookup_media(media_uuid).unwrap();
@ -479,23 +503,30 @@ fn restore_full_worker(
}
if let Some(fingerprint) = encryption_key_fingerprint {
task_log!(worker, "Encryption key fingerprint: {fingerprint}");
task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
}
let used_datastores = store_map.used_datastores();
let datastore_list = used_datastores
task_log!(
worker,
"Datastore(s): {}",
used_datastores
.values()
.map(|(t, _)| String::from(t.name()))
.collect::<Vec<String>>()
.join(", ");
task_log!(worker, "Datastore(s): {datastore_list}",);
task_log!(worker, "Drive: {drive_name}");
let required_media = media_id_list
.join(", "),
);
task_log!(worker, "Drive: {}", drive_name);
task_log!(
worker,
"Required media list: {}",
media_id_list
.iter()
.map(|media_id| media_id.label.label_text.as_str())
.collect::<Vec<&str>>()
.join(";");
task_log!(worker, "Required media list: {required_media}",);
.join(";")
);
let mut datastore_locks = Vec::new();
for (target, _) in used_datastores.values() {
@ -537,8 +568,9 @@ fn check_snapshot_restorable(
) -> Result<bool, Error> {
let (datastore, namespaces) = if required {
let (datastore, namespaces) = match store_map.get_targets(store, ns) {
Some((target_ds, Some(target_ns))) => (target_ds, target_ns),
Some((target_ds, None)) => (target_ds, vec![ns.clone()]),
Some((target_ds, target_ns)) => {
(target_ds, target_ns.unwrap_or_else(|| vec![ns.clone()]))
}
None => bail!("could not find target datastore for {store}:{snapshot}"),
};
if namespaces.is_empty() {
@ -548,9 +580,14 @@ fn check_snapshot_restorable(
(datastore, namespaces)
} else {
match store_map.get_targets(store, ns) {
Some((_, Some(ns))) if ns.is_empty() => return Ok(false),
Some((datastore, Some(ns))) => (datastore, ns),
Some((_, None)) | None => return Ok(false),
Some((ds, Some(ns))) => {
if ns.is_empty() {
return Ok(false);
}
(ds, ns)
}
Some((_, None)) => return Ok(false),
None => return Ok(false),
}
};
@ -575,8 +612,11 @@ fn check_snapshot_restorable(
// only the owner is allowed to create additional snapshots
task_warn!(
worker,
"restore of '{snapshot}' to {ns} failed, owner check failed ({restore_owner} \
!= {owner})",
"restore '{}' to {} failed - owner check failed ({} != {})",
&snapshot,
ns,
restore_owner,
owner,
);
continue;
}
@ -587,7 +627,8 @@ fn check_snapshot_restorable(
if datastore.snapshot_path(&ns, &dir).exists() {
task_warn!(
worker,
"found snapshot {snapshot} on target datastore/namespace, skipping...",
"found snapshot {} on target datastore/namespace, skipping...",
&snapshot,
);
continue;
}
@ -595,7 +636,10 @@ fn check_snapshot_restorable(
}
if !have_some_permissions {
bail!("cannot restore {snapshot} to any target namespace due to permissions");
bail!(
"cannot restore {} to any target namespace due to permissions",
&snapshot
);
}
return Ok(can_restore_some);
@ -697,11 +741,9 @@ fn restore_list_worker(
})
.collect()
};
for (store, snapshot, _ns, _) in snapshots.iter() {
let datastore = match store_map.target_store(store) {
Some(store) => store,
None => bail!("unexpected error"), // we already checked those
};
for (store, snapshot, ns, _) in snapshots.iter() {
// unwrap ok, we already checked those snapshots
let (datastore, _) = store_map.get_targets(store, &ns).unwrap();
let (media_id, file_num) =
if let Some((media_uuid, file_num)) = catalog.lookup_snapshot(store, &snapshot) {
let media_id = inventory.lookup_media(media_uuid).unwrap();
@ -725,8 +767,10 @@ fn restore_list_worker(
task_log!(
worker,
"found snapshot {snapshot} on {}: file {file_num}",
"found snapshot {} on {}: file {}",
&snapshot,
media_id.label.label_text,
file_num
);
}
@ -773,8 +817,13 @@ fn restore_list_worker(
BTreeMap::new();
for (source_datastore, chunks) in datastore_chunk_map.into_iter() {
let datastore = store_map.target_store(&source_datastore).ok_or_else(|| {
format_err!("could not find mapping for source datastore: {source_datastore}")
let (datastore, _) = store_map
.get_targets(&source_datastore, &Default::default())
.ok_or_else(|| {
format_err!(
"could not find mapping for source datastore: {}",
source_datastore
)
})?;
for digest in chunks.into_iter() {
// we only want to restore chunks that we do not have yet
@ -796,7 +845,7 @@ fn restore_list_worker(
if !media_file_chunk_map.is_empty() {
task_log!(worker, "Phase 2: restore chunks to datastores");
} else {
task_log!(worker, "All chunks are already present, skip phase 2...");
task_log!(worker, "all chunks exist already, skipping phase 2...");
}
for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() {
@ -824,7 +873,9 @@ fn restore_list_worker(
format_err!("unexpected source datastore: {}", source_datastore)
})?;
for ns in target_ns.unwrap_or_else(|| vec![source_ns.clone()]) {
let namespaces = target_ns.unwrap_or_else(|| vec![source_ns.clone()]);
for ns in namespaces {
if let Err(err) = proxmox_lang::try_block!({
check_and_create_namespaces(
&user_info,
@ -841,9 +892,11 @@ fn restore_list_worker(
)?;
if restore_owner != &owner {
bail!(
"cannot restore snapshot '{snapshot}' into group '{}', owner check \
failed ({restore_owner} != {owner})",
"cannot restore snapshot '{}' into group '{}', owner check failed ({} != {})",
snapshot,
backup_dir.group,
restore_owner,
owner,
);
}
@ -883,7 +936,10 @@ fn restore_list_worker(
}) {
task_warn!(
worker,
"could not copy {source_datastore}:{snapshot}: {err}"
"could not copy {}:{}: {}",
source_datastore,
snapshot,
err,
);
errors = true;
}
@ -894,7 +950,12 @@ fn restore_list_worker(
std::fs::remove_dir_all(&tmp_path)
.map_err(|err| format_err!("remove_dir_all failed - {err}"))
}) {
task_warn!(worker, "could not clean up temp dir {tmp_path:?}: {err}");
task_warn!(
worker,
"could not clean up temp dir {:?}: {}",
tmp_path,
err,
);
errors = true;
};
}
@ -943,7 +1004,11 @@ fn get_media_set_catalog(
for (seq_nr, media_uuid) in media_list.iter().enumerate() {
match media_uuid {
None => {
bail!("media set {media_set_uuid} is incomplete (missing member {seq_nr}).");
bail!(
"media set {} is incomplete (missing member {}).",
media_set_uuid,
seq_nr
);
}
Some(media_uuid) => {
let media_id = inventory.lookup_media(media_uuid).unwrap();
@ -1016,7 +1081,9 @@ fn restore_snapshots_to_tmpdir(
if current_file_number != *file_num {
task_log!(
worker,
"was at file {current_file_number}, moving to {file_num}"
"was at file {}, moving to {}",
current_file_number,
file_num
);
drive.move_to_file(*file_num)?;
let current_file_number = drive.current_file_number()?;
@ -1036,7 +1103,7 @@ fn restore_snapshots_to_tmpdir(
let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
.map_err(|err| {
format_err!("unable to parse snapshot archive header - {err}")
format_err!("unable to parse snapshot archive header - {}", err)
})?;
let source_datastore = archive_header.store;
@ -1044,17 +1111,23 @@ fn restore_snapshots_to_tmpdir(
task_log!(
worker,
"File {file_num}: snapshot archive {source_datastore}:{snapshot}",
"File {}: snapshot archive {}:{}",
file_num,
source_datastore,
snapshot
);
let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
let target_datastore = match store_map.target_store(&source_datastore) {
Some(datastore) => datastore,
let target_datastore =
match store_map.get_targets(&source_datastore, &Default::default()) {
Some((datastore, _)) => datastore,
None => {
task_warn!(
worker,
"could not find target datastore for {source_datastore}:{snapshot}",
"could not find target datastore for {}:{}",
source_datastore,
snapshot
);
continue;
}
@ -1093,7 +1166,7 @@ fn restore_snapshots_to_tmpdir(
}
tmp_paths.push(tmp_path);
}
other => bail!("unexpected file type: {other:?}"),
other => bail!("unexpected file type: {:?}", other),
}
}
@ -1109,7 +1182,12 @@ fn restore_file_chunk_map(
for (nr, chunk_map) in file_chunk_map.iter_mut() {
let current_file_number = drive.current_file_number()?;
if current_file_number != *nr {
task_log!(worker, "was at file {current_file_number}, moving to {nr}");
task_log!(
worker,
"was at file {}, moving to {}",
current_file_number,
nr
);
drive.move_to_file(*nr)?;
let current_file_number = drive.current_file_number()?;
task_log!(worker, "now at file {}", current_file_number);
@ -1117,7 +1195,7 @@ fn restore_file_chunk_map(
let mut reader = drive.read_next_file()?;
let header: MediaContentHeader = unsafe { reader.read_le_value()? };
if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
bail!("file is missing the MediaContentHeader");
bail!("missing MediaContentHeader");
}
match header.content_magic {
@ -1125,17 +1203,21 @@ fn restore_file_chunk_map(
let header_data = reader.read_exact_allocated(header.size as usize)?;
let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
.map_err(|err| format_err!("unable to parse chunk archive header - {err}"))?;
.map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;
let source_datastore = archive_header.store;
task_log!(
worker,
"File {nr}: chunk archive for datastore '{source_datastore}'",
"File {}: chunk archive for datastore '{}'",
nr,
source_datastore
);
let datastore = store_map.target_store(&source_datastore).ok_or_else(|| {
format_err!("unexpected chunk archive for store: {source_datastore}")
let (datastore, _) = store_map
.get_targets(&source_datastore, &Default::default())
.ok_or_else(|| {
format_err!("unexpected chunk archive for store: {}", source_datastore)
})?;
let count = restore_partial_chunk_archive(
@ -1144,7 +1226,7 @@ fn restore_file_chunk_map(
datastore.clone(),
chunk_map,
)?;
task_log!(worker, "restored {count} chunks");
task_log!(worker, "restored {} chunks", count);
}
_ => bail!("unexpected content magic {:?}", header.content_magic),
}
@ -1191,12 +1273,14 @@ fn restore_partial_chunk_archive<'a>(
Some((digest, blob)) => (digest, blob),
None => break,
};
worker.check_abort()?;
if chunk_list.remove(&digest) {
verify_and_write_channel.send((blob, digest.clone()))?;
count += 1;
}
if chunk_list.is_empty() {
break;
}
@ -1207,12 +1291,14 @@ fn restore_partial_chunk_archive<'a>(
writer_pool.complete()?;
let elapsed = start_time.elapsed()?.as_secs_f64();
let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64;
let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst);
task_log!(
worker,
"restored {} ({:.2}/s)",
HumanByte::new_decimal(bytes),
HumanByte::new_decimal(bytes / elapsed),
"restored {} bytes ({:.2} MB/s)",
bytes,
(bytes as f64) / (1_000_000.0 * elapsed)
);
Ok(count)
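Master switches the restore summary to HumanByte::new_decimal() for both the total and the rate, where v2.2.1 prints raw bytes and a hand-computed MB/s. A sketch of a decimal human-readable formatter of that kind (an illustration, not the actual HumanByte implementation):

```rust
// Sketch of a decimal human-readable byte formatter; illustrative only.
fn human_decimal(bytes: f64) -> String {
    const UNITS: [&str; 5] = ["B", "KB", "MB", "GB", "TB"];
    let mut value = bytes;
    let mut unit = 0;
    while value >= 1000.0 && unit < UNITS.len() - 1 {
        value /= 1000.0;
        unit += 1;
    }
    format!("{value:.2} {}", UNITS[unit])
}

fn main() {
    let bytes = 1_234_567_890.0_f64;
    let elapsed = 12.5_f64; // seconds
    println!("restored {} ({}/s)", human_decimal(bytes), human_decimal(bytes / elapsed));
}
```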
@ -1376,7 +1462,7 @@ fn restore_archive<'a>(
let (backup_ns, backup_dir) = parse_ns_and_snapshot(&snapshot)?;
if let Some((store_map, restore_owner)) = target.as_ref() {
if let Some(datastore) = store_map.target_store(&datastore_name) {
if let Some((datastore, _)) = store_map.get_targets(&datastore_name, &backup_ns) {
check_and_create_namespaces(
&user_info,
&datastore,
@ -1465,20 +1551,20 @@ fn restore_archive<'a>(
);
let datastore = target
.as_ref()
.and_then(|t| t.0.target_store(&source_datastore));
.and_then(|t| t.0.get_targets(&source_datastore, &Default::default()));
if datastore.is_some() || target.is_none() {
let checked_chunks = checked_chunks_map
.entry(
datastore
.as_ref()
.map(|d| d.name())
.map(|(d, _)| d.name())
.unwrap_or("_unused_")
.to_string(),
)
.or_insert(HashSet::new());
let chunks = if let Some(datastore) = datastore {
let chunks = if let Some((datastore, _)) = datastore {
restore_chunk_archive(
worker.clone(),
reader,
@ -1653,12 +1739,14 @@ fn restore_chunk_archive<'a>(
writer_pool.complete()?;
let elapsed = start_time.elapsed()?.as_secs_f64();
let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64;
let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst);
task_log!(
worker,
"restored {} ({:.2}/s)",
HumanByte::new_decimal(bytes),
HumanByte::new_decimal(bytes / elapsed),
"restored {} bytes ({:.2} MB/s)",
bytes,
(bytes as f64) / (1_000_000.0 * elapsed)
);
Ok(Some(chunks))
@ -1921,7 +2009,7 @@ pub fn fast_catalog_restore(
if &media_uuid != catalog_uuid {
task_log!(
worker,
"catalog uuid mismatch at pos {}",
"catalog uuid missmatch at pos {}",
current_file_number
);
continue;
@ -1929,7 +2017,7 @@ pub fn fast_catalog_restore(
if media_set_uuid != archive_header.media_set_uuid {
task_log!(
worker,
"catalog media_set mismatch at pos {}",
"catalog media_set missmatch at pos {}",
current_file_number
);
continue;

View File

@ -1,96 +1,14 @@
use std::sync::Arc;
use anyhow::{bail, Error};
use anyhow::Error;
use pbs_api_types::{
privs_to_priv_names, Authid, BackupNamespace, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ,
Authid, BackupNamespace, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::{backup_info::BackupGroup, DataStore, ListGroups, ListNamespacesRecursive};
/// Asserts that `privs` are fulfilled on datastore + (optional) namespace.
pub fn check_ns_privs(
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
privs: u64,
) -> Result<(), Error> {
check_ns_privs_full(store, ns, auth_id, privs, 0).map(|_| ())
}
/// Asserts that `privs` for creating/destroying namespace in datastore are fulfilled.
pub fn check_ns_modification_privs(
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
) -> Result<(), Error> {
// we could allow it as an easy way to purge a whole datastore, but let's be more restrictive for now
if ns.is_root() {
// TODO
bail!("Cannot create/delete root namespace!");
}
let parent = ns.parent();
check_ns_privs(store, &parent, auth_id, PRIV_DATASTORE_MODIFY)
}
/// Asserts that either `full_access_privs` or `partial_access_privs` is fulfilled on
/// datastore + (optional) namespace.
///
/// Return value indicates whether further checks like group ownerships are required because
/// `full_access_privs` are missing.
pub fn check_ns_privs_full(
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
full_access_privs: u64,
partial_access_privs: u64,
) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
let acl_path = ns.acl_path(store);
let privs = user_info.lookup_privs(auth_id, &acl_path);
if full_access_privs != 0 && (privs & full_access_privs) != 0 {
return Ok(false);
}
if partial_access_privs != 0 && (privs & partial_access_privs) != 0 {
return Ok(true);
}
let priv_names = privs_to_priv_names(full_access_privs | partial_access_privs).join("|");
let path = format!("/{}", acl_path.join("/"));
proxmox_router::http_bail!(
FORBIDDEN,
"permission check failed - missing {priv_names} on {path}"
);
}
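check_ns_privs_full() distinguishes full access (no further checks) from partial access (the caller must still verify group ownership) and fails otherwise. A minimal sketch of that three-way outcome (bitmask values are illustrative):

```rust
// Minimal sketch of the three-way outcome: full access short-circuits,
// partial access tells the caller to still verify group ownership,
// anything else fails. Bitmask values are illustrative.
use anyhow::{bail, Error};

fn check_privs_full(privs: u64, full: u64, partial: u64) -> Result<bool, Error> {
    if full != 0 && privs & full != 0 {
        return Ok(false); // full access: no owner check needed
    }
    if partial != 0 && privs & partial != 0 {
        return Ok(true); // partial access: caller must check ownership
    }
    bail!("permission check failed");
}

fn main() -> Result<(), Error> {
    let owner_check_needed = check_privs_full(0b10, 0b01, 0b10)?;
    assert!(owner_check_needed);
    Ok(())
}
```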
pub fn can_access_any_namespace(
store: Arc<DataStore>,
auth_id: &Authid,
user_info: &CachedUserInfo,
) -> bool {
// NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
// below /datastore/{store}" helper
let mut iter =
if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
iter
} else {
return false;
};
let wanted =
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
let name = store.name();
iter.any(|ns| -> bool {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]);
user_privs & wanted != 0
})
}
/// A privilege aware iterator for all backup groups in all Namespaces below an anchor namespace,
/// A privilege aware iterator for all backup groups in all Namespaces below an anchor namespace,
/// most often that will be the `BackupNamespace::root()` one.
///
/// Is basically just a filter-iter for pbs_datastore::ListNamespacesRecursive including access and
@ -99,42 +17,23 @@ pub struct ListAccessibleBackupGroups<'a> {
store: &'a Arc<DataStore>,
auth_id: Option<&'a Authid>,
user_info: Arc<CachedUserInfo>,
/// The priv on NS level that allows auth_id to trump the owner check
override_owner_priv: u64,
/// The priv that auth_id is required to have on NS level additionally to being owner
owner_and_priv: u64,
/// Contains the internal state, group iter and a bool flag for override_owner_priv
state: Option<(ListGroups, bool)>,
state: Option<ListGroups>,
ns_iter: ListNamespacesRecursive,
}
impl<'a> ListAccessibleBackupGroups<'a> {
// TODO: builder pattern
pub fn new_owned(
pub fn new(
store: &'a Arc<DataStore>,
ns: BackupNamespace,
max_depth: usize,
auth_id: Option<&'a Authid>,
) -> Result<Self, Error> {
// only owned groups by default and no extra priv required
Self::new_with_privs(store, ns, max_depth, None, None, auth_id)
}
pub fn new_with_privs(
store: &'a Arc<DataStore>,
ns: BackupNamespace,
max_depth: usize,
override_owner_priv: Option<u64>,
owner_and_priv: Option<u64>,
auth_id: Option<&'a Authid>,
) -> Result<Self, Error> {
let ns_iter = ListNamespacesRecursive::new_max_depth(Arc::clone(store), ns, max_depth)?;
Ok(ListAccessibleBackupGroups {
auth_id,
ns_iter,
override_owner_priv: override_owner_priv.unwrap_or(0),
owner_and_priv: owner_and_priv.unwrap_or(0),
state: None,
store: store,
user_info: CachedUserInfo::new()?,
@ -142,20 +41,15 @@ impl<'a> ListAccessibleBackupGroups<'a> {
}
}
pub static NS_PRIVS_OK: u64 =
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT;
impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
type Item = Result<BackupGroup, Error>;
fn next(&mut self) -> Option<Self::Item> {
const PRIVS_OK: u64 = PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT;
loop {
if let Some((ref mut state, override_owner)) = self.state {
if let Some(ref mut state) = self.state {
match state.next() {
Some(Ok(group)) => {
if override_owner {
return Some(Ok(group));
}
if let Some(auth_id) = &self.auth_id {
match self.store.owns_backup(
&group.backup_ns(),
@ -178,26 +72,22 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
} else {
match self.ns_iter.next() {
Some(Ok(ns)) => {
let mut override_owner = false;
if let Some(auth_id) = &self.auth_id {
let info = &self.user_info;
let privs =
info.lookup_privs(&auth_id, &ns.acl_path(self.store.name()));
if privs & NS_PRIVS_OK == 0 {
let privs = if ns.is_root() {
info.lookup_privs(&auth_id, &["datastore", self.store.name()])
} else {
info.lookup_privs(
&auth_id,
&["datastore", self.store.name(), &ns.to_string()],
)
};
if privs & PRIVS_OK == 0 {
continue;
}
// check first if *any* override owner priv is available up front
if privs & self.override_owner_priv != 0 {
override_owner = true;
} else if privs & self.owner_and_priv != self.owner_and_priv {
continue; // no owner override and no extra privs -> nothing visible
}
}
self.state = match ListGroups::new(Arc::clone(&self.store), ns) {
Ok(iter) => Some((iter, override_owner)),
Ok(iter) => Some(iter),
Err(err) => return Some(Err(err)),
};
}

View File

@ -9,8 +9,8 @@ use anyhow::{bail, format_err, Error};
use proxmox_sys::{task_log, WorkerTaskContext};
use pbs_api_types::{
print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode,
SnapshotVerifyState, VerifyState, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY, UPID,
print_ns_and_snapshot, Authid, BackupNamespace, BackupType, CryptMode, DatastoreWithNamespace,
SnapshotVerifyState, VerifyState, UPID,
};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
@ -453,10 +453,14 @@ pub fn verify_backup_group(
let mut list = match group.list_backups() {
Ok(list) => list,
Err(err) => {
let store_with_ns = DatastoreWithNamespace {
store: verify_worker.datastore.name().to_owned(),
ns: group.backup_ns().clone(),
};
task_log!(
verify_worker.worker,
"verify {}, group {} - unable to list backups: {}",
print_store_and_ns(verify_worker.datastore.name(), group.backup_ns()),
store_with_ns,
group.group(),
err,
);
@ -525,14 +529,7 @@ pub fn verify_all_backups(
let store = &verify_worker.datastore;
let max_depth = max_depth.unwrap_or(pbs_api_types::MAX_NAMESPACE_DEPTH);
let mut list = match ListAccessibleBackupGroups::new_with_privs(
store,
ns.clone(),
max_depth,
Some(PRIV_DATASTORE_VERIFY),
Some(PRIV_DATASTORE_BACKUP),
owner,
) {
let mut list = match ListAccessibleBackupGroups::new(store, ns.clone(), max_depth, owner) {
Ok(list) => list
.filter_map(|group| match group {
Ok(group) => Some(group),
@ -578,7 +575,7 @@ pub fn verify_all_backups(
Ok(errors)
}
/// Filter out any snapshot from being (re-)verified where this fn returns false.
/// Filter for the verification of snapshots
pub fn verify_filter(
ignore_verified_snapshots: bool,
outdated_after: Option<i64>,
@ -598,7 +595,7 @@ pub fn verify_filter(
let now = proxmox_time::epoch_i64();
let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;
days_since_last_verify > max_age
max_age == 0 || days_since_last_verify > max_age
}
}
}
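The filter's last line is the behavioral diff here: v2.2.1 short-circuits with max_age == 0 meaning "always re-verify", while master relies on the plain comparison. A small sketch contrasting the two (inputs reduced to plain integers):

```rust
// Contrast of the two filter variants above; inputs are simplified.
fn needs_reverify(days_since_last_verify: i64, max_age: i64, zero_means_always: bool) -> bool {
    if zero_means_always && max_age == 0 {
        return true;
    }
    days_since_last_verify > max_age
}

fn main() {
    // snapshot verified earlier today, max_age == 0:
    assert!(needs_reverify(0, 0, true)); // v2.2.1: re-verify anyway
    assert!(!needs_reverify(0, 0, false)); // master: 0 > 0 is false, skip
}
```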

View File

@ -430,7 +430,6 @@ async fn run() -> Result<(), Error> {
.insert("subscription", subscription_commands())
.insert("sync-job", sync_job_commands())
.insert("verify-job", verify_job_commands())
.insert("prune-job", prune_job_commands())
.insert("task", task_mgmt_cli())
.insert(
"pull",
@ -453,9 +452,6 @@ async fn run() -> Result<(), Error> {
.insert("versions", CliCommand::new(&API_METHOD_GET_VERSIONS));
let args: Vec<String> = std::env::args().take(2).collect();
if args.len() >= 2 && args[1] == "update-to-prune-jobs-config" {
return update_to_prune_jobs_config();
}
let avoid_init = args.len() >= 2 && (args[1] == "bashcomplete" || args[1] == "printdoc");
if !avoid_init {
@ -463,7 +459,6 @@ async fn run() -> Result<(), Error> {
let file_opts = CreateOptions::new()
.owner(backup_user.uid)
.group(backup_user.gid);
proxmox_rest_server::init_worker_tasks(
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
file_opts,

View File

@ -47,7 +47,7 @@ use pbs_buildcfg::configdir;
use proxmox_time::CalendarEvent;
use pbs_api_types::{
Authid, DataStoreConfig, Operation, PruneJobConfig, SyncJobConfig, TapeBackupJobConfig,
Authid, DataStoreConfig, Operation, PruneOptions, SyncJobConfig, TapeBackupJobConfig,
VerificationJobConfig,
};
@ -557,7 +557,7 @@ async fn run_task_scheduler() {
async fn schedule_tasks() -> Result<(), Error> {
schedule_datastore_garbage_collection().await;
schedule_datastore_prune_jobs().await;
schedule_datastore_prune().await;
schedule_datastore_sync_jobs().await;
schedule_datastore_verify_jobs().await;
schedule_tape_backup_jobs().await;
@ -667,47 +667,55 @@ async fn schedule_datastore_garbage_collection() {
}
}
async fn schedule_datastore_prune_jobs() {
let config = match pbs_config::prune::config() {
async fn schedule_datastore_prune() {
let config = match pbs_config::datastore::config() {
Err(err) => {
eprintln!("unable to read prune job config - {}", err);
eprintln!("unable to read datastore config - {}", err);
return;
}
Ok((config, _digest)) => config,
};
for (job_id, (_, job_config)) in config.sections {
let job_config: PruneJobConfig = match serde_json::from_value(job_config) {
for (store, (_, store_config)) in config.sections {
let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
Ok(c) => c,
Err(err) => {
eprintln!("prune job config from_value failed - {}", err);
eprintln!("datastore '{}' config from_value failed - {}", store, err);
continue;
}
};
if job_config.disable {
let event_str = match store_config.prune_schedule {
Some(event_str) => event_str,
None => continue,
};
let prune_options = PruneOptions {
keep_last: store_config.keep_last,
keep_hourly: store_config.keep_hourly,
keep_daily: store_config.keep_daily,
keep_weekly: store_config.keep_weekly,
keep_monthly: store_config.keep_monthly,
keep_yearly: store_config.keep_yearly,
};
if !pbs_datastore::prune::keeps_something(&prune_options) {
// no prune settings - keep all
continue;
}
if !job_config.options.keeps_something() {
// no 'keep' values set, keep all
continue;
}
let worker_type = "prunejob";
let auth_id = Authid::root_auth_id().clone();
if check_schedule(worker_type, &job_config.schedule, &job_id) {
let job = match Job::new(worker_type, &job_id) {
let worker_type = "prune";
if check_schedule(worker_type, &event_str, &store) {
let job = match Job::new(worker_type, &store) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
if let Err(err) = do_prune_job(
job,
job_config.options,
job_config.store,
&auth_id,
Some(job_config.schedule),
) {
eprintln!("unable to start datastore prune job {} - {}", &job_id, err);
let auth_id = Authid::root_auth_id().clone();
if let Err(err) =
do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str))
{
eprintln!("unable to start datastore prune job {} - {}", &store, err);
}
};
}
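Scheduling skips any prune configuration whose keep-* values are all unset, since such a job would delete nothing. A reduced sketch of the keeps_something() guard (field set abbreviated):

```rust
// Reduced sketch of the keeps_something() guard: no keep-* values set
// means "keep everything", so no prune job is scheduled.
#[derive(Default)]
struct PruneOptions {
    keep_last: Option<u64>,
    keep_daily: Option<u64>,
    keep_weekly: Option<u64>,
}

impl PruneOptions {
    fn keeps_something(&self) -> bool {
        self.keep_last.is_some() || self.keep_daily.is_some() || self.keep_weekly.is_some()
    }
}

fn main() {
    assert!(!PruneOptions::default().keeps_something()); // skip: keeps all
    let opts = PruneOptions { keep_daily: Some(7), ..Default::default() };
    assert!(opts.keeps_something());
}
```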
@ -837,7 +845,10 @@ async fn schedule_task_log_rotate() {
if !check_schedule(worker_type, schedule, job_id) {
// if we never ran the rotation, schedule instantly
match jobstate::JobState::load(worker_type, job_id) {
Ok(jobstate::JobState::Created { .. }) => {}
Ok(state) => match state {
jobstate::JobState::Created { .. } => {}
_ => return,
},
_ => return,
}
}
@ -1001,7 +1012,7 @@ async fn run_stat_generator() {
async fn generate_host_stats() {
match tokio::task::spawn_blocking(generate_host_stats_sync).await {
Ok(()) => (),
Err(err) => log::error!("generate_host_stats panicked: {}", err),
Err(err) => log::error!("generate_host_stats paniced: {}", err),
}
}
@ -1180,6 +1191,10 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &st
}
// Rate Limiter lookup
// Test WITH
// proxmox-backup-client restore vm/201/2021-10-22T09:55:56Z drive-scsi0.img img1.img --repository localhost:store2
async fn run_traffic_control_updater() {
loop {
let delay_target = Instant::now() + Duration::from_secs(1);

View File

@ -292,7 +292,7 @@ async fn load_media_from_slot(mut param: Value) -> Result<(), Error> {
let client = connect_to_localhost()?;
let path = format!("api2/json/tape/drive/{}/load-slot", drive);
client.post(&path, Some(param)).await?;
client.put(&path, Some(param)).await?;
Ok(())
}

View File

@ -1,6 +1,7 @@
use std::collections::HashSet;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::io::{stdout, Read, Seek, SeekFrom, Write};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::path::Path;
use anyhow::{bail, format_err, Error};
@ -26,6 +27,18 @@ use pbs_datastore::index::IndexFile;
use pbs_datastore::DataBlob;
use pbs_tools::crypt_config::CryptConfig;
// Returns either a new file, if a path is given, or stdout, if no path is given.
fn outfile_or_stdout<P: AsRef<Path>>(
path: Option<P>,
) -> std::io::Result<Box<dyn Write + Send + Sync + Unpin + RefUnwindSafe + UnwindSafe>> {
if let Some(path) = path {
let f = File::create(path)?;
Ok(Box::new(f) as Box<_>)
} else {
Ok(Box::new(stdout()) as Box<_>)
}
}
/// Decodes a blob and writes its content either to stdout or into a file
fn decode_blob(
mut output_path: Option<&Path>,
@ -48,8 +61,7 @@ fn decode_blob(
_ => output_path,
};
crate::outfile_or_stdout(output_path)?
.write_all(blob.decode(crypt_conf_opt, digest)?.as_slice())?;
outfile_or_stdout(output_path)?.write_all(blob.decode(crypt_conf_opt, digest)?.as_slice())?;
Ok(())
}
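outfile_or_stdout() multiplexes the decoded output between a freshly created file and stdout behind a boxed Write. A trimmed usage sketch (the trait-object bounds are reduced to plain Write for brevity):

```rust
// Trimmed sketch of the helper above: None streams to stdout,
// Some(path) writes a file. Bounds reduced to Write for brevity.
use std::fs::File;
use std::io::{stdout, Write};
use std::path::Path;

fn outfile_or_stdout<P: AsRef<Path>>(path: Option<P>) -> std::io::Result<Box<dyn Write>> {
    Ok(match path {
        Some(path) => Box::new(File::create(path)?),
        None => Box::new(stdout()),
    })
}

fn main() -> std::io::Result<()> {
    let mut out = outfile_or_stdout(None::<&Path>)?; // decoded blob -> stdout
    out.write_all(b"decoded blob contents\n")
}
```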

View File

@ -1,22 +1,3 @@
use std::{
fs::File,
io::{stdout, Write},
panic::{RefUnwindSafe, UnwindSafe},
path::Path,
};
pub mod api;
pub mod inspect;
pub mod recover;
// Returns either a new file, if a path is given, or stdout, if no path is given.
pub(crate) fn outfile_or_stdout<P: AsRef<Path>>(
path: Option<P>,
) -> std::io::Result<Box<dyn Write + Send + Sync + Unpin + RefUnwindSafe + UnwindSafe>> {
if let Some(path) = path {
let f = File::create(path)?;
Ok(Box::new(f) as Box<_>)
} else {
Ok(Box::new(stdout()) as Box<_>)
}
}

View File

@ -3,6 +3,7 @@ use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
use proxmox_schema::api;
@ -24,7 +25,7 @@ use pbs_tools::crypt_config::CryptConfig;
type: String,
},
chunks: {
description: "Path to the directory that contains the chunks, usually <datastore>/.chunks.",
description: "Path to the directorty that contains the chunks, usually <datastore>/.chunks.",
type: String,
},
"keyfile": {
@ -37,24 +38,7 @@ use pbs_tools::crypt_config::CryptConfig;
type: Boolean,
optional: true,
default: false,
},
"ignore-missing-chunks": {
description: "If a chunk is missing, warn and write 0-bytes instead to attempt partial recovery.",
type: Boolean,
optional: true,
default: false,
},
"ignore-corrupt-chunks": {
description: "If a chunk is corrupt, warn and write 0-bytes instead to attempt partial recovery.",
type: Boolean,
optional: true,
default: false,
},
"output-path": {
type: String,
description: "Output file path, defaults to `file` without extension, '-' means STDOUT.",
optional: true,
},
}
}
}
)]
@ -65,9 +49,7 @@ fn recover_index(
chunks: String,
keyfile: Option<String>,
skip_crc: bool,
ignore_missing_chunks: bool,
ignore_corrupt_chunks: bool,
output_path: Option<String>,
_param: Value,
) -> Result<(), Error> {
let file_path = Path::new(&file);
let chunks_path = Path::new(&chunks);
@ -96,16 +78,9 @@ fn recover_index(
None
};
let output_path = output_path.unwrap_or_else(|| {
let filename = file_path.file_stem().unwrap().to_str().unwrap();
filename.to_string()
});
let output_path = match output_path.as_str() {
"-" => None,
path => Some(path),
};
let mut output_file = crate::outfile_or_stdout(output_path)
let output_filename = file_path.file_stem().unwrap().to_str().unwrap();
let output_path = Path::new(output_filename);
let mut output_file = File::create(output_path)
.map_err(|e| format_err!("could not create output file - {}", e))?;
let mut data = Vec::with_capacity(4 * 1024 * 1024);
@ -114,78 +89,22 @@ fn recover_index(
let digest_str = hex::encode(chunk_digest);
let digest_prefix = &digest_str[0..4];
let chunk_path = chunks_path.join(digest_prefix).join(digest_str);
let mut chunk_file = std::fs::File::open(&chunk_path)
.map_err(|e| format_err!("could not open chunk file - {}", e))?;
let create_zero_chunk = |msg: String| -> Result<(DataBlob, Option<&[u8; 32]>), Error> {
let info = index
.chunk_info(pos)
.ok_or_else(|| format_err!("Couldn't read chunk info from index at {pos}"))?;
let size = info.size();
eprintln!("WARN: chunk {:?} {}", chunk_path, msg);
eprintln!("WARN: replacing output file {:?} with '\\0'", info.range,);
Ok((
DataBlob::encode(&vec![0; size as usize], crypt_conf_opt.as_ref(), true)?,
None,
))
};
let (chunk_blob, chunk_digest) = match std::fs::File::open(&chunk_path) {
Ok(mut chunk_file) => {
data.clear();
chunk_file.read_to_end(&mut data)?;
let chunk_blob = DataBlob::from_raw(data.clone())?;
// first chance for corrupt chunk - handling magic fails
DataBlob::from_raw(data.clone())
.map(|blob| (blob, Some(chunk_digest)))
.or_else(|err| {
if ignore_corrupt_chunks {
create_zero_chunk(format!("is corrupt - {err}"))
} else {
bail!("{err}");
if !skip_crc {
chunk_blob.verify_crc()?;
}
})?
}
Err(err) => {
if ignore_missing_chunks && err.kind() == std::io::ErrorKind::NotFound {
create_zero_chunk("is missing".to_string())?
} else {
bail!("could not open chunk file - {}", err);
}
}
};
// second chance - we need CRC to detect truncated chunks!
let crc_res = if skip_crc {
Ok(())
} else {
chunk_blob.verify_crc()
};
let (chunk_blob, chunk_digest) = if let Err(crc_err) = crc_res {
if ignore_corrupt_chunks {
create_zero_chunk(format!("is corrupt - {crc_err}"))?
} else {
bail!("Error at chunk {:?} - {crc_err}", chunk_path);
}
} else {
(chunk_blob, chunk_digest)
};
// third chance - decoding might fail (digest, compression, encryption)
let decoded = chunk_blob
.decode(crypt_conf_opt.as_ref(), chunk_digest)
.or_else(|err| {
if ignore_corrupt_chunks {
create_zero_chunk(format!("fails to decode - {err}"))?
.0
.decode(crypt_conf_opt.as_ref(), None)
} else {
bail!("Failed to decode chunk {:?} = {}", chunk_path, err);
}
})?;
output_file.write_all(decoded.as_slice())?;
output_file.write_all(
chunk_blob
.decode(crypt_conf_opt.as_ref(), Some(chunk_digest))?
.as_slice(),
)?;
}
Ok(())

View File

@ -10,8 +10,6 @@ mod dns;
pub use dns::*;
mod network;
pub use network::*;
mod prune;
pub use prune::*;
mod remote;
pub use remote::*;
mod sync;

View File

@ -1,242 +0,0 @@
use std::collections::HashMap;
use anyhow::Error;
use serde::Deserialize;
use serde_json::Value;
use proxmox_router::{cli::*, ApiHandler, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{DataStoreConfig, PruneJobConfig, PruneJobOptions, JOB_ID_SCHEMA};
use pbs_config::prune;
use proxmox_backup::api2;
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List all prune jobs
fn list_prune_jobs(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::prune::API_METHOD_LIST_PRUNE_JOBS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("id"))
.column(ColumnConfig::new("disable"))
.column(ColumnConfig::new("store"))
.column(ColumnConfig::new("ns"))
.column(ColumnConfig::new("schedule"))
.column(ColumnConfig::new("max-depth"))
.column(ColumnConfig::new("keep-last"))
.column(ColumnConfig::new("keep-hourly"))
.column(ColumnConfig::new("keep-daily"))
.column(ColumnConfig::new("keep-weekly"))
.column(ColumnConfig::new("keep-monthly"))
.column(ColumnConfig::new("keep-yearly"));
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);
Ok(Value::Null)
}
#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Show prune job configuration
fn show_prune_job(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::prune::API_METHOD_READ_PRUNE_JOB;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options();
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn prune_job_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_PRUNE_JOBS))
.insert(
"show",
CliCommand::new(&API_METHOD_SHOW_PRUNE_JOB)
.arg_param(&["id"])
.completion_cb("id", pbs_config::prune::complete_prune_job_id),
)
.insert(
"create",
CliCommand::new(&api2::config::prune::API_METHOD_CREATE_PRUNE_JOB)
.arg_param(&["id"])
.completion_cb("id", pbs_config::prune::complete_prune_job_id)
.completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
.completion_cb("store", pbs_config::datastore::complete_datastore_name)
.completion_cb("ns", complete_prune_local_datastore_namespace),
)
.insert(
"update",
CliCommand::new(&api2::config::prune::API_METHOD_UPDATE_PRUNE_JOB)
.arg_param(&["id"])
.completion_cb("id", pbs_config::prune::complete_prune_job_id)
.completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
.completion_cb("store", pbs_config::datastore::complete_datastore_name)
.completion_cb("ns", complete_prune_local_datastore_namespace),
)
.insert(
"remove",
CliCommand::new(&api2::config::prune::API_METHOD_DELETE_PRUNE_JOB)
.arg_param(&["id"])
.completion_cb("id", pbs_config::prune::complete_prune_job_id),
);
cmd_def.into()
}
// shell completion helper
fn complete_prune_local_datastore_namespace(
_arg: &str,
param: &HashMap<String, String>,
) -> Vec<String> {
let mut list = Vec::new();
let mut rpcenv = CliEnvironment::new();
rpcenv.set_auth_id(Some(String::from("root@pam")));
let mut job: Option<PruneJobConfig> = None;
let store = param.get("store").map(|r| r.to_owned()).or_else(|| {
if let Some(id) = param.get("id") {
job = get_prune_job(id).ok();
if let Some(ref job) = job {
return Some(job.store.clone());
}
}
None
});
if let Some(store) = store {
if let Ok(data) =
crate::api2::admin::namespace::list_namespaces(store, None, None, &mut rpcenv)
{
for item in data {
list.push(item.ns.name());
}
}
}
list
}
fn get_prune_job(id: &str) -> Result<PruneJobConfig, Error> {
let (config, _digest) = prune::config()?;
config.lookup("prune", id)
}
pub(crate) fn update_to_prune_jobs_config() -> Result<(), Error> {
use pbs_config::datastore;
let _prune_lock = prune::lock_config()?;
let _datastore_lock = datastore::lock_config()?;
let (mut data, _digest) = prune::config()?;
let (mut storeconfig, _digest) = datastore::config()?;
for (store, entry) in storeconfig.sections.iter_mut() {
let ty = &entry.0;
if ty != "datastore" {
continue;
}
let mut config = match DataStoreConfig::deserialize(&entry.1) {
Ok(c) => c,
Err(err) => {
eprintln!("failed to parse config of store {store}: {err}");
continue;
}
};
let options = PruneJobOptions {
keep: std::mem::take(&mut config.keep),
..Default::default()
};
let schedule = config.prune_schedule.take();
entry.1 = serde_json::to_value(config)?;
let schedule = match schedule {
Some(s) => s,
None => {
if options.keeps_something() {
eprintln!(
"dropping prune job without schedule from datastore '{store}' in datastore.cfg"
);
} else {
eprintln!("ignoring empty prune job of datastore '{store}' in datastore.cfg");
}
continue;
}
};
let mut id = format!("storeconfig-{store}");
id.truncate(32);
if data.sections.contains_key(&id) {
eprintln!("skipping existing converted prune job for datastore '{store}': {id}");
continue;
}
if !options.keeps_something() {
eprintln!("dropping empty prune job of datastore '{store}' in datastore.cfg");
continue;
}
let prune_config = PruneJobConfig {
id: id.clone(),
store: store.clone(),
disable: false,
comment: None,
schedule,
options,
};
let prune_config = serde_json::to_value(prune_config)?;
data.sections
.insert(id, ("prune".to_string(), prune_config));
eprintln!(
"migrating prune job of datastore '{store}' from datastore.cfg to prune.cfg jobs"
);
}
prune::save_config(&data)?;
datastore::save_config(&storeconfig)?;
Ok(())
}

View File

@ -154,7 +154,7 @@ pub fn complete_acme_plugin(_arg: &str, _param: &HashMap<String, String>) -> Vec
pub fn complete_acme_plugin_type(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
vec![
"dns".to_string(),
//"http".to_string(), // makes currently not really sense to create or the like
//"http".to_string(), // makes currently not realyl sense to create or the like
]
}

View File

@ -60,7 +60,7 @@ pub fn create_configdir() -> Result<(), Error> {
match nix::unistd::mkdir(cfgdir, Mode::from_bits_truncate(0o700)) {
Ok(()) => {}
Err(nix::errno::Errno::EEXIST) => {
Err(nix::Error::Sys(nix::errno::Errno::EEXIST)) => {
check_configdir_permissions()?;
return Ok(());
}

View File

@ -40,7 +40,11 @@ pub fn do_garbage_collection_job(
let status = worker.create_state(&result);
if let Err(err) = job.finish(status) {
eprintln!("could not finish job state for {}: {}", job.jobtype(), err);
eprintln!(
"could not finish job state for {}: {}",
job.jobtype().to_string(),
err
);
}
if let Some(email) = email {

View File

@ -112,17 +112,24 @@ where
pub fn remove_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
let mut path = get_path(jobtype, jobname);
let _lock = get_lock(&path)?;
if let Err(err) = std::fs::remove_file(&path) {
if err.kind() != std::io::ErrorKind::NotFound {
bail!("cannot remove statefile for {jobtype} - {jobname}: {err}");
}
}
std::fs::remove_file(&path).map_err(|err| {
format_err!(
"cannot remove statefile for {} - {}: {}",
jobtype,
jobname,
err
)
})?;
path.set_extension("lck");
if let Err(err) = std::fs::remove_file(&path) {
if err.kind() != std::io::ErrorKind::NotFound {
bail!("cannot remove lockfile for {jobtype} - {jobname}: {err}");
}
}
// ignore errors
let _ = std::fs::remove_file(&path).map_err(|err| {
format_err!(
"cannot remove lockfile for {} - {}: {}",
jobtype,
jobname,
err
)
});
Ok(())
}
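Master makes the state-file cleanup idempotent: a missing file is tolerated, every other error still fails, and the lock file is removed best-effort. A minimal sketch of the tolerate-NotFound pattern (the path is illustrative):

```rust
// Minimal sketch of the tolerate-NotFound pattern from the master side
// above: a missing state file is fine, any other error still fails.
use anyhow::{bail, Error};
use std::path::Path;

fn remove_if_exists(path: &Path) -> Result<(), Error> {
    if let Err(err) = std::fs::remove_file(path) {
        if err.kind() != std::io::ErrorKind::NotFound {
            bail!("cannot remove {:?}: {}", path, err);
        }
    }
    Ok(())
}

fn main() -> Result<(), Error> {
    remove_if_exists(Path::new("/tmp/does-not-exist.state")) // Ok even if absent
}
```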

Some files were not shown because too many files have changed in this diff.