Compare commits

18 Commits:

  0903403ce7
  b6563f48ad
  932390bd46
  6b7688aa98
  ab0cf7e6a1
  264779e704
  7f3d91003c
  14e0862509
  9e733dae48
  bfea476be2
  385cf2bd9d
  d6373f3525
  01f37e01c3
  b4fb262335
  5499bd3dee
  d771a608f5
  227a39b34b
  f9beae9cc9
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.9.2"
+version = "0.9.3"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
debian/changelog (20 lines changed)
@@ -1,4 +1,20 @@
-rust-proxmox-backup (0.9.2-1) unstable; urgency=medium
+rust-proxmox-backup (0.9.3-1) unstable; urgency=medium
+
+  * fix #2998: encode mtime as i64 instead of u64
+
+  * GC: log the number of leftover bad chunks we could not yet cleanup, as no
+    valid one replaced them. Also log deduplication factor.
+
+  * send sync job status emails
+
+  * api: datstore status: introduce proper structs and restore compatibility
+    to 0.9.1
+
+  * ui: drop id field from verify/sync add window, they are now seen as internal
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 29 Oct 2020 14:58:13 +0100
+
+rust-proxmox-backup (0.9.2-2) unstable; urgency=medium
 
   * rework server web-interface, move more datastore related panels as tabs
     inside the datastore view

@@ -76,7 +92,7 @@ rust-proxmox-backup (0.9.2-1) unstable; urgency=medium
 
   * ui: datastore: show snapshot manifest comment and allow to edit them
 
- -- Proxmox Support Team <support@proxmox.com>  Wed, 28 Oct 2020 21:27:02 +0100
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 28 Oct 2020 23:05:41 +0100
 
 rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
@@ -423,12 +423,18 @@ pub fn list_snapshots (
     Ok(snapshots)
 }
 
-// returns a map from type to (group_count, snapshot_count)
-fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize)>, Error> {
+fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> {
     let base_path = store.base_path();
     let backup_list = BackupInfo::list_backups(&base_path)?;
     let mut groups = HashSet::new();
-    let mut result: HashMap<String, (usize, usize)> = HashMap::new();
+
+    let mut result = Counts {
+        ct: None,
+        host: None,
+        vm: None,
+        other: None,
+    };
 
     for info in backup_list {
         let group = info.backup_dir.group();

@@ -441,13 +447,23 @@ fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize
             new_id = true;
         }
 
-        if let Some(mut counts) = result.get_mut(backup_type) {
-            counts.1 += 1;
-            if new_id {
-                counts.0 += 1;
-            }
-        } else {
-            result.insert(backup_type.to_string(), (1, 1));
-        }
+        let mut counts = match backup_type {
+            "ct" => result.ct.take().unwrap_or(Default::default()),
+            "host" => result.host.take().unwrap_or(Default::default()),
+            "vm" => result.vm.take().unwrap_or(Default::default()),
+            _ => result.other.take().unwrap_or(Default::default()),
+        };
+
+        counts.snapshots += 1;
+        if new_id {
+            counts.groups += 1;
+        }
+
+        match backup_type {
+            "ct" => result.ct = Some(counts),
+            "host" => result.host = Some(counts),
+            "vm" => result.vm = Some(counts),
+            _ => result.other = Some(counts),
+        }
     }
@@ -463,21 +479,7 @@ fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize
         },
     },
     returns: {
-        description: "The overall Datastore status and information.",
-        type: Object,
-        properties: {
-            storage: {
-                type: StorageStatus,
-            },
-            counts: {
-                description: "Group and Snapshot counts per Type",
-                type: Object,
-                properties: { },
-            },
-            "gc-status": {
-                type: GarbageCollectionStatus,
-            },
-        },
+        type: DataStoreStatus,
     },
     access: {
         permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
@@ -488,19 +490,19 @@ pub fn status(
     store: String,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+) -> Result<DataStoreStatus, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
-    let storage_status = crate::tools::disks::disk_usage(&datastore.base_path())?;
-    let counts = get_snaphots_count(&datastore)?;
+    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
+    let counts = get_snapshots_count(&datastore)?;
     let gc_status = datastore.last_gc_status();
 
-    let res = json!({
-        "storage": storage_status,
-        "counts": counts,
-        "gc-status": gc_status,
-    });
-
-    Ok(res)
+    Ok(DataStoreStatus {
+        total: storage.total,
+        used: storage.used,
+        avail: storage.avail,
+        gc_status,
+        counts,
+    })
 }
 
 #[api(
@@ -579,7 +581,6 @@ pub fn verify(
         move |worker| {
             let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
             let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
-            let filter = |_backup_info: &BackupInfo| { true };
 
             let failed_dirs = if let Some(backup_dir) = backup_dir {
                 let mut res = Vec::new();

@@ -590,6 +591,7 @@ pub fn verify(
                     corrupt_chunks,
                     worker.clone(),
                     worker.upid().clone(),
+                    None,
                 )? {
                     res.push(backup_dir.to_string());
                 }

@@ -603,11 +605,11 @@ pub fn verify(
                     None,
                     worker.clone(),
                     worker.upid(),
-                    &filter,
+                    None,
                 )?;
                 failed_dirs
             } else {
-                verify_all_backups(datastore, worker.clone(), worker.upid(), &filter)?
+                verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
             };
             if failed_dirs.len() > 0 {
                 worker.log("Failed to verify following snapshots:");
@@ -533,6 +533,7 @@ impl BackupEnvironment {
             corrupt_chunks,
             worker.clone(),
             worker.upid().clone(),
+            None,
             snap_lock,
         )? {
             bail!("verification failed - please check the log for details");
@@ -75,6 +75,8 @@ pub fn do_sync_job(
     let job_id = job.jobname().to_string();
     let worker_type = job.jobtype().to_string();
 
+    let email = crate::server::lookup_user_email(userid);
+
     let upid_str = WorkerTask::spawn(
         &worker_type,
         Some(job.jobname().to_string()),

@@ -85,6 +87,7 @@ pub fn do_sync_job(
         job.start(&worker.upid().to_string())?;
 
         let worker2 = worker.clone();
+        let sync_job2 = sync_job.clone();
 
         let worker_future = async move {

@@ -107,12 +110,12 @@ pub fn do_sync_job(
 
         let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));
 
-        let res = select!{
+        let result = select!{
            worker = worker_future.fuse() => worker,
            abort = abort_future => abort,
        };
 
-        let status = worker2.create_state(&res);
+        let status = worker2.create_state(&result);
 
        match job.finish(status) {
            Ok(_) => {},

@@ -121,7 +124,13 @@ pub fn do_sync_job(
             }
         }
 
-        res
+        if let Some(email) = email {
+            if let Err(err) = crate::server::send_sync_status(&email, &sync_job2, &result) {
+                eprintln!("send sync notification failed: {}", err);
+            }
+        }
+
+        result
     })?;
 
     Ok(upid_str)
@@ -622,6 +622,71 @@ pub struct StorageStatus {
     pub avail: u64,
 }
 
+#[api()]
+#[derive(Serialize, Deserialize, Default)]
+/// Backup Type group/snapshot counts.
+pub struct TypeCounts {
+    /// The number of groups of the type.
+    pub groups: u64,
+    /// The number of snapshots of the type.
+    pub snapshots: u64,
+}
+
+#[api(
+    properties: {
+        ct: {
+            type: TypeCounts,
+            optional: true,
+        },
+        host: {
+            type: TypeCounts,
+            optional: true,
+        },
+        vm: {
+            type: TypeCounts,
+            optional: true,
+        },
+        other: {
+            type: TypeCounts,
+            optional: true,
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+/// Counts of groups/snapshots per BackupType.
+pub struct Counts {
+    /// The counts for CT backups
+    pub ct: Option<TypeCounts>,
+    /// The counts for Host backups
+    pub host: Option<TypeCounts>,
+    /// The counts for VM backups
+    pub vm: Option<TypeCounts>,
+    /// The counts for other backup types
+    pub other: Option<TypeCounts>,
+}
+
+#[api(
+    properties: {
+        "gc-status": { type: GarbageCollectionStatus, },
+        counts: { type: Counts, }
+    },
+)]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Overall Datastore status and useful information.
+pub struct DataStoreStatus {
+    /// Total space (bytes).
+    pub total: u64,
+    /// Used space (bytes).
+    pub used: u64,
+    /// Available space (bytes).
+    pub avail: u64,
+    /// Status of last GC
+    pub gc_status: GarbageCollectionStatus,
+    /// Group/Snapshot counts
+    pub counts: Counts,
+}
+
 #[api(
     properties: {
         upid: { schema: UPID_SCHEMA },
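For illustration only (the numbers below are hypothetical, not taken from the commit): with #[serde(rename_all="kebab-case")], the typed DataStoreStatus serializes to the same wire shape the old json! block produced, which is what restores compatibility with 0.9.1 clients. None counts appear as null here; whether they are omitted instead depends on serde attributes not shown in this hunk:

{
  "total": 32212254720,
  "used": 10737418240,
  "avail": 21474836480,
  "gc-status": { ... },
  "counts": {
    "ct": { "groups": 2, "snapshots": 10 },
    "host": null,
    "vm": { "groups": 3, "snapshots": 24 },
    "other": null
  }
}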
@@ -78,7 +78,7 @@ pub struct DirEntry {
 #[derive(Clone, Debug, PartialEq)]
 pub enum DirEntryAttribute {
     Directory { start: u64 },
-    File { size: u64, mtime: u64 },
+    File { size: u64, mtime: i64 },
     Symlink,
     Hardlink,
     BlockDevice,

@@ -89,7 +89,7 @@ pub enum DirEntryAttribute {
 
 impl DirEntry {
 
-    fn new(etype: CatalogEntryType, name: Vec<u8>, start: u64, size: u64, mtime:u64) -> Self {
+    fn new(etype: CatalogEntryType, name: Vec<u8>, start: u64, size: u64, mtime: i64) -> Self {
         match etype {
             CatalogEntryType::Directory => {
                 DirEntry { name, attr: DirEntryAttribute::Directory { start } }

@@ -184,7 +184,7 @@ impl DirInfo {
                 catalog_encode_u64(writer, name.len() as u64)?;
                 writer.write_all(name)?;
                 catalog_encode_u64(writer, *size)?;
-                catalog_encode_u64(writer, *mtime)?;
+                catalog_encode_i64(writer, *mtime)?;
             }
             DirEntry { name, attr: DirEntryAttribute::Symlink } => {
                 writer.write_all(&[CatalogEntryType::Symlink as u8])?;

@@ -234,7 +234,7 @@ impl DirInfo {
         Ok((self.name, data))
     }
 
-    fn parse<C: FnMut(CatalogEntryType, &[u8], u64, u64, u64) -> Result<bool, Error>>(
+    fn parse<C: FnMut(CatalogEntryType, &[u8], u64, u64, i64) -> Result<bool, Error>>(
         data: &[u8],
         mut callback: C,
     ) -> Result<(), Error> {

@@ -265,7 +265,7 @@ impl DirInfo {
             }
             CatalogEntryType::File => {
                 let size = catalog_decode_u64(&mut cursor)?;
-                let mtime = catalog_decode_u64(&mut cursor)?;
+                let mtime = catalog_decode_i64(&mut cursor)?;
                 callback(etype, name, 0, size, mtime)?
             }
             _ => {

@@ -362,7 +362,7 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
         Ok(())
     }
 
-    fn add_file(&mut self, name: &CStr, size: u64, mtime: u64) -> Result<(), Error> {
+    fn add_file(&mut self, name: &CStr, size: u64, mtime: i64) -> Result<(), Error> {
         let dir = self.dirstack.last_mut().ok_or_else(|| format_err!("outside root"))?;
         let name = name.to_bytes().to_vec();
         dir.entries.push(DirEntry { name, attr: DirEntryAttribute::File { size, mtime } });
@@ -587,14 +587,77 @@ impl <R: Read + Seek> CatalogReader<R> {
     }
 }
 
+/// Serialize i64 as short, variable length byte sequence
+///
+/// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
+/// If the value is negative, we end with a zero byte (0x00).
+pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error> {
+    let mut enc = Vec::new();
+
+    let mut d = if v < 0 {
+        (-1 * (v + 1)) as u64 + 1 // also handles i64::MIN
+    } else {
+        v as u64
+    };
+
+    loop {
+        if d < 128 {
+            if v < 0 {
+                enc.push(128 | d as u8);
+                enc.push(0u8);
+            } else {
+                enc.push(d as u8);
+            }
+            break;
+        }
+        enc.push((128 | (d & 127)) as u8);
+        d = d >> 7;
+    }
+    writer.write_all(&enc)?;
+
+    Ok(())
+}
+
+/// Deserialize i64 from variable length byte sequence
+///
+/// We currently read maximal 11 bytes, which give a maximum of 70 bits + sign.
+/// this method is compatible with catalog_encode_u64 iff the
+/// value encoded is <= 2^63 (values > 2^63 cannot be represented in an i64)
+pub fn catalog_decode_i64<R: Read>(reader: &mut R) -> Result<i64, Error> {
+
+    let mut v: u64 = 0;
+    let mut buf = [0u8];
+
+    for i in 0..11 { // only allow 11 bytes (70 bits + sign marker)
+        if buf.is_empty() {
+            bail!("decode_i64 failed - unexpected EOB");
+        }
+        reader.read_exact(&mut buf)?;
+
+        let t = buf[0];
+
+        if t == 0 {
+            if v == 0 {
+                return Ok(0);
+            }
+            return Ok(((v - 1) as i64 * -1) - 1); // also handles i64::MIN
+        } else if t < 128 {
+            v |= (t as u64) << (i*7);
+            return Ok(v as i64);
+        } else {
+            v |= ((t & 127) as u64) << (i*7);
+        }
+    }
+
+    bail!("decode_i64 failed - missing end marker");
+}
+
 /// Serialize u64 as short, variable length byte sequence
 ///
 /// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
-/// We limit values to a maximum of 2^63.
 pub fn catalog_encode_u64<W: Write>(writer: &mut W, v: u64) -> Result<(), Error> {
     let mut enc = Vec::new();
 
-    if (v & (1<<63)) != 0 { bail!("catalog_encode_u64 failed - value >= 2^63"); }
     let mut d = v;
     loop {
         if d < 128 {
|
||||
|
||||
/// Deserialize u64 from variable length byte sequence
|
||||
///
|
||||
/// We currently read maximal 9 bytes, which give a maximum of 63 bits.
|
||||
/// We currently read maximal 10 bytes, which give a maximum of 70 bits,
|
||||
/// but we currently only encode up to 64 bits
|
||||
pub fn catalog_decode_u64<R: Read>(reader: &mut R) -> Result<u64, Error> {
|
||||
|
||||
let mut v: u64 = 0;
|
||||
let mut buf = [0u8];
|
||||
|
||||
for i in 0..9 { // only allow 9 bytes (63 bits)
|
||||
for i in 0..10 { // only allow 10 bytes (70 bits)
|
||||
if buf.is_empty() {
|
||||
bail!("decode_u64 failed - unexpected EOB");
|
||||
}
|
||||
@ -652,9 +716,58 @@ fn test_catalog_u64_encoder() {
|
||||
assert!(decoded == value);
|
||||
}
|
||||
|
||||
test_encode_decode(u64::MIN);
|
||||
test_encode_decode(126);
|
||||
test_encode_decode((1<<12)-1);
|
||||
test_encode_decode((1<<20)-1);
|
||||
test_encode_decode((1<<50)-1);
|
||||
test_encode_decode((1<<63)-1);
|
||||
test_encode_decode(u64::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_catalog_i64_encoder() {
|
||||
|
||||
fn test_encode_decode(value: i64) {
|
||||
|
||||
let mut data = Vec::new();
|
||||
catalog_encode_i64(&mut data, value).unwrap();
|
||||
|
||||
let slice = &mut &data[..];
|
||||
let decoded = catalog_decode_i64(slice).unwrap();
|
||||
|
||||
assert!(decoded == value);
|
||||
}
|
||||
|
||||
test_encode_decode(0);
|
||||
test_encode_decode(-0);
|
||||
test_encode_decode(126);
|
||||
test_encode_decode(-126);
|
||||
test_encode_decode((1<<12)-1);
|
||||
test_encode_decode(-(1<<12)-1);
|
||||
test_encode_decode((1<<20)-1);
|
||||
test_encode_decode(-(1<<20)-1);
|
||||
test_encode_decode(i64::MIN);
|
||||
test_encode_decode(i64::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_catalog_i64_compatibility() {
|
||||
|
||||
fn test_encode_decode(value: u64) {
|
||||
|
||||
let mut data = Vec::new();
|
||||
catalog_encode_u64(&mut data, value).unwrap();
|
||||
|
||||
let slice = &mut &data[..];
|
||||
let decoded = catalog_decode_i64(slice).unwrap() as u64;
|
||||
|
||||
assert!(decoded == value);
|
||||
}
|
||||
|
||||
test_encode_decode(u64::MIN);
|
||||
test_encode_decode(126);
|
||||
test_encode_decode((1<<12)-1);
|
||||
test_encode_decode((1<<20)-1);
|
||||
test_encode_decode((1<<50)-1);
|
||||
test_encode_decode(u64::MAX);
|
||||
}
|
||||
|
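A worked example of the wire format may help (a sketch, not part of the commit; it assumes catalog_encode_i64/catalog_decode_i64 and the crate's anyhow-based Error type are in scope, and the byte values follow directly from the functions above):

// Positive values encode exactly like catalog_encode_u64 (7 data bits per
// byte, high bit set means "more bytes follow"), which is why old u64
// catalogs stay readable through catalog_decode_i64:
//
//   300 = 0b10_0101100  ->  [0xAC, 0x02]
//         (0xAC = 0x80 | 44 carries the low 7 bits, 0x02 ends the sequence)
//
// Negative values store a shifted magnitude the same way, then terminate
// with the 0x00 marker byte:
//
//   -1  ->  d = (-1 * (-1 + 1)) as u64 + 1 = 1  ->  [0x81, 0x00]
fn demo() -> Result<(), Error> {
    let mut buf = Vec::new();
    catalog_encode_i64(&mut buf, 300)?;
    assert_eq!(buf, vec![0xAC, 0x02]);
    assert_eq!(catalog_decode_i64(&mut &buf[..])?, 300);

    buf.clear();
    catalog_encode_i64(&mut buf, -1)?;
    assert_eq!(buf, vec![0x81, 0x00]);
    assert_eq!(catalog_decode_i64(&mut &buf[..])?, -1);
    Ok(())
}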
@@ -559,7 +559,11 @@ impl DataStore {
             );
         }
         if gc_status.removed_bad > 0 {
-            crate::task_log!(worker, "Removed bad files: {}", gc_status.removed_bad);
+            crate::task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
+        }
+
+        if gc_status.still_bad > 0 {
+            crate::task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
         }
 
         crate::task_log!(

@@ -580,6 +584,14 @@ impl DataStore {
 
         crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
 
+        let deduplication_factor = if gc_status.disk_bytes > 0 {
+            (gc_status.index_data_bytes as f64)/(gc_status.disk_bytes as f64)
+        } else {
+            1.0
+        };
+
+        crate::task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
+
        if gc_status.disk_chunks > 0 {
            let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
            crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
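To make the factor concrete (hypothetical figures, not from the commit): if all indexes together reference 120 GiB of logical data while the deduplicated chunk store occupies only 40 GiB on disk, GC logs a factor of 3.00; the else branch avoids a division by zero on an empty store:

fn dedup_factor_example() {
    // Hypothetical figures for illustration:
    let index_data_bytes: u64 = 120 << 30; // logical bytes referenced by all indexes
    let disk_bytes: u64 = 40 << 30;        // bytes actually occupied by unique chunks
    let factor = if disk_bytes > 0 {
        index_data_bytes as f64 / disk_bytes as f64
    } else {
        1.0
    };
    assert_eq!(format!("{:.2}", factor), "3.00");
}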
@@ -14,6 +14,7 @@ use crate::{
     BackupGroup,
     BackupDir,
     BackupInfo,
+    BackupManifest,
     IndexFile,
     CryptMode,
     FileInfo,

@@ -284,6 +285,7 @@ pub fn verify_backup_dir(
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: UPID,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
     let snap_lock = lock_dir_noblock_shared(
         &datastore.snapshot_path(&backup_dir),

@@ -297,6 +299,7 @@ pub fn verify_backup_dir(
             corrupt_chunks,
             worker,
             upid,
+            filter,
             snap_lock
         ),
         Err(err) => {

@@ -320,6 +323,7 @@ pub fn verify_backup_dir_with_lock(
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: UPID,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
     _snap_lock: Dir,
 ) -> Result<bool, Error> {
     let manifest = match datastore.load_manifest(&backup_dir) {

@@ -336,6 +340,18 @@ pub fn verify_backup_dir_with_lock(
         }
     };
 
+    if let Some(filter) = filter {
+        if filter(&manifest) == false {
+            task_log!(
+                worker,
+                "SKIPPED: verify {}:{} (recently verified)",
+                datastore.name(),
+                backup_dir,
+            );
+            return Ok(true);
+        }
+    }
+
     task_log!(worker, "verify {}:{}", datastore.name(), backup_dir);
 
     let mut error_count = 0;

@@ -412,7 +428,7 @@ pub fn verify_backup_group(
     progress: Option<(usize, usize)>, // (done, snapshot_count)
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: &UPID,
-    filter: &dyn Fn(&BackupInfo) -> bool,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<(usize, Vec<String>), Error> {
 
     let mut errors = Vec::new();

@@ -439,16 +455,6 @@ pub fn verify_backup_group(
     for info in list {
         count += 1;
 
-        if filter(&info) == false {
-            task_log!(
-                worker,
-                "SKIPPED: verify {}:{} (recently verified)",
-                datastore.name(),
-                info.backup_dir,
-            );
-            continue;
-        }
-
         if !verify_backup_dir(
             datastore.clone(),
             &info.backup_dir,

@@ -456,6 +462,7 @@ pub fn verify_backup_group(
             corrupt_chunks.clone(),
             worker.clone(),
             upid.clone(),
+            filter,
         )? {
             errors.push(info.backup_dir.to_string());
         }

@@ -486,7 +493,7 @@ pub fn verify_all_backups(
     datastore: Arc<DataStore>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: &UPID,
-    filter: &dyn Fn(&BackupInfo) -> bool,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
     let mut errors = Vec::new();
@@ -1657,7 +1657,10 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
                 optional: true,
             },
         }
-    }
+    },
+    returns: {
+        type: StorageStatus,
+    },
 )]
 /// Get repository status.
 async fn status(param: Value) -> Result<Value, Error> {

@@ -1690,7 +1693,7 @@ async fn status(param: Value) -> Result<Value, Error> {
         .column(ColumnConfig::new("used").renderer(render_total_percentage))
         .column(ColumnConfig::new("avail").renderer(render_total_percentage));
 
-    let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
+    let schema = &API_RETURN_SCHEMA_STATUS;
 
     format_and_print_result_full(&mut data, schema, &output_format, &options);
@@ -579,9 +579,9 @@ async fn schedule_datastore_sync_jobs() {
             Err(_) => continue, // could not get lock
         };
 
-        let userid = Userid::backup_userid().clone();
+        let userid = Userid::backup_userid();
 
-        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
+        if let Err(err) = do_sync_job(job, job_config, userid, Some(event_str)) {
             eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
         }
     }
@@ -51,7 +51,7 @@ lazy_static! {
     }
 )]
 #[serde(rename_all="kebab-case")]
-#[derive(Serialize,Deserialize)]
+#[derive(Serialize,Deserialize,Clone)]
 /// Sync Job
 pub struct SyncJobConfig {
     pub id: String,
@@ -9,7 +9,7 @@ use std::ffi::CStr;
 pub trait BackupCatalogWriter {
     fn start_directory(&mut self, name: &CStr) -> Result<(), Error>;
     fn end_directory(&mut self) -> Result<(), Error>;
-    fn add_file(&mut self, name: &CStr, size: u64, mtime: u64) -> Result<(), Error>;
+    fn add_file(&mut self, name: &CStr, size: u64, mtime: i64) -> Result<(), Error>;
     fn add_symlink(&mut self, name: &CStr) -> Result<(), Error>;
     fn add_hardlink(&mut self, name: &CStr) -> Result<(), Error>;
     fn add_block_device(&mut self, name: &CStr) -> Result<(), Error>;
@@ -535,7 +535,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
 
         let file_size = stat.st_size as u64;
         if let Some(ref mut catalog) = self.catalog {
-            catalog.add_file(c_file_name, file_size, stat.st_mtime as u64)?;
+            catalog.add_file(c_file_name, file_size, stat.st_mtime)?;
         }
 
         let offset: LinkOffset =
@@ -7,6 +7,7 @@ use proxmox::tools::email::sendmail;
 
 use crate::{
     config::verify::VerificationJobConfig,
+    config::sync::SyncJobConfig,
     api2::types::{
         Userid,
         GarbageCollectionStatus,

@@ -16,19 +17,22 @@ use crate::{
 
 const GC_OK_TEMPLATE: &str = r###"
 
-Datastore: {{datastore}}
-Task ID: {{status.upid}}
-Index file count: {{status.index-file-count}}
+Datastore:            {{datastore}}
+Task ID:              {{status.upid}}
+Index file count:     {{status.index-file-count}}
 
-Removed garbage: {{human-bytes status.removed-bytes}}
-Removed chunks: {{status.removed-chunks}}
-Remove bad files: {{status.removed-bad}}
+Removed garbage:      {{human-bytes status.removed-bytes}}
+Removed chunks:       {{status.removed-chunks}}
+Remove bad files:     {{status.removed-bad}}
 
-Pending removals: {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks)
-Bad files: {{status.still-bad}}
+Pending removals:     {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks)
 
-Original Data usage: {{human-bytes status.index-data-bytes}}
-On Disk usage: {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}})
-On Disk chunks: {{status.disk-chunks}}
+Original Data usage:  {{human-bytes status.index-data-bytes}}
+On Disk usage:        {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}})
+On Disk chunks:       {{status.disk-chunks}}
+
+Deduplication Factor: {{deduplication-factor}}
 
 Garbage collection successful.
 

@@ -65,6 +69,28 @@ Verification failed on these snapshots:
 
 "###;
 
+const SYNC_OK_TEMPLATE: &str = r###"
+
+Job ID:       {{job.id}}
+Datastore:    {{job.store}}
+Remote:       {{job.remote}}
+Remote Store: {{job.remote-store}}
+
+Synchronization successful.
+
+"###;
+
+const SYNC_ERR_TEMPLATE: &str = r###"
+
+Job ID:       {{job.id}}
+Datastore:    {{job.store}}
+Remote:       {{job.remote}}
+Remote Store: {{job.remote-store}}
+
+Synchronization failed: {{error}}
+
+"###;
+
 lazy_static::lazy_static!{
 
     static ref HANDLEBARS: Handlebars<'static> = {

@@ -81,6 +107,9 @@ lazy_static::lazy_static!{
         hb.register_template_string("verify_ok_template", VERIFY_OK_TEMPLATE).unwrap();
         hb.register_template_string("verify_err_template", VERIFY_ERR_TEMPLATE).unwrap();
 
+        hb.register_template_string("sync_ok_template", SYNC_OK_TEMPLATE).unwrap();
+        hb.register_template_string("sync_err_template", SYNC_ERR_TEMPLATE).unwrap();
+
         hb
     };
 }

@@ -93,7 +122,7 @@ fn send_job_status_mail(
 
     // Note: OX has serious problems displaying text mails,
     // so we include html as well
-    let html = format!("<html><body><pre>\n{}\n<pre>", text);
+    let html = format!("<html><body><pre>\n{}\n<pre>", handlebars::html_escape(text));
 
     let nodename = proxmox::tools::nodename();
 

@@ -120,10 +149,18 @@ pub fn send_gc_status(
 
     let text = match result {
         Ok(()) => {
+            let deduplication_factor = if status.disk_bytes > 0 {
+                (status.index_data_bytes as f64)/(status.disk_bytes as f64)
+            } else {
+                1.0
+            };
+
             let data = json!({
                 "status": status,
                 "datastore": datastore,
+                "deduplication-factor": format!("{:.2}", deduplication_factor),
             });
+
             HANDLEBARS.render("gc_ok_template", &data)?
         }
         Err(err) => {

@@ -189,6 +226,41 @@ pub fn send_verify_status(
     Ok(())
 }
 
+pub fn send_sync_status(
+    email: &str,
+    job: &SyncJobConfig,
+    result: &Result<(), Error>,
+) -> Result<(), Error> {
+
+    let text = match result {
+        Ok(()) => {
+            let data = json!({ "job": job });
+            HANDLEBARS.render("sync_ok_template", &data)?
+        }
+        Err(err) => {
+            let data = json!({ "job": job, "error": err.to_string() });
+            HANDLEBARS.render("sync_err_template", &data)?
+        }
+    };
+
+    let subject = match result {
+        Ok(()) => format!(
+            "Sync remote '{}' datastore '{}' successful",
+            job.remote,
+            job.remote_store,
+        ),
+        Err(_) => format!(
+            "Sync remote '{}' datastore '{}' failed",
+            job.remote,
+            job.remote_store,
+        ),
+    };
+
+    send_job_status_mail(email, &subject, &text)?;
+
+    Ok(())
+}
+
 /// Lookup users email address
 ///
 /// For "backup@pam", this returns the address from "root@pam".
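For illustration (hypothetical job values, and the column alignment depends on the template reconstruction above): a successful run rendered through sync_ok_template would produce a mail body roughly like

Job ID:       s-12345678
Datastore:    store2
Remote:       pbs-remote
Remote Store: store1

Synchronization successful.

with the subject line "Sync remote 'pbs-remote' datastore 'store1' successful".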
@@ -7,7 +7,7 @@ use crate::{
     config::verify::VerificationJobConfig,
     backup::{
         DataStore,
-        BackupInfo,
+        BackupManifest,
         verify_all_backups,
     },
     task_log,

@@ -23,19 +23,13 @@ pub fn do_verification_job(
 
     let datastore = DataStore::lookup_datastore(&verification_job.store)?;
 
-    let datastore2 = datastore.clone();
-
     let outdated_after = verification_job.outdated_after.clone();
     let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);
 
-    let filter = move |backup_info: &BackupInfo| {
+    let filter = move |manifest: &BackupManifest| {
         if !ignore_verified_snapshots {
             return true;
         }
-        let manifest = match datastore2.load_manifest(&backup_info.backup_dir) {
-            Ok((manifest, _)) => manifest,
-            Err(_) => return true, // include, so task picks this up as error
-        };
 
         let raw_verify_state = manifest.unprotected["verify_state"].clone();
         match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {

@@ -71,7 +65,7 @@ pub fn do_verification_job(
                 task_log!(worker,"task triggered by schedule '{}'", event_str);
             }
 
-            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), &filter);
+            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), Some(&filter));
             let job_result = match result {
                 Ok(ref errors) if errors.is_empty() => Ok(()),
                 Ok(_) => Err(format_err!("verification failed - please check the log for details")),
@@ -35,8 +35,6 @@ pub const PROXMOX_BACKUP_ACTIVE_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_
 pub const PROXMOX_BACKUP_INDEX_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/index");
 pub const PROXMOX_BACKUP_ARCHIVE_TASK_FN: &str = concat!(PROXMOX_BACKUP_TASK_DIR_M!(), "/archive");
 
-const MAX_INDEX_TASKS: usize = 1000;
-
 lazy_static! {
     static ref WORKER_TASK_LIST: Mutex<HashMap<usize, Arc<WorkerTask>>> = Mutex::new(HashMap::new());

@@ -363,7 +361,10 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
 
     let lock = lock_task_list_files(true)?;
 
+    // TODO remove with 1.x
     let mut finish_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN)?;
+    let had_index_file = !finish_list.is_empty();
+
     let mut active_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?
         .into_iter()
         .filter_map(|info| {

@@ -374,7 +375,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
             }
 
             if !worker_is_active_local(&info.upid) {
-                println!("Detected stopped UPID {}", &info.upid_str);
+                println!("Detected stopped task '{}'", &info.upid_str);
                 let now = proxmox::tools::time::epoch_i64();
                 let status = upid_read_status(&info.upid)
                     .unwrap_or_else(|_| TaskState::Unknown { endtime: now });

@@ -412,33 +413,10 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
         }
     });
 
-    let start = if finish_list.len() > MAX_INDEX_TASKS {
-        finish_list.len() - MAX_INDEX_TASKS
-    } else {
-        0
-    };
-
-    let end = (start+MAX_INDEX_TASKS).min(finish_list.len());
-
-    let index_raw = if end > start {
-        render_task_list(&finish_list[start..end])
-    } else {
-        "".to_string()
-    };
-
-    replace_file(
-        PROXMOX_BACKUP_INDEX_TASK_FN,
-        index_raw.as_bytes(),
-        CreateOptions::new()
-            .owner(backup_user.uid)
-            .group(backup_user.gid),
-    )?;
-
-    if !finish_list.is_empty() && start > 0 {
+    if !finish_list.is_empty() {
         match std::fs::OpenOptions::new().append(true).create(true).open(PROXMOX_BACKUP_ARCHIVE_TASK_FN) {
             Ok(mut writer) => {
-                for info in &finish_list[0..start] {
+                for info in &finish_list {
                     writer.write_all(render_task_line(&info).as_bytes())?;
                 }
             },

@@ -448,6 +426,12 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
         nix::unistd::chown(PROXMOX_BACKUP_ARCHIVE_TASK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
     }
 
+    // TODO Remove with 1.x
+    // for compatibility, if we had an INDEX file, we do not need it anymore
+    if had_index_file {
+        let _ = nix::unistd::unlink(PROXMOX_BACKUP_INDEX_TASK_FN);
+    }
+
     drop(lock);
 
     Ok(())

@@ -511,16 +495,9 @@ where
     read_task_file(file)
 }
 
-enum TaskFile {
-    Active,
-    Index,
-    Archive,
-    End,
-}
-
 pub struct TaskListInfoIterator {
     list: VecDeque<TaskListInfo>,
-    file: TaskFile,
+    end: bool,
     archive: Option<LogRotateFiles>,
     lock: Option<File>,
 }

@@ -535,7 +512,10 @@ impl TaskListInfoIterator {
             .iter()
             .any(|info| info.state.is_some() || !worker_is_active_local(&info.upid));
 
-        if needs_update {
+        // TODO remove with 1.x
+        let index_exists = std::path::Path::new(PROXMOX_BACKUP_INDEX_TASK_FN).is_file();
+
+        if needs_update || index_exists {
             drop(lock);
             update_active_workers(None)?;
             let lock = lock_task_list_files(false)?;

@@ -554,12 +534,11 @@ impl TaskListInfoIterator {
             Some(logrotate.files())
         };
 
-        let file = if active_only { TaskFile::End } else { TaskFile::Active };
         let lock = if active_only { None } else { Some(read_lock) };
 
         Ok(Self {
             list: active_list.into(),
-            file,
+            end: active_only,
             archive,
             lock,
         })

@@ -573,35 +552,23 @@ impl Iterator for TaskListInfoIterator {
         loop {
             if let Some(element) = self.list.pop_back() {
                 return Some(Ok(element));
+            } else if self.end {
+                return None;
             } else {
-                match self.file {
-                    TaskFile::Active => {
-                        let index = match read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN) {
-                            Ok(index) => index,
+                if let Some(mut archive) = self.archive.take() {
+                    if let Some(file) = archive.next() {
+                        let list = match read_task_file(file) {
+                            Ok(list) => list,
                             Err(err) => return Some(Err(err)),
                         };
-                        self.list.append(&mut index.into());
-                        self.file = TaskFile::Index;
-                    },
-                    TaskFile::Index | TaskFile::Archive => {
-                        if let Some(mut archive) = self.archive.take() {
-                            if let Some(file) = archive.next() {
-                                let list = match read_task_file(file) {
-                                    Ok(list) => list,
-                                    Err(err) => return Some(Err(err)),
-                                };
-                                self.list.append(&mut list.into());
-                                self.archive = Some(archive);
-                                self.file = TaskFile::Archive;
-                                continue;
-                            }
-                        }
-                        self.file = TaskFile::End;
-                        self.lock.take();
-                        return None;
+                        self.list.append(&mut list.into());
+                        self.archive = Some(archive);
+                        continue;
                     }
-                    TaskFile::End => return None,
                 }
+
+                self.end = true;
+                self.lock.take();
             }
         }
     }

@@ -652,7 +619,7 @@ impl WorkerTask {
 
         let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);
 
-        path.push(format!("{:02X}", upid.pstart % 256));
+        path.push(format!("{:02X}", upid.pstart & 255));
 
         let backup_user = crate::backup::backup_user()?;

@@ -660,8 +627,6 @@ impl WorkerTask {
 
         path.push(upid.to_string());
 
-        println!("FILE: {:?}", path);
-
         let logger_options = FileLogOptions {
             to_stdout: to_stdout,
             exclusive: true,
@@ -542,7 +542,7 @@ Ext.define('PBS.DataStoreContent', {
                 v = '';
             }
             v = Ext.String.htmlEncode(v);
-            let icon = 'fa fa-fw fa-pencil';
+            let icon = 'fa fa-fw fa-pencil pointer';
 
             return `<span class="snapshot-comment-column">${v}</span>
                 <i data-qtip="${gettext('Edit')}" style="float: right;" class="${icon}"></i>`;
@@ -48,21 +48,23 @@ Ext.define('PBS.DataStoreInfo', {
         let vm = me.getViewModel();
 
         let counts = store.getById('counts').data.value;
-        let storage = store.getById('storage').data.value;
+        let total = store.getById('total').data.value;
+        let used = store.getById('used').data.value;
 
-        let used = Proxmox.Utils.format_size(storage.used);
-        let total = Proxmox.Utils.format_size(storage.total);
-        let percent = 100*storage.used/storage.total;
-        if (storage.total === 0) {
+        let percent = 100*used/total;
+        if (total === 0) {
            percent = 0;
        }
        let used_percent = `${percent.toFixed(2)}%`;
 
        let usage = used_percent + ' (' +
-           Ext.String.format(gettext('{0} of {1}'),
-                   used, total) + ')';
+           Ext.String.format(
+               gettext('{0} of {1}'),
+               Proxmox.Utils.format_size(used),
+               Proxmox.Utils.format_size(total),
+           ) + ')';
        vm.set('usagetext', usage);
-       vm.set('usage', storage.used/storage.total);
+       vm.set('usage', used/total);
 
        let gcstatus = store.getById('gc-status').data.value;

@@ -70,12 +72,12 @@ Ext.define('PBS.DataStoreInfo', {
            (gcstatus['disk-bytes'] || Infinity);
 
        let countstext = function(count) {
-           return `${count[0]} ${gettext('Groups')}, ${count[1]} ${gettext('Snapshots')}`;
+           return `${count.groups || 0} ${gettext('Groups')}, ${count.snapshots || 0} ${gettext('Snapshots')}`;
        };
 
-       vm.set('ctcount', countstext(counts.ct || [0, 0]));
-       vm.set('vmcount', countstext(counts.vm || [0, 0]));
-       vm.set('hostcount', countstext(counts.host || [0, 0]));
+       vm.set('ctcount', countstext(counts.ct));
+       vm.set('vmcount', countstext(counts.vm));
+       vm.set('hostcount', countstext(counts.host));
        vm.set('deduplication', dedup.toFixed(2));
        vm.set('stillbad', gcstatus['still-bad']);
        vm.set('removedbytes', Proxmox.Utils.format_size(gcstatus['removed-bytes']));
@@ -199,6 +199,7 @@ Ext.define('PBS.config.SyncJobView', {
        {
            xtype: 'proxmoxStdRemoveButton',
            baseurl: '/config/sync/',
+           confirmMsg: gettext('Remove entry?'),
            callback: 'reload',
        },
        '-',
@@ -199,6 +199,7 @@ Ext.define('PBS.config.VerifyJobView', {
        {
            xtype: 'proxmoxStdRemoveButton',
            baseurl: '/config/verify/',
+           confirmMsg: gettext('Remove entry?'),
            callback: 'reload',
        },
        '-',
@@ -60,20 +60,6 @@ Ext.define('PBS.window.SyncJobEdit', {
            name: 'remote-store',
        },
     ],
-    advancedColumn1: [
-       {
-           xtype: 'pmxDisplayEditField',
-           name: 'id',
-           fieldLabel: gettext('Sync Job ID'),
-           emptyText: gettext('Automatic'),
-           renderer: Ext.htmlEncode,
-           allowBlank: true,
-           minLength: 4,
-           cbind: {
-               editable: '{isCreate}',
-           },
-       },
-    ],
 
     column2: [
        {
@@ -65,20 +65,6 @@ Ext.define('PBS.window.VerifyJobEdit', {
            },
        },
     ],
-    advancedColumn1: [
-       {
-           xtype: 'pmxDisplayEditField',
-           name: 'id',
-           fieldLabel: gettext('Verify Job ID'),
-           emptyText: gettext('Automatic'),
-           renderer: Ext.htmlEncode,
-           allowBlank: true,
-           minLength: 4,
-           cbind: {
-               editable: '{isCreate}',
-           },
-       },
-    ],
 
     column2: [
        {