Compare commits

47 commits:

43ba913977
a720894ff0
a95a3fb893
620911b426
5c264c8d80
8d78589969
eed8a5ad79
538b9c1c27
55919bf141
456ad0c478
c76c7f8303
c48aa39f3b
2d32fe2c04
dc155e9bd7
4e14781aec
a595f0fee0
add5861e8d
1610c45a86
b2387eaa45
96d65fbcd0
7cc3473a4e
4856a21836
a0153b02c9
04b0ca8b59
86e432b0b8
f0ed6a218c
709584719d
d43f86f3f3
997d7e19fc
c67b1fa72f
268687ddf0
426c1e353b
2888b27f4c
f5d00373f3
934f5bb8ac
9857472211
013fa7bbcb
a8d7033cb2
04ad7bc436
77ebbefc1a
750252ba2f
dc58194ebe
c6887a8a4d
090decbe76
c32186595e
947f45252d
c94e1f655e
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.2.1"
+version = "0.2.3"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
debian/changelog

@@ -1,3 +1,30 @@
+rust-proxmox-backup (0.2.3-1) unstable; urgency=medium
+
+  * tools/systemd/time: fix compute_next_event for weekdays
+
+  * improve display of 'next run' for sync jobs
+
+  * fix csum calculation for images which do not have a 'chunk_size' aligned
+    size
+
+  * add parser for zpool list output
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 04 Jun 2020 10:39:06 +0200
+
+rust-proxmox-backup (0.2.2-1) unstable; urgency=medium
+
+  * proxmox-backup-client.rs: implement quiet flag
+
+  * client restore: don't add server file ending if already specified
+
+  * src/client/pull.rs: also download client.log.blob
+
+  * src/client/pull.rs: more verbose logging
+
+  * gui improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 03 Jun 2020 10:37:12 +0200
+
 rust-proxmox-backup (0.2.1-1) unstable; urgency=medium
 
   * ui: move server RRD statistics to 'Server Status' panel
@@ -2,9 +2,11 @@ use proxmox::api::router::{Router, SubdirMap};
 use proxmox::list_subdirs_api_method;
 
 pub mod datastore;
+pub mod sync;
 
 const SUBDIRS: SubdirMap = &[
-    ("datastore", &datastore::ROUTER)
+    ("datastore", &datastore::ROUTER),
+    ("sync", &sync::ROUTER)
 ];
 
 pub const ROUTER: Router = Router::new()
@@ -44,7 +44,7 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba
 
     let mut path = store.base_path();
     path.push(backup_dir.relative_path());
-    path.push("index.json.blob");
+    path.push(MANIFEST_BLOB_NAME);
 
     let raw_data = file_get_contents(&path)?;
     let index_size = raw_data.len() as u64;
@@ -61,7 +61,7 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba
     }
 
     result.push(BackupContent {
-        filename: "index.json.blob".to_string(),
+        filename: MANIFEST_BLOB_NAME.to_string(),
         size: Some(index_size),
     });
 
@@ -130,8 +130,8 @@ fn list_groups(
         let group = info.backup_dir.group();
 
         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+        let owner = datastore.get_owner(group)?;
         if !list_all {
-            let owner = datastore.get_owner(group)?;
             if owner != username { continue; }
         }
 
@@ -141,6 +141,7 @@ fn list_groups(
             last_backup: info.backup_dir.backup_time().timestamp(),
             backup_count: list.len() as u64,
             files: info.files.clone(),
+            owner: Some(owner),
         };
         groups.push(result_item);
     }
@@ -329,8 +330,9 @@ pub fn list_snapshots (
         }
 
         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+        let owner = datastore.get_owner(group)?;
 
         if !list_all {
-            let owner = datastore.get_owner(group)?;
             if owner != username { continue; }
         }
 
@@ -340,6 +342,7 @@ pub fn list_snapshots (
             backup_time: info.backup_dir.backup_time().timestamp(),
             files: info.files,
             size: None,
+            owner: Some(owner),
         };
 
         if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
@@ -802,7 +805,7 @@ fn upload_backup_log(
     let store = tools::required_string_param(&param, "store")?;
     let datastore = DataStore::lookup_datastore(store)?;
 
-    let file_name = "client.log.blob";
+    let file_name = CLIENT_LOG_BLOB_NAME;
 
     let backup_type = tools::required_string_param(&param, "backup-type")?;
     let backup_id = tools::required_string_param(&param, "backup-id")?;
@@ -875,8 +878,9 @@ fn get_rrd_stats(
         &rrd_dir,
         &[
             "total", "used",
-            "read_ios", "read_bytes", "read_ticks",
-            "write_ios", "write_bytes", "write_ticks",
+            "read_ios", "read_bytes",
+            "write_ios", "write_bytes",
+            "io_ticks",
         ],
         timeframe,
         cf,
src/api2/admin/sync.rs (new file, 130 lines)

@@ -0,0 +1,130 @@
+use anyhow::{Error};
+use serde_json::Value;
+use std::collections::HashMap;
+
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::router::SubdirMap;
+use proxmox::{list_subdirs_api_method, sortable};
+
+use crate::api2::types::*;
+use crate::api2::pull::{get_pull_parameters};
+use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
+use crate::server::{self, TaskListInfo, WorkerTask};
+use crate::tools::systemd::time::{
+    parse_calendar_event, compute_next_event};
+
+#[api(
+    input: {
+        properties: {},
+    },
+    returns: {
+        description: "List configured jobs and their status.",
+        type: Array,
+        items: { type: sync::SyncJobStatus },
+    },
+)]
+/// List all sync jobs
+pub fn list_sync_jobs(
+    _param: Value,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<SyncJobStatus>, Error> {
+
+    let (config, digest) = sync::config()?;
+
+    let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;
+
+    let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
+    let tasks = server::read_task_list()?;
+
+    for info in tasks.iter() {
+        let worker_id = match &info.upid.worker_id {
+            Some(id) => id,
+            _ => { continue; },
+        };
+        if let Some(last) = last_tasks.get(worker_id) {
+            if last.upid.starttime < info.upid.starttime {
+                last_tasks.insert(worker_id.to_string(), &info);
+            }
+        } else {
+            last_tasks.insert(worker_id.to_string(), &info);
+        }
+    }
+
+    for job in &mut list {
+        let mut last = 0;
+        if let Some(task) = last_tasks.get(&job.id) {
+            job.last_run_upid = Some(task.upid_str.clone());
+            if let Some((endtime, status)) = &task.state {
+                job.last_run_state = Some(String::from(status));
+                job.last_run_endtime = Some(*endtime);
+                last = *endtime;
+            }
+        }
+
+        job.next_run = (|| -> Option<i64> {
+            let schedule = job.schedule.as_ref()?;
+            let event = parse_calendar_event(&schedule).ok()?;
+            compute_next_event(&event, last, false).ok()
+        })();
+    }
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    Ok(list)
+}
+
+#[api(
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            }
+        }
+    }
+)]
+/// Runs the sync jobs manually.
+async fn run_sync_job(
+    id: String,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+
+    let (config, _digest) = sync::config()?;
+    let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
+
+    let username = rpcenv.get_user().unwrap();
+
+    let delete = sync_job.remove_vanished.unwrap_or(true);
+    let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
+
+    let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {
+
+        worker.log(format!("sync job '{}' start", &id));
+
+        crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;
+
+        worker.log(format!("sync job '{}' end", &id));
+
+        Ok(())
+    })?;
+
+    Ok(upid_str)
+}
+
+#[sortable]
+const SYNC_INFO_SUBDIRS: SubdirMap = &[
+    (
+        "run",
+        &Router::new()
+            .post(&API_METHOD_RUN_SYNC_JOB)
+    ),
+];
+
+const SYNC_INFO_ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
+    .subdirs(SYNC_INFO_SUBDIRS);
+
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_SYNC_JOBS)
+    .match_all("id", &SYNC_INFO_ROUTER);
@@ -107,7 +107,7 @@ async move {
         }
 
         let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
-        if !is_new { bail!("backup directorty already exists."); }
+        if !is_new { bail!("backup directory already exists."); }
 
         WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
             let mut env = BackupEnvironment::new(
@@ -151,7 +151,7 @@ async move {
 
         match (res, env.ensure_finished()) {
             (Ok(_), Ok(())) => {
-                env.log("backup finished sucessfully");
+                env.log("backup finished successfully");
                 Ok(())
             },
             (Err(err), Ok(())) => {
@@ -378,7 +378,7 @@ fn dynamic_append (
 
         env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;
 
-        env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
     }
 
     Ok(Value::Null)
@@ -443,7 +443,7 @@ fn fixed_append (
 
        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;
 
-        env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
     }
 
     Ok(Value::Null)
@@ -498,7 +498,7 @@ fn close_dynamic_index (
 
     env.dynamic_writer_close(wid, chunk_count, size, csum)?;
 
-    env.log(format!("sucessfully closed dynamic index {}", wid));
+    env.log(format!("successfully closed dynamic index {}", wid));
 
     Ok(Value::Null)
 }
@@ -552,7 +552,7 @@ fn close_fixed_index (
 
     env.fixed_writer_close(wid, chunk_count, size, csum)?;
 
-    env.log(format!("sucessfully closed fixed index {}", wid));
+    env.log(format!("successfully closed fixed index {}", wid));
 
     Ok(Value::Null)
 }
@@ -566,7 +566,7 @@ fn finish_backup (
     let env: &BackupEnvironment = rpcenv.as_ref();
 
     env.finish_backup()?;
-    env.log("sucessfully finished backup");
+    env.log("successfully finished backup");
 
     Ok(Value::Null)
 }
@@ -52,7 +52,7 @@ struct FixedWriterState {
 struct SharedBackupState {
     finished: bool,
     uid_counter: usize,
-    file_counter: usize, // sucessfully uploaded files
+    file_counter: usize, // successfully uploaded files
     dynamic_writers: HashMap<usize, DynamicWriterState>,
     fixed_writers: HashMap<usize, FixedWriterState>,
     known_chunks: HashMap<[u8;32], u32>,
@@ -269,6 +269,8 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
         None => bail!("remote '{}' does not exist.", name),
     }
 
+    remote::save_config(&config)?;
+
     Ok(())
 }
 
@@ -60,7 +60,7 @@ pub fn list_sync_jobs(
             },
             schedule: {
                 optional: true,
-                schema: GC_SCHEDULE_SCHEMA,
+                schema: SYNC_SCHEDULE_SCHEMA,
             },
         },
     },
@@ -154,7 +154,7 @@ pub enum DeletableProperty {
             },
             schedule: {
                 optional: true,
-                schema: GC_SCHEDULE_SCHEMA,
+                schema: SYNC_SCHEDULE_SCHEMA,
             },
             delete: {
                 description: "List of properties to delete.",
@@ -274,4 +274,4 @@ const ITEM_ROUTER: Router = Router::new()
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_SYNC_JOBS)
     .post(&API_METHOD_CREATE_SYNC_JOB)
-    .match_all("name", &ITEM_ROUTER);
+    .match_all("id", &ITEM_ROUTER);
@@ -338,7 +338,7 @@ pub enum DeletableProperty {
     autostart,
     /// Delete bridge ports (set to 'none')
     bridge_ports,
-    /// Delet bridge-vlan-aware flag
+    /// Delete bridge-vlan-aware flag
     bridge_vlan_aware,
     /// Delete bond-slaves (set to 'none')
     slaves,
@@ -36,8 +36,9 @@ fn get_node_stats(
             "netin", "netout",
             "loadavg",
             "total", "used",
-            "read_ios", "read_bytes", "read_ticks",
-            "write_ios", "write_bytes", "write_ticks",
+            "read_ios", "read_bytes",
+            "write_ios", "write_bytes",
+            "io_ticks",
         ],
         timeframe,
         cf,
@@ -256,7 +256,7 @@ fn stop_service(
     _param: Value,
 ) -> Result<Value, Error> {
 
-    log::info!("stoping service {}", service);
+    log::info!("stopping service {}", service);
 
     run_service_command(&service, "stop")
 }
@@ -1,4 +1,5 @@
 //! Sync datastore from remote server
+use std::sync::{Arc};
 
 use anyhow::{format_err, Error};
 
@@ -15,6 +16,52 @@ use crate::config::{
     cached_user_info::CachedUserInfo,
 };
 
+
+pub fn check_pull_privs(
+    username: &str,
+    store: &str,
+    remote: &str,
+    remote_store: &str,
+    delete: bool,
+) -> Result<(), Error> {
+
+    let user_info = CachedUserInfo::new()?;
+
+    user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+    user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+
+    if delete {
+        user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
+    }
+
+    Ok(())
+}
+
+pub async fn get_pull_parameters(
+    store: &str,
+    remote: &str,
+    remote_store: &str,
+) -> Result<(HttpClient, BackupRepository, Arc<DataStore>), Error> {
+
+    let tgt_store = DataStore::lookup_datastore(store)?;
+
+    let (remote_config, _digest) = remote::config()?;
+    let remote: remote::Remote = remote_config.lookup("remote", remote)?;
+
+    let options = HttpClientOptions::new()
+        .password(Some(remote.password.clone()))
+        .fingerprint(remote.fingerprint.clone());
+
+    let client = HttpClient::new(&remote.host, &remote.userid, options)?;
+    let _auth_info = client.login() // make sure we can auth
+        .await
+        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
+
+    let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store.to_string());
+
+    Ok((client, src_repo, tgt_store))
+}
+
 #[api(
     input: {
         properties: {
@@ -52,33 +99,12 @@ async fn pull (
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
 
-    let user_info = CachedUserInfo::new()?;
-
     let username = rpcenv.get_user().unwrap();
-    user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
-    user_info.check_privs(&username, &["remote", &remote, &remote_store], PRIV_REMOTE_READ, false)?;
-
     let delete = remove_vanished.unwrap_or(true);
 
-    if delete {
-        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_PRUNE, false)?;
-    }
+    check_pull_privs(&username, &store, &remote, &remote_store, delete)?;
 
-    let tgt_store = DataStore::lookup_datastore(&store)?;
-
-    let (remote_config, _digest) = remote::config()?;
-    let remote: remote::Remote = remote_config.lookup("remote", &remote)?;
-
-    let options = HttpClientOptions::new()
-        .password(Some(remote.password.clone()))
-        .fingerprint(remote.fingerprint.clone());
-
-    let client = HttpClient::new(&remote.host, &remote.userid, options)?;
-    let _auth_info = client.login() // make sure we can auth
-        .await
-        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
-
-    let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store);
+    let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;
 
     // fixme: set to_stdout to false?
     let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {
@@ -131,7 +131,7 @@ fn upgrade_to_backup_reader_protocol(
                 Either::Right((Ok(res), _)) => Ok(res),
                 Either::Right((Err(err), _)) => Err(err),
             })
-            .map_ok(move |_| env.log("reader finished sucessfully"))
+            .map_ok(move |_| env.log("reader finished successfully"))
         })?;
 
         let response = Response::builder()
@@ -27,6 +27,8 @@ macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL
 macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
 macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
 
+macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
+
 #[macro_export]
 macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }
 
@@ -63,7 +65,9 @@ const_regex!{
 
     pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");
 
-    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
+    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
+
+    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
 
     pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
 
@@ -287,6 +291,11 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();
 
+pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run sync job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .schema();
+
 pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run garbage collection job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
@@ -379,6 +388,9 @@ pub struct GroupListItem {
     pub backup_count: u64,
     /// List of contained archive files.
     pub files: Vec<String>,
+    /// The owner of group
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub owner: Option<String>,
 }
 
 #[api(
@@ -411,6 +423,9 @@ pub struct SnapshotListItem {
     /// Overall snapshot size (sum of all archive sizes).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,
+    /// The owner of the snapshots group
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub owner: Option<String>,
 }
 
 #[api(
@@ -807,7 +822,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
 
     for fingerprint in invalid_fingerprints.iter() {
         if let Ok(_) = parse_simple_value(fingerprint, &schema) {
-            bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint);
+            bail!("test fingerprint '{}' failed - got Ok() while expecting an error.", fingerprint);
         }
     }
 
@@ -851,7 +866,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
 
     for name in invalid_user_ids.iter() {
         if let Ok(_) = parse_simple_value(name, &schema) {
-            bail!("test userid '{}' failed - got Ok() while expection an error.", name);
+            bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
        }
     }
 
@@ -311,7 +311,7 @@ impl DataBlob {
     /// Verify digest and data length for unencrypted chunks.
     ///
     /// To do that, we need to decompress data first. Please note that
-    /// this is noth possible for encrypted chunks.
+    /// this is not possible for encrypted chunks.
     pub fn verify_unencrypted(
         &self,
         expected_chunk_size: usize,
@@ -11,7 +11,7 @@ use super::backup_info::{BackupGroup, BackupDir};
 use super::chunk_store::ChunkStore;
 use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
 use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
-use super::manifest::{MANIFEST_BLOB_NAME, BackupManifest};
+use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
 use super::index::*;
 use super::{DataBlob, ArchiveType, archive_type};
 use crate::config::datastore;
@@ -149,6 +149,7 @@ impl DataStore {
 
         let mut wanted_files = HashSet::new();
         wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
+        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
         manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });
 
         for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
@@ -198,7 +198,7 @@ impl FixedIndexReader {
         let mut csum = openssl::sha::Sha256::new();
         let mut chunk_end = 0;
         for pos in 0..self.index_length {
-            chunk_end = ((pos + 1) * self.chunk_size) as u64;
+            chunk_end = self.chunk_end(pos);
             let digest = self.chunk_digest(pos);
             csum.update(digest);
         }
@@ -7,6 +7,7 @@ use serde_json::{json, Value};
 use crate::backup::BackupDir;
 
 pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
+pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
 
 pub struct FileInfo {
     pub filename: String,
@@ -72,7 +73,7 @@ impl BackupManifest {
         let info = self.lookup_file_info(name)?;
 
         if size != info.size {
-            bail!("wrong size for file '{}' ({} != {}", name, info.size, size);
+            bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
         }
 
         if csum != &info.csum {
@@ -49,7 +49,7 @@ fn hello_command(
 }
 
 #[api(input: { properties: {} })]
-/// Quit command. Exit the programm.
+/// Quit command. Exit the program.
 ///
 /// Returns: nothing
 fn quit_command() -> Result<(), Error> {
@@ -16,7 +16,7 @@ use std::io::Write;
 // tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros
 
 // # pxar create test.pxar ./dyntest1/
-// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
+// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
 
 fn create_large_file(path: PathBuf) {
 
@@ -22,11 +22,6 @@ use proxmox_backup::client::*;
 use proxmox_backup::backup::*;
 use proxmox_backup::pxar::{ self, catalog::* };
 
-//use proxmox_backup::backup::image_index::*;
-//use proxmox_backup::config::datastore;
-//use proxmox_backup::pxar::encoder::*;
-//use proxmox_backup::backup::datastore::*;
-
 use serde_json::{json, Value};
 //use hyper::Body;
 use std::sync::{Arc, Mutex};
@@ -39,20 +34,12 @@ use tokio::sync::mpsc;
 const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
 const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
 
-proxmox::const_regex! {
-    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
-}
-
 const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
     .format(&BACKUP_REPO_URL)
     .max_length(256)
     .schema();
 
-const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
-    "Backup source specification ([<label>:<path>]).")
-    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
-    .schema();
-
 const KEYFILE_SCHEMA: Schema = StringSchema::new(
     "Path to encryption key. All data will be encrypted using this key.")
     .schema();
@@ -688,14 +675,6 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
     Ok(Value::Null)
 }
 
-fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
-
-    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
-        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
-    }
-    bail!("unable to parse directory specification '{}'", value);
-}
-
 fn spawn_catalog_upload(
     client: Arc<BackupWriter>,
     crypt_config: Option<Arc<CryptConfig>>,
|
|||||||
|
|
||||||
let mut upload_list = vec![];
|
let mut upload_list = vec![];
|
||||||
|
|
||||||
enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };
|
|
||||||
|
|
||||||
let mut upload_catalog = false;
|
let mut upload_catalog = false;
|
||||||
|
|
||||||
for backupspec in backupspec_list {
|
for backupspec in backupspec_list {
|
||||||
let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
|
let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
|
||||||
|
let filename = &spec.config_string;
|
||||||
|
let target = &spec.archive_name;
|
||||||
|
|
||||||
use std::os::unix::fs::FileTypeExt;
|
use std::os::unix::fs::FileTypeExt;
|
||||||
|
|
||||||
@ -878,19 +857,15 @@ async fn create_backup(
|
|||||||
.map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
|
.map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
|
||||||
let file_type = metadata.file_type();
|
let file_type = metadata.file_type();
|
||||||
|
|
||||||
let extension = target.rsplit('.').next()
|
match spec.spec_type {
|
||||||
.ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;
|
BackupSpecificationType::PXAR => {
|
||||||
|
|
||||||
match extension {
|
|
||||||
"pxar" => {
|
|
||||||
if !file_type.is_dir() {
|
if !file_type.is_dir() {
|
||||||
bail!("got unexpected file type (expected directory)");
|
bail!("got unexpected file type (expected directory)");
|
||||||
}
|
}
|
||||||
upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
|
upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
|
||||||
upload_catalog = true;
|
upload_catalog = true;
|
||||||
}
|
}
|
||||||
"img" => {
|
BackupSpecificationType::IMAGE => {
|
||||||
|
|
||||||
if !(file_type.is_file() || file_type.is_block_device()) {
|
if !(file_type.is_file() || file_type.is_block_device()) {
|
||||||
bail!("got unexpected file type (expected file or block device)");
|
bail!("got unexpected file type (expected file or block device)");
|
||||||
}
|
}
|
||||||
@ -899,22 +874,19 @@ async fn create_backup(
|
|||||||
|
|
||||||
if size == 0 { bail!("got zero-sized file '{}'", filename); }
|
if size == 0 { bail!("got zero-sized file '{}'", filename); }
|
||||||
|
|
||||||
upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
|
upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
|
||||||
}
|
}
|
||||||
"conf" => {
|
BackupSpecificationType::CONFIG => {
|
||||||
if !file_type.is_file() {
|
if !file_type.is_file() {
|
||||||
bail!("got unexpected file type (expected regular file)");
|
bail!("got unexpected file type (expected regular file)");
|
||||||
}
|
}
|
||||||
upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
|
upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
|
||||||
}
|
}
|
||||||
"log" => {
|
BackupSpecificationType::LOGFILE => {
|
||||||
if !file_type.is_file() {
|
if !file_type.is_file() {
|
||||||
bail!("got unexpected file type (expected regular file)");
|
bail!("got unexpected file type (expected regular file)");
|
||||||
}
|
}
|
||||||
upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
|
upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
bail!("got unknown archive extension '{}'", extension);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -967,21 +939,21 @@ async fn create_backup(
 
     for (backup_type, filename, target, size) in upload_list {
         match backup_type {
-            BackupType::CONFIG => {
+            BackupSpecificationType::CONFIG => {
                 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
                     .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                     .await?;
                 manifest.add_file(target, stats.size, stats.csum)?;
             }
-            BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
+            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                 println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
                     .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                     .await?;
                 manifest.add_file(target, stats.size, stats.csum)?;
             }
-            BackupType::PXAR => {
+            BackupSpecificationType::PXAR => {
                 println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                 let stats = backup_directory(
@@ -1000,7 +972,7 @@ async fn create_backup(
                 manifest.add_file(target, stats.size, stats.csum)?;
                 catalog.lock().unwrap().end_directory()?;
             }
-            BackupType::IMAGE => {
+            BackupSpecificationType::IMAGE => {
                 println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = backup_image(
                     &client,
@@ -1135,6 +1107,18 @@ fn dump_image<W: Write>(
     Ok(())
 }
 
+fn parse_archive_type(name: &str) -> (String, ArchiveType) {
+    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
+        (name.into(), archive_type(name).unwrap())
+    } else if name.ends_with(".pxar") {
+        (format!("{}.didx", name), ArchiveType::DynamicIndex)
+    } else if name.ends_with(".img") {
+        (format!("{}.fidx", name), ArchiveType::FixedIndex)
+    } else {
+        (format!("{}.blob", name), ArchiveType::Blob)
+    }
+}
+
 #[api(
     input: {
         properties: {
@@ -1207,14 +1191,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
         }
     };
 
-    let server_archive_name = if archive_name.ends_with(".pxar") {
-        format!("{}.didx", archive_name)
-    } else if archive_name.ends_with(".img") {
-        format!("{}.fidx", archive_name)
-    } else {
-        format!("{}.blob", archive_name)
-    };
-
     let client = BackupReader::start(
         client,
         crypt_config.clone(),
@@ -1227,7 +1203,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
 
     let manifest = client.download_manifest().await?;
 
-    if server_archive_name == MANIFEST_BLOB_NAME {
+    let (archive_name, archive_type) = parse_archive_type(archive_name);
+
+    if archive_name == MANIFEST_BLOB_NAME {
         let backup_index_data = manifest.into_json().to_string();
         if let Some(target) = target {
             replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
@@ -1238,9 +1216,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
 
-    } else if server_archive_name.ends_with(".blob") {
+    } else if archive_type == ArchiveType::Blob {
 
-        let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
+        let mut reader = client.download_blob(&manifest, &archive_name).await?;
 
         if let Some(target) = target {
             let mut writer = std::fs::OpenOptions::new()
@@ -1257,9 +1235,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
 
-    } else if server_archive_name.ends_with(".didx") {
+    } else if archive_type == ArchiveType::DynamicIndex {
 
-        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+        let index = client.download_dynamic_index(&manifest, &archive_name).await?;
 
         let most_used = index.find_most_used_chunks(8);
 
@@ -1289,9 +1267,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
             std::io::copy(&mut reader, &mut writer)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
-    } else if server_archive_name.ends_with(".fidx") {
+    } else if archive_type == ArchiveType::FixedIndex {
 
-        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
+        let index = client.download_fixed_index(&manifest, &archive_name).await?;
 
         let mut writer = if let Some(target) = target {
             std::fs::OpenOptions::new()
@@ -1308,9 +1286,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
         };
 
         dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
-
-    } else {
-        bail!("unknown archive file extension (expected .pxar of .img)");
     }
 
     Ok(Value::Null)
@@ -1390,6 +1365,12 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
         ("group", false, &StringSchema::new("Backup group.").schema()),
     ], [
         ("output-format", true, &OUTPUT_FORMAT),
+        (
+            "quiet",
+            true,
+            &BooleanSchema::new("Minimal output - only show removals.")
+                .schema()
+        ),
         ("repository", true, &REPO_URL_SCHEMA),
     ])
 )
@@ -1417,9 +1398,12 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
 
     let output_format = get_output_format(&param);
 
+    let quiet = param["quiet"].as_bool().unwrap_or(false);
+
     param.as_object_mut().unwrap().remove("repository");
     param.as_object_mut().unwrap().remove("group");
     param.as_object_mut().unwrap().remove("output-format");
+    param.as_object_mut().unwrap().remove("quiet");
 
     param["backup-type"] = group.backup_type().into();
     param["backup-id"] = group.backup_id().into();
@@ -1434,19 +1418,34 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
 
+    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
+        Ok(match v.as_bool() {
+            Some(true) => "keep",
+            Some(false) => "remove",
+            None => "unknown",
+        }.to_string())
+    };
+
     let options = default_table_format_options()
         .sortby("backup-type", false)
         .sortby("backup-id", false)
         .sortby("backup-time", false)
         .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
         .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
-        .column(ColumnConfig::new("keep"))
+        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
         ;
 
     let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;
 
     let mut data = result["data"].take();
 
+    if quiet {
+        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
+            item["keep"].as_bool() == Some(false)
+        }).map(|v| v.clone()).collect();
+        data = list.into();
+    }
+
     format_and_print_result_full(&mut data, info, &output_format, &options);
 
     Ok(Value::Null)
@@ -2028,7 +2027,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
 
     if let Some(pipe) = pipe {
         nix::unistd::chdir(Path::new("/")).unwrap();
-        // Finish creation of deamon by redirecting filedescriptors.
+        // Finish creation of daemon by redirecting filedescriptors.
         let nullfd = nix::fcntl::open(
             "/dev/null",
             nix::fcntl::OFlag::O_RDWR,
@@ -1,5 +1,4 @@
 use std::sync::Arc;
-use std::ffi::OsString;
 use std::path::Path;
 
 use anyhow::{bail, format_err, Error};
@@ -9,7 +8,6 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
 
 use proxmox::try_block;
 use proxmox::api::RpcEnvironmentType;
-use proxmox::sys::linux::procfs::mountinfo::{Device, MountInfo};
 
 use proxmox_backup::configdir;
 use proxmox_backup::buildcfg;
@@ -17,7 +15,7 @@ use proxmox_backup::server;
 use proxmox_backup::tools::daemon;
 use proxmox_backup::server::{ApiConfig, rest::*};
 use proxmox_backup::auth_helpers::*;
-use proxmox_backup::tools::disks::{ DiskManage, zfs::zfs_pool_stats };
+use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
 
 fn main() {
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
@@ -385,12 +383,15 @@ async fn schedule_datastore_prune() {
             }
         };
 
-        //fixme: if last_prune_job_stzill_running { continue; }
-
         let worker_type = "prune";
 
         let last = match lookup_last_worker(worker_type, &store) {
-            Ok(Some(upid)) => upid.starttime,
+            Ok(Some(upid)) => {
+                if proxmox_backup::server::worker_is_active_local(&upid) {
+                    continue;
+                }
+                upid.starttime
+            }
             Ok(None) => 0,
             Err(err) => {
                 eprintln!("lookup_last_job_start failed: {}", err);
@@ -507,12 +508,15 @@ async fn schedule_datastore_sync_jobs() {
             }
         };
 
-        //fixme: if last_sync_job_still_running { continue; }
+        let worker_type = "syncjob";
 
-        let worker_type = "sync";
-
-        let last = match lookup_last_worker(worker_type, &job_config.store) {
-            Ok(Some(upid)) => upid.starttime,
+        let last = match lookup_last_worker(worker_type, &job_id) {
+            Ok(Some(upid)) => {
+                if proxmox_backup::server::worker_is_active_local(&upid) {
+                    continue;
+                }
+                upid.starttime
+            },
             Ok(None) => 0,
             Err(err) => {
                 eprintln!("lookup_last_job_start failed: {}", err);
@ -594,31 +598,36 @@ async fn schedule_datastore_sync_jobs() {
|
|||||||
|
|
||||||
async fn run_stat_generator() {
|
async fn run_stat_generator() {
|
||||||
|
|
||||||
|
let mut count = 0;
|
||||||
loop {
|
loop {
|
||||||
|
count += 1;
|
||||||
|
let save = if count >= 6 { count = 0; true } else { false };
|
||||||
|
|
||||||
let delay_target = Instant::now() + Duration::from_secs(10);
|
let delay_target = Instant::now() + Duration::from_secs(10);
|
||||||
|
|
||||||
generate_host_stats().await;
|
generate_host_stats(save).await;
|
||||||
|
|
||||||
tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
|
tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
|
||||||
}
|
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn rrd_update_gauge(name: &str, value: f64) {
|
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
|
||||||
use proxmox_backup::rrd;
|
use proxmox_backup::rrd;
|
||||||
if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge) {
|
if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
|
||||||
eprintln!("rrd::update_value '{}' failed - {}", name, err);
|
eprintln!("rrd::update_value '{}' failed - {}", name, err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn rrd_update_derive(name: &str, value: f64) {
|
fn rrd_update_derive(name: &str, value: f64, save: bool) {
|
||||||
use proxmox_backup::rrd;
|
use proxmox_backup::rrd;
|
||||||
if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive) {
|
if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
|
||||||
eprintln!("rrd::update_value '{}' failed - {}", name, err);
|
eprintln!("rrd::update_value '{}' failed - {}", name, err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn generate_host_stats() {
|
async fn generate_host_stats(save: bool) {
|
||||||
use proxmox::sys::linux::procfs::{
|
use proxmox::sys::linux::procfs::{
|
||||||
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
|
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
|
||||||
use proxmox_backup::config::datastore;
|
use proxmox_backup::config::datastore;
|
||||||
@ -628,8 +637,8 @@ async fn generate_host_stats() {

     match read_proc_stat() {
         Ok(stat) => {
-            rrd_update_gauge("host/cpu", stat.cpu);
-            rrd_update_gauge("host/iowait", stat.iowait_percent);
+            rrd_update_gauge("host/cpu", stat.cpu, save);
+            rrd_update_gauge("host/iowait", stat.iowait_percent, save);
         }
         Err(err) => {
             eprintln!("read_proc_stat failed - {}", err);
@ -638,10 +647,10 @@ async fn generate_host_stats() {

     match read_meminfo() {
         Ok(meminfo) => {
-            rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
-            rrd_update_gauge("host/memused", meminfo.memused as f64);
-            rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
-            rrd_update_gauge("host/swapused", meminfo.swapused as f64);
+            rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
+            rrd_update_gauge("host/memused", meminfo.memused as f64, save);
+            rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
+            rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
         }
         Err(err) => {
             eprintln!("read_meminfo failed - {}", err);
@ -658,8 +667,8 @@ async fn generate_host_stats() {
                 netin += item.receive;
                 netout += item.send;
             }
-            rrd_update_derive("host/netin", netin as f64);
-            rrd_update_derive("host/netout", netout as f64);
+            rrd_update_derive("host/netin", netin as f64, save);
+            rrd_update_derive("host/netout", netout as f64, save);
         }
         Err(err) => {
             eprintln!("read_prox_net_dev failed - {}", err);
@ -668,7 +677,7 @@ async fn generate_host_stats() {

     match read_loadavg() {
         Ok(loadavg) => {
-            rrd_update_gauge("host/loadavg", loadavg.0 as f64);
+            rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
         }
         Err(err) => {
             eprintln!("read_loadavg failed - {}", err);
@ -677,7 +686,7 @@ async fn generate_host_stats() {

     let disk_manager = DiskManage::new();

-    gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");
+    gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

     match datastore::config() {
         Ok((config, _)) => {
@ -688,7 +697,7 @@ async fn generate_host_stats() {

                 let rrd_prefix = format!("datastore/{}", config.name);
                 let path = std::path::Path::new(&config.path);
-                gather_disk_stats(disk_manager.clone(), path, &rrd_prefix);
+                gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
             }
         }
         Err(err) => {
@ -699,100 +708,59 @@ async fn generate_host_stats() {
     });
 }

-fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str) {
+fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

-    match disk_usage(path) {
+    match proxmox_backup::tools::disks::disk_usage(path) {
         Ok((total, used, _avail)) => {
             let rrd_key = format!("{}/total", rrd_prefix);
-            rrd_update_gauge(&rrd_key, total as f64);
+            rrd_update_gauge(&rrd_key, total as f64, save);
             let rrd_key = format!("{}/used", rrd_prefix);
-            rrd_update_gauge(&rrd_key, used as f64);
+            rrd_update_gauge(&rrd_key, used as f64, save);
         }
         Err(err) => {
             eprintln!("read disk_usage on {:?} failed - {}", path, err);
         }
     }

-    match disk_manager.mount_info() {
-        Ok(mountinfo) => {
-            if let Some((fs_type, device, source)) = find_mounted_device(mountinfo, path) {
-                let mut device_stat = None;
-                match fs_type.as_str() {
-                    "zfs" => {
-                        if let Some(pool) = source {
-                            match zfs_pool_stats(&pool) {
-                                Ok(stat) => device_stat = stat,
-                                Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
-                            }
-                        }
-                    }
-                    _ => {
-                        if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
-                            match disk.read_stat() {
-                                Ok(stat) => device_stat = stat,
-                                Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
-                            }
-                        }
-                    }
-                }
-                if let Some(stat) = device_stat {
-                    let rrd_key = format!("{}/read_ios", rrd_prefix);
-                    rrd_update_derive(&rrd_key, stat.read_ios as f64);
-                    let rrd_key = format!("{}/read_bytes", rrd_prefix);
-                    rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64);
-                    let rrd_key = format!("{}/read_ticks", rrd_prefix);
-                    rrd_update_derive(&rrd_key, (stat.read_ticks as f64)/1000.0);
-
-                    let rrd_key = format!("{}/write_ios", rrd_prefix);
-                    rrd_update_derive(&rrd_key, stat.write_ios as f64);
-                    let rrd_key = format!("{}/write_bytes", rrd_prefix);
-                    rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64);
-                    let rrd_key = format!("{}/write_ticks", rrd_prefix);
-                    rrd_update_derive(&rrd_key, (stat.write_ticks as f64)/1000.0);
-                }
-            }
-        }
-        Err(err) => {
-            eprintln!("disk_manager mount_info() failed - {}", err);
-        }
-    }
-}
-
-// Returns (total, used, avail)
-fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {
-
-    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
-
-    use nix::NixPath;
-
-    let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
-    nix::errno::Errno::result(res)?;
-
-    let bsize = stat.f_bsize as u64;
-
-    Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
-}
-
-// Returns (fs_type, device, mount_source)
-pub fn find_mounted_device(
-    mountinfo: &MountInfo,
-    path: &std::path::Path,
-) -> Option<(String, Device, Option<OsString>)> {
-
-    let mut result = None;
-    let mut match_len = 0;
-
-    let root_path = std::path::Path::new("/");
-    for (_id, entry) in mountinfo {
-        if entry.root == root_path && path.starts_with(&entry.mount_point) {
-            let len = entry.mount_point.as_path().as_os_str().len();
-            if len > match_len {
-                match_len = len;
-                result = Some((entry.fs_type.clone(), entry.device, entry.mount_source.clone()));
-            }
-        }
-    }
-
-    result
-}
+    match disk_manager.find_mounted_device(path) {
+        Ok(None) => {},
+        Ok(Some((fs_type, device, source))) => {
+            let mut device_stat = None;
+            match fs_type.as_str() {
+                "zfs" => {
+                    if let Some(pool) = source {
+                        match zfs_pool_stats(&pool) {
+                            Ok(stat) => device_stat = stat,
+                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
+                        }
+                    }
+                }
+                _ => {
+                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
+                        match disk.read_stat() {
+                            Ok(stat) => device_stat = stat,
+                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
+                        }
+                    }
+                }
+            }
+            if let Some(stat) = device_stat {
+                let rrd_key = format!("{}/read_ios", rrd_prefix);
+                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
+                let rrd_key = format!("{}/read_bytes", rrd_prefix);
+                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);
+
+                let rrd_key = format!("{}/write_ios", rrd_prefix);
+                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
+                let rrd_key = format!("{}/write_bytes", rrd_prefix);
+                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);
+
+                let rrd_key = format!("{}/io_ticks", rrd_prefix);
+                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
+            }
+        }
+        Err(err) => {
+            eprintln!("find_mounted_device failed - {}", err);
+        }
+    }
+}
@ -17,7 +17,7 @@ fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error
 }

 #[api]
-/// Diplay node certificate information.
+/// Display node certificate information.
 fn cert_info() -> Result<(), Error> {

     let cert_path = PathBuf::from(configdir!("/proxy.pem"));
@ -30,4 +30,7 @@ pub use pxar_decode_writer::*;

 mod backup_repo;
 pub use backup_repo::*;

+mod backup_specification;
+pub use backup_specification::*;
+
 pub mod pull;
@ -138,7 +138,7 @@ impl BackupReader {

     /// Download a .blob file
     ///
-    /// This creates a temorary file in /tmp (using O_TMPFILE). The data is verified using
+    /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
     /// the provided manifest.
     pub async fn download_blob(
         &self,
@ -164,7 +164,7 @@ impl BackupReader {

     /// Download dynamic index file
     ///
-    /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
     /// the provided manifest.
     pub async fn download_dynamic_index(
         &self,
@ -192,7 +192,7 @@ impl BackupReader {

     /// Download fixed index file
     ///
-    /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
     /// the provided manifest.
     pub async fn download_fixed_index(
         &self,
@ -3,12 +3,8 @@ use std::fmt;
 use anyhow::{format_err, Error};

 use proxmox::api::schema::*;
-use proxmox::const_regex;

-const_regex! {
-    /// Regular expression to parse repository URLs
-    pub BACKUP_REPO_URL_REGEX = r"^(?:(?:([\w@]+)@)?([\w\-_.]+):)?(\w+)$";
-}
+use crate::api2::types::*;

 /// API schema format definition for repository URLs
 pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
src/client/backup_specification.rs (new file, 39 lines)
@ -0,0 +1,39 @@
+use anyhow::{bail, Error};
+
+use proxmox::api::schema::*;
+
+proxmox::const_regex! {
+    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
+}
+
+pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
+    "Backup source specification ([<label>:<path>]).")
+    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
+    .schema();
+
+pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }
+
+pub struct BackupSpecification {
+    pub archive_name: String, // left part
+    pub config_string: String, // right part
+    pub spec_type: BackupSpecificationType,
+}
+
+pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {
+
+    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
+        let archive_name = caps.get(1).unwrap().as_str().into();
+        let extension = caps.get(2).unwrap().as_str();
+        let config_string = caps.get(3).unwrap().as_str().into();
+        let spec_type = match extension {
+            "pxar" => BackupSpecificationType::PXAR,
+            "img" => BackupSpecificationType::IMAGE,
+            "conf" => BackupSpecificationType::CONFIG,
+            "log" => BackupSpecificationType::LOGFILE,
+            _ => bail!("unknown backup source type '{}'", extension),
+        };
+        return Ok(BackupSpecification { archive_name, config_string, spec_type });
+    }
+
+    bail!("unable to parse backup source specification '{}'", value);
+}
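A hedged usage sketch for the new parser — the sample values and the helper name below are invented, only `parse_backup_specification()` and the types come from the file above:

```rust
// Hypothetical caller; assumes the parse_backup_specification() added above.
fn describe(value: &str) -> Result<String, anyhow::Error> {
    let spec = parse_backup_specification(value)?;
    let kind = match spec.spec_type {
        BackupSpecificationType::PXAR => "pxar archive",
        BackupSpecificationType::IMAGE => "block image",
        BackupSpecificationType::CONFIG => "config file",
        BackupSpecificationType::LOGFILE => "log file",
    };
    Ok(format!("{} -> {} ({})", spec.archive_name, spec.config_string, kind))
}

// describe("root.pxar:/")       => "root.pxar -> / (pxar archive)"
// describe("disk.img:/dev/sda") => "disk.img -> /dev/sda (block image)"
```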
@ -343,7 +343,7 @@ impl HttpClient {

     /// Login
     ///
-    /// Login is done on demand, so this is onyl required if you need
+    /// Login is done on demand, so this is only required if you need
     /// access to authentication data in 'AuthInfo'.
     pub async fn login(&self) -> Result<AuthInfo, Error> {
         self.auth.listen().await
@ -400,21 +400,22 @@ impl HttpClient {
         if interactive && tty::stdin_isatty() {
             println!("fingerprint: {}", fp_string);
             loop {
-                print!("Want to trust? (y/n): ");
+                print!("Are you sure you want to continue connecting? (y/n): ");
                 let _ = std::io::stdout().flush();
-                let mut buf = [0u8; 1];
-                use std::io::Read;
-                match std::io::stdin().read_exact(&mut buf) {
-                    Ok(()) => {
-                        if buf[0] == b'y' || buf[0] == b'Y' {
+                use std::io::{BufRead, BufReader};
+                let mut line = String::new();
+                match BufReader::new(std::io::stdin()).read_line(&mut line) {
+                    Ok(_) => {
+                        let trimmed = line.trim();
+                        if trimmed == "y" || trimmed == "Y" {
                             return (true, Some(fp_string));
-                        } else if buf[0] == b'n' || buf[0] == b'N' {
+                        } else if trimmed == "n" || trimmed == "N" {
                             return (false, None);
+                        } else {
+                            continue;
                         }
                     }
-                    Err(_) => {
-                        return (false, None);
-                    }
+                    Err(_) => return (false, None),
                 }
             }
         }
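Switching from a single-byte `read_exact` to a buffered `read_line` means the answer is only evaluated once the user presses enter, and the trailing newline no longer lingers on stdin. A stand-alone sketch of the same prompt loop (not the client code itself):

```rust
use std::io::{BufRead, Write};

// Minimal y/n prompt; loops until a recognizable answer is read.
fn confirm(question: &str) -> bool {
    let stdin = std::io::stdin();
    loop {
        print!("{} (y/n): ", question);
        let _ = std::io::stdout().flush();
        let mut line = String::new();
        if stdin.lock().read_line(&mut line).is_err() {
            return false;
        }
        match line.trim() {
            "y" | "Y" => return true,
            "n" | "N" => return false,
            _ => continue, // anything else: ask again
        }
    }
}
```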
@ -106,6 +106,34 @@ async fn pull_single_archive(
     Ok(())
 }

+// Note: The client.log.blob is uploaded after the backup, so it is
+// not mentioned in the manifest.
+async fn try_client_log_download(
+    worker: &WorkerTask,
+    reader: Arc<BackupReader>,
+    path: &std::path::Path,
+) -> Result<(), Error> {
+
+    let mut tmp_path = path.to_owned();
+    tmp_path.set_extension("tmp");
+
+    let tmpfile = std::fs::OpenOptions::new()
+        .write(true)
+        .create(true)
+        .read(true)
+        .open(&tmp_path)?;
+
+    // Note: be silent if there is no log - only log successful download
+    if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
+        if let Err(err) = std::fs::rename(&tmp_path, &path) {
+            bail!("Atomic rename file {:?} failed - {}", path, err);
+        }
+        worker.log(format!("got backup log file {:?}", CLIENT_LOG_BLOB_NAME));
+    }
+
+    Ok(())
+}
+
 async fn pull_snapshot(
     worker: &WorkerTask,
     reader: Arc<BackupReader>,
@ -117,6 +145,10 @@ async fn pull_snapshot(
     manifest_name.push(snapshot.relative_path());
     manifest_name.push(MANIFEST_BLOB_NAME);

+    let mut client_log_name = tgt_store.base_path();
+    client_log_name.push(snapshot.relative_path());
+    client_log_name.push(CLIENT_LOG_BLOB_NAME);
+
     let mut tmp_manifest_name = manifest_name.clone();
     tmp_manifest_name.set_extension("tmp");

@ -137,6 +169,10 @@ async fn pull_snapshot(
         })?;

         if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
+            if !client_log_name.exists() {
+                try_client_log_download(worker, reader, &client_log_name).await?;
+            }
+            worker.log("no data changes");
             return Ok(()); // nothing changed
         }
     }
@ -199,6 +235,10 @@ async fn pull_snapshot(
         bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
     }

+    if !client_log_name.exists() {
+        try_client_log_download(worker, reader, &client_log_name).await?;
+    }
+
     // cleanup - remove stale files
     tgt_store.cleanup_backup_dir(snapshot, &manifest)?;

@ -223,9 +263,11 @@ pub async fn pull_snapshot_from(
             }
             return Err(err);
         }
+        worker.log(format!("sync snapshot {:?} done", snapshot.relative_path()));
     } else {
         worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
-        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?
+        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?;
+        worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path()));
     }

     Ok(())
@ -149,7 +149,7 @@ impl Interface {
         Ok(())
     }

-    /// Write attributes not dependening on address family
+    /// Write attributes not depending on address family
     fn write_iface_attributes(&self, w: &mut dyn Write) -> Result<(), Error> {

         static EMPTY_LIST: Vec<String> = Vec::new();
@ -187,7 +187,7 @@ impl Interface {
         Ok(())
     }

-    /// Write attributes dependening on address family inet (IPv4)
+    /// Write attributes depending on address family inet (IPv4)
     fn write_iface_attributes_v4(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
         if method == NetworkConfigMethod::Static {
             if let Some(address) = &self.cidr {
@ -211,7 +211,7 @@ impl Interface {
         Ok(())
     }

-    /// Write attributes dependening on address family inet6 (IPv6)
+    /// Write attributes depending on address family inet6 (IPv6)
     fn write_iface_attributes_v6(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
         if method == NetworkConfigMethod::Static {
             if let Some(address) = &self.cidr6 {
@ -46,7 +46,7 @@ lazy_static! {
         },
         schedule: {
             optional: true,
-            schema: GC_SCHEDULE_SCHEMA,
+            schema: SYNC_SCHEDULE_SCHEMA,
         },
     }
 )]
@ -66,6 +66,79 @@ pub struct SyncJobConfig {
     pub schedule: Option<String>,
 }

+// FIXME: generate duplicate schemas/structs from one listing?
+#[api(
+    properties: {
+        id: {
+            schema: JOB_ID_SCHEMA,
+        },
+        store: {
+            schema: DATASTORE_SCHEMA,
+        },
+        remote: {
+            schema: REMOTE_ID_SCHEMA,
+        },
+        "remote-store": {
+            schema: DATASTORE_SCHEMA,
+        },
+        "remove-vanished": {
+            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
+            optional: true,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+        schedule: {
+            optional: true,
+            schema: SYNC_SCHEDULE_SCHEMA,
+        },
+        "next-run": {
+            description: "Estimated time of the next run (UNIX epoch).",
+            optional: true,
+            type: Integer,
+        },
+        "last-run-state": {
+            description: "Result of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-upid": {
+            description: "Task UPID of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-endtime": {
+            description: "Endtime of the last run.",
+            optional: true,
+            type: Integer,
+        },
+    }
+)]
+#[serde(rename_all="kebab-case")]
+#[derive(Serialize,Deserialize)]
+/// Status of Sync Job
+pub struct SyncJobStatus {
+    pub id: String,
+    pub store: String,
+    pub remote: String,
+    pub remote_store: String,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub remove_vanished: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub schedule: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub next_run: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_state: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_upid: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_endtime: Option<i64>,
+}
+
 fn init() -> SectionConfig {
     let obj_schema = match SyncJobConfig::API_SCHEMA {
         Schema::Object(ref obj_schema) => obj_schema,
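With `#[serde(rename_all="kebab-case")]` the Rust field names map onto the kebab-case keys declared in the `#[api]` schema above, and `skip_serializing_if` drops unset optional values from the JSON. A reduced, hypothetical sketch of the resulting serialization (struct and values invented to mirror the pattern, not taken from the tree):

```rust
use serde::Serialize;

// Reduced mirror of the SyncJobStatus pattern, for illustration only.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct JobStatus {
    id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    last_run_state: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    next_run: Option<i64>,
}

fn main() {
    let s = JobStatus { id: "job1".into(), last_run_state: Some("OK".into()), next_run: None };
    // prints: {"id":"job1","last-run-state":"OK"}  -- next-run is omitted entirely
    println!("{}", serde_json::to_string(&s).unwrap());
}
```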
@ -4,7 +4,7 @@
 //! format used in the [casync](https://github.com/systemd/casync)
 //! toolkit (we are not 100\% binary compatible). It is a file archive
 //! format defined by 'Lennart Poettering', specially defined for
-//! efficent deduplication.
+//! efficient deduplication.

 //! Every archive contains items in the following order:
 //! * `ENTRY` -- containing general stat() data and related bits
@ -61,7 +61,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
     }
 }

-/// This function calls the provided `copy_func()` with the permutaion
+/// This function calls the provided `copy_func()` with the permutation
 /// info.
 ///
 /// ```
@ -71,7 +71,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
 /// });
 /// ```
 ///
-/// This will produce the folowing output:
+/// This will produce the following output:
 ///
 /// ```no-compile
 /// Copy 3 to 0
@ -81,7 +81,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
 /// Copy 4 to 2
 /// ```
 ///
-/// So this generates the following permuation: `[3,1,4,0,2]`.
+/// So this generates the following permutation: `[3,1,4,0,2]`.

 pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
     n: usize,
@ -1117,7 +1117,7 @@ impl<'a, W: Write, C: BackupCatalogWriter> Encoder<'a, W, C> {
         if pos != size {
             // Note:: casync format cannot handle that
             bail!(
-                "detected shrinked file {:?} ({} < {})",
+                "detected shrunk file {:?} ({} < {})",
                 self.full_path(),
                 pos,
                 size
@ -29,7 +29,7 @@ pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b;
 /// Marks item as hardlink
 /// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
 pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
-/// Marks the beginnig of the payload (actual content) of regular files
+/// Marks the beginning of the payload (actual content) of regular files
 pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
 /// Marks item as entry of goodbye table
 pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;
@ -124,7 +124,7 @@ impl MatchPattern {
         Ok(Some((match_pattern, content_buffer, stat)))
     }

-    /// Interprete a byte buffer as a sinlge line containing a valid
+    /// Interpret a byte buffer as a sinlge line containing a valid
     /// `MatchPattern`.
     /// Pattern starting with `#` are interpreted as comments, returning `Ok(None)`.
     /// Pattern starting with '!' are interpreted as negative match pattern.
@ -84,7 +84,7 @@ impl<R: Read> SequentialDecoder<R> {

     pub(crate) fn read_link(&mut self, size: u64) -> Result<PathBuf, Error> {
         if size < (HEADER_SIZE + 2) {
-            bail!("dectected short link target.");
+            bail!("detected short link target.");
         }
         let target_len = size - HEADER_SIZE;

@ -104,7 +104,7 @@ impl<R: Read> SequentialDecoder<R> {

     pub(crate) fn read_hardlink(&mut self, size: u64) -> Result<(PathBuf, u64), Error> {
         if size < (HEADER_SIZE + 8 + 2) {
-            bail!("dectected short hardlink header.");
+            bail!("detected short hardlink header.");
         }
         let offset: u64 = self.read_item()?;
         let target = self.read_link(size - 8)?;
@ -121,7 +121,7 @@ impl<R: Read> SequentialDecoder<R> {

     pub(crate) fn read_filename(&mut self, size: u64) -> Result<OsString, Error> {
         if size < (HEADER_SIZE + 2) {
-            bail!("dectected short filename");
+            bail!("detected short filename");
         }
         let name_len = size - HEADER_SIZE;
@ -40,7 +40,7 @@ fn now() -> Result<f64, Error> {
     Ok(time.as_secs_f64())
 }

-pub fn update_value(rel_path: &str, value: f64, dst: DST) -> Result<(), Error> {
+pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<(), Error> {

     let mut path = PathBuf::from(PBS_RRD_BASEDIR);
     path.push(rel_path);
@ -52,7 +52,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST) -> Result<(), Error> {

     if let Some(rrd) = map.get_mut(rel_path) {
         rrd.update(now, value);
-        rrd.save(&path)?;
+        if save { rrd.save(&path)?; }
     } else {
         let mut rrd = match RRD::load(&path) {
             Ok(rrd) => rrd,
@ -64,7 +64,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST) -> Result<(), Error> {
             },
         };
         rrd.update(now, value);
-        rrd.save(&path)?;
+        if save { rrd.save(&path)?; }
         map.insert(rel_path.into(), rrd);
     }
@ -72,7 +72,7 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
 /// If the task is spanned from a different process, we simply return if
 /// that process is still running. This information is good enough to detect
 /// stale tasks...
-fn worker_is_active_local(upid: &UPID) -> bool {
+pub fn worker_is_active_local(upid: &UPID) -> bool {
     if (upid.pid == *MY_PID) && (upid.pstart == *MY_PID_PSTART) {
         WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id)
     } else {
@ -277,7 +277,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
     } else {
         match state {
             None => {
-                println!("Detected stoped UPID {}", upid_str);
+                println!("Detected stopped UPID {}", upid_str);
                 let status = upid_read_status(&upid)
                     .unwrap_or_else(|_| String::from("unknown"));
                 finish_list.push(TaskListInfo {
@ -127,7 +127,7 @@ pub fn lock_file<F: AsRawFd>(
 }

 /// Open or create a lock file (append mode). Then try to
-/// aquire a lock using `lock_file()`.
+/// acquire a lock using `lock_file()`.
 pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
     let path = path.as_ref();
     let mut file = match OpenOptions::new().create(true).append(true).open(path) {
@ -136,7 +136,7 @@ pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<Fi
     };
     match lock_file(&mut file, true, Some(timeout)) {
         Ok(_) => Ok(file),
-        Err(err) => bail!("Unable to aquire lock {:?} - {}", path, err),
+        Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
     }
 }

@ -441,7 +441,7 @@ pub fn join(data: &Vec<String>, sep: char) -> String {

 /// Detect modified configuration files
 ///
-/// This function fails with a resonable error message if checksums do not match.
+/// This function fails with a reasonable error message if checksums do not match.
 pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
     if digest1 != digest2 {
         bail!("detected modified configuration - file changed by other user? Try again.");
@ -149,14 +149,14 @@ fn test_broadcast_future() {
         .map_ok(|res| {
             CHECKSUM.fetch_add(res, Ordering::SeqCst);
         })
-        .map_err(|err| { panic!("got errror {}", err); })
+        .map_err(|err| { panic!("got error {}", err); })
         .map(|_| ());

     let receiver2 = sender.listen()
         .map_ok(|res| {
             CHECKSUM.fetch_add(res*2, Ordering::SeqCst);
         })
-        .map_err(|err| { panic!("got errror {}", err); })
+        .map_err(|err| { panic!("got error {}", err); })
         .map(|_| ());

     let mut rt = tokio::runtime::Runtime::new().unwrap();
@ -1,6 +1,6 @@
 //! Disk query/management utilities for.

-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
 use std::ffi::{OsStr, OsString};
 use std::io;
 use std::os::unix::ffi::{OsStrExt, OsStringExt};
@ -13,10 +13,13 @@ use libc::dev_t;
 use once_cell::sync::OnceCell;

 use proxmox::sys::error::io_err_other;
-use proxmox::sys::linux::procfs::MountInfo;
+use proxmox::sys::linux::procfs::{MountInfo, mountinfo::Device};
 use proxmox::{io_bail, io_format_err};

-pub mod zfs;
+mod zfs;
+pub use zfs::*;
+mod lvm;
+pub use lvm::*;

 bitflags! {
     /// Ways a device is being used.
@ -135,6 +138,28 @@ impl DiskManage {
         })
     }

+    /// Information about file system type and unsed device for a path
+    ///
+    /// Returns tuple (fs_type, device, mount_source)
+    pub fn find_mounted_device(
+        &self,
+        path: &std::path::Path,
+    ) -> Result<Option<(String, Device, Option<OsString>)>, Error> {
+
+        let stat = nix::sys::stat::stat(path)?;
+        let device = Device::from_dev_t(stat.st_dev);
+
+        let root_path = std::path::Path::new("/");
+
+        for (_id, entry) in self.mount_info()? {
+            if entry.root == root_path && entry.device == device {
+                return Ok(Some((entry.fs_type.clone(), entry.device, entry.mount_source.clone())));
+            }
+        }
+
+        Ok(None)
+    }
+
     /// Check whether a specific device node is mounted.
     ///
     /// Note that this tries to `stat` the sources of all mount points without caching the result
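The relocated helper now resolves the filesystem for a path via its device number (`st_dev`) and compares that against the parsed mountinfo entries, instead of the longest-mount-point-prefix search the proxy binary used before. A hedged sketch of the device-number part only (helper name invented):

```rust
// Sketch only: obtain the device number a path lives on, as used for the mountinfo lookup.
fn device_of(path: &std::path::Path) -> nix::Result<(u64, u64)> {
    let stat = nix::sys::stat::stat(path)?;
    let dev = stat.st_dev;
    // major/minor as reported by the kernel; a mountinfo entry matches when its
    // "major:minor" field equals this, independent of how deep the path is.
    Ok((nix::sys::stat::major(dev), nix::sys::stat::minor(dev)))
}
```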
@ -427,6 +452,8 @@ impl Disk {
     }

     /// Read block device stats
+    ///
+    /// see https://www.kernel.org/doc/Documentation/block/stat.txt
     pub fn read_stat(&self) -> std::io::Result<Option<BlockDevStat>> {
         if let Some(stat) = self.read_sys(Path::new("stat"))? {
             let stat = unsafe { std::str::from_utf8_unchecked(&stat) };
@ -434,23 +461,35 @@ impl Disk {
                 u64::from_str_radix(s, 10).unwrap_or(0)
             }).collect();

-            if stat.len() < 8 { return Ok(None); }
+            if stat.len() < 15 { return Ok(None); }

             return Ok(Some(BlockDevStat {
                 read_ios: stat[0],
-                read_merges: stat[1],
                 read_sectors: stat[2],
-                read_ticks: stat[3],
-                write_ios: stat[4],
-                write_merges: stat[5],
-                write_sectors: stat[6],
-                write_ticks: stat[7],
+                write_ios: stat[4] + stat[11], // write + discard
+                write_sectors: stat[6] + stat[13], // write + discard
+                io_ticks: stat[10],
             }));
         }
         Ok(None)
     }
 }
+
+/// Returns disk usage information (total, used, avail)
+pub fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {
+
+    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
+
+    use nix::NixPath;
+
+    let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
+    nix::errno::Errno::result(res)?;
+
+    let bsize = stat.f_bsize as u64;
+
+    Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
+}

 /// This is just a rough estimate for a "type" of disk.
 pub enum DiskType {
     /// We know nothing.
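For reference, the block `stat` file is a single whitespace-separated line; the indices the new code reads are 0 (read I/Os), 2 (read sectors), 4 (write I/Os), 6 (write sectors), 11 and 13 (discard I/Os and sectors, folded into the write counters), plus field 10 for the millisecond tick counter stored as `io_ticks` — hence the `< 15` length check. A small stand-alone parse of such a line (sample numbers made up):

```rust
// Parse a /sys/block/<dev>/stat style line the same way the diff above does.
fn main() {
    let line = "4022 1761 276541 1166 6171 4495 144130 4098 0 5190 5868 12 0 603 91";
    let f: Vec<u64> = line.split_whitespace().map(|s| s.parse().unwrap_or(0)).collect();
    assert!(f.len() >= 15);
    let read_ios = f[0];
    let read_sectors = f[2];
    let write_ios = f[4] + f[11];      // writes + discards
    let write_sectors = f[6] + f[13];  // writes + discards
    let ticks_ms = f[10];
    println!("{} {} {} {} {}", read_ios, read_sectors, write_ios, write_sectors, ticks_ms);
}
```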
@ -470,11 +509,47 @@ pub enum DiskType {
 /// Represents the contents of the /sys/block/<dev>/stat file.
 pub struct BlockDevStat {
     pub read_ios: u64,
-    pub read_merges: u64,
     pub read_sectors: u64,
-    pub read_ticks: u64, //milliseconds
     pub write_ios: u64,
-    pub write_merges: u64,
     pub write_sectors: u64,
-    pub write_ticks: u64, //milliseconds
+    pub io_ticks: u64, // milliseconds
+}
+
+/// Use lsblk to read partition type uuids.
+pub fn get_partition_type_info() -> Result<HashMap<String, Vec<String>>, Error> {
+
+    const LSBLK_BIN_PATH: &str = "/usr/bin/lsblk";
+
+    let mut command = std::process::Command::new(LSBLK_BIN_PATH);
+    command.args(&["--json", "-o", "path,parttype"]);
+
+    let output = command.output()
+        .map_err(|err| format_err!("failed to execute '{}' - {}", LSBLK_BIN_PATH, err))?;
+
+    let output = crate::tools::command_output(output, None)
+        .map_err(|err| format_err!("lsblk command failed: {}", err))?;
+
+    let mut res: HashMap<String, Vec<String>> = HashMap::new();
+
+    let output: serde_json::Value = output.parse()?;
+    match output["blockdevices"].as_array() {
+        Some(list) => {
+            for info in list {
+                let path = match info["path"].as_str() {
+                    Some(p) => p,
+                    None => continue,
+                };
+                let partition_type = match info["parttype"].as_str() {
+                    Some(t) => t.to_owned(),
+                    None => continue,
+                };
+                let devices = res.entry(partition_type).or_insert(Vec::new());
+                devices.push(path.to_string());
+            }
+        }
+        None => {
+        }
+    }
+    Ok(res)
 }
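`lsblk --json -o path,parttype` emits a `blockdevices` array of objects with `path` and `parttype` keys (`parttype` is null when a device has no partition type), which is what the loop above groups by. A hedged, stand-alone sketch with invented output:

```rust
use std::collections::HashMap;

fn main() -> Result<(), serde_json::Error> {
    // Shortened, invented sample of `lsblk --json -o path,parttype` output.
    let raw = r#"{"blockdevices": [
        {"path": "/dev/sda", "parttype": null},
        {"path": "/dev/sda1", "parttype": "e6d6d379-f507-44c2-a23c-238f2a3df928"}
    ]}"#;
    let v: serde_json::Value = serde_json::from_str(raw)?;
    let mut map: HashMap<String, Vec<String>> = HashMap::new();
    if let Some(list) = v["blockdevices"].as_array() {
        for dev in list {
            if let (Some(path), Some(pt)) = (dev["path"].as_str(), dev["parttype"].as_str()) {
                map.entry(pt.to_string()).or_default().push(path.to_string());
            }
        }
    }
    println!("{:?}", map); // {"e6d6d379-...": ["/dev/sda1"]}
    Ok(())
}
```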
src/tools/disks/lvm.rs (new file, 55 lines)
@ -0,0 +1,55 @@
+use std::collections::{HashSet, HashMap};
+
+use anyhow::{format_err, Error};
+use serde_json::Value;
+use lazy_static::lazy_static;
+
+lazy_static!{
+    static ref LVM_UUIDS: HashSet<&'static str> = {
+        let mut set = HashSet::new();
+        set.insert("e6d6d379-f507-44c2-a23c-238f2a3df928");
+        set
+    };
+}
+
+/// Get list of devices used by LVM (pvs).
+pub fn get_lvm_devices(
+    partition_type_map: &HashMap<String, Vec<String>>,
+) -> Result<HashSet<String>, Error> {
+
+    const PVS_BIN_PATH: &str = "/sbin/pvs";
+
+    let mut command = std::process::Command::new(PVS_BIN_PATH);
+    command.args(&["--reportformat", "json", "--noheadings", "--readonly", "-o", "pv_name"]);
+
+    let output = command.output()
+        .map_err(|err| format_err!("failed to execute '{}' - {}", PVS_BIN_PATH, err))?;
+
+    let output = crate::tools::command_output(output, None)
+        .map_err(|err| format_err!("pvs command failed: {}", err))?;
+
+    let mut device_set: HashSet<String> = HashSet::new();
+
+    for device_list in partition_type_map.iter()
+        .filter_map(|(uuid, list)| if LVM_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
+    {
+        for device in device_list {
+            device_set.insert(device.clone());
+        }
+    }
+
+    let output: Value = output.parse()?;
+
+    match output["report"][0]["pv"].as_array() {
+        Some(list) => {
+            for info in list {
+                if let Some(pv_name) = info["pv_name"].as_str() {
+                    device_set.insert(pv_name.to_string());
+                }
+            }
+        }
+        None => return Ok(device_set),
+    }
+
+    Ok(device_set)
+}
@ -1,9 +1,46 @@
-use anyhow::{bail, Error};
-
 use std::path::PathBuf;
+use std::collections::{HashMap, HashSet};
+
+use anyhow::{bail, Error};
+use lazy_static::lazy_static;
+
+use nom::{
+    error::VerboseError,
+    bytes::complete::{take_while, take_while1, take_till, take_till1},
+    combinator::{map_res, all_consuming, recognize},
+    sequence::{preceded, tuple},
+    character::complete::{space1, digit1, char, line_ending},
+    multi::{many0, many1},
+};

 use super::*;

+lazy_static!{
+    static ref ZFS_UUIDS: HashSet<&'static str> = {
+        let mut set = HashSet::new();
+        set.insert("6a898cc3-1dd2-11b2-99a6-080020736631"); // apple
+        set.insert("516e7cba-6ecf-11d6-8ff8-00022d09712b"); // bsd
+        set
+    };
+}
+
+type IResult<I, O, E = VerboseError<I>> = Result<(I, O), nom::Err<E>>;
+
+#[derive(Debug)]
+pub struct ZFSPoolUsage {
+    total: u64,
+    used: u64,
+    free: u64,
+}
+
+#[derive(Debug)]
+pub struct ZFSPoolStatus {
+    name: String,
+    usage: Option<ZFSPoolUsage>,
+    devices: Vec<String>,
+}
+
+/// Returns kernel IO-stats for zfs pools
 pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {

     let mut path = PathBuf::from("/proc/spl/kstat/zfs");
@ -37,11 +74,133 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
         write_sectors: stat[1]>>9,
         read_ios: stat[2],
         write_ios: stat[3],
-        read_merges: 0, // there is no such info
-        write_merges: 0, // there is no such info
-        write_ticks: ticks,
-        read_ticks: ticks,
+        io_ticks: ticks,
     };

     Ok(Some(stat))
 }

+/// Recognizes zero or more spaces and tabs (but not carage returns or line feeds)
+fn multispace0(i: &str) -> IResult<&str, &str> {
+    take_while(|c| c == ' ' || c == '\t')(i)
+}
+
+/// Recognizes one or more spaces and tabs (but not carage returns or line feeds)
+fn multispace1(i: &str) -> IResult<&str, &str> {
+    take_while1(|c| c == ' ' || c == '\t')(i)
+}
+
+fn parse_optional_u64(i: &str) -> IResult<&str, Option<u64>> {
+    if i.starts_with('-') {
+        Ok((&i[1..], None))
+    } else {
+        let (i, value) = map_res(recognize(digit1), str::parse)(i)?;
+        Ok((i, Some(value)))
+    }
+}
+
+fn parse_pool_device(i: &str) -> IResult<&str, String> {
+    let (i, (device, _, _rest)) = tuple((
+        preceded(multispace1, take_till1(|c| c == ' ' || c == '\t')),
+        multispace1,
+        preceded(take_till(|c| c == '\n'), char('\n')),
+    ))(i)?;
+
+    Ok((i, device.to_string()))
+}
+
+fn parse_pool_header(i: &str) -> IResult<&str, ZFSPoolStatus> {
+    let (i, (text, total, used, free, _, _eol)) = tuple((
+        take_while1(|c| char::is_alphanumeric(c)),
+        preceded(multispace1, parse_optional_u64),
+        preceded(multispace1, parse_optional_u64),
+        preceded(multispace1, parse_optional_u64),
+        preceded(space1, take_till(|c| c == '\n')),
+        line_ending,
+    ))(i)?;
+
+    let status = if let (Some(total), Some(used), Some(free)) = (total, used, free) {
+        ZFSPoolStatus {
+            name: text.into(),
+            usage: Some(ZFSPoolUsage { total, used, free }),
+            devices: Vec::new(),
+        }
+    } else {
+        ZFSPoolStatus {
+            name: text.into(), usage: None, devices: Vec::new(),
+        }
+    };
+
+    Ok((i, status))
+}
+
+fn parse_pool_status(i: &str) -> IResult<&str, ZFSPoolStatus> {
+
+    let (i, mut stat) = parse_pool_header(i)?;
+    let (i, devices) = many1(parse_pool_device)(i)?;
+
+    for device_path in devices.into_iter().filter(|n| n.starts_with("/dev/")) {
+        stat.devices.push(device_path);
+    }
+
+    let (i, _) = many0(tuple((multispace0, char('\n'))))(i)?; // skip empty lines
+
+    Ok((i, stat))
+}
+
+/// Parse zpool list outout
+///
+/// Note: This does not reveal any details on how the pool uses the devices, because
+/// the zpool list output format is not really defined...
+pub fn parse_zfs_list(i: &str) -> Result<Vec<ZFSPoolStatus>, Error> {
+    match all_consuming(many1(parse_pool_status))(i) {
+        Err(nom::Err::Error(err)) |
+        Err(nom::Err::Failure(err)) => {
+            bail!("unable to parse zfs list output - {}", nom::error::convert_error(i, err));
+        }
+        Err(err) => {
+            bail!("unable to parse calendar event: {}", err);
+        }
+        Ok((_, ce)) => Ok(ce),
+    }
+}
+
+/// List devices used by zfs (or a specific zfs pool)
+pub fn zfs_devices(
+    partition_type_map: &HashMap<String, Vec<String>>,
+    pool: Option<&OsStr>,
+) -> Result<HashSet<String>, Error> {
+
+    // Note: zpools list output can include entries for 'special', 'cache' and 'logs'
+    // and maybe other things.
+
+    let mut command = std::process::Command::new("/sbin/zpool");
+    command.args(&["list", "-H", "-v", "-p", "-P"]);
+
+    if let Some(pool) = pool { command.arg(pool); }
+
+    let output = command.output()
+        .map_err(|err| format_err!("failed to execute '/sbin/zpool' - {}", err))?;
+
+    let output = crate::tools::command_output(output, None)
+        .map_err(|err| format_err!("zpool list command failed: {}", err))?;
+
+    let list = parse_zfs_list(&output)?;
+
+    let mut device_set = HashSet::new();
+    for entry in list {
+        for device in entry.devices {
+            device_set.insert(device.clone());
+        }
+    }
+
+    for device_list in partition_type_map.iter()
+        .filter_map(|(uuid, list)| if ZFS_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
+    {
+        for device in device_list {
+            device_set.insert(device.clone());
+        }
+    }
+
+    Ok(device_set)
+}
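The nom parser above consumes the machine-readable output of `zpool list -H -v -p -P`: one header row per pool (name plus numeric size/alloc/free columns, with `-` where a value is absent) followed by indented device rows. A hedged illustration, assuming the `parse_zfs_list()` added above; the pool name, numbers, and device path are invented:

```rust
// Sketch only: feed an invented but shape-correct `zpool list -H -v -p -P` sample
// through parse_zfs_list() from the module above.
fn main() -> Result<(), anyhow::Error> {
    let mut sample = String::new();
    sample.push_str("rpool\t10737418240\t1234567890\t9502850350\t-\t-\t10%\t1.00x\tONLINE\t-\n");
    sample.push_str("\t/dev/sda2\t10737418240\t1234567890\t9502850350\t-\t-\t10%\t-\n");
    let pools = parse_zfs_list(&sample)?;
    // Expected: one ZFSPoolStatus { name: "rpool", usage: Some(..), devices: ["/dev/sda2"] }
    println!("{:?}", pools);
    Ok(())
}
```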
@ -4,7 +4,7 @@ use std::io::Write;

 /// Log messages with timestamps into files
 ///
-/// Logs messages to file, and optionaly to standart output.
+/// Logs messages to file, and optionally to standard output.
 ///
 ///
 /// #### Example:
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Scan through a directory with a regular expression. This is simply a shortcut filtering the
|
/// Scan through a directory with a regular expression. This is simply a shortcut filtering the
|
||||||
/// results of `read_subdir`. Non-UTF8 comaptible file names are silently ignored.
|
/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored.
|
||||||
pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
|
pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
|
||||||
dirfd: RawFd,
|
dirfd: RawFd,
|
||||||
path: &P,
|
path: &P,
|
||||||
|
@ -1,6 +1,6 @@
 //! Inter-process reader-writer lock builder.
 //!
-//! This implemenation uses fcntl record locks with non-blocking
+//! This implementation uses fcntl record locks with non-blocking
 //! F_SETLK command (never blocks).
 //!
 //! We maintain a map of shared locks with time stamps, so you can get
@ -127,9 +127,9 @@ impl ProcessLocker {
         Ok(())
     }

-    /// Try to aquire a shared lock
+    /// Try to acquire a shared lock
     ///
-    /// On sucess, this makes sure that no other process can get an exclusive lock for the file.
+    /// On success, this makes sure that no other process can get an exclusive lock for the file.
     pub fn try_shared_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockSharedGuard, Error> {

         let mut data = locker.lock().unwrap();
@ -168,7 +168,7 @@ impl ProcessLocker {
         result
     }

-    /// Try to aquire a exclusive lock
+    /// Try to acquire a exclusive lock
     ///
     /// Make sure the we are the only process which has locks for this file (shared or exclusive).
     pub fn try_exclusive_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockExclusiveGuard, Error> {
@ -163,12 +163,11 @@ pub fn compute_next_event(
             if event.days.contains(day) {
                 t.changes.remove(TMChanges::WDAY);
             } else {
-                if let Some(n) = (day_num+1..6)
-                    .map(|d| WeekDays::from_bits(1<<d).unwrap())
-                    .find(|d| event.days.contains(*d))
+                if let Some(n) = ((day_num+1)..7)
+                    .find(|d| event.days.contains(WeekDays::from_bits(1<<d).unwrap()))
                 {
                     // try next day
-                    t.add_days((n.bits() as i32) - day_num, true);
+                    t.add_days(n - day_num, true);
                     continue;
                 } else {
                     // try next week
@ -296,6 +295,16 @@ mod test {
     test_value("mon 2:*", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR)?;
     test_value("mon 2:50", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR + 50*MIN)?;

+    test_value("tue", THURSDAY_00_00, THURSDAY_00_00 + 5*DAY)?;
+    test_value("wed", THURSDAY_00_00, THURSDAY_00_00 + 6*DAY)?;
+    test_value("thu", THURSDAY_00_00, THURSDAY_00_00 + 7*DAY)?;
+    test_value("fri", THURSDAY_00_00, THURSDAY_00_00 + 1*DAY)?;
+    test_value("sat", THURSDAY_00_00, THURSDAY_00_00 + 2*DAY)?;
+    test_value("sun", THURSDAY_00_00, THURSDAY_00_00 + 3*DAY)?;
+
+    test_value("daily", THURSDAY_00_00, THURSDAY_00_00 + DAY)?;
+    test_value("daily", THURSDAY_00_00+1, THURSDAY_00_00 + DAY)?;
+
     let n = test_value("5/2:0", THURSDAY_00_00, THURSDAY_00_00 + 5*HOUR)?;
     let n = test_value("5/2:0", n, THURSDAY_00_00 + 7*HOUR)?;
     let n = test_value("5/2:0", n, THURSDAY_00_00 + 9*HOUR)?;
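The weekday fix above addresses two issues: with Monday = bit 0 through Sunday = bit 6, the exclusive upper bound of 6 never tested Sunday, and adding `n.bits()` (a power of two) instead of the day number produced a wrong day offset. The new code iterates day numbers `(day_num+1)..7` and adds `n - day_num` days. An illustrative sketch of the corrected search (same bit layout, names invented):

```rust
// Find the next selected weekday strictly after `day_num` (0 = Monday ... 6 = Sunday),
// given a bitmask with bit d set for day d.
fn next_selected_day(days_bitmask: u8, day_num: u8) -> Option<u8> {
    ((day_num + 1)..7).find(|d| days_bitmask & (1 << d) != 0)
}

fn main() {
    let sunday_only = 1 << 6;
    // Searching from Thursday (3): an exclusive bound of 6 would miss Sunday,
    // the ..7 range finds it three days later.
    assert_eq!(next_selected_day(sunday_only, 3), Some(6));
    println!("ok");
}
```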
@ -1,4 +1,4 @@
-//! Generate and verify Authentification tickets
+//! Generate and verify Authentication tickets

 use anyhow::{bail, Error};
 use base64;
@ -9,6 +9,7 @@ Ext.define('pbs-data-store-snapshots', {
|
|||||||
dateFormat: 'timestamp'
|
dateFormat: 'timestamp'
|
||||||
},
|
},
|
||||||
'files',
|
'files',
|
||||||
|
'owner',
|
||||||
{ name: 'size', type: 'int' },
|
{ name: 'size', type: 'int' },
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
@ -76,7 +77,7 @@ Ext.define('PBS.DataStoreContent', {
|
|||||||
} else if (btype === 'host') {
|
} else if (btype === 'host') {
|
||||||
cls = 'fa-building';
|
cls = 'fa-building';
|
||||||
} else {
|
} else {
|
||||||
console.warn(`got unkown backup-type '${btype}'`);
|
console.warn(`got unknown backup-type '${btype}'`);
|
||||||
continue; // FIXME: auto render? what do?
|
continue; // FIXME: auto render? what do?
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -125,6 +126,7 @@ Ext.define('PBS.DataStoreContent', {
                group["backup-time"] = last_backup;
                group.files = item.files;
                group.size = item.size;
+               group.owner = item.owner;
            }
        }
        group.count = group.children.length;

@@ -157,67 +159,59 @@ Ext.define('PBS.DataStoreContent', {
        }
    },

-    initComponent: function() {
-       var me = this;
-       var sm = Ext.create('Ext.selection.RowModel', {});
-       var prune_btn = new Proxmox.button.Button({
-           text: gettext('Prune'),
-           disabled: true,
-           selModel: sm,
-           enableFn: function(record) { return !record.data.leaf; },
-           handler: 'onPrune',
-       });
-
-       Ext.apply(me, {
-           selModel: sm,
-           columns: [
-               {
-                   xtype: 'treecolumn',
-                   header: gettext("Backup Group"),
-                   dataIndex: 'text',
-                   flex: 1
-               },
-               {
-                   xtype: 'datecolumn',
-                   header: gettext('Backup Time'),
-                   sortable: true,
-                   dataIndex: 'backup-time',
-                   format: 'Y-m-d H:i:s',
-                   width: 150
-               },
-               {
-                   header: gettext("Size"),
-                   sortable: true,
-                   dataIndex: 'size',
-                   renderer: Proxmox.Utils.format_size,
-               },
-               {
-                   xtype: 'numbercolumn',
-                   format: '0',
-                   header: gettext("Count"),
-                   sortable: true,
-                   dataIndex: 'count',
-               },
-               {
-                   header: gettext("Files"),
-                   sortable: false,
-                   dataIndex: 'files',
-                   flex: 2
-               }
-           ],
-
-           tbar: [
-               {
-                   text: gettext('Reload'),
-                   iconCls: 'fa fa-refresh',
-                   handler: 'reload',
-               },
-               prune_btn
-           ],
-       });
-
-       me.callParent();
-    },
+    columns: [
+       {
+           xtype: 'treecolumn',
+           header: gettext("Backup Group"),
+           dataIndex: 'text',
+           flex: 1
+       },
+       {
+           xtype: 'datecolumn',
+           header: gettext('Backup Time'),
+           sortable: true,
+           dataIndex: 'backup-time',
+           format: 'Y-m-d H:i:s',
+           width: 150
+       },
+       {
+           header: gettext("Size"),
+           sortable: true,
+           dataIndex: 'size',
+           renderer: Proxmox.Utils.format_size,
+       },
+       {
+           xtype: 'numbercolumn',
+           format: '0',
+           header: gettext("Count"),
+           sortable: true,
+           dataIndex: 'count',
+       },
+       {
+           header: gettext("Owner"),
+           sortable: true,
+           dataIndex: 'owner',
+       },
+       {
+           header: gettext("Files"),
+           sortable: false,
+           dataIndex: 'files',
+           flex: 2
+       },
+    ],
+
+    tbar: [
+       {
+           text: gettext('Reload'),
+           iconCls: 'fa fa-refresh',
+           handler: 'reload',
+       },
+       {
+           xtype: 'proxmoxButton',
+           text: gettext('Prune'),
+           disabled: true,
+           enableFn: function(record) { return !record.data.leaf; },
+           handler: 'onPrune',
+       }
+    ],
 });

@@ -5,24 +5,18 @@ Ext.define('pve-rrd-datastore', {
        'total',
        'read_ios',
        'read_bytes',
-       'read_ticks',
        'write_ios',
        'write_bytes',
-       'write_ticks',
+       'io_ticks',
        {
-           name: 'read_delay', calculate: function(data) {
-               if (data.read_ios === undefined || data.read_ios === 0 || data.read_ticks == undefined) {
+           name: 'io_delay', calculate: function(data) {
+               let ios = 0;
+               if (data.read_ios !== undefined) { ios += data.read_ios; }
+               if (data.write_ios !== undefined) { ios += data.write_ios; }
+               if (ios == 0 || data.io_ticks === undefined) {
                    return undefined;
                }
-               return (data.read_ticks*1000)/data.read_ios;
-           }
-       },
-       {
-           name: 'write_delay', calculate: function(data) {
-               if (data.write_ios === undefined || data.write_ios === 0 || data.write_ticks == undefined) {
-                   return undefined;
-               }
-               return (data.write_ticks*1000)/data.write_ios;
+               return (data.io_ticks*1000.0)/ios;
            }
        },
        { type: 'date', dateFormat: 'timestamp', name: 'time' }

@@ -85,9 +79,9 @@ Ext.define('PBS.DataStoreStatistic', {
        },
        {
            xtype: 'proxmoxRRDChart',
-           title: gettext('Delay (ms)'),
-           fields: ['read_delay','write_delay'],
-           fieldTitles: [gettext('Read'), gettext('Write')],
+           title: gettext('IO Delay (ms)'),
+           fields: ['io_delay'],
+           fieldTitles: [gettext('IO Delay')],
            store: rrdstore
        },
    ]

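The separate read_delay/write_delay calculated fields are folded into a single io_delay series, scaled from io_ticks over the combined operation count. A small worked example with invented sample values (not data from the repository), assuming io_ticks is accumulated I/O time in seconds, since the field multiplies by 1000 to report milliseconds:

// Same arithmetic as the calculated field above, pulled out as a plain function.
function ioDelay(data) {
    let ios = 0;
    if (data.read_ios !== undefined) { ios += data.read_ios; }
    if (data.write_ios !== undefined) { ios += data.write_ios; }
    if (ios == 0 || data.io_ticks === undefined) {
        return undefined;
    }
    return (data.io_ticks * 1000.0) / ios;
}

// Hypothetical sample point: 150 reads and 50 writes that spent 0.8 s in I/O overall.
console.log(ioDelay({ read_ios: 150, write_ios: 50, io_ticks: 0.8 })); // 4 ms per operation
console.log(ioDelay({ io_ticks: 0.8 }));                               // undefined - no ops recorded
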
@@ -6,11 +6,15 @@ IMAGES := \

 JSSRC= \
    form/UserSelector.js \
+   form/RemoteSelector.js \
+   form/DataStoreSelector.js \
    config/UserView.js \
    config/RemoteView.js \
    config/ACLView.js \
+   config/SyncView.js \
    window/UserEdit.js \
    window/RemoteEdit.js \
+   window/SyncJobEdit.js \
    window/ACLEdit.js \
    Utils.js \
    LoginView.js \

@@ -36,6 +36,12 @@ Ext.define('PBS.store.NavigationStore', {
                path: 'pbsRemoteView',
                leaf: true,
            },
+           {
+               text: gettext('Sync Jobs'),
+               iconCls: 'fa fa-refresh',
+               path: 'pbsSyncJobView',
+               leaf: true,
+           },
            {
                text: gettext('Data Store'),
                iconCls: 'fa fa-archive',

@@ -21,8 +21,24 @@ Ext.define('pve-rrd-node', {
        'memused',
        'swaptotal',
        'swapused',
-       'roottotal',
-       'rootused',
+       'total',
+       'used',
+       'read_ios',
+       'read_bytes',
+       'write_ios',
+       'write_bytes',
+       'io_ticks',
+       {
+           name: 'io_delay', calculate: function(data) {
+               let ios = 0;
+               if (data.read_ios !== undefined) { ios += data.read_ios; }
+               if (data.write_ios !== undefined) { ios += data.write_ios; }
+               if (ios == 0 || data.io_ticks === undefined) {
+                   return undefined;
+               }
+               return (data.io_ticks*1000.0)/ios;
+           }
+       },
        'loadavg',
        { type: 'date', dateFormat: 'timestamp', name: 'time' }
    ]

@@ -92,7 +108,7 @@ Ext.define('PBS.ServerStatus', {
            xtype: 'proxmoxRRDChart',
            title: gettext('CPU usage'),
            fields: ['cpu','iowait'],
-           fieldTitles: [gettext('CPU usage'), gettext('IO delay')],
+           fieldTitles: [gettext('CPU usage'), gettext('IO wait')],
            store: rrdstore
        },
        {

@@ -146,8 +162,8 @@ Ext.define('PBS.ServerStatus', {
        {
            xtype: 'proxmoxRRDChart',
            title: gettext('Root Disk IO Delay (ms)'),
-           fields: ['read_delay','write_delay'],
-           fieldTitles: [gettext('Read'), gettext('Write')],
+           fields: ['io_delay'],
+           fieldTitles: [gettext('IO Delay')],
            store: rrdstore
        },
    ]

@@ -7,12 +7,8 @@ Ext.define('PBS.Utils', {
    singleton: true,

    updateLoginData: function(data) {
-       Proxmox.CSRFPreventionToken = data.CSRFPreventionToken;
-       Proxmox.UserName = data.username;
-       //console.log(data.ticket);
-       // fixme: use secure flag once we have TLS
-       //Ext.util.Cookies.set('PBSAuthCookie', data.ticket, null, '/', null, true );
-       Ext.util.Cookies.set('PBSAuthCookie', data.ticket, null, '/', null, false);
+       Proxmox.Utils.setAuthData(data);
    },

    dataStorePrefix: 'DataStore-',

@@ -62,6 +58,7 @@ Ext.define('PBS.Utils', {
        Proxmox.Utils.override_task_descriptions({
            garbage_collection: ['Datastore', gettext('Garbage collect') ],
            sync: ['Datastore', gettext('Remote Sync') ],
+           syncjob: [gettext('Sync Job'), gettext('Remote Sync') ],
            prune: (type, id) => {
                return PBS.Utils.render_datastore_worker_id(id, gettext('Prune'));
            },

@@ -1,6 +1,6 @@
 Ext.define('pmx-remotes', {
    extend: 'Ext.data.Model',
-   fields: [ 'name', 'host', 'userid', 'fingerprint' ],
+   fields: [ 'name', 'host', 'userid', 'fingerprint', 'comment' ],
    idProperty: 'name',
    proxy: {
        type: 'proxmox',

@@ -113,7 +113,7 @@ Ext.define('PBS.config.RemoteView', {
        },
        {
            header: gettext('User name'),
-           width: 100,
+           width: 200,
            sortable: true,
            renderer: Ext.String.htmlEncode,
            dataIndex: 'userid',

@@ -123,6 +123,13 @@ Ext.define('PBS.config.RemoteView', {
            sortable: false,
            renderer: Ext.String.htmlEncode,
            dataIndex: 'fingerprint',
+           width: 200,
+       },
+       {
+           header: gettext('Comment'),
+           sortable: false,
+           renderer: Ext.String.htmlEncode,
+           dataIndex: 'comment',
            flex: 1,
        },
    ],

www/config/SyncView.js (new file)
@@ -0,0 +1,263 @@
+Ext.define('pbs-sync-jobs-status', {
+    extend: 'Ext.data.Model',
+    fields: [
+       'id', 'remote', 'remote-store', 'store', 'schedule',
+       'next-run', 'last-run-upid', 'last-run-state', 'last-run-endtime',
+       {
+           name: 'duration',
+           calculate: function(data) {
+               let endtime = data['last-run-endtime'];
+               if (!endtime) return undefined;
+               let task = Proxmox.Utils.parse_task_upid(data['last-run-upid']);
+               return endtime - task.starttime;
+           },
+       },
+    ],
+    idProperty: 'id',
+    proxy: {
+       type: 'proxmox',
+       url: '/api2/json/admin/sync',
+    },
+});
+
+Ext.define('PBS.config.SyncJobView', {
+    extend: 'Ext.grid.GridPanel',
+    alias: 'widget.pbsSyncJobView',
+
+    stateful: true,
+    stateId: 'grid-sync-jobs',
+
+    title: gettext('Sync Jobs'),
+
+    controller: {
+       xclass: 'Ext.app.ViewController',
+
+       addSyncJob: function() {
+           let me = this;
+           Ext.create('PBS.window.SyncJobEdit', {
+               listeners: {
+                   destroy: function() {
+                       me.reload();
+                   },
+               },
+           }).show();
+       },
+
+       editSyncJob: function() {
+           let me = this;
+           let view = me.getView();
+           let selection = view.getSelection();
+           if (selection.length < 1) return;
+
+           Ext.create('PBS.window.SyncJobEdit', {
+               id: selection[0].data.id,
+               listeners: {
+                   destroy: function() {
+                       me.reload();
+                   },
+               },
+           }).show();
+       },
+
+       openTaskLog: function() {
+           let me = this;
+           let view = me.getView();
+           let selection = view.getSelection();
+           if (selection.length < 1) return;
+
+           let upid = selection[0].data['last-run-upid'];
+           if (!upid) return;
+
+           Ext.create('Proxmox.window.TaskViewer', {
+               upid
+           }).show();
+       },
+
+       runSyncJob: function() {
+           let me = this;
+           let view = me.getView();
+           let selection = view.getSelection();
+           if (selection.length < 1) return;
+
+           let id = selection[0].data.id;
+           Proxmox.Utils.API2Request({
+               method: 'POST',
+               url: `/admin/sync/${id}/run`,
+               success: function(response, opt) {
+                   Ext.create('Proxmox.window.TaskViewer', {
+                       upid: response.result.data,
+                       taskDone: function(success) {
+                           me.reload();
+                       },
+                   }).show();
+               },
+               failure: function(response, opt) {
+                   Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+               },
+           });
+       },
+
+       render_sync_status: function(value, metadata, record) {
+           if (!record.data['last-run-upid']) {
+               return '-';
+           }
+
+           if (!record.data['last-run-endtime']) {
+               metadata.tdCls = 'x-grid-row-loading';
+               return '';
+           }
+
+           if (value === 'OK') {
+               return `<i class="fa fa-check good"></i> ${gettext("OK")}`;
+           }
+
+           return `<i class="fa fa-times critical"></i> ${gettext("Error")}:${value}`;
+       },
+
+       render_next_run: function(value, metadat, record) {
+           if (!value) return '-';
+
+           let now = new Date();
+           let next = new Date(value*1000);
+
+           if (next < now) {
+               return gettext('pending');
+           }
+           return Proxmox.Utils.render_timestamp(value);
+       },
+
+       render_optional_timestamp: function(value, metadata, record) {
+           if (!value) return '-';
+           return Proxmox.Utils.render_timestamp(value);
+       },
+
+       reload: function() { this.getView().getStore().rstore.load(); },
+
+       init: function(view) {
+           Proxmox.Utils.monStoreErrors(view, view.getStore().rstore);
+       },
+    },
+
+    listeners: {
+       activate: 'reload',
+       itemdblclick: 'editSyncJob',
+    },
+
+    store: {
+       type: 'diff',
+       autoDestroy: true,
+       autoDestroyRstore: true,
+       sorters: 'id',
+       rstore: {
+           type: 'update',
+           storeid: 'pbs-sync-jobs-status',
+           model: 'pbs-sync-jobs-status',
+           autoStart: true,
+           interval: 5000,
+       },
+    },
+
+    tbar: [
+       {
+           xtype: 'proxmoxButton',
+           text: gettext('Add'),
+           handler: 'addSyncJob',
+           selModel: false,
+       },
+       {
+           xtype: 'proxmoxButton',
+           text: gettext('Edit'),
+           handler: 'editSyncJob',
+           disabled: true,
+       },
+       {
+           xtype: 'proxmoxStdRemoveButton',
+           baseurl: '/config/sync/',
+           callback: 'reload',
+       },
+       '-',
+       {
+           xtype: 'proxmoxButton',
+           text: gettext('Log'),
+           handler: 'openTaskLog',
+           enableFn: (rec) => !!rec.data['last-run-upid'],
+           disabled: true,
+       },
+       {
+           xtype: 'proxmoxButton',
+           text: gettext('Run now'),
+           handler: 'runSyncJob',
+           disabled: true,
+       },
+    ],
+
+    viewConfig: {
+       trackOver: false,
+    },
+
+    columns: [
+       {
+           header: gettext('Sync Job'),
+           width: 200,
+           sortable: true,
+           renderer: Ext.String.htmlEncode,
+           dataIndex: 'id',
+       },
+       {
+           header: gettext('Remote'),
+           width: 200,
+           sortable: true,
+           dataIndex: 'remote',
+       },
+       {
+           header: gettext('Remote Store'),
+           width: 200,
+           sortable: true,
+           dataIndex: 'remote-store',
+       },
+       {
+           header: gettext('Local Store'),
+           width: 200,
+           sortable: true,
+           dataIndex: 'store',
+       },
+       {
+           header: gettext('Schedule'),
+           sortable: true,
+           dataIndex: 'schedule',
+       },
+       {
+           header: gettext('Status'),
+           dataIndex: 'last-run-state',
+           flex: 1,
+           renderer: 'render_sync_status',
+       },
+       {
+           header: gettext('Last Sync'),
+           sortable: true,
+           minWidth: 200,
+           renderer: 'render_optional_timestamp',
+           dataIndex: 'last-run-endtime',
+       },
+       {
+           text: gettext('Duration'),
+           dataIndex: 'duration',
+           width: 60,
+           renderer: Proxmox.Utils.render_duration,
+       },
+       {
+           header: gettext('Next Run'),
+           sortable: true,
+           minWidth: 200,
+           renderer: 'render_next_run',
+           dataIndex: 'next-run',
+       },
+       {
+           header: gettext('Comment'),
+           hidden: true,
+           sortable: true,
+           renderer: Ext.String.htmlEncode,
+           dataIndex: 'comment',
+       },
+    ],
+});

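In the grid above, 'next-run' arrives from the API as a Unix timestamp and render_next_run only decides between a concrete time and the 'pending' placeholder. A standalone restatement of that decision, illustrative only and using toISOString() in place of Proxmox.Utils.render_timestamp:

// Schedules that are already due render as "pending", future ones as a formatted time.
function describeNextRun(value) {
    if (!value) return '-';
    let now = new Date();
    let next = new Date(value * 1000);      // the API delivers seconds, Date expects milliseconds
    if (next < now) return 'pending';
    return next.toISOString();              // the real grid uses Proxmox.Utils.render_timestamp(value)
}

console.log(describeNextRun(undefined));                  // '-'
console.log(describeNextRun(1));                          // 'pending' (long overdue)
console.log(describeNextRun(Date.now() / 1000 + 3600));   // a timestamp one hour from now
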
www/form/DataStoreSelector.js (new file)
@@ -0,0 +1,34 @@
+Ext.define('PBS.form.DataStoreSelector', {
+    extend: 'Proxmox.form.ComboGrid',
+    alias: 'widget.pbsDataStoreSelector',
+
+    allowBlank: false,
+    autoSelect: false,
+    valueField: 'store',
+    displayField: 'store',
+
+    store: {
+       model: 'pbs-datastore-list',
+       autoLoad: true,
+       sorters: 'store',
+    },
+
+    listConfig: {
+       columns: [
+           {
+               header: gettext('DataStore'),
+               sortable: true,
+               dataIndex: 'store',
+               renderer: Ext.String.htmlEncode,
+               flex: 1,
+           },
+           {
+               header: gettext('Comment'),
+               sortable: true,
+               dataIndex: 'comment',
+               renderer: Ext.String.htmlEncode,
+               flex: 1,
+           },
+       ],
+    },
+});

www/form/RemoteSelector.js (new file)
@@ -0,0 +1,40 @@
+Ext.define('PBS.form.RemoteSelector', {
+    extend: 'Proxmox.form.ComboGrid',
+    alias: 'widget.pbsRemoteSelector',
+
+    allowBlank: false,
+    autoSelect: false,
+    valueField: 'name',
+    displayField: 'name',
+
+    store: {
+       model: 'pmx-remotes',
+       autoLoad: true,
+       sorters: 'name',
+    },
+
+    listConfig: {
+       columns: [
+           {
+               header: gettext('Remote'),
+               sortable: true,
+               dataIndex: 'name',
+               renderer: Ext.String.htmlEncode,
+               flex: 1,
+           },
+           {
+               header: gettext('Host'),
+               sortable: true,
+               dataIndex: 'host',
+               flex: 1,
+           },
+           {
+               header: gettext('User name'),
+               sortable: true,
+               dataIndex: 'userid',
+               renderer: Ext.String.htmlEncode,
+               flex: 1,
+           },
+       ],
+    },
+});

@@ -73,6 +73,11 @@ Ext.define('PBS.window.RemoteEdit', {
                name: 'fingerprint',
                fieldLabel: gettext('Fingerprint'),
            },
+           {
+               xtype: 'proxmoxtextfield',
+               name: 'comment',
+               fieldLabel: gettext('Comment'),
+           },
        ],
    },

www/window/SyncJobEdit.js (new file)
@@ -0,0 +1,84 @@
+Ext.define('PBS.window.SyncJobEdit', {
+    extend: 'Proxmox.window.Edit',
+    alias: 'widget.pbsSyncJobEdit',
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    userid: undefined,
+
+    isAdd: true,
+
+    subject: gettext('SyncJob'),
+
+    fieldDefaults: { labelWidth: 120 },
+
+    cbindData: function(initialConfig) {
+       let me = this;
+
+       let baseurl = '/api2/extjs/config/sync';
+       let id = initialConfig.id;
+
+       me.isCreate = !id;
+       me.url = id ? `${baseurl}/${id}` : baseurl;
+       me.method = id ? 'PUT' : 'POST';
+       me.autoLoad = !!id;
+       return { };
+    },
+
+    items: {
+       xtype: 'inputpanel',
+       column1: [
+           {
+               fieldLabel: gettext('Sync Job'),
+               xtype: 'pmxDisplayEditField',
+               name: 'id',
+               renderer: Ext.htmlEncode,
+               allowBlank: false,
+               minLength: 4,
+               cbind: {
+                   editable: '{isCreate}',
+               },
+           },
+           {
+               fieldLabel: gettext('Remote'),
+               xtype: 'pbsRemoteSelector',
+               allowBlank: false,
+               name: 'remote',
+           },
+           {
+               fieldLabel: gettext('Local Datastore'),
+               xtype: 'pbsDataStoreSelector',
+               allowBlank: false,
+               name: 'store',
+           },
+           {
+               fieldLabel: gettext('Remote Datastore'),
+               xtype: 'proxmoxtextfield',
+               allowBlank: false,
+               name: 'remote-store',
+           },
+       ],
+
+       column2: [
+           {
+               fieldLabel: gettext('Remove vanished'),
+               xtype: 'proxmoxcheckbox',
+               name: 'remove-vanished',
+               uncheckedValue: false,
+               value: true,
+           },
+           {
+               fieldLabel: gettext('Schedule'),
+               xtype: 'proxmoxtextfield',
+               name: 'schedule',
+           },
+       ],
+
+       columnB: [
+           {
+               fieldLabel: gettext('Comment'),
+               xtype: 'proxmoxtextfield',
+               name: 'comment',
+           },
+       ],
+    },
+});

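The edit window above submits to the sync job config API defined in cbindData. A hypothetical payload for creating a job; only the field names come from the input panel, every value is invented:

// POST /api2/extjs/config/sync creates the job; PUT .../<id> updates an existing one.
const newSyncJob = {
    id: 'pull-store2-daily',                 // invented job id
    remote: 'backup-remote',                 // must name a configured remote
    store: 'store2',                         // local target datastore
    'remote-store': 'store2',                // datastore name on the remote side
    'remove-vanished': true,
    schedule: 'daily',                       // systemd-calendar style schedule, as exercised by the tests earlier
    comment: 'nightly pull from the off-site server',
};

console.log(JSON.stringify(newSyncJob, null, 2));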