Compare commits

37 Commits

| SHA1 |
|---|
| c76c7f8303 |
| c48aa39f3b |
| 2d32fe2c04 |
| dc155e9bd7 |
| 4e14781aec |
| a595f0fee0 |
| add5861e8d |
| 1610c45a86 |
| b2387eaa45 |
| 96d65fbcd0 |
| 7cc3473a4e |
| 4856a21836 |
| a0153b02c9 |
| 04b0ca8b59 |
| 86e432b0b8 |
| f0ed6a218c |
| 709584719d |
| d43f86f3f3 |
| 997d7e19fc |
| c67b1fa72f |
| 268687ddf0 |
| 426c1e353b |
| 2888b27f4c |
| f5d00373f3 |
| 934f5bb8ac |
| 9857472211 |
| 013fa7bbcb |
| a8d7033cb2 |
| 04ad7bc436 |
| 77ebbefc1a |
| 750252ba2f |
| dc58194ebe |
| c6887a8a4d |
| 090decbe76 |
| c32186595e |
| 947f45252d |
| c94e1f655e |
@@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
-version = "0.2.1"
+version = "0.2.2"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"
debian/changelog (14 lines changed, vendored)

@@ -1,3 +1,17 @@
+rust-proxmox-backup (0.2.2-1) unstable; urgency=medium
+
+  * proxmox-backup-client.rs: implement quiet flag
+
+  * client restore: don't add server file ending if already specified
+
+  * src/client/pull.rs: also download client.log.blob
+
+  * src/client/pull.rs: more verbose logging
+
+  * gui improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 03 Jun 2020 10:37:12 +0200
+
rust-proxmox-backup (0.2.1-1) unstable; urgency=medium

  * ui: move server RRD statistics to 'Server Status' panel
@@ -2,9 +2,11 @@ use proxmox::api::router::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;

pub mod datastore;
+pub mod sync;

const SUBDIRS: SubdirMap = &[
-    ("datastore", &datastore::ROUTER)
+    ("datastore", &datastore::ROUTER),
+    ("sync", &sync::ROUTER)
];

pub const ROUTER: Router = Router::new()
@@ -44,7 +44,7 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba

let mut path = store.base_path();
path.push(backup_dir.relative_path());
-path.push("index.json.blob");
+path.push(MANIFEST_BLOB_NAME);

let raw_data = file_get_contents(&path)?;
let index_size = raw_data.len() as u64;

@@ -61,7 +61,7 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba
}

result.push(BackupContent {
-    filename: "index.json.blob".to_string(),
+    filename: MANIFEST_BLOB_NAME.to_string(),
    size: Some(index_size),
});

@@ -130,8 +130,8 @@ fn list_groups(
let group = info.backup_dir.group();

let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+let owner = datastore.get_owner(group)?;
if !list_all {
-    let owner = datastore.get_owner(group)?;
    if owner != username { continue; }
}

@@ -141,6 +141,7 @@ fn list_groups(
last_backup: info.backup_dir.backup_time().timestamp(),
backup_count: list.len() as u64,
files: info.files.clone(),
+owner: Some(owner),
};
groups.push(result_item);
}

@@ -329,8 +330,9 @@ pub fn list_snapshots (
}

let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+let owner = datastore.get_owner(group)?;

if !list_all {
-    let owner = datastore.get_owner(group)?;
    if owner != username { continue; }
}

@@ -340,6 +342,7 @@ pub fn list_snapshots (
backup_time: info.backup_dir.backup_time().timestamp(),
files: info.files,
size: None,
+owner: Some(owner),
};

if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {

@@ -802,7 +805,7 @@ fn upload_backup_log(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;

-let file_name = "client.log.blob";
+let file_name = CLIENT_LOG_BLOB_NAME;

let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;

@@ -875,8 +878,9 @@ fn get_rrd_stats(
&rrd_dir,
&[
    "total", "used",
-    "read_ios", "read_bytes", "read_ticks",
-    "write_ios", "write_bytes", "write_ticks",
+    "read_ios", "read_bytes",
+    "write_ios", "write_bytes",
+    "io_ticks",
],
timeframe,
cf,
src/api2/admin/sync.rs (new file, 134 lines)

@@ -0,0 +1,134 @@
use anyhow::{Error};
use serde_json::Value;
use std::time::{SystemTime, UNIX_EPOCH};
use std::collections::HashMap;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};

use crate::api2::types::*;
use crate::api2::pull::{get_pull_parameters};
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::{self, TaskListInfo, WorkerTask};
use crate::tools::systemd::time::{
    parse_calendar_event, compute_next_event};

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List configured jobs and their status.",
        type: Array,
        items: { type: sync::SyncJobStatus },
    },
)]
/// List all sync jobs
pub fn list_sync_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {

    let (config, digest) = sync::config()?;

    let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;

    let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
    let tasks = server::read_task_list()?;

    for info in tasks.iter() {
        let worker_id = match &info.upid.worker_id {
            Some(id) => id,
            _ => { continue; },
        };
        if let Some(last) = last_tasks.get(worker_id) {
            if last.upid.starttime < info.upid.starttime {
                last_tasks.insert(worker_id.to_string(), &info);
            }
        } else {
            last_tasks.insert(worker_id.to_string(), &info);
        }
    }

    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(epoch_now) => epoch_now.as_secs() as i64,
        _ => 0i64,
    };

    for job in &mut list {
        job.next_run = (|| -> Option<i64> {
            let schedule = job.schedule.as_ref()?;
            let event = parse_calendar_event(&schedule).ok()?;
            compute_next_event(&event, now, false).ok()
        })();

        if let Some(task) = last_tasks.get(&job.id) {
            job.last_run_upid = Some(task.upid_str.clone());
            if let Some((endttime, status)) = &task.state {
                job.last_run_state = Some(String::from(status));
                job.last_run_endtime = Some(*endttime);
            }
        }
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            }
        }
    }
)]
/// Runs the sync jobs manually.
async fn run_sync_job(
    id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let (config, _digest) = sync::config()?;
    let sync_job: SyncJobConfig = config.lookup("sync", &id)?;

    let username = rpcenv.get_user().unwrap();

    let delete = sync_job.remove_vanished.unwrap_or(true);
    let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;

    let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {

        worker.log(format!("sync job '{}' start", &id));

        crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;

        worker.log(format!("sync job '{}' end", &id));

        Ok(())
    })?;

    Ok(upid_str)
}

#[sortable]
const SYNC_INFO_SUBDIRS: SubdirMap = &[
    (
        "run",
        &Router::new()
            .post(&API_METHOD_RUN_SYNC_JOB)
    ),
];

const SYNC_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
    .subdirs(SYNC_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_SYNC_JOBS)
    .match_all("id", &SYNC_INFO_ROUTER);
@@ -107,7 +107,7 @@ async move {
}

let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
-if !is_new { bail!("backup directorty already exists."); }
+if !is_new { bail!("backup directory already exists."); }

WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
    let mut env = BackupEnvironment::new(

@@ -151,7 +151,7 @@ async move {

match (res, env.ensure_finished()) {
    (Ok(_), Ok(())) => {
-        env.log("backup finished sucessfully");
+        env.log("backup finished successfully");
        Ok(())
    },
    (Err(err), Ok(())) => {

@@ -378,7 +378,7 @@ fn dynamic_append (

env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

-env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
}

Ok(Value::Null)

@@ -443,7 +443,7 @@ fn fixed_append (

env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

-env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
}

Ok(Value::Null)

@@ -498,7 +498,7 @@ fn close_dynamic_index (

env.dynamic_writer_close(wid, chunk_count, size, csum)?;

-env.log(format!("sucessfully closed dynamic index {}", wid));
+env.log(format!("successfully closed dynamic index {}", wid));

Ok(Value::Null)
}

@@ -552,7 +552,7 @@ fn close_fixed_index (

env.fixed_writer_close(wid, chunk_count, size, csum)?;

-env.log(format!("sucessfully closed fixed index {}", wid));
+env.log(format!("successfully closed fixed index {}", wid));

Ok(Value::Null)
}

@@ -566,7 +566,7 @@ fn finish_backup (
let env: &BackupEnvironment = rpcenv.as_ref();

env.finish_backup()?;
-env.log("sucessfully finished backup");
+env.log("successfully finished backup");

Ok(Value::Null)
}
@@ -52,7 +52,7 @@ struct FixedWriterState {
struct SharedBackupState {
    finished: bool,
    uid_counter: usize,
-    file_counter: usize, // sucessfully uploaded files
+    file_counter: usize, // successfully uploaded files
    dynamic_writers: HashMap<usize, DynamicWriterState>,
    fixed_writers: HashMap<usize, FixedWriterState>,
    known_chunks: HashMap<[u8;32], u32>,
@@ -269,6 +269,8 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
    None => bail!("remote '{}' does not exist.", name),
}

+remote::save_config(&config)?;

Ok(())
}
@@ -60,7 +60,7 @@ pub fn list_sync_jobs(
},
schedule: {
    optional: true,
-    schema: GC_SCHEDULE_SCHEMA,
+    schema: SYNC_SCHEDULE_SCHEMA,
},
},
},

@@ -154,7 +154,7 @@ pub enum DeletableProperty {
},
schedule: {
    optional: true,
-    schema: GC_SCHEDULE_SCHEMA,
+    schema: SYNC_SCHEDULE_SCHEMA,
},
delete: {
    description: "List of properties to delete.",

@@ -274,4 +274,4 @@ const ITEM_ROUTER: Router = Router::new()
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_SYNC_JOBS)
    .post(&API_METHOD_CREATE_SYNC_JOB)
-    .match_all("name", &ITEM_ROUTER);
+    .match_all("id", &ITEM_ROUTER);
@@ -338,7 +338,7 @@ pub enum DeletableProperty {
autostart,
/// Delete bridge ports (set to 'none')
bridge_ports,
-/// Delet bridge-vlan-aware flag
+/// Delete bridge-vlan-aware flag
bridge_vlan_aware,
/// Delete bond-slaves (set to 'none')
slaves,
@@ -36,8 +36,9 @@ fn get_node_stats(
"netin", "netout",
"loadavg",
"total", "used",
-"read_ios", "read_bytes", "read_ticks",
-"write_ios", "write_bytes", "write_ticks",
+"read_ios", "read_bytes",
+"write_ios", "write_bytes",
+"io_ticks",
],
timeframe,
cf,
@@ -256,7 +256,7 @@ fn stop_service(
_param: Value,
) -> Result<Value, Error> {

-log::info!("stoping service {}", service);
+log::info!("stopping service {}", service);

run_service_command(&service, "stop")
}
@@ -1,4 +1,5 @@
+//! Sync datastore from remote server
use std::sync::{Arc};

use anyhow::{format_err, Error};

@@ -15,6 +16,52 @@ use crate::config::{
    cached_user_info::CachedUserInfo,
};


pub fn check_pull_privs(
    username: &str,
    store: &str,
    remote: &str,
    remote_store: &str,
    delete: bool,
) -> Result<(), Error> {

    let user_info = CachedUserInfo::new()?;

    user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
    user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;

    if delete {
        user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
    }

    Ok(())
}

pub async fn get_pull_parameters(
    store: &str,
    remote: &str,
    remote_store: &str,
) -> Result<(HttpClient, BackupRepository, Arc<DataStore>), Error> {

    let tgt_store = DataStore::lookup_datastore(store)?;

    let (remote_config, _digest) = remote::config()?;
    let remote: remote::Remote = remote_config.lookup("remote", remote)?;

    let options = HttpClientOptions::new()
        .password(Some(remote.password.clone()))
        .fingerprint(remote.fingerprint.clone());

    let client = HttpClient::new(&remote.host, &remote.userid, options)?;
    let _auth_info = client.login() // make sure we can auth
        .await
        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

    let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store.to_string());

    Ok((client, src_repo, tgt_store))
}

#[api(
    input: {
        properties: {

@@ -52,33 +99,12 @@ async fn pull (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

-let user_info = CachedUserInfo::new()?;

let username = rpcenv.get_user().unwrap();
-user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
-user_info.check_privs(&username, &["remote", &remote, &remote_store], PRIV_REMOTE_READ, false)?;

let delete = remove_vanished.unwrap_or(true);

-if delete {
-    user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_PRUNE, false)?;
-}
+check_pull_privs(&username, &store, &remote, &remote_store, delete)?;

-let tgt_store = DataStore::lookup_datastore(&store)?;

-let (remote_config, _digest) = remote::config()?;
-let remote: remote::Remote = remote_config.lookup("remote", &remote)?;

-let options = HttpClientOptions::new()
-    .password(Some(remote.password.clone()))
-    .fingerprint(remote.fingerprint.clone());

-let client = HttpClient::new(&remote.host, &remote.userid, options)?;
-let _auth_info = client.login() // make sure we can auth
-    .await
-    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

-let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store);
+let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

// fixme: set to_stdout to false?
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {
@@ -131,7 +131,7 @@ fn upgrade_to_backup_reader_protocol(
Either::Right((Ok(res), _)) => Ok(res),
Either::Right((Err(err), _)) => Err(err),
})
-.map_ok(move |_| env.log("reader finished sucessfully"))
+.map_ok(move |_| env.log("reader finished successfully"))
})?;

let response = Response::builder()
@@ -27,6 +27,8 @@ macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }

+macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }

#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }

@@ -63,7 +65,9 @@ const_regex!{

pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");

-pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
+pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");

+pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");

@@ -287,6 +291,11 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
    .max_length(32)
    .schema();

+pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run sync job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .schema();

pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run garbage collection job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))

@@ -379,6 +388,9 @@ pub struct GroupListItem {
    pub backup_count: u64,
    /// List of contained archive files.
    pub files: Vec<String>,
+    /// The owner of group
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub owner: Option<String>,
}

#[api(

@@ -411,6 +423,9 @@ pub struct SnapshotListItem {
    /// Overall snapshot size (sum of all archive sizes).
    #[serde(skip_serializing_if="Option::is_none")]
    pub size: Option<u64>,
+    /// The owner of the snapshots group
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub owner: Option<String>,
}

#[api(

@@ -807,7 +822,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

for fingerprint in invalid_fingerprints.iter() {
    if let Ok(_) = parse_simple_value(fingerprint, &schema) {
-        bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint);
+        bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
    }
}

@@ -851,7 +866,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {

for name in invalid_user_ids.iter() {
    if let Ok(_) = parse_simple_value(name, &schema) {
-        bail!("test userid '{}' failed - got Ok() while expection an error.", name);
+        bail!("test userid '{}' failed - got Ok() while exception an error.", name);
    }
}
@@ -311,7 +311,7 @@ impl DataBlob {
/// Verify digest and data length for unencrypted chunks.
///
/// To do that, we need to decompress data first. Please note that
-/// this is noth possible for encrypted chunks.
+/// this is not possible for encrypted chunks.
pub fn verify_unencrypted(
    &self,
    expected_chunk_size: usize,
@@ -11,7 +11,7 @@ use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
-use super::manifest::{MANIFEST_BLOB_NAME, BackupManifest};
+use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
use crate::config::datastore;

@@ -149,6 +149,7 @@ impl DataStore {

let mut wanted_files = HashSet::new();
wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
+wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });

for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
@@ -7,6 +7,7 @@ use serde_json::{json, Value};
use crate::backup::BackupDir;

pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
+pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";

pub struct FileInfo {
    pub filename: String,
@@ -49,7 +49,7 @@ fn hello_command(
}

#[api(input: { properties: {} })]
-/// Quit command. Exit the programm.
+/// Quit command. Exit the program.
///
/// Returns: nothing
fn quit_command() -> Result<(), Error> {
@@ -16,7 +16,7 @@ use std::io::Write;
// tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros

// # pxar create test.pxar ./dyntest1/
-// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
+// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)

fn create_large_file(path: PathBuf) {
@@ -22,11 +22,6 @@ use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };

-//use proxmox_backup::backup::image_index::*;
-//use proxmox_backup::config::datastore;
-//use proxmox_backup::pxar::encoder::*;
-//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};
//use hyper::Body;
use std::sync::{Arc, Mutex};

@@ -39,20 +34,12 @@ use tokio::sync::mpsc;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";

-proxmox::const_regex! {
-    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
-}

const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

-const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
-    "Backup source specification ([<label>:<path>]).")
-    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
-    .schema();

const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

@@ -688,14 +675,6 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
Ok(Value::Null)
}

-fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
-
-    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
-        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
-    }
-    bail!("unable to parse directory specification '{}'", value);
-}

fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
@@ -865,12 +844,12 @@ async fn create_backup(

let mut upload_list = vec![];

-enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };

let mut upload_catalog = false;

for backupspec in backupspec_list {
-    let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
+    let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
+    let filename = &spec.config_string;
+    let target = &spec.archive_name;

    use std::os::unix::fs::FileTypeExt;

@@ -878,19 +857,15 @@ async fn create_backup(
        .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
    let file_type = metadata.file_type();

-    let extension = target.rsplit('.').next()
-        .ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;

-    match extension {
-        "pxar" => {
+    match spec.spec_type {
+        BackupSpecificationType::PXAR => {
            if !file_type.is_dir() {
                bail!("got unexpected file type (expected directory)");
            }
-            upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
+            upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            upload_catalog = true;
        }
-        "img" => {

+        BackupSpecificationType::IMAGE => {
            if !(file_type.is_file() || file_type.is_block_device()) {
                bail!("got unexpected file type (expected file or block device)");
            }

@@ -899,22 +874,19 @@ async fn create_backup(

            if size == 0 { bail!("got zero-sized file '{}'", filename); }

-            upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
+            upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
        }
-        "conf" => {
+        BackupSpecificationType::CONFIG => {
            if !file_type.is_file() {
                bail!("got unexpected file type (expected regular file)");
            }
-            upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
+            upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
        }
-        "log" => {
+        BackupSpecificationType::LOGFILE => {
            if !file_type.is_file() {
                bail!("got unexpected file type (expected regular file)");
            }
-            upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
        }
-        _ => {
-            bail!("got unknown archive extension '{}'", extension);
+            upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
        }
    }
}
@@ -967,21 +939,21 @@ async fn create_backup(

for (backup_type, filename, target, size) in upload_list {
    match backup_type {
-        BackupType::CONFIG => {
+        BackupSpecificationType::CONFIG => {
            println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
            let stats = client
                .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                .await?;
            manifest.add_file(target, stats.size, stats.csum)?;
        }
-        BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
+        BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
            println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
            let stats = client
                .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
                .await?;
            manifest.add_file(target, stats.size, stats.csum)?;
        }
-        BackupType::PXAR => {
+        BackupSpecificationType::PXAR => {
            println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
            catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
            let stats = backup_directory(

@@ -1000,7 +972,7 @@ async fn create_backup(
            manifest.add_file(target, stats.size, stats.csum)?;
            catalog.lock().unwrap().end_directory()?;
        }
-        BackupType::IMAGE => {
+        BackupSpecificationType::IMAGE => {
            println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
            let stats = backup_image(
                &client,

@@ -1135,6 +1107,18 @@ fn dump_image<W: Write>(
Ok(())
}

+fn parse_archive_type(name: &str) -> (String, ArchiveType) {
+    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
+        (name.into(), archive_type(name).unwrap())
+    } else if name.ends_with(".pxar") {
+        (format!("{}.didx", name), ArchiveType::DynamicIndex)
+    } else if name.ends_with(".img") {
+        (format!("{}.fidx", name), ArchiveType::FixedIndex)
+    } else {
+        (format!("{}.blob", name), ArchiveType::Blob)
+    }
+}

#[api(
    input: {
        properties: {
@@ -1207,14 +1191,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
    }
};

-let server_archive_name = if archive_name.ends_with(".pxar") {
-    format!("{}.didx", archive_name)
-} else if archive_name.ends_with(".img") {
-    format!("{}.fidx", archive_name)
-} else {
-    format!("{}.blob", archive_name)
-};

let client = BackupReader::start(
    client,
    crypt_config.clone(),

@@ -1227,7 +1203,9 @@ async fn restore(param: Value) -> Result<Value, Error> {

let manifest = client.download_manifest().await?;

-if server_archive_name == MANIFEST_BLOB_NAME {
+let (archive_name, archive_type) = parse_archive_type(archive_name);
+
+if archive_name == MANIFEST_BLOB_NAME {
    let backup_index_data = manifest.into_json().to_string();
    if let Some(target) = target {
        replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;

@@ -1238,9 +1216,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
        .map_err(|err| format_err!("unable to pipe data - {}", err))?;
    }

-} else if server_archive_name.ends_with(".blob") {
+} else if archive_type == ArchiveType::Blob {

-    let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
+    let mut reader = client.download_blob(&manifest, &archive_name).await?;

    if let Some(target) = target {
        let mut writer = std::fs::OpenOptions::new()

@@ -1257,9 +1235,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
        .map_err(|err| format_err!("unable to pipe data - {}", err))?;
    }

-} else if server_archive_name.ends_with(".didx") {
+} else if archive_type == ArchiveType::DynamicIndex {

-    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+    let index = client.download_dynamic_index(&manifest, &archive_name).await?;

    let most_used = index.find_most_used_chunks(8);

@@ -1289,9 +1267,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
    std::io::copy(&mut reader, &mut writer)
        .map_err(|err| format_err!("unable to pipe data - {}", err))?;
    }
-} else if server_archive_name.ends_with(".fidx") {
+} else if archive_type == ArchiveType::FixedIndex {

-    let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
+    let index = client.download_fixed_index(&manifest, &archive_name).await?;

    let mut writer = if let Some(target) = target {
        std::fs::OpenOptions::new()

@@ -1308,9 +1286,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
};

dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;

-} else {
-    bail!("unknown archive file extension (expected .pxar of .img)");
-}

Ok(Value::Null)
@@ -1390,6 +1365,12 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    ("group", false, &StringSchema::new("Backup group.").schema()),
], [
    ("output-format", true, &OUTPUT_FORMAT),
+    (
+        "quiet",
+        true,
+        &BooleanSchema::new("Minimal output - only show removals.")
+            .schema()
+    ),
    ("repository", true, &REPO_URL_SCHEMA),
])
)

@@ -1417,9 +1398,12 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {

let output_format = get_output_format(&param);

+let quiet = param["quiet"].as_bool().unwrap_or(false);

param.as_object_mut().unwrap().remove("repository");
param.as_object_mut().unwrap().remove("group");
param.as_object_mut().unwrap().remove("output-format");
+param.as_object_mut().unwrap().remove("quiet");

param["backup-type"] = group.backup_type().into();
param["backup-id"] = group.backup_id().into();

@@ -1434,19 +1418,34 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
    Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};

+let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
+    Ok(match v.as_bool() {
+        Some(true) => "keep",
+        Some(false) => "remove",
+        None => "unknown",
+    }.to_string())
+};

let options = default_table_format_options()
    .sortby("backup-type", false)
    .sortby("backup-id", false)
    .sortby("backup-time", false)
    .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
    .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
-    .column(ColumnConfig::new("keep"))
+    .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
    ;

let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

let mut data = result["data"].take();

+if quiet {
+    let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
+        item["keep"].as_bool() == Some(false)
+    }).map(|v| v.clone()).collect();
+    data = list.into();
+}

format_and_print_result_full(&mut data, info, &output_format, &options);

Ok(Value::Null)
@@ -2028,7 +2027,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {

if let Some(pipe) = pipe {
    nix::unistd::chdir(Path::new("/")).unwrap();
-    // Finish creation of deamon by redirecting filedescriptors.
+    // Finish creation of daemon by redirecting filedescriptors.
    let nullfd = nix::fcntl::open(
        "/dev/null",
        nix::fcntl::OFlag::O_RDWR,
@@ -1,5 +1,4 @@
use std::sync::Arc;
-use std::ffi::OsString;
use std::path::Path;

use anyhow::{bail, format_err, Error};

@@ -9,7 +8,6 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
-use proxmox::sys::linux::procfs::mountinfo::{Device, MountInfo};

use proxmox_backup::configdir;
use proxmox_backup::buildcfg;

@@ -385,12 +383,15 @@ async fn schedule_datastore_prune() {
    }
};

-//fixme: if last_prune_job_stzill_running { continue; }

let worker_type = "prune";

let last = match lookup_last_worker(worker_type, &store) {
-    Ok(Some(upid)) => upid.starttime,
+    Ok(Some(upid)) => {
+        if proxmox_backup::server::worker_is_active_local(&upid) {
+            continue;
+        }
+        upid.starttime
+    }
    Ok(None) => 0,
    Err(err) => {
        eprintln!("lookup_last_job_start failed: {}", err);

@@ -507,12 +508,15 @@ async fn schedule_datastore_sync_jobs() {
    }
};

-//fixme: if last_sync_job_still_running { continue; }
+let worker_type = "syncjob";

-let worker_type = "sync";

-let last = match lookup_last_worker(worker_type, &job_config.store) {
-    Ok(Some(upid)) => upid.starttime,
+let last = match lookup_last_worker(worker_type, &job_id) {
+    Ok(Some(upid)) => {
+        if proxmox_backup::server::worker_is_active_local(&upid) {
+            continue;
+        }
+        upid.starttime
+    },
    Ok(None) => 0,
    Err(err) => {
        eprintln!("lookup_last_job_start failed: {}", err);
@@ -594,31 +598,36 @@ async fn schedule_datastore_sync_jobs()

async fn run_stat_generator() {

+    let mut count = 0;
    loop {
+        count += 1;
+        let save = if count > 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

-        generate_host_stats().await;
+        generate_host_stats(save).await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }

}

}

-fn rrd_update_gauge(name: &str, value: f64) {
+fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
-    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge) {
+    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

-fn rrd_update_derive(name: &str, value: f64) {
+fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
-    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive) {
+    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

-async fn generate_host_stats() {
+async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

@@ -628,8 +637,8 @@ async fn generate_host_stats() {

match read_proc_stat() {
    Ok(stat) => {
-        rrd_update_gauge("host/cpu", stat.cpu);
-        rrd_update_gauge("host/iowait", stat.iowait_percent);
+        rrd_update_gauge("host/cpu", stat.cpu, save);
+        rrd_update_gauge("host/iowait", stat.iowait_percent, save);
    }
    Err(err) => {
        eprintln!("read_proc_stat failed - {}", err);

@@ -638,10 +647,10 @@ async fn generate_host_stats() {

match read_meminfo() {
    Ok(meminfo) => {
-        rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
-        rrd_update_gauge("host/memused", meminfo.memused as f64);
-        rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
-        rrd_update_gauge("host/swapused", meminfo.swapused as f64);
+        rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
+        rrd_update_gauge("host/memused", meminfo.memused as f64, save);
+        rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
+        rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
    }
    Err(err) => {
        eprintln!("read_meminfo failed - {}", err);

@@ -658,8 +667,8 @@ async fn generate_host_stats() {
        netin += item.receive;
        netout += item.send;
    }
-    rrd_update_derive("host/netin", netin as f64);
-    rrd_update_derive("host/netout", netout as f64);
+    rrd_update_derive("host/netin", netin as f64, save);
+    rrd_update_derive("host/netout", netout as f64, save);
}
Err(err) => {
    eprintln!("read_prox_net_dev failed - {}", err);

@@ -668,7 +677,7 @@ async fn generate_host_stats() {

match read_loadavg() {
    Ok(loadavg) => {
-        rrd_update_gauge("host/loadavg", loadavg.0 as f64);
+        rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
    }
    Err(err) => {
        eprintln!("read_loadavg failed - {}", err);

@@ -677,7 +686,7 @@ async fn generate_host_stats() {

let disk_manager = DiskManage::new();

-gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");
+gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

match datastore::config() {
    Ok((config, _)) => {

@@ -688,7 +697,7 @@ async fn generate_host_stats() {

        let rrd_prefix = format!("datastore/{}", config.name);
        let path = std::path::Path::new(&config.path);
-        gather_disk_stats(disk_manager.clone(), path, &rrd_prefix);
+        gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
    }
}
Err(err) => {
@@ -699,100 +708,59 @@ async fn generate_host_stats() {
});
}

fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str) {

match disk_usage(path) {
match proxmox_backup::tools::disks::disk_usage(path) {
Ok((total, used, _avail)) => {
let rrd_key = format!("{}/total", rrd_prefix);
rrd_update_gauge(&rrd_key, total as f64);
rrd_update_gauge(&rrd_key, total as f64, save);
let rrd_key = format!("{}/used", rrd_prefix);
rrd_update_gauge(&rrd_key, used as f64);
rrd_update_gauge(&rrd_key, used as f64, save);
}
Err(err) => {
eprintln!("read disk_usage on {:?} failed - {}", path, err);
}
}

match disk_manager.mount_info() {
Ok(mountinfo) => {
if let Some((fs_type, device, source)) = find_mounted_device(mountinfo, path) {
let mut device_stat = None;
match fs_type.as_str() {
"zfs" => {
if let Some(pool) = source {
match zfs_pool_stats(&pool) {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
}
}
}
_ => {
if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
match disk.read_stat() {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
}
match disk_manager.find_mounted_device(path) {
Ok(None) => {},
Ok(Some((fs_type, device, source))) => {
let mut device_stat = None;
match fs_type.as_str() {
"zfs" => {
if let Some(pool) = source {
match zfs_pool_stats(&pool) {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
}
}
}
if let Some(stat) = device_stat {
let rrd_key = format!("{}/read_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.read_ios as f64);
let rrd_key = format!("{}/read_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64);
let rrd_key = format!("{}/read_ticks", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.read_ticks as f64)/1000.0);
_ => {
if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
match disk.read_stat() {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
}
}
}
}
if let Some(stat) = device_stat {
let rrd_key = format!("{}/read_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
let rrd_key = format!("{}/read_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

let rrd_key = format!("{}/write_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.write_ios as f64);
let rrd_key = format!("{}/write_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64);
let rrd_key = format!("{}/write_ticks", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.write_ticks as f64)/1000.0);
}
let rrd_key = format!("{}/write_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
let rrd_key = format!("{}/write_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

let rrd_key = format!("{}/io_ticks", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
}
}
Err(err) => {
eprintln!("disk_manager mount_info() failed - {}", err);
eprintln!("find_mounted_device failed - {}", err);
}
}
}

// Returns (total, used, avail)
fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {

let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

use nix::NixPath;

let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
nix::errno::Errno::result(res)?;

let bsize = stat.f_bsize as u64;

Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
}

// Returns (fs_type, device, mount_source)
pub fn find_mounted_device(
mountinfo: &MountInfo,
path: &std::path::Path,
) -> Option<(String, Device, Option<OsString>)> {

let mut result = None;
let mut match_len = 0;

let root_path = std::path::Path::new("/");
for (_id, entry) in mountinfo {
if entry.root == root_path && path.starts_with(&entry.mount_point) {
let len = entry.mount_point.as_path().as_os_str().len();
if len > match_len {
match_len = len;
result = Some((entry.fs_type.clone(), entry.device, entry.mount_source.clone()));
}
}
}

result
}
@@ -17,7 +17,7 @@ fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error
}

#[api]
-/// Diplay node certificate information.
+/// Display node certificate information.
fn cert_info() -> Result<(), Error> {

    let cert_path = PathBuf::from(configdir!("/proxy.pem"));
@@ -30,4 +30,7 @@ pub use pxar_decode_writer::*;
mod backup_repo;
pub use backup_repo::*;

+mod backup_specification;
+pub use backup_specification::*;

pub mod pull;
@@ -138,7 +138,7 @@ impl BackupReader {

/// Download a .blob file
///
-/// This creates a temorary file in /tmp (using O_TMPFILE). The data is verified using
+/// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
/// the provided manifest.
pub async fn download_blob(
    &self,

@@ -164,7 +164,7 @@ impl BackupReader {

/// Download dynamic index file
///
-/// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_dynamic_index(
    &self,

@@ -192,7 +192,7 @@ impl BackupReader {

/// Download fixed index file
///
-/// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_fixed_index(
    &self,
@@ -3,12 +3,8 @@ use std::fmt;
use anyhow::{format_err, Error};

use proxmox::api::schema::*;
-use proxmox::const_regex;

-const_regex! {
-    /// Regular expression to parse repository URLs
-    pub BACKUP_REPO_URL_REGEX = r"^(?:(?:([\w@]+)@)?([\w\-_.]+):)?(\w+)$";
-}
+use crate::api2::types::*;

/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
src/client/backup_specification.rs (new file, 39 lines)

@@ -0,0 +1,39 @@
use anyhow::{bail, Error};

use proxmox::api::schema::*;

proxmox::const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}

pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
    "Backup source specification ([<label>:<path>]).")
    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
    .schema();

pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }

pub struct BackupSpecification {
    pub archive_name: String, // left part
    pub config_string: String, // right part
    pub spec_type: BackupSpecificationType,
}

pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {

    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
        let archive_name = caps.get(1).unwrap().as_str().into();
        let extension = caps.get(2).unwrap().as_str();
        let config_string = caps.get(3).unwrap().as_str().into();
        let spec_type = match extension {
            "pxar" => BackupSpecificationType::PXAR,
            "img" => BackupSpecificationType::IMAGE,
            "conf" => BackupSpecificationType::CONFIG,
            "log" => BackupSpecificationType::LOGFILE,
            _ => bail!("unknown backup source type '{}'", extension),
        };
        return Ok(BackupSpecification { archive_name, config_string, spec_type });
    }

    bail!("unable to parse backup source specification '{}'", value);
}
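The new parse_backup_specification() helper replaces the client's ad-hoc extension matching shown in the create_backup() hunks above. A minimal usage sketch follows (hypothetical caller, not part of this changeset; it assumes the re-export through proxmox_backup::client added in src/client/mod.rs):

// Hypothetical example, not taken from the diff above.
// A spec like "root.pxar:/" splits into archive name "root.pxar",
// type PXAR and the right-hand config string "/".
use anyhow::Error;
use proxmox_backup::client::{parse_backup_specification, BackupSpecificationType};

fn describe(value: &str) -> Result<String, Error> {
    let spec = parse_backup_specification(value)?;
    let kind = match spec.spec_type {
        BackupSpecificationType::PXAR => "directory archive",
        BackupSpecificationType::IMAGE => "block image",
        BackupSpecificationType::CONFIG => "config file blob",
        BackupSpecificationType::LOGFILE => "log file blob",
    };
    Ok(format!("{} -> {} ({})", spec.archive_name, spec.config_string, kind))
}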
@@ -343,7 +343,7 @@ impl HttpClient {

/// Login
///
-/// Login is done on demand, so this is onyl required if you need
+/// Login is done on demand, so this is only required if you need
/// access to authentication data in 'AuthInfo'.
pub async fn login(&self) -> Result<AuthInfo, Error> {
    self.auth.listen().await

@@ -400,21 +400,22 @@ impl HttpClient {
if interactive && tty::stdin_isatty() {
    println!("fingerprint: {}", fp_string);
    loop {
-        print!("Want to trust? (y/n): ");
+        print!("Are you sure you want to continue connecting? (y/n): ");
        let _ = std::io::stdout().flush();
-        let mut buf = [0u8; 1];
-        use std::io::Read;
-        match std::io::stdin().read_exact(&mut buf) {
-            Ok(()) => {
-                if buf[0] == b'y' || buf[0] == b'Y' {
+        use std::io::{BufRead, BufReader};
+        let mut line = String::new();
+        match BufReader::new(std::io::stdin()).read_line(&mut line) {
+            Ok(_) => {
+                let trimmed = line.trim();
+                if trimmed == "y" || trimmed == "Y" {
                    return (true, Some(fp_string));
-                } else if buf[0] == b'n' || buf[0] == b'N' {
+                } else if trimmed == "n" || trimmed == "N" {
                    return (false, None);
                } else {
                    continue;
                }
            }
-            Err(_) => {
-                return (false, None);
-            }
+            Err(_) => return (false, None),
        }
    }
}
@@ -106,6 +106,34 @@ async fn pull_single_archive(
Ok(())
}

+// Note: The client.log.blob is uploaded after the backup, so it is
+// not mentioned in the manifest.
+async fn try_client_log_download(
+    worker: &WorkerTask,
+    reader: Arc<BackupReader>,
+    path: &std::path::Path,
+) -> Result<(), Error> {
+
+    let mut tmp_path = path.to_owned();
+    tmp_path.set_extension("tmp");
+
+    let tmpfile = std::fs::OpenOptions::new()
+        .write(true)
+        .create(true)
+        .read(true)
+        .open(&tmp_path)?;
+
+    // Note: be silent if there is no log - only log successful download
+    if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
+        if let Err(err) = std::fs::rename(&tmp_path, &path) {
+            bail!("Atomic rename file {:?} failed - {}", path, err);
+        }
+        worker.log(format!("got backup log file {:?}", CLIENT_LOG_BLOB_NAME));
+    }
+
+    Ok(())
+}

async fn pull_snapshot(
    worker: &WorkerTask,
    reader: Arc<BackupReader>,

@@ -117,6 +145,10 @@ async fn pull_snapshot(
manifest_name.push(snapshot.relative_path());
manifest_name.push(MANIFEST_BLOB_NAME);

+let mut client_log_name = tgt_store.base_path();
+client_log_name.push(snapshot.relative_path());
+client_log_name.push(CLIENT_LOG_BLOB_NAME);

let mut tmp_manifest_name = manifest_name.clone();
tmp_manifest_name.set_extension("tmp");

@@ -137,6 +169,10 @@ async fn pull_snapshot(
})?;

if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
+    if !client_log_name.exists() {
+        try_client_log_download(worker, reader, &client_log_name).await?;
+    }
    worker.log("no data changes");
    return Ok(()); // nothing changed
}
}

@@ -199,6 +235,10 @@ async fn pull_snapshot(
    bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
}

+if !client_log_name.exists() {
+    try_client_log_download(worker, reader, &client_log_name).await?;
+}

// cleanup - remove stale files
tgt_store.cleanup_backup_dir(snapshot, &manifest)?;

@@ -223,9 +263,11 @@ pub async fn pull_snapshot_from(
    }
    return Err(err);
}
+worker.log(format!("sync snapshot {:?} done", snapshot.relative_path()));
} else {
    worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
-    pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?
+    pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?;
+    worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path()));
}

Ok(())
@@ -149,7 +149,7 @@ impl Interface {
    Ok(())
}

-/// Write attributes not dependening on address family
+/// Write attributes not depending on address family
fn write_iface_attributes(&self, w: &mut dyn Write) -> Result<(), Error> {

    static EMPTY_LIST: Vec<String> = Vec::new();

@@ -187,7 +187,7 @@ impl Interface {
    Ok(())
}

-/// Write attributes dependening on address family inet (IPv4)
+/// Write attributes depending on address family inet (IPv4)
fn write_iface_attributes_v4(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
    if method == NetworkConfigMethod::Static {
        if let Some(address) = &self.cidr {

@@ -211,7 +211,7 @@ impl Interface {
    Ok(())
}

-/// Write attributes dependening on address family inet6 (IPv6)
+/// Write attributes depending on address family inet6 (IPv6)
fn write_iface_attributes_v6(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
    if method == NetworkConfigMethod::Static {
        if let Some(address) = &self.cidr6 {
@ -46,7 +46,7 @@ lazy_static! {
|
||||
},
|
||||
schedule: {
|
||||
optional: true,
|
||||
schema: GC_SCHEDULE_SCHEMA,
|
||||
schema: SYNC_SCHEDULE_SCHEMA,
|
||||
},
|
||||
}
|
||||
)]
|
||||
@ -66,6 +66,79 @@ pub struct SyncJobConfig {
|
||||
pub schedule: Option<String>,
|
||||
}
|
||||
|
||||
// FIXME: generate duplicate schemas/structs from one listing?
|
||||
#[api(
|
||||
properties: {
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
remote: {
|
||||
schema: REMOTE_ID_SCHEMA,
|
||||
},
|
||||
"remote-store": {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"remove-vanished": {
|
||||
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
schedule: {
|
||||
optional: true,
|
||||
schema: SYNC_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"next-run": {
|
||||
description: "Estimated time of the next run (UNIX epoch).",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
"last-run-state": {
|
||||
description: "Result of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-upid": {
|
||||
description: "Task UPID of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-endtime": {
|
||||
description: "Endtime of the last run.",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
#[derive(Serialize,Deserialize)]
|
||||
/// Status of Sync Job
|
||||
pub struct SyncJobStatus {
|
||||
pub id: String,
|
||||
pub store: String,
|
||||
pub remote: String,
|
||||
pub remote_store: String,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub remove_vanished: Option<bool>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub schedule: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub next_run: Option<i64>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub last_run_state: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub last_run_upid: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub last_run_endtime: Option<i64>,
|
||||
}
|
||||
|
||||
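To make the attribute combination above concrete, here is a hypothetical SyncJobStatus value; every field value is invented for illustration. With serde's rename_all="kebab-case", `remote_store` serializes as "remote-store", and the Option fields carrying skip_serializing_if are simply omitted when None:

let status = SyncJobStatus {
    id: "sync-job-1".to_string(),
    store: "local-store".to_string(),
    remote: "remote-pbs".to_string(),
    remote_store: "store1".to_string(),
    remove_vanished: None,
    comment: Some("nightly pull".to_string()),
    schedule: Some("daily".to_string()),
    next_run: Some(1_591_221_600),          // invented epoch value
    last_run_state: Some("OK".to_string()),
    last_run_upid: None,
    last_run_endtime: None,
};
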
fn init() -> SectionConfig {
|
||||
let obj_schema = match SyncJobConfig::API_SCHEMA {
|
||||
Schema::Object(ref obj_schema) => obj_schema,
|
||||
|
@ -4,7 +4,7 @@
|
||||
//! format used in the [casync](https://github.com/systemd/casync)
|
||||
//! toolkit (we are not 100\% binary compatible). It is a file archive
|
||||
//! format defined by 'Lennart Poettering', specially defined for
|
||||
//! efficent deduplication.
|
||||
//! efficient deduplication.
|
||||
|
||||
//! Every archive contains items in the following order:
|
||||
//! * `ENTRY` -- containing general stat() data and related bits
|
||||
|
@ -61,7 +61,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
|
||||
}
|
||||
}
|
||||
|
||||
/// This function calls the provided `copy_func()` with the permutaion
|
||||
/// This function calls the provided `copy_func()` with the permutation
|
||||
/// info.
|
||||
///
|
||||
/// ```
|
||||
@ -71,7 +71,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
/// This will produce the folowing output:
|
||||
/// This will produce the following output:
|
||||
///
|
||||
/// ```no-compile
|
||||
/// Copy 3 to 0
|
||||
@ -81,7 +81,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
|
||||
/// Copy 4 to 2
|
||||
/// ```
|
||||
///
|
||||
/// So this generates the following permuation: `[3,1,4,0,2]`.
|
||||
/// So this generates the following permutation: `[3,1,4,0,2]`.
|
||||
|
||||
pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
|
||||
n: usize,
|
||||
|
@ -1117,7 +1117,7 @@ impl<'a, W: Write, C: BackupCatalogWriter> Encoder<'a, W, C> {
|
||||
if pos != size {
|
||||
// Note:: casync format cannot handle that
|
||||
bail!(
|
||||
"detected shrinked file {:?} ({} < {})",
|
||||
"detected shrunk file {:?} ({} < {})",
|
||||
self.full_path(),
|
||||
pos,
|
||||
size
|
||||
|
@ -29,7 +29,7 @@ pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b;
|
||||
/// Marks item as hardlink
|
||||
/// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
|
||||
pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
|
||||
/// Marks the beginnig of the payload (actual content) of regular files
|
||||
/// Marks the beginning of the payload (actual content) of regular files
|
||||
pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
|
||||
/// Marks item as entry of goodbye table
|
||||
pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;
|
||||
|
@ -124,7 +124,7 @@ impl MatchPattern {
|
||||
Ok(Some((match_pattern, content_buffer, stat)))
|
||||
}
|
||||
|
||||
/// Interprete a byte buffer as a sinlge line containing a valid
|
||||
/// Interpret a byte buffer as a sinlge line containing a valid
|
||||
/// `MatchPattern`.
|
||||
/// Pattern starting with `#` are interpreted as comments, returning `Ok(None)`.
|
||||
/// Pattern starting with '!' are interpreted as negative match pattern.
|
||||
|
@ -84,7 +84,7 @@ impl<R: Read> SequentialDecoder<R> {
|
||||
|
||||
pub(crate) fn read_link(&mut self, size: u64) -> Result<PathBuf, Error> {
|
||||
if size < (HEADER_SIZE + 2) {
|
||||
bail!("dectected short link target.");
|
||||
bail!("detected short link target.");
|
||||
}
|
||||
let target_len = size - HEADER_SIZE;
|
||||
|
||||
@ -104,7 +104,7 @@ impl<R: Read> SequentialDecoder<R> {
|
||||
|
||||
pub(crate) fn read_hardlink(&mut self, size: u64) -> Result<(PathBuf, u64), Error> {
|
||||
if size < (HEADER_SIZE + 8 + 2) {
|
||||
bail!("dectected short hardlink header.");
|
||||
bail!("detected short hardlink header.");
|
||||
}
|
||||
let offset: u64 = self.read_item()?;
|
||||
let target = self.read_link(size - 8)?;
|
||||
@ -121,7 +121,7 @@ impl<R: Read> SequentialDecoder<R> {
|
||||
|
||||
pub(crate) fn read_filename(&mut self, size: u64) -> Result<OsString, Error> {
|
||||
if size < (HEADER_SIZE + 2) {
|
||||
bail!("dectected short filename");
|
||||
bail!("detected short filename");
|
||||
}
|
||||
let name_len = size - HEADER_SIZE;
|
||||
|
||||
|
@ -40,7 +40,7 @@ fn now() -> Result<f64, Error> {
|
||||
Ok(time.as_secs_f64())
|
||||
}
|
||||
|
||||
pub fn update_value(rel_path: &str, value: f64, dst: DST) -> Result<(), Error> {
|
||||
pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<(), Error> {
|
||||
|
||||
let mut path = PathBuf::from(PBS_RRD_BASEDIR);
|
||||
path.push(rel_path);
|
||||
@ -52,7 +52,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST) -> Result<(), Error> {
|
||||
|
||||
if let Some(rrd) = map.get_mut(rel_path) {
|
||||
rrd.update(now, value);
|
||||
rrd.save(&path)?;
|
||||
if save { rrd.save(&path)?; }
|
||||
} else {
|
||||
let mut rrd = match RRD::load(&path) {
|
||||
Ok(rrd) => rrd,
|
||||
@ -64,7 +64,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST) -> Result<(), Error> {
|
||||
},
|
||||
};
|
||||
rrd.update(now, value);
|
||||
rrd.save(&path)?;
|
||||
if save { rrd.save(&path)?; }
|
||||
map.insert(rel_path.into(), rrd);
|
||||
}
|
||||
|
||||
|
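The new `save` flag lets a caller batch several in-memory RRD updates and hit the disk only once. A hypothetical caller sketch; the series names, values and the DST variant are assumptions for illustration, not taken from this diff:

// Update related series in memory, flush to disk only with the last call.
let (cpu_value, iowait_value, mem_value) = (0.25_f64, 0.01_f64, 3.2e9_f64);
update_value("host/cpu", cpu_value, DST::Gauge, false)?;
update_value("host/iowait", iowait_value, DST::Gauge, false)?;
update_value("host/memused", mem_value, DST::Gauge, true)?;
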
@ -72,7 +72,7 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
|
||||
/// If the task is spanned from a different process, we simply return if
|
||||
/// that process is still running. This information is good enough to detect
|
||||
/// stale tasks...
|
||||
fn worker_is_active_local(upid: &UPID) -> bool {
|
||||
pub fn worker_is_active_local(upid: &UPID) -> bool {
|
||||
if (upid.pid == *MY_PID) && (upid.pstart == *MY_PID_PSTART) {
|
||||
WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id)
|
||||
} else {
|
||||
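Making worker_is_active_local public lets other modules in the same daemon do a cheap, synchronous liveness check; a hypothetical caller sketch:

// `upid` is assumed to be a parsed UPID of some task.
if worker_is_active_local(&upid) {
    println!("task is still running");
}
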
@ -277,7 +277,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
|
||||
} else {
|
||||
match state {
|
||||
None => {
|
||||
println!("Detected stoped UPID {}", upid_str);
|
||||
println!("Detected stopped UPID {}", upid_str);
|
||||
let status = upid_read_status(&upid)
|
||||
.unwrap_or_else(|_| String::from("unknown"));
|
||||
finish_list.push(TaskListInfo {
|
||||
|
@ -127,7 +127,7 @@ pub fn lock_file<F: AsRawFd>(
|
||||
}
|
||||
|
||||
/// Open or create a lock file (append mode). Then try to
|
||||
/// aquire a lock using `lock_file()`.
|
||||
/// acquire a lock using `lock_file()`.
|
||||
pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
|
||||
let path = path.as_ref();
|
||||
let mut file = match OpenOptions::new().create(true).append(true).open(path) {
|
||||
@ -136,7 +136,7 @@ pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<Fi
|
||||
};
|
||||
match lock_file(&mut file, true, Some(timeout)) {
|
||||
Ok(_) => Ok(file),
|
||||
Err(err) => bail!("Unable to aquire lock {:?} - {}", path, err),
|
||||
Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
|
||||
}
|
||||
}
|
||||
|
||||
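A usage sketch for open_file_locked; the lock path and timeout are invented for illustration:

use std::time::Duration;

// The lock is tied to the returned file handle and goes away when it is dropped.
let _lock = open_file_locked("/run/proxmox-backup/example.lck", Duration::from_secs(10))?;
// ... read-modify-write the protected resource here ...
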
@ -441,7 +441,7 @@ pub fn join(data: &Vec<String>, sep: char) -> String {
|
||||
|
||||
/// Detect modified configuration files
|
||||
///
|
||||
/// This function fails with a resonable error message if checksums do not match.
|
||||
/// This function fails with a reasonable error message if checksums do not match.
|
||||
pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
|
||||
if digest1 != digest2 {
|
||||
bail!("detected modified configuration - file changed by other user? Try again.");
|
||||
|
@ -149,14 +149,14 @@ fn test_broadcast_future() {
|
||||
.map_ok(|res| {
|
||||
CHECKSUM.fetch_add(res, Ordering::SeqCst);
|
||||
})
|
||||
.map_err(|err| { panic!("got errror {}", err); })
|
||||
.map_err(|err| { panic!("got error {}", err); })
|
||||
.map(|_| ());
|
||||
|
||||
let receiver2 = sender.listen()
|
||||
.map_ok(|res| {
|
||||
CHECKSUM.fetch_add(res*2, Ordering::SeqCst);
|
||||
})
|
||||
.map_err(|err| { panic!("got errror {}", err); })
|
||||
.map_err(|err| { panic!("got error {}", err); })
|
||||
.map(|_| ());
|
||||
|
||||
let mut rt = tokio::runtime::Runtime::new().unwrap();
|
||||
|
@ -13,7 +13,7 @@ use libc::dev_t;
|
||||
use once_cell::sync::OnceCell;
|
||||
|
||||
use proxmox::sys::error::io_err_other;
|
||||
use proxmox::sys::linux::procfs::MountInfo;
|
||||
use proxmox::sys::linux::procfs::{MountInfo, mountinfo::Device};
|
||||
use proxmox::{io_bail, io_format_err};
|
||||
|
||||
pub mod zfs;
|
||||
@ -135,6 +135,28 @@ impl DiskManage {
|
||||
})
|
||||
}
|
||||
|
||||
/// Information about file system type and unsed device for a path
|
||||
///
|
||||
/// Returns tuple (fs_type, device, mount_source)
|
||||
pub fn find_mounted_device(
|
||||
&self,
|
||||
path: &std::path::Path,
|
||||
) -> Result<Option<(String, Device, Option<OsString>)>, Error> {
|
||||
|
||||
let stat = nix::sys::stat::stat(path)?;
|
||||
let device = Device::from_dev_t(stat.st_dev);
|
||||
|
||||
let root_path = std::path::Path::new("/");
|
||||
|
||||
for (_id, entry) in self.mount_info()? {
|
||||
if entry.root == root_path && entry.device == device {
|
||||
return Ok(Some((entry.fs_type.clone(), entry.device, entry.mount_source.clone())));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
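A hypothetical call to the new find_mounted_device helper, given some DiskManage handle (here called `manage`, an assumption) and an invented path:

if let Some((fs_type, _device, mount_source)) = manage.find_mounted_device(Path::new("/datastore"))? {
    println!("backed by {} filesystem, mount source {:?}", fs_type, mount_source);
}
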
/// Check whether a specific device node is mounted.
|
||||
///
|
||||
/// Note that this tries to `stat` the sources of all mount points without caching the result
|
||||
@ -427,6 +449,8 @@ impl Disk {
|
||||
}
|
||||
|
||||
/// Read block device stats
|
||||
///
|
||||
/// see https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
pub fn read_stat(&self) -> std::io::Result<Option<BlockDevStat>> {
|
||||
if let Some(stat) = self.read_sys(Path::new("stat"))? {
|
||||
let stat = unsafe { std::str::from_utf8_unchecked(&stat) };
|
||||
@ -434,23 +458,35 @@ impl Disk {
|
||||
u64::from_str_radix(s, 10).unwrap_or(0)
|
||||
}).collect();
|
||||
|
||||
if stat.len() < 8 { return Ok(None); }
|
||||
if stat.len() < 15 { return Ok(None); }
|
||||
|
||||
return Ok(Some(BlockDevStat {
|
||||
read_ios: stat[0],
|
||||
read_merges: stat[1],
|
||||
read_sectors: stat[2],
|
||||
read_ticks: stat[3],
|
||||
write_ios: stat[4],
|
||||
write_merges: stat[5],
|
||||
write_sectors: stat[6],
|
||||
write_ticks: stat[7],
|
||||
}));
|
||||
write_ios: stat[4] + stat[11], // write + discard
|
||||
write_sectors: stat[6] + stat[13], // write + discard
|
||||
io_ticks: stat[10],
|
||||
}));
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
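The length check grows from 8 to 15 fields because newer kernels append discard counters to /sys/block/<dev>/stat; the new sums fold discards into the write numbers (fields 11 and 13) and field 10 is stored as the single io_ticks value. A small parsing sketch with an invented sample line:

// Invented sample line; indices as used above: 0/2 read I/Os and sectors,
// 4/6 write, 10 io_ticks, 11/13 discard I/Os and sectors.
let line = "8057 1029 466412 3642 4614 22109 645786 6577 0 7164 10230 12 0 96 8";
let stat: Vec<u64> = line
    .split_ascii_whitespace()
    .map(|s| s.parse().unwrap_or(0))
    .collect();
assert!(stat.len() >= 15);
let write_ios = stat[4] + stat[11];     // writes + discards
let write_sectors = stat[6] + stat[13]; // writes + discards
let io_ticks = stat[10];
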
/// Returns disk usage information (total, used, avail)
|
||||
pub fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {
|
||||
|
||||
let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
|
||||
|
||||
use nix::NixPath;
|
||||
|
||||
let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
|
||||
nix::errno::Errno::result(res)?;
|
||||
|
||||
let bsize = stat.f_bsize as u64;
|
||||
|
||||
Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
|
||||
}
|
||||
|
||||
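A short usage sketch for disk_usage; the path is invented:

let (total, used, avail) = disk_usage(std::path::Path::new("/datastore"))?;
println!("total={} used={} avail={} bytes", total, used, avail);
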
/// This is just a rough estimate for a "type" of disk.
|
||||
pub enum DiskType {
|
||||
/// We know nothing.
|
||||
@ -470,11 +506,8 @@ pub enum DiskType {
|
||||
/// Represents the contents of the /sys/block/<dev>/stat file.
|
||||
pub struct BlockDevStat {
|
||||
pub read_ios: u64,
|
||||
pub read_merges: u64,
|
||||
pub read_sectors: u64,
|
||||
pub read_ticks: u64, //milliseconds
|
||||
pub write_ios: u64,
|
||||
pub write_merges: u64,
|
||||
pub write_sectors: u64,
|
||||
pub write_ticks: u64, //milliseconds
|
||||
pub io_ticks: u64, // milliseconds
|
||||
}
|
||||
|
@ -37,10 +37,7 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
|
||||
write_sectors: stat[1]>>9,
|
||||
read_ios: stat[2],
|
||||
write_ios: stat[3],
|
||||
read_merges: 0, // there is no such info
|
||||
write_merges: 0, // there is no such info
|
||||
write_ticks: ticks,
|
||||
read_ticks: ticks,
|
||||
io_ticks: ticks,
|
||||
};
|
||||
|
||||
Ok(Some(stat))
|
||||
|
@ -4,7 +4,7 @@ use std::io::Write;
|
||||
|
||||
/// Log messages with timestamps into files
|
||||
///
|
||||
/// Logs messages to file, and optionaly to standart output.
|
||||
/// Logs messages to file, and optionally to standard output.
|
||||
///
|
||||
///
|
||||
/// #### Example:
|
||||
|
@ -107,7 +107,7 @@ pub fn read_subdir<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> nix::Res
|
||||
}
|
||||
|
||||
/// Scan through a directory with a regular expression. This is simply a shortcut filtering the
|
||||
/// results of `read_subdir`. Non-UTF8 comaptible file names are silently ignored.
|
||||
/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored.
|
||||
pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
|
||||
dirfd: RawFd,
|
||||
path: &P,
|
||||
|
@ -1,6 +1,6 @@
|
||||
//! Inter-process reader-writer lock builder.
|
||||
//!
|
||||
//! This implemenation uses fcntl record locks with non-blocking
|
||||
//! This implementation uses fcntl record locks with non-blocking
|
||||
//! F_SETLK command (never blocks).
|
||||
//!
|
||||
//! We maintain a map of shared locks with time stamps, so you can get
|
||||
@ -127,9 +127,9 @@ impl ProcessLocker {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Try to aquire a shared lock
|
||||
/// Try to acquire a shared lock
|
||||
///
|
||||
/// On sucess, this makes sure that no other process can get an exclusive lock for the file.
|
||||
/// On success, this makes sure that no other process can get an exclusive lock for the file.
|
||||
pub fn try_shared_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockSharedGuard, Error> {
|
||||
|
||||
let mut data = locker.lock().unwrap();
|
||||
@ -168,7 +168,7 @@ impl ProcessLocker {
|
||||
result
|
||||
}
|
||||
|
||||
/// Try to aquire a exclusive lock
|
||||
/// Try to acquire a exclusive lock
|
||||
///
|
||||
/// Make sure the we are the only process which has locks for this file (shared or exclusive).
|
||||
pub fn try_exclusive_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockExclusiveGuard, Error> {
|
||||
|
@ -296,6 +296,9 @@ mod test {
|
||||
test_value("mon 2:*", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR)?;
|
||||
test_value("mon 2:50", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR + 50*MIN)?;
|
||||
|
||||
test_value("daily", THURSDAY_00_00, THURSDAY_00_00 + DAY)?;
|
||||
test_value("daily", THURSDAY_00_00+1, THURSDAY_00_00 + DAY)?;
|
||||
|
||||
let n = test_value("5/2:0", THURSDAY_00_00, THURSDAY_00_00 + 5*HOUR)?;
|
||||
let n = test_value("5/2:0", n, THURSDAY_00_00 + 7*HOUR)?;
|
||||
let n = test_value("5/2:0", n, THURSDAY_00_00 + 9*HOUR)?;
|
||||
|
@ -1,4 +1,4 @@
|
||||
//! Generate and verify Authentification tickets
|
||||
//! Generate and verify Authentication tickets
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use base64;
|
||||
|
@ -9,6 +9,7 @@ Ext.define('pbs-data-store-snapshots', {
|
||||
dateFormat: 'timestamp'
|
||||
},
|
||||
'files',
|
||||
'owner',
|
||||
{ name: 'size', type: 'int' },
|
||||
]
|
||||
});
|
||||
@ -76,7 +77,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
} else if (btype === 'host') {
|
||||
cls = 'fa-building';
|
||||
} else {
|
||||
console.warn(`got unkown backup-type '${btype}'`);
|
||||
console.warn(`got unknown backup-type '${btype}'`);
|
||||
continue; // FIXME: auto render? what do?
|
||||
}
|
||||
|
||||
@ -125,6 +126,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
group["backup-time"] = last_backup;
|
||||
group.files = item.files;
|
||||
group.size = item.size;
|
||||
group.owner = item.owner;
|
||||
}
|
||||
}
|
||||
group.count = group.children.length;
|
||||
@ -157,67 +159,59 @@ Ext.define('PBS.DataStoreContent', {
|
||||
}
|
||||
},
|
||||
|
||||
initComponent: function() {
|
||||
var me = this;
|
||||
columns: [
|
||||
{
|
||||
xtype: 'treecolumn',
|
||||
header: gettext("Backup Group"),
|
||||
dataIndex: 'text',
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
xtype: 'datecolumn',
|
||||
header: gettext('Backup Time'),
|
||||
sortable: true,
|
||||
dataIndex: 'backup-time',
|
||||
format: 'Y-m-d H:i:s',
|
||||
width: 150
|
||||
},
|
||||
{
|
||||
header: gettext("Size"),
|
||||
sortable: true,
|
||||
dataIndex: 'size',
|
||||
renderer: Proxmox.Utils.format_size,
|
||||
},
|
||||
{
|
||||
xtype: 'numbercolumn',
|
||||
format: '0',
|
||||
header: gettext("Count"),
|
||||
sortable: true,
|
||||
dataIndex: 'count',
|
||||
},
|
||||
{
|
||||
header: gettext("Owner"),
|
||||
sortable: true,
|
||||
dataIndex: 'owner',
|
||||
},
|
||||
{
|
||||
header: gettext("Files"),
|
||||
sortable: false,
|
||||
dataIndex: 'files',
|
||||
flex: 2
|
||||
},
|
||||
],
|
||||
|
||||
var sm = Ext.create('Ext.selection.RowModel', {});
|
||||
|
||||
var prune_btn = new Proxmox.button.Button({
|
||||
tbar: [
|
||||
{
|
||||
text: gettext('Reload'),
|
||||
iconCls: 'fa fa-refresh',
|
||||
handler: 'reload',
|
||||
},
|
||||
{
|
||||
xtype: 'proxmoxButton',
|
||||
text: gettext('Prune'),
|
||||
disabled: true,
|
||||
selModel: sm,
|
||||
enableFn: function(record) { return !record.data.leaf; },
|
||||
handler: 'onPrune',
|
||||
});
|
||||
|
||||
Ext.apply(me, {
|
||||
selModel: sm,
|
||||
columns: [
|
||||
{
|
||||
xtype: 'treecolumn',
|
||||
header: gettext("Backup Group"),
|
||||
dataIndex: 'text',
|
||||
flex: 1
|
||||
},
|
||||
{
|
||||
xtype: 'datecolumn',
|
||||
header: gettext('Backup Time'),
|
||||
sortable: true,
|
||||
dataIndex: 'backup-time',
|
||||
format: 'Y-m-d H:i:s',
|
||||
width: 150
|
||||
},
|
||||
{
|
||||
header: gettext("Size"),
|
||||
sortable: true,
|
||||
dataIndex: 'size',
|
||||
renderer: Proxmox.Utils.format_size,
|
||||
},
|
||||
{
|
||||
xtype: 'numbercolumn',
|
||||
format: '0',
|
||||
header: gettext("Count"),
|
||||
sortable: true,
|
||||
dataIndex: 'count',
|
||||
},
|
||||
{
|
||||
header: gettext("Files"),
|
||||
sortable: false,
|
||||
dataIndex: 'files',
|
||||
flex: 2
|
||||
}
|
||||
],
|
||||
|
||||
tbar: [
|
||||
{
|
||||
text: gettext('Reload'),
|
||||
iconCls: 'fa fa-refresh',
|
||||
handler: 'reload',
|
||||
},
|
||||
prune_btn
|
||||
],
|
||||
});
|
||||
|
||||
me.callParent();
|
||||
},
|
||||
}
|
||||
],
|
||||
});
|
||||
|
@ -5,24 +5,18 @@ Ext.define('pve-rrd-datastore', {
|
||||
'total',
|
||||
'read_ios',
|
||||
'read_bytes',
|
||||
'read_ticks',
|
||||
'write_ios',
|
||||
'write_bytes',
|
||||
'write_ticks',
|
||||
'io_ticks',
|
||||
{
|
||||
name: 'read_delay', calculate: function(data) {
|
||||
if (data.read_ios === undefined || data.read_ios === 0 || data.read_ticks == undefined) {
|
||||
name: 'io_delay', calculate: function(data) {
|
||||
let ios = 0;
|
||||
if (data.read_ios !== undefined) { ios += data.read_ios; }
|
||||
if (data.write_ios !== undefined) { ios += data.write_ios; }
|
||||
if (ios == 0 || data.io_ticks === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return (data.read_ticks*1000)/data.read_ios;
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'write_delay', calculate: function(data) {
|
||||
if (data.write_ios === undefined || data.write_ios === 0 || data.write_ticks == undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return (data.write_ticks*1000)/data.write_ios;
|
||||
return (data.io_ticks*1000.0)/ios;
|
||||
}
|
||||
},
|
||||
{ type: 'date', dateFormat: 'timestamp', name: 'time' }
|
||||
@ -85,9 +79,9 @@ Ext.define('PBS.DataStoreStatistic', {
|
||||
},
|
||||
{
|
||||
xtype: 'proxmoxRRDChart',
|
||||
title: gettext('Delay (ms)'),
|
||||
fields: ['read_delay','write_delay'],
|
||||
fieldTitles: [gettext('Read'), gettext('Write')],
|
||||
title: gettext('IO Delay (ms)'),
|
||||
fields: ['io_delay'],
|
||||
fieldTitles: [gettext('IO Delay')],
|
||||
store: rrdstore
|
||||
},
|
||||
]
|
||||
|
@ -6,11 +6,15 @@ IMAGES := \
|
||||
|
||||
JSSRC= \
|
||||
form/UserSelector.js \
|
||||
form/RemoteSelector.js \
|
||||
form/DataStoreSelector.js \
|
||||
config/UserView.js \
|
||||
config/RemoteView.js \
|
||||
config/ACLView.js \
|
||||
config/SyncView.js \
|
||||
window/UserEdit.js \
|
||||
window/RemoteEdit.js \
|
||||
window/SyncJobEdit.js \
|
||||
window/ACLEdit.js \
|
||||
Utils.js \
|
||||
LoginView.js \
|
||||
|
@ -36,6 +36,12 @@ Ext.define('PBS.store.NavigationStore', {
|
||||
path: 'pbsRemoteView',
|
||||
leaf: true,
|
||||
},
|
||||
{
|
||||
text: gettext('Sync Jobs'),
|
||||
iconCls: 'fa fa-refresh',
|
||||
path: 'pbsSyncJobView',
|
||||
leaf: true,
|
||||
},
|
||||
{
|
||||
text: gettext('Data Store'),
|
||||
iconCls: 'fa fa-archive',
|
||||
|
@ -21,8 +21,24 @@ Ext.define('pve-rrd-node', {
|
||||
'memused',
|
||||
'swaptotal',
|
||||
'swapused',
|
||||
'roottotal',
|
||||
'rootused',
|
||||
'total',
|
||||
'used',
|
||||
'read_ios',
|
||||
'read_bytes',
|
||||
'write_ios',
|
||||
'write_bytes',
|
||||
'io_ticks',
|
||||
{
|
||||
name: 'io_delay', calculate: function(data) {
|
||||
let ios = 0;
|
||||
if (data.read_ios !== undefined) { ios += data.read_ios; }
|
||||
if (data.write_ios !== undefined) { ios += data.write_ios; }
|
||||
if (ios == 0 || data.io_ticks === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return (data.io_ticks*1000.0)/ios;
|
||||
}
|
||||
},
|
||||
'loadavg',
|
||||
{ type: 'date', dateFormat: 'timestamp', name: 'time' }
|
||||
]
|
||||
@ -92,7 +108,7 @@ Ext.define('PBS.ServerStatus', {
|
||||
xtype: 'proxmoxRRDChart',
|
||||
title: gettext('CPU usage'),
|
||||
fields: ['cpu','iowait'],
|
||||
fieldTitles: [gettext('CPU usage'), gettext('IO delay')],
|
||||
fieldTitles: [gettext('CPU usage'), gettext('IO wait')],
|
||||
store: rrdstore
|
||||
},
|
||||
{
|
||||
@ -146,8 +162,8 @@ Ext.define('PBS.ServerStatus', {
|
||||
{
|
||||
xtype: 'proxmoxRRDChart',
|
||||
title: gettext('Root Disk IO Delay (ms)'),
|
||||
fields: ['read_delay','write_delay'],
|
||||
fieldTitles: [gettext('Read'), gettext('Write')],
|
||||
fields: ['io_delay'],
|
||||
fieldTitles: [gettext('IO Delay')],
|
||||
store: rrdstore
|
||||
},
|
||||
]
|
||||
|
@ -7,12 +7,8 @@ Ext.define('PBS.Utils', {
|
||||
singleton: true,
|
||||
|
||||
updateLoginData: function(data) {
|
||||
Proxmox.CSRFPreventionToken = data.CSRFPreventionToken;
|
||||
Proxmox.UserName = data.username;
|
||||
//console.log(data.ticket);
|
||||
// fixme: use secure flag once we have TLS
|
||||
//Ext.util.Cookies.set('PBSAuthCookie', data.ticket, null, '/', null, true );
|
||||
Ext.util.Cookies.set('PBSAuthCookie', data.ticket, null, '/', null, false);
|
||||
|
||||
Proxmox.Utils.setAuthData(data);
|
||||
},
|
||||
|
||||
dataStorePrefix: 'DataStore-',
|
||||
@ -62,6 +58,7 @@ Ext.define('PBS.Utils', {
|
||||
Proxmox.Utils.override_task_descriptions({
|
||||
garbage_collection: ['Datastore', gettext('Garbage collect') ],
|
||||
sync: ['Datastore', gettext('Remote Sync') ],
|
||||
syncjob: [gettext('Sync Job'), gettext('Remote Sync') ],
|
||||
prune: (type, id) => {
|
||||
return PBS.Utils.render_datastore_worker_id(id, gettext('Prune'));
|
||||
},
|
||||
|
@ -1,6 +1,6 @@
|
||||
Ext.define('pmx-remotes', {
|
||||
extend: 'Ext.data.Model',
|
||||
fields: [ 'name', 'host', 'userid', 'fingerprint' ],
|
||||
fields: [ 'name', 'host', 'userid', 'fingerprint', 'comment' ],
|
||||
idProperty: 'name',
|
||||
proxy: {
|
||||
type: 'proxmox',
|
||||
@ -113,7 +113,7 @@ Ext.define('PBS.config.RemoteView', {
|
||||
},
|
||||
{
|
||||
header: gettext('User name'),
|
||||
width: 100,
|
||||
width: 200,
|
||||
sortable: true,
|
||||
renderer: Ext.String.htmlEncode,
|
||||
dataIndex: 'userid',
|
||||
@ -123,6 +123,13 @@ Ext.define('PBS.config.RemoteView', {
|
||||
sortable: false,
|
||||
renderer: Ext.String.htmlEncode,
|
||||
dataIndex: 'fingerprint',
|
||||
width: 200,
|
||||
},
|
||||
{
|
||||
header: gettext('Comment'),
|
||||
sortable: false,
|
||||
renderer: Ext.String.htmlEncode,
|
||||
dataIndex: 'comment',
|
||||
flex: 1,
|
||||
},
|
||||
],
|
||||
|
www/config/SyncView.js (new file, 251 lines)
@ -0,0 +1,251 @@
|
||||
Ext.define('pbs-sync-jobs-status', {
|
||||
extend: 'Ext.data.Model',
|
||||
fields: [
|
||||
'id', 'remote', 'remote-store', 'store', 'schedule',
|
||||
'next-run', 'last-run-upid', 'last-run-state', 'last-run-endtime',
|
||||
{
|
||||
name: 'duration',
|
||||
calculate: function(data) {
|
||||
let endtime = data['last-run-endtime'];
|
||||
if (!endtime) return undefined;
|
||||
let task = Proxmox.Utils.parse_task_upid(data['last-run-upid']);
|
||||
return endtime - task.starttime;
|
||||
},
|
||||
},
|
||||
],
|
||||
idProperty: 'id',
|
||||
proxy: {
|
||||
type: 'proxmox',
|
||||
url: '/api2/json/admin/sync',
|
||||
},
|
||||
});
|
||||
|
||||
Ext.define('PBS.config.SyncJobView', {
|
||||
extend: 'Ext.grid.GridPanel',
|
||||
alias: 'widget.pbsSyncJobView',
|
||||
|
||||
stateful: true,
|
||||
stateId: 'grid-sync-jobs',
|
||||
|
||||
title: gettext('Sync Jobs'),
|
||||
|
||||
controller: {
|
||||
xclass: 'Ext.app.ViewController',
|
||||
|
||||
addSyncJob: function() {
|
||||
let me = this;
|
||||
Ext.create('PBS.window.SyncJobEdit', {
|
||||
listeners: {
|
||||
destroy: function() {
|
||||
me.reload();
|
||||
},
|
||||
},
|
||||
}).show();
|
||||
},
|
||||
|
||||
editSyncJob: function() {
|
||||
let me = this;
|
||||
let view = me.getView();
|
||||
let selection = view.getSelection();
|
||||
if (selection.length < 1) return;
|
||||
|
||||
Ext.create('PBS.window.SyncJobEdit', {
|
||||
id: selection[0].data.id,
|
||||
listeners: {
|
||||
destroy: function() {
|
||||
me.reload();
|
||||
},
|
||||
},
|
||||
}).show();
|
||||
},
|
||||
|
||||
openTaskLog: function() {
|
||||
let me = this;
|
||||
let view = me.getView();
|
||||
let selection = view.getSelection();
|
||||
if (selection.length < 1) return;
|
||||
|
||||
let upid = selection[0].data['last-run-upid'];
|
||||
if (!upid) return;
|
||||
|
||||
Ext.create('Proxmox.window.TaskViewer', {
|
||||
upid
|
||||
}).show();
|
||||
},
|
||||
|
||||
runSyncJob: function() {
|
||||
let me = this;
|
||||
let view = me.getView();
|
||||
let selection = view.getSelection();
|
||||
if (selection.length < 1) return;
|
||||
|
||||
let id = selection[0].data.id;
|
||||
Proxmox.Utils.API2Request({
|
||||
method: 'POST',
|
||||
url: `/admin/sync/${id}/run`,
|
||||
success: function(response, opt) {
|
||||
Ext.create('Proxmox.window.TaskViewer', {
|
||||
upid: response.result.data,
|
||||
taskDone: function(success) {
|
||||
me.reload();
|
||||
},
|
||||
}).show();
|
||||
},
|
||||
failure: function(response, opt) {
|
||||
Ext.Msg.alert(gettext('Error'), response.htmlStatus);
|
||||
},
|
||||
});
|
||||
},
|
||||
|
||||
render_sync_status: function(value, metadata, record) {
|
||||
if (!record.data['last-run-upid']) {
|
||||
return '-';
|
||||
}
|
||||
|
||||
if (!record.data['last-run-endtime']) {
|
||||
metadata.tdCls = 'x-grid-row-loading';
|
||||
return '';
|
||||
}
|
||||
|
||||
if (value === 'OK') {
|
||||
return `<i class="fa fa-check good"></i> ${gettext("OK")}`;
|
||||
}
|
||||
|
||||
return `<i class="fa fa-times critical"></i> ${gettext("Error")}:${value}`;
|
||||
},
|
||||
|
||||
render_optional_timestamp: function(value) {
|
||||
if (!value) return '-';
|
||||
return Proxmox.Utils.render_timestamp(value);
|
||||
},
|
||||
|
||||
reload: function() { this.getView().getStore().rstore.load(); },
|
||||
|
||||
init: function(view) {
|
||||
Proxmox.Utils.monStoreErrors(view, view.getStore().rstore);
|
||||
},
|
||||
},
|
||||
|
||||
listeners: {
|
||||
activate: 'reload',
|
||||
itemdblclick: 'editSyncJob',
|
||||
},
|
||||
|
||||
store: {
|
||||
type: 'diff',
|
||||
autoDestroy: true,
|
||||
autoDestroyRstore: true,
|
||||
sorters: 'id',
|
||||
rstore: {
|
||||
type: 'update',
|
||||
storeid: 'pbs-sync-jobs-status',
|
||||
model: 'pbs-sync-jobs-status',
|
||||
autoStart: true,
|
||||
interval: 5000,
|
||||
},
|
||||
},
|
||||
|
||||
tbar: [
|
||||
{
|
||||
xtype: 'proxmoxButton',
|
||||
text: gettext('Add'),
|
||||
handler: 'addSyncJob',
|
||||
selModel: false,
|
||||
},
|
||||
{
|
||||
xtype: 'proxmoxButton',
|
||||
text: gettext('Edit'),
|
||||
handler: 'editSyncJob',
|
||||
disabled: true,
|
||||
},
|
||||
{
|
||||
xtype: 'proxmoxStdRemoveButton',
|
||||
baseurl: '/config/sync/',
|
||||
callback: 'reload',
|
||||
},
|
||||
'-',
|
||||
{
|
||||
xtype: 'proxmoxButton',
|
||||
text: gettext('Log'),
|
||||
handler: 'openTaskLog',
|
||||
enableFn: (rec) => !!rec.data['last-run-upid'],
|
||||
disabled: true,
|
||||
},
|
||||
{
|
||||
xtype: 'proxmoxButton',
|
||||
text: gettext('Run now'),
|
||||
handler: 'runSyncJob',
|
||||
disabled: true,
|
||||
},
|
||||
],
|
||||
|
||||
viewConfig: {
|
||||
trackOver: false,
|
||||
},
|
||||
|
||||
columns: [
|
||||
{
|
||||
header: gettext('Sync Job'),
|
||||
width: 200,
|
||||
sortable: true,
|
||||
renderer: Ext.String.htmlEncode,
|
||||
dataIndex: 'id',
|
||||
},
|
||||
{
|
||||
header: gettext('Remote'),
|
||||
width: 200,
|
||||
sortable: true,
|
||||
dataIndex: 'remote',
|
||||
},
|
||||
{
|
||||
header: gettext('Remote Store'),
|
||||
width: 200,
|
||||
sortable: true,
|
||||
dataIndex: 'remote-store',
|
||||
},
|
||||
{
|
||||
header: gettext('Local Store'),
|
||||
width: 200,
|
||||
sortable: true,
|
||||
dataIndex: 'store',
|
||||
},
|
||||
{
|
||||
header: gettext('Schedule'),
|
||||
sortable: true,
|
||||
dataIndex: 'schedule',
|
||||
},
|
||||
{
|
||||
header: gettext('Status'),
|
||||
dataIndex: 'last-run-state',
|
||||
flex: 1,
|
||||
renderer: 'render_sync_status',
|
||||
},
|
||||
{
|
||||
header: gettext('Last Sync'),
|
||||
sortable: true,
|
||||
minWidth: 200,
|
||||
renderer: 'render_optional_timestamp',
|
||||
dataIndex: 'last-run-endtime',
|
||||
},
|
||||
{
|
||||
text: gettext('Duration'),
|
||||
dataIndex: 'duration',
|
||||
width: 60,
|
||||
renderer: Proxmox.Utils.render_duration,
|
||||
},
|
||||
{
|
||||
header: gettext('Next Run'),
|
||||
sortable: true,
|
||||
minWidth: 200,
|
||||
renderer: 'render_optional_timestamp',
|
||||
dataIndex: 'next-run',
|
||||
},
|
||||
{
|
||||
header: gettext('Comment'),
|
||||
hidden: true,
|
||||
sortable: true,
|
||||
renderer: Ext.String.htmlEncode,
|
||||
dataIndex: 'comment',
|
||||
},
|
||||
],
|
||||
});
|
www/form/DataStoreSelector.js (new file, 34 lines)
@ -0,0 +1,34 @@
|
||||
Ext.define('PBS.form.DataStoreSelector', {
|
||||
extend: 'Proxmox.form.ComboGrid',
|
||||
alias: 'widget.pbsDataStoreSelector',
|
||||
|
||||
allowBlank: false,
|
||||
autoSelect: false,
|
||||
valueField: 'store',
|
||||
displayField: 'store',
|
||||
|
||||
store: {
|
||||
model: 'pbs-datastore-list',
|
||||
autoLoad: true,
|
||||
sorters: 'store',
|
||||
},
|
||||
|
||||
listConfig: {
|
||||
columns: [
|
||||
{
|
||||
header: gettext('DataStore'),
|
||||
sortable: true,
|
||||
dataIndex: 'store',
|
||||
renderer: Ext.String.htmlEncode,
|
||||
flex: 1,
|
||||
},
|
||||
{
|
||||
header: gettext('Comment'),
|
||||
sortable: true,
|
||||
dataIndex: 'comment',
|
||||
renderer: Ext.String.htmlEncode,
|
||||
flex: 1,
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
www/form/RemoteSelector.js (new file, 40 lines)
@ -0,0 +1,40 @@
|
||||
Ext.define('PBS.form.RemoteSelector', {
|
||||
extend: 'Proxmox.form.ComboGrid',
|
||||
alias: 'widget.pbsRemoteSelector',
|
||||
|
||||
allowBlank: false,
|
||||
autoSelect: false,
|
||||
valueField: 'name',
|
||||
displayField: 'name',
|
||||
|
||||
store: {
|
||||
model: 'pmx-remotes',
|
||||
autoLoad: true,
|
||||
sorters: 'name',
|
||||
},
|
||||
|
||||
listConfig: {
|
||||
columns: [
|
||||
{
|
||||
header: gettext('Remote'),
|
||||
sortable: true,
|
||||
dataIndex: 'name',
|
||||
renderer: Ext.String.htmlEncode,
|
||||
flex: 1,
|
||||
},
|
||||
{
|
||||
header: gettext('Host'),
|
||||
sortable: true,
|
||||
dataIndex: 'host',
|
||||
flex: 1,
|
||||
},
|
||||
{
|
||||
header: gettext('User name'),
|
||||
sortable: true,
|
||||
dataIndex: 'userid',
|
||||
renderer: Ext.String.htmlEncode,
|
||||
flex: 1,
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
@ -73,6 +73,11 @@ Ext.define('PBS.window.RemoteEdit', {
|
||||
name: 'fingerprint',
|
||||
fieldLabel: gettext('Fingerprint'),
|
||||
},
|
||||
{
|
||||
xtype: 'proxmoxtextfield',
|
||||
name: 'comment',
|
||||
fieldLabel: gettext('Comment'),
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
|
www/window/SyncJobEdit.js (new file, 84 lines)
@ -0,0 +1,84 @@
|
||||
Ext.define('PBS.window.SyncJobEdit', {
|
||||
extend: 'Proxmox.window.Edit',
|
||||
alias: 'widget.pbsSyncJobEdit',
|
||||
mixins: ['Proxmox.Mixin.CBind'],
|
||||
|
||||
userid: undefined,
|
||||
|
||||
isAdd: true,
|
||||
|
||||
subject: gettext('SyncJob'),
|
||||
|
||||
fieldDefaults: { labelWidth: 120 },
|
||||
|
||||
cbindData: function(initialConfig) {
|
||||
let me = this;
|
||||
|
||||
let baseurl = '/api2/extjs/config/sync';
|
||||
let id = initialConfig.id;
|
||||
|
||||
me.isCreate = !id;
|
||||
me.url = id ? `${baseurl}/${id}` : baseurl;
|
||||
me.method = id ? 'PUT' : 'POST';
|
||||
me.autoLoad = !!id;
|
||||
return { };
|
||||
},
|
||||
|
||||
items: {
|
||||
xtype: 'inputpanel',
|
||||
column1: [
|
||||
{
|
||||
fieldLabel: gettext('Sync Job'),
|
||||
xtype: 'pmxDisplayEditField',
|
||||
name: 'id',
|
||||
renderer: Ext.htmlEncode,
|
||||
allowBlank: false,
|
||||
minLength: 4,
|
||||
cbind: {
|
||||
editable: '{isCreate}',
|
||||
},
|
||||
},
|
||||
{
|
||||
fieldLabel: gettext('Remote'),
|
||||
xtype: 'pbsRemoteSelector',
|
||||
allowBlank: false,
|
||||
name: 'remote',
|
||||
},
|
||||
{
|
||||
fieldLabel: gettext('Local Datastore'),
|
||||
xtype: 'pbsDataStoreSelector',
|
||||
allowBlank: false,
|
||||
name: 'store',
|
||||
},
|
||||
{
|
||||
fieldLabel: gettext('Remote Datastore'),
|
||||
xtype: 'proxmoxtextfield',
|
||||
allowBlank: false,
|
||||
name: 'remote-store',
|
||||
},
|
||||
],
|
||||
|
||||
column2: [
|
||||
{
|
||||
fieldLabel: gettext('Remove vanished'),
|
||||
xtype: 'proxmoxcheckbox',
|
||||
name: 'remove-vanished',
|
||||
uncheckedValue: false,
|
||||
value: true,
|
||||
},
|
||||
{
|
||||
fieldLabel: gettext('Schedule'),
|
||||
xtype: 'proxmoxtextfield',
|
||||
name: 'schedule',
|
||||
},
|
||||
],
|
||||
|
||||
columnB: [
|
||||
{
|
||||
fieldLabel: gettext('Comment'),
|
||||
xtype: 'proxmoxtextfield',
|
||||
name: 'comment',
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|