Compare commits

...

10 Commits

Author SHA1 Message Date
43ba913977 bump version to 0.2.3-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-06-04 10:39:15 +02:00
a720894ff0 rrd: fix off-by-one in save interval calculation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-06-04 10:30:47 +02:00
a95a3fb893 fix csum calculation of not 'chunk_size' aligned images
the last chunk does not have to be as big as the chunk_size,
just use the already available 'chunk_end' function which does the
correct thing

this fixes restoration of images whose sizes are not a multiple of
'chunk_size' as well

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-06-04 10:18:30 +02:00
620911b426 src/tools/disks/lvm.rs: implement get_lvm_devices() 2020-06-04 09:12:19 +02:00
5c264c8d80 src/tools/disks.rs: add/use get_partition_type_info 2020-06-04 07:48:22 +02:00
8d78589969 improve display of 'next run' for sync jobs
if the last sync job is too far in the past (or there was none at all
for now) we run it at the next iteration, so we want to show that

we now calculate the next_run by using either the real last endtime
as time or 0

then in the frontend, we check if the next_run is < now and show 'pending'
(we do it this way also for replication on pve)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-06-04 07:03:54 +02:00
eed8a5ad79 tools/systemd/time: fix compute_next_event for weekdays
two things were wrong here:
* the range (x..y) does not include y, so the range
  (day_num+1..6) goes from (day_num+1) to 5 (but sunday is 6)

* WeekDays.bits() does not return the 'day_num' of that day, but
  the bit value (e.g. 64 for SUNDAY) but was treated as the index of
  the day of the week
  to fix this, we drop the map to WeekDays and use the 'indices'
  directly

this patch makes the test work again

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-06-04 07:02:33 +02:00
538b9c1c27 systemd/time: add tests for all weekdays
this fails for now

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-06-04 07:02:23 +02:00
55919bf141 verify_file: add missing closing parenthesis in error message
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-06-03 19:10:01 +02:00
456ad0c478 src/tools/disks/zfs.rs: add parser for zpool list output 2020-06-03 12:16:08 +02:00
11 changed files with 316 additions and 30 deletions

View File

@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "0.2.2"
version = "0.2.3"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"

13
debian/changelog vendored
View File

@ -1,3 +1,16 @@
rust-proxmox-backup (0.2.3-1) unstable; urgency=medium
* tools/systemd/time: fix compute_next_event for weekdays
* improve display of 'next run' for sync jobs
* fix csum calculation for images which do not have a 'chunk_size' aligned
size
* add parser for zpool list output
-- Proxmox Support Team <support@proxmox.com> Thu, 04 Jun 2020 10:39:06 +0200
rust-proxmox-backup (0.2.2-1) unstable; urgency=medium
* proxmox-backup-client.rs: implement quiet flag

View File

@ -1,6 +1,5 @@
use anyhow::{Error};
use serde_json::Value;
use std::time::{SystemTime, UNIX_EPOCH};
use std::collections::HashMap;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
@ -51,25 +50,22 @@ pub fn list_sync_jobs(
}
}
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
_ => 0i64,
};
for job in &mut list {
let mut last = 0;
if let Some(task) = last_tasks.get(&job.id) {
job.last_run_upid = Some(task.upid_str.clone());
if let Some((endtime, status)) = &task.state {
job.last_run_state = Some(String::from(status));
job.last_run_endtime = Some(*endtime);
last = *endtime;
}
}
job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
compute_next_event(&event, now, false).ok()
compute_next_event(&event, last, false).ok()
})();
if let Some(task) = last_tasks.get(&job.id) {
job.last_run_upid = Some(task.upid_str.clone());
if let Some((endttime, status)) = &task.state {
job.last_run_state = Some(String::from(status));
job.last_run_endtime = Some(*endttime);
}
}
}
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

View File

@ -198,7 +198,7 @@ impl FixedIndexReader {
let mut csum = openssl::sha::Sha256::new();
let mut chunk_end = 0;
for pos in 0..self.index_length {
chunk_end = ((pos + 1) * self.chunk_size) as u64;
chunk_end = self.chunk_end(pos);
let digest = self.chunk_digest(pos);
csum.update(digest);
}

View File

@ -73,7 +73,7 @@ impl BackupManifest {
let info = self.lookup_file_info(name)?;
if size != info.size {
bail!("wrong size for file '{}' ({} != {}", name, info.size, size);
bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
}
if csum != &info.csum {

View File

@ -15,7 +15,7 @@ use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{ DiskManage, zfs::zfs_pool_stats };
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
fn main() {
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
@ -601,7 +601,7 @@ async fn run_stat_generator() {
let mut count = 0;
loop {
count += 1;
let save = if count > 6 { count = 0; true } else { false };
let save = if count >= 6 { count = 0; true } else { false };
let delay_target = Instant::now() + Duration::from_secs(10);

View File

@ -1,6 +1,6 @@
//! Disk query/management utilities.
use std::collections::HashSet;
use std::collections::{HashMap, HashSet};
use std::ffi::{OsStr, OsString};
use std::io;
use std::os::unix::ffi::{OsStrExt, OsStringExt};
@ -16,7 +16,10 @@ use proxmox::sys::error::io_err_other;
use proxmox::sys::linux::procfs::{MountInfo, mountinfo::Device};
use proxmox::{io_bail, io_format_err};
pub mod zfs;
mod zfs;
pub use zfs::*;
mod lvm;
pub use lvm::*;
bitflags! {
/// Ways a device is being used.
@ -511,3 +514,42 @@ pub struct BlockDevStat {
pub write_sectors: u64,
pub io_ticks: u64, // milliseconds
}
/// Use lsblk to read partition type uuids.
///
/// Runs `lsblk --json -o path,parttype` and returns a map from partition
/// type GUID (lsblk's `parttype` column) to the list of device paths
/// carrying that partition type.
pub fn get_partition_type_info() -> Result<HashMap<String, Vec<String>>, Error> {

    const LSBLK_BIN_PATH: &str = "/usr/bin/lsblk";

    let mut command = std::process::Command::new(LSBLK_BIN_PATH);
    command.args(&["--json", "-o", "path,parttype"]);

    let output = command.output()
        .map_err(|err| format_err!("failed to execute '{}' - {}", LSBLK_BIN_PATH, err))?;

    let output = crate::tools::command_output(output, None)
        .map_err(|err| format_err!("lsblk command failed: {}", err))?;

    let mut res: HashMap<String, Vec<String>> = HashMap::new();

    let output: serde_json::Value = output.parse()?;

    // A missing "blockdevices" array simply yields an empty map.
    if let Some(list) = output["blockdevices"].as_array() {
        for info in list {
            // Entries without a path or without a partition type
            // (e.g. whole disks) are skipped.
            let path = match info["path"].as_str() {
                Some(p) => p,
                None => continue,
            };
            let partition_type = match info["parttype"].as_str() {
                Some(t) => t.to_owned(),
                None => continue,
            };
            // entry API: one lookup, and the Vec is only allocated on
            // first insert (the old `or_insert(Vec::new())` allocated
            // eagerly on every iteration).
            res.entry(partition_type).or_default().push(path.to_string());
        }
    }

    Ok(res)
}

55
src/tools/disks/lvm.rs Normal file
View File

@ -0,0 +1,55 @@
use std::collections::{HashSet, HashMap};
use anyhow::{format_err, Error};
use serde_json::Value;
use lazy_static::lazy_static;
// Set of GPT partition type GUIDs that identify a partition as an LVM
// physical volume; used by get_lvm_devices() to classify devices found
// via the lsblk partition-type map.
lazy_static!{
static ref LVM_UUIDS: HashSet<&'static str> = {
let mut set = HashSet::new();
// "Linux LVM" GPT partition type GUID
set.insert("e6d6d379-f507-44c2-a23c-238f2a3df928");
set
};
}
/// Get list of devices used by LVM (pvs).
///
/// Combines the physical volumes reported by `pvs` with any device whose
/// partition type GUID marks it as an LVM PV.
pub fn get_lvm_devices(
    partition_type_map: &HashMap<String, Vec<String>>,
) -> Result<HashSet<String>, Error> {

    const PVS_BIN_PATH: &str = "/sbin/pvs";

    let mut command = std::process::Command::new(PVS_BIN_PATH);
    command.args(&["--reportformat", "json", "--noheadings", "--readonly", "-o", "pv_name"]);

    let output = command.output()
        .map_err(|err| format_err!("failed to execute '{}' - {}", PVS_BIN_PATH, err))?;

    let output = crate::tools::command_output(output, None)
        .map_err(|err| format_err!("pvs command failed: {}", err))?;

    // Seed the result with devices whose partition type is in LVM_UUIDS.
    let mut device_set: HashSet<String> = partition_type_map.iter()
        .filter_map(|(uuid, list)| if LVM_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
        .flat_map(|list| list.iter().cloned())
        .collect();

    // Add every PV name from the pvs JSON report; a missing/empty report
    // just leaves the set as-is.
    let output: Value = output.parse()?;
    if let Some(pv_list) = output["report"][0]["pv"].as_array() {
        for pv in pv_list {
            if let Some(pv_name) = pv["pv_name"].as_str() {
                device_set.insert(pv_name.to_string());
            }
        }
    }

    Ok(device_set)
}

View File

@ -1,9 +1,46 @@
use anyhow::{bail, Error};
use std::path::PathBuf;
use std::collections::{HashMap, HashSet};
use anyhow::{bail, Error};
use lazy_static::lazy_static;
use nom::{
error::VerboseError,
bytes::complete::{take_while, take_while1, take_till, take_till1},
combinator::{map_res, all_consuming, recognize},
sequence::{preceded, tuple},
character::complete::{space1, digit1, char, line_ending},
multi::{many0, many1},
};
use super::*;
lazy_static!{
static ref ZFS_UUIDS: HashSet<&'static str> = {
let mut set = HashSet::new();
set.insert("6a898cc3-1dd2-11b2-99a6-080020736631"); // apple
set.insert("516e7cba-6ecf-11d6-8ff8-00022d09712b"); // bsd
set
};
}
type IResult<I, O, E = VerboseError<I>> = Result<(I, O), nom::Err<E>>;
/// Usage counters for one ZFS pool, taken from the numeric
/// SIZE/ALLOC/FREE columns of `zpool list -p` output (presumably bytes,
/// since `-p` requests parseable exact values -- the parser itself only
/// requires plain integers).
#[derive(Debug)]
pub struct ZFSPoolUsage {
// total pool size (SIZE column)
total: u64,
// allocated space (ALLOC column)
used: u64,
// unallocated space (FREE column)
free: u64,
}
/// One parsed entry of `zpool list -v` output.
#[derive(Debug)]
pub struct ZFSPoolStatus {
// pool name from the header line
name: String,
// None when any of the SIZE/ALLOC/FREE columns was '-'
usage: Option<ZFSPoolUsage>,
// member device paths; only entries starting with '/dev/' are kept
devices: Vec<String>,
}
/// Returns kernel IO-stats for zfs pools
pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
let mut path = PathBuf::from("/proc/spl/kstat/zfs");
@ -42,3 +79,128 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
Ok(Some(stat))
}
/// Recognizes zero or more spaces and tabs (but not carriage returns or line feeds)
fn multispace0(i: &str) -> IResult<&str, &str> {
take_while(|c| c == ' ' || c == '\t')(i)
}
/// Recognizes one or more spaces and tabs (but not carriage returns or line feeds)
fn multispace1(i: &str) -> IResult<&str, &str> {
take_while1(|c| c == ' ' || c == '\t')(i)
}
/// Parse an unsigned decimal number, where a literal `-` column means
/// "no value" and yields `None` (consuming only the dash).
fn parse_optional_u64(i: &str) -> IResult<&str, Option<u64>> {
    match i.strip_prefix('-') {
        Some(rest) => Ok((rest, None)),
        None => {
            let (rest, value) = map_res(recognize(digit1), str::parse)(i)?;
            Ok((rest, Some(value)))
        }
    }
}
/// Parse one indented device line of `zpool list -v` output and return
/// the device name; the per-device stats columns are discarded.
fn parse_pool_device(i: &str) -> IResult<&str, String> {
let (i, (device, _, _rest)) = tuple((
// leading indentation, then the device name up to the first space/tab
preceded(multispace1, take_till1(|c| c == ' ' || c == '\t')),
// NOTE(review): this requires at least one column after the device
// name -- confirm that holds for every zpool version/line type
multispace1,
// skip the remainder of the line through the trailing newline
preceded(take_till(|c| c == '\n'), char('\n')),
))(i)?;
Ok((i, device.to_string()))
}
/// Parse the pool header line of `zpool list -v` output:
/// "NAME SIZE ALLOC FREE ..." terminated by a newline. Each numeric
/// column may be a literal `-`; usage is only recorded when all three
/// values are present.
fn parse_pool_header(i: &str) -> IResult<&str, ZFSPoolStatus> {
let (i, (text, total, used, free, _, _eol)) = tuple((
// pool name -- NOTE(review): alphanumeric only; pool names may also
// contain characters like '-', '_', '.' or ':' -- confirm coverage
take_while1(|c| char::is_alphanumeric(c)),
preceded(multispace1, parse_optional_u64),
preceded(multispace1, parse_optional_u64),
preceded(multispace1, parse_optional_u64),
// discard the remaining columns up to the end of the line
preceded(space1, take_till(|c| c == '\n')),
line_ending,
))(i)?;
let status = if let (Some(total), Some(used), Some(free)) = (total, used, free) {
ZFSPoolStatus {
name: text.into(),
usage: Some(ZFSPoolUsage { total, used, free }),
devices: Vec::new(),
}
} else {
// at least one column was '-': no usage information for this pool
ZFSPoolStatus {
name: text.into(), usage: None, devices: Vec::new(),
}
};
Ok((i, status))
}
/// Parse a complete pool entry: the header line, one or more device
/// lines, then any trailing blank lines.
fn parse_pool_status(i: &str) -> IResult<&str, ZFSPoolStatus> {
    let (i, mut stat) = parse_pool_header(i)?;
    let (i, devices) = many1(parse_pool_device)(i)?;

    // Keep only absolute device paths; other entries (names not starting
    // with '/dev/') are dropped.
    stat.devices.extend(
        devices.into_iter().filter(|name| name.starts_with("/dev/")),
    );

    // skip empty lines
    let (i, _) = many0(tuple((multispace0, char('\n'))))(i)?;

    Ok((i, stat))
}
/// Parse zpool list output.
///
/// Note: This does not reveal any details on how the pool uses the devices, because
/// the zpool list output format is not really defined...
pub fn parse_zfs_list(i: &str) -> Result<Vec<ZFSPoolStatus>, Error> {
    match all_consuming(many1(parse_pool_status))(i) {
        // recoverable/fatal parse errors: render a readable error trace
        Err(nom::Err::Error(err)) |
        Err(nom::Err::Failure(err)) => {
            bail!("unable to parse zfs list output - {}", nom::error::convert_error(i, err));
        }
        // remaining case is Err(nom::Err::Incomplete(..)); the previous
        // message here ("unable to parse calendar event") was a
        // copy-paste leftover from the systemd time parser
        Err(err) => {
            bail!("unable to parse zfs list output - {}", err);
        }
        Ok((_, list)) => Ok(list),
    }
}
/// List devices used by zfs (or a specific zfs pool)
///
/// Combines the device paths reported by `zpool list -v -P` with devices
/// whose partition type GUID is a known ZFS member type (ZFS_UUIDS).
pub fn zfs_devices(
    partition_type_map: &HashMap<String, Vec<String>>,
    pool: Option<&OsStr>,
) -> Result<HashSet<String>, Error> {

    // Note: zpools list output can include entries for 'special', 'cache' and 'logs'
    // and maybe other things.
    let mut command = std::process::Command::new("/sbin/zpool");
    command.args(&["list", "-H", "-v", "-p", "-P"]);
    if let Some(pool) = pool { command.arg(pool); }

    let output = command.output()
        .map_err(|err| format_err!("failed to execute '/sbin/zpool' - {}", err))?;

    let output = crate::tools::command_output(output, None)
        .map_err(|err| format_err!("zpool list command failed: {}", err))?;

    let list = parse_zfs_list(&output)?;

    let mut device_set = HashSet::new();
    for entry in list {
        // entry.devices is owned here, so the strings can be moved in
        // directly (the previous `device.clone()` per element was a
        // redundant allocation).
        device_set.extend(entry.devices);
    }

    // Also include devices detected via their ZFS partition type GUID.
    for device_list in partition_type_map.iter()
        .filter_map(|(uuid, list)| if ZFS_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
    {
        device_set.extend(device_list.iter().cloned());
    }

    Ok(device_set)
}

View File

@ -163,12 +163,11 @@ pub fn compute_next_event(
if event.days.contains(day) {
t.changes.remove(TMChanges::WDAY);
} else {
if let Some(n) = (day_num+1..6)
.map(|d| WeekDays::from_bits(1<<d).unwrap())
.find(|d| event.days.contains(*d))
if let Some(n) = ((day_num+1)..7)
.find(|d| event.days.contains(WeekDays::from_bits(1<<d).unwrap()))
{
// try next day
t.add_days((n.bits() as i32) - day_num, true);
t.add_days(n - day_num, true);
continue;
} else {
// try next week
@ -296,6 +295,13 @@ mod test {
test_value("mon 2:*", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR)?;
test_value("mon 2:50", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR + 50*MIN)?;
test_value("tue", THURSDAY_00_00, THURSDAY_00_00 + 5*DAY)?;
test_value("wed", THURSDAY_00_00, THURSDAY_00_00 + 6*DAY)?;
test_value("thu", THURSDAY_00_00, THURSDAY_00_00 + 7*DAY)?;
test_value("fri", THURSDAY_00_00, THURSDAY_00_00 + 1*DAY)?;
test_value("sat", THURSDAY_00_00, THURSDAY_00_00 + 2*DAY)?;
test_value("sun", THURSDAY_00_00, THURSDAY_00_00 + 3*DAY)?;
test_value("daily", THURSDAY_00_00, THURSDAY_00_00 + DAY)?;
test_value("daily", THURSDAY_00_00+1, THURSDAY_00_00 + DAY)?;

View File

@ -114,7 +114,19 @@ Ext.define('PBS.config.SyncJobView', {
return `<i class="fa fa-times critical"></i> ${gettext("Error")}:${value}`;
},
render_optional_timestamp: function(value) {
render_next_run: function(value, metadat, record) {
if (!value) return '-';
let now = new Date();
let next = new Date(value*1000);
if (next < now) {
return gettext('pending');
}
return Proxmox.Utils.render_timestamp(value);
},
// Render an optional epoch-seconds value as a formatted timestamp,
// or '-' when the value is unset/zero.
render_optional_timestamp: function(value, metadata, record) {
if (!value) return '-';
return Proxmox.Utils.render_timestamp(value);
},
@ -237,7 +249,7 @@ Ext.define('PBS.config.SyncJobView', {
header: gettext('Next Run'),
sortable: true,
minWidth: 200,
renderer: 'render_optional_timestamp',
renderer: 'render_next_run',
dataIndex: 'next-run',
},
{