Compare commits

10 Commits (SHA1):

43ba913977
a720894ff0
a95a3fb893
620911b426
5c264c8d80
8d78589969
eed8a5ad79
538b9c1c27
55919bf141
456ad0c478
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.2.2"
+version = "0.2.3"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
debian/changelog (+13 lines)
@@ -1,3 +1,16 @@
+rust-proxmox-backup (0.2.3-1) unstable; urgency=medium
+
+  * tools/systemd/time: fix compute_next_event for weekdays
+
+  * improve display of 'next run' for sync jobs
+
+  * fix csum calculation for images which do not have a 'chunk_size' aligned
+    size
+
+  * add parser for zpool list output
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 04 Jun 2020 10:39:06 +0200
+
 rust-proxmox-backup (0.2.2-1) unstable; urgency=medium
 
   * proxmox-backup-client.rs: implement quiet flag
@@ -1,6 +1,5 @@
 use anyhow::{Error};
 use serde_json::Value;
-use std::time::{SystemTime, UNIX_EPOCH};
 use std::collections::HashMap;
 
 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
@@ -51,25 +50,22 @@ pub fn list_sync_jobs(
         }
     }
 
-    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
-        Ok(epoch_now) => epoch_now.as_secs() as i64,
-        _ => 0i64,
-    };
-
     for job in &mut list {
+        let mut last = 0;
+        if let Some(task) = last_tasks.get(&job.id) {
+            job.last_run_upid = Some(task.upid_str.clone());
+            if let Some((endtime, status)) = &task.state {
+                job.last_run_state = Some(String::from(status));
+                job.last_run_endtime = Some(*endtime);
+                last = *endtime;
+            }
+        }
+
         job.next_run = (|| -> Option<i64> {
             let schedule = job.schedule.as_ref()?;
            let event = parse_calendar_event(&schedule).ok()?;
-            compute_next_event(&event, now, false).ok()
+            compute_next_event(&event, last, false).ok()
         })();
-
-        if let Some(task) = last_tasks.get(&job.id) {
-            job.last_run_upid = Some(task.upid_str.clone());
-            if let Some((endttime, status)) = &task.state {
-                job.last_run_state = Some(String::from(status));
-                job.last_run_endtime = Some(*endttime);
-            }
-        }
     }
 
     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
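The hunk above feeds compute_next_event the endtime of the last run instead of the current time, so an overdue job yields a 'next run' timestamp that lies in the past (which the GUI change further down renders as "pending"). A minimal standalone sketch of that effect, with hypothetical timestamps and a stand-in closure instead of the real parse_calendar_event/compute_next_event:

// Illustration only: hypothetical values, not part of the patch.
// A sync job scheduled "daily" whose last run finished yesterday at 00:00:05,
// inspected today at 08:00: computing the next event from the *last* endtime
// yields a timestamp that is already in the past.
fn main() {
    let last_endtime: i64 = 1_591_142_405; // hypothetical: 2020-06-03 00:00:05 UTC
    let now: i64 = 1_591_257_600;          // hypothetical: 2020-06-04 08:00:00 UTC

    // stand-in for compute_next_event(&"daily", t, false)
    let next_midnight_after = |t: i64| (t / 86_400 + 1) * 86_400;

    let next_run = next_midnight_after(last_endtime);
    assert!(next_run < now); // overdue: the UI can show "pending" instead of a future date
    println!("next_run = {}", next_run);
}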
@@ -198,7 +198,7 @@ impl FixedIndexReader {
         let mut csum = openssl::sha::Sha256::new();
         let mut chunk_end = 0;
         for pos in 0..self.index_length {
-            chunk_end = ((pos + 1) * self.chunk_size) as u64;
+            chunk_end = self.chunk_end(pos);
             let digest = self.chunk_digest(pos);
             csum.update(digest);
         }
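A worked example of why the old expression breaks for images whose size is not a multiple of chunk_size: for the final chunk, (pos + 1) * chunk_size points past the end of the image, so the chunk_end fed into the csum differs from the real end offset. The clamping closure below is a stand-in for what self.chunk_end(pos) is assumed to return; the sizes are hypothetical:

// Illustration only (hypothetical sizes): a 10 MiB image stored with a
// 4 MiB chunk size has three chunks; the last one is only 2 MiB long.
fn main() {
    let chunk_size: u64 = 4 * 1024 * 1024;
    let image_size: u64 = 10 * 1024 * 1024;
    let index_length = ((image_size + chunk_size - 1) / chunk_size) as usize; // 3 chunks

    // end offset of chunk `pos`, clamped to the image size
    // (what a chunk_end()-style helper is assumed to compute)
    let chunk_end = |pos: usize| ((pos as u64 + 1) * chunk_size).min(image_size);

    let pos = index_length - 1;                // last chunk
    let naive = (pos as u64 + 1) * chunk_size; // 12 MiB, past the end of the image
    assert_eq!(naive, 12 * 1024 * 1024);
    assert_eq!(chunk_end(pos), image_size);    // 10 MiB: the value the csum must use
}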
@@ -73,7 +73,7 @@ impl BackupManifest {
         let info = self.lookup_file_info(name)?;
 
         if size != info.size {
-            bail!("wrong size for file '{}' ({} != {}", name, info.size, size);
+            bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
         }
 
         if csum != &info.csum {
@@ -15,7 +15,7 @@ use proxmox_backup::server
 use proxmox_backup::tools::daemon;
 use proxmox_backup::server::{ApiConfig, rest::*};
 use proxmox_backup::auth_helpers::*;
-use proxmox_backup::tools::disks::{ DiskManage, zfs::zfs_pool_stats };
+use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
 
 fn main() {
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
@@ -601,7 +601,7 @@ async fn run_stat_generator() {
     let mut count = 0;
     loop {
         count += 1;
-        let save = if count > 6 { count = 0; true } else { false };
+        let save = if count >= 6 { count = 0; true } else { false };
 
         let delay_target = Instant::now() + Duration::from_secs(10);
 
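With the loop sleeping roughly 10 seconds per pass, `count > 6` only triggered a save on the 7th pass (about every 70 seconds), while `count >= 6` saves on the 6th (about once per minute). A small sketch that just counts passes between saves under both conditions:

// Illustration only: count how many passes of the 10-second loop elapse
// between saves for the old and the new condition.
fn passes_between_saves(save_at: impl Fn(u32) -> bool) -> u32 {
    let mut count = 0;
    let mut passes = 0;
    loop {
        passes += 1;
        count += 1;
        if save_at(count) {
            return passes; // number of 10-second intervals until the save fires
        }
    }
}

fn main() {
    assert_eq!(passes_between_saves(|c| c > 6), 7);  // old: roughly every 70 seconds
    assert_eq!(passes_between_saves(|c| c >= 6), 6); // new: roughly once per minute
}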
@@ -1,6 +1,6 @@
 //! Disk query/management utilities for.
 
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
 use std::ffi::{OsStr, OsString};
 use std::io;
 use std::os::unix::ffi::{OsStrExt, OsStringExt};
@@ -16,7 +16,10 @@ use proxmox::sys::error::io_err_other
 use proxmox::sys::linux::procfs::{MountInfo, mountinfo::Device};
 use proxmox::{io_bail, io_format_err};
 
-pub mod zfs;
+mod zfs;
+pub use zfs::*;
+mod lvm;
+pub use lvm::*;
 
 bitflags! {
     /// Ways a device is being used.
@@ -511,3 +514,42 @@ pub struct BlockDevStat {
     pub write_sectors: u64,
     pub io_ticks: u64, // milliseconds
 }
+
+/// Use lsblk to read partition type uuids.
+pub fn get_partition_type_info() -> Result<HashMap<String, Vec<String>>, Error> {
+
+    const LSBLK_BIN_PATH: &str = "/usr/bin/lsblk";
+
+    let mut command = std::process::Command::new(LSBLK_BIN_PATH);
+    command.args(&["--json", "-o", "path,parttype"]);
+
+    let output = command.output()
+        .map_err(|err| format_err!("failed to execute '{}' - {}", LSBLK_BIN_PATH, err))?;
+
+    let output = crate::tools::command_output(output, None)
+        .map_err(|err| format_err!("lsblk command failed: {}", err))?;
+
+    let mut res: HashMap<String, Vec<String>> = HashMap::new();
+
+    let output: serde_json::Value = output.parse()?;
+    match output["blockdevices"].as_array() {
+        Some(list) => {
+            for info in list {
+                let path = match info["path"].as_str() {
+                    Some(p) => p,
+                    None => continue,
+                };
+                let partition_type = match info["parttype"].as_str() {
+                    Some(t) => t.to_owned(),
+                    None => continue,
+                };
+                let devices = res.entry(partition_type).or_insert(Vec::new());
+                devices.push(path.to_string());
+            }
+        }
+        None => {
+
+        }
+    }
+    Ok(res)
+}
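For reference, the new helper builds a map from GPT partition-type UUID to the device paths carrying that type. A sketch with hand-written sample JSON in the shape `lsblk --json -o path,parttype` is expected to print (the sample data and the inline parsing are illustrative, not taken from a real system):

// Illustration only: sample lsblk-style JSON and the map shape that
// get_partition_type_info() builds from it.
use std::collections::HashMap;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let sample = r#"{
        "blockdevices": [
            {"path": "/dev/sda1", "parttype": "e6d6d379-f507-44c2-a23c-238f2a3df928"},
            {"path": "/dev/sdb1", "parttype": "6a898cc3-1dd2-11b2-99a6-080020736631"},
            {"path": "/dev/sr0",  "parttype": null}
        ]
    }"#;

    let v: serde_json::Value = sample.parse()?;
    let mut map: HashMap<String, Vec<String>> = HashMap::new();
    for dev in v["blockdevices"].as_array().unwrap() {
        // entries without a partition type (like /dev/sr0 here) are skipped
        if let (Some(path), Some(ptype)) = (dev["path"].as_str(), dev["parttype"].as_str()) {
            map.entry(ptype.to_owned()).or_default().push(path.to_owned());
        }
    }
    // e.g. {"e6d6d379-...": ["/dev/sda1"], "6a898cc3-...": ["/dev/sdb1"]}
    println!("{:?}", map);
    Ok(())
}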
src/tools/disks/lvm.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
+use std::collections::{HashSet, HashMap};
+
+use anyhow::{format_err, Error};
+use serde_json::Value;
+use lazy_static::lazy_static;
+
+lazy_static!{
+    static ref LVM_UUIDS: HashSet<&'static str> = {
+        let mut set = HashSet::new();
+        set.insert("e6d6d379-f507-44c2-a23c-238f2a3df928");
+        set
+    };
+}
+
+/// Get list of devices used by LVM (pvs).
+pub fn get_lvm_devices(
+    partition_type_map: &HashMap<String, Vec<String>>,
+) -> Result<HashSet<String>, Error> {
+
+    const PVS_BIN_PATH: &str = "/sbin/pvs";
+
+    let mut command = std::process::Command::new(PVS_BIN_PATH);
+    command.args(&["--reportformat", "json", "--noheadings", "--readonly", "-o", "pv_name"]);
+
+    let output = command.output()
+        .map_err(|err| format_err!("failed to execute '{}' - {}", PVS_BIN_PATH, err))?;
+
+    let output = crate::tools::command_output(output, None)
+        .map_err(|err| format_err!("pvs command failed: {}", err))?;
+
+    let mut device_set: HashSet<String> = HashSet::new();
+
+    for device_list in partition_type_map.iter()
+        .filter_map(|(uuid, list)| if LVM_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
+    {
+        for device in device_list {
+            device_set.insert(device.clone());
+        }
+    }
+
+    let output: Value = output.parse()?;
+
+    match output["report"][0]["pv"].as_array() {
+        Some(list) => {
+            for info in list {
+                if let Some(pv_name) = info["pv_name"].as_str() {
+                    device_set.insert(pv_name.to_string());
+                }
+            }
+        }
+        None => return Ok(device_set),
+    }
+
+    Ok(device_set)
+}
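A hedged usage sketch of how the two new helpers appear to be meant to combine from inside the disks module: the lsblk map feeds the partition-type lookup, and the pvs output adds any physical volumes not covered by it (error handling shortened; the printed path is a hypothetical example):

// Illustration only: not part of the patch, just the assumed call pattern.
fn list_lvm_usage() -> Result<(), anyhow::Error> {
    let partition_type_map = get_partition_type_info()?;     // parttype uuid -> device paths (lsblk)
    let lvm_devices = get_lvm_devices(&partition_type_map)?; // union with pvs output

    for dev in &lvm_devices {
        println!("used by LVM: {}", dev); // e.g. "/dev/sda3" on a typical install
    }
    Ok(())
}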
@@ -1,9 +1,46 @@
-use anyhow::{bail, Error};
-
 use std::path::PathBuf;
+use std::collections::{HashMap, HashSet};
 
+use anyhow::{bail, Error};
+use lazy_static::lazy_static;
+
+use nom::{
+    error::VerboseError,
+    bytes::complete::{take_while, take_while1, take_till, take_till1},
+    combinator::{map_res, all_consuming, recognize},
+    sequence::{preceded, tuple},
+    character::complete::{space1, digit1, char, line_ending},
+    multi::{many0, many1},
+};
+
 use super::*;
 
+lazy_static!{
+    static ref ZFS_UUIDS: HashSet<&'static str> = {
+        let mut set = HashSet::new();
+        set.insert("6a898cc3-1dd2-11b2-99a6-080020736631"); // apple
+        set.insert("516e7cba-6ecf-11d6-8ff8-00022d09712b"); // bsd
+        set
+    };
+}
+
+type IResult<I, O, E = VerboseError<I>> = Result<(I, O), nom::Err<E>>;
+
+#[derive(Debug)]
+pub struct ZFSPoolUsage {
+    total: u64,
+    used: u64,
+    free: u64,
+}
+
+#[derive(Debug)]
+pub struct ZFSPoolStatus {
+    name: String,
+    usage: Option<ZFSPoolUsage>,
+    devices: Vec<String>,
+}
+
+/// Returns kernel IO-stats for zfs pools
 pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
 
     let mut path = PathBuf::from("/proc/spl/kstat/zfs");
@@ -42,3 +79,128 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
 
     Ok(Some(stat))
 }
+
+/// Recognizes zero or more spaces and tabs (but not carage returns or line feeds)
+fn multispace0(i: &str) -> IResult<&str, &str> {
+    take_while(|c| c == ' ' || c == '\t')(i)
+}
+
+/// Recognizes one or more spaces and tabs (but not carage returns or line feeds)
+fn multispace1(i: &str) -> IResult<&str, &str> {
+    take_while1(|c| c == ' ' || c == '\t')(i)
+}
+
+fn parse_optional_u64(i: &str) -> IResult<&str, Option<u64>> {
+    if i.starts_with('-') {
+        Ok((&i[1..], None))
+    } else {
+        let (i, value) = map_res(recognize(digit1), str::parse)(i)?;
+        Ok((i, Some(value)))
+    }
+}
+
+fn parse_pool_device(i: &str) -> IResult<&str, String> {
+    let (i, (device, _, _rest)) = tuple((
+        preceded(multispace1, take_till1(|c| c == ' ' || c == '\t')),
+        multispace1,
+        preceded(take_till(|c| c == '\n'), char('\n')),
+    ))(i)?;
+
+    Ok((i, device.to_string()))
+}
+
+fn parse_pool_header(i: &str) -> IResult<&str, ZFSPoolStatus> {
+    let (i, (text, total, used, free, _, _eol)) = tuple((
+        take_while1(|c| char::is_alphanumeric(c)),
+        preceded(multispace1, parse_optional_u64),
+        preceded(multispace1, parse_optional_u64),
+        preceded(multispace1, parse_optional_u64),
+        preceded(space1, take_till(|c| c == '\n')),
+        line_ending,
+    ))(i)?;
+
+    let status = if let (Some(total), Some(used), Some(free)) = (total, used, free) {
+        ZFSPoolStatus {
+            name: text.into(),
+            usage: Some(ZFSPoolUsage { total, used, free }),
+            devices: Vec::new(),
+        }
+    } else {
+        ZFSPoolStatus {
+            name: text.into(), usage: None, devices: Vec::new(),
+        }
+    };
+
+    Ok((i, status))
+}
+
+fn parse_pool_status(i: &str) -> IResult<&str, ZFSPoolStatus> {
+
+    let (i, mut stat) = parse_pool_header(i)?;
+    let (i, devices) = many1(parse_pool_device)(i)?;
+
+    for device_path in devices.into_iter().filter(|n| n.starts_with("/dev/")) {
+        stat.devices.push(device_path);
+    }
+
+    let (i, _) = many0(tuple((multispace0, char('\n'))))(i)?; // skip empty lines
+
+    Ok((i, stat))
+}
+
+/// Parse zpool list outout
+///
+/// Note: This does not reveal any details on how the pool uses the devices, because
+/// the zpool list output format is not really defined...
+pub fn parse_zfs_list(i: &str) -> Result<Vec<ZFSPoolStatus>, Error> {
+    match all_consuming(many1(parse_pool_status))(i) {
+        Err(nom::Err::Error(err)) |
+        Err(nom::Err::Failure(err)) => {
+            bail!("unable to parse zfs list output - {}", nom::error::convert_error(i, err));
+        }
+        Err(err) => {
+            bail!("unable to parse calendar event: {}", err);
+        }
+        Ok((_, ce)) => Ok(ce),
+    }
+}
+
+/// List devices used by zfs (or a specific zfs pool)
+pub fn zfs_devices(
+    partition_type_map: &HashMap<String, Vec<String>>,
+    pool: Option<&OsStr>,
+) -> Result<HashSet<String>, Error> {
+
+    // Note: zpools list output can include entries for 'special', 'cache' and 'logs'
+    // and maybe other things.
+
+    let mut command = std::process::Command::new("/sbin/zpool");
+    command.args(&["list", "-H", "-v", "-p", "-P"]);
+
+    if let Some(pool) = pool { command.arg(pool); }
+
+    let output = command.output()
+        .map_err(|err| format_err!("failed to execute '/sbin/zpool' - {}", err))?;
+
+    let output = crate::tools::command_output(output, None)
+        .map_err(|err| format_err!("zpool list command failed: {}", err))?;
+
+    let list = parse_zfs_list(&output)?;
+
+    let mut device_set = HashSet::new();
+    for entry in list {
+        for device in entry.devices {
+            device_set.insert(device.clone());
+        }
+    }
+
+    for device_list in partition_type_map.iter()
+        .filter_map(|(uuid, list)| if ZFS_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
+    {
+        for device in device_list {
+            device_set.insert(device.clone());
+        }
+    }
+
+    Ok(device_set)
+}
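A usage sketch for the new parser, written as an in-module test so the private `devices` field is reachable: the sample below imitates the whitespace-separated lines `zpool list -H -v -p -P` is expected to emit (a pool header followed by one member-device line; the values are invented), and only the `/dev/...` member line should end up in `devices`:

// Illustration only: hand-written sample input, not captured from a real system.
#[test]
fn zpool_list_parse_example() -> Result<(), Error> {
    let sample = "rpool\t10670309376\t1004428800\t9665880576\t-\t-\t0\t9\t1.00\tONLINE\t-\n\
                  \t/dev/sda2\t10670309376\t1004428800\t9665880576\t-\t-\t0\t9\t-\tONLINE\n";

    let pools = parse_zfs_list(sample)?;
    assert_eq!(pools.len(), 1);
    assert_eq!(pools[0].name, "rpool");
    assert_eq!(pools[0].devices, vec!["/dev/sda2".to_string()]);
    Ok(())
}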
@@ -163,12 +163,11 @@ pub fn compute_next_event(
             if event.days.contains(day) {
                 t.changes.remove(TMChanges::WDAY);
             } else {
-                if let Some(n) = (day_num+1..6)
-                    .map(|d| WeekDays::from_bits(1<<d).unwrap())
-                    .find(|d| event.days.contains(*d))
+                if let Some(n) = ((day_num+1)..7)
+                    .find(|d| event.days.contains(WeekDays::from_bits(1<<d).unwrap()))
                 {
                     // try next day
-                    t.add_days((n.bits() as i32) - day_num, true);
+                    t.add_days(n - day_num, true);
                     continue;
                 } else {
                     // try next week
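The old code had two related problems visible in this hunk: the range `day_num+1..6` stops before the last weekday bit, and the value it found was a bitflag whose `bits()` (a power of two) was then used as a day offset for add_days. A small sketch of both, assuming the bitmap layout bit 0 = Monday through bit 6 = Sunday, which matches the new tests below (e.g. "fri" is one day after Thursday):

// Illustration only, under the assumed Monday..Sunday = bit 0..bit 6 layout.
fn main() {
    let day_num = 3; // Thursday
    let wanted = 6;  // Sunday

    // Old search range: (day_num+1..6) stops before bit 6, so Sunday is
    // never found and the code falls through to "try next week".
    let old_hit = (day_num + 1..6).find(|d| *d == wanted);
    assert_eq!(old_hit, None);

    // New search range covers all remaining weekdays and yields the day
    // number itself, so `n - day_num` is a plain day offset (3 days).
    let new_hit = ((day_num + 1)..7).find(|d| *d == wanted);
    assert_eq!(new_hit, Some(6));
    assert_eq!(new_hit.unwrap() - day_num, 3); // Thursday -> Sunday

    // The old code additionally passed the *bit value* to add_days:
    // for Sunday that would have been (1 << 6) - day_num = 61 days.
    assert_eq!((1 << wanted) - day_num, 61);
}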
@@ -296,6 +295,13 @@ mod test {
     test_value("mon 2:*", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR)?;
     test_value("mon 2:50", THURSDAY_00_00, THURSDAY_00_00 + 4*DAY + 2*HOUR + 50*MIN)?;
 
+    test_value("tue", THURSDAY_00_00, THURSDAY_00_00 + 5*DAY)?;
+    test_value("wed", THURSDAY_00_00, THURSDAY_00_00 + 6*DAY)?;
+    test_value("thu", THURSDAY_00_00, THURSDAY_00_00 + 7*DAY)?;
+    test_value("fri", THURSDAY_00_00, THURSDAY_00_00 + 1*DAY)?;
+    test_value("sat", THURSDAY_00_00, THURSDAY_00_00 + 2*DAY)?;
+    test_value("sun", THURSDAY_00_00, THURSDAY_00_00 + 3*DAY)?;
+
     test_value("daily", THURSDAY_00_00, THURSDAY_00_00 + DAY)?;
     test_value("daily", THURSDAY_00_00+1, THURSDAY_00_00 + DAY)?;
 
|
|||||||
return `<i class="fa fa-times critical"></i> ${gettext("Error")}:${value}`;
|
return `<i class="fa fa-times critical"></i> ${gettext("Error")}:${value}`;
|
||||||
},
|
},
|
||||||
|
|
||||||
render_optional_timestamp: function(value) {
|
render_next_run: function(value, metadat, record) {
|
||||||
|
if (!value) return '-';
|
||||||
|
|
||||||
|
let now = new Date();
|
||||||
|
let next = new Date(value*1000);
|
||||||
|
|
||||||
|
if (next < now) {
|
||||||
|
return gettext('pending');
|
||||||
|
}
|
||||||
|
return Proxmox.Utils.render_timestamp(value);
|
||||||
|
},
|
||||||
|
|
||||||
|
render_optional_timestamp: function(value, metadata, record) {
|
||||||
if (!value) return '-';
|
if (!value) return '-';
|
||||||
return Proxmox.Utils.render_timestamp(value);
|
return Proxmox.Utils.render_timestamp(value);
|
||||||
},
|
},
|
||||||
@@ -237,7 +249,7 @@ Ext.define('PBS.config.SyncJobView', {
            header: gettext('Next Run'),
            sortable: true,
            minWidth: 200,
-           renderer: 'render_optional_timestamp',
+           renderer: 'render_next_run',
            dataIndex: 'next-run',
        },
        {