avoid chrono dependency, depend on proxmox 0.3.8
- remove chrono dependency
- depend on proxmox 0.3.8
- remove epoch_now, epoch_now_u64 and epoch_now_f64
- remove tm_editor (moved to proxmox crate)
- use new helpers from proxmox 0.3.8:
  * epoch_i64 and epoch_f64
  * parse_rfc3339
  * epoch_to_rfc3339_utc
  * strftime_local
- BackupDir changes:
  * store epoch and RFC 3339 string instead of DateTime<Utc>
  * backup_time_to_string now returns a Result
  * remove unnecessary TryFrom<(BackupGroup, i64)> for BackupDir
- DynamicIndexHeader: change ctime to i64
- FixedIndexHeader: change ctime to i64
This commit is contained in:
parent 58169da46a
commit 6a7be83efe
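A minimal sketch (not part of the commit) of how the old chrono idioms map onto the proxmox 0.3.8 time helpers used throughout this patch; only functions that appear in the diff below are used, and the wrapper function name and literal values are illustrative:

    use anyhow::Error;

    fn time_helper_examples() -> Result<(), Error> {
        // chrono::Utc::now().timestamp() -> plain i64 epoch
        let now: i64 = proxmox::tools::time::epoch_i64();

        // "...".parse::<DateTime<Utc>>() -> parse an RFC 3339 string into an epoch
        let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;

        // to_rfc3339_opts(SecondsFormat::Secs, true) -> format an epoch as RFC 3339 (UTC)
        let rfc3339 = proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)?;

        // Local.timestamp(...).format("%c") -> strftime-style local formatting
        let pretty = proxmox::tools::time::strftime_local("%c", now)?;

        println!("{} {} {}", now, rfc3339, pretty);
        Ok(())
    }

The BackupDir changes mean a snapshot now carries both the epoch and its RFC 3339 form, and the string conversion is fallible. A short usage sketch under the same assumptions (function name and date are arbitrary):

    use anyhow::Error;
    use proxmox_backup::backup::BackupDir;

    fn snapshot_example() -> Result<(), Error> {
        let backup_time = proxmox::tools::time::parse_rfc3339("2020-08-25T12:00:00Z")?;
        let snapshot = BackupDir::new("host", "elsa", backup_time)?;

        // backup_time() now returns the plain i64 epoch ...
        assert_eq!(snapshot.backup_time(), backup_time);
        // ... and the pre-computed RFC 3339 string is available without re-formatting
        println!("{}", snapshot.backup_time_string());

        // the standalone conversion now returns a Result instead of a String
        let rendered = BackupDir::backup_time_to_string(backup_time)?;
        println!("{}", rendered);
        Ok(())
    }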
@@ -18,7 +18,6 @@ apt-pkg-native = "0.3.1" # custom patched version
 base64 = "0.12"
 bitflags = "1.2.1"
 bytes = "0.5"
-chrono = "0.4" # Date and time library for Rust
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
 anyhow = "1.0"
@@ -39,7 +38,7 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.3.5", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.3.8", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
|
|
@ -2,8 +2,6 @@ use std::io::Write;
|
|||
|
||||
use anyhow::{Error};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
use proxmox_backup::api2::types::Userid;
|
||||
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
|
||||
|
||||
|
@ -36,7 +34,7 @@ async fn run() -> Result<(), Error> {
|
|||
|
||||
let client = HttpClient::new(host, username, options)?;
|
||||
|
||||
let backup_time = "2019-06-28T10:49:48Z".parse::<DateTime<Utc>>()?;
|
||||
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
||||
|
||||
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
|
||||
.await?;
|
||||
|
|
|
@ -16,7 +16,7 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||
|
||||
let client = HttpClient::new(host, username, options)?;
|
||||
|
||||
let backup_time = chrono::Utc::now();
|
||||
let backup_time = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
|
||||
|
||||
|
|
|
@ -172,7 +172,7 @@ fn list_groups(
|
|||
let result_item = GroupListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
last_backup: info.backup_dir.backup_time().timestamp(),
|
||||
last_backup: info.backup_dir.backup_time(),
|
||||
backup_count: list.len() as u64,
|
||||
files: info.files.clone(),
|
||||
owner: Some(owner),
|
||||
|
@ -403,7 +403,7 @@ pub fn list_snapshots (
|
|||
let result_item = SnapshotListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
backup_time: info.backup_dir.backup_time().timestamp(),
|
||||
backup_time: info.backup_dir.backup_time(),
|
||||
comment,
|
||||
verification,
|
||||
files,
|
||||
|
@ -673,7 +673,7 @@ fn prune(
|
|||
prune_result.push(json!({
|
||||
"backup-type": group.backup_type(),
|
||||
"backup-id": group.backup_id(),
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"keep": keep,
|
||||
}));
|
||||
}
|
||||
|
@ -697,7 +697,7 @@ fn prune(
|
|||
if keep_all { keep = true; }
|
||||
|
||||
let backup_time = info.backup_dir.backup_time();
|
||||
let timestamp = BackupDir::backup_time_to_string(backup_time);
|
||||
let timestamp = info.backup_dir.backup_time_string();
|
||||
let group = info.backup_dir.group();
|
||||
|
||||
|
||||
|
@ -714,7 +714,7 @@ fn prune(
|
|||
prune_result.push(json!({
|
||||
"backup-type": group.backup_type(),
|
||||
"backup-id": group.backup_id(),
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"keep": keep,
|
||||
}));
|
||||
|
||||
|
@ -1097,7 +1097,7 @@ fn upload_backup_log(
|
|||
}
|
||||
|
||||
println!("Upload backup log to {}/{}/{}/{}/{}", store,
|
||||
backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);
|
||||
backup_type, backup_id, backup_dir.backup_time_string(), file_name);
|
||||
|
||||
let data = req_body
|
||||
.map_err(Error::from)
|
||||
|
|
|
@ -4,7 +4,6 @@ use serde_json::{Value, json};
|
|||
use proxmox::api::{api, Router};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
|
||||
|
||||
pub fn create_value_from_rrd(
|
||||
|
@ -15,7 +14,7 @@ pub fn create_value_from_rrd(
|
|||
) -> Result<Value, Error> {
|
||||
|
||||
let mut result = Vec::new();
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
|
||||
for name in list {
|
||||
let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
use chrono::prelude::*;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
|
@ -57,10 +56,11 @@ fn read_etc_localtime() -> Result<String, Error> {
|
|||
)]
|
||||
/// Read server time and time zone settings.
|
||||
fn get_time(_param: Value) -> Result<Value, Error> {
|
||||
let datetime = Local::now();
|
||||
let offset = datetime.offset();
|
||||
let time = datetime.timestamp();
|
||||
let localtime = time + (offset.fix().local_minus_utc() as i64);
|
||||
let time = proxmox::tools::time::epoch_i64();
|
||||
let tm = proxmox::tools::time::localtime(time)?;
|
||||
let offset = tm.tm_gmtoff;
|
||||
|
||||
let localtime = time + offset;
|
||||
|
||||
Ok(json!({
|
||||
"timezone": read_etc_localtime()?,
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
//use chrono::{Local, TimeZone};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use hyper::header::{self, HeaderValue, UPGRADE};
|
||||
|
@ -88,7 +87,7 @@ fn upgrade_to_backup_reader_protocol(
|
|||
|
||||
//let files = BackupInfo::list_files(&path, &backup_dir)?;
|
||||
|
||||
let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());
|
||||
let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
|
||||
|
||||
WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
|
||||
let mut env = ReaderEnvironment::new(
|
||||
|
|
|
@ -23,7 +23,6 @@ use crate::api2::types::{
|
|||
use crate::server;
|
||||
use crate::backup::{DataStore};
|
||||
use crate::config::datastore;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::tools::statistics::{linear_regression};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::acl::{
|
||||
|
@ -110,7 +109,7 @@ fn datastore_status(
|
|||
});
|
||||
|
||||
let rrd_dir = format!("datastore/{}", store);
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
let rrd_resolution = RRDTimeFrameResolution::Month;
|
||||
let rrd_mode = RRDMode::Average;
|
||||
|
||||
|
|
|
@ -11,7 +11,6 @@ use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
|||
use proxmox::try_block;
|
||||
|
||||
use crate::api2::types::Userid;
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
fn compute_csrf_secret_digest(
|
||||
timestamp: i64,
|
||||
|
@ -32,7 +31,7 @@ pub fn assemble_csrf_prevention_token(
|
|||
userid: &Userid,
|
||||
) -> String {
|
||||
|
||||
let epoch = epoch_now_u64().unwrap() as i64;
|
||||
let epoch = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let digest = compute_csrf_secret_digest(epoch, secret, userid);
|
||||
|
||||
|
@ -69,7 +68,7 @@ pub fn verify_csrf_prevention_token(
|
|||
bail!("invalid signature.");
|
||||
}
|
||||
|
||||
let now = epoch_now_u64()? as i64;
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let age = now - ttime;
|
||||
if age < min_age {
|
||||
|
|
|
@ -2,11 +2,8 @@ use crate::tools;
|
|||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use regex::Regex;
|
||||
use std::convert::TryFrom;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
use chrono::{DateTime, LocalResult, TimeZone, SecondsFormat, Utc};
|
||||
|
||||
use std::path::{PathBuf, Path};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
|
@ -106,8 +103,8 @@ impl BackupGroup {
|
|||
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp())?;
|
||||
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
||||
let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), timestamp)?;
|
||||
let files = list_backup_files(l2_fd, backup_time)?;
|
||||
|
||||
list.push(BackupInfo { backup_dir, files });
|
||||
|
@ -117,7 +114,7 @@ impl BackupGroup {
|
|||
Ok(list)
|
||||
}
|
||||
|
||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<DateTime<Utc>>, Error> {
|
||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
|
||||
|
||||
let mut last = None;
|
||||
|
||||
|
@ -143,11 +140,11 @@ impl BackupGroup {
|
|||
}
|
||||
}
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
if let Some(last_dt) = last {
|
||||
if dt > last_dt { last = Some(dt); }
|
||||
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
||||
if let Some(last_timestamp) = last {
|
||||
if timestamp > last_timestamp { last = Some(timestamp); }
|
||||
} else {
|
||||
last = Some(dt);
|
||||
last = Some(timestamp);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -204,48 +201,51 @@ pub struct BackupDir {
|
|||
/// Backup group
|
||||
group: BackupGroup,
|
||||
/// Backup timestamp
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
// backup_time as rfc3339
|
||||
backup_time_string: String
|
||||
}
|
||||
|
||||
impl BackupDir {
|
||||
|
||||
pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Result<Self, Error>
|
||||
pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
|
||||
where
|
||||
T: Into<String>,
|
||||
U: Into<String>,
|
||||
{
|
||||
let group = BackupGroup::new(backup_type.into(), backup_id.into());
|
||||
BackupDir::new_with_group(group, timestamp)
|
||||
BackupDir::new_with_group(group, backup_time)
|
||||
}
|
||||
|
||||
pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Result<Self, Error> {
|
||||
let backup_time = match Utc.timestamp_opt(timestamp, 0) {
|
||||
LocalResult::Single(time) => time,
|
||||
_ => bail!("can't create BackupDir with invalid backup time {}", timestamp),
|
||||
};
|
||||
|
||||
Ok(Self { group, backup_time })
|
||||
pub fn new_with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
|
||||
let backup_time_string = Self::backup_time_to_string(backup_time)?;
|
||||
Ok(Self { group, backup_time, backup_time_string })
|
||||
}
|
||||
|
||||
pub fn group(&self) -> &BackupGroup {
|
||||
&self.group
|
||||
}
|
||||
|
||||
pub fn backup_time(&self) -> DateTime<Utc> {
|
||||
pub fn backup_time(&self) -> i64 {
|
||||
self.backup_time
|
||||
}
|
||||
|
||||
pub fn backup_time_string(&self) -> &str {
|
||||
&self.backup_time_string
|
||||
}
|
||||
|
||||
pub fn relative_path(&self) -> PathBuf {
|
||||
|
||||
let mut relative_path = self.group.group_path();
|
||||
|
||||
relative_path.push(Self::backup_time_to_string(self.backup_time));
|
||||
relative_path.push(self.backup_time_string.clone());
|
||||
|
||||
relative_path
|
||||
}
|
||||
|
||||
pub fn backup_time_to_string(backup_time: DateTime<Utc>) -> String {
|
||||
backup_time.to_rfc3339_opts(SecondsFormat::Secs, true)
|
||||
pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
|
||||
// fixme: can this fail? (avoid unwrap)
|
||||
proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -260,8 +260,9 @@ impl std::str::FromStr for BackupDir {
|
|||
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
|
||||
|
||||
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
|
||||
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
|
||||
BackupDir::try_from((group, backup_time.timestamp()))
|
||||
let backup_time_string = cap.get(3).unwrap().as_str().to_owned();
|
||||
let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
|
||||
Ok(BackupDir { group, backup_time, backup_time_string })
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -269,16 +270,7 @@ impl std::fmt::Display for BackupDir {
|
|||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let backup_type = self.group.backup_type();
|
||||
let id = self.group.backup_id();
|
||||
let time = Self::backup_time_to_string(self.backup_time);
|
||||
write!(f, "{}/{}/{}", backup_type, id, time)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<(BackupGroup, i64)> for BackupDir {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from((group, timestamp): (BackupGroup, i64)) -> Result<Self, Error> {
|
||||
BackupDir::new_with_group(group, timestamp)
|
||||
write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -336,13 +328,18 @@ impl BackupInfo {
|
|||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
||||
tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time_string, file_type| {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp())?;
|
||||
let backup_time = proxmox::tools::time::parse_rfc3339(backup_time_string)?;
|
||||
|
||||
let files = list_backup_files(l2_fd, backup_time)?;
|
||||
let backup_dir = BackupDir {
|
||||
group: BackupGroup::new(backup_type, backup_id),
|
||||
backup_time,
|
||||
backup_time_string: backup_time_string.to_owned(),
|
||||
};
|
||||
|
||||
let files = list_backup_files(l2_fd, backup_time_string)?;
|
||||
|
||||
list.push(BackupInfo { backup_dir, files });
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@ use std::io::{Read, Write, Seek, SeekFrom};
|
|||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::offset::{TimeZone, Local, LocalResult};
|
||||
|
||||
use pathpatterns::{MatchList, MatchType};
|
||||
use proxmox::tools::io::ReadExt;
|
||||
|
@ -533,10 +532,10 @@ impl <R: Read + Seek> CatalogReader<R> {
|
|||
self.dump_dir(&path, pos)?;
|
||||
}
|
||||
CatalogEntryType::File => {
|
||||
let mtime_string = match Local.timestamp_opt(mtime as i64, 0) {
|
||||
LocalResult::Single(time) => time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false),
|
||||
_ => (mtime as i64).to_string(),
|
||||
};
|
||||
let mut mtime_string = mtime.to_string();
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%FT%TZ", mtime as i64) {
|
||||
mtime_string = s;
|
||||
}
|
||||
|
||||
println!(
|
||||
"{} {:?} {} {}",
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
use std::io::Write;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use chrono::{Local, DateTime};
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::pkcs5::pbkdf2_hmac;
|
||||
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
|
||||
|
@ -216,10 +215,10 @@ impl CryptConfig {
|
|||
pub fn generate_rsa_encoded_key(
|
||||
&self,
|
||||
rsa: openssl::rsa::Rsa<openssl::pkey::Public>,
|
||||
created: DateTime<Local>,
|
||||
created: i64,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let modified = Local::now();
|
||||
let modified = proxmox::tools::time::epoch_i64();
|
||||
let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
|
||||
let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
|
||||
|
||||
|
|
|
@ -6,7 +6,6 @@ use std::convert::TryFrom;
|
|||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
|
@ -242,7 +241,7 @@ impl DataStore {
|
|||
/// Returns the time of the last successful backup
|
||||
///
|
||||
/// Or None if there is no backup in the group (or the group dir does not exist).
|
||||
pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<DateTime<Utc>>, Error> {
|
||||
pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
|
||||
let base_path = self.base_path();
|
||||
let mut group_path = base_path.clone();
|
||||
group_path.push(backup_group.group_path());
|
||||
|
|
|
@ -21,14 +21,14 @@ use super::read_chunk::ReadChunk;
|
|||
use super::Chunker;
|
||||
use super::IndexFile;
|
||||
use super::{DataBlob, DataChunkBuilder};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
use crate::tools;
|
||||
|
||||
/// Header format definition for dynamic index files (`.dixd`)
|
||||
#[repr(C)]
|
||||
pub struct DynamicIndexHeader {
|
||||
pub magic: [u8; 8],
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
/// Sha256 over the index ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||
pub index_csum: [u8; 32],
|
||||
reserved: [u8; 4032], // overall size is one page (4096 bytes)
|
||||
|
@ -77,7 +77,7 @@ pub struct DynamicIndexReader {
|
|||
pub size: usize,
|
||||
index: Mmap<DynamicEntry>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
|
@ -107,7 +107,7 @@ impl DynamicIndexReader {
|
|||
bail!("got unknown magic number");
|
||||
}
|
||||
|
||||
let ctime = u64::from_le(header.ctime);
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let rawfd = file.as_raw_fd();
|
||||
|
||||
|
@ -480,7 +480,7 @@ pub struct DynamicIndexWriter {
|
|||
tmp_filename: PathBuf,
|
||||
csum: Option<openssl::sha::Sha256>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
}
|
||||
|
||||
impl Drop for DynamicIndexWriter {
|
||||
|
@ -506,13 +506,13 @@ impl DynamicIndexWriter {
|
|||
|
||||
let mut writer = BufWriter::with_capacity(1024 * 1024, file);
|
||||
|
||||
let ctime = epoch_now_u64()?;
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
let mut header = DynamicIndexHeader::zeroed();
|
||||
header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
|
||||
header.ctime = u64::to_le(ctime);
|
||||
header.ctime = i64::to_le(ctime);
|
||||
header.uuid = *uuid.as_bytes();
|
||||
// header.index_csum = [0u8; 32];
|
||||
writer.write_all(header.as_bytes())?;
|
||||
|
|
|
@ -4,9 +4,8 @@ use std::io::{Seek, SeekFrom};
|
|||
use super::chunk_stat::*;
|
||||
use super::chunk_store::*;
|
||||
use super::{IndexFile, ChunkReadInfo};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
use crate::tools;
|
||||
|
||||
use chrono::{Local, LocalResult, TimeZone};
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
|
@ -23,7 +22,7 @@ use proxmox::tools::Uuid;
|
|||
pub struct FixedIndexHeader {
|
||||
pub magic: [u8; 8],
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
/// Sha256 over the index ``SHA256(digest1||digest2||...)``
|
||||
pub index_csum: [u8; 32],
|
||||
pub size: u64,
|
||||
|
@ -41,7 +40,7 @@ pub struct FixedIndexReader {
|
|||
index_length: usize,
|
||||
index: *mut u8,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
|
@ -82,7 +81,7 @@ impl FixedIndexReader {
|
|||
}
|
||||
|
||||
let size = u64::from_le(header.size);
|
||||
let ctime = u64::from_le(header.ctime);
|
||||
let ctime = i64::from_le(header.ctime);
|
||||
let chunk_size = u64::from_le(header.chunk_size);
|
||||
|
||||
let index_length = ((size + chunk_size - 1) / chunk_size) as usize;
|
||||
|
@ -148,13 +147,13 @@ impl FixedIndexReader {
|
|||
pub fn print_info(&self) {
|
||||
println!("Size: {}", self.size);
|
||||
println!("ChunkSize: {}", self.chunk_size);
|
||||
println!(
|
||||
"CTime: {}",
|
||||
match Local.timestamp_opt(self.ctime as i64, 0) {
|
||||
LocalResult::Single(ctime) => ctime.format("%c").to_string(),
|
||||
_ => (self.ctime as i64).to_string(),
|
||||
}
|
||||
);
|
||||
|
||||
let mut ctime_str = self.ctime.to_string();
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%c",self.ctime) {
|
||||
ctime_str = s;
|
||||
}
|
||||
|
||||
println!("CTime: {}", ctime_str);
|
||||
println!("UUID: {:?}", self.uuid);
|
||||
}
|
||||
}
|
||||
|
@ -231,7 +230,7 @@ pub struct FixedIndexWriter {
|
|||
index_length: usize,
|
||||
index: *mut u8,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
}
|
||||
|
||||
// `index` is mmap()ed which cannot be thread-local so should be sendable
|
||||
|
@ -274,7 +273,7 @@ impl FixedIndexWriter {
|
|||
panic!("got unexpected header size");
|
||||
}
|
||||
|
||||
let ctime = epoch_now_u64()?;
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
|
@ -282,7 +281,7 @@ impl FixedIndexWriter {
|
|||
let header = unsafe { &mut *(buffer.as_ptr() as *mut FixedIndexHeader) };
|
||||
|
||||
header.magic = super::FIXED_SIZED_CHUNK_INDEX_1_0;
|
||||
header.ctime = u64::to_le(ctime);
|
||||
header.ctime = i64::to_le(ctime);
|
||||
header.size = u64::to_le(size as u64);
|
||||
header.chunk_size = u64::to_le(chunk_size as u64);
|
||||
header.uuid = *uuid.as_bytes();
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
use anyhow::{bail, format_err, Context, Error};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{Local, DateTime};
|
||||
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
|
@ -61,10 +60,10 @@ impl KeyDerivationConfig {
|
|||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub struct KeyConfig {
|
||||
pub kdf: Option<KeyDerivationConfig>,
|
||||
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")]
|
||||
pub created: DateTime<Local>,
|
||||
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")]
|
||||
pub modified: DateTime<Local>,
|
||||
#[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
|
||||
pub created: i64,
|
||||
#[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
|
||||
pub modified: i64,
|
||||
#[serde(with = "proxmox::tools::serde::bytes_as_base64")]
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
@ -136,7 +135,7 @@ pub fn encrypt_key_with_passphrase(
|
|||
enc_data.extend_from_slice(&tag);
|
||||
enc_data.extend_from_slice(&encrypted_key);
|
||||
|
||||
let created = Local::now();
|
||||
let created = proxmox::tools::time::epoch_i64();
|
||||
|
||||
Ok(KeyConfig {
|
||||
kdf: Some(kdf),
|
||||
|
@ -149,7 +148,7 @@ pub fn encrypt_key_with_passphrase(
|
|||
pub fn load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
do_load_and_decrypt_key(path, passphrase)
|
||||
.with_context(|| format!("failed to load decryption key from {:?}", path))
|
||||
}
|
||||
|
@ -157,14 +156,14 @@ pub fn load_and_decrypt_key(
|
|||
fn do_load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
decrypt_key(&file_get_contents(&path)?, passphrase)
|
||||
}
|
||||
|
||||
pub fn decrypt_key(
|
||||
mut keydata: &[u8],
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
|
||||
|
||||
let raw_data = key_config.data;
|
||||
|
|
|
@ -103,7 +103,7 @@ impl BackupManifest {
|
|||
Self {
|
||||
backup_type: snapshot.group().backup_type().into(),
|
||||
backup_id: snapshot.group().backup_id().into(),
|
||||
backup_time: snapshot.backup_time().timestamp(),
|
||||
backup_time: snapshot.backup_time(),
|
||||
files: Vec::new(),
|
||||
unprotected: json!({}),
|
||||
signature: None,
|
||||
|
|
|
@ -2,18 +2,16 @@ use anyhow::{Error};
|
|||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use chrono::{DateTime, Timelike, Datelike, Local};
|
||||
|
||||
use super::{BackupDir, BackupInfo};
|
||||
use super::BackupInfo;
|
||||
|
||||
enum PruneMark { Keep, KeepPartial, Remove }
|
||||
|
||||
fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
||||
mark: &mut HashMap<PathBuf, PruneMark>,
|
||||
list: &Vec<BackupInfo>,
|
||||
keep: usize,
|
||||
select_id: F,
|
||||
) {
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut include_hash = HashSet::new();
|
||||
|
||||
|
@ -21,8 +19,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
|||
for info in list {
|
||||
let backup_id = info.backup_dir.relative_path();
|
||||
if let Some(PruneMark::Keep) = mark.get(&backup_id) {
|
||||
let local_time = info.backup_dir.backup_time().with_timezone(&Local);
|
||||
let sel_id: String = select_id(local_time, &info);
|
||||
let sel_id: String = select_id(&info)?;
|
||||
already_included.insert(sel_id);
|
||||
}
|
||||
}
|
||||
|
@ -30,8 +27,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
|||
for info in list {
|
||||
let backup_id = info.backup_dir.relative_path();
|
||||
if let Some(_) = mark.get(&backup_id) { continue; }
|
||||
let local_time = info.backup_dir.backup_time().with_timezone(&Local);
|
||||
let sel_id: String = select_id(local_time, &info);
|
||||
let sel_id: String = select_id(&info)?;
|
||||
|
||||
if already_included.contains(&sel_id) { continue; }
|
||||
|
||||
|
@ -43,6 +39,8 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
|||
mark.insert(backup_id, PruneMark::Remove);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove_incomplete_snapshots(
|
||||
|
@ -182,44 +180,43 @@ pub fn compute_prune_info(
|
|||
remove_incomplete_snapshots(&mut mark, &list);
|
||||
|
||||
if let Some(keep_last) = options.keep_last {
|
||||
mark_selections(&mut mark, &list, keep_last as usize, |_local_time, info| {
|
||||
BackupDir::backup_time_to_string(info.backup_dir.backup_time())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_last as usize, |info| {
|
||||
Ok(info.backup_dir.backup_time_string().to_owned())
|
||||
})?;
|
||||
}
|
||||
|
||||
use proxmox::tools::time::strftime_local;
|
||||
|
||||
if let Some(keep_hourly) = options.keep_hourly {
|
||||
mark_selections(&mut mark, &list, keep_hourly as usize, |local_time, _info| {
|
||||
format!("{}/{}/{}/{}", local_time.year(), local_time.month(),
|
||||
local_time.day(), local_time.hour())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
|
||||
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_daily) = options.keep_daily {
|
||||
mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| {
|
||||
format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_daily as usize, |info| {
|
||||
strftime_local("%Y/%m/%d", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_weekly) = options.keep_weekly {
|
||||
mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| {
|
||||
let iso_week = local_time.iso_week();
|
||||
let week = iso_week.week();
|
||||
// Note: This year number might not match the calendar year number.
|
||||
let iso_week_year = iso_week.year();
|
||||
format!("{}/{}", iso_week_year, week)
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
|
||||
// Note: Use iso-week year/week here. This year number
|
||||
// might not match the calendar year number.
|
||||
strftime_local("%G/%V", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_monthly) = options.keep_monthly {
|
||||
mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| {
|
||||
format!("{}/{}", local_time.year(), local_time.month())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
|
||||
strftime_local("%Y/%m", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_yearly) = options.keep_yearly {
|
||||
mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| {
|
||||
format!("{}/{}", local_time.year(), local_time.year())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
|
||||
strftime_local("%Y", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
let prune_info: Vec<(BackupInfo, bool)> = list.into_iter()
|
||||
|
|
|
@ -8,7 +8,6 @@ use std::sync::{Arc, Mutex};
|
|||
use std::task::Context;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{Local, LocalResult, DateTime, Utc, TimeZone};
|
||||
use futures::future::FutureExt;
|
||||
use futures::stream::{StreamExt, TryStreamExt};
|
||||
use serde_json::{json, Value};
|
||||
|
@ -16,11 +15,20 @@ use tokio::sync::mpsc;
|
|||
use xdg::BaseDirectories;
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::api::cli::*;
|
||||
use proxmox::api::api;
|
||||
use proxmox::{
|
||||
tools::{
|
||||
time::{strftime_local, epoch_i64},
|
||||
fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
|
||||
},
|
||||
api::{
|
||||
api,
|
||||
ApiHandler,
|
||||
ApiMethod,
|
||||
RpcEnvironment,
|
||||
schema::*,
|
||||
cli::*,
|
||||
},
|
||||
};
|
||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||
|
||||
use proxmox_backup::tools;
|
||||
|
@ -246,7 +254,7 @@ pub async fn api_datastore_latest_snapshot(
|
|||
client: &HttpClient,
|
||||
store: &str,
|
||||
group: BackupGroup,
|
||||
) -> Result<(String, String, DateTime<Utc>), Error> {
|
||||
) -> Result<(String, String, i64), Error> {
|
||||
|
||||
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
|
||||
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
|
||||
|
@ -257,11 +265,7 @@ pub async fn api_datastore_latest_snapshot(
|
|||
|
||||
list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
|
||||
|
||||
let backup_time = match Utc.timestamp_opt(list[0].backup_time, 0) {
|
||||
LocalResult::Single(time) => time,
|
||||
_ => bail!("last snapshot of backup group {:?} has invalid timestmap {}.",
|
||||
group.group_path(), list[0].backup_time),
|
||||
};
|
||||
let backup_time = list[0].backup_time;
|
||||
|
||||
Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
|
||||
}
|
||||
|
@ -506,7 +510,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
|
|||
let result = client.delete(&path, Some(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
}))).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
@ -643,7 +647,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
|
|||
let mut result = client.get(&path, Some(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
}))).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
@ -990,26 +994,18 @@ async fn create_backup(
|
|||
}
|
||||
}
|
||||
|
||||
let backup_time = match backup_time_opt {
|
||||
Some(timestamp) => {
|
||||
match Utc.timestamp_opt(timestamp, 0) {
|
||||
LocalResult::Single(time) => time,
|
||||
_ => bail!("Invalid backup-time parameter: {}", timestamp),
|
||||
}
|
||||
},
|
||||
_ => Utc::now(),
|
||||
};
|
||||
let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
record_repository(&repo);
|
||||
|
||||
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
|
||||
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
|
||||
|
||||
println!("Client name: {}", proxmox::tools::nodename());
|
||||
|
||||
let start_time = Local::now();
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
|
||||
println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);
|
||||
|
||||
let (crypt_config, rsa_encrypted_key) = match keydata {
|
||||
None => (None, None),
|
||||
|
@ -1047,7 +1043,7 @@ async fn create_backup(
|
|||
None
|
||||
};
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp())?;
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
let mut manifest = BackupManifest::new(snapshot);
|
||||
|
||||
let mut catalog = None;
|
||||
|
@ -1162,11 +1158,11 @@ async fn create_backup(
|
|||
|
||||
client.finish().await?;
|
||||
|
||||
let end_time = Local::now();
|
||||
let elapsed = end_time.signed_duration_since(start_time);
|
||||
println!("Duration: {}", elapsed);
|
||||
let end_time = std::time::Instant::now();
|
||||
let elapsed = end_time.duration_since(start_time);
|
||||
println!("Duration: {:.2}s", elapsed.as_secs_f64());
|
||||
|
||||
println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
|
||||
println!("End Time: {}", strftime_local("%c", epoch_i64())?);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
@ -1504,7 +1500,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
|
|||
let args = json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
});
|
||||
|
||||
let body = hyper::Body::from(raw_data);
|
||||
|
@ -1800,7 +1796,7 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
|
|||
let query = tools::json_object_to_query(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
})).unwrap();
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
|
||||
|
|
|
@ -13,7 +13,7 @@ use proxmox_backup::api2::types::Userid;
|
|||
use proxmox_backup::configdir;
|
||||
use proxmox_backup::buildcfg;
|
||||
use proxmox_backup::server;
|
||||
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
|
||||
use proxmox_backup::tools::daemon;
|
||||
use proxmox_backup::server::{ApiConfig, rest::*};
|
||||
use proxmox_backup::auth_helpers::*;
|
||||
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
|
||||
|
@ -144,11 +144,12 @@ fn start_task_scheduler() {
|
|||
tokio::spawn(task.map(|_| ()));
|
||||
}
|
||||
|
||||
use std::time:: {Instant, Duration};
|
||||
use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};
|
||||
|
||||
fn next_minute() -> Result<Instant, Error> {
|
||||
let epoch_now = epoch_now()?;
|
||||
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
|
||||
let now = SystemTime::now();
|
||||
let epoch_now = now.duration_since(UNIX_EPOCH)?;
|
||||
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
|
||||
Ok(Instant::now() + epoch_next - epoch_now)
|
||||
}
|
||||
|
||||
|
@ -308,13 +309,8 @@ async fn schedule_datastore_garbage_collection() {
|
|||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let store2 = store.clone();
|
||||
|
@ -338,7 +334,7 @@ async fn schedule_datastore_garbage_collection() {
|
|||
async fn schedule_datastore_prune() {
|
||||
|
||||
use proxmox_backup::backup::{
|
||||
PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
|
||||
PruneOptions, DataStore, BackupGroup, compute_prune_info};
|
||||
use proxmox_backup::server::{WorkerTask};
|
||||
use proxmox_backup::config::datastore::{self, DataStoreConfig};
|
||||
use proxmox_backup::tools::systemd::time::{
|
||||
|
@ -420,13 +416,8 @@ async fn schedule_datastore_prune() {
|
|||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let store2 = store.clone();
|
||||
|
@ -457,8 +448,7 @@ async fn schedule_datastore_prune() {
|
|||
"{} {}/{}/{}",
|
||||
if keep { "keep" } else { "remove" },
|
||||
group.backup_type(), group.backup_id(),
|
||||
BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
|
||||
|
||||
info.backup_dir.backup_time_string()));
|
||||
if !keep {
|
||||
datastore.remove_backup_dir(&info.backup_dir, true)?;
|
||||
}
|
||||
|
@ -529,13 +519,8 @@ async fn schedule_datastore_sync_jobs() {
|
|||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let job = match Job::new(worker_type, &job_id) {
|
||||
|
|
|
@ -3,7 +3,6 @@ use std::sync::Arc;
|
|||
|
||||
use anyhow::{Error};
|
||||
use serde_json::Value;
|
||||
use chrono::Utc;
|
||||
use serde::Serialize;
|
||||
|
||||
use proxmox::api::{ApiMethod, RpcEnvironment};
|
||||
|
@ -212,7 +211,7 @@ async fn test_upload_speed(
|
|||
verbose: bool,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let backup_time = Utc::now();
|
||||
let backup_time = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
record_repository(&repo);
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::Local;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
|
@ -112,7 +111,7 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
|
|||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let created = Local::now();
|
||||
let created = proxmox::tools::time::epoch_i64();
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
|
@ -180,7 +179,7 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
|
|||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let modified = Local::now();
|
||||
let modified = proxmox::tools::time::epoch_i64();
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
|
|
|
@ -4,7 +4,6 @@ use std::fs::File;
|
|||
use std::sync::Arc;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures::future::AbortHandle;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
|
@ -41,14 +40,14 @@ impl BackupReader {
|
|||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
debug: bool,
|
||||
) -> Result<Arc<BackupReader>, Error> {
|
||||
|
||||
let param = json!({
|
||||
"backup-type": backup_type,
|
||||
"backup-id": backup_id,
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"store": datastore,
|
||||
"debug": debug,
|
||||
});
|
||||
|
|
|
@ -4,7 +4,6 @@ use std::sync::atomic::{AtomicUsize, Ordering};
|
|||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures::*;
|
||||
use futures::stream::Stream;
|
||||
use futures::future::AbortHandle;
|
||||
|
@ -51,7 +50,7 @@ impl BackupWriter {
|
|||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
debug: bool,
|
||||
benchmark: bool
|
||||
) -> Result<Arc<BackupWriter>, Error> {
|
||||
|
@ -59,7 +58,7 @@ impl BackupWriter {
|
|||
let param = json!({
|
||||
"backup-type": backup_type,
|
||||
"backup-id": backup_id,
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"store": datastore,
|
||||
"debug": debug,
|
||||
"benchmark": benchmark
|
||||
|
|
|
@ -2,7 +2,6 @@ use std::io::Write;
|
|||
use std::task::{Context, Poll};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use chrono::Utc;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use http::Uri;
|
||||
|
@ -199,7 +198,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
|
|||
|
||||
let mut data = file_get_json(&path, Some(json!({})))?;
|
||||
|
||||
let now = Utc::now().timestamp();
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
|
||||
|
||||
|
@ -230,7 +229,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
|
|||
// usually /run/user/<uid>/...
|
||||
let path = base.place_runtime_file("tickets").ok()?;
|
||||
let data = file_get_json(&path, None).ok()?;
|
||||
let now = Utc::now().timestamp();
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
|
||||
let uinfo = data[server][userid.as_str()].as_object()?;
|
||||
let timestamp = uinfo["timestamp"].as_i64()?;
|
||||
|
|
|
@ -48,7 +48,6 @@ use proxmox::tools::fs::{
|
|||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::server::{upid_read_status, worker_is_active_local, TaskState, UPID};
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
|
@ -178,7 +177,7 @@ impl JobState {
|
|||
}
|
||||
} else {
|
||||
Ok(JobState::Created {
|
||||
time: epoch_now_u64()? as i64 - 30,
|
||||
time: proxmox::tools::time::epoch_i64() - 30,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -199,7 +198,7 @@ impl Job {
|
|||
jobtype: jobtype.to_string(),
|
||||
jobname: jobname.to_string(),
|
||||
state: JobState::Created {
|
||||
time: epoch_now_u64()? as i64,
|
||||
time: proxmox::tools::time::epoch_i64(),
|
||||
},
|
||||
_lock,
|
||||
})
|
||||
|
|
|
@ -115,12 +115,10 @@ fn mode_string(entry: &Entry) -> String {
|
|||
}
|
||||
|
||||
fn format_mtime(mtime: &StatxTimestamp) -> String {
|
||||
use chrono::offset::TimeZone;
|
||||
|
||||
match chrono::Local.timestamp_opt(mtime.secs, mtime.nanos) {
|
||||
chrono::LocalResult::Single(mtime) => mtime.format("%Y-%m-%d %H:%M:%S").to_string(),
|
||||
_ => format!("{}.{}", mtime.secs, mtime.nanos),
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
|
||||
return s;
|
||||
}
|
||||
format!("{}.{}", mtime.secs, mtime.nanos)
|
||||
}
|
||||
|
||||
pub fn format_single_line_entry(entry: &Entry) -> String {
|
||||
|
|
|
@ -8,7 +8,6 @@ use lazy_static::lazy_static;
|
|||
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||
|
||||
use crate::api2::types::{RRDMode, RRDTimeFrameResolution};
|
||||
use crate::tools::epoch_now_f64;
|
||||
|
||||
use super::*;
|
||||
|
||||
|
@ -42,7 +41,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<
|
|||
std::fs::create_dir_all(path.parent().unwrap())?;
|
||||
|
||||
let mut map = RRD_CACHE.write().unwrap();
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
|
||||
if let Some(rrd) = map.get_mut(rel_path) {
|
||||
rrd.update(now, value);
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use chrono::Local;
|
||||
|
||||
use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
|
||||
use proxmox::const_regex;
|
||||
|
@ -89,7 +88,7 @@ impl UPID {
|
|||
Ok(UPID {
|
||||
pid,
|
||||
pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime,
|
||||
starttime: Local::now().timestamp(),
|
||||
starttime: proxmox::tools::time::epoch_i64(),
|
||||
task_id,
|
||||
worker_type: worker_type.to_owned(),
|
||||
worker_id,
|
||||
|
|
|
@ -5,7 +5,6 @@ use std::panic::UnwindSafe;
|
|||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use chrono::Local;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use lazy_static::lazy_static;
|
||||
|
@ -231,9 +230,7 @@ pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
|
|||
|
||||
let mut iter = last_line.splitn(2, ": ");
|
||||
if let Some(time_str) = iter.next() {
|
||||
if let Ok(endtime) = chrono::DateTime::parse_from_rfc3339(time_str) {
|
||||
let endtime = endtime.timestamp();
|
||||
|
||||
if let Ok(endtime) = proxmox::tools::time::parse_rfc3339(time_str) {
|
||||
if let Some(rest) = iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
|
||||
if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
|
||||
status = state;
|
||||
|
@ -364,8 +361,9 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
|
|||
},
|
||||
None => {
|
||||
println!("Detected stopped UPID {}", upid_str);
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let status = upid_read_status(&upid)
|
||||
.unwrap_or_else(|_| TaskState::Unknown { endtime: Local::now().timestamp() });
|
||||
.unwrap_or_else(|_| TaskState::Unknown { endtime: now });
|
||||
finish_list.push(TaskListInfo {
|
||||
upid, upid_str, state: Some(status)
|
||||
});
|
||||
|
@ -589,7 +587,7 @@ impl WorkerTask {
|
|||
pub fn create_state(&self, result: &Result<(), Error>) -> TaskState {
|
||||
let warn_count = self.data.lock().unwrap().warn_count;
|
||||
|
||||
let endtime = Local::now().timestamp();
|
||||
let endtime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if let Err(err) = result {
|
||||
TaskState::Error { message: err.to_string(), endtime }
|
||||
|
|
14 src/tools.rs
|
@ -8,8 +8,6 @@ use std::fs::File;
|
|||
use std::io::{self, BufRead, ErrorKind, Read};
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
use std::time::{SystemTime, SystemTimeError, UNIX_EPOCH};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
|
@ -547,18 +545,6 @@ pub fn file_get_non_comment_lines<P: AsRef<Path>>(
|
|||
}))
|
||||
}
|
||||
|
||||
pub fn epoch_now() -> Result<Duration, SystemTimeError> {
|
||||
SystemTime::now().duration_since(UNIX_EPOCH)
|
||||
}
|
||||
|
||||
pub fn epoch_now_f64() -> Result<f64, SystemTimeError> {
|
||||
Ok(epoch_now()?.as_secs_f64())
|
||||
}
|
||||
|
||||
pub fn epoch_now_u64() -> Result<u64, SystemTimeError> {
|
||||
Ok(epoch_now()?.as_secs())
|
||||
}
|
||||
|
||||
pub fn setup_safe_path_env() {
|
||||
std::env::set_var("PATH", "/sbin:/bin:/usr/sbin:/usr/bin");
|
||||
// Make %ENV safer - as suggested by https://perldoc.perl.org/perlsec.html
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
use anyhow::{Error};
|
||||
use chrono::Local;
|
||||
use std::io::Write;
|
||||
|
||||
/// Log messages with timestamps into files
|
||||
|
@ -56,7 +55,10 @@ impl FileLogger {
|
|||
stdout.write_all(b"\n").unwrap();
|
||||
}
|
||||
|
||||
let line = format!("{}: {}\n", Local::now().to_rfc3339(), msg);
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let rfc3339 = proxmox::tools::time::epoch_to_rfc3339(now).unwrap();
|
||||
|
||||
let line = format!("{}: {}\n", rfc3339, msg);
|
||||
self.file.write_all(line.as_bytes()).unwrap();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
use anyhow::{Error};
|
||||
use serde_json::Value;
|
||||
use chrono::{Local, TimeZone, LocalResult};
|
||||
|
||||
pub fn strip_server_file_expenstion(name: &str) -> String {
|
||||
|
||||
|
@ -25,9 +24,10 @@ pub fn render_epoch(value: &Value, _record: &Value) -> Result<String, Error> {
|
|||
if value.is_null() { return Ok(String::new()); }
|
||||
let text = match value.as_i64() {
|
||||
Some(epoch) => {
|
||||
match Local.timestamp_opt(epoch, 0) {
|
||||
LocalResult::Single(epoch) => epoch.format("%c").to_string(),
|
||||
_ => epoch.to_string(),
|
||||
if let Ok(epoch_string) = proxmox::tools::time::strftime_local("%c", epoch as i64) {
|
||||
epoch_string
|
||||
} else {
|
||||
epoch.to_string()
|
||||
}
|
||||
},
|
||||
None => {
|
||||
|
|
|
@ -2,7 +2,6 @@ pub mod types;
|
|||
pub mod config;
|
||||
|
||||
mod parse_time;
|
||||
pub mod tm_editor;
|
||||
pub mod time;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
|
|
|
@ -3,8 +3,9 @@ use std::convert::TryInto;
|
|||
use anyhow::Error;
|
||||
use bitflags::bitflags;
|
||||
|
||||
use proxmox::tools::time::TmEditor;
|
||||
|
||||
pub use super::parse_time::*;
|
||||
use super::tm_editor::*;
|
||||
|
||||
bitflags!{
|
||||
#[derive(Default)]
|
||||
|
@ -161,7 +162,7 @@ pub fn compute_next_event(
|
|||
|
||||
let all_days = event.days.is_empty() || event.days.is_all();
|
||||
|
||||
let mut t = TmEditor::new(last, utc)?;
|
||||
let mut t = TmEditor::with_epoch(last, utc)?;
|
||||
|
||||
let mut count = 0;
|
||||
|
||||
|
|
|
@ -1,119 +0,0 @@
|
|||
use anyhow::Error;
|
||||
|
||||
use proxmox::tools::time::*;
|
||||
|
||||
pub struct TmEditor {
|
||||
utc: bool,
|
||||
t: libc::tm,
|
||||
}
|
||||
|
||||
impl TmEditor {
|
||||
|
||||
pub fn new(epoch: i64, utc: bool) -> Result<Self, Error> {
|
||||
let t = if utc { gmtime(epoch)? } else { localtime(epoch)? };
|
||||
Ok(Self { utc, t })
|
||||
}
|
||||
|
||||
pub fn into_epoch(mut self) -> Result<i64, Error> {
|
||||
let epoch = if self.utc { timegm(&mut self.t)? } else { timelocal(&mut self.t)? };
|
||||
Ok(epoch)
|
||||
}
|
||||
|
||||
/// increases the year by 'years' and resets all smaller fields to their minimum
|
||||
pub fn add_years(&mut self, years: libc::c_int) -> Result<(), Error> {
|
||||
if years == 0 { return Ok(()); }
|
||||
self.t.tm_mon = 0;
|
||||
self.t.tm_mday = 1;
|
||||
self.t.tm_hour = 0;
|
||||
self.t.tm_min = 0;
|
||||
self.t.tm_sec = 0;
|
||||
self.t.tm_year += years;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
/// increases the month by 'months' and resets all smaller fields to their minimum
|
||||
pub fn add_months(&mut self, months: libc::c_int) -> Result<(), Error> {
|
||||
if months == 0 { return Ok(()); }
|
||||
self.t.tm_mday = 1;
|
||||
self.t.tm_hour = 0;
|
||||
self.t.tm_min = 0;
|
||||
self.t.tm_sec = 0;
|
||||
self.t.tm_mon += months;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
/// increases the day by 'days' and resets all smaller fields to their minimum
|
||||
pub fn add_days(&mut self, days: libc::c_int) -> Result<(), Error> {
|
||||
if days == 0 { return Ok(()); }
|
||||
self.t.tm_hour = 0;
|
||||
self.t.tm_min = 0;
|
||||
self.t.tm_sec = 0;
|
||||
self.t.tm_mday += days;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
pub fn year(&self) -> libc::c_int { self.t.tm_year + 1900 } // see man mktime
|
||||
pub fn month(&self) -> libc::c_int { self.t.tm_mon + 1 }
|
||||
pub fn day(&self) -> libc::c_int { self.t.tm_mday }
|
||||
pub fn hour(&self) -> libc::c_int { self.t.tm_hour }
|
||||
pub fn min(&self) -> libc::c_int { self.t.tm_min }
|
||||
pub fn sec(&self) -> libc::c_int { self.t.tm_sec }
|
||||
|
||||
// Note: tm_wday (0-6, Sunday = 0) => convert to Sunday = 6
|
||||
pub fn day_num(&self) -> libc::c_int {
|
||||
(self.t.tm_wday + 6) % 7
|
||||
}
|
||||
|
||||
pub fn set_time(&mut self, hour: libc::c_int, min: libc::c_int, sec: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_hour = hour;
|
||||
self.t.tm_min = min;
|
||||
self.t.tm_sec = sec;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
pub fn set_min_sec(&mut self, min: libc::c_int, sec: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_min = min;
|
||||
self.t.tm_sec = sec;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
fn normalize_time(&mut self) -> Result<(), Error> {
|
||||
// libc normalizes it for us
|
||||
if self.utc {
|
||||
timegm(&mut self.t)?;
|
||||
} else {
|
||||
timelocal(&mut self.t)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_sec(&mut self, v: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_sec = v;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
pub fn set_min(&mut self, v: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_min = v;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
pub fn set_hour(&mut self, v: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_hour = v;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
pub fn set_mday(&mut self, v: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_mday = v;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
pub fn set_mon(&mut self, v: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_mon = v - 1;
|
||||
self.normalize_time()
|
||||
}
|
||||
|
||||
pub fn set_year(&mut self, v: libc::c_int) -> Result<(), Error> {
|
||||
self.t.tm_year = v - 1900;
|
||||
self.normalize_time()
|
||||
}
|
||||
}
|
|
@ -11,7 +11,6 @@ use openssl::sign::{Signer, Verifier};
|
|||
use percent_encoding::{percent_decode_str, percent_encode, AsciiSet};
|
||||
|
||||
use crate::api2::types::Userid;
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
pub const TICKET_LIFETIME: i64 = 3600 * 2; // 2 hours
|
||||
|
||||
|
@ -69,7 +68,7 @@ where
|
|||
Ok(Self {
|
||||
prefix: Cow::Borrowed(prefix),
|
||||
data: data.to_string(),
|
||||
time: epoch_now_u64()? as i64,
|
||||
time: proxmox::tools::time::epoch_i64(),
|
||||
signature: None,
|
||||
_type_marker: PhantomData,
|
||||
})
|
||||
|
@ -174,7 +173,7 @@ where
|
|||
None => bail!("invalid ticket without signature"),
|
||||
};
|
||||
|
||||
let age = epoch_now_u64()? as i64 - self.time;
|
||||
let age = proxmox::tools::time::epoch_i64() - self.time;
|
||||
if age < time_frame.start {
|
||||
bail!("invalid ticket - timestamp newer than expected");
|
||||
}
|
||||
|
@ -272,7 +271,6 @@ mod test {
|
|||
|
||||
use super::Ticket;
|
||||
use crate::api2::types::Userid;
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
fn simple_test<F>(key: &PKey<Private>, aad: Option<&str>, modify: F)
|
||||
where
|
||||
|
@ -314,7 +312,7 @@ mod test {
|
|||
false
|
||||
});
|
||||
simple_test(&key, None, |t| {
|
||||
t.change_time(epoch_now_u64().unwrap() as i64 + 0x1000_0000);
|
||||
t.change_time(proxmox::tools::time::epoch_i64() + 0x1000_0000);
|
||||
false
|
||||
});
|
||||
}
|
||||
|
|