Compare commits

12 Commits

e9764238df
26f499b17b
cc7995ac40
43abba4b4f
58f950c546
c426e65893
caea8d611f
7d0754a6d2
5afa0755ea
40b63186a6
8f6088c130
2162e2c15d
@@ -441,13 +441,13 @@ pub fn verify(
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
+            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
             let dir = BackupDir::new(backup_type, backup_id, backup_time);
-            worker_id = format!("{}_{}", store, dir);
             backup_dir = Some(dir);
         }
         (Some(backup_type), Some(backup_id), None) => {
+            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
             let group = BackupGroup::new(backup_type, backup_id);
-            worker_id = format!("{}_{}", store, group);
             backup_group = Some(group);
         }
         (None, None, None) => {
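Note on the hunk above: the verify worker IDs switch to an underscore-separated layout (store, type, id, plus an optional zero-padded hex timestamp), which is what the updated render_datastore_worker_id regex in www/Utils.js near the end of this compare expects. A minimal stand-alone sketch of that layout, using invented store/group values rather than anything from these commits:

    // Sketch only: shows the worker-ID layout from the hunk above with made-up values.
    fn main() {
        let store = "store2";
        let (backup_type, backup_id) = ("vm", "100");
        let backup_time: i64 = 0x5F00_0000; // epoch seconds, rendered as 8 hex digits

        // snapshot verification: store_type_id_TIME
        let snapshot_worker = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
        assert_eq!(snapshot_worker, "store2_vm_100_5F000000");

        // group verification: no time component
        let group_worker = format!("{}_{}_{}", store, backup_type, backup_id);
        assert_eq!(group_worker, "store2_vm_100");

        println!("{}\n{}", snapshot_worker, group_worker);
    }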
@@ -45,7 +45,7 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
     }
 
 impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
-S: AsyncReadChunk + Unpin + 'static,
+S: AsyncReadChunk + Unpin + Sync + 'static,
 I: IndexFile + Unpin
 {
     fn poll_read(
@@ -74,7 +74,7 @@ I: IndexFile + Unpin
                     this.current_chunk_digest = digest;
 
-                    let mut store = match this.store.take() {
+                    let store = match this.store.take() {
                         Some(store) => store,
                         None => {
                             return Poll::Ready(Err(io_format_err!("could not find store")));
@@ -11,10 +11,10 @@ use super::datastore::DataStore;
 /// The ReadChunk trait allows reading backup data chunks (local or remote)
 pub trait ReadChunk {
     /// Returns the encoded chunk data
-    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error>;
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error>;
 
     /// Returns the decoded chunk data
-    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
 }
 
 #[derive(Clone)]
@@ -33,7 +33,7 @@ impl LocalChunkReader {
 }
 
 impl ReadChunk for LocalChunkReader {
-    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let (path, _) = self.store.chunk_path(digest);
         let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
         let chunk = DataBlob::from_raw(raw_data)?;
@@ -42,7 +42,7 @@ impl ReadChunk for LocalChunkReader {
         Ok(chunk)
     }
 
-    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
         let chunk = ReadChunk::read_raw_chunk(self, digest)?;
 
         let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
@@ -56,20 +56,20 @@ impl ReadChunk for LocalChunkReader {
 pub trait AsyncReadChunk: Send {
     /// Returns the encoded chunk data
     fn read_raw_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>>;
 
     /// Returns the decoded chunk data
     fn read_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>>;
 }
 
 impl AsyncReadChunk for LocalChunkReader {
     fn read_raw_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
         Box::pin(async move{
@@ -84,7 +84,7 @@ impl AsyncReadChunk for LocalChunkReader {
     }
 
     fn read_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
         Box::pin(async move {
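The ReadChunk and AsyncReadChunk methods above now take &self instead of &mut self. A reader that still wants to memoize chunks therefore has to keep that state behind interior mutability, which is how the RemoteChunkReader hunks further down handle their cache (a Mutex-wrapped HashMap). A self-contained sketch of the pattern, using a hypothetical InMemoryChunkReader and a simplified trait rather than the proxmox-backup types:

    use std::collections::HashMap;
    use std::sync::Mutex;

    // Simplified stand-in for the trait shape after this change: methods borrow &self.
    trait ReadChunk {
        fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, String>;
    }

    // Hypothetical reader: the cache sits behind a Mutex so it can be updated
    // through a shared reference (and shared between threads).
    struct InMemoryChunkReader {
        backing: HashMap<[u8; 32], Vec<u8>>,
        cache: Mutex<HashMap<[u8; 32], Vec<u8>>>,
    }

    impl ReadChunk for InMemoryChunkReader {
        fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, String> {
            if let Some(data) = self.cache.lock().unwrap().get(digest) {
                return Ok(data.clone()); // cache hit, no &mut self required
            }
            let data = self
                .backing
                .get(digest)
                .cloned()
                .ok_or_else(|| "unknown chunk".to_string())?;
            self.cache.lock().unwrap().insert(*digest, data.clone());
            Ok(data)
        }
    }

    fn main() {
        let digest = [0u8; 32];
        let mut backing = HashMap::new();
        backing.insert(digest, b"chunk payload".to_vec());
        let reader = InMemoryChunkReader { backing, cache: Mutex::new(HashMap::new()) };
        assert_eq!(reader.read_chunk(&digest).unwrap(), b"chunk payload".to_vec());
        assert_eq!(reader.read_chunk(&digest).unwrap(), b"chunk payload".to_vec()); // served from cache
    }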
@@ -1,8 +1,6 @@
 use std::collections::{HashSet, HashMap};
-use std::ffi::OsStr;
 use std::io::{self, Write, Seek, SeekFrom};
 use std::os::unix::fs::OpenOptionsExt;
-use std::os::unix::io::RawFd;
 use std::path::{Path, PathBuf};
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};
@@ -11,11 +9,8 @@ use std::task::Context;
 use anyhow::{bail, format_err, Error};
 use chrono::{Local, DateTime, Utc, TimeZone};
 use futures::future::FutureExt;
-use futures::select;
 use futures::stream::{StreamExt, TryStreamExt};
-use nix::unistd::{fork, ForkResult, pipe};
 use serde_json::{json, Value};
-use tokio::signal::unix::{signal, SignalKind};
 use tokio::sync::mpsc;
 use xdg::BaseDirectories;
 
@@ -60,16 +55,19 @@ use proxmox_backup::backup::{
     Shell,
 };
 
+mod proxmox_backup_client;
+use proxmox_backup_client::*;
+
 const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
 const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
 
 
-const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
+pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
     .format(&BACKUP_REPO_URL)
     .max_length(256)
     .schema();
 
-const KEYFILE_SCHEMA: Schema = StringSchema::new(
+pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
     "Path to encryption key. All data will be encrypted using this key.")
     .schema();
 
@@ -84,7 +82,7 @@ fn get_default_repository() -> Option<String> {
     std::env::var("PBS_REPOSITORY").ok()
 }
 
-fn extract_repository_from_value(
+pub fn extract_repository_from_value(
     param: &Value,
 ) -> Result<BackupRepository, Error> {
 
@@ -157,7 +155,7 @@ fn record_repository(repo: &BackupRepository) {
     let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
 }
 
-fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
 
     let mut result = vec![];
 
@@ -241,7 +239,7 @@ async fn api_datastore_list_snapshots(
     Ok(result["data"].take())
 }
 
-async fn api_datastore_latest_snapshot(
+pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
     group: BackupGroup,
@@ -1121,7 +1119,7 @@ async fn dump_image<W: Write>(
 
     let most_used = index.find_most_used_chunks(8);
 
-    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
 
     // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
     // and thus slows down reading. Instead, directly use RemoteChunkReader
@@ -1615,7 +1613,7 @@ async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String
     result
 }
 
-fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
 }
 
@@ -1716,7 +1714,7 @@ fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<Stri
     .collect()
 }
 
-fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     complete_server_file_name(arg, param)
         .iter()
         .filter_map(|v| {
@@ -1985,36 +1983,6 @@ fn key_mgmt_cli() -> CliCommandMap {
         .insert("change-passphrase", key_change_passphrase_cmd_def)
 }
 
-fn mount(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-    let verbose = param["verbose"].as_bool().unwrap_or(false);
-    if verbose {
-        // This will stay in foreground with debug output enabled as None is
-        // passed for the RawFd.
-        return proxmox_backup::tools::runtime::main(mount_do(param, None));
-    }
-
-    // Process should be deamonized.
-    // Make sure to fork before the async runtime is instantiated to avoid troubles.
-    let pipe = pipe()?;
-    match fork() {
-        Ok(ForkResult::Parent { .. }) => {
-            nix::unistd::close(pipe.1).unwrap();
-            // Blocks the parent process until we are ready to go in the child
-            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
-            Ok(Value::Null)
-        }
-        Ok(ForkResult::Child) => {
-            nix::unistd::close(pipe.0).unwrap();
-            nix::unistd::setsid().unwrap();
-            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
-        }
-        Err(_) => bail!("failed to daemonize process"),
-    }
-}
 
 use proxmox_backup::client::RemoteChunkReader;
 /// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
@@ -2023,7 +1991,7 @@ use proxmox_backup::client::RemoteChunkReader;
 /// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
 /// so that we can properly access it from multiple threads simultaneously while not issuing
 /// duplicate simultaneous reads over http.
-struct BufferedDynamicReadAt {
+pub struct BufferedDynamicReadAt {
     inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
 }
 
@@ -2058,102 +2026,6 @@ impl ReadAt for BufferedDynamicReadAt {
     }
 }
 
-async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
-    let repo = extract_repository_from_value(&param)?;
-    let archive_name = tools::required_string_param(&param, "archive-name")?;
-    let target = tools::required_string_param(&param, "target")?;
-    let client = connect(repo.host(), repo.user())?;
-
-    record_repository(&repo);
-
-    let path = tools::required_string_param(&param, "snapshot")?;
-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
-    };
-
-    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
-    let crypt_config = match keyfile {
-        None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-            Some(Arc::new(CryptConfig::new(key)?))
-        }
-    };
-
-    let server_archive_name = if archive_name.ends_with(".pxar") {
-        format!("{}.didx", archive_name)
-    } else {
-        bail!("Can only mount pxar archives.");
-    };
-
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        &backup_type,
-        &backup_id,
-        backup_time,
-        true,
-    ).await?;
-
-    let manifest = client.download_manifest().await?;
-
-    if server_archive_name.ends_with(".didx") {
-        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
-        let most_used = index.find_most_used_chunks(8);
-        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
-        let reader = BufferedDynamicReader::new(index, chunk_reader);
-        let archive_size = reader.archive_size();
-        let reader: proxmox_backup::pxar::fuse::Reader =
-            Arc::new(BufferedDynamicReadAt::new(reader));
-        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
-        let options = OsStr::new("ro,default_permissions");
-
-        let session = proxmox_backup::pxar::fuse::Session::mount(
-            decoder,
-            &options,
-            false,
-            Path::new(target),
-        )
-        .map_err(|err| format_err!("pxar mount failed: {}", err))?;
-
-        if let Some(pipe) = pipe {
-            nix::unistd::chdir(Path::new("/")).unwrap();
-            // Finish creation of daemon by redirecting filedescriptors.
-            let nullfd = nix::fcntl::open(
-                "/dev/null",
-                nix::fcntl::OFlag::O_RDWR,
-                nix::sys::stat::Mode::empty(),
-            ).unwrap();
-            nix::unistd::dup2(nullfd, 0).unwrap();
-            nix::unistd::dup2(nullfd, 1).unwrap();
-            nix::unistd::dup2(nullfd, 2).unwrap();
-            if nullfd > 2 {
-                nix::unistd::close(nullfd).unwrap();
-            }
-            // Signal the parent process that we are done with the setup and it can
-            // terminate.
-            nix::unistd::write(pipe, &[0u8])?;
-            nix::unistd::close(pipe).unwrap();
-        }
-
-        let mut interrupt = signal(SignalKind::interrupt())?;
-        select! {
-            res = session.fuse() => res?,
-            _ = interrupt.recv().fuse() => {
-                // exit on interrupted
-            }
-        }
-    } else {
-        bail!("unknown archive file extension (expected .pxar)");
-    }
-
-    Ok(Value::Null)
-}
 
 #[api(
     input: {
@@ -2288,138 +2160,6 @@ fn catalog_mgmt_cli() -> CliCommandMap {
         .insert("shell", catalog_shell_cmd_def)
 }
 
-#[api(
-    input: {
-        properties: {
-            repository: {
-                schema: REPO_URL_SCHEMA,
-                optional: true,
-            },
-            limit: {
-                description: "The maximal number of tasks to list.",
-                type: Integer,
-                optional: true,
-                minimum: 1,
-                maximum: 1000,
-                default: 50,
-            },
-            "output-format": {
-                schema: OUTPUT_FORMAT,
-                optional: true,
-            },
-            all: {
-                type: Boolean,
-                description: "Also list stopped tasks.",
-                optional: true,
-            },
-        }
-    }
-)]
-/// List running server tasks for this repo user
-async fn task_list(param: Value) -> Result<Value, Error> {
-
-    let output_format = get_output_format(&param);
-
-    let repo = extract_repository_from_value(&param)?;
-    let client = connect(repo.host(), repo.user())?;
-
-    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
-    let running = !param["all"].as_bool().unwrap_or(false);
-
-    let args = json!({
-        "running": running,
-        "start": 0,
-        "limit": limit,
-        "userfilter": repo.user(),
-        "store": repo.store(),
-    });
-
-    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
-    let mut data = result["data"].take();
-
-    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
-
-    let options = default_table_format_options()
-        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
-        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
-        .column(ColumnConfig::new("upid"))
-        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
-
-    format_and_print_result_full(&mut data, schema, &output_format, &options);
-
-    Ok(Value::Null)
-}
-
-#[api(
-    input: {
-        properties: {
-            repository: {
-                schema: REPO_URL_SCHEMA,
-                optional: true,
-            },
-            upid: {
-                schema: UPID_SCHEMA,
-            },
-        }
-    }
-)]
-/// Display the task log.
-async fn task_log(param: Value) -> Result<Value, Error> {
-
-    let repo = extract_repository_from_value(&param)?;
-    let upid = tools::required_string_param(&param, "upid")?;
-
-    let client = connect(repo.host(), repo.user())?;
-
-    display_task_log(client, upid, true).await?;
-
-    Ok(Value::Null)
-}
-
-#[api(
-    input: {
-        properties: {
-            repository: {
-                schema: REPO_URL_SCHEMA,
-                optional: true,
-            },
-            upid: {
-                schema: UPID_SCHEMA,
-            },
-        }
-    }
-)]
-/// Try to stop a specific task.
-async fn task_stop(param: Value) -> Result<Value, Error> {
-
-    let repo = extract_repository_from_value(&param)?;
-    let upid_str = tools::required_string_param(&param, "upid")?;
-
-    let mut client = connect(repo.host(), repo.user())?;
-
-    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
-    let _ = client.delete(&path, None).await?;
-
-    Ok(Value::Null)
-}
-
-fn task_mgmt_cli() -> CliCommandMap {
-
-    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
-        .completion_cb("repository", complete_repository);
-
-    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
-        .arg_param(&["upid"]);
-
-    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
-        .arg_param(&["upid"]);
-
-    CliCommandMap::new()
-        .insert("log", task_log_cmd_def)
-        .insert("list", task_list_cmd_def)
-        .insert("stop", task_stop_cmd_def)
-}
-
 fn main() {
 
     let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
@@ -2429,6 +2169,10 @@ fn main() {
         .completion_cb("keyfile", tools::complete_file_name)
         .completion_cb("chunk-size", complete_chunk_size);
 
+    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
+        .completion_cb("repository", complete_repository)
+        .completion_cb("keyfile", tools::complete_file_name);
+
     let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
         .arg_param(&["snapshot", "logfile"])
         .completion_cb("snapshot", complete_backup_snapshot)
@@ -2478,30 +2222,6 @@ fn main() {
     let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
         .completion_cb("repository", complete_repository);
 
-    #[sortable]
-    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
-        &ApiHandler::Sync(&mount),
-        &ObjectSchema::new(
-            "Mount pxar archive.",
-            &sorted!([
-                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
-                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
-                ("target", false, &StringSchema::new("Target directory path.").schema()),
-                ("repository", true, &REPO_URL_SCHEMA),
-                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
-                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
-            ]),
-        )
-    );
-
-    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
-        .arg_param(&["snapshot", "archive-name", "target"])
-        .completion_cb("repository", complete_repository)
-        .completion_cb("snapshot", complete_group_or_snapshot)
-        .completion_cb("archive-name", complete_pxar_archive_name)
-        .completion_cb("target", tools::complete_file_name);
-
-
     let cmd_def = CliCommandMap::new()
         .insert("backup", backup_cmd_def)
         .insert("upload-log", upload_log_cmd_def)
@@ -2516,9 +2236,10 @@ fn main() {
         .insert("files", files_cmd_def)
         .insert("status", status_cmd_def)
         .insert("key", key_mgmt_cli())
-        .insert("mount", mount_cmd_def)
+        .insert("mount", mount_cmd_def())
         .insert("catalog", catalog_mgmt_cli())
-        .insert("task", task_mgmt_cli());
+        .insert("task", task_mgmt_cli())
+        .insert("benchmark", benchmark_cmd_def);
 
     let rpcenv = CliEnvironment::new();
     run_cli_command(cmd_def, rpcenv, Some(|future| {
src/bin/proxmox_backup_client/benchmark.rs (new file, 82 lines)
@@ -0,0 +1,82 @@
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use anyhow::{Error};
+use serde_json::Value;
+use chrono::{TimeZone, Utc};
+
+use proxmox::api::{ApiMethod, RpcEnvironment};
+use proxmox::api::api;
+
+use proxmox_backup::backup::{
+    load_and_decrypt_key,
+    CryptConfig,
+
+};
+
+use proxmox_backup::client::*;
+
+use crate::{
+    KEYFILE_SCHEMA, REPO_URL_SCHEMA,
+    extract_repository_from_value,
+    get_encryption_key_password,
+    record_repository,
+    connect,
+};
+
+#[api(
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            keyfile: {
+                schema: KEYFILE_SCHEMA,
+                optional: true,
+            },
+        }
+    }
+)]
+/// Run benchmark tests
+pub async fn benchmark(
+    param: Value,
+    _info: &ApiMethod,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+
+    let repo = extract_repository_from_value(&param)?;
+
+    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+
+    let crypt_config = match keyfile {
+        None => None,
+        Some(path) => {
+            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+            let crypt_config = CryptConfig::new(key)?;
+            Some(Arc::new(crypt_config))
+        }
+    };
+
+    let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
+
+    let client = connect(repo.host(), repo.user())?;
+    record_repository(&repo);
+
+    let client = BackupWriter::start(
+        client,
+        crypt_config.clone(),
+        repo.store(),
+        "host",
+        "benshmark",
+        backup_time,
+        false,
+    ).await?;
+
+    println!("Start upload speed test");
+    let speed = client.upload_speedtest().await?;
+
+    println!("Upload speed: {} MiB/s", speed);
+
+    Ok(())
+}
src/bin/proxmox_backup_client/mod.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
+mod benchmark;
+pub use benchmark::*;
+mod mount;
+pub use mount::*;
+mod task;
+pub use task::*;
src/bin/proxmox_backup_client/mount.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::os::unix::io::RawFd;
+use std::path::Path;
+use std::ffi::OsStr;
+
+use anyhow::{bail, format_err, Error};
+use serde_json::Value;
+use tokio::signal::unix::{signal, SignalKind};
+use nix::unistd::{fork, ForkResult, pipe};
+use futures::select;
+use futures::future::FutureExt;
+
+use proxmox::{sortable, identity};
+use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
+
+
+use proxmox_backup::tools;
+use proxmox_backup::backup::{
+    load_and_decrypt_key,
+    CryptConfig,
+    IndexFile,
+    BackupDir,
+    BackupGroup,
+    BufferedDynamicReader,
+};
+
+use proxmox_backup::client::*;
+
+use crate::{
+    REPO_URL_SCHEMA,
+    extract_repository_from_value,
+    get_encryption_key_password,
+    complete_pxar_archive_name,
+    complete_group_or_snapshot,
+    complete_repository,
+    record_repository,
+    connect,
+    api_datastore_latest_snapshot,
+    BufferedDynamicReadAt,
+};
+
+#[sortable]
+const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
+    &ApiHandler::Sync(&mount),
+    &ObjectSchema::new(
+        "Mount pxar archive.",
+        &sorted!([
+            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
+            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
+            ("target", false, &StringSchema::new("Target directory path.").schema()),
+            ("repository", true, &REPO_URL_SCHEMA),
+            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
+            ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
+        ]),
+    )
+);
+
+pub fn mount_cmd_def() -> CliCommand {
+
+    CliCommand::new(&API_METHOD_MOUNT)
+        .arg_param(&["snapshot", "archive-name", "target"])
+        .completion_cb("repository", complete_repository)
+        .completion_cb("snapshot", complete_group_or_snapshot)
+        .completion_cb("archive-name", complete_pxar_archive_name)
+        .completion_cb("target", tools::complete_file_name)
+}
+
+fn mount(
+    param: Value,
+    _info: &ApiMethod,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let verbose = param["verbose"].as_bool().unwrap_or(false);
+    if verbose {
+        // This will stay in foreground with debug output enabled as None is
+        // passed for the RawFd.
+        return proxmox_backup::tools::runtime::main(mount_do(param, None));
+    }
+
+    // Process should be deamonized.
+    // Make sure to fork before the async runtime is instantiated to avoid troubles.
+    let pipe = pipe()?;
+    match fork() {
+        Ok(ForkResult::Parent { .. }) => {
+            nix::unistd::close(pipe.1).unwrap();
+            // Blocks the parent process until we are ready to go in the child
+            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
+            Ok(Value::Null)
+        }
+        Ok(ForkResult::Child) => {
+            nix::unistd::close(pipe.0).unwrap();
+            nix::unistd::setsid().unwrap();
+            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
+        }
+        Err(_) => bail!("failed to daemonize process"),
+    }
+}
+
+async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
+    let repo = extract_repository_from_value(&param)?;
+    let archive_name = tools::required_string_param(&param, "archive-name")?;
+    let target = tools::required_string_param(&param, "target")?;
+    let client = connect(repo.host(), repo.user())?;
+
+    record_repository(&repo);
+
+    let path = tools::required_string_param(&param, "snapshot")?;
+    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
+        let group: BackupGroup = path.parse()?;
+        api_datastore_latest_snapshot(&client, repo.store(), group).await?
+    } else {
+        let snapshot: BackupDir = path.parse()?;
+        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
+    };
+
+    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+    let crypt_config = match keyfile {
+        None => None,
+        Some(path) => {
+            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+            Some(Arc::new(CryptConfig::new(key)?))
+        }
+    };
+
+    let server_archive_name = if archive_name.ends_with(".pxar") {
+        format!("{}.didx", archive_name)
+    } else {
+        bail!("Can only mount pxar archives.");
+    };
+
+    let client = BackupReader::start(
+        client,
+        crypt_config.clone(),
+        repo.store(),
+        &backup_type,
+        &backup_id,
+        backup_time,
+        true,
+    ).await?;
+
+    let manifest = client.download_manifest().await?;
+
+    if server_archive_name.ends_with(".didx") {
+        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+        let most_used = index.find_most_used_chunks(8);
+        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+        let reader = BufferedDynamicReader::new(index, chunk_reader);
+        let archive_size = reader.archive_size();
+        let reader: proxmox_backup::pxar::fuse::Reader =
+            Arc::new(BufferedDynamicReadAt::new(reader));
+        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
+        let options = OsStr::new("ro,default_permissions");
+
+        let session = proxmox_backup::pxar::fuse::Session::mount(
+            decoder,
+            &options,
+            false,
+            Path::new(target),
+        )
+        .map_err(|err| format_err!("pxar mount failed: {}", err))?;
+
+        if let Some(pipe) = pipe {
+            nix::unistd::chdir(Path::new("/")).unwrap();
+            // Finish creation of daemon by redirecting filedescriptors.
+            let nullfd = nix::fcntl::open(
+                "/dev/null",
+                nix::fcntl::OFlag::O_RDWR,
+                nix::sys::stat::Mode::empty(),
+            ).unwrap();
+            nix::unistd::dup2(nullfd, 0).unwrap();
+            nix::unistd::dup2(nullfd, 1).unwrap();
+            nix::unistd::dup2(nullfd, 2).unwrap();
+            if nullfd > 2 {
+                nix::unistd::close(nullfd).unwrap();
+            }
+            // Signal the parent process that we are done with the setup and it can
+            // terminate.
+            nix::unistd::write(pipe, &[0u8])?;
+            nix::unistd::close(pipe).unwrap();
+        }
+
+        let mut interrupt = signal(SignalKind::interrupt())?;
+        select! {
+            res = session.fuse() => res?,
+            _ = interrupt.recv().fuse() => {
+                // exit on interrupted
+            }
+        }
+    } else {
+        bail!("unknown archive file extension (expected .pxar)");
+    }
+
+    Ok(Value::Null)
+}
src/bin/proxmox_backup_client/task.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
+use anyhow::{Error};
+use serde_json::{json, Value};
+
+use proxmox::api::{api, cli::*};
+
+use proxmox_backup::tools;
+
+use proxmox_backup::client::*;
+use proxmox_backup::api2::types::UPID_SCHEMA;
+
+use crate::{
+    REPO_URL_SCHEMA,
+    extract_repository_from_value,
+    complete_repository,
+    connect,
+};
+
+#[api(
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            limit: {
+                description: "The maximal number of tasks to list.",
+                type: Integer,
+                optional: true,
+                minimum: 1,
+                maximum: 1000,
+                default: 50,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+            all: {
+                type: Boolean,
+                description: "Also list stopped tasks.",
+                optional: true,
+            },
+        }
+    }
+)]
+/// List running server tasks for this repo user
+async fn task_list(param: Value) -> Result<Value, Error> {
+
+    let output_format = get_output_format(&param);
+
+    let repo = extract_repository_from_value(&param)?;
+    let client = connect(repo.host(), repo.user())?;
+
+    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
+    let running = !param["all"].as_bool().unwrap_or(false);
+
+    let args = json!({
+        "running": running,
+        "start": 0,
+        "limit": limit,
+        "userfilter": repo.user(),
+        "store": repo.store(),
+    });
+
+    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
+    let mut data = result["data"].take();
+
+    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
+
+    let options = default_table_format_options()
+        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
+        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
+        .column(ColumnConfig::new("upid"))
+        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
+
+    format_and_print_result_full(&mut data, schema, &output_format, &options);
+
+    Ok(Value::Null)
+}
+
+#[api(
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            upid: {
+                schema: UPID_SCHEMA,
+            },
+        }
+    }
+)]
+/// Display the task log.
+async fn task_log(param: Value) -> Result<Value, Error> {
+
+    let repo = extract_repository_from_value(&param)?;
+    let upid = tools::required_string_param(&param, "upid")?;
+
+    let client = connect(repo.host(), repo.user())?;
+
+    display_task_log(client, upid, true).await?;
+
+    Ok(Value::Null)
+}
+
+#[api(
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            upid: {
+                schema: UPID_SCHEMA,
+            },
+        }
+    }
+)]
+/// Try to stop a specific task.
+async fn task_stop(param: Value) -> Result<Value, Error> {
+
+    let repo = extract_repository_from_value(&param)?;
+    let upid_str = tools::required_string_param(&param, "upid")?;
+
+    let mut client = connect(repo.host(), repo.user())?;
+
+    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
+    let _ = client.delete(&path, None).await?;
+
+    Ok(Value::Null)
+}
+
+pub fn task_mgmt_cli() -> CliCommandMap {
+
+    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
+        .completion_cb("repository", complete_repository);
+
+    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
+        .arg_param(&["upid"]);
+
+    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
+        .arg_param(&["upid"]);
+
+    CliCommandMap::new()
+        .insert("log", task_log_cmd_def)
+        .insert("list", task_list_cmd_def)
+        .insert("stop", task_stop_cmd_def)
+}
@@ -1,7 +1,7 @@
 use std::future::Future;
 use std::collections::HashMap;
 use std::pin::Pin;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
 
 use anyhow::Error;
 
@@ -14,7 +14,7 @@ pub struct RemoteChunkReader {
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
     cache_hint: HashMap<[u8; 32], usize>,
-    cache: HashMap<[u8; 32], Vec<u8>>,
+    cache: Mutex<HashMap<[u8; 32], Vec<u8>>>,
 }
 
 impl RemoteChunkReader {
@@ -30,11 +30,11 @@ impl RemoteChunkReader {
             client,
             crypt_config,
             cache_hint,
-            cache: HashMap::new(),
+            cache: Mutex::new(HashMap::new()),
         }
     }
 
-    pub async fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+    pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
 
         self.client
@@ -49,12 +49,12 @@ impl RemoteChunkReader {
 }
 
 impl ReadChunk for RemoteChunkReader {
-    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         block_on(Self::read_raw_chunk(self, digest))
     }
 
-    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
-        if let Some(raw_data) = self.cache.get(digest) {
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+        if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
             return Ok(raw_data.to_vec());
         }
 
@@ -66,7 +66,7 @@ impl ReadChunk for RemoteChunkReader {
 
         let use_cache = self.cache_hint.contains_key(digest);
         if use_cache {
-            self.cache.insert(*digest, raw_data.to_vec());
+            (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
         }
 
         Ok(raw_data)
@@ -75,18 +75,18 @@ impl ReadChunk for RemoteChunkReader {
 
 impl AsyncReadChunk for RemoteChunkReader {
     fn read_raw_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
         Box::pin(Self::read_raw_chunk(self, digest))
     }
 
     fn read_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
         Box::pin(async move {
-            if let Some(raw_data) = self.cache.get(digest) {
+            if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
                 return Ok(raw_data.to_vec());
             }
 
@@ -98,7 +98,7 @@ impl AsyncReadChunk for RemoteChunkReader {
 
             let use_cache = self.cache_hint.contains_key(digest);
             if use_cache {
-                self.cache.insert(*digest, raw_data.to_vec());
+                (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
             }
 
             Ok(raw_data)
@@ -248,11 +248,15 @@ impl<'a, 'b> Archiver<'a, 'b> {
     }
 
     /// openat() wrapper which allows but logs `EACCES` and turns `ENOENT` into `None`.
+    ///
+    /// The `existed` flag is set when iterating through a directory to note that we know the file
+    /// is supposed to exist and we should warn if it doesnt'.
     fn open_file(
         &mut self,
         parent: RawFd,
         file_name: &CStr,
         oflags: OFlag,
+        existed: bool,
     ) -> Result<Option<Fd>, Error> {
         match Fd::openat(
             &unsafe { RawFdNum::from_raw_fd(parent) },
@@ -261,9 +265,14 @@ impl<'a, 'b> Archiver<'a, 'b> {
             Mode::empty(),
         ) {
             Ok(fd) => Ok(Some(fd)),
-            Err(nix::Error::Sys(Errno::ENOENT)) => Ok(None),
+            Err(nix::Error::Sys(Errno::ENOENT)) => {
+                if existed {
+                    self.report_vanished_file()?;
+                }
+                Ok(None)
+            }
             Err(nix::Error::Sys(Errno::EACCES)) => {
-                write!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
+                writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
                 Ok(None)
             }
             Err(other) => Err(Error::from(other)),
@@ -275,6 +284,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
             parent,
             c_str!(".pxarexclude"),
             OFlag::O_RDONLY | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
+            false,
         )?;
 
         let old_pattern_count = self.patterns.len();
@@ -287,7 +297,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
             let line = match line {
                 Ok(line) => line,
                 Err(err) => {
-                    let _ = write!(
+                    let _ = writeln!(
                         self.errors,
                         "ignoring .pxarexclude after read error in {:?}: {}",
                         self.path,
@@ -307,7 +317,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
             match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Exclude) {
                 Ok(pattern) => self.patterns.push(pattern),
                 Err(err) => {
-                    let _ = write!(self.errors, "bad pattern in {:?}: {}", self.path, err);
+                    let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
                 }
             }
         }
@@ -410,12 +420,12 @@ impl<'a, 'b> Archiver<'a, 'b> {
     }
 
     fn report_vanished_file(&mut self) -> Result<(), Error> {
-        write!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
+        writeln!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
         Ok(())
     }
 
     fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
-        write!(
+        writeln!(
             self.errors,
             "warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
             self.path,
@@ -424,7 +434,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
     }
 
     fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
-        write!(
+        writeln!(
            self.errors,
            "warning: file size increased while reading: {:?}, file will be truncated!",
            self.path,
@@ -452,14 +462,12 @@ impl<'a, 'b> Archiver<'a, 'b> {
             parent,
             c_file_name,
             open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
+            true,
         )?;
 
         let fd = match fd {
             Some(fd) => fd,
-            None => {
-                self.report_vanished_file()?;
-                return Ok(());
-            }
+            None => return Ok(()),
         };
 
         let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic)?;
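The archiver hunks above also switch several error reports from write! to writeln!; without the trailing newline, consecutive messages written into the shared error buffer run together on one line. A small illustration of the difference, using a plain String as the error sink instead of the archiver's writer:

    use std::fmt::Write;

    fn main() {
        let mut errors = String::new();

        // write! glues the two reports together on a single line ...
        write!(errors, "warning: file vanished while reading: {:?}", "/tmp/a").unwrap();
        write!(errors, "bad pattern in {:?}: {}", "/tmp/b", "trailing slash").unwrap();
        assert_eq!(errors.lines().count(), 1);

        // ... while writeln! terminates each report, so they stay separate.
        errors.clear();
        writeln!(errors, "warning: file vanished while reading: {:?}", "/tmp/a").unwrap();
        writeln!(errors, "bad pattern in {:?}: {}", "/tmp/b", "trailing slash").unwrap();
        assert_eq!(errors.lines().count(), 2);

        print!("{}", errors);
    }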
@@ -79,6 +79,7 @@ Ext.define('PBS.DataStoreContent', {
     let url = `/api2/json/admin/datastore/${view.datastore}/snapshots`;
     this.store.setProxy({
         type: 'proxmox',
+        timeout: 300*1000, // 5 minutes, we should make that api call faster
         url: url
     });
 
@@ -199,6 +200,45 @@ Ext.define('PBS.DataStoreContent', {
         win.show();
     },
 
+    onVerify: function() {
+        var view = this.getView();
+
+        if (!view.datastore) return;
+
+        let rec = view.selModel.getSelection()[0];
+        if (!(rec && rec.data)) return;
+        let data = rec.data;
+
+        let params;
+
+        if (data.leaf) {
+            params = {
+                "backup-type": data["backup-type"],
+                "backup-id": data["backup-id"],
+                "backup-time": (data['backup-time'].getTime()/1000).toFixed(0),
+            };
+        } else {
+            params = {
+                "backup-type": data.backup_type,
+                "backup-id": data.backup_id,
+            };
+        }
+
+        Proxmox.Utils.API2Request({
+            params: params,
+            url: `/admin/datastore/${view.datastore}/verify`,
+            method: 'POST',
+            failure: function(response) {
+                Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+            },
+            success: function(response, options) {
+                Ext.create('Proxmox.window.TaskViewer', {
+                    upid: response.result.data,
+                }).show();
+            },
+        });
+    },
+
     onForget: function() {
         var view = this.getView();
 
@@ -356,6 +396,14 @@ Ext.define('PBS.DataStoreContent', {
         iconCls: 'fa fa-refresh',
         handler: 'reload',
     },
+    {
+        xtype: 'proxmoxButton',
+        text: gettext('Verify'),
+        disabled: true,
+        parentXType: 'pbsDataStoreContent',
+        enableFn: function(record) { return !!record.data; },
+        handler: 'onVerify',
+    },
     {
         xtype: 'proxmoxButton',
         text: gettext('Prune'),
@@ -40,7 +40,7 @@ Ext.define('PBS.DataStorePanel', {
 
     initComponent: function() {
         let me = this;
-        me.title = `${gettext("Data Store")}: ${me.datastore}`;
+        me.title = `${gettext("Datastore")}: ${me.datastore}`;
         me.callParent();
     },
 });
@@ -80,7 +80,7 @@ Ext.define('PBS.store.NavigationStore', {
             ]
         },
         {
-            text: gettext('Data Store'),
+            text: gettext('Datastore'),
             iconCls: 'fa fa-archive',
             path: 'pbsDataStoreConfig',
             expanded: true,
www/Utils.js (23 lines changed)
@@ -33,23 +33,18 @@ Ext.define('PBS.Utils', {
     },
 
     render_datastore_worker_id: function(id, what) {
-        const result = id.match(/^(\S+)_([^_\s]+)_([^_\s]+)$/);
-        if (result) {
-            let datastore = result[1], type = result[2], id = result[3];
-            return `Datastore ${datastore} - ${what} ${type}/${id}`;
-        }
-        return `Datastore ${id} - ${what}`;
-    },
-
-    render_datastore_time_worker_id: function(id, what) {
-        const res = id.match(/^(\S+)_([^_\s]+)_([^_\s]+)_([^_\s]+)$/);
+        const res = id.match(/^(\S+?)_(\S+?)_(\S+?)(_(.+))?$/);
         if (res) {
             let datastore = res[1], type = res[2], id = res[3];
-            let datetime = Ext.Date.parse(parseInt(res[4], 16), 'U');
-            let utctime = PBS.Utils.render_datetime_utc(datetime);
-            return `Datastore ${datastore} - ${what} ${type}/${id}/${utctime}`;
+            if (res[4] !== undefined) {
+                let datetime = Ext.Date.parse(parseInt(res[5], 16), 'U');
+                let utctime = PBS.Utils.render_datetime_utc(datetime);
+                return `Datastore ${datastore} ${what} ${type}/${id}/${utctime}`;
+            } else {
+                return `Datastore ${datastore} ${what} ${type}/${id}`;
+            }
         }
-        return what;
+        return `Datastore ${what} ${id}`;
     },
 
     constructor: function() {
@@ -70,7 +65,7 @@ Ext.define('PBS.Utils', {
             return PBS.Utils.render_datastore_worker_id(id, gettext('Backup'));
         },
         reader: (type, id) => {
-            return PBS.Utils.render_datastore_time_worker_id(id, gettext('Read objects'));
+            return PBS.Utils.render_datastore_worker_id(id, gettext('Read objects'));
         },
     });
 }
@@ -25,7 +25,7 @@ Ext.define('PBS.DataStoreConfig', {
     extend: 'Ext.grid.GridPanel',
     alias: 'widget.pbsDataStoreConfig',
 
-    title: gettext('Data Store Configuration'),
+    title: gettext('Datastore Configuration'),
 
     controller: {
         xclass: 'Ext.app.ViewController',
@@ -58,6 +58,27 @@ Ext.define('PBS.DataStoreConfig', {
             }).show();
         },
 
+        onVerify: function() {
+            var view = this.getView();
+
+            let rec = view.selModel.getSelection()[0];
+            if (!(rec && rec.data)) return;
+            let data = rec.data;
+
+            Proxmox.Utils.API2Request({
+                url: `/admin/datastore/${data.name}/verify`,
+                method: 'POST',
+                failure: function(response) {
+                    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+                },
+                success: function(response, options) {
+                    Ext.create('Proxmox.window.TaskViewer', {
+                        upid: response.result.data,
+                    }).show();
+                },
+            });
+        },
+
         garbageCollect: function() {
             let me = this;
             let view = me.getView();
@@ -115,6 +136,12 @@ Ext.define('PBS.DataStoreConfig', {
         },
         // remove_btn
         '-',
+        {
+            xtype: 'proxmoxButton',
+            text: gettext('Verify'),
+            disabled: true,
+            handler: 'onVerify',
+        },
         {
             xtype: 'proxmoxButton',
             text: gettext('Start GC'),
@@ -16,7 +16,7 @@ Ext.define('PBS.form.DataStoreSelector', {
     listConfig: {
         columns: [
             {
-                header: gettext('DataStore'),
+                header: gettext('Datastore'),
                 sortable: true,
                 dataIndex: 'store',
                 renderer: Ext.String.htmlEncode,
@@ -39,7 +39,7 @@ Ext.define('PBS.window.CreateDirectory', {
         {
             xtype: 'proxmoxcheckbox',
             name: 'add-datastore',
-            fieldLabel: gettext('Add Data Store'),
+            fieldLabel: gettext('Add as Datastore'),
             value: '1',
         },
     ],
@@ -28,7 +28,7 @@ Ext.define('PBS.window.CreateZFS', {
         {
             xtype: 'proxmoxcheckbox',
             name: 'add-datastore',
-            fieldLabel: gettext('Add Datastore'),
+            fieldLabel: gettext('Add as Datastore'),
             value: '1'
         }
     ],