tree wide: clippy lint fixes
most (not all) were done automatically

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent c39852abdc
commit fbfb64a6b2
@@ -35,7 +35,7 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
 pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
     let content = proxmox_sys::fs::file_read_optional_string(PRUNE_CFG_FILENAME)?;
-    let content = content.unwrap_or_else(String::new);
+    let content = content.unwrap_or_default();

     let digest = openssl::sha::sha256(content.as_bytes());
     let data = CONFIG.parse(PRUNE_CFG_FILENAME, &content)?;
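Note: this hunk is the `unwrap_or_else`-to-`unwrap_or_default` pattern that clippy flags (the `unwrap_or_else_default` lint): `String::new` produces exactly the type's `Default` value. A minimal standalone sketch with a hypothetical option, not code from the repo:

fn main() {
    let content: Option<String> = None;
    // Before: content.unwrap_or_else(String::new)
    let content = content.unwrap_or_default();
    assert_eq!(content, "");
}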
@@ -19,7 +19,7 @@ pub fn render_backup_file_list<S: Borrow<str>>(files: &[S]) -> String {
         .map(|v| strip_server_file_extension(v.borrow()))
         .collect();

-    files.sort();
+    files.sort_unstable();

     files.join(" ")
 }
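Note: `sort` guarantees a stable order that buys nothing when equal strings are indistinguishable, so `sort_unstable` saves the temporary allocation a stable merge sort needs; this one may have been among the manual fixes, since clippy's `stable_sort_primitive` only fires on primitive element types. A sketch with made-up file names:

fn main() {
    let mut files = vec!["b.img".to_string(), "a.img".to_string()];
    files.sort_unstable();
    assert_eq!(files, ["a.img", "b.img"]);
}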
@@ -163,7 +163,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let path = required_string_param(&param, "snapshot")?;
     let archive_name = required_string_param(&param, "archive-name")?;

-    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;

     let crypto = crypto_parameters(&param)?;

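Note: `path` is already a `&str` here, so `&path` creates a needless `&&str` that the compiler auto-derefs right back; this is clippy's `needless_borrow` lint, the same fix as the `&client` hunk below. A sketch with a hypothetical helper, not the repo's API:

fn takes_str(s: &str) -> usize {
    s.len()
}

fn main() {
    let path: &str = "vm/100/2022-01-01T00:00:00Z";
    // Before: takes_str(&path) — compiles via auto-deref, but lints.
    assert_eq!(takes_str(path), 27);
}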
@@ -176,7 +176,7 @@ pub async fn dir_or_last_from_group(
     match path.parse::<BackupPart>()? {
         BackupPart::Dir(dir) => Ok(dir),
        BackupPart::Group(group) => {
-            api_datastore_latest_snapshot(&client, repo.store(), ns, group).await
+            api_datastore_latest_snapshot(client, repo.store(), ns, group).await
        }
    }
 }
@@ -1245,7 +1245,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let ns = optional_ns_param(&param)?;
     let path = json::required_string_param(&param, "snapshot")?;

-    let backup_dir = dir_or_last_from_group(&client, &repo, &ns, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &ns, path).await?;

     let target = json::required_string_param(&param, "target")?;
     let target = if target == "-" { None } else { Some(target) };
@@ -205,7 +205,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {

     let backup_ns = optional_ns_param(&param)?;
     let path = required_string_param(&param, "snapshot")?;
-    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;

     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
     let crypt_config = match keyfile {
@@ -204,7 +204,6 @@ pub fn complete_block_driver_ids<S: BuildHasher>(
     ALL_DRIVERS
         .iter()
         .map(BlockDriverType::resolve)
-        .map(|d| d.list())
-        .flatten()
+        .flat_map(|d| d.list())
         .collect()
 }
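Note: `.map(f).flatten()` on an iterator is clippy's `map_flatten` lint; `.flat_map(f)` is the single-step equivalent. Standalone sketch with toy data:

fn main() {
    let lists = [vec![1, 2], vec![3], vec![]];
    // Before: lists.iter().map(|l| l.iter().copied()).flatten().collect()
    let all: Vec<i32> = lists.iter().flat_map(|l| l.iter().copied()).collect();
    assert_eq!(all, [1, 2, 3]);
}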
@@ -262,13 +262,11 @@ pub fn rotate_task_log_archive(
                         }
                     }
                 }
-            } else {
-                if let Err(err) = std::fs::remove_file(&file_name) {
-                    log::error!("could not remove {:?}: {}", file_name, err);
-                }
+            } else if let Err(err) = std::fs::remove_file(&file_name) {
+                log::error!("could not remove {:?}: {}", file_name, err);
             }
         }
     }

     Ok(rotated)
 }
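Note: an `else { if .. }` / `else { if let .. }` pair collapses into `else if ..`, dropping one nesting level (clippy's `collapsible_else_if`). A sketch under hypothetical names, same shape as the hunk above:

use std::fs;

fn cleanup(keep: bool, file_name: &str) {
    if keep {
        println!("keeping {}", file_name);
    } else if let Err(err) = fs::remove_file(file_name) {
        eprintln!("could not remove {:?}: {}", file_name, err);
    }
}

fn main() {
    cleanup(false, "/tmp/does-not-exist.log");
}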
@@ -966,7 +964,7 @@ impl WorkerTask {

     /// Set progress indicator
     pub fn progress(&self, progress: f64) {
-        if progress >= 0.0 && progress <= 1.0 {
+        if (0.0..=1.0).contains(&progress) {
             let mut data = self.data.lock().unwrap();
             data.progress = progress;
         } else {
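Note: a two-sided comparison such as `progress >= 0.0 && progress <= 1.0` is what `RangeInclusive::contains` spells out directly (clippy's `manual_range_contains`). Sketch:

fn main() {
    let progress = 0.5_f64;
    assert!((0.0..=1.0).contains(&progress));
    assert!(!(0.0..=1.0).contains(&1.5));
}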
@@ -107,14 +107,14 @@ impl Bucket {
             Bucket::RawFs(_) => ty == "raw",
             Bucket::ZPool(data) => {
                 if let Some(ref comp) = comp.get(0) {
-                    ty == "zpool" && comp.as_ref() == &data.name
+                    ty == "zpool" && comp.as_ref() == data.name
                 } else {
                     false
                 }
             }
             Bucket::LVM(data) => {
                 if let (Some(ref vg), Some(ref lv)) = (comp.get(0), comp.get(1)) {
-                    ty == "lvm" && vg.as_ref() == &data.vg_name && lv.as_ref() == &data.lv_name
+                    ty == "lvm" && vg.as_ref() == data.vg_name && lv.as_ref() == data.lv_name
                 } else {
                     false
                 }
@@ -147,7 +147,7 @@ fn extract_archive(
         feature_flags.remove(Flags::WITH_SOCKETS);
     }

-    let pattern = pattern.unwrap_or_else(Vec::new);
+    let pattern = pattern.unwrap_or_default();
     let target = target.as_ref().map_or_else(|| ".", String::as_str);

     let mut match_list = Vec::new();
@@ -297,7 +297,7 @@ async fn create_archive(
     entries_max: isize,
 ) -> Result<(), Error> {
     let patterns = {
-        let input = exclude.unwrap_or_else(Vec::new);
+        let input = exclude.unwrap_or_default();
         let mut patterns = Vec::with_capacity(input.len());
         for entry in input {
             patterns.push(
@@ -1557,7 +1557,7 @@ pub fn catalog(
         &backup_dir.group,
     )?;

-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

     let file_name = CATALOG_NAME;

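Note: this and the following `backup_dir` hunks drop an `ns.clone()` whose original was never used again (clippy's `redundant_clone`); moving the value avoids the copy. Standalone sketch with a hypothetical namespace type, not the repo's:

#[derive(Clone)]
struct BackupNamespace(String);

fn backup_dir(ns: BackupNamespace) -> usize {
    ns.0.len()
}

fn main() {
    let ns = BackupNamespace("ns1".into());
    // Before: backup_dir(ns.clone()) with `ns` unused afterwards.
    assert_eq!(backup_dir(ns), 3);
}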
@@ -1939,7 +1939,7 @@ pub fn get_notes(
         &backup_dir.group,
     )?;

-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

     let (manifest, _) = backup_dir.load_manifest()?;

@@ -1992,7 +1992,7 @@ pub fn set_notes(
         &backup_dir.group,
     )?;

-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

     backup_dir
         .update_manifest(|manifest| {
@@ -2042,7 +2042,7 @@ pub fn get_protection(
         &backup_dir.group,
     )?;

-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

     Ok(backup_dir.is_protected())
 }
@@ -2090,7 +2090,7 @@ pub fn set_protection(
         &backup_dir.group,
     )?;

-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

     datastore.update_protection(&backup_dir, protected)
 }
@@ -230,7 +230,7 @@ pub fn do_tape_backup_job(
         if let Err(err) = job.finish(status) {
             eprintln!(
                 "could not finish job state for {}: {}",
-                job.jobtype().to_string(),
+                job.jobtype(),
                 err
             );
         }
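Note: `{}` in a format string already invokes `Display`, so `job.jobtype().to_string()` allocates a `String` only to format it again (clippy's `to_string_in_format_args`); the same fix recurs in the garbage-collection, prune, and verification job hunks below and in the `uuid.to_string()` tape hunks. Sketch with a hypothetical job type:

use std::fmt;

struct JobType;

impl fmt::Display for JobType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "tape-backup")
    }
}

fn main() {
    let err = "some failure";
    // Before: eprintln!("... for {}: {}", JobType.to_string(), err);
    eprintln!("could not finish job state for {}: {}", JobType, err);
}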
@@ -837,10 +837,7 @@ async fn schedule_task_log_rotate() {
     if !check_schedule(worker_type, schedule, job_id) {
         // if we never ran the rotation, schedule instantly
         match jobstate::JobState::load(worker_type, job_id) {
-            Ok(state) => match state {
-                jobstate::JobState::Created { .. } => {}
-                _ => return,
-            },
+            Ok(jobstate::JobState::Created { .. }) => {}
            _ => return,
        }
    }
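Note: a match arm whose body immediately matches the bound value again can be one nested pattern (clippy's `collapsible_match`). Sketch with a pared-down, hypothetical job state:

enum JobState {
    Created,
    Started,
}

fn maybe_schedule(state: Result<JobState, ()>) {
    match state {
        // Before: Ok(state) => match state { JobState::Created => {} _ => return },
        Ok(JobState::Created) => {}
        _ => return,
    }
    println!("scheduling instantly");
}

fn main() {
    maybe_schedule(Ok(JobState::Created)); // prints
    maybe_schedule(Ok(JobState::Started)); // returns early
}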
@@ -1183,10 +1180,6 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &st
 }

 // Rate Limiter lookup
-
-// Test WITH
-// proxmox-backup-client restore vm/201/2021-10-22T09:55:56Z drive-scsi0.img img1.img --repository localhost:store2
-
 async fn run_traffic_control_updater() {
     loop {
         let delay_target = Instant::now() + Duration::from_secs(1);
@@ -3,7 +3,6 @@ use std::io::{Read, Seek, SeekFrom, Write};
 use std::path::Path;

 use anyhow::{bail, format_err, Error};
-use serde_json::Value;

 use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
 use proxmox_schema::api;
@@ -69,7 +68,6 @@ fn recover_index(
     ignore_missing_chunks: bool,
     ignore_corrupt_chunks: bool,
     output_path: Option<String>,
-    _param: Value,
 ) -> Result<(), Error> {
     let file_path = Path::new(&file);
     let chunks_path = Path::new(&chunks);
@@ -150,7 +148,7 @@ fn recover_index(
             }
             Err(err) => {
                 if ignore_missing_chunks && err.kind() == std::io::ErrorKind::NotFound {
-                    create_zero_chunk(format!("is missing"))?
+                    create_zero_chunk("is missing".to_string())?
                 } else {
                     bail!("could not open chunk file - {}", err);
                 }
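Note: `format!` without any interpolation is just an allocation dressed up as formatting (clippy's `useless_format`); `"is missing".to_string()` states the intent. Sketch:

fn main() {
    // Before: let reason = format!("is missing");
    let reason = "is missing".to_string();
    assert_eq!(reason, "is missing");
}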
@@ -42,7 +42,7 @@ pub fn do_garbage_collection_job(
         if let Err(err) = job.finish(status) {
             eprintln!(
                 "could not finish job state for {}: {}",
-                job.jobtype().to_string(),
+                job.jobtype(),
                 err
             );
         }
@@ -58,7 +58,7 @@ pub fn prune_datastore(

     for group in ListAccessibleBackupGroups::new_with_privs(
         &datastore,
-        ns.clone(),
+        ns,
         max_depth,
         Some(PRIV_DATASTORE_MODIFY), // overides the owner check
         Some(PRIV_DATASTORE_PRUNE), // additionally required if owner
@@ -190,7 +190,7 @@ pub fn do_prune_job(
         if let Err(err) = job.finish(status) {
             eprintln!(
                 "could not finish job state for {}: {}",
-                job.jobtype().to_string(),
+                job.jobtype(),
                 err
             );
         }
@@ -77,7 +77,7 @@ pub fn do_verification_job(
         if let Err(err) = job.finish(status) {
             eprintln!(
                 "could not finish job state for {}: {}",
-                job.jobtype().to_string(),
+                job.jobtype(),
                 err
             );
         }
@@ -458,7 +458,7 @@ pub fn request_and_load_media(
                 let label_string = format!(
                     "{} ({})",
                     media_id.label.label_text,
-                    media_id.label.uuid.to_string(),
+                    media_id.label.uuid,
                 );
                 TapeRequestError::WrongLabel(label_string)
             }
@@ -499,7 +499,7 @@ impl MediaCatalog {
         };

         if self.log_to_stdout {
-            println!("L|{}|{}", file_number, uuid.to_string());
+            println!("L|{}|{}", file_number, uuid);
         }

         self.pending.push(b'L');
@@ -599,7 +599,7 @@ impl MediaCatalog {
         };

         if self.log_to_stdout {
-            println!("A|{}|{}|{}", file_number, uuid.to_string(), store);
+            println!("A|{}|{}|{}", file_number, uuid, store);
         }

         self.pending.push(b'A');
@@ -648,7 +648,7 @@ impl MediaCatalog {
         };

         if self.log_to_stdout {
-            println!("E|{}|{}\n", file_number, uuid.to_string());
+            println!("E|{}|{}\n", file_number, uuid);
         }

         self.pending.push(b'E');
@@ -713,7 +713,7 @@ impl MediaCatalog {
         };

         if self.log_to_stdout {
-            println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, path,);
+            println!("S|{}|{}|{}:{}", file_number, uuid, store, path,);
         }

         self.pending.push(b'S');
@@ -449,7 +449,7 @@ impl PoolWriter {
         self.catalog_set.lock().unwrap().register_snapshot(
             content_uuid,
             current_file_number,
-            &snapshot_reader.datastore_name().to_string(),
+            snapshot_reader.datastore_name(),
             snapshot_reader.snapshot().backup_ns(),
             snapshot_reader.snapshot().as_ref(),
         )?;
@@ -132,7 +132,7 @@ fn get_changelog_url(
         Some(captures) => {
             let base_capture = captures.get(1);
             match base_capture {
-                Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
+                Some(base_underscore) => base_underscore.as_str().replace('_', "/"),
                 None => bail!("incompatible filename, cannot find regex group"),
             }
         }
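Note: a single-character `&str` pattern can be a `char`, which lets `str::replace` skip the substring-search machinery (clippy's `single_char_pattern`). Sketch with a made-up package base:

fn main() {
    let base = "bullseye_pbs";
    // Before: base.replace("_", "/")
    assert_eq!(base.replace('_', "/"), "bullseye/pbs");
}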
@@ -278,7 +278,7 @@ pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {
     };

     let encoded: String = cfg.collect::<String>();
-    let decoded = base64::decode(encoded.to_owned())?;
+    let decoded = base64::decode(&encoded)?;
     let decoded = std::str::from_utf8(&decoded)?;

     let info: SubscriptionInfo = serde_json::from_str(decoded)?;
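Note: `base64::decode` is generic over `AsRef<[u8]>`, so it accepts `&String` directly and the `encoded.to_owned()` copy was pure overhead (clippy's `redundant_clone`). A stdlib-only sketch of the same shape, with a hypothetical decoder in place of the base64 crate:

fn decode(input: impl AsRef<[u8]>) -> Vec<u8> {
    input.as_ref().to_vec()
}

fn main() {
    let encoded = String::from("payload");
    // Before: decode(encoded.to_owned()) — clones, then moves the clone.
    assert_eq!(decode(&encoded), b"payload");
}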