tree wide: clippy lint fixes
most (not all) were done automatically

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
@@ -1557,7 +1557,7 @@ pub fn catalog(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     let file_name = CATALOG_NAME;
 
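Note: the `ns.clone()` to `ns` changes above (and in the hunks that follow) look like fixes for clippy's `redundant_clone` lint, which fires when the original of a clone is never used again. A minimal, self-contained sketch of the pattern; the `Namespace` type and `open_dir` function are invented for illustration:

#[derive(Clone)]
struct Namespace(String);

// Takes the namespace by value, just like the real call site passes `ns`.
fn open_dir(ns: Namespace) -> usize {
    ns.0.len()
}

fn main() {
    let ns = Namespace("vm/201".to_string());
    // Before: open_dir(ns.clone()) - `ns` is never used afterwards, so the
    // clone is a wasted allocation (clippy::redundant_clone).
    // After: move the value into the call.
    println!("{}", open_dir(ns));
}

Moving the value instead of cloning it saves one allocation per call without changing behaviour.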
@@ -1939,7 +1939,7 @@ pub fn get_notes(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     let (manifest, _) = backup_dir.load_manifest()?;
 
@@ -1992,7 +1992,7 @@ pub fn set_notes(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     backup_dir
         .update_manifest(|manifest| {
@@ -2042,7 +2042,7 @@ pub fn get_protection(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     Ok(backup_dir.is_protected())
 }
@@ -2090,7 +2090,7 @@ pub fn set_protection(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     datastore.update_protection(&backup_dir, protected)
 }
@@ -230,7 +230,7 @@ pub fn do_tape_backup_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
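Note: dropping `.to_string()` inside `eprintln!` here (and in the garbage-collection, prune, and verification job hunks below) matches clippy's `to_string_in_format_args` lint: `{}` already formats the value via `Display`, so the intermediate `String` is pure overhead. A small sketch with a hypothetical `JobType`:

use std::fmt;

enum JobType {
    Prune,
}

impl fmt::Display for JobType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            JobType::Prune => write!(f, "prune"),
        }
    }
}

fn main() {
    let jobtype = JobType::Prune;
    // Before: eprintln!("could not finish job state for {}: {}", jobtype.to_string(), err);
    //   clippy::to_string_in_format_args: "{}" calls Display anyway, so the
    //   intermediate String is a wasted allocation.
    // After: pass the value directly.
    eprintln!("could not finish job state for {}: {}", jobtype, "some error");
}

The output is identical; only the extra allocation goes away.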
@@ -837,10 +837,7 @@ async fn schedule_task_log_rotate() {
     if !check_schedule(worker_type, schedule, job_id) {
         // if we never ran the rotation, schedule instantly
         match jobstate::JobState::load(worker_type, job_id) {
-            Ok(state) => match state {
-                jobstate::JobState::Created { .. } => {}
-                _ => return,
-            },
+            Ok(jobstate::JobState::Created { .. }) => {}
             _ => return,
         }
     }
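Note: folding `Ok(state) => match state { ... }` into a single `Ok(jobstate::JobState::Created { .. }) => {}` arm is the shape clippy's `collapsible_match` lint suggests. A reduced sketch with a stand-in `JobState`:

#[allow(dead_code)]
enum JobState {
    Created,
    Started,
}

fn load() -> Result<JobState, ()> {
    Ok(JobState::Created)
}

fn schedule_if_never_ran() {
    // Before (clippy::collapsible_match):
    //     match load() {
    //         Ok(state) => match state {
    //             JobState::Created => {}
    //             _ => return,
    //         },
    //         _ => return,
    //     }
    // After: the inner pattern folds into the outer arm.
    match load() {
        Ok(JobState::Created) => {}
        _ => return,
    }
    println!("never ran before, scheduling instantly");
}

fn main() {
    schedule_if_never_ran();
}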
@@ -1183,10 +1180,6 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str
 }
 
 // Rate Limiter lookup
-
-// Test WITH
-// proxmox-backup-client restore vm/201/2021-10-22T09:55:56Z drive-scsi0.img img1.img --repository localhost:store2
-
 async fn run_traffic_control_updater() {
     loop {
         let delay_target = Instant::now() + Duration::from_secs(1);
@@ -3,7 +3,6 @@ use std::io::{Read, Seek, SeekFrom, Write};
 use std::path::Path;
 
 use anyhow::{bail, format_err, Error};
-use serde_json::Value;
 
 use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
 use proxmox_schema::api;
@@ -69,7 +68,6 @@ fn recover_index(
     ignore_missing_chunks: bool,
     ignore_corrupt_chunks: bool,
     output_path: Option<String>,
-    _param: Value,
 ) -> Result<(), Error> {
     let file_path = Path::new(&file);
     let chunks_path = Path::new(&chunks);
@@ -150,7 +148,7 @@ fn recover_index(
             }
             Err(err) => {
                 if ignore_missing_chunks && err.kind() == std::io::ErrorKind::NotFound {
-                    create_zero_chunk(format!("is missing"))?
+                    create_zero_chunk("is missing".to_string())?
                 } else {
                     bail!("could not open chunk file - {}", err);
                 }
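Note: `format!("is missing")` with no placeholders is what clippy's `useless_format` lint flags; a plain `.to_string()` builds the same `String` without going through the formatting machinery. A tiny sketch (this `create_zero_chunk` is only a stand-in for the real helper):

// Stand-in for the real helper, which takes the reason as an owned String.
fn create_zero_chunk(reason: String) -> Result<(), String> {
    println!("inserting zero chunk because it {}", reason);
    Ok(())
}

fn main() -> Result<(), String> {
    // Before: create_zero_chunk(format!("is missing"))?
    //   clippy::useless_format: format! with no placeholders is just a
    //   roundabout way to build a String.
    // After:
    create_zero_chunk("is missing".to_string())?;
    Ok(())
}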
@@ -42,7 +42,7 @@ pub fn do_garbage_collection_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
@@ -58,7 +58,7 @@ pub fn prune_datastore(
 
     for group in ListAccessibleBackupGroups::new_with_privs(
         &datastore,
-        ns.clone(),
+        ns,
         max_depth,
         Some(PRIV_DATASTORE_MODIFY), // overides the owner check
         Some(PRIV_DATASTORE_PRUNE), // additionally required if owner
@@ -190,7 +190,7 @@ pub fn do_prune_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
@@ -77,7 +77,7 @@ pub fn do_verification_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
@@ -458,7 +458,7 @@ pub fn request_and_load_media(
                     let label_string = format!(
                         "{} ({})",
                         media_id.label.label_text,
-                        media_id.label.uuid.to_string(),
+                        media_id.label.uuid,
                     );
                     TapeRequestError::WrongLabel(label_string)
                 }
@@ -499,7 +499,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("L|{}|{}", file_number, uuid.to_string());
+            println!("L|{}|{}", file_number, uuid);
         }
 
         self.pending.push(b'L');
@@ -599,7 +599,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("A|{}|{}|{}", file_number, uuid.to_string(), store);
+            println!("A|{}|{}|{}", file_number, uuid, store);
         }
 
         self.pending.push(b'A');
@@ -648,7 +648,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("E|{}|{}\n", file_number, uuid.to_string());
+            println!("E|{}|{}\n", file_number, uuid);
        }
 
         self.pending.push(b'E');
@@ -713,7 +713,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, path,);
+            println!("S|{}|{}|{}:{}", file_number, uuid, store, path,);
         }
 
         self.pending.push(b'S');
@@ -449,7 +449,7 @@ impl PoolWriter {
         self.catalog_set.lock().unwrap().register_snapshot(
             content_uuid,
             current_file_number,
-            &snapshot_reader.datastore_name().to_string(),
+            snapshot_reader.datastore_name(),
             snapshot_reader.snapshot().backup_ns(),
             snapshot_reader.snapshot().as_ref(),
         )?;
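Note: `&snapshot_reader.datastore_name().to_string()` allocated a temporary `String` only to borrow it right back; passing the `&str` through directly, as the hunk does, avoids that. This pattern is covered by clippy's `unnecessary_to_owned` lint in recent releases. A sketch with made-up signatures:

// Hypothetical signatures: the callee only needs a borrowed string slice.
fn register_snapshot(store: &str) {
    println!("registering snapshot on datastore {}", store);
}

fn datastore_name() -> &'static str {
    "store2"
}

fn main() {
    // Before: register_snapshot(&datastore_name().to_string());
    //   builds a temporary String just to borrow it back as &str.
    // After: hand the existing &str straight through, no allocation.
    register_snapshot(datastore_name());
}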
@@ -132,7 +132,7 @@ fn get_changelog_url(
         Some(captures) => {
             let base_capture = captures.get(1);
             match base_capture {
-                Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
+                Some(base_underscore) => base_underscore.as_str().replace('_', "/"),
                 None => bail!("incompatible filename, cannot find regex group"),
             }
         }
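Note: changing `replace("_", "/")` to `replace('_', "/")` is clippy's `single_char_pattern` lint; a `char` pattern is a slightly cheaper needle for single-character searches. For example (the file name below is illustrative):

fn main() {
    // Illustrative package file name, in the spirit of get_changelog_url().
    let base = "bullseye_pbs-no-subscription_binary-amd64";
    // Before: base.replace("_", "/") - clippy::single_char_pattern prefers a
    // char literal, which avoids the general substring search for a 1-char needle.
    let path = base.replace('_', "/");
    assert_eq!(path, "bullseye/pbs-no-subscription/binary-amd64");
}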
@@ -278,7 +278,7 @@ pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {
     };
 
     let encoded: String = cfg.collect::<String>();
-    let decoded = base64::decode(encoded.to_owned())?;
+    let decoded = base64::decode(&encoded)?;
     let decoded = std::str::from_utf8(&decoded)?;
 
     let info: SubscriptionInfo = serde_json::from_str(decoded)?;
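Note: `base64::decode(encoded.to_owned())` cloned the whole config string even though the decoder only needs to read its bytes; `base64::decode(&encoded)` works because the function accepts anything that can be viewed as `&[u8]`. A dependency-free sketch with a stand-in `decode`:

// Stand-in for base64::decode, which is generic over AsRef<[u8]> and therefore
// accepts a borrowed String just as well as an owned one.
fn decode<T: AsRef<[u8]>>(input: T) -> Vec<u8> {
    input.as_ref().to_vec()
}

fn main() {
    let encoded: String = "cHJveG1veA==".to_string();
    // Before: decode(encoded.to_owned()) - clones the whole buffer even though
    // only a byte view is needed.
    // After: lend the existing String instead.
    let decoded = decode(&encoded);
    println!("decoded {} bytes", decoded.len());
}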