tree wide: typo fixes through codespell

Most, but not all, found and fixed using `codespell -wci3 -L crate`

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
Thomas Lamprecht 2022-06-05 10:30:23 +02:00
parent 6a35698796
commit d20137e5a9
17 changed files with 23 additions and 35 deletions

View File

@ -161,7 +161,7 @@ impl AcmeClient {
let mut data = Vec::<u8>::new(); let mut data = Vec::<u8>::new();
self.write_to(&mut data)?; self.write_to(&mut data)?;
let account_path = self.account_path.as_ref().ok_or_else(|| { let account_path = self.account_path.as_ref().ok_or_else(|| {
format_err!("no account path set, cannot save upated account information") format_err!("no account path set, cannot save updated account information")
})?; })?;
crate::config::acme::make_acme_account_dir()?; crate::config::acme::make_acme_account_dir()?;
replace_file( replace_file(

View File

@ -588,7 +588,7 @@ fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result
}; };
let snapshot_count = group.list_backups()?.len() as u64; let snapshot_count = group.list_backups()?.len() as u64;
// only include groups with snapshots, counting/displaying emtpy groups can confuse // only include groups with snapshots, counting/displaying empty groups can confuse
if snapshot_count > 0 { if snapshot_count > 0 {
let type_count = match group.backup_type() { let type_count = match group.backup_type() {
BackupType::Ct => counts.ct.get_or_insert(Default::default()), BackupType::Ct => counts.ct.get_or_insert(Default::default()),
@ -647,12 +647,12 @@ pub fn status(
false // allow at least counts, user can read groups anyway.. false // allow at least counts, user can read groups anyway..
} else { } else {
match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) { match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
// avoid leaking existance info if users hasn't at least any priv. below // avoid leaking existence info if users hasn't at least any priv. below
Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")), Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
_ => false, _ => false,
} }
}; };
let datastore = datastore?; // only unwrap no to avoid leaking existance info let datastore = datastore?; // only unwrap no to avoid leaking existence info
let (counts, gc_status) = if verbose { let (counts, gc_status) = if verbose {
let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 { let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {

View File

@ -133,7 +133,7 @@ pub fn list_namespaces(
}, },
"delete-groups": { "delete-groups": {
type: bool, type: bool,
description: "If set, all groups will be destroyed in the whole hierachy below and\ description: "If set, all groups will be destroyed in the whole hierarchy below and\
including `ns`. If not set, only empty namespaces will be pruned.", including `ns`. If not set, only empty namespaces will be pruned.",
optional: true, optional: true,
default: false, default: false,

View File

@ -121,7 +121,7 @@ pub fn update_webauthn_config(
} else { } else {
let rp = webauthn let rp = webauthn
.rp .rp
.ok_or_else(|| format_err!("missing proeprty: 'rp'"))?; .ok_or_else(|| format_err!("missing property: 'rp'"))?;
let origin = webauthn.origin; let origin = webauthn.origin;
let id = webauthn let id = webauthn
.id .id

View File

@ -317,7 +317,7 @@ fn upgrade_to_websocket(
} }
#[api] #[api]
/// List Nodes (only for compatiblity) /// List Nodes (only for compatibility)
fn list_nodes() -> Result<Value, Error> { fn list_nodes() -> Result<Value, Error> {
Ok(json!([ { "node": proxmox_sys::nodename().to_string() } ])) Ok(json!([ { "node": proxmox_sys::nodename().to_string() } ]))
} }

View File

@ -824,7 +824,7 @@ fn restore_list_worker(
if !media_file_chunk_map.is_empty() { if !media_file_chunk_map.is_empty() {
task_log!(worker, "Phase 2: restore chunks to datastores"); task_log!(worker, "Phase 2: restore chunks to datastores");
} else { } else {
task_log!(worker, "all chunks exist already, skipping phase 2..."); task_log!(worker, "All chunks are already present, skip phase 2...");
} }
for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() { for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() {
@ -1988,7 +1988,7 @@ pub fn fast_catalog_restore(
if &media_uuid != catalog_uuid { if &media_uuid != catalog_uuid {
task_log!( task_log!(
worker, worker,
"catalog uuid missmatch at pos {}", "catalog uuid mismatch at pos {}",
current_file_number current_file_number
); );
continue; continue;
@ -1996,7 +1996,7 @@ pub fn fast_catalog_restore(
if media_set_uuid != archive_header.media_set_uuid { if media_set_uuid != archive_header.media_set_uuid {
task_log!( task_log!(
worker, worker,
"catalog media_set missmatch at pos {}", "catalog media_set mismatch at pos {}",
current_file_number current_file_number
); );
continue; continue;

View File

@ -90,7 +90,7 @@ pub fn can_access_any_namespace(
}) })
} }
/// A priviledge aware iterator for all backup groups in all Namespaces below an anchor namespace, /// A privilege aware iterator for all backup groups in all Namespaces below an anchor namespace,
/// most often that will be the `BackupNamespace::root()` one. /// most often that will be the `BackupNamespace::root()` one.
/// ///
/// Is basically just a filter-iter for pbs_datastore::ListNamespacesRecursive including access and /// Is basically just a filter-iter for pbs_datastore::ListNamespacesRecursive including access and

View File

@ -1001,7 +1001,7 @@ async fn run_stat_generator() {
async fn generate_host_stats() { async fn generate_host_stats() {
match tokio::task::spawn_blocking(generate_host_stats_sync).await { match tokio::task::spawn_blocking(generate_host_stats_sync).await {
Ok(()) => (), Ok(()) => (),
Err(err) => log::error!("generate_host_stats paniced: {}", err), Err(err) => log::error!("generate_host_stats panicked: {}", err),
} }
} }

View File

@ -154,7 +154,7 @@ pub fn complete_acme_plugin(_arg: &str, _param: &HashMap<String, String>) -> Vec
pub fn complete_acme_plugin_type(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> { pub fn complete_acme_plugin_type(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
vec![ vec![
"dns".to_string(), "dns".to_string(),
//"http".to_string(), // makes currently not realyl sense to create or the like //"http".to_string(), // makes currently not really sense to create or the like
] ]
} }

View File

@ -40,11 +40,7 @@ pub fn do_garbage_collection_job(
let status = worker.create_state(&result); let status = worker.create_state(&result);
if let Err(err) = job.finish(status) { if let Err(err) = job.finish(status) {
eprintln!( eprintln!("could not finish job state for {}: {}", job.jobtype(), err);
"could not finish job state for {}: {}",
job.jobtype(),
err
);
} }
if let Some(email) = email { if let Some(email) = email {

View File

@ -60,7 +60,7 @@ pub fn prune_datastore(
&datastore, &datastore,
ns, ns,
max_depth, max_depth,
Some(PRIV_DATASTORE_MODIFY), // overides the owner check Some(PRIV_DATASTORE_MODIFY), // overrides the owner check
Some(PRIV_DATASTORE_PRUNE), // additionally required if owner Some(PRIV_DATASTORE_PRUNE), // additionally required if owner
Some(&auth_id), Some(&auth_id),
)? { )? {
@ -188,11 +188,7 @@ pub fn do_prune_job(
let status = worker.create_state(&result); let status = worker.create_state(&result);
if let Err(err) = job.finish(status) { if let Err(err) = job.finish(status) {
eprintln!( eprintln!("could not finish job state for {}: {}", job.jobtype(), err);
"could not finish job state for {}: {}",
job.jobtype(),
err
);
} }
result result

View File

@ -75,11 +75,7 @@ pub fn do_verification_job(
let status = worker.create_state(&job_result); let status = worker.create_state(&job_result);
if let Err(err) = job.finish(status) { if let Err(err) = job.finish(status) {
eprintln!( eprintln!("could not finish job state for {}: {}", job.jobtype(), err);
"could not finish job state for {}: {}",
job.jobtype(),
err
);
} }
if let Some(email) = email { if let Some(email) = email {

View File

@ -18,7 +18,7 @@
//! MediaSet Locking //! MediaSet Locking
//! //!
//! To add/remove media from a media set, or to modify catalogs we //! To add/remove media from a media set, or to modify catalogs we
//! always do lock_media_set(). Also, we aquire this lock during //! always do lock_media_set(). Also, we acquire this lock during
//! restore, to make sure it is not reused for backups. //! restore, to make sure it is not reused for backups.
//! //!

View File

@ -483,7 +483,7 @@ impl MediaCatalog {
pub fn register_label( pub fn register_label(
&mut self, &mut self,
uuid: &Uuid, // Media/MediaSet Uuid uuid: &Uuid, // Media/MediaSet Uuid
seq_nr: u64, // onyl used for media set labels seq_nr: u64, // only used for media set labels
file_number: u64, file_number: u64,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.check_register_label(file_number, uuid)?; self.check_register_label(file_number, uuid)?;

View File

@ -1,6 +1,6 @@
//! Media Pool //! Media Pool
//! //!
//! A set of backup medias. //! A set of backup mediums.
//! //!
//! This struct manages backup media state during backup. The main //! This struct manages backup media state during backup. The main
//! purpose is to allocate media sets and assign new tapes to it. //! purpose is to allocate media sets and assign new tapes to it.
@ -392,7 +392,7 @@ impl MediaPool {
let uuid = media_id.label.uuid.clone(); let uuid = media_id.label.uuid.clone();
MediaCatalog::overwrite(&self.state_path, &media_id, false)?; // overwite catalog MediaCatalog::overwrite(&self.state_path, &media_id, false)?; // overwrite catalog
let clear_media_status = true; // remove Full status let clear_media_status = true; // remove Full status
self.inventory.store(media_id, clear_media_status)?; // store persistently self.inventory.store(media_id, clear_media_status)?; // store persistently

View File

@ -18,7 +18,7 @@ pub struct NewChunksIterator {
impl NewChunksIterator { impl NewChunksIterator {
/// Creates the iterator, spawning a new thread /// Creates the iterator, spawning a new thread
/// ///
/// Make sure to join() the returnd thread handle. /// Make sure to join() the returned thread handle.
pub fn spawn( pub fn spawn(
datastore: Arc<DataStore>, datastore: Arc<DataStore>,
snapshot_reader: Arc<Mutex<SnapshotReader>>, snapshot_reader: Arc<Mutex<SnapshotReader>>,

View File

@ -34,7 +34,7 @@ struct ParsedTcRule {
/// Traffic control statistics /// Traffic control statistics
pub struct TrafficStat { pub struct TrafficStat {
/// Total incomming traffic (bytes) /// Total incoming traffic (bytes)
pub traffic_in: u64, pub traffic_in: u64,
/// Incoming data rate (bytes/second) /// Incoming data rate (bytes/second)
pub rate_in: u64, pub rate_in: u64,