typo fixes all over the place

found and semi-manually replaced by using:
 codespell -L mut -L crate -i 3 -w

Mostly in comments, but also email notification and two occurrences
of misspelled 'reserved' struct member, which were not used and
cargo build did not complain about the change, so ...

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
Thomas Lamprecht 2021-03-10 16:37:09 +01:00
parent 8b7f3b8f1d
commit d1d74c4367
46 changed files with 81 additions and 81 deletions

View File

@ -181,7 +181,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) { if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
match { match {
// scope to prevent the temprary iter from borrowing across the whole match // scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id); let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index)) entry.map(|(ty, index, _)| (ty, index))
} { } {
@ -259,7 +259,7 @@ fn delete_tfa(
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?; .ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
match { match {
// scope to prevent the temprary iter from borrowing across the whole match // scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id); let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index)) entry.map(|(ty, index, _)| (ty, index))
} { } {

View File

@ -1,4 +1,4 @@
//! Datastore Syncronization Job Management //! Datastore Synchronization Job Management
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use serde_json::Value; use serde_json::Value;

View File

@ -119,7 +119,7 @@ pub fn change_passphrase(
let kdf = kdf.unwrap_or_default(); let kdf = kdf.unwrap_or_default();
if let Kdf::None = kdf { if let Kdf::None = kdf {
bail!("Please specify a key derivation funktion (none is not allowed here)."); bail!("Please specify a key derivation function (none is not allowed here).");
} }
let _lock = open_file_locked( let _lock = open_file_locked(
@ -187,7 +187,7 @@ pub fn create_key(
let kdf = kdf.unwrap_or_default(); let kdf = kdf.unwrap_or_default();
if let Kdf::None = kdf { if let Kdf::None = kdf {
bail!("Please specify a key derivation funktion (none is not allowed here)."); bail!("Please specify a key derivation function (none is not allowed here).");
} }
let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?; let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;

View File

@ -85,7 +85,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
}, },
notify: { notify: {
type: bool, type: bool,
description: r#"Send notification mail about new package updates availanle to the description: r#"Send notification mail about new package updates available to the
email address configured for 'root@pam')."#, email address configured for 'root@pam')."#,
default: false, default: false,
optional: true, optional: true,

View File

@ -220,7 +220,7 @@ pub async fn load_slot(drive: String, source_slot: u64) -> Result<(), Error> {
}, },
}, },
returns: { returns: {
description: "The import-export slot number the media was transfered to.", description: "The import-export slot number the media was transferred to.",
type: u64, type: u64,
minimum: 1, minimum: 1,
}, },
@ -782,7 +782,7 @@ pub fn clean_drive(
} }
} }
worker.log("Drive cleaned sucessfully"); worker.log("Drive cleaned successfully");
Ok(()) Ok(())
}, },
@ -943,7 +943,7 @@ pub fn update_inventory(
} }
Ok((Some(media_id), _key_config)) => { Ok((Some(media_id), _key_config)) => {
if label_text != media_id.label.label_text { if label_text != media_id.label.label_text {
worker.warn(format!("label text missmatch ({} != {})", label_text, media_id.label.label_text)); worker.warn(format!("label text mismatch ({} != {})", label_text, media_id.label.label_text));
continue; continue;
} }
worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid)); worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));

View File

@ -497,7 +497,7 @@ pub fn get_media_status(uuid: Uuid) -> Result<MediaStatus, Error> {
/// Update media status (None, 'full', 'damaged' or 'retired') /// Update media status (None, 'full', 'damaged' or 'retired')
/// ///
/// It is not allowed to set status to 'writable' or 'unknown' (those /// It is not allowed to set status to 'writable' or 'unknown' (those
/// are internaly managed states). /// are internally managed states).
pub fn update_media_status(uuid: Uuid, status: Option<MediaStatus>) -> Result<(), Error> { pub fn update_media_status(uuid: Uuid, status: Option<MediaStatus>) -> Result<(), Error> {
let status_path = Path::new(TAPE_STATUS_DIR); let status_path = Path::new(TAPE_STATUS_DIR);

View File

@ -1272,7 +1272,7 @@ pub struct APTUpdateInfo {
pub enum Notify { pub enum Notify {
/// Never send notification /// Never send notification
Never, Never,
/// Send notifications for failed and sucessful jobs /// Send notifications for failed and successful jobs
Always, Always,
/// Send notifications for failed jobs only /// Send notifications for failed jobs only
Error, Error,

View File

@ -21,7 +21,7 @@ pub struct OptionalDeviceIdentification {
#[api()] #[api()]
#[derive(Debug,Serialize,Deserialize)] #[derive(Debug,Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Kind of devive /// Kind of device
pub enum DeviceKind { pub enum DeviceKind {
/// Tape changer (Autoloader, Robot) /// Tape changer (Autoloader, Robot)
Changer, Changer,

View File

@ -75,7 +75,7 @@
//! //!
//! Since PBS allows multiple potentially interfering operations at the //! Since PBS allows multiple potentially interfering operations at the
//! same time (e.g. garbage collect, prune, multiple backup creations //! same time (e.g. garbage collect, prune, multiple backup creations
//! (only in seperate groups), forget, ...), these need to lock against //! (only in separate groups), forget, ...), these need to lock against
//! each other in certain scenarios. There is no overarching global lock //! each other in certain scenarios. There is no overarching global lock
//! though, instead always the finest grained lock possible is used, //! though, instead always the finest grained lock possible is used,
//! because running these operations concurrently is treated as a feature //! because running these operations concurrently is treated as a feature

View File

@ -452,7 +452,7 @@ impl ChunkStore {
#[test] #[test]
fn test_chunk_store1() { fn test_chunk_store1() {
let mut path = std::fs::canonicalize(".").unwrap(); // we need absulute path let mut path = std::fs::canonicalize(".").unwrap(); // we need absolute path
path.push(".testdir"); path.push(".testdir");
if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ } if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }

View File

@ -448,7 +448,7 @@ impl DataStore {
if !self.chunk_store.cond_touch_chunk(digest, false)? { if !self.chunk_store.cond_touch_chunk(digest, false)? {
crate::task_warn!( crate::task_warn!(
worker, worker,
"warning: unable to access non-existant chunk {}, required by {:?}", "warning: unable to access non-existent chunk {}, required by {:?}",
proxmox::tools::digest_to_hex(digest), proxmox::tools::digest_to_hex(digest),
file_name, file_name,
); );

View File

@ -1453,7 +1453,7 @@ fn parse_archive_type(name: &str) -> (String, ArchiveType) {
type: String, type: String,
description: r###"Target directory path. Use '-' to write to standard output. description: r###"Target directory path. Use '-' to write to standard output.
We do not extraxt '.pxar' archives when writing to standard output. We do not extract '.pxar' archives when writing to standard output.
"### "###
}, },

View File

@ -330,7 +330,7 @@ async fn get_versions(verbose: bool, param: Value) -> Result<Value, Error> {
let options = default_table_format_options() let options = default_table_format_options()
.disable_sort() .disable_sort()
.noborder(true) // just not helpfull for version info which gets copy pasted often .noborder(true) // just not helpful for version info which gets copy pasted often
.column(ColumnConfig::new("Package")) .column(ColumnConfig::new("Package"))
.column(ColumnConfig::new("Version")) .column(ColumnConfig::new("Version"))
.column(ColumnConfig::new("ExtraInfo").header("Extra Info")) .column(ColumnConfig::new("ExtraInfo").header("Extra Info"))

View File

@ -527,7 +527,7 @@ fn show_master_pubkey(path: Option<String>, param: Value) -> Result<(), Error> {
optional: true, optional: true,
}, },
subject: { subject: {
description: "Include the specified subject as titel text.", description: "Include the specified subject as title text.",
optional: true, optional: true,
}, },
"output-format": { "output-format": {

View File

@ -140,7 +140,7 @@ fn mount(
return proxmox_backup::tools::runtime::main(mount_do(param, None)); return proxmox_backup::tools::runtime::main(mount_do(param, None));
} }
// Process should be deamonized. // Process should be daemonized.
// Make sure to fork before the async runtime is instantiated to avoid troubles. // Make sure to fork before the async runtime is instantiated to avoid troubles.
let (pr, pw) = proxmox_backup::tools::pipe()?; let (pr, pw) = proxmox_backup::tools::pipe()?;
match unsafe { fork() } { match unsafe { fork() } {

View File

@ -84,7 +84,7 @@ pub fn encryption_key_commands() -> CommandLineInterface {
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA, schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
}, },
subject: { subject: {
description: "Include the specified subject as titel text.", description: "Include the specified subject as title text.",
optional: true, optional: true,
}, },
"output-format": { "output-format": {
@ -128,7 +128,7 @@ fn paper_key(
}, },
}, },
)] )]
/// Print tthe encryption key's metadata. /// Print the encryption key's metadata.
fn show_key( fn show_key(
param: Value, param: Value,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,

View File

@ -1,6 +1,6 @@
/// Tape command implemented using scsi-generic raw commands /// Tape command implemented using scsi-generic raw commands
/// ///
/// SCSI-generic command needs root priviledges, so this binary need /// SCSI-generic command needs root privileges, so this binary need
/// to be setuid root. /// to be setuid root.
/// ///
/// This command can use STDIN as tape device handle. /// This command can use STDIN as tape device handle.

View File

@ -16,11 +16,11 @@ pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
/// namespaced directory for persistent logging /// namespaced directory for persistent logging
pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!(); pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();
/// logfile for all API reuests handled by the proxy and privileged API daemons. Note that not all /// logfile for all API requests handled by the proxy and privileged API daemons. Note that not all
/// failed logins can be logged here with full information, use the auth log for that. /// failed logins can be logged here with full information, use the auth log for that.
pub const API_ACCESS_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/access.log"); pub const API_ACCESS_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/access.log");
/// logfile for any failed authentication, via ticket or via token, and new successfull ticket /// logfile for any failed authentication, via ticket or via token, and new successful ticket
/// creations. This file can be useful for fail2ban. /// creations. This file can be useful for fail2ban.
pub const API_AUTH_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/auth.log"); pub const API_AUTH_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/auth.log");

View File

@ -509,7 +509,7 @@ impl BackupWriter {
} }
// We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other // We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other
// funciton in the same path is `wid`, so those 3 could be in a struct, but there's no real use // function in the same path is `wid`, so those 3 could be in a struct, but there's no real use
// since this is a private method. // since this is a private method.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
fn upload_chunk_info_stream( fn upload_chunk_info_stream(

View File

@ -86,7 +86,7 @@ impl tower_service::Service<Uri> for VsockConnector {
Ok(connection) Ok(connection)
}) })
// unravel the thread JoinHandle to a useable future // unravel the thread JoinHandle to a usable future
.map(|res| match res { .map(|res| match res {
Ok(res) => res, Ok(res) => res,
Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)), Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),

View File

@ -82,7 +82,7 @@ pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
Ok(()) Ok(())
} }
// parse ip address with otional cidr mask // parse ip address with optional cidr mask
pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), Error> { pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), Error> {
lazy_static! { lazy_static! {

View File

@ -4,10 +4,10 @@
//! indexed by key fingerprint. //! indexed by key fingerprint.
//! //!
//! We store the plain key (unencrypted), as well as a encrypted //! We store the plain key (unencrypted), as well as a encrypted
//! version protected by passowrd (see struct `KeyConfig`) //! version protected by password (see struct `KeyConfig`)
//! //!
//! Tape backups store the password protected version on tape, so that //! Tape backups store the password protected version on tape, so that
//! it is possible to retore the key from tape if you know the //! it is possible to restore the key from tape if you know the
//! password. //! password.
use std::collections::HashMap; use std::collections::HashMap;

View File

@ -590,7 +590,7 @@ impl TfaUserChallengeData {
} }
/// Save the current data. Note that we do not replace the file here since we lock the file /// Save the current data. Note that we do not replace the file here since we lock the file
/// itself, as it is in `/run`, and the typicall error case for this particular situation /// itself, as it is in `/run`, and the typical error case for this particular situation
/// (machine loses power) simply prevents some login, but that'll probably fail anyway for /// (machine loses power) simply prevents some login, but that'll probably fail anyway for
/// other reasons then... /// other reasons then...
/// ///

View File

@ -43,7 +43,7 @@ Deduplication Factor: {{deduplication-factor}}
Garbage collection successful. Garbage collection successful.
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{datastore}}> <https://{{fqdn}}:{{port}}/#DataStore-{{datastore}}>
@ -57,7 +57,7 @@ Datastore: {{datastore}}
Garbage collection failed: {{error}} Garbage collection failed: {{error}}
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks> <https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
@ -71,7 +71,7 @@ Datastore: {{job.store}}
Verification successful. Verification successful.
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}> <https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>
@ -89,7 +89,7 @@ Verification failed on these snapshots/groups:
{{/each}} {{/each}}
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks> <https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
@ -105,7 +105,7 @@ Remote Store: {{job.remote-store}}
Synchronization successful. Synchronization successful.
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}> <https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>
@ -121,7 +121,7 @@ Remote Store: {{job.remote-store}}
Synchronization failed: {{error}} Synchronization failed: {{error}}
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks> <https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
@ -152,7 +152,7 @@ Tape Drive: {{job.drive}}
Tape Backup successful. Tape Backup successful.
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}> <https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>
@ -171,7 +171,7 @@ Tape Drive: {{job.drive}}
Tape Backup failed: {{error}} Tape Backup failed: {{error}}
Please visit the web interface for futher details: Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks> <https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>

View File

@ -749,7 +749,7 @@ impl WorkerTask {
match data.abort_listeners.pop() { match data.abort_listeners.pop() {
None => { break; }, None => { break; },
Some(ch) => { Some(ch) => {
let _ = ch.send(()); // ignore erros here let _ = ch.send(()); // ignore errors here
}, },
} }
} }

View File

@ -35,7 +35,7 @@ use crate::api2::types::{
/// Changer element status. /// Changer element status.
/// ///
/// Drive and slots may be `Empty`, or contain some media, either /// Drive and slots may be `Empty`, or contain some media, either
/// with knwon volume tag `VolumeTag(String)`, or without (`Full`). /// with known volume tag `VolumeTag(String)`, or without (`Full`).
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub enum ElementStatus { pub enum ElementStatus {
Empty, Empty,
@ -87,7 +87,7 @@ pub struct MtxStatus {
pub drives: Vec<DriveStatus>, pub drives: Vec<DriveStatus>,
/// List of known storage slots /// List of known storage slots
pub slots: Vec<StorageElementStatus>, pub slots: Vec<StorageElementStatus>,
/// Tranport elements /// Transport elements
/// ///
/// Note: Some libraries do not report transport elements. /// Note: Some libraries do not report transport elements.
pub transports: Vec<TransportElementStatus>, pub transports: Vec<TransportElementStatus>,
@ -261,7 +261,7 @@ pub trait MediaChange {
/// List online media labels (label_text/barcodes) /// List online media labels (label_text/barcodes)
/// ///
/// List acessible (online) label texts. This does not include /// List accessible (online) label texts. This does not include
/// media inside import-export slots or cleaning media. /// media inside import-export slots or cleaning media.
fn online_media_label_texts(&mut self) -> Result<Vec<String>, Error> { fn online_media_label_texts(&mut self) -> Result<Vec<String>, Error> {
let status = self.status()?; let status = self.status()?;
@ -378,7 +378,7 @@ pub trait MediaChange {
/// Unload media to a free storage slot /// Unload media to a free storage slot
/// ///
/// If posible to the slot it was previously loaded from. /// If possible to the slot it was previously loaded from.
/// ///
/// Note: This method consumes status - so please use returned status afterward. /// Note: This method consumes status - so please use returned status afterward.
fn unload_to_free_slot(&mut self, status: MtxStatus) -> Result<MtxStatus, Error> { fn unload_to_free_slot(&mut self, status: MtxStatus) -> Result<MtxStatus, Error> {

View File

@ -1,4 +1,4 @@
//! Wrapper around expernal `mtx` command line tool //! Wrapper around external `mtx` command line tool
mod parse_mtx_status; mod parse_mtx_status;
pub use parse_mtx_status::*; pub use parse_mtx_status::*;

View File

@ -246,7 +246,7 @@ pub fn unload(
Ok(()) Ok(())
} }
/// Tranfer medium from one storage slot to another /// Transfer medium from one storage slot to another
pub fn transfer_medium<F: AsRawFd>( pub fn transfer_medium<F: AsRawFd>(
file: &mut F, file: &mut F,
from_slot: u64, from_slot: u64,
@ -362,7 +362,7 @@ pub fn read_element_status<F: AsRawFd>(file: &mut F) -> Result<MtxStatus, Error>
bail!("got wrong number of import/export elements"); bail!("got wrong number of import/export elements");
} }
if (setup.transfer_element_count as usize) != drives.len() { if (setup.transfer_element_count as usize) != drives.len() {
bail!("got wrong number of tranfer elements"); bail!("got wrong number of transfer elements");
} }
// create same virtual slot order as mtx(1) // create same virtual slot order as mtx(1)
@ -428,7 +428,7 @@ struct SubHeader {
element_type_code: u8, element_type_code: u8,
flags: u8, flags: u8,
descriptor_length: u16, descriptor_length: u16,
reseved: u8, reserved: u8,
byte_count_of_descriptor_data_available: [u8;3], byte_count_of_descriptor_data_available: [u8;3],
} }

View File

@ -196,7 +196,7 @@ struct SspDataEncryptionCapabilityPage {
page_code: u16, page_code: u16,
page_len: u16, page_len: u16,
extdecc_cfgp_byte: u8, extdecc_cfgp_byte: u8,
reserverd: [u8; 15], reserved: [u8; 15],
} }
#[derive(Endian)] #[derive(Endian)]
@ -241,13 +241,13 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result<u8, Error> {
let desc: SspDataEncryptionAlgorithmDescriptor = let desc: SspDataEncryptionAlgorithmDescriptor =
unsafe { reader.read_be_value()? }; unsafe { reader.read_be_value()? };
if desc.descriptor_len != 0x14 { if desc.descriptor_len != 0x14 {
bail!("got wrong key descriptior len"); bail!("got wrong key descriptor len");
} }
if (desc.control_byte_4 & 0b00000011) != 2 { if (desc.control_byte_4 & 0b00000011) != 2 {
continue; // cant encrypt in hardware continue; // can't encrypt in hardware
} }
if ((desc.control_byte_4 & 0b00001100) >> 2) != 2 { if ((desc.control_byte_4 & 0b00001100) >> 2) != 2 {
continue; // cant decrypt in hardware continue; // can't decrypt in hardware
} }
if desc.algorithm_code == 0x00010014 && desc.key_size == 32 { if desc.algorithm_code == 0x00010014 && desc.key_size == 32 {
aes_cgm_index = Some(desc.algorythm_index); aes_cgm_index = Some(desc.algorythm_index);
@ -276,7 +276,7 @@ struct SspDataEncryptionStatusPage {
control_byte: u8, control_byte: u8,
key_format: u8, key_format: u8,
key_len: u16, key_len: u16,
reserverd: [u8; 8], reserved: [u8; 8],
} }
fn decode_spin_data_encryption_status(data: &[u8]) -> Result<DataEncryptionStatus, Error> { fn decode_spin_data_encryption_status(data: &[u8]) -> Result<DataEncryptionStatus, Error> {

View File

@ -72,14 +72,14 @@ static MAM_ATTRIBUTES: &[ (u16, u16, MamFormat, &str) ] = &[
(0x08_02, 8, MamFormat::ASCII, "Application Version"), (0x08_02, 8, MamFormat::ASCII, "Application Version"),
(0x08_03, 160, MamFormat::ASCII, "User Medium Text Label"), (0x08_03, 160, MamFormat::ASCII, "User Medium Text Label"),
(0x08_04, 12, MamFormat::ASCII, "Date And Time Last Written"), (0x08_04, 12, MamFormat::ASCII, "Date And Time Last Written"),
(0x08_05, 1, MamFormat::BINARY, "Text Localization Identifer"), (0x08_05, 1, MamFormat::BINARY, "Text Localization Identifier"),
(0x08_06, 32, MamFormat::ASCII, "Barcode"), (0x08_06, 32, MamFormat::ASCII, "Barcode"),
(0x08_07, 80, MamFormat::ASCII, "Owning Host Textual Name"), (0x08_07, 80, MamFormat::ASCII, "Owning Host Textual Name"),
(0x08_08, 160, MamFormat::ASCII, "Media Pool"), (0x08_08, 160, MamFormat::ASCII, "Media Pool"),
(0x08_0B, 16, MamFormat::ASCII, "Application Format Version"), (0x08_0B, 16, MamFormat::ASCII, "Application Format Version"),
(0x08_0C, 50, MamFormat::ASCII, "Volume Coherency Information"), (0x08_0C, 50, MamFormat::ASCII, "Volume Coherency Information"),
(0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifer"), (0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifier"),
(0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifer"), (0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifier"),
(0x10_00, 28, MamFormat::BINARY, "Unique Cartridge Identify (UCI)"), (0x10_00, 28, MamFormat::BINARY, "Unique Cartridge Identify (UCI)"),
(0x10_01, 24, MamFormat::BINARY, "Alternate Unique Cartridge Identify (Alt-UCI)"), (0x10_01, 24, MamFormat::BINARY, "Alternate Unique Cartridge Identify (Alt-UCI)"),

View File

@ -209,7 +209,7 @@ pub trait TapeDriver {
/// Set or clear encryption key /// Set or clear encryption key
/// ///
/// We use the media_set_uuid to XOR the secret key with the /// We use the media_set_uuid to XOR the secret key with the
/// uuid (first 16 bytes), so that each media set uses an uique /// uuid (first 16 bytes), so that each media set uses an unique
/// key for encryption. /// key for encryption.
fn set_encryption( fn set_encryption(
&mut self, &mut self,
@ -465,7 +465,7 @@ pub fn request_and_load_media(
} }
} }
/// Aquires an exclusive lock for the tape device /// Acquires an exclusive lock for the tape device
/// ///
/// Basically calls lock_device_path() using the configured drive path. /// Basically calls lock_device_path() using the configured drive path.
pub fn lock_tape_device( pub fn lock_tape_device(
@ -539,7 +539,7 @@ fn tape_device_path(
pub struct DeviceLockGuard(std::fs::File); pub struct DeviceLockGuard(std::fs::File);
// Aquires an exclusive lock on `device_path` // Acquires an exclusive lock on `device_path`
// //
// Uses systemd escape_unit to compute a file name from `device_path`, the try // Uses systemd escape_unit to compute a file name from `device_path`, the try
// to lock `/var/lock/<name>`. // to lock `/var/lock/<name>`.

View File

@ -429,7 +429,7 @@ impl MediaChange for VirtualTapeHandle {
} }
fn transfer_media(&mut self, _from: u64, _to: u64) -> Result<MtxStatus, Error> { fn transfer_media(&mut self, _from: u64, _to: u64) -> Result<MtxStatus, Error> {
bail!("media tranfer is not implemented!"); bail!("media transfer is not implemented!");
} }
fn export_media(&mut self, _label_text: &str) -> Result<Option<u64>, Error> { fn export_media(&mut self, _label_text: &str) -> Result<Option<u64>, Error> {

View File

@ -77,7 +77,7 @@ impl <R: Read> BlockedReader<R> {
if seq_nr != buffer.seq_nr() { if seq_nr != buffer.seq_nr() {
proxmox::io_bail!( proxmox::io_bail!(
"detected tape block with wrong seqence number ({} != {})", "detected tape block with wrong sequence number ({} != {})",
seq_nr, buffer.seq_nr()) seq_nr, buffer.seq_nr())
} }

View File

@ -25,7 +25,7 @@ use crate::tape::{
/// ///
/// A chunk archive consists of a `MediaContentHeader` followed by a /// A chunk archive consists of a `MediaContentHeader` followed by a
/// list of chunks entries. Each chunk entry consists of a /// list of chunks entries. Each chunk entry consists of a
/// `ChunkArchiveEntryHeader` folowed by the chunk data (`DataBlob`). /// `ChunkArchiveEntryHeader` followed by the chunk data (`DataBlob`).
/// ///
/// `| MediaContentHeader | ( ChunkArchiveEntryHeader | DataBlob )* |` /// `| MediaContentHeader | ( ChunkArchiveEntryHeader | DataBlob )* |`
pub struct ChunkArchiveWriter<'a> { pub struct ChunkArchiveWriter<'a> {
@ -153,7 +153,7 @@ impl <R: Read> ChunkArchiveDecoder<R> {
Self { reader } Self { reader }
} }
/// Allow access to the underyling reader /// Allow access to the underlying reader
pub fn reader(&self) -> &R { pub fn reader(&self) -> &R {
&self.reader &self.reader
} }

View File

@ -21,7 +21,7 @@ use crate::tape::{
/// ///
/// This ignores file attributes like ACLs and xattrs. /// This ignores file attributes like ACLs and xattrs.
/// ///
/// Returns `Ok(Some(content_uuid))` on succees, and `Ok(None)` if /// Returns `Ok(Some(content_uuid))` on success, and `Ok(None)` if
/// `LEOM` was detected before all data was written. The stream is /// `LEOM` was detected before all data was written. The stream is
/// marked inclomplete in that case and does not contain all data (The /// marked inclomplete in that case and does not contain all data (The
/// backup task must rewrite the whole file on the next media). /// backup task must rewrite the whole file on the next media).

View File

@ -85,7 +85,7 @@ impl SnapshotReader {
Ok(file) Ok(file)
} }
/// Retunrs an iterator for all used chunks. /// Returns an iterator for all used chunks.
pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> { pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> {
SnapshotChunkIterator::new(&self) SnapshotChunkIterator::new(&self)
} }

View File

@ -561,7 +561,7 @@ impl Inventory {
// Helpers to simplify testing // Helpers to simplify testing
/// Genreate and insert a new free tape (test helper) /// Generate and insert a new free tape (test helper)
pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid { pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid {
let label = MediaLabel { let label = MediaLabel {
@ -576,7 +576,7 @@ impl Inventory {
uuid uuid
} }
/// Genreate and insert a new tape assigned to a specific pool /// Generate and insert a new tape assigned to a specific pool
/// (test helper) /// (test helper)
pub fn generate_assigned_tape( pub fn generate_assigned_tape(
&mut self, &mut self,
@ -600,7 +600,7 @@ impl Inventory {
uuid uuid
} }
/// Genreate and insert a used tape (test helper) /// Generate and insert a used tape (test helper)
pub fn generate_used_tape( pub fn generate_used_tape(
&mut self, &mut self,
label_text: &str, label_text: &str,

View File

@ -3,7 +3,7 @@
//! A set of backup medias. //! A set of backup medias.
//! //!
//! This struct manages backup media state during backup. The main //! This struct manages backup media state during backup. The main
//! purpose is to allocate media sets and assing new tapes to it. //! purpose is to allocate media sets and assign new tapes to it.
//! //!
//! //!
@ -137,7 +137,7 @@ impl MediaPool {
&self.name &self.name
} }
/// Retruns encryption settings /// Returns encryption settings
pub fn encrypt_fingerprint(&self) -> Option<Fingerprint> { pub fn encrypt_fingerprint(&self) -> Option<Fingerprint> {
self.encrypt_fingerprint.clone() self.encrypt_fingerprint.clone()
} }
@ -286,7 +286,7 @@ impl MediaPool {
Ok(list) Ok(list)
} }
// tests if the media data is considered as expired at sepcified time // tests if the media data is considered as expired at specified time
pub fn media_is_expired(&self, media: &BackupMedia, current_time: i64) -> bool { pub fn media_is_expired(&self, media: &BackupMedia, current_time: i64) -> bool {
if media.status() != &MediaStatus::Full { if media.status() != &MediaStatus::Full {
return false; return false;

View File

@ -48,7 +48,7 @@ impl MediaSet {
let seq_nr = seq_nr as usize; let seq_nr = seq_nr as usize;
if self.media_list.len() > seq_nr { if self.media_list.len() > seq_nr {
if self.media_list[seq_nr].is_some() { if self.media_list[seq_nr].is_some() {
bail!("found duplicate squence number in media set '{}/{}'", bail!("found duplicate sequence number in media set '{}/{}'",
self.uuid.to_string(), seq_nr); self.uuid.to_string(), seq_nr);
} }
} else { } else {

View File

@ -271,7 +271,7 @@ impl PoolWriter {
} }
} }
/// Move to EOM (if not aleady there), then creates a new snapshot /// Move to EOM (if not already there), then creates a new snapshot
/// archive writing specified files (as .pxar) into it. On /// archive writing specified files (as .pxar) into it. On
/// success, this return 'Ok(true)' and the media catalog gets /// success, this return 'Ok(true)' and the media catalog gets
/// updated. /// updated.
@ -330,7 +330,7 @@ impl PoolWriter {
Ok((done, bytes_written)) Ok((done, bytes_written))
} }
/// Move to EOM (if not aleady there), then creates a new chunk /// Move to EOM (if not already there), then creates a new chunk
/// archive and writes chunks from 'chunk_iter'. This stops when /// archive and writes chunks from 'chunk_iter'. This stops when
/// it detect LEOM or when we reach max archive size /// it detect LEOM or when we reach max archive size
/// (4GB). Written chunks are registered in the media catalog. /// (4GB). Written chunks are registered in the media catalog.

View File

@ -67,7 +67,7 @@ pub trait TapeWrite {
/// ///
/// See: https://github.com/torvalds/linux/blob/master/Documentation/scsi/st.rst /// See: https://github.com/torvalds/linux/blob/master/Documentation/scsi/st.rst
/// ///
/// On sucess, this returns if we en countered a EOM condition. /// On success, this returns if we en countered a EOM condition.
pub fn tape_device_write_block<W: Write>( pub fn tape_device_write_block<W: Write>(
writer: &mut W, writer: &mut W,
data: &[u8], data: &[u8],

View File

@ -173,7 +173,7 @@ fn test_alloc_writable_media_4() -> Result<(), Error> {
// next call fail because there is no free media // next call fail because there is no free media
assert!(pool.alloc_writable_media(start_time + 5).is_err()); assert!(pool.alloc_writable_media(start_time + 5).is_err());
// Create new nedia set, so that previous set can expire // Create new media set, so that previous set can expire
pool.start_write_session(start_time + 10)?; pool.start_write_session(start_time + 10)?;
assert!(pool.alloc_writable_media(start_time + 10).is_err()); assert!(pool.alloc_writable_media(start_time + 10).is_err());

View File

@ -302,7 +302,7 @@ impl<K, V> LinkedList<K, V> {
} }
} }
/// Remove the node referenced by `node_ptr` from the linke list and return it. /// Remove the node referenced by `node_ptr` from the linked list and return it.
fn remove(&mut self, node_ptr: *mut CacheNode<K, V>) -> Box<CacheNode<K, V>> { fn remove(&mut self, node_ptr: *mut CacheNode<K, V>) -> Box<CacheNode<K, V>> {
let node = unsafe { Box::from_raw(node_ptr) }; let node = unsafe { Box::from_raw(node_ptr) };

View File

@ -138,10 +138,10 @@ impl<I: Send + 'static> ParallelHandler<I> {
if let Err(panic) = handle.join() { if let Err(panic) = handle.join() {
match panic.downcast::<&str>() { match panic.downcast::<&str>() {
Ok(panic_msg) => msg_list.push( Ok(panic_msg) => msg_list.push(
format!("thread {} ({}) paniced: {}", self.name, i, panic_msg) format!("thread {} ({}) panicked: {}", self.name, i, panic_msg)
), ),
Err(_) => msg_list.push( Err(_) => msg_list.push(
format!("thread {} ({}) paniced", self.name, i) format!("thread {} ({}) panicked", self.name, i)
), ),
} }
} }

View File

@ -4,7 +4,7 @@
//! //!
//! See: `/usr/include/scsi/sg_pt.h` //! See: `/usr/include/scsi/sg_pt.h`
//! //!
//! The SCSI Commands Reference Manual also contains some usefull information. //! The SCSI Commands Reference Manual also contains some useful information.
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::ptr::NonNull; use std::ptr::NonNull;

View File

@ -210,7 +210,7 @@ fn test_parse_register_response() -> Result<(), Error> {
Ok(()) Ok(())
} }
/// querys the up to date subscription status and parses the response /// queries the up to date subscription status and parses the response
pub fn check_subscription(key: String, server_id: String) -> Result<SubscriptionInfo, Error> { pub fn check_subscription(key: String, server_id: String) -> Result<SubscriptionInfo, Error> {
let now = proxmox::tools::time::epoch_i64(); let now = proxmox::tools::time::epoch_i64();
@ -299,7 +299,7 @@ pub fn delete_subscription() -> Result<(), Error> {
Ok(()) Ok(())
} }
/// updates apt authenification for repo access /// updates apt authentication for repo access
pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<(), Error> { pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<(), Error> {
let auth_conf = std::path::Path::new(APT_AUTH_FN); let auth_conf = std::path::Path::new(APT_AUTH_FN);
match (key, password) { match (key, password) {