clippy: is_some/none/ok/err/empty
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
parent 397356096a
commit 3984a5fd77
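This commit mechanically applies two families of clippy suggestions across the tree: clippy::redundant_pattern_matching, which flags `if let Some(_) = x` / `if let Ok(_) = x` / `if let Err(_) = x` / `if let None = x` where `x.is_some()`, `x.is_ok()`, `x.is_err()` or `x.is_none()` states the intent directly, and clippy::len_zero, which flags comparisons of `len()` against zero where `is_empty()` is clearer. A minimal before/after sketch (variable names are illustrative, not taken from the patch):

    fn main() {
        let target: Option<&str> = None;
        let buffer: Vec<u8> = Vec::new();

        // Before: redundant_pattern_matching fires on the dummy binding ...
        if let Some(_) = target { println!("have a target"); }
        // ... and len_zero on the comparison against zero.
        if buffer.len() > 0 { println!("buffer has data"); }

        // After: the same checks, stated directly.
        if target.is_some() { println!("have a target"); }
        if !buffer.is_empty() { println!("buffer has data"); }
    }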
@@ -72,7 +72,7 @@ fn extract_acl_node_data(
         }
     }
     for (group, roles) in &node.groups {
-        if let Some(_) = token_user {
+        if token_user.is_some() {
             continue;
         }
@@ -210,7 +210,7 @@ pub fn update_acl(

     let top_level_privs = user_info.lookup_privs(&current_auth_id, &["access", "acl"]);
     if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
-        if let Some(_) = group {
+        if group.is_some() {
             bail!("Unprivileged users are not allowed to create group ACL item.");
         }
@@ -230,7 +230,7 @@ pub fn create_user(

     let (mut config, _digest) = user::config()?;

-    if let Some(_) = config.sections.get(user.userid.as_str()) {
+    if config.sections.get(user.userid.as_str()).is_some() {
         bail!("user '{}' already exists.", user.userid);
     }
@@ -595,7 +595,7 @@ pub fn generate_token(
     let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
     let tokenid_string = tokenid.to_string();

-    if let Some(_) = config.sections.get(&tokenid_string) {
+    if config.sections.get(&tokenid_string).is_some() {
         bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
     }
@@ -711,7 +711,7 @@ pub fn verify(

             verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
         };
-        if failed_dirs.len() > 0 {
+        if !failed_dirs.is_empty() {
             worker.log("Failed to verify the following snapshots/groups:");
             for dir in failed_dirs {
                 worker.log(format!("\t{}", dir));
@@ -1341,7 +1341,7 @@ fn catalog(

     if filepath != "root" {
         components = base64::decode(filepath)?;
-        if components.len() > 0 && components[0] == '/' as u8 {
+        if !components.is_empty() && components[0] == b'/' {
             components.remove(0);
         }
         for component in components.split(|c| *c == '/' as u8) {
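Besides the emptiness check, this hunk also replaces the cast `'/' as u8` with the byte literal `b'/'` in the changed line; both denote the same value (ASCII 47), the literal just says so without a cast. A small standalone check, with a made-up input:

    fn main() {
        assert_eq!(b'/', '/' as u8);

        let components = b"/etc/hosts".to_vec();
        // the patched form of the leading-slash test:
        if !components.is_empty() && components[0] == b'/' {
            println!("absolute path");
        }
    }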
@@ -1487,7 +1487,7 @@ fn pxar_file_download(
     check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

     let mut components = base64::decode(&filepath)?;
-    if components.len() > 0 && components[0] == '/' as u8 {
+    if !components.is_empty() && components[0] == b'/' {
         components.remove(0);
     }
@@ -465,7 +465,7 @@ impl BackupEnvironment {
         state.ensure_unfinished()?;

         // test if all writer are correctly closed
-        if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
+        if !state.dynamic_writers.is_empty() || !state.fixed_writers.is_empty() {
             bail!("found open index writer - unable to finish backup");
         }
@@ -124,7 +124,7 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {

     let (mut config, _digest) = datastore::config()?;

-    if let Some(_) = config.sections.get(&datastore.name) {
+    if config.sections.get(&datastore.name).is_some() {
         bail!("datastore '{}' already exists.", datastore.name);
     }
@@ -102,7 +102,7 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

     let (mut config, _digest) = remote::config()?;

-    if let Some(_) = config.sections.get(&remote.name) {
+    if config.sections.get(&remote.name).is_some() {
         bail!("remote '{}' already exists.", remote.name);
     }
@@ -161,7 +161,7 @@ pub fn create_sync_job(

     let (mut config, _digest) = sync::config()?;

-    if let Some(_) = config.sections.get(&sync_job.id) {
+    if config.sections.get(&sync_job.id).is_some() {
         bail!("job '{}' already exists.", sync_job.id);
     }
@@ -106,7 +106,7 @@ pub fn create_verification_job(

     let (mut config, _digest) = verify::config()?;

-    if let Some(_) = config.sections.get(&verification_job.id) {
+    if config.sections.get(&verification_job.id).is_some() {
         bail!("job '{}' already exists.", verification_job.id);
     }
@@ -196,7 +196,7 @@ fn apt_get_changelog(
         }
     }, Some(&name));

-    if pkg_info.len() == 0 {
+    if pkg_info.is_empty() {
         bail!("Package '{}' not found", name);
     }
@@ -513,7 +513,7 @@ pub fn list_tasks(
         .collect();

     let mut count = result.len() + start as usize;
-    if result.len() > 0 && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
+    if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
         count += 1;
     }
@@ -747,11 +747,9 @@ pub fn update_inventory(

         let label_text = label_text.to_string();

-        if !read_all_labels.unwrap_or(false) {
-            if let Some(_) = inventory.find_media_by_label_text(&label_text) {
-                worker.log(format!("media '{}' already inventoried", label_text));
-                continue;
-            }
+        if !read_all_labels.unwrap_or(false) && inventory.find_media_by_label_text(&label_text).is_some() {
+            worker.log(format!("media '{}' already inventoried", label_text));
+            continue;
         }

         if let Err(err) = changer.load_media(&label_text) {
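This hunk goes one step further than the others: once the inner `if let Some(_)` becomes an `.is_some()` expression, the two nested `if`s can merge into a single `&&` condition. Because `&&` short-circuits, `find_media_by_label_text` still only runs when `read_all_labels` is not set, so behavior is unchanged. A sketch with stand-in types:

    fn find_media(label: &str) -> Option<u32> {
        if label == "known" { Some(1) } else { None }
    }

    fn main() {
        let read_all_labels = false;
        let label_text = "known";
        // collapsed form: the lookup only runs when the first operand is true
        if !read_all_labels && find_media(label_text).is_some() {
            println!("media '{}' already inventoried", label_text);
        }
    }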
@@ -1077,7 +1077,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
     ];

     for fingerprint in invalid_fingerprints.iter() {
-        if let Ok(_) = parse_simple_value(fingerprint, &schema) {
+        if parse_simple_value(fingerprint, &schema).is_ok() {
             bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
         }
     }
@@ -1118,7 +1118,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];

     for name in invalid_user_ids.iter() {
-        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
+        if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
             bail!("test userid '{}' failed - got Ok() while exception an error.", name);
         }
     }
@@ -401,7 +401,7 @@ impl ChunkStore {
         file.write_all(raw_data)?;

         if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
-            if let Err(_) = std::fs::remove_file(&tmp_path) { /* ignore */ }
+            if std::fs::remove_file(&tmp_path).is_err() { /* ignore */ }
             bail!(
                 "Atomic rename on store '{}' failed for chunk {} - {}",
                 self.name,
@@ -59,7 +59,7 @@ where
                 }
                 None => {
                     this.scan_pos = 0;
-                    if this.buffer.len() > 0 {
+                    if !this.buffer.is_empty() {
                         return Poll::Ready(Some(Ok(this.buffer.split())));
                     } else {
                         return Poll::Ready(None);
@@ -111,7 +111,7 @@ where
                 }
                 None => {
                     // last chunk can have any size
-                    if this.buffer.len() > 0 {
+                    if !this.buffer.is_empty() {
                         return Poll::Ready(Some(Ok(this.buffer.split())));
                     } else {
                         return Poll::Ready(None);
@@ -36,7 +36,7 @@ impl <R: BufRead> CryptReader<R> {
 impl <R: BufRead> Read for CryptReader<R> {

     fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        if self.small_read_buf.len() > 0 {
+        if !self.small_read_buf.is_empty() {
             let max = if self.small_read_buf.len() > buf.len() { buf.len() } else { self.small_read_buf.len() };
             let rest = self.small_read_buf.split_off(max);
             buf[..max].copy_from_slice(&self.small_read_buf);
@@ -50,7 +50,7 @@ impl <R: BufRead> Read for CryptReader<R> {
         if buf.len() <= 2*self.block_size {
             let mut outbuf = [0u8; 1024];

-            let count = if data.len() == 0 { // EOF
+            let count = if data.is_empty() { // EOF
                 let written = self.crypter.finalize(&mut outbuf)?;
                 self.finalized = true;
                 written
@@ -72,7 +72,7 @@ impl <R: BufRead> Read for CryptReader<R> {
                 buf[..count].copy_from_slice(&outbuf[..count]);
                 Ok(count)
             }
-        } else if data.len() == 0 { // EOF
+        } else if data.is_empty() { // EOF
             let rest = self.crypter.finalize(buf)?;
             self.finalized = true;
             Ok(rest)
@@ -26,7 +26,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (

     for info in list {
         let backup_id = info.backup_dir.relative_path();
-        if let Some(_) = mark.get(&backup_id) { continue; }
+        if mark.get(&backup_id).is_some() { continue; }
         let sel_id: String = select_id(&info)?;

         if already_included.contains(&sel_id) { continue; }
@@ -218,10 +218,8 @@ fn accept_connections(

         match result {
             Ok(Ok(())) => {
-                if let Err(_) = sender.send(Ok(stream)).await {
-                    if debug {
-                        eprintln!("detect closed connection channel");
-                    }
-                }
+                if sender.send(Ok(stream)).await.is_err() && debug {
+                    eprintln!("detect closed connection channel");
+                }
             }
             Ok(Err(err)) => {
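The same collapse happens here with `is_err()`: the send result is gated together with the `debug` flag. Evaluation order matters — `sender.send(...)` is the left operand of `&&`, so the send itself still always executes; only the log message depends on `debug`. A minimal sketch using a std mpsc channel (the patch itself uses an async sender):

    use std::sync::mpsc;

    fn main() {
        let debug = true;
        let (sender, receiver) = mpsc::channel::<u32>();
        drop(receiver); // close the channel so send() fails

        if sender.send(42).is_err() && debug {
            eprintln!("detect closed connection channel");
        }
    }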
@@ -189,12 +189,12 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
     };

     let server_archive_name = if archive_name.ends_with(".pxar") {
-        if let None = target {
+        if target.is_none() {
             bail!("use the 'mount' command to mount pxar archives");
         }
         format!("{}.didx", archive_name)
     } else if archive_name.ends_with(".img") {
-        if let Some(_) = target {
+        if target.is_some() {
             bail!("use the 'map' command to map drive images");
         }
         format!("{}.fidx", archive_name)
@@ -219,7 +219,7 @@ pub fn insert_key(key: [u8;32], key_config: KeyConfig, hint: String) -> Result<(
         None => bail!("missing encryption key fingerprint - internal error"),
     };

-    if let Some(_) = config_map.get(&fingerprint) {
+    if config_map.get(&fingerprint).is_some() {
         bail!("encryption key '{}' already exists.", fingerprint);
     }
@@ -48,7 +48,7 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
         return Ok(WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id));
     }

-    if !procfs::check_process_running_pstart(upid.pid, upid.pstart).is_some() {
+    if procfs::check_process_running_pstart(upid.pid, upid.pstart).is_none() {
         return Ok(false);
     }
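Here the double negation `!opt.is_some()` becomes `opt.is_none()`; the two are equivalent, the latter just reads as a positive statement of the condition:

    fn main() {
        let running: Option<u64> = None;
        assert_eq!(!running.is_some(), running.is_none());
        if running.is_none() {
            println!("process no longer running");
        }
    }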
@@ -191,7 +191,7 @@ pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
     file.read_to_end(&mut data)?;

     // task logs should end with newline, we do not want it here
-    if data.len() > 0 && data[data.len()-1] == b'\n' {
+    if !data.is_empty() && data[data.len()-1] == b'\n' {
         data.pop();
     }
@@ -270,7 +270,7 @@ impl TaskState {
         } else if let Some(warnings) = s.strip_prefix("WARNINGS: ") {
             let count: u64 = warnings.parse()?;
             Ok(TaskState::Warning{ count, endtime })
-        } else if s.len() > 0 {
+        } else if !s.is_empty() {
             let message = if let Some(err) = s.strip_prefix("ERROR: ") { err } else { s }.to_string();
             Ok(TaskState::Error{ message, endtime })
         } else {
@@ -113,7 +113,7 @@ impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
         abort_chan: Receiver<()>,
     ) -> Result<(), Error> {

-        if let None = self.session {
+        if self.session.is_none() {
             panic!("internal error: fuse_loop::main called before ::map_loop");
         }
         let mut session = self.session.take().unwrap().fuse();
@@ -236,7 +236,7 @@ pub fn cleanup_unused_run_files(filter_name: Option<String>) {

                 // clean leftover FUSE instances (e.g. user called 'losetup -d' or similar)
                 // does nothing if files are already stagnant (e.g. instance crashed etc...)
-                if let Ok(_) = unmap_from_backing(&path, None) {
+                if unmap_from_backing(&path, None).is_ok() {
                     // we have reaped some leftover instance, tell the user
                     eprintln!(
                         "Cleaned up dangling mapping '{}': no loop device assigned",
@@ -13,7 +13,7 @@ fn verify_object_schema(schema: &ObjectSchema) -> Result<(), Error> {

     let map = schema.properties;

-    if map.len() >= 1 {
+    if !map.is_empty() {

         for i in 1..map.len() {
@@ -125,7 +125,7 @@ fn verify_dirmap(
     dirmap: SubdirMap,
 ) -> Result<(), Error> {

-    if dirmap.len() >= 1 {
+    if !dirmap.is_empty() {

         for i in 1..dirmap.len() {
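The last two hunks use the remaining variant of the lint: `map.len() >= 1` is the same test as `!map.is_empty()`. A quick equivalence check on a slice (contents are made up):

    fn main() {
        let map: &[(&str, u32)] = &[("one", 1)];
        assert_eq!(map.len() >= 1, !map.is_empty());
        if !map.is_empty() {
            for i in 1..map.len() {
                let _ = &map[i]; // the patched loops iterate from index 1 here
            }
        }
    }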