Merge branch 'master' of ssh://proxdev.maurer-it.com/rust/proxmox-backup

Dietmar Maurer
2021-01-21 10:56:52 +01:00
122 changed files with 521 additions and 614 deletions

View File

@ -72,7 +72,7 @@ fn extract_acl_node_data(
}
}
for (group, roles) in &node.groups {
if let Some(_) = token_user {
if token_user.is_some() {
continue;
}
@ -210,7 +210,7 @@ pub fn update_acl(
let top_level_privs = user_info.lookup_privs(&current_auth_id, &["access", "acl"]);
if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
if let Some(_) = group {
if group.is_some() {
bail!("Unprivileged users are not allowed to create group ACL item.");
}
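
The two hunks above apply clippy's redundant_pattern_matching lint: a wildcard `if let Some(_) = x` only tests for presence, so x.is_some() states that directly. A minimal sketch of the pattern, with illustrative names rather than the real types:

    fn main() {
        let token_user: Option<&str> = Some("alice");

        // before: binds a wildcard just to test for presence
        if let Some(_) = token_user {
            println!("has token user");
        }

        // after: says what is actually being asked
        if token_user.is_some() {
            println!("has token user");
        }
    }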

View File

@ -46,7 +46,7 @@ fn list_roles() -> Result<Value, Error> {
let mut priv_list = Vec::new();
for (name, privilege) in PRIVILEGES.iter() {
if privs & privilege > 0 {
priv_list.push(name.clone());
priv_list.push(name);
}
}
list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));

View File

@ -331,13 +331,11 @@ fn list_tfa(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<TfaUser>, Error> {
entries: to_data(data),
});
}
} else {
if let Some(data) = { tfa_data }.remove(authid.user()) {
out.push(TfaUser {
userid: authid.into(),
entries: to_data(data),
});
}
} else if let Some(data) = { tfa_data }.remove(authid.user()) {
out.push(TfaUser {
userid: authid.into(),
entries: to_data(data),
});
}
Ok(out)
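
This hunk is clippy's collapsible_else_if: an else block whose only content is another if (or if let) can merge into else if. A small sketch of the shape:

    fn describe(flag: bool, value: Option<i32>) -> String {
        if flag {
            "flagged".to_string()
        } else if let Some(v) = value { // was: else { if let Some(v) = value { ... } }
            format!("value {}", v)
        } else {
            "empty".to_string()
        }
    }

    fn main() {
        println!("{}", describe(false, Some(3)));
    }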

View File

@ -169,7 +169,7 @@ pub fn list_users(
})
.collect()
} else {
iter.map(|user: user::User| UserWithTokens::new(user))
iter.map(UserWithTokens::new)
.collect()
};
@ -230,7 +230,7 @@ pub fn create_user(
let (mut config, _digest) = user::config()?;
if let Some(_) = config.sections.get(user.userid.as_str()) {
if config.sections.get(user.userid.as_str()).is_some() {
bail!("user '{}' already exists.", user.userid);
}
@ -595,7 +595,7 @@ pub fn generate_token(
let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
let tokenid_string = tokenid.to_string();
if let Some(_) = config.sections.get(&tokenid_string) {
if config.sections.get(&tokenid_string).is_some() {
bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
}
@ -603,7 +603,7 @@ pub fn generate_token(
token_shadow::set_secret(&tokenid, &secret)?;
let token = user::ApiToken {
tokenid: tokenid.clone(),
tokenid,
comment,
enable,
expire,
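
Two cleanups recur in this file: passing a constructor directly instead of wrapping it in a closure (clippy: redundant_closure), and dropping a .clone() when the value is moved on its last use (clippy: redundant_clone). A sketch with a stand-in Wrapper type, not the real UserWithTokens/ApiToken:

    struct Wrapper(String);

    impl Wrapper {
        fn new(name: String) -> Self {
            Wrapper(name)
        }
    }

    fn main() {
        let names = vec!["a".to_string(), "b".to_string()];

        // was: names.into_iter().map(|n| Wrapper::new(n)).collect()
        let wrapped: Vec<Wrapper> = names.into_iter().map(Wrapper::new).collect();

        // was: Wrapper::new(id.clone()) even though id is never used again
        let id = "token".to_string();
        let token = Wrapper::new(id);

        println!("{} {}", wrapped.len(), token.0);
    }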

View File

@ -440,8 +440,8 @@ pub fn list_snapshots (
let files = info
.files
.into_iter()
.map(|x| BackupContent {
filename: x.to_string(),
.map(|filename| BackupContent {
filename,
size: None,
crypt_mode: None,
})
@ -662,11 +662,11 @@ pub fn verify(
_ => bail!("parameters do not specify a backup group or snapshot"),
}
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
worker_type,
Some(worker_id.clone()),
Some(worker_id),
auth_id.clone(),
to_stdout,
move |worker| {
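
The to_stdout rewrite repeated throughout this commit is clippy's needless_bool: an if/else that returns true/false is just the condition itself. A sketch with a stand-in enum for RpcEnvironmentType:

    #[derive(PartialEq)]
    enum EnvType {
        Cli,
        Rest,
    }

    fn main() {
        let env_type = EnvType::Cli;

        // was: let to_stdout = if env_type == EnvType::Cli { true } else { false };
        let to_stdout = env_type == EnvType::Cli;

        println!("to_stdout = {}", to_stdout);
    }
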
@ -711,7 +711,7 @@ pub fn verify(
verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
};
if failed_dirs.len() > 0 {
if !failed_dirs.is_empty() {
worker.log("Failed to verify the following snapshots/groups:");
for dir in failed_dirs {
worker.log(format!("\t{}", dir));
@ -855,7 +855,7 @@ fn prune(
// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;
if keep_all {
worker.log("No prune selection - keeping all files.");
@ -935,7 +935,7 @@ fn start_garbage_collection(
let job = Job::new("garbage_collection", &store)
.map_err(|_| format_err!("garbage collection already running"))?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
.map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
@ -1009,7 +1009,7 @@ fn get_datastore_list(
}
}
Ok(list.into())
Ok(list)
}
#[sortable]
@ -1066,7 +1066,7 @@ fn download_file(
.map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
.map_ok(|bytes| bytes.freeze())
.map_err(move |err| {
eprintln!("error during streaming of '{:?}' - {}", &path, err);
err
@ -1341,10 +1341,10 @@ fn catalog(
if filepath != "root" {
components = base64::decode(filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
if !components.is_empty() && components[0] == b'/' {
components.remove(0);
}
for component in components.split(|c| *c == '/' as u8) {
for component in components.split(|c| *c == b'/') {
if let Some(entry) = catalog_reader.lookup(&current, component)? {
current = entry;
} else {
@ -1357,7 +1357,7 @@ fn catalog(
for direntry in catalog_reader.read_dir(&current)? {
let mut components = components.clone();
components.push('/' as u8);
components.push(b'/');
components.extend(&direntry.name);
let path = base64::encode(components);
let text = String::from_utf8_lossy(&direntry.name);
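
The catalog hunks swap '/' as u8 for the byte literal b'/' (clippy: char_lit_as_u8) and len() > 0 for !is_empty() (clippy: len_zero). Both forms are equivalent; the new ones state the intent at the byte level. A self-contained sketch:

    fn main() {
        let mut components: Vec<u8> = b"/etc/hosts".to_vec();

        // was: components.len() > 0 && components[0] == '/' as u8
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        // was: split(|c| *c == '/' as u8)
        for component in components.split(|c| *c == b'/') {
            println!("{}", String::from_utf8_lossy(component));
        }
    }
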
@ -1487,13 +1487,13 @@ fn pxar_file_download(
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
if !components.is_empty() && components[0] == b'/' {
components.remove(0);
}
let mut split = components.splitn(2, |c| *c == '/' as u8);
let mut split = components.splitn(2, |c| *c == b'/');
let pxar_name = std::str::from_utf8(split.next().unwrap())?;
let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
@ -1520,7 +1520,7 @@ fn pxar_file_download(
let root = decoder.open_root().await?;
let file = root
.lookup(OsStr::from_bytes(file_path)).await?
.ok_or(format_err!("error opening '{:?}'", file_path))?;
.ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;
let body = match file.kind() {
EntryKind::File { .. } => Body::wrap_stream(
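
The ok_or to ok_or_else change above is clippy's or_fun_call lint: ok_or(format_err!(...)) formats the error message even when the Option is Some, while ok_or_else defers that work to the None path. A sketch using plain String errors instead of anyhow:

    fn first_part(path: &str) -> Result<&str, String> {
        path.split('/')
            .next()
            .filter(|s| !s.is_empty())
            // was: .ok_or(format!(...)), which builds the message eagerly
            .ok_or_else(|| format!("filepath looks strange '{}'", path))
    }

    fn main() {
        println!("{:?}", first_part("usr/bin"));
        println!("{:?}", first_part("/usr"));
    }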

View File

@ -58,7 +58,7 @@ pub fn list_sync_jobs(
}
})
.filter(|job: &SyncJobStatus| {
let as_config: SyncJobConfig = job.clone().into();
let as_config: SyncJobConfig = job.into();
check_sync_job_read_access(&user_info, &auth_id, &as_config)
}).collect();
@ -81,13 +81,13 @@ pub fn list_sync_jobs(
job.last_run_state = state;
job.last_run_endtime = endtime;
let last = job.last_run_endtime.unwrap_or_else(|| starttime);
let last = job.last_run_endtime.unwrap_or(starttime);
job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
// ignore errors
compute_next_event(&event, last, false).unwrap_or_else(|_| None)
compute_next_event(&event, last, false).unwrap_or(None)
})();
}
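
The unwrap_or swaps in this and the next file follow one rule: use unwrap_or(v) when the default is already computed and cheap (clippy: unnecessary_lazy_evaluations), and keep unwrap_or_else when building the default costs something. A sketch of both sides:

    fn main() {
        let endtime: Option<i64> = None;
        let starttime: i64 = 1_611_220_000;

        // cheap, already-evaluated default: no closure needed
        let last = endtime.unwrap_or(starttime);

        // allocating default: defer it until actually needed
        let owner: Option<String> = None;
        let owner = owner.unwrap_or_else(|| "root@pam".to_string());

        println!("{} {}", last, owner);
    }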

View File

@ -86,13 +86,13 @@ pub fn list_verification_jobs(
job.last_run_state = state;
job.last_run_endtime = endtime;
let last = job.last_run_endtime.unwrap_or_else(|| starttime);
let last = job.last_run_endtime.unwrap_or(starttime);
job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
// ignore errors
compute_next_event(&event, last, false).unwrap_or_else(|_| None)
compute_next_event(&event, last, false).unwrap_or(None)
})();
}

View File

@ -138,7 +138,7 @@ async move {
}
};
let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
let _last_guard = if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {

View File

@ -465,7 +465,7 @@ impl BackupEnvironment {
state.ensure_unfinished()?;
// test if all writer are correctly closed
if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
if !state.dynamic_writers.is_empty() || !state.fixed_writers.is_empty() {
bail!("found open index writer - unable to finish backup");
}

View File

@ -120,11 +120,11 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;
let (mut config, _digest) = datastore::config()?;
if let Some(_) = config.sections.get(&datastore.name) {
if config.sections.get(&datastore.name).is_some() {
bail!("datastore '{}' already exists.", datastore.name);
}
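
The config-creation hunks drop param.clone() because serde_json::from_value takes its Value by value and param is not used afterwards. A minimal sketch, assuming the serde derive feature and a hypothetical config struct:

    use serde_json::{json, Value};

    #[derive(serde::Deserialize)]
    struct DataStoreConfig {
        name: String,
    }

    fn main() -> Result<(), serde_json::Error> {
        let param: Value = json!({ "name": "store1" });

        // was: serde_json::from_value(param.clone())?
        let datastore: DataStoreConfig = serde_json::from_value(param)?;

        println!("{}", datastore.name);
        Ok(())
    }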

View File

@ -96,13 +96,13 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let mut data = param.clone();
let mut data = param;
data["password"] = Value::from(base64::encode(password.as_bytes()));
let remote: remote::Remote = serde_json::from_value(data)?;
let (mut config, _digest) = remote::config()?;
if let Some(_) = config.sections.get(&remote.name) {
if config.sections.get(&remote.name).is_some() {
bail!("remote '{}' already exists.", remote.name);
}

View File

@ -154,14 +154,14 @@ pub fn create_sync_job(
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?;
if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
bail!("permission check failed");
}
let (mut config, _digest) = sync::config()?;
if let Some(_) = config.sections.get(&sync_job.id) {
if config.sections.get(&sync_job.id).is_some() {
bail!("job '{}' already exists.", sync_job.id);
}
@ -514,7 +514,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
// unless they have Datastore.Modify as well
job.store = "localstore3".to_string();
job.owner = Some(read_auth_id.clone());
job.owner = Some(read_auth_id);
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
job.owner = None;
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);

View File

@ -98,7 +98,7 @@ pub fn create_verification_job(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
let verification_job: verify::VerificationJobConfig = serde_json::from_value(param)?;
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?;
@ -106,7 +106,7 @@ pub fn create_verification_job(
let (mut config, _digest) = verify::config()?;
if let Some(_) = config.sections.get(&verification_job.id) {
if config.sections.get(&verification_job.id).is_some() {
bail!("job '{}' already exists.", verification_job.id);
}

View File

@ -16,7 +16,7 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
};
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
.map_ok(|bytes| bytes.freeze());
let body = Body::wrap_stream(payload);

View File

@ -121,7 +121,7 @@ async fn termproxy(
)?;
let mut command = Vec::new();
match cmd.as_ref().map(|x| x.as_str()) {
match cmd.as_deref() {
Some("login") | None => {
command.push("login");
if userid == "root@pam" {
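
cmd.as_deref() converts Option<String> into Option<&str> in one step, replacing the manual as_ref().map(|x| x.as_str()) chain, and lets the match compare against string literals. A sketch:

    fn main() {
        let cmd: Option<String> = Some("login".to_string());

        // was: match cmd.as_ref().map(|x| x.as_str()) {
        match cmd.as_deref() {
            Some("login") | None => println!("login flow"),
            Some(other) => println!("other command: {}", other),
        }
    }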

View File

@ -35,18 +35,15 @@ use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {
match apt::pkg_cache_expired() {
Ok(false) => {
if let Ok(Some(cache)) = apt::read_pkg_state() {
return Ok(json!(cache.package_status));
}
},
_ => (),
if let Ok(false) = apt::pkg_cache_expired() {
if let Ok(Some(cache)) = apt::read_pkg_state() {
return Ok(json!(cache.package_status));
}
}
let cache = apt::update_cache()?;
return Ok(json!(cache.package_status));
Ok(json!(cache.package_status))
}
fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
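
The rewrite above replaces a match with a single interesting arm and a _ => () catch-all by if let (clippy: single_match). A sketch with a stand-in for apt::pkg_cache_expired:

    fn pkg_cache_expired() -> Result<bool, ()> {
        Ok(false)
    }

    fn main() {
        // was: match pkg_cache_expired() { Ok(false) => { ... }, _ => (), }
        if let Ok(false) = pkg_cache_expired() {
            println!("cache still valid, using cached package state");
        }
    }
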
@ -90,8 +87,8 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
type: bool,
description: r#"Send notification mail about new package updates available to the
email address configured for 'root@pam'."#,
optional: true,
default: false,
optional: true,
},
quiet: {
description: "Only produces output suitable for logging, omitting progress indicators.",
@ -116,7 +113,7 @@ pub fn apt_update_database(
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
// FIXME: change to non-option in signature and drop below once we have proxmox-api-macro 0.2.3
let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
let notify = notify.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_NOTIFY);
@ -196,7 +193,7 @@ fn apt_get_changelog(
}
}, Some(&name));
if pkg_info.len() == 0 {
if pkg_info.is_empty() {
bail!("Package '{}' not found", name);
}
@ -205,7 +202,7 @@ fn apt_get_changelog(
if changelog_url.starts_with("http://download.proxmox.com/") {
let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, None))
.map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
return Ok(json!(changelog));
Ok(json!(changelog))
} else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
let sub = match subscription::read_subscription()? {
@ -229,7 +226,7 @@ fn apt_get_changelog(
let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, Some(&auth_header)))
.map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
return Ok(json!(changelog));
Ok(json!(changelog))
} else {
let mut command = std::process::Command::new("apt-get");
@ -237,7 +234,7 @@ fn apt_get_changelog(
command.arg("-qq"); // don't display download progress
command.arg(name);
let output = crate::tools::run_command(command, None)?;
return Ok(json!(output));
Ok(json!(output))
}
}

View File

@ -138,7 +138,7 @@ pub fn initialize_disk(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

View File

@ -132,7 +132,7 @@ pub fn create_datastore_disk(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -164,7 +164,7 @@ pub fn create_datastore_disk(
let manager = DiskManage::new();
let disk = manager.clone().disk_by_name(&disk)?;
let disk = manager.disk_by_name(&disk)?;
let partition = create_single_linux_partition(&disk)?;
create_file_system(&partition, filesystem)?;
@ -212,8 +212,7 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
let (config, _) = crate::config::datastore::config()?;
let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
.filter(|ds| ds.path == path)
.next();
.find(|ds| ds.path == path);
if let Some(conflicting_datastore) = conflicting_datastore {
bail!("Can't remove '{}' since it's required by datastore '{}'",

View File

@ -254,7 +254,7 @@ pub fn create_zpool(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

View File

@ -137,7 +137,7 @@ pub fn set_subscription(
let server_id = tools::get_hardware_address()?;
let info = subscription::check_subscription(key, server_id.to_owned())?;
let info = subscription::check_subscription(key, server_id)?;
subscription::write_subscription(info)
.map_err(|e| format_err!("Error writing subscription status - {}", e))?;

View File

@ -513,7 +513,7 @@ pub fn list_tasks(
.collect();
let mut count = result.len() + start as usize;
if result.len() > 0 && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
count += 1;
}

View File

@ -88,7 +88,7 @@ pub fn do_sync_job(
let worker_future = async move {
let delete = sync_job.remove_vanished.unwrap_or(true);
let sync_owner = sync_job.owner.unwrap_or(Authid::root_auth_id().clone());
let sync_owner = sync_job.owner.unwrap_or_else(|| Authid::root_auth_id().clone());
let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
worker.log(format!("Starting datastore sync job '{}'", job_id));

View File

@ -150,16 +150,16 @@ fn upgrade_to_backup_reader_protocol(
}
});
let abort_future = abort_future
.map(|_| Err(format_err!("task aborted")));
.map(|_| -> Result<(), anyhow::Error> { Err(format_err!("task aborted")) });
use futures::future::Either;
futures::future::select(req_fut, abort_future)
.map(move |res| {
let _guard = _guard;
match res {
Either::Left((Ok(res), _)) => Ok(res),
Either::Left((Ok(_), _)) => Ok(()),
Either::Left((Err(err), _)) => Err(err),
Either::Right((Ok(res), _)) => Ok(res),
Either::Right((Ok(_), _)) => Ok(()),
Either::Right((Err(err), _)) => Err(err),
}
})

View File

@ -127,49 +127,46 @@ fn datastore_status(
rrd_mode,
);
match (total_res, used_res) {
(Some((start, reso, total_list)), Some((_, _, used_list))) => {
if let (Some((start, reso, total_list)), Some((_, _, used_list))) = (total_res, used_res) {
let mut usage_list: Vec<f64> = Vec::new();
let mut time_list: Vec<u64> = Vec::new();
let mut history = Vec::new();
for (idx, used) in used_list.iter().enumerate() {
let total = if idx < total_list.len() {
total_list[idx]
} else {
None
};
match (total, used) {
(Some(total), Some(used)) if total != 0.0 => {
time_list.push(start + (idx as u64)*reso);
let usage = used/total;
usage_list.push(usage);
history.push(json!(usage));
},
_ => {
history.push(json!(null))
}
}
}
entry["history-start"] = start.into();
entry["history-delta"] = reso.into();
entry["history"] = history.into();
// we skip the calculation for datastores with not enough data
if usage_list.len() >= 7 {
if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
if b != 0.0 {
let estimate = (1.0 - a) / b;
entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
} else {
entry["estimated-full-date"] = Value::from(0);
}
}
}
},
_ => {},
}
}
list.push(entry);
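
The restructured block above keeps the same logic but destructures the tuple of Options with if let, dropping the match arm that did nothing. A sketch of matching on a tuple of Options:

    fn main() {
        let total_res: Option<(u64, u64)> = Some((0, 60));
        let used_res: Option<f64> = Some(0.5);

        // was: match (total_res, used_res) { (Some(..), Some(..)) => { ... }, _ => {}, }
        if let (Some((start, reso)), Some(used)) = (total_res, used_res) {
            println!("start={} reso={} used={}", start, reso, used);
        }
    }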

View File

@ -87,14 +87,14 @@ pub fn backup(
// early check before starting worker
check_drive_exists(&drive_config, &pool_config.drive)?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let eject_media = eject_media.unwrap_or(false);
let export_media_set = export_media_set.unwrap_or(false);
let upid_str = WorkerTask::new_thread(
"tape-backup",
Some(store.clone()),
Some(store),
auth_id,
to_stdout,
move |worker| {

View File

@ -226,7 +226,7 @@ pub fn erase_media(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"erase-media",
@ -267,7 +267,7 @@ pub fn rewind(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"rewind-media",
@ -353,7 +353,7 @@ pub fn label_media(
let (config, _digest) = config::drive::config()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"label-media",
@ -595,7 +595,7 @@ pub fn clean_drive(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"clean-drive",
@ -722,7 +722,7 @@ pub fn update_inventory(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"inventory-update",
@ -735,7 +735,7 @@ pub fn update_inventory(
let label_text_list = changer.online_media_label_texts()?;
if label_text_list.is_empty() {
worker.log(format!("changer device does not list any media labels"));
worker.log("changer device does not list any media labels".to_string());
}
let state_path = Path::new(TAPE_STATUS_DIR);
@ -752,11 +752,9 @@ pub fn update_inventory(
let label_text = label_text.to_string();
if !read_all_labels.unwrap_or(false) {
if let Some(_) = inventory.find_media_by_label_text(&label_text) {
worker.log(format!("media '{}' already inventoried", label_text));
continue;
}
if !read_all_labels.unwrap_or(false) && inventory.find_media_by_label_text(&label_text).is_some() {
worker.log(format!("media '{}' already inventoried", label_text));
continue;
}
if let Err(err) = changer.load_media(&label_text) {
@ -824,7 +822,7 @@ pub fn barcode_label_media(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"barcode-label-media",
@ -1002,7 +1000,7 @@ pub fn catalog_media(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"catalog-media",
@ -1060,10 +1058,8 @@ pub fn catalog_media(
let _lock = MediaPool::lock(status_path, &pool)?;
if MediaCatalog::exists(status_path, &media_id.label.uuid) {
if !force {
bail!("media catalog exists (please use --force to overwrite)");
}
if MediaCatalog::exists(status_path, &media_id.label.uuid) && !force {
bail!("media catalog exists (please use --force to overwrite)");
}
restore_media(&worker, &mut drive, &media_id, None, verbose)?;

View File

@ -197,7 +197,6 @@ pub fn destroy_media(label_text: String, force: Option<bool>,) -> Result<(), Err
}
let uuid = media_id.label.uuid.clone();
drop(media_id);
inventory.remove_media(&uuid)?;

View File

@ -115,7 +115,7 @@ pub fn restore(
// early check before starting worker
check_drive_exists(&drive_config, &pool_config.drive)?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
"tape-restore",
@ -128,7 +128,7 @@ pub fn restore(
let members = inventory.compute_media_set_members(&media_set_uuid)?;
let media_list = members.media_list().clone();
let media_list = members.media_list();
let mut media_id_list = Vec::new();
@ -234,7 +234,6 @@ pub fn restore_media(
Some(reader) => reader,
};
let target = target.clone();
restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
}
@ -344,36 +343,26 @@ fn restore_chunk_archive<'a>(
let mut decoder = ChunkArchiveDecoder::new(reader);
let result: Result<_, Error> = proxmox::try_block!({
loop {
match decoder.next_chunk()? {
Some((digest, blob)) => {
while let Some((digest, blob)) = decoder.next_chunk()? {
if let Some(datastore) = datastore {
let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
if !chunk_exists {
blob.verify_crc()?;
if blob.crypt_mode()? == CryptMode::None {
blob.decode(None, Some(&digest))?; // verify digest
}
if verbose {
worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
}
datastore.insert_chunk(&blob, &digest)?;
} else {
if verbose {
worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
}
}
} else {
if verbose {
worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
}
}
chunks.push(digest);
}
None => break,
}
}
} else if verbose {
worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
}
} else if verbose {
worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
}
chunks.push(digest);
}
Ok(())
});
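
The loop rewrite above is clippy's while_let_loop: a loop { match next() { Some(x) => ..., None => break } } shape collapses into a single while let. A sketch:

    fn main() {
        let mut chunks = vec![3u8, 2, 1];

        // was: loop { match chunks.pop() { Some(c) => ..., None => break } }
        while let Some(c) = chunks.pop() {
            println!("chunk {}", c);
        }
    }
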
@ -390,7 +379,7 @@ fn restore_chunk_archive<'a>(
// check if this is an aborted stream without end marker
if let Ok(false) = reader.has_end_marker() {
worker.log(format!("missing stream end marker"));
worker.log("missing stream end marker".to_string());
return Ok(None);
}
@ -407,7 +396,7 @@ fn restore_snapshot_archive<'a>(
let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
Ok(()) => return Ok(true),
Ok(()) => Ok(true),
Err(err) => {
let reader = decoder.input();
@ -422,7 +411,7 @@ fn restore_snapshot_archive<'a>(
}
// else the archive is corrupt
return Err(err);
Err(err)
}
}
}

View File

@ -1092,7 +1092,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
];
for fingerprint in invalid_fingerprints.iter() {
if let Ok(_) = parse_simple_value(fingerprint, &schema) {
if parse_simple_value(fingerprint, &schema).is_ok() {
bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
}
}
@ -1133,7 +1133,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
];
for name in invalid_user_ids.iter() {
if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
bail!("test userid '{}' failed - got Ok() while exception an error.", name);
}
}

View File

@ -277,7 +277,7 @@ impl PartialEq<&str> for RealmRef {
impl PartialEq<RealmRef> for Realm {
fn eq(&self, rhs: &RealmRef) -> bool {
self.0 == &rhs.0
self.0 == rhs.0
}
}
@ -638,7 +638,7 @@ impl std::str::FromStr for Authid {
.iter()
.rposition(|&b| b == b'!')
.map(|pos| if pos < name_len { id.len() } else { pos })
.unwrap_or(id.len());
.unwrap_or_else(|| id.len());
if realm_end == id.len() - 1 {
bail!("empty token name in userid");
@ -670,7 +670,7 @@ impl TryFrom<String> for Authid {
.iter()
.rposition(|&b| b == b'!')
.map(|pos| if pos < name_len { data.len() } else { pos })
.unwrap_or(data.len());
.unwrap_or_else(|| data.len());
if realm_end == data.len() - 1 {
bail!("empty token name in userid");