Merge branch 'master' of ssh://proxdev.maurer-it.com/rust/proxmox-backup
commit d543587d34
@@ -72,7 +72,7 @@ fn extract_acl_node_data(
         }
     }
     for (group, roles) in &node.groups {
-        if let Some(_) = token_user {
+        if token_user.is_some() {
             continue;
         }

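The hunk above applies clippy's `redundant_pattern_matching` lint: testing an `Option` with `if let Some(_)` only to throw the binding away reads better as `is_some()`. A minimal, self-contained sketch with a hypothetical value:

    fn main() {
        let token_user: Option<&str> = Some("alice"); // hypothetical value
        // before: if let Some(_) = token_user { ... }
        if token_user.is_some() {
            println!("filtering entries for a specific API token");
        }
    }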
@@ -210,7 +210,7 @@ pub fn update_acl(

     let top_level_privs = user_info.lookup_privs(&current_auth_id, &["access", "acl"]);
     if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
-        if let Some(_) = group {
+        if group.is_some() {
             bail!("Unprivileged users are not allowed to create group ACL item.");
         }

@@ -46,7 +46,7 @@ fn list_roles() -> Result<Value, Error> {
     let mut priv_list = Vec::new();
     for (name, privilege) in PRIVILEGES.iter() {
         if privs & privilege > 0 {
-            priv_list.push(name.clone());
+            priv_list.push(name);
         }
     }
     list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));

@@ -331,13 +331,11 @@ fn list_tfa(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<TfaUser>, Error> {
                 entries: to_data(data),
             });
         }
-    } else {
-        if let Some(data) = { tfa_data }.remove(authid.user()) {
-            out.push(TfaUser {
-                userid: authid.into(),
-                entries: to_data(data),
-            });
-        }
+    } else if let Some(data) = { tfa_data }.remove(authid.user()) {
+        out.push(TfaUser {
+            userid: authid.into(),
+            entries: to_data(data),
+        });
     }

     Ok(out)

@@ -169,7 +169,7 @@ pub fn list_users(
             })
             .collect()
     } else {
-        iter.map(|user: user::User| UserWithTokens::new(user))
+        iter.map(UserWithTokens::new)
             .collect()
     };

@@ -230,7 +230,7 @@ pub fn create_user(

     let (mut config, _digest) = user::config()?;

-    if let Some(_) = config.sections.get(user.userid.as_str()) {
+    if config.sections.get(user.userid.as_str()).is_some() {
         bail!("user '{}' already exists.", user.userid);
     }

@@ -595,7 +595,7 @@ pub fn generate_token(
     let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
     let tokenid_string = tokenid.to_string();

-    if let Some(_) = config.sections.get(&tokenid_string) {
+    if config.sections.get(&tokenid_string).is_some() {
         bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
     }

@@ -603,7 +603,7 @@ pub fn generate_token(
     token_shadow::set_secret(&tokenid, &secret)?;

     let token = user::ApiToken {
-        tokenid: tokenid.clone(),
+        tokenid,
         comment,
         enable,
         expire,

@@ -440,8 +440,8 @@ pub fn list_snapshots (
     let files = info
         .files
         .into_iter()
-        .map(|x| BackupContent {
-            filename: x.to_string(),
+        .map(|filename| BackupContent {
+            filename,
             size: None,
             crypt_mode: None,
         })

@@ -662,11 +662,11 @@ pub fn verify(
         _ => bail!("parameters do not specify a backup group or snapshot"),
     }

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         worker_type,
-        Some(worker_id.clone()),
+        Some(worker_id),
         auth_id.clone(),
         to_stdout,
         move |worker| {

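The recurring `to_stdout` change here (and in many hunks below) is clippy's `needless_bool` lint: `if cond { true } else { false }` is just `cond`. A minimal sketch with a stand-in enum instead of the real `RpcEnvironmentType`:

    fn main() {
        #[derive(PartialEq)]
        enum EnvType { Cli, Rest } // stand-in for RpcEnvironmentType
        let env_type = EnvType::Cli;
        // before: let to_stdout = if env_type == EnvType::Cli { true } else { false };
        let to_stdout = env_type == EnvType::Cli;
        assert!(to_stdout);
    }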
@@ -711,7 +711,7 @@ pub fn verify(

                 verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
             };
-            if failed_dirs.len() > 0 {
+            if !failed_dirs.is_empty() {
                 worker.log("Failed to verify the following snapshots/groups:");
                 for dir in failed_dirs {
                     worker.log(format!("\t{}", dir));

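This is clippy's `len_zero` lint: containers expose `is_empty()`, which states the intent directly instead of comparing `len()` against zero. A minimal sketch:

    fn main() {
        let failed_dirs: Vec<String> = Vec::new();
        // before: if failed_dirs.len() > 0 { ... }
        if !failed_dirs.is_empty() {
            println!("failed to verify {} snapshots/groups", failed_dirs.len());
        }
    }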
@@ -855,7 +855,7 @@ fn prune(

     // We use a WorkerTask just to have a task log, but run synchronously
-    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
+    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

     if keep_all {
         worker.log("No prune selection - keeping all files.");

@@ -935,7 +935,7 @@ fn start_garbage_collection(
     let job = Job::new("garbage_collection", &store)
         .map_err(|_| format_err!("garbage collection already running"))?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
         .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

@@ -1009,7 +1009,7 @@ fn get_datastore_list(
         }
     }

-    Ok(list.into())
+    Ok(list)
 }

 #[sortable]

@@ -1066,7 +1066,7 @@ fn download_file(
         .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+        .map_ok(|bytes| bytes.freeze())
         .map_err(move |err| {
             eprintln!("error during streaming of '{:?}' - {}", &path, err);
             err

@@ -1341,10 +1341,10 @@ fn catalog(

     if filepath != "root" {
         components = base64::decode(filepath)?;
-        if components.len() > 0 && components[0] == '/' as u8 {
+        if !components.is_empty() && components[0] == b'/' {
             components.remove(0);
         }
-        for component in components.split(|c| *c == '/' as u8) {
+        for component in components.split(|c| *c == b'/') {
             if let Some(entry) = catalog_reader.lookup(&current, component)? {
                 current = entry;
             } else {

@@ -1357,7 +1357,7 @@ fn catalog(

     for direntry in catalog_reader.read_dir(&current)? {
         let mut components = components.clone();
-        components.push('/' as u8);
+        components.push(b'/');
         components.extend(&direntry.name);
         let path = base64::encode(components);
         let text = String::from_utf8_lossy(&direntry.name);

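These hunks swap `'/' as u8` for the byte literal `b'/'` (clippy's `char_lit_as_u8`), which is already a `u8` and needs no cast. A minimal sketch of the same path handling, with a hypothetical path:

    fn main() {
        let mut components = b"/etc/hosts".to_vec(); // hypothetical path bytes
        // b'/' is a u8 literal; '/' as u8 needs a cast at every use site
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        let parts: Vec<&[u8]> = components.split(|c| *c == b'/').collect();
        assert_eq!(parts, [&b"etc"[..], &b"hosts"[..]]);
    }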
@@ -1487,13 +1487,13 @@ fn pxar_file_download(
     check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

     let mut components = base64::decode(&filepath)?;
-    if components.len() > 0 && components[0] == '/' as u8 {
+    if !components.is_empty() && components[0] == b'/' {
         components.remove(0);
     }

-    let mut split = components.splitn(2, |c| *c == '/' as u8);
+    let mut split = components.splitn(2, |c| *c == b'/');
     let pxar_name = std::str::from_utf8(split.next().unwrap())?;
-    let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+    let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
     let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
     for file in files {
         if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {

@@ -1520,7 +1520,7 @@ fn pxar_file_download(
     let root = decoder.open_root().await?;
     let file = root
         .lookup(OsStr::from_bytes(file_path)).await?
-        .ok_or(format_err!("error opening '{:?}'", file_path))?;
+        .ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;

     let body = match file.kind() {
         EntryKind::File { .. } => Body::wrap_stream(

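The `ok_or` → `ok_or_else` changes follow clippy's `or_fun_call` lint: `ok_or` evaluates its argument eagerly, so the error message is formatted even on the success path, while `ok_or_else` defers that work. A minimal sketch (hypothetical map and key, not code from this repository):

    use std::collections::HashMap;

    fn lookup(map: &HashMap<String, u32>, key: &str) -> Result<u32, String> {
        // the closure only runs (and only allocates) when the key is missing
        map.get(key).copied().ok_or_else(|| format!("error opening '{}'", key))
    }

    fn main() {
        let mut map = HashMap::new();
        map.insert("catalog.pcat1".to_string(), 42);
        assert_eq!(lookup(&map, "catalog.pcat1"), Ok(42));
        assert!(lookup(&map, "missing").is_err());
    }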
@@ -58,7 +58,7 @@ pub fn list_sync_jobs(
             }
         })
         .filter(|job: &SyncJobStatus| {
-            let as_config: SyncJobConfig = job.clone().into();
+            let as_config: SyncJobConfig = job.into();
             check_sync_job_read_access(&user_info, &auth_id, &as_config)
         }).collect();

@@ -81,13 +81,13 @@ pub fn list_sync_jobs(
         job.last_run_state = state;
         job.last_run_endtime = endtime;

-        let last = job.last_run_endtime.unwrap_or_else(|| starttime);
+        let last = job.last_run_endtime.unwrap_or(starttime);

         job.next_run = (|| -> Option<i64> {
             let schedule = job.schedule.as_ref()?;
             let event = parse_calendar_event(&schedule).ok()?;
             // ignore errors
-            compute_next_event(&event, last, false).unwrap_or_else(|_| None)
+            compute_next_event(&event, last, false).unwrap_or(None)
         })();
     }

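This is the flip side of the `or_fun_call` lint (clippy's `unnecessary_lazy_evaluations`): when the default is a cheap, already-computed `Copy` value, the eager `unwrap_or` is simpler than wrapping it in a closure. A minimal sketch:

    fn main() {
        let last_run_endtime: Option<i64> = None;
        let starttime: i64 = 1_600_000_000; // hypothetical epoch value
        // starttime already exists, so no closure is needed:
        // before: last_run_endtime.unwrap_or_else(|| starttime)
        let last = last_run_endtime.unwrap_or(starttime);
        assert_eq!(last, starttime);
    }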
@@ -86,13 +86,13 @@ pub fn list_verification_jobs(
         job.last_run_state = state;
         job.last_run_endtime = endtime;

-        let last = job.last_run_endtime.unwrap_or_else(|| starttime);
+        let last = job.last_run_endtime.unwrap_or(starttime);

         job.next_run = (|| -> Option<i64> {
             let schedule = job.schedule.as_ref()?;
             let event = parse_calendar_event(&schedule).ok()?;
             // ignore errors
-            compute_next_event(&event, last, false).unwrap_or_else(|_| None)
+            compute_next_event(&event, last, false).unwrap_or(None)
         })();
     }

@@ -138,7 +138,7 @@ async move {
         }
     };

-    let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
+    let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

     let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {

@@ -465,7 +465,7 @@ impl BackupEnvironment {
         state.ensure_unfinished()?;

         // test if all writers are correctly closed
-        if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
+        if !state.dynamic_writers.is_empty() || !state.fixed_writers.is_empty() {
             bail!("found open index writer - unable to finish backup");
         }

@@ -120,11 +120,11 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {

     let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
+    let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;

     let (mut config, _digest) = datastore::config()?;

-    if let Some(_) = config.sections.get(&datastore.name) {
+    if config.sections.get(&datastore.name).is_some() {
         bail!("datastore '{}' already exists.", datastore.name);
     }

@@ -96,13 +96,13 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

     let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let mut data = param.clone();
+    let mut data = param;
     data["password"] = Value::from(base64::encode(password.as_bytes()));
     let remote: remote::Remote = serde_json::from_value(data)?;

     let (mut config, _digest) = remote::config()?;

-    if let Some(_) = config.sections.get(&remote.name) {
+    if config.sections.get(&remote.name).is_some() {
         bail!("remote '{}' already exists.", remote.name);
     }

@@ -154,14 +154,14 @@ pub fn create_sync_job(

     let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
+    let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?;
     if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
         bail!("permission check failed");
     }

     let (mut config, _digest) = sync::config()?;

-    if let Some(_) = config.sections.get(&sync_job.id) {
+    if config.sections.get(&sync_job.id).is_some() {
         bail!("job '{}' already exists.", sync_job.id);
     }

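The `param.clone()` → `param` changes (clippy's `redundant_clone`) work because `serde_json::from_value` takes its argument by value and `param` is never used afterwards, so it can simply be moved. A minimal sketch, assuming `serde` with the derive feature and `serde_json` are available as they are in this repository; the struct is hypothetical:

    use serde_json::{json, Value};

    #[derive(serde::Deserialize)]
    struct SyncJob { id: String } // hypothetical config type

    fn main() -> Result<(), serde_json::Error> {
        let param: Value = json!({ "id": "job1" });
        // param is consumed here and never used again, so no clone is needed
        let job: SyncJob = serde_json::from_value(param)?;
        assert_eq!(job.id, "job1");
        Ok(())
    }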
@@ -514,7 +514,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator

     // unless they have Datastore.Modify as well
     job.store = "localstore3".to_string();
-    job.owner = Some(read_auth_id.clone());
+    job.owner = Some(read_auth_id);
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
     job.owner = None;
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);

@@ -98,7 +98,7 @@ pub fn create_verification_job(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

-    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
+    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param)?;

     user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?;

@@ -106,7 +106,7 @@ pub fn create_verification_job(

     let (mut config, _digest) = verify::config()?;

-    if let Some(_) = config.sections.get(&verification_job.id) {
+    if config.sections.get(&verification_job.id).is_some() {
         bail!("job '{}' already exists.", verification_job.id);
     }

@@ -16,7 +16,7 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
     };

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+        .map_ok(|bytes| bytes.freeze());

     let body = Body::wrap_stream(payload);

@@ -121,7 +121,7 @@ async fn termproxy(
     )?;

     let mut command = Vec::new();
-    match cmd.as_ref().map(|x| x.as_str()) {
+    match cmd.as_deref() {
         Some("login") | None => {
             command.push("login");
             if userid == "root@pam" {

@@ -35,18 +35,15 @@ use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
 /// List available APT updates
 fn apt_update_available(_param: Value) -> Result<Value, Error> {

-    match apt::pkg_cache_expired() {
-        Ok(false) => {
-            if let Ok(Some(cache)) = apt::read_pkg_state() {
-                return Ok(json!(cache.package_status));
-            }
-        },
-        _ => (),
+    if let Ok(false) = apt::pkg_cache_expired() {
+        if let Ok(Some(cache)) = apt::read_pkg_state() {
+            return Ok(json!(cache.package_status));
+        }
     }

     let cache = apt::update_cache()?;

-    return Ok(json!(cache.package_status));
+    Ok(json!(cache.package_status))
 }

 fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {

@@ -90,8 +87,8 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
             type: bool,
             description: r#"Send notification mail about new package updates available to the
 email address configured for 'root@pam'."#,
-            optional: true,
             default: false,
+            optional: true,
         },
         quiet: {
             description: "Only produces output suitable for logging, omitting progress indicators.",

@@ -116,7 +113,7 @@ pub fn apt_update_database(
 ) -> Result<String, Error> {

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
     // FIXME: change to non-option in signature and drop below once we have proxmox-api-macro 0.2.3
     let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
     let notify = notify.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_NOTIFY);

@@ -196,7 +193,7 @@ fn apt_get_changelog(
         }
     }, Some(&name));

-    if pkg_info.len() == 0 {
+    if pkg_info.is_empty() {
         bail!("Package '{}' not found", name);
     }

@@ -205,7 +202,7 @@ fn apt_get_changelog(
     if changelog_url.starts_with("http://download.proxmox.com/") {
         let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, None))
             .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
-        return Ok(json!(changelog));
+        Ok(json!(changelog))

     } else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
         let sub = match subscription::read_subscription()? {

@@ -229,7 +226,7 @@ fn apt_get_changelog(

         let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, Some(&auth_header)))
             .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
-        return Ok(json!(changelog));
+        Ok(json!(changelog))

     } else {
         let mut command = std::process::Command::new("apt-get");

@@ -237,7 +234,7 @@ fn apt_get_changelog(
         command.arg("-qq"); // don't display download progress
         command.arg(name);
         let output = crate::tools::run_command(command, None)?;
-        return Ok(json!(output));
+        Ok(json!(output))
     }
 }

@@ -138,7 +138,7 @@ pub fn initialize_disk(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -132,7 +132,7 @@ pub fn create_datastore_disk(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -164,7 +164,7 @@ pub fn create_datastore_disk(

     let manager = DiskManage::new();

-    let disk = manager.clone().disk_by_name(&disk)?;
+    let disk = manager.disk_by_name(&disk)?;

     let partition = create_single_linux_partition(&disk)?;
     create_file_system(&partition, filesystem)?;

@@ -212,8 +212,7 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
     let (config, _) = crate::config::datastore::config()?;
     let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
     let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
-        .filter(|ds| ds.path == path)
-        .next();
+        .find(|ds| ds.path == path);

     if let Some(conflicting_datastore) = conflicting_datastore {
         bail!("Can't remove '{}' since it's required by datastore '{}'",

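`Iterator::find` is the direct, short-circuiting form of `filter(pred).next()` (clippy's `filter_next` lint). A minimal sketch with hypothetical store names:

    fn main() {
        let datastores = vec!["store1", "store2", "store3"];
        // before: datastores.into_iter().filter(|n| *n == "store2").next()
        let conflicting = datastores.into_iter().find(|name| *name == "store2");
        assert_eq!(conflicting, Some("store2"));
    }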
@@ -254,7 +254,7 @@ pub fn create_zpool(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -137,7 +137,7 @@ pub fn set_subscription(

     let server_id = tools::get_hardware_address()?;

-    let info = subscription::check_subscription(key, server_id.to_owned())?;
+    let info = subscription::check_subscription(key, server_id)?;

     subscription::write_subscription(info)
         .map_err(|e| format_err!("Error writing subscription status - {}", e))?;

@@ -513,7 +513,7 @@ pub fn list_tasks(
         .collect();

     let mut count = result.len() + start as usize;
-    if result.len() > 0 && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
+    if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
         count += 1;
     }

@@ -88,7 +88,7 @@ pub fn do_sync_job(
     let worker_future = async move {

         let delete = sync_job.remove_vanished.unwrap_or(true);
-        let sync_owner = sync_job.owner.unwrap_or(Authid::root_auth_id().clone());
+        let sync_owner = sync_job.owner.unwrap_or_else(|| Authid::root_auth_id().clone());
         let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;

         worker.log(format!("Starting datastore sync job '{}'", job_id));

@@ -150,16 +150,16 @@ fn upgrade_to_backup_reader_protocol(
         }
     });
     let abort_future = abort_future
-        .map(|_| Err(format_err!("task aborted")));
+        .map(|_| -> Result<(), anyhow::Error> { Err(format_err!("task aborted")) });

     use futures::future::Either;
     futures::future::select(req_fut, abort_future)
         .map(move |res| {
             let _guard = _guard;
             match res {
-                Either::Left((Ok(res), _)) => Ok(res),
+                Either::Left((Ok(_), _)) => Ok(()),
                 Either::Left((Err(err), _)) => Err(err),
-                Either::Right((Ok(res), _)) => Ok(res),
+                Either::Right((Ok(_), _)) => Ok(()),
                 Either::Right((Err(err), _)) => Err(err),
             }
         })

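The `do_sync_job` hunk above goes the opposite way from the earlier `unwrap_or(starttime)` change: here the default involves a clone, so `unwrap_or_else` defers that cost to the `None` path (again clippy's `or_fun_call`). A minimal sketch; `root_auth_id` is a hypothetical stand-in:

    fn root_auth_id() -> String {
        // stands in for Authid::root_auth_id().clone(); pretend this is costly
        "root@pam".to_string()
    }

    fn main() {
        let owner: Option<String> = None;
        // the closure runs (and allocates) only when owner is None
        let sync_owner = owner.unwrap_or_else(root_auth_id);
        assert_eq!(sync_owner, "root@pam");
    }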
@@ -127,49 +127,46 @@ fn datastore_status(
         rrd_mode,
     );

-    match (total_res, used_res) {
-        (Some((start, reso, total_list)), Some((_, _, used_list))) => {
-            let mut usage_list: Vec<f64> = Vec::new();
-            let mut time_list: Vec<u64> = Vec::new();
-            let mut history = Vec::new();
-
-            for (idx, used) in used_list.iter().enumerate() {
-                let total = if idx < total_list.len() {
-                    total_list[idx]
-                } else {
-                    None
-                };
-
-                match (total, used) {
-                    (Some(total), Some(used)) if total != 0.0 => {
-                        time_list.push(start + (idx as u64)*reso);
-                        let usage = used/total;
-                        usage_list.push(usage);
-                        history.push(json!(usage));
-                    },
-                    _ => {
-                        history.push(json!(null))
-                    }
-                }
-            }
-
-            entry["history-start"] = start.into();
-            entry["history-delta"] = reso.into();
-            entry["history"] = history.into();
-
-            // we skip the calculation for datastores with not enough data
-            if usage_list.len() >= 7 {
-                if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
-                    if b != 0.0 {
-                        let estimate = (1.0 - a) / b;
-                        entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
-                    } else {
-                        entry["estimated-full-date"] = Value::from(0);
-                    }
-                }
-            }
-        },
-        _ => {},
-    }
+    if let (Some((start, reso, total_list)), Some((_, _, used_list))) = (total_res, used_res) {
+        let mut usage_list: Vec<f64> = Vec::new();
+        let mut time_list: Vec<u64> = Vec::new();
+        let mut history = Vec::new();
+
+        for (idx, used) in used_list.iter().enumerate() {
+            let total = if idx < total_list.len() {
+                total_list[idx]
+            } else {
+                None
+            };
+
+            match (total, used) {
+                (Some(total), Some(used)) if total != 0.0 => {
+                    time_list.push(start + (idx as u64)*reso);
+                    let usage = used/total;
+                    usage_list.push(usage);
+                    history.push(json!(usage));
+                },
+                _ => {
+                    history.push(json!(null))
+                }
+            }
+        }
+
+        entry["history-start"] = start.into();
+        entry["history-delta"] = reso.into();
+        entry["history"] = history.into();
+
+        // we skip the calculation for datastores with not enough data
+        if usage_list.len() >= 7 {
+            if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
+                if b != 0.0 {
+                    let estimate = (1.0 - a) / b;
+                    entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
+                } else {
+                    entry["estimated-full-date"] = Value::from(0);
+                }
+            }
+        }
+    }

     list.push(entry);

@@ -87,14 +87,14 @@ pub fn backup(
     // early check before starting worker
     check_drive_exists(&drive_config, &pool_config.drive)?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let eject_media = eject_media.unwrap_or(false);
     let export_media_set = export_media_set.unwrap_or(false);

     let upid_str = WorkerTask::new_thread(
         "tape-backup",
-        Some(store.clone()),
+        Some(store),
         auth_id,
         to_stdout,
         move |worker| {

@@ -226,7 +226,7 @@ pub fn erase_media(

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "erase-media",

@@ -267,7 +267,7 @@ pub fn rewind(

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "rewind-media",

@@ -353,7 +353,7 @@ pub fn label_media(

     let (config, _digest) = config::drive::config()?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "label-media",

@@ -595,7 +595,7 @@ pub fn clean_drive(

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "clean-drive",

@@ -722,7 +722,7 @@ pub fn update_inventory(

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "inventory-update",

@@ -735,7 +735,7 @@ pub fn update_inventory(

     let label_text_list = changer.online_media_label_texts()?;
     if label_text_list.is_empty() {
-        worker.log(format!("changer device does not list any media labels"));
+        worker.log("changer device does not list any media labels".to_string());
     }

     let state_path = Path::new(TAPE_STATUS_DIR);

@@ -752,11 +752,9 @@ pub fn update_inventory(

         let label_text = label_text.to_string();

-        if !read_all_labels.unwrap_or(false) {
-            if let Some(_) = inventory.find_media_by_label_text(&label_text) {
-                worker.log(format!("media '{}' already inventoried", label_text));
-                continue;
-            }
+        if !read_all_labels.unwrap_or(false) && inventory.find_media_by_label_text(&label_text).is_some() {
+            worker.log(format!("media '{}' already inventoried", label_text));
+            continue;
         }

         if let Err(err) = changer.load_media(&label_text) {

@@ -824,7 +822,7 @@ pub fn barcode_label_media(

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "barcode-label-media",

@@ -1002,7 +1000,7 @@ pub fn catalog_media(

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "catalog-media",

@@ -1060,10 +1058,8 @@ pub fn catalog_media(

             let _lock = MediaPool::lock(status_path, &pool)?;

-            if MediaCatalog::exists(status_path, &media_id.label.uuid) {
-                if !force {
-                    bail!("media catalog exists (please use --force to overwrite)");
-                }
+            if MediaCatalog::exists(status_path, &media_id.label.uuid) && !force {
+                bail!("media catalog exists (please use --force to overwrite)");
             }

             restore_media(&worker, &mut drive, &media_id, None, verbose)?;

@@ -197,7 +197,6 @@ pub fn destroy_media(label_text: String, force: Option<bool>,) -> Result<(), Err
     }

     let uuid = media_id.label.uuid.clone();
-    drop(media_id);

     inventory.remove_media(&uuid)?;

@@ -115,7 +115,7 @@ pub fn restore(
     // early check before starting worker
     check_drive_exists(&drive_config, &pool_config.drive)?;

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread(
         "tape-restore",

@@ -128,7 +128,7 @@ pub fn restore(

             let members = inventory.compute_media_set_members(&media_set_uuid)?;

-            let media_list = members.media_list().clone();
+            let media_list = members.media_list();

             let mut media_id_list = Vec::new();

@@ -234,7 +234,6 @@ pub fn restore_media(
         Some(reader) => reader,
     };

-    let target = target.clone();
     restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
 }

@@ -344,36 +343,26 @@ fn restore_chunk_archive<'a>(
     let mut decoder = ChunkArchiveDecoder::new(reader);

     let result: Result<_, Error> = proxmox::try_block!({
-        loop {
-            match decoder.next_chunk()? {
-                Some((digest, blob)) => {
-                    if let Some(datastore) = datastore {
-                        let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
-                        if !chunk_exists {
-                            blob.verify_crc()?;
-
-                            if blob.crypt_mode()? == CryptMode::None {
-                                blob.decode(None, Some(&digest))?; // verify digest
-                            }
-                            if verbose {
-                                worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
-                            }
-                            datastore.insert_chunk(&blob, &digest)?;
-                        } else {
-                            if verbose {
-                                worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
-                            }
-                        }
-                    } else {
-                        if verbose {
-                            worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
-                        }
-                    }
-                    chunks.push(digest);
-                }
-                None => break,
-            }
-        }
+        while let Some((digest, blob)) = decoder.next_chunk()? {
+            if let Some(datastore) = datastore {
+                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
+                if !chunk_exists {
+                    blob.verify_crc()?;
+
+                    if blob.crypt_mode()? == CryptMode::None {
+                        blob.decode(None, Some(&digest))?; // verify digest
+                    }
+                    if verbose {
+                        worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+                    }
+                    datastore.insert_chunk(&blob, &digest)?;
+                } else if verbose {
+                    worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+                }
+            } else if verbose {
+                worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+            }
+            chunks.push(digest);
+        }
         Ok(())
     });

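The restructuring above collapses a `loop { match ... { Some(x) => ..., None => break } }` into a single `while let`, which also removes one indentation level (clippy's `while_let_loop`). A minimal sketch of the pattern:

    fn main() {
        let mut chunks = vec![3u8, 2, 1];
        // before: loop { match chunks.pop() { Some(c) => { ... }, None => break } }
        while let Some(chunk) = chunks.pop() {
            println!("restoring chunk {}", chunk);
        }
    }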
@@ -390,7 +379,7 @@ fn restore_chunk_archive<'a>(

     // check if this is an aborted stream without end marker
     if let Ok(false) = reader.has_end_marker() {
-        worker.log(format!("missing stream end marker"));
+        worker.log("missing stream end marker".to_string());
         return Ok(None);
     }

@@ -407,7 +396,7 @@ fn restore_snapshot_archive<'a>(

     let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
     match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
-        Ok(()) => return Ok(true),
+        Ok(()) => Ok(true),
         Err(err) => {
             let reader = decoder.input();

@@ -422,7 +411,7 @@ fn restore_snapshot_archive<'a>(
             }

             // else the archive is corrupt
-            return Err(err);
+            Err(err)
         }
     }
 }

@@ -1092,7 +1092,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
     ];

     for fingerprint in invalid_fingerprints.iter() {
-        if let Ok(_) = parse_simple_value(fingerprint, &schema) {
+        if parse_simple_value(fingerprint, &schema).is_ok() {
             bail!("test fingerprint '{}' failed - got Ok() while expecting an error.", fingerprint);
         }
     }

@@ -1133,7 +1133,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];

     for name in invalid_user_ids.iter() {
-        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
+        if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
             bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
         }
     }

@@ -277,7 +277,7 @@ impl PartialEq<&str> for RealmRef {

 impl PartialEq<RealmRef> for Realm {
     fn eq(&self, rhs: &RealmRef) -> bool {
-        self.0 == &rhs.0
+        self.0 == rhs.0
     }
 }

@@ -638,7 +638,7 @@ impl std::str::FromStr for Authid {
             .iter()
             .rposition(|&b| b == b'!')
             .map(|pos| if pos < name_len { id.len() } else { pos })
-            .unwrap_or(id.len());
+            .unwrap_or_else(|| id.len());

         if realm_end == id.len() - 1 {
             bail!("empty token name in userid");

@@ -670,7 +670,7 @@ impl TryFrom<String> for Authid {
             .iter()
             .rposition(|&b| b == b'!')
             .map(|pos| if pos < name_len { data.len() } else { pos })
-            .unwrap_or(data.len());
+            .unwrap_or_else(|| data.len());

         if realm_end == data.len() - 1 {
             bail!("empty token name in userid");

@@ -97,7 +97,7 @@ where
         let info = this
             .index
             .chunk_info(idx)
-            .ok_or(io_format_err!("could not get digest"))?;
+            .ok_or_else(|| io_format_err!("could not get digest"))?;

         this.current_chunk_offset = offset;
         this.current_chunk_idx = idx;

@@ -137,18 +137,12 @@ impl DirEntry {

     /// Check if DirEntry is a directory
     pub fn is_directory(&self) -> bool {
-        match self.attr {
-            DirEntryAttribute::Directory { .. } => true,
-            _ => false,
-        }
+        matches!(self.attr, DirEntryAttribute::Directory { .. })
     }

     /// Check if DirEntry is a symlink
     pub fn is_symlink(&self) -> bool {
-        match self.attr {
-            DirEntryAttribute::Symlink { .. } => true,
-            _ => false,
-        }
+        matches!(self.attr, DirEntryAttribute::Symlink { .. })
     }
 }

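The `matches!` macro (clippy's `match_like_matches_macro`) turns a match whose arms are only `=> true` / `_ => false` into one expression. A minimal sketch with a simplified stand-in for the real enum:

    enum DirEntryAttribute { Directory { start: u64 }, Symlink, File } // simplified

    fn is_directory(attr: &DirEntryAttribute) -> bool {
        // one expression instead of a match with `=> true` / `_ => false` arms
        matches!(attr, DirEntryAttribute::Directory { .. })
    }

    fn main() {
        assert!(is_directory(&DirEntryAttribute::Directory { start: 0 }));
        assert!(!is_directory(&DirEntryAttribute::Symlink));
        assert!(!is_directory(&DirEntryAttribute::File));
    }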
@@ -591,6 +585,7 @@ impl <R: Read + Seek> CatalogReader<R> {
 ///
 /// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
 /// If the value is negative, we end with a zero byte (0x00).
+#[allow(clippy::neg_multiply)]
 pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error> {
     let mut enc = Vec::new();

@@ -611,7 +606,7 @@ pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error>
             break;
         }
         enc.push((128 | (d & 127)) as u8);
-        d = d >> 7;
+        d >>= 7;
     }
     writer.write_all(&enc)?;

@@ -623,6 +618,7 @@ pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error>
 /// We currently read maximal 11 bytes, which give a maximum of 70 bits + sign.
 /// this method is compatible with catalog_encode_u64 iff the
 /// value encoded is <= 2^63 (values > 2^63 cannot be represented in an i64)
+#[allow(clippy::neg_multiply)]
 pub fn catalog_decode_i64<R: Read>(reader: &mut R) -> Result<i64, Error> {

     let mut v: u64 = 0;

@@ -665,7 +661,7 @@ pub fn catalog_encode_u64<W: Write>(writer: &mut W, v: u64) -> Result<(), Error>
             break;
         }
         enc.push((128 | (d & 127)) as u8);
-        d = d >> 7;
+        d >>= 7;
     }
     writer.write_all(&enc)?;

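For context, the loops above implement the catalog's 7-bit little-endian varint scheme: seven payload bits per byte, with bit 8 set on every byte except the last. A self-contained sketch of the unsigned variant under that scheme (not the exact library code):

    use std::io::Write;

    fn encode_u64<W: Write>(writer: &mut W, v: u64) -> std::io::Result<()> {
        let mut enc = Vec::new();
        let mut d = v;
        loop {
            if d < 128 {
                enc.push(d as u8); // final byte: high bit clear
                break;
            }
            enc.push((128 | (d & 127)) as u8); // continuation byte
            d >>= 7; // compound assignment, as in the hunks above
        }
        writer.write_all(&enc)
    }

    fn main() -> std::io::Result<()> {
        let mut buf = Vec::new();
        encode_u64(&mut buf, 300)?; // 300 = 0b10_0101100
        assert_eq!(buf, vec![0b1010_1100, 0b0000_0010]);
        Ok(())
    }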
@@ -441,8 +441,7 @@ impl Shell {
         R: 'static,
     {
         let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
-        let result = call(&mut *shell).await;
-        result
+        call(&mut *shell).await
     }

     pub async fn shell(mut self) -> Result<(), Error> {

@@ -18,7 +18,7 @@ impl <W: Write> ChecksumWriter<W> {
         let hasher = crc32fast::Hasher::new();
         let signer = match config {
             Some(config) => {
-                let tied_signer = Tied::new(config.clone(), |config| {
+                let tied_signer = Tied::new(config, |config| {
                     Box::new(unsafe { (*config).data_signer() })
                 });
                 Some(tied_signer)

@@ -44,7 +44,7 @@ fn digest_to_prefix(digest: &[u8]) -> PathBuf {
     buf.push(HEX_CHARS[(digest[0] as usize) &0xf]);
     buf.push(HEX_CHARS[(digest[1] as usize) >> 4]);
     buf.push(HEX_CHARS[(digest[1] as usize) & 0xf]);
-    buf.push('/' as u8);
+    buf.push(b'/');

     let path = unsafe { String::from_utf8_unchecked(buf)};

@@ -80,7 +80,7 @@ impl ChunkStore {

         let default_options = CreateOptions::new();

-        match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+        match create_path(&base, Some(default_options), Some(options.clone())) {
             Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
             Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
         }

@@ -113,9 +113,8 @@ impl ChunkStore {
     }

     fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
-        let base: PathBuf = base.into();
+        let mut lockfile_path: PathBuf = base.into();

-        let mut lockfile_path = base.clone();
         lockfile_path.push(".lock");

         lockfile_path

@@ -227,7 +226,7 @@ impl ChunkStore {
                     continue;
                 }

-                let bad = bytes.ends_with(".bad".as_bytes());
+                let bad = bytes.ends_with(b".bad");
                 return Some((Ok(entry), percentage, bad));
             }
             Some(Err(err)) => {

@@ -402,7 +401,7 @@ impl ChunkStore {
         file.write_all(raw_data)?;

         if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
-            if let Err(_) = std::fs::remove_file(&tmp_path) { /* ignore */ }
+            if std::fs::remove_file(&tmp_path).is_err() { /* ignore */ }
             bail!(
                 "Atomic rename on store '{}' failed for chunk {} - {}",
                 self.name,

@@ -59,7 +59,7 @@ where
             }
             None => {
                 this.scan_pos = 0;
-                if this.buffer.len() > 0 {
+                if !this.buffer.is_empty() {
                     return Poll::Ready(Some(Ok(this.buffer.split())));
                 } else {
                     return Poll::Ready(None);

@@ -111,7 +111,7 @@ where
             }
             None => {
                 // last chunk can have any size
-                if this.buffer.len() > 0 {
+                if !this.buffer.is_empty() {
                     return Poll::Ready(Some(Ok(this.buffer.split())));
                 } else {
                     return Poll::Ready(None);

@@ -36,7 +36,7 @@ impl <R: BufRead> CryptReader<R> {
 impl <R: BufRead> Read for CryptReader<R> {

     fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        if self.small_read_buf.len() > 0 {
+        if !self.small_read_buf.is_empty() {
             let max = if self.small_read_buf.len() > buf.len() { buf.len() } else { self.small_read_buf.len() };
             let rest = self.small_read_buf.split_off(max);
             buf[..max].copy_from_slice(&self.small_read_buf);

@@ -50,7 +50,7 @@ impl <R: BufRead> Read for CryptReader<R> {
         if buf.len() <= 2*self.block_size {
             let mut outbuf = [0u8; 1024];

-            let count = if data.len() == 0 { // EOF
+            let count = if data.is_empty() { // EOF
                 let written = self.crypter.finalize(&mut outbuf)?;
                 self.finalized = true;
                 written

@@ -72,7 +72,7 @@ impl <R: BufRead> Read for CryptReader<R> {
                 buf[..count].copy_from_slice(&outbuf[..count]);
                 Ok(count)
             }
-        } else if data.len() == 0 { // EOF
+        } else if data.is_empty() { // EOF
             let rest = self.crypter.finalize(buf)?;
             self.finalized = true;
             Ok(rest)

@@ -408,9 +408,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
         chunk_size: usize,
         compress: bool,
     ) -> Result<(DataBlob, [u8; 32]), Error> {

-        let mut zero_bytes = Vec::with_capacity(chunk_size);
-        zero_bytes.resize(chunk_size, 0u8);
+        let zero_bytes = vec![0; chunk_size];
        let mut chunk_builder = DataChunkBuilder::new(&zero_bytes).compress(compress);
        if let Some(ref crypt_config) = crypt_config {
            chunk_builder = chunk_builder.crypt_config(crypt_config);

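The `vec![elem; n]` macro builds a filled vector in one step, replacing the two-step `with_capacity` + `resize` (clippy's `slow_vector_initialization`). A minimal sketch:

    fn main() {
        let chunk_size = 4096;
        // before: let mut zero_bytes = Vec::with_capacity(chunk_size);
        //         zero_bytes.resize(chunk_size, 0u8);
        let zero_bytes = vec![0u8; chunk_size];
        assert_eq!(zero_bytes.len(), chunk_size);
        assert!(zero_bytes.iter().all(|&b| b == 0));
    }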
@@ -334,9 +334,7 @@ impl DataStore {
         auth_id: &Authid,
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
-        let base_path = self.base_path();
-
-        let mut full_path = base_path.clone();
+        let mut full_path = self.base_path();
         full_path.push(backup_group.backup_type());
         std::fs::create_dir_all(&full_path)?;

@@ -392,7 +390,7 @@ impl DataStore {
         fn is_hidden(entry: &walkdir::DirEntry) -> bool {
             entry.file_name()
                 .to_str()
-                .map(|s| s.starts_with("."))
+                .map(|s| s.starts_with('.'))
                 .unwrap_or(false)
         }
         let handle_entry_err = |err: walkdir::Error| {

@@ -478,12 +476,11 @@ impl DataStore {
         let image_list = self.list_images()?;
         let image_count = image_list.len();

-        let mut done = 0;
         let mut last_percentage: usize = 0;

         let mut strange_paths_count: u64 = 0;

-        for img in image_list {
+        for (i, img) in image_list.into_iter().enumerate() {

             worker.check_abort()?;
             tools::fail_on_shutdown()?;

@@ -516,15 +513,14 @@ impl DataStore {
                 Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                 Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
             }
-            done += 1;

-            let percentage = done*100/image_count;
+            let percentage = (i + 1) * 100 / image_count;
             if percentage > last_percentage {
                 crate::task_log!(
                     worker,
                     "marked {}% ({} of {} index files)",
                     percentage,
-                    done,
+                    i + 1,
                     image_count,
                 );
                 last_percentage = percentage;

@@ -548,7 +544,7 @@ impl DataStore {
     }

     pub fn garbage_collection_running(&self) -> bool {
-        if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
+        !matches!(self.gc_mutex.try_lock(), Ok(_))
     }

     pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {

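The progress-reporting change above replaces a manually maintained `done` counter with `Iterator::enumerate`, which yields the index alongside each item. A minimal sketch with hypothetical file names:

    fn main() {
        let image_list = vec!["a.fidx", "b.didx", "c.fidx"];
        let image_count = image_list.len();
        // enumerate replaces the manual `done += 1` counter from the old code
        for (i, img) in image_list.into_iter().enumerate() {
            let percentage = (i + 1) * 100 / image_count;
            println!("marked {}% ({} of {} index files): {}", percentage, i + 1, image_count, img);
        }
    }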
@@ -194,7 +194,7 @@ impl IndexFile for DynamicIndexReader {
         if pos >= self.index.len() {
             None
         } else {
-            Some(unsafe { std::mem::transmute(self.chunk_digest(pos).as_ptr()) })
+            Some(unsafe { &*(self.chunk_digest(pos).as_ptr() as *const [u8; 32]) })
         }
     }

@@ -229,7 +229,7 @@ impl IndexFile for DynamicIndexReader {

         Some(ChunkReadInfo {
             range: start..end,
-            digest: self.index[pos].digest.clone(),
+            digest: self.index[pos].digest,
         })
     }

@@ -63,11 +63,11 @@ pub struct EncryptedDataBlobHeader {
 ///
 /// Panics on unknown magic numbers.
 pub fn header_size(magic: &[u8; 8]) -> usize {
-    match magic {
-        &UNCOMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
-        &COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
-        &ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
-        &ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
+    match *magic {
+        UNCOMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
+        COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
+        ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
+        ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
         _ => panic!("unknown blob magic"),
     }
 }

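Matching on the dereferenced value (`match *magic`) lets every arm name the constant directly instead of prefixing each pattern with `&` (clippy's `match_ref_pats`). A minimal sketch; the magic values and sizes are hypothetical:

    const UNCOMPRESSED: [u8; 8] = *b"UNCMPRS0"; // hypothetical magic values
    const COMPRESSED: [u8; 8] = *b"COMPRSD0";

    fn header_size(magic: &[u8; 8]) -> usize {
        // `*magic` copies the array, so the arms need no `&` prefix
        match *magic {
            UNCOMPRESSED => 8,
            COMPRESSED => 16,
            _ => panic!("unknown blob magic"),
        }
    }

    fn main() {
        assert_eq!(header_size(&UNCOMPRESSED), 8);
        assert_eq!(header_size(&COMPRESSED), 16);
    }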
@@ -60,7 +60,7 @@ impl FixedIndexReader {
     pub fn open(path: &Path) -> Result<Self, Error> {
         File::open(path)
             .map_err(Error::from)
-            .and_then(|file| Self::new(file))
+            .and_then(Self::new)
             .map_err(|err| format_err!("Unable to open fixed index {:?} - {}", path, err))
     }

@@ -126,7 +126,7 @@ impl FixedIndexReader {
     }

     fn unmap(&mut self) -> Result<(), Error> {
-        if self.index == std::ptr::null_mut() {
+        if self.index.is_null() {
             return Ok(());
         }

@@ -166,7 +166,7 @@ impl IndexFile for FixedIndexReader {
         if pos >= self.index_length {
             None
         } else {
-            Some(unsafe { std::mem::transmute(self.index.add(pos * 32)) })
+            Some(unsafe { &*(self.index.add(pos * 32) as *const [u8; 32]) })
         }
     }

@@ -324,7 +324,7 @@ impl FixedIndexWriter {
     }

     fn unmap(&mut self) -> Result<(), Error> {
-        if self.index == std::ptr::null_mut() {
+        if self.index.is_null() {
             return Ok(());
         }

@@ -342,7 +342,7 @@ impl FixedIndexWriter {
     }

     pub fn close(&mut self) -> Result<[u8; 32], Error> {
-        if self.index == std::ptr::null_mut() {
+        if self.index.is_null() {
             bail!("cannot close already closed index file.");
         }

@@ -437,7 +437,7 @@ impl FixedIndexWriter {
             );
         }

-        if self.index == std::ptr::null_mut() {
+        if self.index.is_null() {
             bail!("cannot write to closed index file.");
         }

@@ -336,7 +336,7 @@ pub fn rsa_decrypt_key_config(
     let decrypted = rsa
         .private_decrypt(key, &mut buffer, openssl::rsa::Padding::PKCS1)
         .map_err(|err| format_err!("failed to decrypt KeyConfig using RSA - {}", err))?;
-    decrypt_key(&mut buffer[..decrypted], passphrase)
+    decrypt_key(&buffer[..decrypted], passphrase)
 }

 #[test]

@@ -372,9 +372,9 @@ fn encrypt_decrypt_test() -> Result<(), Error> {
         hint: None,
     };

-    let encrypted = rsa_encrypt_key_config(public.clone(), &key).expect("encryption failed");
+    let encrypted = rsa_encrypt_key_config(public, &key).expect("encryption failed");
     let (decrypted, created, fingerprint) =
-        rsa_decrypt_key_config(private.clone(), &encrypted, &passphrase)
+        rsa_decrypt_key_config(private, &encrypted, &passphrase)
             .expect("decryption failed");

     assert_eq!(key.created, created);

@@ -186,7 +186,7 @@ impl BackupManifest {
             manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
         }

-        let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
+        let manifest = serde_json::to_string_pretty(&manifest).unwrap();
         Ok(manifest)
     }

@@ -8,7 +8,7 @@ enum PruneMark { Keep, KeepPartial, Remove }

 fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
     mark: &mut HashMap<PathBuf, PruneMark>,
-    list: &Vec<BackupInfo>,
+    list: &[BackupInfo],
     keep: usize,
     select_id: F,
 ) -> Result<(), Error> {

@@ -26,7 +26,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (

     for info in list {
         let backup_id = info.backup_dir.relative_path();
-        if let Some(_) = mark.get(&backup_id) { continue; }
+        if mark.get(&backup_id).is_some() { continue; }
         let sel_id: String = select_id(&info)?;

         if already_included.contains(&sel_id) { continue; }

@@ -45,7 +45,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (

 fn remove_incomplete_snapshots(
     mark: &mut HashMap<PathBuf, PruneMark>,
-    list: &Vec<BackupInfo>,
+    list: &[BackupInfo],
 ) {

     let mut keep_unfinished = true;

@@ -342,7 +342,7 @@ pub fn verify_backup_dir_with_lock(
     };

     if let Some(filter) = filter {
-        if filter(&manifest) == false {
+        if !filter(&manifest) {
             task_log!(
                 worker,
                 "SKIPPED: verify {}:{} (recently verified)",

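Taking `&[T]` instead of `&Vec<T>` (clippy's `ptr_arg`) makes a function accept vectors, arrays, and sub-slices alike, since `&Vec<T>` auto-derefs to `&[T]` at the call site. A minimal sketch:

    fn total_backups(list: &[u64]) -> u64 {
        // &[T] accepts a Vec, an array, or a sub-slice; &Vec<T> only a Vec
        list.iter().sum()
    }

    fn main() {
        let counts = vec![1u64, 2, 3];
        assert_eq!(total_backups(&counts), 6);
        assert_eq!(total_backups(&counts[1..]), 5);
        assert_eq!(total_backups(&[4, 5]), 9);
    }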
@@ -898,7 +898,7 @@ async fn create_backup(
         }
     }

-    let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
+    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

     let client = connect(&repo)?;
     record_repository(&repo);

@@ -917,7 +917,7 @@ async fn create_backup(
             let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
             println!("Encryption key fingerprint: {}", fingerprint);

-            let crypt_config = CryptConfig::new(key.clone())?;
+            let crypt_config = CryptConfig::new(key)?;

             match key::find_master_pubkey()? {
                 Some(ref path) if path.exists() => {

@@ -1464,7 +1464,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
     if quiet {
         let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
             item["keep"].as_bool() == Some(false)
-        }).map(|v| v.clone()).collect();
+        }).cloned().collect();
         data = list.into();
     }

@@ -4,7 +4,7 @@ use std::os::unix::io::AsRawFd;

 use anyhow::{bail, format_err, Error};
 use futures::*;
-use hyper;

 use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
+use tokio_stream::wrappers::ReceiverStream;

@@ -218,10 +218,8 @@ fn accept_connections(

             match result {
                 Ok(Ok(())) => {
-                    if let Err(_) = sender.send(Ok(stream)).await {
-                        if debug {
-                            eprintln!("detect closed connection channel");
-                        }
+                    if sender.send(Ok(stream)).await.is_err() && debug {
+                        eprintln!("detect closed connection channel");
                     }
                 }
                 Ok(Err(err)) => {

@@ -583,16 +581,16 @@ async fn schedule_task_log_rotate() {
         false,
         move |worker| {
             job.start(&worker.upid().to_string())?;
-            worker.log(format!("starting task log rotation"));
+            worker.log("starting task log rotation".to_string());

             let result = try_block!({
                 let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
                 let max_files = 20; // times twenty files gives > 100000 task entries
                 let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
                 if has_rotated {
-                    worker.log(format!("task log archive was rotated"));
+                    worker.log("task log archive was rotated".to_string());
                 } else {
-                    worker.log(format!("task log archive was not rotated"));
+                    worker.log("task log archive was not rotated".to_string());
                 }

                 let max_size = 32 * 1024 * 1024 - 1;

@@ -603,18 +601,18 @@ async fn schedule_task_log_rotate() {
                 if logrotate.rotate(max_size, None, Some(max_files))? {
                     println!("rotated access log, telling daemons to re-open log file");
                     proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
-                    worker.log(format!("API access log was rotated"));
+                    worker.log("API access log was rotated".to_string());
                 } else {
-                    worker.log(format!("API access log was not rotated"));
+                    worker.log("API access log was not rotated".to_string());
                 }

                 let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
                     .ok_or_else(|| format_err!("could not get API auth log file names"))?;

                 if logrotate.rotate(max_size, None, Some(max_files))? {
-                    worker.log(format!("API authentication log was rotated"));
+                    worker.log("API authentication log was rotated".to_string());
                 } else {
-                    worker.log(format!("API authentication log was not rotated"));
+                    worker.log("API authentication log was not rotated".to_string());
                 }

                 Ok(())

@@ -751,7 +749,7 @@ async fn generate_host_stats(save: bool) {
     match datastore::config() {
         Ok((config, _)) => {
             let datastore_list: Vec<datastore::DataStoreConfig> =
-                config.convert_to_typed_array("datastore").unwrap_or(Vec::new());
+                config.convert_to_typed_array("datastore").unwrap_or_default();

             for config in datastore_list {

@@ -601,16 +601,14 @@ fn debug_scan(param: Value) -> Result<(), Error> {
             Ok(header) => {
                 if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                     println!("got MediaContentHeader with wrong magic: {:?}", header.magic);
-                } else {
-                    if let Some(name) = PROXMOX_BACKUP_CONTENT_NAME.get(&header.content_magic) {
-                        println!("got content header: {}", name);
-                        println!(" uuid: {}", header.content_uuid());
-                        println!(" ctime: {}", strftime_local("%c", header.ctime)?);
-                        println!(" hsize: {}", HumanByte::from(header.size as usize));
-                        println!(" part: {}", header.part_number);
-                    } else {
-                        println!("got unknown content header: {:?}", header.content_magic);
-                    }
+                } else if let Some(name) = PROXMOX_BACKUP_CONTENT_NAME.get(&header.content_magic) {
+                    println!("got content header: {}", name);
+                    println!(" uuid: {}", header.content_uuid());
+                    println!(" ctime: {}", strftime_local("%c", header.ctime)?);
+                    println!(" hsize: {}", HumanByte::from(header.size as usize));
+                    println!(" part: {}", header.part_number);
+                } else {
+                    println!("got unknown content header: {:?}", header.content_magic);
                 }
             }
             Err(err) => {

@@ -293,7 +293,7 @@ fn test_crypt_speed(
     let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
     benchmark_result.sha256.speed = Some(speed);

-    eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000_.0);
+    eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000.0);

     let start_time = std::time::Instant::now();

@@ -308,7 +308,7 @@ fn test_crypt_speed(
     let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
     benchmark_result.compress.speed = Some(speed);

-    eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000_.0);
+    eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000.0);

     let start_time = std::time::Instant::now();

@@ -328,7 +328,7 @@ fn test_crypt_speed(
     let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
     benchmark_result.decompress.speed = Some(speed);

-    eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000_.0);
+    eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000.0);

     let start_time = std::time::Instant::now();

@@ -343,7 +343,7 @@ fn test_crypt_speed(
     let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
     benchmark_result.aes256_gcm.speed = Some(speed);

-    eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000_.0);
+    eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);

     let start_time = std::time::Instant::now();

@@ -361,7 +361,7 @@ fn test_crypt_speed(
     let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
     benchmark_result.verify.speed = Some(speed);

-    eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000_.0);
+    eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000.0);

     Ok(())
 }

@ -189,12 +189,12 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
};

let server_archive_name = if archive_name.ends_with(".pxar") {
if let None = target {
if target.is_none() {
bail!("use the 'mount' command to mount pxar archives");
}
format!("{}.didx", archive_name)
} else if archive_name.ends_with(".img") {
if let Some(_) = target {
if target.is_some() {
bail!("use the 'map' command to map drive images");
}
format!("{}.fidx", archive_name)
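
`if let None = target` and `if let Some(_) = target` match a pattern only to throw it away; clippy's `redundant_pattern_matching` suggests the query methods instead. A minimal sketch:

```rust
fn main() {
    let target: Option<String> = None;

    // Same control flow as the pattern matches above, but the intent
    // ("is a target present?") is stated directly.
    if target.is_none() {
        println!("no target given");
    }
    if target.is_some() {
        println!("target given");
    }
}
```
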
@ -239,7 +239,7 @@ async fn get_status(
}
let text = value.as_str().unwrap().to_string();
if text.is_empty() {
return Ok(String::from("--FULL--"));
Ok(String::from("--FULL--"))
} else {
Ok(text)
}

@ -56,7 +56,7 @@ fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
let file = unsafe { File::from_raw_fd(fd) };
check_tape_is_linux_tape_device(&file)?;
LinuxTapeHandle::new(file)
} else if let Some(name) = std::env::var("PROXMOX_TAPE_DRIVE").ok() {
} else if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
let (config, _digest) = config::drive::config()?;
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
eprintln!("using device {}", drive.path);
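
The `get_tape_handle` hunk drops a needless `Result` to `Option` conversion: `std::env::var(..).ok()` was built only to pattern-match it. Matching the `Ok` variant directly is equivalent; a standalone sketch:

```rust
fn main() {
    // Before: `if let Some(name) = std::env::var("PROXMOX_TAPE_DRIVE").ok()`
    // After: match the Result variant without the intermediate Option.
    if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
        println!("using drive {}", name);
    } else {
        println!("PROXMOX_TAPE_DRIVE not set");
    }
}
```
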
@ -292,13 +292,11 @@ fn main() -> Result<(), Error> {
bail!("this program needs to be run with setuid root");
}

if !running_uid.is_root() {
if running_uid != backup_uid || running_gid != backup_gid {
bail!(
"Not running as backup user or group (got uid {} gid {})",
running_uid, running_gid,
);
}
if !running_uid.is_root() && (running_uid != backup_uid || running_gid != backup_gid) {
bail!(
"Not running as backup user or group (got uid {} gid {})",
running_uid, running_gid,
);
}

let cmd_def = CliCommandMap::new()
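
The `main` hunk is clippy's `collapsible_if`: an outer `if` whose body is nothing but another `if` merges into one condition. A sketch with plain integers standing in for the uid/gid types:

```rust
fn main() {
    let running_uid = 1000; // stand-in for the real Uid type
    let backup_uid = 34;

    // One condition instead of two nested blocks; behavior is unchanged.
    if running_uid != 0 && running_uid != backup_uid {
        eprintln!("not running as root or the backup user (uid {})", running_uid);
    }
}
```
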
@ -74,12 +74,14 @@ pub const ROLE_ADMIN: u64 = std::u64::MAX;
pub const ROLE_NO_ACCESS: u64 = 0;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Audit can view configuration and status information, but not modify it.
pub const ROLE_AUDIT: u64 = 0
| PRIV_SYS_AUDIT
| PRIV_DATASTORE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Admin can do anything on the datastore.
pub const ROLE_DATASTORE_ADMIN: u64 = 0
| PRIV_DATASTORE_AUDIT

@ -90,6 +92,7 @@ pub const ROLE_DATASTORE_ADMIN: u64 = 0
| PRIV_DATASTORE_PRUNE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Reader can read/verify datastore content and do restore
pub const ROLE_DATASTORE_READER: u64 = 0
| PRIV_DATASTORE_AUDIT

@ -97,27 +100,32 @@ pub const ROLE_DATASTORE_READER: u64 = 0
| PRIV_DATASTORE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Backup can do backup and restore, but no prune.
pub const ROLE_DATASTORE_BACKUP: u64 = 0
| PRIV_DATASTORE_BACKUP;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.PowerUser can do backup, restore, and prune.
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
| PRIV_DATASTORE_PRUNE
| PRIV_DATASTORE_BACKUP;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Audit can audit the datastore.
pub const ROLE_DATASTORE_AUDIT: u64 = 0
| PRIV_DATASTORE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Audit can audit the remote
pub const ROLE_REMOTE_AUDIT: u64 = 0
| PRIV_REMOTE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Admin can do anything on the remote.
pub const ROLE_REMOTE_ADMIN: u64 = 0
| PRIV_REMOTE_AUDIT

@ -125,6 +133,7 @@ pub const ROLE_REMOTE_ADMIN: u64 = 0
| PRIV_REMOTE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncOperator can do read and prune on the remote.
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
| PRIV_REMOTE_AUDIT

@ -363,6 +372,7 @@ impl AclTreeNode {
fn extract_group_roles(&self, _user: &Userid, leaf: bool) -> HashMap<String, bool> {
let mut map = HashMap::new();

#[allow(clippy::for_kv_map)]
for (_group, roles) in &self.groups {
let is_member = false; // fixme: check if user is member of the group
if !is_member {

@ -402,7 +412,7 @@ impl AclTreeNode {
}

fn insert_group_role(&mut self, group: String, role: String, propagate: bool) {
let map = self.groups.entry(group).or_insert_with(|| HashMap::new());
let map = self.groups.entry(group).or_insert_with(HashMap::new);
if role == ROLE_NAME_NO_ACCESS {
map.clear();
map.insert(role, propagate);

@ -413,7 +423,7 @@ impl AclTreeNode {
}

fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
let map = self.users.entry(auth_id).or_insert_with(|| HashMap::new());
let map = self.users.entry(auth_id).or_insert_with(HashMap::new);
if role == ROLE_NAME_NO_ACCESS {
map.clear();
map.insert(role, propagate);

@ -435,7 +445,7 @@ impl AclTree {
/// Iterates over the tree looking for a node matching `path`.
pub fn find_node(&mut self, path: &str) -> Option<&mut AclTreeNode> {
let path = split_acl_path(path);
return self.get_node(&path);
self.get_node(&path)
}

fn get_node(&mut self, path: &[&str]) -> Option<&mut AclTreeNode> {

@ -455,7 +465,7 @@ impl AclTree {
node = node
.children
.entry(String::from(*comp))
.or_insert_with(|| AclTreeNode::new());
.or_insert_with(AclTreeNode::new);
}
node
}
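
`or_insert_with(|| HashMap::new())` wraps a zero-argument constructor in a closure that adds nothing; the constructor already has the required `FnOnce() -> V` shape, so it can be passed by name (clippy: `redundant_closure`). A minimal sketch:

```rust
use std::collections::HashMap;

fn main() {
    let mut groups: HashMap<String, HashMap<String, bool>> = HashMap::new();

    // `HashMap::new` is passed as a function value; it is only called when
    // the entry is vacant.
    let map = groups.entry("admins".to_string()).or_insert_with(HashMap::new);
    map.insert("Role.Audit".to_string(), true);

    assert!(groups["admins"]["Role.Audit"]);
}
```
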
@ -521,12 +531,12 @@ impl AclTree {
if *propagate {
role_ug_map1
.entry(role)
.or_insert_with(|| BTreeSet::new())
.or_insert_with(BTreeSet::new)
.insert(auth_id);
} else {
role_ug_map0
.entry(role)
.or_insert_with(|| BTreeSet::new())
.or_insert_with(BTreeSet::new)
.insert(auth_id);
}
}

@ -538,12 +548,12 @@ impl AclTree {
if *propagate {
role_ug_map1
.entry(role)
.or_insert_with(|| BTreeSet::new())
.or_insert_with(BTreeSet::new)
.insert(group);
} else {
role_ug_map0
.entry(role)
.or_insert_with(|| BTreeSet::new())
.or_insert_with(BTreeSet::new)
.insert(group);
}
}

@ -563,7 +573,7 @@ impl AclTree {
});
result_map
.entry(item_list)
.or_insert_with(|| BTreeSet::new())
.or_insert_with(BTreeSet::new)
.insert(item.to_string());
}
result_map

@ -651,8 +661,7 @@ impl AclTree {
if !ROLE_NAMES.contains_key(role) {
bail!("unknown role '{}'", role);
}
if user_or_group.starts_with('@') {
let group = &user_or_group[1..];
if let Some(group) = user_or_group.strip_prefix('@') {
node.insert_group_role(group.to_string(), role.to_string(), propagate);
} else {
node.insert_user_role(user_or_group.parse()?, role.to_string(), propagate);
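
The last hunk above replaces a `starts_with` test plus manual slice (`&s[1..]`) with `strip_prefix`, which tests and slices in one step and cannot get the offset wrong. A standalone sketch:

```rust
fn main() {
    let user_or_group = "@backup-admins";

    // `strip_prefix` returns `Some(rest)` only when the prefix matched.
    if let Some(group) = user_or_group.strip_prefix('@') {
        println!("group entry: {}", group);
    } else {
        println!("user entry: {}", user_or_group);
    }
}
```
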
@ -98,7 +98,7 @@ impl CachedUserInfo {
}
}

return true;
true
}

pub fn check_privs(

@ -135,8 +135,8 @@ pub const DATASTORE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.datastore.lck";

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?;
let content = content.unwrap_or(String::from(""));
let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;

@ -68,8 +68,8 @@ pub fn lock() -> Result<std::fs::File, Error> {

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?;
let content = content.unwrap_or(String::from(""));
let content = proxmox::tools::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(DRIVE_CFG_FILENAME, &content)?;

@ -43,8 +43,8 @@ fn init() -> SectionConfig {
config
}

pub const MEDIA_POOL_CFG_FILENAME: &'static str = "/etc/proxmox-backup/media-pool.cfg";
pub const MEDIA_POOL_CFG_LOCKFILE: &'static str = "/etc/proxmox-backup/.media-pool.lck";
pub const MEDIA_POOL_CFG_FILENAME: &str = "/etc/proxmox-backup/media-pool.cfg";
pub const MEDIA_POOL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.media-pool.lck";

pub fn lock() -> Result<std::fs::File, Error> {
open_file_locked(MEDIA_POOL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)

@ -52,8 +52,8 @@ pub fn lock() -> Result<std::fs::File, Error> {

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?;
let content = content.unwrap_or(String::from(""));
let content = proxmox::tools::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(MEDIA_POOL_CFG_FILENAME, &content)?;
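
The config readers switch from `unwrap_or(..)` to `unwrap_or_else(..)` because `unwrap_or` evaluates its argument eagerly, even on the `Some` path (clippy: `or_fun_call`). A sketch with a stand-in default:

```rust
fn expensive_default() -> String {
    // Imagine this allocates or does real work.
    String::new()
}

fn main() {
    let content: Option<String> = None;

    // The closure (here, a function passed by name) only runs when the
    // Option is actually `None`.
    let content = content.unwrap_or_else(expensive_default);
    assert!(content.is_empty());
}
```
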
@ -386,9 +386,9 @@ impl NetworkConfig {
pub fn check_mtu(&self, parent_name: &str, child_name: &str) -> Result<(), Error> {

let parent = self.interfaces.get(parent_name)
.ok_or(format_err!("check_mtu - missing parent interface '{}'", parent_name))?;
.ok_or_else(|| format_err!("check_mtu - missing parent interface '{}'", parent_name))?;
let child = self.interfaces.get(child_name)
.ok_or(format_err!("check_mtu - missing child interface '{}'", child_name))?;
.ok_or_else(|| format_err!("check_mtu - missing child interface '{}'", child_name))?;

let child_mtu = match child.mtu {
Some(mtu) => mtu,

@ -515,7 +515,7 @@ pub fn config() -> Result<(NetworkConfig, [u8;32]), Error> {
Some(content) => content,
None => {
let content = proxmox::tools::fs::file_get_optional_contents(NETWORK_INTERFACES_FILENAME)?;
content.unwrap_or(Vec::new())
content.unwrap_or_default()
}
};

@ -577,8 +577,8 @@ pub fn complete_port_list(arg: &str, _param: &HashMap<String, String>) -> Vec<St
Err(_) => return vec![],
};

let arg = arg.clone().trim();
let prefix = if let Some(idx) = arg.rfind(",") { &arg[..idx+1] } else { "" };
let arg = arg.trim();
let prefix = if let Some(idx) = arg.rfind(',') { &arg[..idx+1] } else { "" };
ports.iter().map(|port| format!("{}{}", prefix, port)).collect()
}

@ -51,6 +51,7 @@ pub static IPV4_REVERSE_MASK: &[&str] = &[
lazy_static! {
pub static ref IPV4_MASK_HASH_LOCALNET: HashMap<&'static str, u8> = {
let mut map = HashMap::new();
#[allow(clippy::needless_range_loop)]
for i in 8..32 {
map.insert(IPV4_REVERSE_MASK[i], i as u8);
}

@ -61,22 +62,23 @@ lazy_static! {
pub fn parse_cidr(cidr: &str) -> Result<(String, u8, bool), Error> {
let (address, mask, is_v6) = parse_address_or_cidr(cidr)?;
if let Some(mask) = mask {
return Ok((address, mask, is_v6));
Ok((address, mask, is_v6))
} else {
bail!("missing netmask in '{}'", cidr);
}
}

pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
if is_v6 {
if !(mask >= 1 && mask <= 128) {
bail!("IPv6 mask '{}' is out of range (1..128).", mask);
}
let (ver, min, max) = if is_v6 {
("IPv6", 1, 128)
} else {
if !(mask > 0 && mask <= 32) {
bail!("IPv4 mask '{}' is out of range (1..32).", mask);
}
("IPv4", 1, 32)
};

if !(mask >= min && mask <= max) {
bail!("{} mask '{}' is out of range ({}..{}).", ver, mask, min, max);
}

Ok(())
}
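
The `check_netmask` rewrite folds the IPv4 and IPv6 branches into shared bounds plus one range check. A self-contained sketch of the same shape, with `anyhow` standing in for the error type used in the repo:

```rust
use anyhow::{bail, Error};

fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
    // The protocol-specific parts become data ...
    let (ver, min, max) = if is_v6 { ("IPv6", 1, 128) } else { ("IPv4", 1, 32) };
    // ... and the check is written exactly once.
    if !(min..=max).contains(&mask) {
        bail!("{} mask '{}' is out of range ({}..{}).", ver, mask, min, max);
    }
    Ok(())
}

fn main() -> Result<(), Error> {
    check_netmask(24, false)?;
    assert!(check_netmask(0, false).is_err());
    assert!(check_netmask(128, true).is_ok());
    Ok(())
}
```
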
@ -97,18 +99,18 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
if let Some(mask) = caps.get(2) {
let mask = u8::from_str_radix(mask.as_str(), 10)?;
check_netmask(mask, false)?;
return Ok((address.to_string(), Some(mask), false));
Ok((address.to_string(), Some(mask), false))
} else {
return Ok((address.to_string(), None, false));
Ok((address.to_string(), None, false))
}
} else if let Some(caps) = CIDR_V6_REGEX.captures(&cidr) {
let address = &caps[1];
if let Some(mask) = caps.get(2) {
let mask = u8::from_str_radix(mask.as_str(), 10)?;
check_netmask(mask, true)?;
return Ok((address.to_string(), Some(mask), true));
Ok((address.to_string(), Some(mask), true))
} else {
return Ok((address.to_string(), None, true));
Ok((address.to_string(), None, true))
}
} else {
bail!("invalid address/mask '{}'", cidr);

@ -74,9 +74,9 @@ impl <R: BufRead> Lexer<R> {
}

fn split_line(line: &str) -> VecDeque<(Token, String)> {
if line.starts_with("#") {
if let Some(comment) = line.strip_prefix('#') {
let mut res = VecDeque::new();
res.push_back((Token::Comment, line[1..].trim().to_string()));
res.push_back((Token::Comment, comment.trim().to_string()));
return res;
}
let mut list: VecDeque<(Token, String)> = line.split_ascii_whitespace().map(|text| {

@ -114,14 +114,14 @@ impl <R: BufRead> Iterator for Lexer<R> {
Some(ref mut cur_line) => {
if cur_line.is_empty() {
self.cur_line = None;
return Some(Ok((Token::Newline, String::from("\n"))));
Some(Ok((Token::Newline, String::from("\n"))))
} else {
let (token, text) = cur_line.pop_front().unwrap();
return Some(Ok((token, text)));
Some(Ok((token, text)))
}
}
None => {
return None;
None
}
}
}

@ -29,7 +29,7 @@ impl <R: BufRead> NetworkParser<R> {
bail!("input error - {}", err);
}
Some(Ok((token, _))) => {
return Ok(*token);
Ok(*token)
}
None => {
bail!("got unexpected end of stream (inside peek)");

@ -44,7 +44,7 @@ impl <R: BufRead> NetworkParser<R> {
}
Some(Ok((token, text))) => {
if token == Token::Newline { self.line_nr += 1; }
return Ok((token, text));
Ok((token, text))
}
None => {
bail!("got unexpected end of stream (inside peek)");

@ -215,12 +215,12 @@ impl <R: BufRead> NetworkParser<R> {
Token::Comment => {
let comment = self.eat(Token::Comment)?;
if !address_family_v4 && address_family_v6 {
let mut comments = interface.comments6.take().unwrap_or(String::new());
let mut comments = interface.comments6.take().unwrap_or_default();
if !comments.is_empty() { comments.push('\n'); }
comments.push_str(&comment);
interface.comments6 = Some(comments);
} else {
let mut comments = interface.comments.take().unwrap_or(String::new());
let mut comments = interface.comments.take().unwrap_or_default();
if !comments.is_empty() { comments.push('\n'); }
comments.push_str(&comment);
interface.comments = Some(comments);

@ -92,8 +92,8 @@ pub const REMOTE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.remote.lck";

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?;
let content = content.unwrap_or(String::from(""));
let content = proxmox::tools::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(REMOTE_CFG_FILENAME, &content)?;

@ -79,7 +79,7 @@ impl From<&SyncJobStatus> for SyncJobConfig {
owner: job_status.owner.clone(),
remote: job_status.remote.clone(),
remote_store: job_status.remote_store.clone(),
remove_vanished: job_status.remove_vanished.clone(),
remove_vanished: job_status.remove_vanished,
comment: job_status.comment.clone(),
schedule: job_status.schedule.clone(),
}

@ -183,8 +183,8 @@ pub const SYNC_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.sync.lck";

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(SYNC_CFG_FILENAME)?;
let content = content.unwrap_or(String::from(""));
let content = proxmox::tools::fs::file_read_optional_string(SYNC_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(SYNC_CFG_FILENAME, &content)?;

@ -53,7 +53,7 @@ pub struct EncryptionKeyInfo {
}

pub fn compute_tape_key_fingerprint(key: &[u8; 32]) -> Result<Fingerprint, Error> {
let crypt_config = CryptConfig::new(key.clone())?;
let crypt_config = CryptConfig::new(*key)?;
Ok(crypt_config.fingerprint())
}

@ -193,7 +193,7 @@ pub fn insert_key(key: [u8;32], key_config: KeyConfig, force: bool) -> Result<()
};

if !force {
if let Some(_) = config_map.get(&fingerprint) {
if config_map.get(&fingerprint).is_some() {
bail!("encryption key '{}' already exists.", fingerprint);
}
}

@ -1380,14 +1380,14 @@ impl std::str::FromStr for TfaResponse {
type Err = Error;

fn from_str(s: &str) -> Result<Self, Error> {
Ok(if s.starts_with("totp:") {
TfaResponse::Totp(s[5..].to_string())
} else if s.starts_with("u2f:") {
TfaResponse::U2f(serde_json::from_str(&s[4..])?)
} else if s.starts_with("webauthn:") {
TfaResponse::Webauthn(serde_json::from_str(&s[9..])?)
} else if s.starts_with("recovery:") {
TfaResponse::Recovery(s[9..].to_string())
Ok(if let Some(totp) = s.strip_prefix("totp:") {
TfaResponse::Totp(totp.to_string())
} else if let Some(u2f) = s.strip_prefix("u2f:") {
TfaResponse::U2f(serde_json::from_str(u2f)?)
} else if let Some(webauthn) = s.strip_prefix("webauthn:") {
TfaResponse::Webauthn(serde_json::from_str(webauthn)?)
} else if let Some(recovery) = s.strip_prefix("recovery:") {
TfaResponse::Recovery(recovery.to_string())
} else {
bail!("invalid tfa response");
})

@ -157,8 +157,8 @@ pub const USER_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.user.lck";

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {

let content = proxmox::tools::fs::file_read_optional_string(USER_CFG_FILENAME)?;
let content = content.unwrap_or(String::from(""));
let content = proxmox::tools::fs::file_read_optional_string(USER_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());

let digest = openssl::sha::sha256(content.as_bytes());
let mut data = CONFIG.parse(USER_CFG_FILENAME, &content)?;

@ -40,8 +40,7 @@ fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
pub fn is_virtual_file_system(magic: i64) -> bool {
use proxmox::sys::linux::magic::*;

match magic {
BINFMTFS_MAGIC |
matches!(magic, BINFMTFS_MAGIC |
CGROUP2_SUPER_MAGIC |
CGROUP_SUPER_MAGIC |
CONFIGFS_MAGIC |

@ -58,9 +57,7 @@ pub fn is_virtual_file_system(magic: i64) -> bool {
SECURITYFS_MAGIC |
SELINUX_MAGIC |
SMACK_MAGIC |
SYSFS_MAGIC => true,
_ => false
}
SYSFS_MAGIC)
}

#[derive(Debug)]

@ -228,7 +228,7 @@ impl Extractor {
allow_existing_dirs,
feature_flags,
current_path: Arc::new(Mutex::new(OsString::new())),
on_error: Box::new(|err| Err(err)),
on_error: Box::new(Err),
}
}

@ -480,11 +480,11 @@ impl SessionImpl {
Ok(())
}

async fn lookup<'a>(
&'a self,
async fn lookup(
&'_ self,
parent: u64,
file_name: &OsStr,
) -> Result<(EntryParam, LookupRef<'a>), Error> {
) -> Result<(EntryParam, LookupRef<'_>), Error> {
let dir = self.open_dir(parent).await?;

let entry = match { dir }.lookup(file_name).await? {

@ -519,10 +519,10 @@ impl SessionImpl {
to_stat(inode, &entry)
}

async fn readdirplus<'a>(
&'a self,
async fn readdirplus(
&'_ self,
request: &mut requests::ReaddirPlus,
) -> Result<Vec<LookupRef<'a>>, Error> {
) -> Result<Vec<LookupRef<'_>>, Error> {
let mut lookups = Vec::new();
let offset = usize::try_from(request.offset)
.map_err(|_| io_format_err!("directory offset out of range"))?;
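
The `SessionImpl` hunks drop the named lifetime: with a single `&self` borrow, Rust's elision rules already tie the returned reference to `self`, so the anonymous `'_` suffices (clippy: `needless_lifetimes`). A synchronous sketch of the same rule (the original functions are async; that detail is omitted here for brevity):

```rust
struct Session {
    names: Vec<String>,
}

impl Session {
    // Equivalent to `fn lookup<'a>(&'a self, ...) -> Option<&'a str>`.
    fn lookup(&self, idx: usize) -> Option<&'_ str> {
        self.names.get(idx).map(|s| s.as_str())
    }
}

fn main() {
    let session = Session { names: vec!["root".to_string()] };
    assert_eq!(session.lookup(0), Some("root"));
    assert_eq!(session.lookup(1), None);
}
```
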
@ -345,10 +345,7 @@ fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Resul
}

pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
match errno {
Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
_ => false,
}
matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
}

fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
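
A `match` whose arms only produce `true` or `false` is what the `matches!` macro expresses directly; both `is_virtual_file_system` and `errno_is_unsupported` above use it. A sketch with a stand-in enum (the real code matches nix's `Errno`):

```rust
#[derive(Debug, PartialEq)]
enum Errno {
    Enotty,
    Enosys,
    Ebadf,
    Eio,
}

// `matches!(value, pattern)` expands to a match returning a bool.
fn errno_is_unsupported(errno: Errno) -> bool {
    matches!(errno, Errno::Enotty | Errno::Enosys | Errno::Ebadf)
}

fn main() {
    assert!(errno_is_unsupported(Errno::Enotty));
    assert!(!errno_is_unsupported(Errno::Eio));
}
```
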
@ -128,25 +128,20 @@ impl RRA {
// derive counter value
if self.flags.intersects(RRAFlags::DST_DERIVE | RRAFlags::DST_COUNTER) {
let time_diff = time - self.last_update;
let is_counter = self.flags.contains(RRAFlags::DST_COUNTER);

let diff = if self.counter_value.is_nan() {
0.0
} else if is_counter && value < 0.0 {
eprintln!("rrdb update failed - got negative value for counter");
return;
} else if is_counter && value < self.counter_value {
// Note: We do not try automatic overflow corrections
self.counter_value = value;
eprintln!("rrdb update failed - conter overflow/reset detected");
return;
} else {
if self.flags.contains(RRAFlags::DST_COUNTER) { // check for overflow
if value < 0.0 {
eprintln!("rrdb update failed - got negative value for counter");
return;
}
// Note: We do not try automatic overflow corrections
if value < self.counter_value { // overflow or counter reset
self.counter_value = value;
eprintln!("rrdb update failed - conter overflow/reset detected");
return;
} else {
value - self.counter_value
}
} else {
value - self.counter_value
}
value - self.counter_value
};
self.counter_value = value;
value = diff/time_diff;

@ -127,13 +127,13 @@ pub async fn send_command<P>(
if rx.read_line(&mut data).await? == 0 {
bail!("no response");
}
if data.starts_with("OK: ") {
match data[4..].parse::<Value>() {
if let Some(res) = data.strip_prefix("OK: ") {
match res.parse::<Value>() {
Ok(v) => Ok(v),
Err(err) => bail!("unable to parse json response - {}", err),
}
} else if data.starts_with("ERROR: ") {
bail!("{}", &data[7..]);
} else if let Some(err) = data.strip_prefix("ERROR: ") {
bail!("{}", err);
} else {
bail!("unable to parse response: {}", data);
}

@ -57,9 +57,9 @@ impl ApiConfig {
prefix.push_str(components[0]);
if let Some(subdir) = self.aliases.get(&prefix) {
filename.push(subdir);
for i in 1..comp_len { filename.push(components[i]) }
components.iter().skip(1).for_each(|comp| filename.push(comp));
} else {
for i in 0..comp_len { filename.push(components[i]) }
components.iter().for_each(|comp| filename.push(comp));
}
}
filename

@ -376,7 +376,7 @@ fn get_server_url() -> (String, usize) {
}

pub fn send_updates_available(
updates: &Vec<&APTUpdateInfo>,
updates: &[&APTUpdateInfo],
) -> Result<(), Error> {
// update mails always go to the root@pam configured email..
if let Some(email) = lookup_user_email(Userid::root_userid()) {

@ -403,7 +403,7 @@ fn lookup_user_email(userid: &Userid) -> Option<String> {

if let Ok(user_config) = user::cached_config() {
if let Ok(user) = user_config.lookup::<User>("user", userid.as_str()) {
return user.email.clone();
return user.email;
}
}

@ -434,7 +434,7 @@ pub fn lookup_datastore_notify_settings(
None => lookup_user_email(Userid::root_userid()),
};

let notify_str = config.notify.unwrap_or(String::new());
let notify_str = config.notify.unwrap_or_default();

if let Ok(value) = parse_property_string(&notify_str, &DatastoreNotify::API_SCHEMA) {
if let Ok(notify) = serde_json::from_value(value) {

@ -456,7 +456,7 @@ fn handlebars_humam_bytes_helper(
) -> HelperResult {
let param = h.param(0).map(|v| v.value().as_u64())
.flatten()
.ok_or(RenderError::new("human-bytes: param not found"))?;
.ok_or_else(|| RenderError::new("human-bytes: param not found"))?;

out.write(&HumanByte::from(param).to_string())?;

@ -472,10 +472,10 @@ fn handlebars_relative_percentage_helper(
) -> HelperResult {
let param0 = h.param(0).map(|v| v.value().as_f64())
.flatten()
.ok_or(RenderError::new("relative-percentage: param0 not found"))?;
.ok_or_else(|| RenderError::new("relative-percentage: param0 not found"))?;
let param1 = h.param(1).map(|v| v.value().as_f64())
.flatten()
.ok_or(RenderError::new("relative-percentage: param1 not found"))?;
.ok_or_else(|| RenderError::new("relative-percentage: param1 not found"))?;

if param1 == 0.0 {
out.write("-")?;

@ -48,6 +48,6 @@ impl RpcEnvironment for RestEnvironment {
}

fn get_client_ip(&self) -> Option<std::net::SocketAddr> {
self.client_ip.clone()
self.client_ip
}
}

@ -39,13 +39,12 @@ pub fn do_garbage_collection_job(

let status = worker.create_state(&result);

match job.finish(status) {
Err(err) => eprintln!(
if let Err(err) = job.finish(status) {
eprintln!(
"could not finish job state for {}: {}",
job.jobtype().to_string(),
err
),
Ok(_) => (),
);
}

if let Some(email) = email {
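
The garbage-collection hunk turns a `match` with an empty `Ok` arm into `if let Err(..)`, which keeps only the interesting case. A minimal sketch:

```rust
fn finish(ok: bool) -> Result<(), String> {
    if ok { Ok(()) } else { Err("state file not writable".to_string()) }
}

fn main() {
    // Before: `match finish(..) { Err(err) => eprintln!(..), Ok(_) => () }`
    if let Err(err) = finish(false) {
        eprintln!("could not finish job state: {}", err);
    }
}
```
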
@ -97,7 +97,7 @@ impl <E: RpcEnvironment + Clone> tower_service::Service<Request<Body>> for H2Ser
let method = req.method().clone();
let worker = self.worker.clone();

std::pin::Pin::from(self.handle_request(req))
self.handle_request(req)
.map(move |result| match result {
Ok(res) => {
Self::log_response(worker, method, &path, &res);

@ -207,11 +207,8 @@ impl Job {
/// Start the job and update the statefile accordingly
/// Fails if the job was already started
pub fn start(&mut self, upid: &str) -> Result<(), Error> {
match self.state {
JobState::Started { .. } => {
bail!("cannot start job that is started!");
}
_ => {}
if let JobState::Started { .. } = self.state {
bail!("cannot start job that is started!");
}

self.state = JobState::Started {

@ -39,7 +39,7 @@ fn function_calls() -> Vec<(&'static str, fn() -> String)> {
};

let mut list = Vec::new();
for (store, _) in &config.sections {
for store in config.sections.keys() {
list.push(store.as_str());
}
list.join(", ")

@ -147,7 +147,7 @@ fn log_response(
let now = proxmox::tools::time::epoch_i64();
// time format which apache/nginx use (by default), copied from pve-http-server
let datetime = proxmox::tools::time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
.unwrap_or("-".into());
.unwrap_or_else(|_| "-".to_string());

logfile
.lock()

@ -161,7 +161,7 @@ fn log_response(
path,
status.as_str(),
resp.body().size_hint().lower(),
user_agent.unwrap_or("-".into()),
user_agent.unwrap_or_else(|| "-".to_string()),
));
}
}

@ -517,7 +517,7 @@ async fn chuncked_static_file_download(filename: PathBuf) -> Result<Response<Bod
.map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
.map_ok(|bytes| bytes.freeze());
let body = Body::wrap_stream(payload);

// fixme: set other headers ?

@ -68,8 +68,8 @@ impl std::str::FromStr for ApiTicket {
type Err = Error;

fn from_str(s: &str) -> Result<Self, Error> {
if s.starts_with("!tfa!") {
Ok(ApiTicket::Partial(serde_json::from_str(&s[5..])?))
if let Some(tfa_ticket) = s.strip_prefix("!tfa!") {
Ok(ApiTicket::Partial(serde_json::from_str(tfa_ticket)?))
} else {
Ok(ApiTicket::Full(s.parse()?))
}

@ -23,7 +23,7 @@ pub fn do_verification_job(

let datastore = DataStore::lookup_datastore(&verification_job.store)?;

let outdated_after = verification_job.outdated_after.clone();
let outdated_after = verification_job.outdated_after;
let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);

let filter = move |manifest: &BackupManifest| {

@ -33,7 +33,7 @@ pub fn do_verification_job(

let raw_verify_state = manifest.unprotected["verify_state"].clone();
match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
Err(_) => return true, // no last verification, always include
Err(_) => true, // no last verification, always include
Ok(last_verify) => {
match outdated_after {
None => false, // never re-verify if ignored and no max age

@ -83,13 +83,12 @@ pub fn do_verification_job(

let status = worker.create_state(&job_result);

match job.finish(status) {
Err(err) => eprintln!(
if let Err(err) = job.finish(status) {
eprintln!(
"could not finish job state for {}: {}",
job.jobtype().to_string(),
err
),
Ok(_) => (),
);
}

if let Some(email) = email {

@ -48,7 +48,7 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
return Ok(WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id));
}

if !procfs::check_process_running_pstart(upid.pid, upid.pstart).is_some() {
if procfs::check_process_running_pstart(upid.pid, upid.pstart).is_none() {
return Ok(false);
}

@ -191,7 +191,7 @@ pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
file.read_to_end(&mut data)?;

// task logs should end with newline, we do not want it here
if data.len() > 0 && data[data.len()-1] == b'\n' {
if !data.is_empty() && data[data.len()-1] == b'\n' {
data.pop();
}

@ -267,11 +267,11 @@ impl TaskState {
Ok(TaskState::Unknown { endtime })
} else if s == "OK" {
Ok(TaskState::OK { endtime })
} else if s.starts_with("WARNINGS: ") {
let count: u64 = s[10..].parse()?;
} else if let Some(warnings) = s.strip_prefix("WARNINGS: ") {
let count: u64 = warnings.parse()?;
Ok(TaskState::Warning{ count, endtime })
} else if s.len() > 0 {
let message = if s.starts_with("ERROR: ") { &s[7..] } else { s }.to_string();
} else if !s.is_empty() {
let message = if let Some(err) = s.strip_prefix("ERROR: ") { err } else { s }.to_string();
Ok(TaskState::Error{ message, endtime })
} else {
bail!("unable to parse Task Status '{}'", s);

@ -330,7 +330,7 @@ pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: O
let _lock = lock_task_list_files(true)?;

let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress)
.ok_or(format_err!("could not get archive file names"))?;
.ok_or_else(|| format_err!("could not get archive file names"))?;

logrotate.rotate(size_threshold, None, max_files)
}

@ -362,8 +362,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
if !worker_is_active_local(&info.upid) {
// println!("Detected stopped task '{}'", &info.upid_str);
let now = proxmox::tools::time::epoch_i64();
let status = upid_read_status(&info.upid)
.unwrap_or_else(|_| TaskState::Unknown { endtime: now });
let status = upid_read_status(&info.upid).unwrap_or(TaskState::Unknown { endtime: now });
finish_list.push(TaskListInfo {
upid: info.upid,
upid_str: info.upid_str,

@ -187,11 +187,9 @@ pub trait MediaChange {
if let ElementStatus::Empty = element_status {
to = Some(i as u64 + 1);
}
} else {
if let ElementStatus::VolumeTag(ref tag) = element_status {
if tag == label_text {
from = Some(i as u64 + 1);
}
} else if let ElementStatus::VolumeTag(ref tag) = element_status {
if tag == label_text {
from = Some(i as u64 + 1);
}
}
}

@ -58,13 +58,12 @@ fn parse_drive_status(i: &str) -> IResult<&str, DriveStatus> {

let mut loaded_slot = None;

if i.starts_with("Empty") {
return Ok((&i[5..], DriveStatus { loaded_slot, status: ElementStatus::Empty }));
if let Some(empty) = i.strip_suffix("Empty") {
return Ok((empty, DriveStatus { loaded_slot, status: ElementStatus::Empty }));
}
let (mut i, _) = tag("Full (")(i)?;

if i.starts_with("Storage Element ") {
let n = &i[16..];
if let Some(n) = i.strip_prefix("Storage Element ") {
let (n, id) = parse_u64(n)?;
loaded_slot = Some(id);
let (n, _) = tag(" Loaded")(n)?;

@ -76,8 +75,7 @@ fn parse_drive_status(i: &str) -> IResult<&str, DriveStatus> {

let (i, _) = tag(")")(i)?;

if i.starts_with(":VolumeTag = ") {
let i = &i[13..];
if let Some(i) = i.strip_prefix(":VolumeTag = ") {
let (i, tag) = take_while(|c| !(c == ' ' || c == ':' || c == '\n'))(i)?;
let (i, _) = take_while(|c| c != '\n')(i)?; // skip to eol
return Ok((i, DriveStatus { loaded_slot, status: ElementStatus::VolumeTag(tag.to_string()) }));

@ -89,14 +87,11 @@ fn parse_drive_status(i: &str) -> IResult<&str, DriveStatus> {
}

fn parse_slot_status(i: &str) -> IResult<&str, ElementStatus> {
if i.starts_with("Empty") {
return Ok((&i[5..], ElementStatus::Empty));
if let Some(empty) = i.strip_prefix("Empty") {
return Ok((empty, ElementStatus::Empty));
}
if i.starts_with("Full ") {
let mut n = &i[5..];

if n.starts_with(":VolumeTag=") {
n = &n[11..];
if let Some(n) = i.strip_prefix("Full ") {
if let Some(n) = n.strip_prefix(":VolumeTag=") {
let (n, tag) = take_while(|c| !(c == ' ' || c == ':' || c == '\n'))(n)?;
let (n, _) = take_while(|c| c != '\n')(n)?; // skip to eol
return Ok((n, ElementStatus::VolumeTag(tag.to_string())));

@ -62,15 +62,11 @@ impl <'a> ChunkArchiveWriter<'a> {
}

fn write_all(&mut self, data: &[u8]) -> Result<bool, std::io::Error> {
let result = match self.writer {
Some(ref mut writer) => {
let leom = writer.write_all(data)?;
Ok(leom)
}
match self.writer {
Some(ref mut writer) => writer.write_all(data),
None => proxmox::io_bail!(
"detected write after archive finished - internal error"),
};
result
}
}

/// Write chunk into archive.

@ -24,10 +24,7 @@ pub fn has_encryption<F: AsRawFd>(
Ok(data) => data,
Err(_) => return false,
};
match decode_spin_data_encryption_caps(&data) {
Ok(_) => true,
Err(_) => false,
}
decode_spin_data_encryption_caps(&data).is_ok()
}

/// Set or clear encryption key

@ -85,12 +85,12 @@ pub fn linux_tape_changer_list() -> Vec<TapeDeviceInfo> {
let vendor = device.property_value("ID_VENDOR")
.map(std::ffi::OsString::from)
.and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
.unwrap_or(String::from("unknown"));
.unwrap_or_else(|| String::from("unknown"));

let model = device.property_value("ID_MODEL")
.map(std::ffi::OsString::from)
.and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
.unwrap_or(String::from("unknown"));
.unwrap_or_else(|| String::from("unknown"));

let dev_path = format!("/dev/tape/by-id/scsi-{}", serial);

@ -166,12 +166,12 @@ pub fn linux_tape_device_list() -> Vec<TapeDeviceInfo> {
let vendor = device.property_value("ID_VENDOR")
.map(std::ffi::OsString::from)
.and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
.unwrap_or(String::from("unknown"));
.unwrap_or_else(|| String::from("unknown"));

let model = device.property_value("ID_MODEL")
.map(std::ffi::OsString::from)
.and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
.unwrap_or(String::from("unknown"));
.unwrap_or_else(|| String::from("unknown"));

let dev_path = format!("/dev/tape/by-id/scsi-{}-nst", serial);
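
In the two device-list hunks only the `unwrap_or` is changed; the closure `|s| if let Ok(s) = s.into_string() { Some(s) } else { None }` is kept as-is, although it is equivalent to `s.into_string().ok()`. A sketch of that further simplification (not applied in this commit):

```rust
use std::ffi::OsString;

fn main() {
    let vendor: Option<OsString> = Some(OsString::from("QUANTUM"));

    // `OsString::into_string` returns Result<String, OsString>, so the
    // whole if-let closure is just `Result::ok`.
    let vendor = vendor
        .and_then(|s| s.into_string().ok())
        .unwrap_or_else(|| String::from("unknown"));

    assert_eq!(vendor, "QUANTUM");
}
```
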
@ -98,16 +98,14 @@ impl LinuxTapeDrive {

if drive_status.blocksize == 0 {
// device is variable block size - OK
} else {
if drive_status.blocksize != PROXMOX_TAPE_BLOCK_SIZE as u32 {
eprintln!("device is in fixed block size mode with wrong size ({} bytes)", drive_status.blocksize);
eprintln!("trying to set variable block size mode...");
if handle.set_block_size(0).is_err() {
bail!("set variable block size mod failed - device uses wrong blocksize.");
}
} else {
// device is in fixed block size mode with correct block size
} else if drive_status.blocksize != PROXMOX_TAPE_BLOCK_SIZE as u32 {
eprintln!("device is in fixed block size mode with wrong size ({} bytes)", drive_status.blocksize);
eprintln!("trying to set variable block size mode...");
if handle.set_block_size(0).is_err() {
bail!("set variable block size mod failed - device uses wrong blocksize.");
}
} else {
// device is in fixed block size mode with correct block size
}

// Only root can set driver options, so we cannot

@ -528,7 +526,7 @@ impl TapeDriver for LinuxTapeHandle {
let result: Result<u64, String> = serde_json::from_str(&output)?;
result
.map_err(|err| format_err!("{}", err))
.map(|bits| TapeAlertFlags::from_bits_truncate(bits))
.map(TapeAlertFlags::from_bits_truncate)
}

/// Set or clear encryption key

@ -32,7 +32,7 @@ enum MamFormat {
DEC,
}

static MAM_ATTRIBUTES: &'static [ (u16, u16, MamFormat, &'static str) ] = &[
static MAM_ATTRIBUTES: &[ (u16, u16, MamFormat, &str) ] = &[
(0x00_00, 8, MamFormat::DEC, "Remaining Capacity In Partition"),
(0x00_01, 8, MamFormat::DEC, "Maximum Capacity In Partition"),
(0x00_02, 8, MamFormat::DEC, "Tapealert Flags"),

@ -258,13 +258,13 @@ pub fn required_media_changer(
) -> Result<(Box<dyn MediaChange>, String), Error> {
match media_changer(config, drive) {
Ok(Some(result)) => {
return Ok(result);
Ok(result)
}
Ok(None) => {
bail!("drive '{}' has no associated changer device", drive);
},
Err(err) => {
return Err(err);
Err(err)
}
}
}

@ -339,7 +339,7 @@ pub fn request_and_load_media(

let media_id = check_label(handle.as_mut(), &label.uuid)?;

return Ok((handle, media_id));
Ok((handle, media_id))
}
"linux" => {
let drive_config = LinuxTapeDrive::deserialize(config)?;

@ -390,20 +390,18 @@ pub fn request_and_load_media(
media_id.label.uuid.to_string(),
));
return Ok((Box::new(handle), media_id));
} else {
if Some(media_id.label.uuid.clone()) != last_media_uuid {
worker.log(format!(
"wrong media label {} ({})",
media_id.label.label_text,
media_id.label.uuid.to_string(),
));
last_media_uuid = Some(media_id.label.uuid);
}
} else if Some(media_id.label.uuid.clone()) != last_media_uuid {
worker.log(format!(
"wrong media label {} ({})",
media_id.label.label_text,
media_id.label.uuid.to_string(),
));
last_media_uuid = Some(media_id.label.uuid);
}
}
Ok((None, _)) => {
if last_media_uuid.is_some() {
worker.log(format!("found empty media without label (please label all tapes first)"));
worker.log("found empty media without label (please label all tapes first)".to_string());
last_media_uuid = None;
}
}

@ -17,6 +17,7 @@ bitflags::bitflags!{
///
/// See LTO SCSI Reference LOG_SENSE - LP 2Eh: TapeAlerts
pub struct TapeAlertFlags: u64 {
#[allow(clippy::eq_op)]
const READ_WARNING = 1 << (0x0001 -1);
const WRITE_WARNING = 1 << (0x0002 -1);
const HARD_ERROR = 1 << (0x0003 -1);

@ -168,8 +168,8 @@ impl VirtualTapeHandle {
if path.is_file() && path.extension() == Some(std::ffi::OsStr::new("json")) {
if let Some(name) = path.file_stem() {
if let Some(name) = name.to_str() {
if name.starts_with("tape-") {
list.push(name[5..].to_string());
if let Some(label) = name.strip_prefix("tape-") {
list.push(label.to_string());
}
}
}

@ -95,19 +95,16 @@ fn decode_volume_statistics(data: &[u8]) -> Result<Lp17VolumeStatistics, Error>

let read_be_counter = |reader: &mut &[u8], len: u8| {
let len = len as usize;

if len == 0 || len > 8 {
bail!("invalid conter size '{}'", len);
}
let mut buffer = [0u8; 8];
reader.read_exact(&mut buffer[..len])?;

let mut value: u64 = 0;

for i in 0..len {
value = value << 8;
value = value | buffer[i] as u64;
}
let value = buffer
.iter()
.take(len)
.fold(0, |value, curr| (value << 8) | *curr as u64);

Ok(value)
};
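
The fold in `decode_volume_statistics` replaces a shift-and-or loop that accumulates a big-endian counter. A standalone sketch of the same arithmetic:

```rust
// Accumulate up to `len` big-endian bytes into a u64: shift the running
// value left by one byte, then OR the next byte in.
fn be_counter(buffer: &[u8], len: usize) -> u64 {
    buffer
        .iter()
        .take(len)
        .fold(0u64, |value, curr| (value << 8) | *curr as u64)
}

fn main() {
    let buf = [0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00];
    assert_eq!(be_counter(&buf, 3), 0x0001_0203);
}
```
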
@ -81,10 +81,8 @@ impl <R: Read> BlockedReader<R> {

if size > buffer.payload.len() {
proxmox::io_bail!("detected tape block with wrong payload size ({} > {}", size, buffer.payload.len());
} else if size == 0 {
if !found_end_marker{
proxmox::io_bail!("detected tape block with zero payload size");
}
} else if size == 0 && !found_end_marker {
proxmox::io_bail!("detected tape block with zero payload size");
}

@ -179,7 +177,7 @@ impl <R: Read> Read for BlockedReader<R> {
}

if rest <= 0 {
return Ok(0);
Ok(0)
} else {
let copy_len = if (buffer.len() as isize) < rest {
buffer.len()

@ -189,7 +187,7 @@ impl <R: Read> Read for BlockedReader<R> {
buffer[..copy_len].copy_from_slice(
&self.buffer.payload[self.read_pos..(self.read_pos + copy_len)]);
self.read_pos += copy_len;
return Ok(copy_len);
Ok(copy_len)
}
}
}

@ -77,7 +77,7 @@ impl <W: Write> BlockedWriter<W> {
self.bytes_written += BlockHeader::SIZE;

} else {
self.buffer_pos = self.buffer_pos + bytes;
self.buffer_pos += bytes;
}

Ok(bytes)

@ -50,7 +50,7 @@ impl SnapshotReader {
}
};

let mut client_log_path = snapshot_path.clone();
let mut client_log_path = snapshot_path;
client_log_path.push(CLIENT_LOG_BLOB_NAME);

let mut file_list = Vec::new();

@ -215,12 +215,13 @@ impl Inventory {

/// find media by label_text
pub fn find_media_by_label_text(&self, label_text: &str) -> Option<&MediaId> {
for (_uuid, entry) in &self.map {
self.map.values().find_map(|entry| {
if entry.id.label.label_text == label_text {
return Some(&entry.id);
Some(&entry.id)
} else {
None
}
}
None
})
}

/// Lookup media pool

@ -245,7 +246,7 @@ impl Inventory {
pub fn list_pool_media(&self, pool: &str) -> Vec<MediaId> {
let mut list = Vec::new();

for (_uuid, entry) in &self.map {
for entry in self.map.values() {
match entry.id.media_set_label {
None => continue, // not assigned to any pool
Some(ref set) => {

@ -272,7 +273,7 @@ impl Inventory {
pub fn list_used_media(&self) -> Vec<MediaId> {
let mut list = Vec::new();

for (_uuid, entry) in &self.map {
for entry in self.map.values() {
match entry.id.media_set_label {
None => continue, // not assigned to any pool
Some(ref set) => {

@ -288,19 +289,17 @@ impl Inventory {

/// List media not assigned to any pool
pub fn list_unassigned_media(&self) -> Vec<MediaId> {
let mut list = Vec::new();

for (_uuid, entry) in &self.map {
self.map.values().filter_map(|entry|
if entry.id.media_set_label.is_none() {
list.push(entry.id.clone());
Some(entry.id.clone())
} else {
None
}
}

list
).collect()
}

pub fn media_set_start_time(&self, media_set_uuid: &Uuid) -> Option<i64> {
self.media_set_start_times.get(media_set_uuid).map(|t| *t)
self.media_set_start_times.get(media_set_uuid).copied()
}

/// Lookup media set pool
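
The `Inventory` hunks replace `for (_uuid, entry) in &self.map` loops with `values()`, and hand-rolled search/collect loops with `find_map` and `filter_map(..).collect()`. A sketch of the same patterns on a toy map:

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct MediaId {
    label_text: String,
}

fn main() {
    let mut map: HashMap<u32, MediaId> = HashMap::new();
    map.insert(1, MediaId { label_text: "tape-a".into() });
    map.insert(2, MediaId { label_text: "tape-b".into() });

    // The key is unused, so iterate values() directly; find_map stops at
    // the first hit.
    let found = map.values().find_map(|entry| {
        if entry.label_text == "tape-b" { Some(entry) } else { None }
    });
    assert!(found.is_some());

    // filter_map(..).collect() replaces the push-into-a-Vec loop.
    let all: Vec<MediaId> = map.values().cloned().collect();
    assert_eq!(all.len(), 2);
}
```
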
@ -383,7 +382,7 @@ impl Inventory {

let set_list = self.map.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.filter(|set| &set.pool == &pool && set.uuid.as_ref() != [0u8;16]);
.filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8;16]);

for set in set_list {
match last_set {

@ -406,7 +405,7 @@ impl Inventory {
// consistency check - must be the only set with that ctime
let set_list = self.map.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.filter(|set| &set.pool == &pool && set.uuid.as_ref() != [0u8;16]);
.filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8;16]);

for set in set_list {
if set.uuid != uuid && set.ctime >= ctime { // should not happen

@ -437,7 +436,7 @@ impl Inventory {

let set_list = self.map.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.filter(|set| (&set.uuid != media_set_uuid) && (&set.pool == &pool));
.filter(|set| (&set.uuid != media_set_uuid) && (set.pool == pool));

let mut next_ctime = None;

@ -522,7 +521,7 @@ impl Inventory {
) -> Result<String, Error> {

if let Some(ctime) = self.media_set_start_time(media_set_uuid) {
let mut template = template.unwrap_or(String::from("%c"));
let mut template = template.unwrap_or_else(|| String::from("%c"));
template = template.replace("%id%", &media_set_uuid.to_string());
proxmox::tools::time::strftime_local(&template, ctime)
} else {

@ -675,20 +674,18 @@ impl Inventory {
for (uuid, entry) in self.map.iter_mut() {
if let Some(changer_name) = online_map.lookup_changer(uuid) {
entry.location = Some(MediaLocation::Online(changer_name.to_string()));
} else {
if let Some(MediaLocation::Online(ref changer_name)) = entry.location {
match online_map.online_map(changer_name) {
None => {
// no such changer device
entry.location = Some(MediaLocation::Offline);
}
Some(None) => {
// got no info - do nothing
}
Some(Some(_)) => {
// media changer changed
entry.location = Some(MediaLocation::Offline);
}
} else if let Some(MediaLocation::Online(ref changer_name)) = entry.location {
match online_map.online_map(changer_name) {
None => {
// no such changer device
entry.location = Some(MediaLocation::Offline);
}
Some(None) => {
// got no info - do nothing
}
Some(Some(_)) => {
// media changer changed
entry.location = Some(MediaLocation::Offline);
}
}
}

@ -323,7 +323,7 @@ impl MediaCatalog {

/// Returns the chunk archive file number
pub fn lookup_snapshot(&self, snapshot: &str) -> Option<u64> {
self.snapshot_index.get(snapshot).map(|n| *n)
self.snapshot_index.get(snapshot).copied()
}

/// Test if the catalog already contain a chunk

@ -333,7 +333,7 @@ impl MediaCatalog {

/// Returns the chunk archive file number
pub fn lookup_chunk(&self, digest: &[u8;32]) -> Option<u64> {
self.chunk_index.get(digest).map(|n| *n)
self.chunk_index.get(digest).copied()
}

fn check_register_label(&self, file_number: u64) -> Result<(), Error> {
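
`get` on a map returns `Option<&T>`; for `Copy` types, `.copied()` states the dereference directly instead of `.map(|n| *n)`. A minimal sketch mirroring the catalog lookups:

```rust
use std::collections::HashMap;

fn main() {
    let mut snapshot_index: HashMap<String, u64> = HashMap::new();
    snapshot_index.insert("vm/100/2021-01-01T00:00:00Z".to_string(), 7);

    // Option<&u64> -> Option<u64> without a hand-written closure.
    let file_nr: Option<u64> = snapshot_index
        .get("vm/100/2021-01-01T00:00:00Z")
        .copied();
    assert_eq!(file_nr, Some(7));
}
```
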
Some files were not shown because too many files have changed in this diff.