tree-wide: fix needless borrows

found and fixed via clippy

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
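The lint behind this cleanup is clippy's `needless_borrow`: it fires when an expression is borrowed even though it already is a reference, so the extra `&` (or a `&&` double borrow) is immediately dereferenced again by the compiler. A minimal illustration of the pattern, not code from this repository:

    fn lookup(name: &str) -> usize {
        name.len()
    }

    fn main() {
        let name: &str = "datastore";
        // `name` is already a `&str`; `&name` creates a `&&str` that the
        // compiler auto-derefs right back -- clippy::needless_borrow fires here.
        let before = lookup(&name);
        // the fix applied throughout this commit: pass the reference directly
        let after = lookup(name);
        assert_eq!(before, after);
    }

Each hunk below drops exactly such a redundant borrow; behavior is unchanged.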
@@ -233,7 +233,7 @@ impl AcmeClient {
             )
             .await?;

-            let request = account.post_request(&account.location, &nonce, data)?;
+            let request = account.post_request(&account.location, nonce, data)?;
             match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                 Ok(response) => break response,
                 Err(err) if err.is_bad_nonce() => continue,
@@ -402,7 +402,7 @@ impl AcmeClient {
             )
             .await?;

-            let request = revocation.request(&directory, nonce)?;
+            let request = revocation.request(directory, nonce)?;
             match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                 Ok(_response) => return Ok(()),
                 Err(err) if err.is_bad_nonce() => continue,
@@ -270,7 +270,7 @@ impl AcmePlugin for StandaloneServer {
         let token = challenge
             .token()
             .ok_or_else(|| format_err!("missing token in challenge"))?;
-        let key_auth = Arc::new(client.key_authorization(&token)?);
+        let key_auth = Arc::new(client.key_authorization(token)?);
         let path = Arc::new(format!("/.well-known/acme-challenge/{}", token));

         let service = make_service_fn(move |_| {
@@ -121,7 +121,7 @@ pub fn read_acl(
     let mut list: Vec<AclListItem> = Vec::new();
     if let Some(path) = &path {
         if let Some(node) = &tree.find_node(path) {
-            extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
+            extract_acl_node_data(node, path, &mut list, exact, &auth_id_filter);
         }
     } else {
         extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
@@ -118,7 +118,7 @@ fn authenticate_2nd(
     challenge_ticket: &str,
     response: &str,
 ) -> Result<AuthResult, Error> {
-    let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(&challenge_ticket)?
+    let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(challenge_ticket)?
         .verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
         .require_partial()?;

@@ -83,7 +83,7 @@ fn check_priv_or_backup_owner(
     required_privs: u64,
 ) -> Result<(), Error> {
     let user_info = CachedUserInfo::new()?;
-    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
+    let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);

     if privs & required_privs == 0 {
         let owner = store.get_owner(group)?;
@@ -125,7 +125,7 @@ fn get_all_snapshot_files(
     info: &BackupInfo,
 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

-    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
+    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;

     let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
         acc.insert(item.filename.clone());
@@ -536,7 +536,7 @@ pub fn list_snapshots (
             snapshots.extend(
                 group_backups
                     .into_iter()
-                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
+                    .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
             );

             Ok(snapshots)
@@ -549,7 +549,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu

     groups.iter()
         .filter(|group| {
-            let owner = match store.get_owner(&group) {
+            let owner = match store.get_owner(group) {
                 Ok(owner) => owner,
                 Err(err) => {
                     eprintln!("Failed to get owner of group '{}/{}' - {}",
@@ -1071,7 +1071,7 @@ pub fn get_datastore_list(
     let mut list = Vec::new();

     for (store, (_, data)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
             list.push(
@@ -1401,7 +1401,7 @@ pub fn catalog(
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

     let (csum, size) = index.compute_csum();
-    manifest.verify_file(&file_name, &csum, size)?;
+    manifest.verify_file(file_name, &csum, size)?;

     let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -1446,7 +1446,7 @@ pub fn pxar_file_download(

     async move {
         let store = required_string_param(&param, "store")?;
-        let datastore = DataStore::lookup_datastore(&store)?;
+        let datastore = DataStore::lookup_datastore(store)?;

         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -1483,7 +1483,7 @@ pub fn pxar_file_download(
             .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

         let (csum, size) = index.compute_csum();
-        manifest.verify_file(&pxar_name, &csum, size)?;
+        manifest.verify_file(pxar_name, &csum, size)?;

         let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
         let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -73,7 +73,7 @@ pub fn list_sync_jobs(
             }
         })
         .filter(|job: &SyncJobConfig| {
-            check_sync_job_read_access(&user_info, &auth_id, &job)
+            check_sync_job_read_access(&user_info, &auth_id, job)
         });

     let mut list = Vec::new();
@@ -95,7 +95,7 @@ pub fn update_webauthn_config(
         let digest = <[u8; 32]>::from_hex(digest)?;
         crate::tools::detect_modified_configuration_file(
             &digest,
-            &crate::config::tfa::webauthn_config_digest(&wa)?,
+            &crate::config::tfa::webauthn_config_digest(wa)?,
         )?;
     }

@@ -524,7 +524,7 @@ pub fn list_plugins(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginCon
     rpcenv["digest"] = hex::encode(&digest).into();
     Ok(plugins
         .iter()
-        .map(|(id, (ty, data))| modify_cfg_for_api(&id, &ty, data))
+        .map(|(id, (ty, data))| modify_cfg_for_api(id, ty, data))
         .collect())
 }

@@ -546,7 +546,7 @@ pub fn get_plugin(id: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<Plu
     rpcenv["digest"] = hex::encode(&digest).into();

     match plugins.get(&id) {
-        Some((ty, data)) => Ok(modify_cfg_for_api(&id, &ty, &data)),
+        Some((ty, data)) => Ok(modify_cfg_for_api(&id, ty, data)),
         None => http_bail!(NOT_FOUND, "no such plugin"),
     }
 }
@@ -20,12 +20,12 @@ pub fn check_sync_job_read_access(
     auth_id: &Authid,
     job: &SyncJobConfig,
 ) -> bool {
-    let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+    let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
     if datastore_privs & PRIV_DATASTORE_AUDIT == 0 {
         return false;
     }

-    let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote]);
+    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
     remote_privs & PRIV_REMOTE_AUDIT != 0
 }

@@ -35,7 +35,7 @@ pub fn check_sync_job_modify_access(
     auth_id: &Authid,
     job: &SyncJobConfig,
 ) -> bool {
-    let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+    let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
     if datastore_privs & PRIV_DATASTORE_BACKUP == 0 {
         return false;
     }
@@ -62,7 +62,7 @@ pub fn check_sync_job_modify_access(
         return false;
     }

-    let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote, &job.remote_store]);
+    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
     remote_privs & PRIV_REMOTE_READ != 0
 }

@@ -96,7 +96,7 @@ pub fn list_sync_jobs(

     let list = list
         .into_iter()
-        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, &sync_job))
+        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
         .collect();
     Ok(list)
 }
@@ -429,8 +429,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
     };

     // should work without ACLs
-    assert_eq!(check_sync_job_read_access(&user_info, &root_auth_id, &job), true);
-    assert_eq!(check_sync_job_modify_access(&user_info, &root_auth_id, &job), true);
+    assert_eq!(check_sync_job_read_access(&user_info, root_auth_id, &job), true);
+    assert_eq!(check_sync_job_modify_access(&user_info, root_auth_id, &job), true);

     // user without permissions must fail
     assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
@@ -330,7 +330,7 @@ async fn order_certificate(

     for auth_url in &order.data.authorizations {
         task_log!(worker, "Getting authorization details from '{}'", auth_url);
-        let mut auth = acme.get_authorization(&auth_url).await?;
+        let mut auth = acme.get_authorization(auth_url).await?;

         let domain = match &mut auth.identifier {
             Identifier::Dns(domain) => domain.to_ascii_lowercase(),
@@ -442,7 +442,7 @@ async fn request_validation(
     validation_url: &str,
 ) -> Result<(), Error> {
     task_log!(worker, "Triggering validation");
-    acme.request_challenge_validation(&validation_url).await?;
+    acme.request_challenge_validation(validation_url).await?;

     task_log!(worker, "Sleeping for 5 seconds");
     tokio::time::sleep(Duration::from_secs(5)).await;
@@ -450,7 +450,7 @@ async fn request_validation(
     loop {
         use proxmox_acme_rs::authorization::Status;

-        let auth = acme.get_authorization(&auth_url).await?;
+        let auth = acme.get_authorization(auth_url).await?;
         match auth.status {
             Status::Pending => {
                 task_log!(worker, "Status is still 'pending', trying again in 10 seconds");
@@ -282,7 +282,7 @@ fn create_datastore_mount_unit(
     what: &str,
 ) -> Result<String, Error> {

-    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(&mount_point, true);
+    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
     mount_unit_name.push_str(".mount");

     let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
@@ -55,9 +55,9 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {

     for line in data.lines() {

-        if let Some(caps) = DOMAIN_REGEX.captures(&line) {
+        if let Some(caps) = DOMAIN_REGEX.captures(line) {
             result["search"] = Value::from(&caps[1]);
-        } else if let Some(caps) = SERVER_REGEX.captures(&line) {
+        } else if let Some(caps) = SERVER_REGEX.captures(line) {
             nscount += 1;
             if nscount > 3 { continue };
             let nameserver = &caps[1];
@@ -121,7 +121,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu

     let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?.sign(
         private_auth_key(),
-        Some(&tools::ticket::term_aad(&userid, &path, port)),
+        Some(&tools::ticket::term_aad(userid, path, port)),
     )?;

     let mut command = Vec::new();
@@ -161,7 +161,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
     arguments.push(&fd_string);
     arguments.extend_from_slice(&[
         "--path",
-        &path,
+        path,
         "--perm",
         "Sys.Console",
         "--authport",
@@ -293,7 +293,7 @@ fn upgrade_to_websocket(
         Ticket::<Empty>::parse(ticket)?.verify(
             crate::auth_helpers::public_auth_key(),
             ticket::TERM_PREFIX,
-            Some(&tools::ticket::term_aad(&userid, "/system", port)),
+            Some(&tools::ticket::term_aad(userid, "/system", port)),
         )?;

         let (ws, response) = WebSocket::new(parts.headers.clone())?;
@@ -17,7 +17,7 @@ use pbs_config::network::{self, NetworkConfig};
 use proxmox_rest_server::WorkerTask;

 fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
-    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(&list)?;
+    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
     Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
 }

@@ -176,9 +176,9 @@ fn get_service_state(
         bail!("unknown service name '{}'", service);
     }

-    let status = get_full_service_state(&service)?;
+    let status = get_full_service_state(service)?;

-    Ok(json_service_state(&service, status))
+    Ok(json_service_state(service, status))
 }

 fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
@@ -24,9 +24,9 @@ use pbs_config::CachedUserInfo;
 fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
     match (upid.worker_type.as_str(), &upid.worker_id) {
         ("verificationjob", Some(workerid)) => {
-            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(store) = captures.get(1) {
-                    return user_info.check_privs(&auth_id,
+                    return user_info.check_privs(auth_id,
                         &["datastore", store.as_str()],
                         PRIV_DATASTORE_VERIFY,
                         true);
@@ -34,7 +34,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
             }
         },
         ("syncjob", Some(workerid)) => {
-            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
                 let remote = captures.get(1);
                 let remote_store = captures.get(2);
                 let local_store = captures.get(3);
@@ -42,7 +42,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
                 if let (Some(remote), Some(remote_store), Some(local_store)) =
                     (remote, remote_store, local_store) {

-                    return check_pull_privs(&auth_id,
+                    return check_pull_privs(auth_id,
                         local_store.as_str(),
                         remote.as_str(),
                         remote_store.as_str(),
@@ -51,15 +51,15 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
             }
         },
         ("garbage_collection", Some(workerid)) => {
-            return user_info.check_privs(&auth_id,
-                &["datastore", &workerid],
+            return user_info.check_privs(auth_id,
+                &["datastore", workerid],
                 PRIV_DATASTORE_MODIFY,
                 true)
         },
         ("prune", Some(workerid)) => {
-            return user_info.check_privs(&auth_id,
+            return user_info.check_privs(auth_id,
                 &["datastore",
-                &workerid],
+                workerid],
                 PRIV_DATASTORE_MODIFY,
                 true);
         },
@@ -73,7 +73,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
 fn check_job_store(upid: &UPID, store: &str) -> bool {
     match (upid.worker_type.as_str(), &upid.worker_id) {
         (workertype, Some(workerid)) if workertype.starts_with("verif") => {
-            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(jobstore) = captures.get(1) {
                     return store == jobstore.as_str();
                 }
@@ -82,7 +82,7 @@ fn check_job_store(upid: &UPID, store: &str) -> bool {
             }
         }
         ("syncjob", Some(workerid)) => {
-            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(local_store) = captures.get(3) {
                     return store == local_store.as_str();
                 }
@@ -112,7 +112,7 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
         // or task == job which the user/token could have configured/manually executed

         user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
-            .or_else(|_| check_job_privs(&auth_id, &user_info, upid))
+            .or_else(|_| check_job_privs(auth_id, &user_info, upid))
             .or_else(|_| bail!("task access not allowed"))
     }
 }
@@ -250,7 +250,7 @@ async fn get_task_status(

 fn extract_upid(param: &Value) -> Result<UPID, Error> {

-    let upid_str = pbs_tools::json::required_string_param(&param, "upid")?;
+    let upid_str = pbs_tools::json::required_string_param(param, "upid")?;

     upid_str.parse::<UPID>()
 }
@@ -569,7 +569,7 @@ const UPID_API_SUBDIRS: SubdirMap = &sorted!([
 pub const UPID_API_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
     .delete(&API_METHOD_STOP_TASK)
-    .subdirs(&UPID_API_SUBDIRS);
+    .subdirs(UPID_API_SUBDIRS);

 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_TASKS)
@@ -91,13 +91,13 @@ pub fn datastore_status(
     let mut list = Vec::new();

     for (store, (_, _)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if !allowed {
             continue;
         }

-        let datastore = match DataStore::lookup_datastore(&store) {
+        let datastore = match DataStore::lookup_datastore(store) {
             Ok(datastore) => datastore,
             Err(err) => {
                 list.push(json!({
@@ -182,7 +182,7 @@ pub fn do_tape_backup_job(
         Some(lock_tape_device(&drive_config, &setup.drive)?)
     };

-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);

     let upid_str = WorkerTask::new_thread(
@@ -363,7 +363,7 @@ pub fn backup(

     let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);

     let upid_str = WorkerTask::new_thread(
@@ -423,7 +423,7 @@ fn backup_worker(
     task_log!(worker, "update media online status");
     let changer_name = update_media_online_status(&setup.drive)?;

-    let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
+    let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;

     let mut pool_writer = PoolWriter::new(
         pool,
@@ -443,7 +443,7 @@ fn backup_worker(
     };

     let group_count_full = group_list.len();
-    let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, &group_filters)).collect();
+    let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, group_filters)).collect();
     let group_count = list.len();
     task_log!(worker, "found {} groups (out of {} total)", group_count, group_count_full);
     (list, group_count)
@@ -96,7 +96,7 @@ pub async fn get_status(
     for (id, drive_status) in status.drives.iter().enumerate() {
         let mut state = None;
         if let Some(drive) = drive_map.get(&(id as u64)) {
-            state = get_tape_device_state(&config, &drive)?;
+            state = get_tape_device_state(&config, drive)?;
         }
         let entry = MtxStatusEntry {
             entry_kind: MtxEntryKind::Drive,
@@ -231,7 +231,7 @@ const SUBDIRS: SubdirMap = &[

 const ITEM_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
-    .subdirs(&SUBDIRS);
+    .subdirs(SUBDIRS);

 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_CHANGERS)
@@ -542,7 +542,7 @@ fn write_media_label(
     let media_id = if let Some(ref pool) = pool {
         // assign media to pool by writing special media set label
         task_log!(worker, "Label media '{}' for pool '{}'", label.label_text, pool);
-        let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);
+        let set = MediaSetLabel::with_data(pool, [0u8; 16].into(), 0, label.ctime, None);

         drive.write_media_set_label(&set, None)?;

@@ -1473,7 +1473,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([

 const ITEM_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
-    .subdirs(&SUBDIRS);
+    .subdirs(SUBDIRS);

 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_DRIVES)
@@ -138,7 +138,7 @@ fn check_datastore_privs(
     auth_id: &Authid,
     owner: &Option<Authid>,
 ) -> Result<(), Error> {
-    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
     if (privs & PRIV_DATASTORE_BACKUP) == 0 {
         bail!("no permissions on /datastore/{}", store);
     }
@@ -220,7 +220,7 @@ pub fn restore(
     }

     for store in used_datastores.iter() {
-        check_datastore_privs(&user_info, &store, &auth_id, &owner)?;
+        check_datastore_privs(&user_info, store, &auth_id, &owner)?;
     }

     let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
@@ -448,7 +448,7 @@ fn restore_list_worker(
             })?;

         let (owner, _group_lock) =
-            datastore.create_locked_backup_group(backup_dir.group(), &restore_owner)?;
+            datastore.create_locked_backup_group(backup_dir.group(), restore_owner)?;
         if restore_owner != &owner {
             // only the owner is allowed to create additional snapshots
             bail!(
@@ -460,7 +460,7 @@ fn restore_list_worker(
         }

         let (media_id, file_num) = if let Some((media_uuid, file_num)) =
-            catalog.lookup_snapshot(&source_datastore, &snapshot)
+            catalog.lookup_snapshot(source_datastore, snapshot)
         {
             let media_id = inventory.lookup_media(media_uuid).unwrap();
             (media_id, file_num)
@@ -516,7 +516,7 @@ fn restore_list_worker(
         let (drive, info) = request_and_load_media(
             &worker,
             &drive_config,
-            &drive_name,
+            drive_name,
             &media_id.label,
             &email,
         )?;
@@ -568,7 +568,7 @@ fn restore_list_worker(
         let (mut drive, _info) = request_and_load_media(
             &worker,
             &drive_config,
-            &drive_name,
+            drive_name,
             &media_id.label,
             &email,
         )?;
@@ -591,7 +591,7 @@ fn restore_list_worker(
         let backup_dir: BackupDir = snapshot.parse()?;

         let datastore = store_map
-            .get_datastore(&source_datastore)
+            .get_datastore(source_datastore)
             .ok_or_else(|| format_err!("unexpected source datastore: {}", source_datastore))?;

         let mut tmp_path = base_path.clone();
@@ -646,7 +646,7 @@ fn get_media_set_catalog(
             }
             Some(media_uuid) => {
                 let media_id = inventory.lookup_media(media_uuid).unwrap();
-                let media_catalog = MediaCatalog::open(status_path, &media_id, false, false)?;
+                let media_catalog = MediaCatalog::open(status_path, media_id, false, false)?;
                 catalog.append_catalog(media_catalog)?;
             }
         }
@@ -899,7 +899,7 @@ pub fn request_and_restore_media(
         Some(ref set) => &set.uuid,
     };

-    let (mut drive, info) = request_and_load_media(&worker, &drive_config, &drive_name, &media_id.label, email)?;
+    let (mut drive, info) = request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?;

     match info.media_set_label {
         None => {
@@ -923,7 +923,7 @@ pub fn request_and_restore_media(
         worker,
         &mut drive,
         &info,
-        Some((&store_map, restore_owner)),
+        Some((store_map, restore_owner)),
         checked_chunks_map,
         false,
     )
@@ -301,7 +301,7 @@ pub fn verify_backup_dir(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
     let snap_lock = lock_dir_noblock_shared(
-        &verify_worker.datastore.snapshot_path(&backup_dir),
+        &verify_worker.datastore.snapshot_path(backup_dir),
         "snapshot",
         "locked by another operation",
     );
@@ -330,7 +330,7 @@ pub fn verify_backup_dir_with_lock(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
     _snap_lock: Dir,
 ) -> Result<bool, Error> {
-    let manifest = match verify_worker.datastore.load_manifest(&backup_dir) {
+    let manifest = match verify_worker.datastore.load_manifest(backup_dir) {
         Ok((manifest, _)) => manifest,
         Err(err) => {
             task_log!(
@@ -365,10 +365,10 @@ pub fn verify_backup_dir_with_lock(
         let result = proxmox_lang::try_block!({
             task_log!(verify_worker.worker, " check {}", info.filename);
             match archive_type(&info.filename)? {
-                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
-                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, &backup_dir, info),
+                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
+                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
                 ArchiveType::Blob => {
-                    verify_blob(verify_worker.datastore.clone(), &backup_dir, info)
+                    verify_blob(verify_worker.datastore.clone(), backup_dir, info)
                 }
             }
         });
@@ -397,7 +397,7 @@ pub fn verify_backup_dir_with_lock(
         let verify_state = serde_json::to_value(verify_state)?;
         verify_worker
             .datastore
-            .update_manifest(&backup_dir, |manifest| {
+            .update_manifest(backup_dir, |manifest| {
                 manifest.unprotected["verify_state"] = verify_state;
             })
             .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
@@ -270,7 +270,7 @@ fn dump_api_method_schema(

     data["parameters"] = dump_property_schema(&api_method.parameters);

-    let mut returns = dump_schema(&api_method.returns.schema);
+    let mut returns = dump_schema(api_method.returns.schema);
     if api_method.returns.optional {
         returns["optional"] = 1.into();
     }
@@ -730,7 +730,7 @@ async fn schedule_datastore_verify_jobs() {
         let worker_type = "verificationjob";
         let auth_id = Authid::root_auth_id().clone();
         if check_schedule(worker_type, &event_str, &job_id) {
-            let job = match Job::new(&worker_type, &job_id) {
+            let job = match Job::new(worker_type, &job_id) {
                 Ok(job) => job,
                 Err(_) => continue, // could not get lock
             };
@@ -766,7 +766,7 @@ async fn schedule_tape_backup_jobs() {
         let worker_type = "tape-backup-job";
         let auth_id = Authid::root_auth_id().clone();
         if check_schedule(worker_type, &event_str, &job_id) {
-            let job = match Job::new(&worker_type, &job_id) {
+            let job = match Job::new(worker_type, &job_id) {
                 Ok(job) => job,
                 Err(_) => continue, // could not get lock
             };
@@ -1033,7 +1033,7 @@ fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
         }
     };

-    let last = match jobstate::last_run_time(worker_type, &id) {
+    let last = match jobstate::last_run_time(worker_type, id) {
         Ok(time) => time,
         Err(err) => {
             eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
@@ -94,7 +94,7 @@ async fn get_child_links(
     path: &str,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<String>, Error> {
-    let (path, components) = normalize_uri_path(&path)?;
+    let (path, components) = normalize_uri_path(path)?;

     let info = &proxmox_backup::api2::ROUTER
         .find_route(&components, &mut HashMap::new())
@@ -132,7 +132,7 @@ fn get_api_method(
         _ => unreachable!(),
     };
     let mut uri_param = HashMap::new();
-    let (path, components) = normalize_uri_path(&path)?;
+    let (path, components) = normalize_uri_path(path)?;
     if let Some(method) =
         &proxmox_backup::api2::ROUTER.find_method(&components, method.clone(), &mut uri_param)
     {
@@ -446,7 +446,7 @@ async fn ls(path: Option<String>, mut param: Value, rpcenv: &mut dyn RpcEnvironm
         &mut serde_json::to_value(res)?,
         &proxmox_schema::ReturnType {
             optional: false,
-            schema: &LS_SCHEMA,
+            schema: LS_SCHEMA,
         },
         &output_format,
         &options,
@@ -51,7 +51,7 @@ fn decode_blob(

     if blob.is_encrypted() && key_file.is_some() {
         let (key, _created, _fingerprint) =
-            load_and_decrypt_key(&key_file.unwrap(), &get_encryption_key_password)?;
+            load_and_decrypt_key(key_file.unwrap(), &get_encryption_key_password)?;
         crypt_conf = CryptConfig::new(key)?;
         crypt_conf_opt = Some(&crypt_conf);
     }
@@ -72,7 +72,7 @@ fn recover_index(

     let crypt_conf_opt = if let Some(key_file_path) = key_file_path {
         let (key, _created, _fingerprint) =
-            load_and_decrypt_key(&key_file_path, &get_encryption_key_password)?;
+            load_and_decrypt_key(key_file_path, &get_encryption_key_password)?;
         Some(CryptConfig::new(key)?)
     } else {
         None
@@ -55,7 +55,7 @@ fn list_acls(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Err
 pub fn acl_commands() -> CommandLineInterface {

     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_ACLS))
+        .insert("list", CliCommand::new(&API_METHOD_LIST_ACLS))
         .insert(
             "update",
             CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
@@ -93,7 +93,7 @@ async fn create_datastore(mut param: Value) -> Result<Value, Error> {

     let mut client = connect_to_localhost()?;

-    let result = client.post(&"api2/json/config/datastore", Some(param)).await?;
+    let result = client.post("api2/json/config/datastore", Some(param)).await?;

     view_task_result(&mut client, result, &output_format).await?;

@@ -73,8 +73,8 @@ fn show_openid_realm(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Va
 pub fn openid_commands() -> CommandLineInterface {

     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_OPENID_REALMS))
-        .insert("show", CliCommand::new(&&API_METHOD_SHOW_OPENID_REALM)
+        .insert("list", CliCommand::new(&API_METHOD_LIST_OPENID_REALMS))
+        .insert("show", CliCommand::new(&API_METHOD_SHOW_OPENID_REALM)
             .arg_param(&["realm"])
             .completion_cb("realm", pbs_config::domains::complete_openid_realm_name)
         )
@@ -75,7 +75,7 @@ fn show_remote(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, E
 pub fn remote_commands() -> CommandLineInterface {

     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
+        .insert("list", CliCommand::new(&API_METHOD_LIST_REMOTES))
         .insert(
             "show",
             CliCommand::new(&API_METHOD_SHOW_REMOTE)
@@ -94,7 +94,7 @@ async fn show_current_traffic(param: Value) -> Result<Value, Error> {

     let client = connect_to_localhost()?;

-    let mut result = client.get(&"api2/json/admin/traffic-control", None).await?;
+    let mut result = client.get("api2/json/admin/traffic-control", None).await?;

     let mut data = result["data"].take();

@@ -171,7 +171,7 @@ fn list_permissions(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Val
 pub fn user_commands() -> CommandLineInterface {

     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_USERS))
+        .insert("list", CliCommand::new(&API_METHOD_LIST_USERS))
         .insert(
             "create",
             // fixme: howto handle password parameter?
@@ -192,7 +192,7 @@ pub fn user_commands() -> CommandLineInterface {
         )
         .insert(
             "list-tokens",
-            CliCommand::new(&&API_METHOD_LIST_TOKENS)
+            CliCommand::new(&API_METHOD_LIST_TOKENS)
                 .arg_param(&["userid"])
                 .completion_cb("userid", pbs_config::user::complete_userid)
         )
@@ -211,7 +211,7 @@ pub fn user_commands() -> CommandLineInterface {
         )
         .insert(
             "permissions",
-            CliCommand::new(&&API_METHOD_LIST_PERMISSIONS)
+            CliCommand::new(&API_METHOD_LIST_PERMISSIONS)
                 .arg_param(&["auth-id"])
                 .completion_cb("auth-id", pbs_config::user::complete_authid)
                 .completion_cb("path", pbs_config::datastore::complete_acl_path)
@@ -34,12 +34,12 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

     let handle = if let Some(name) = param["drive"].as_str() {
         let (config, _digest) = pbs_config::drive::config()?;
-        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", name)?;
         eprintln!("using device {}", drive.path);
         open_lto_tape_drive(&drive)?
     } else if let Some(device) = param["device"].as_str() {
         eprintln!("using device {}", device);
-        LtoTapeHandle::new(open_lto_tape_device(&device)?)?
+        LtoTapeHandle::new(open_lto_tape_device(device)?)?
     } else if let Some(true) = param["stdin"].as_bool() {
         eprintln!("using stdin");
         let fd = std::io::stdin().as_raw_fd();
@@ -62,7 +62,7 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

         if drive_names.len() == 1 {
             let name = drive_names[0];
-            let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+            let drive: LtoTapeDrive = config.lookup("lto", name)?;
             eprintln!("using device {}", drive.path);
             open_lto_tape_drive(&drive)?
         } else {
@@ -185,7 +185,7 @@ pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(
     create_configdir()?;
     pbs_config::replace_backup_config(&key_path, key_pem)
         .map_err(|err| format_err!("error writing certificate private key - {}", err))?;
-    pbs_config::replace_backup_config(&cert_path, &cert_pem)
+    pbs_config::replace_backup_config(&cert_path, cert_pem)
         .map_err(|err| format_err!("error writing certificate file - {}", err))?;

     Ok(())
@@ -141,7 +141,7 @@ impl NodeConfig {
     /// Returns the parsed ProxyConfig
     pub fn http_proxy(&self) -> Option<ProxyConfig> {
         if let Some(http_proxy) = &self.http_proxy {
-            match ProxyConfig::parse_proxy_url(&http_proxy) {
+            match ProxyConfig::parse_proxy_url(http_proxy) {
                 Ok(proxy) => Some(proxy),
                 Err(_) => None,
             }
@@ -78,7 +78,7 @@ pub async fn check_pbs_auth(
             verify_csrf_prevention_token(
                 csrf_secret(),
                 &userid,
-                &csrf_token,
+                csrf_token,
                 -300,
                 ticket_lifetime,
             )?;
@@ -245,8 +245,8 @@ fn send_job_status_mail(

     sendmail(
         &[email],
-        &subject,
-        Some(&text),
+        subject,
+        Some(text),
         Some(&html),
         None,
         Some(&author),
@@ -438,7 +438,7 @@ async fn pull_snapshot(
             &mut chunk_reader,
             tgt_store.clone(),
             snapshot,
-            &item,
+            item,
             downloaded_chunks.clone(),
         )
         .await?;
@@ -465,7 +465,7 @@ pub async fn pull_snapshot_from(
     snapshot: &BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), Error> {
-    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;
+    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(snapshot)?;

     if is_new {
         task_log!(worker, "sync snapshot {:?}", snapshot.relative_path());
@@ -474,12 +474,12 @@ pub async fn pull_snapshot_from(
             worker,
             reader,
             tgt_store.clone(),
-            &snapshot,
+            snapshot,
             downloaded_chunks,
         )
         .await
         {
-            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
+            if let Err(cleanup_err) = tgt_store.remove_backup_dir(snapshot, true) {
                 task_log!(worker, "cleanup error - {}", cleanup_err);
             }
             return Err(err);
@@ -491,7 +491,7 @@ pub async fn pull_snapshot_from(
             worker,
             reader,
             tgt_store.clone(),
-            &snapshot,
+            snapshot,
             downloaded_chunks,
         )
         .await?;
@@ -713,7 +713,7 @@ pub async fn pull_store(
     let list:Vec<BackupGroup> = list
         .into_iter()
         .filter(|group| {
-            apply_filters(&group, group_filter)
+            apply_filters(group, group_filter)
         })
         .collect();
     task_log!(worker, "found {} groups to sync (out of {} total)", list.len(), unfiltered_count);
@@ -265,9 +265,9 @@ impl ScsiMediaChange for ScsiTapeChanger {
         }

         let status = if USE_MTX {
-            mtx::mtx_status(&self)
+            mtx::mtx_status(self)
         } else {
-            sg_pt_changer::status(&self)
+            sg_pt_changer::status(self)
         };

         match &status {
@@ -21,7 +21,7 @@ pub fn mtx_status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {

     let mut status = parse_mtx_status(&output)?;

-    status.mark_import_export_slots(&config)?;
+    status.mark_import_export_slots(config)?;

     Ok(status)
 }
@@ -203,7 +203,7 @@ Data Transfer Element 1:Empty
 Storage Element 24 IMPORT/EXPORT:Empty
 "###;

-    let _ = parse_mtx_status(&output)?;
+    let _ = parse_mtx_status(output)?;

     Ok(())
 }
@@ -192,11 +192,11 @@ pub fn update_changer_online_status(
     let mut online_map = OnlineStatusMap::new(drive_config)?;
     let mut online_set = HashSet::new();
     for label_text in label_text_list.iter() {
-        if let Some(media_id) = inventory.find_media_by_label_text(&label_text) {
+        if let Some(media_id) = inventory.find_media_by_label_text(label_text) {
             online_set.insert(media_id.label.uuid.clone());
         }
     }
-    online_map.update_online_status(&changer_name, online_set)?;
+    online_map.update_online_status(changer_name, online_set)?;
     inventory.update_online_status(&online_map)?;

     Ok(())
@@ -827,7 +827,7 @@ pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap<String, String>)
         None => return Vec::new(),
     };
     let status_path = Path::new(TAPE_STATUS_DIR);
-    let inventory = match Inventory::load(&status_path) {
+    let inventory = match Inventory::load(status_path) {
         Ok(inventory) => inventory,
         Err(_) => return Vec::new(),
     };
@@ -413,7 +413,7 @@ impl MediaCatalog {

         let uuid = &media_id.label.uuid;

-        let me = Self::create_temporary_database(base_path, &media_id, log_to_stdout)?;
+        let me = Self::create_temporary_database(base_path, media_id, log_to_stdout)?;

         Self::finish_temporary_database(base_path, uuid, true)?;

@@ -289,7 +289,7 @@ impl MediaPool {
                 create_new_set = Some(String::from("policy is AlwaysCreate"));
             }
             MediaSetPolicy::CreateAt(event) => {
-                if let Some(set_start_time) = self.inventory.media_set_start_time(&self.current_media_set.uuid()) {
+                if let Some(set_start_time) = self.inventory.media_set_start_time(self.current_media_set.uuid()) {
                     if let Ok(Some(alloc_time)) = event.compute_next_event(set_start_time as i64) {
                         if current_time >= alloc_time {
                             create_new_set = Some(String::from("policy CreateAt event triggered"));
@@ -407,7 +407,7 @@ impl MediaPool {

         for media_id in media_list {

-            let (status, location) = self.compute_media_state(&media_id);
+            let (status, location) = self.compute_media_state(media_id);
             if media_id.media_set_label.is_some() { continue; } // should not happen

             if !self.location_is_available(&location) {
@@ -478,7 +478,7 @@ impl MediaPool {
                 continue;
             }

-            if !self.media_is_expired(&media, current_time) {
+            if !self.media_is_expired(media, current_time) {
                 continue;
             }

@@ -63,7 +63,7 @@ impl CatalogSet {
         }

         // remove read-only version from set (in case it is there)
-        self.media_set_catalog.remove_catalog(&new_catalog.uuid());
+        self.media_set_catalog.remove_catalog(new_catalog.uuid());

         self.catalog = Some(new_catalog);

@@ -117,7 +117,7 @@ impl PoolWriter {

     /// Set media status to FULL (persistent - stores pool status)
     pub fn set_media_status_full(&mut self, uuid: &Uuid) -> Result<(), Error> {
-        self.pool.set_media_status_full(&uuid)?;
+        self.pool.set_media_status_full(uuid)?;
         Ok(())
     }

@@ -556,7 +556,7 @@ fn write_chunk_archive<'a>(

         //println!("CHUNK {} size {}", hex::encode(digest), blob.raw_size());

-        match writer.try_write_chunk(&digest, &blob) {
+        match writer.try_write_chunk(digest, blob) {
             Ok(true) => {
                 chunk_list.push(*digest);
                 chunk_iter.next(); // consume
@@ -627,7 +627,7 @@ fn update_media_set_label(
         if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint {
             bail!("detected changed encryption fingerprint - internal error");
         }
-        media_catalog = MediaCatalog::open(status_path, &media_id, true, false)?;
+        media_catalog = MediaCatalog::open(status_path, media_id, true, false)?;

         // todo: verify last content/media_catalog somehow?

@@ -53,7 +53,7 @@ impl NewChunksIterator {
             continue;
         }

-        if catalog_set.lock().unwrap().contains_chunk(&datastore_name, &digest) {
+        if catalog_set.lock().unwrap().contains_chunk(datastore_name, &digest) {
             continue;
         };

@@ -25,7 +25,7 @@ pub struct PkgState {
 pub fn write_pkg_cache(state: &PkgState) -> Result<(), Error> {
     let serialized_state = serde_json::to_string(state)?;

-    replace_file(APT_PKG_STATE_FN, &serialized_state.as_bytes(), CreateOptions::new(), false)
+    replace_file(APT_PKG_STATE_FN, serialized_state.as_bytes(), CreateOptions::new(), false)
         .map_err(|err| format_err!("Error writing package cache - {}", err))?;
     Ok(())
 }
@@ -206,7 +206,7 @@ pub fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
     drop(cache_iter);
     // also loop through missing dependencies, as they would be installed
     for pkg in depends.iter() {
-        let mut iter = cache.find_by_name(&pkg);
+        let mut iter = cache.find_by_name(pkg);
         let view = match iter.next() {
             Some(view) => view,
             None => continue // package not found, ignore
@@ -20,7 +20,7 @@ lazy_static!{
 pub fn get_pool_from_dataset(dataset: &OsStr) -> Option<&OsStr> {
     if let Some(dataset) = dataset.to_str() {
         if let Some(idx) = dataset.find('/') {
-            return Some(&dataset[0..idx].as_ref());
+            return Some(dataset[0..idx].as_ref());
         }
     }

@@ -157,13 +157,13 @@ fn test_zfs_parse_list() -> Result<(), Error> {

     let output = "";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = Vec::new();

     assert_eq!(data, expect);

     let output = "btest 427349245952 405504 427348840448 - - 0 0 1.00 ONLINE -\n";
-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: "btest".to_string(),
@@ -190,7 +190,7 @@ logs

 ";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: String::from("rpool"),
@@ -232,7 +232,7 @@ logs - - - - - - - - -
 /dev/sda5 213674622976 0 213674622976 - - 0 0 - ONLINE
 ";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: String::from("b-test"),
@@ -267,7 +267,7 @@ b.test 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
 /dev/sda1 - - - - - - - - ONLINE
 ";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: String::from("b.test"),
@@ -189,7 +189,7 @@ pub fn parse_zpool_status_config_tree(i: &str) -> Result<Vec<ZFSPoolVDevState>,
 }

 fn parse_zpool_status(input: &str) -> Result<Vec<(String, String)>, Error> {
-    parse_complete("zfs status output", &input, many0(parse_zpool_status_field))
+    parse_complete("zfs status output", input, many0(parse_zpool_status_field))
 }

 pub fn vdev_list_to_tree(vdev_list: &[ZFSPoolVDevState]) -> Result<Value, Error> {
@@ -220,7 +220,7 @@ where
     };

     for item in items {
-        let (node, node_level) = to_node(&item);
+        let (node, node_level) = to_node(item);
         let vdev_level = 1 + node_level;
         let mut node = match node {
             Value::Object(map) => map,
@@ -373,7 +373,7 @@ pub fn zpool_status(pool: &str) -> Result<Vec<(String, String)>, Error> {
 fn test_parse(output: &str) -> Result<(), Error> {
     let mut found_config = false;

-    for (k, v) in parse_zpool_status(&output)? {
+    for (k, v) in parse_zpool_status(output)? {
         println!("<{}> => '{}'", k, v);
         if k == "config" {
             let vdev_list = parse_zpool_status_config_tree(&v)?;
@@ -125,7 +125,7 @@ pub fn parse_systemd_mount(filename: &str) -> Result<SectionConfigData, Error> {
 }

 fn save_systemd_config(config: &SectionConfig, filename: &str, data: &SectionConfigData) -> Result<(), Error> {
-    let raw = config.write(filename, &data)?;
+    let raw = config.write(filename, data)?;

     let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
     // set the correct owner/group/permissions while saving file, owner(rw) = root