tree-wide: fix needless borrows
found and fixed via clippy

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
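For context, the lint behind this cleanup is clippy's needless_borrow: it fires on expressions like f(&x) where x is already a reference of the expected type, so the extra borrow is created and then immediately dereferenced again by the compiler. A minimal sketch of the before/after (hypothetical greet function, not code from this tree):

fn greet(name: &str) -> String {
    format!("hello, {}", name)
}

fn main() {
    let name: &str = "world";
    // clippy::needless_borrow warns here: `&name` is `&&str`, which the
    // compiler immediately dereferences back to `&str`
    let _before = greet(&name);
    // fixed form: pass the existing reference through unchanged
    let _after = greet(name);
}

Every hunk below makes that same one-token change: a redundant `&` flagged by clippy is dropped while the surrounding code stays untouched.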
@@ -121,7 +121,7 @@ pub fn read_acl(
     let mut list: Vec<AclListItem> = Vec::new();
     if let Some(path) = &path {
         if let Some(node) = &tree.find_node(path) {
-            extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
+            extract_acl_node_data(node, path, &mut list, exact, &auth_id_filter);
         }
     } else {
         extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
@@ -118,7 +118,7 @@ fn authenticate_2nd(
     challenge_ticket: &str,
     response: &str,
 ) -> Result<AuthResult, Error> {
-    let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(&challenge_ticket)?
+    let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(challenge_ticket)?
         .verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
         .require_partial()?;
 
@@ -83,7 +83,7 @@ fn check_priv_or_backup_owner(
     required_privs: u64,
 ) -> Result<(), Error> {
     let user_info = CachedUserInfo::new()?;
-    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
+    let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
 
     if privs & required_privs == 0 {
         let owner = store.get_owner(group)?;
@@ -125,7 +125,7 @@ fn get_all_snapshot_files(
     info: &BackupInfo,
 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
 
-    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
+    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
 
     let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
         acc.insert(item.filename.clone());
@@ -536,7 +536,7 @@ pub fn list_snapshots (
         snapshots.extend(
             group_backups
                 .into_iter()
-                .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
+                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
         );
 
         Ok(snapshots)
@@ -549,7 +549,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
 
     groups.iter()
         .filter(|group| {
-            let owner = match store.get_owner(&group) {
+            let owner = match store.get_owner(group) {
                 Ok(owner) => owner,
                 Err(err) => {
                     eprintln!("Failed to get owner of group '{}/{}' - {}",
@@ -1071,7 +1071,7 @@ pub fn get_datastore_list(
     let mut list = Vec::new();
 
     for (store, (_, data)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
             list.push(
@@ -1401,7 +1401,7 @@ pub fn catalog(
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
 
     let (csum, size) = index.compute_csum();
-    manifest.verify_file(&file_name, &csum, size)?;
+    manifest.verify_file(file_name, &csum, size)?;
 
     let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -1446,7 +1446,7 @@ pub fn pxar_file_download(
 
     async move {
         let store = required_string_param(&param, "store")?;
-        let datastore = DataStore::lookup_datastore(&store)?;
+        let datastore = DataStore::lookup_datastore(store)?;
 
         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
@@ -1483,7 +1483,7 @@ pub fn pxar_file_download(
             .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
 
         let (csum, size) = index.compute_csum();
-        manifest.verify_file(&pxar_name, &csum, size)?;
+        manifest.verify_file(pxar_name, &csum, size)?;
 
         let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
         let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -73,7 +73,7 @@ pub fn list_sync_jobs(
             }
         })
         .filter(|job: &SyncJobConfig| {
-            check_sync_job_read_access(&user_info, &auth_id, &job)
+            check_sync_job_read_access(&user_info, &auth_id, job)
         });
 
     let mut list = Vec::new();
@@ -95,7 +95,7 @@ pub fn update_webauthn_config(
         let digest = <[u8; 32]>::from_hex(digest)?;
         crate::tools::detect_modified_configuration_file(
            &digest,
-            &crate::config::tfa::webauthn_config_digest(&wa)?,
+            &crate::config::tfa::webauthn_config_digest(wa)?,
         )?;
     }
 
@@ -524,7 +524,7 @@ pub fn list_plugins(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginCon
     rpcenv["digest"] = hex::encode(&digest).into();
     Ok(plugins
         .iter()
-        .map(|(id, (ty, data))| modify_cfg_for_api(&id, &ty, data))
+        .map(|(id, (ty, data))| modify_cfg_for_api(id, ty, data))
         .collect())
 }
 
@@ -546,7 +546,7 @@ pub fn get_plugin(id: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<Plu
     rpcenv["digest"] = hex::encode(&digest).into();
 
     match plugins.get(&id) {
-        Some((ty, data)) => Ok(modify_cfg_for_api(&id, &ty, &data)),
+        Some((ty, data)) => Ok(modify_cfg_for_api(&id, ty, data)),
         None => http_bail!(NOT_FOUND, "no such plugin"),
     }
 }
@@ -20,12 +20,12 @@ pub fn check_sync_job_read_access(
     auth_id: &Authid,
     job: &SyncJobConfig,
 ) -> bool {
-    let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+    let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
     if datastore_privs & PRIV_DATASTORE_AUDIT == 0 {
         return false;
     }
 
-    let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote]);
+    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
     remote_privs & PRIV_REMOTE_AUDIT != 0
 }
 
@@ -35,7 +35,7 @@ pub fn check_sync_job_modify_access(
     auth_id: &Authid,
     job: &SyncJobConfig,
 ) -> bool {
-    let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+    let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
     if datastore_privs & PRIV_DATASTORE_BACKUP == 0 {
         return false;
     }
@@ -62,7 +62,7 @@ pub fn check_sync_job_modify_access(
         return false;
     }
 
-    let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote, &job.remote_store]);
+    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
     remote_privs & PRIV_REMOTE_READ != 0
 }
 
@@ -96,7 +96,7 @@ pub fn list_sync_jobs(
 
     let list = list
         .into_iter()
-        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, &sync_job))
+        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
         .collect();
     Ok(list)
 }
@@ -429,8 +429,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
     };
 
     // should work without ACLs
-    assert_eq!(check_sync_job_read_access(&user_info, &root_auth_id, &job), true);
-    assert_eq!(check_sync_job_modify_access(&user_info, &root_auth_id, &job), true);
+    assert_eq!(check_sync_job_read_access(&user_info, root_auth_id, &job), true);
+    assert_eq!(check_sync_job_modify_access(&user_info, root_auth_id, &job), true);
 
     // user without permissions must fail
     assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
@@ -330,7 +330,7 @@ async fn order_certificate(
 
     for auth_url in &order.data.authorizations {
         task_log!(worker, "Getting authorization details from '{}'", auth_url);
-        let mut auth = acme.get_authorization(&auth_url).await?;
+        let mut auth = acme.get_authorization(auth_url).await?;
 
         let domain = match &mut auth.identifier {
             Identifier::Dns(domain) => domain.to_ascii_lowercase(),
@@ -442,7 +442,7 @@ async fn request_validation(
     validation_url: &str,
 ) -> Result<(), Error> {
     task_log!(worker, "Triggering validation");
-    acme.request_challenge_validation(&validation_url).await?;
+    acme.request_challenge_validation(validation_url).await?;
 
     task_log!(worker, "Sleeping for 5 seconds");
     tokio::time::sleep(Duration::from_secs(5)).await;
@@ -450,7 +450,7 @@ async fn request_validation(
     loop {
         use proxmox_acme_rs::authorization::Status;
 
-        let auth = acme.get_authorization(&auth_url).await?;
+        let auth = acme.get_authorization(auth_url).await?;
         match auth.status {
             Status::Pending => {
                 task_log!(worker, "Status is still 'pending', trying again in 10 seconds");
@@ -282,7 +282,7 @@ fn create_datastore_mount_unit(
     what: &str,
 ) -> Result<String, Error> {
 
-    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(&mount_point, true);
+    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
     mount_unit_name.push_str(".mount");
 
     let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
@@ -55,9 +55,9 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
 
     for line in data.lines() {
 
-        if let Some(caps) = DOMAIN_REGEX.captures(&line) {
+        if let Some(caps) = DOMAIN_REGEX.captures(line) {
            result["search"] = Value::from(&caps[1]);
-        } else if let Some(caps) = SERVER_REGEX.captures(&line) {
+        } else if let Some(caps) = SERVER_REGEX.captures(line) {
             nscount += 1;
             if nscount > 3 { continue };
             let nameserver = &caps[1];
@@ -121,7 +121,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
 
     let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?.sign(
         private_auth_key(),
-        Some(&tools::ticket::term_aad(&userid, &path, port)),
+        Some(&tools::ticket::term_aad(userid, path, port)),
     )?;
 
     let mut command = Vec::new();
@@ -161,7 +161,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
     arguments.push(&fd_string);
     arguments.extend_from_slice(&[
         "--path",
-        &path,
+        path,
         "--perm",
         "Sys.Console",
         "--authport",
@@ -293,7 +293,7 @@ fn upgrade_to_websocket(
         Ticket::<Empty>::parse(ticket)?.verify(
             crate::auth_helpers::public_auth_key(),
             ticket::TERM_PREFIX,
-            Some(&tools::ticket::term_aad(&userid, "/system", port)),
+            Some(&tools::ticket::term_aad(userid, "/system", port)),
         )?;
 
         let (ws, response) = WebSocket::new(parts.headers.clone())?;
@@ -17,7 +17,7 @@ use pbs_config::network::{self, NetworkConfig};
 use proxmox_rest_server::WorkerTask;
 
 fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
-    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(&list)?;
+    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
     Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
 }
 
@@ -176,9 +176,9 @@ fn get_service_state(
         bail!("unknown service name '{}'", service);
     }
 
-    let status = get_full_service_state(&service)?;
+    let status = get_full_service_state(service)?;
 
-    Ok(json_service_state(&service, status))
+    Ok(json_service_state(service, status))
 }
 
 fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
@@ -24,9 +24,9 @@ use pbs_config::CachedUserInfo;
 fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
     match (upid.worker_type.as_str(), &upid.worker_id) {
         ("verificationjob", Some(workerid)) => {
-            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(store) = captures.get(1) {
-                    return user_info.check_privs(&auth_id,
+                    return user_info.check_privs(auth_id,
                         &["datastore", store.as_str()],
                         PRIV_DATASTORE_VERIFY,
                         true);
@@ -34,7 +34,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
             }
         },
         ("syncjob", Some(workerid)) => {
-            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
                 let remote = captures.get(1);
                 let remote_store = captures.get(2);
                 let local_store = captures.get(3);
@@ -42,7 +42,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
                 if let (Some(remote), Some(remote_store), Some(local_store)) =
                     (remote, remote_store, local_store) {
 
-                    return check_pull_privs(&auth_id,
+                    return check_pull_privs(auth_id,
                         local_store.as_str(),
                         remote.as_str(),
                         remote_store.as_str(),
@@ -51,15 +51,15 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
             }
         },
         ("garbage_collection", Some(workerid)) => {
-            return user_info.check_privs(&auth_id,
-                &["datastore", &workerid],
+            return user_info.check_privs(auth_id,
+                &["datastore", workerid],
                 PRIV_DATASTORE_MODIFY,
                 true)
         },
        ("prune", Some(workerid)) => {
-            return user_info.check_privs(&auth_id,
+            return user_info.check_privs(auth_id,
                 &["datastore",
-                &workerid],
+                workerid],
                 PRIV_DATASTORE_MODIFY,
                 true);
         },
@@ -73,7 +73,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
 fn check_job_store(upid: &UPID, store: &str) -> bool {
     match (upid.worker_type.as_str(), &upid.worker_id) {
         (workertype, Some(workerid)) if workertype.starts_with("verif") => {
-            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(jobstore) = captures.get(1) {
                     return store == jobstore.as_str();
                }
@@ -82,7 +82,7 @@ fn check_job_store(upid: &UPID, store: &str) -> bool {
            }
        }
        ("syncjob", Some(workerid)) => {
-            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(local_store) = captures.get(3) {
                     return store == local_store.as_str();
                }
@@ -112,7 +112,7 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
         // or task == job which the user/token could have configured/manually executed
 
         user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
-            .or_else(|_| check_job_privs(&auth_id, &user_info, upid))
+            .or_else(|_| check_job_privs(auth_id, &user_info, upid))
             .or_else(|_| bail!("task access not allowed"))
     }
 }
@@ -250,7 +250,7 @@ async fn get_task_status(
 
 fn extract_upid(param: &Value) -> Result<UPID, Error> {
 
-    let upid_str = pbs_tools::json::required_string_param(&param, "upid")?;
+    let upid_str = pbs_tools::json::required_string_param(param, "upid")?;
 
     upid_str.parse::<UPID>()
 }
@@ -569,7 +569,7 @@ const UPID_API_SUBDIRS: SubdirMap = &sorted!([
 pub const UPID_API_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
     .delete(&API_METHOD_STOP_TASK)
-    .subdirs(&UPID_API_SUBDIRS);
+    .subdirs(UPID_API_SUBDIRS);
 
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_TASKS)
@@ -91,13 +91,13 @@ pub fn datastore_status(
     let mut list = Vec::new();
 
     for (store, (_, _)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if !allowed {
             continue;
        }
 
-        let datastore = match DataStore::lookup_datastore(&store) {
+        let datastore = match DataStore::lookup_datastore(store) {
             Ok(datastore) => datastore,
             Err(err) => {
                 list.push(json!({
@@ -182,7 +182,7 @@ pub fn do_tape_backup_job(
         Some(lock_tape_device(&drive_config, &setup.drive)?)
     };
 
-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);
 
     let upid_str = WorkerTask::new_thread(
@@ -363,7 +363,7 @@ pub fn backup(
 
     let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);
 
-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);
 
     let upid_str = WorkerTask::new_thread(
@@ -423,7 +423,7 @@ fn backup_worker(
     task_log!(worker, "update media online status");
     let changer_name = update_media_online_status(&setup.drive)?;
 
-    let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
+    let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;
 
     let mut pool_writer = PoolWriter::new(
         pool,
@@ -443,7 +443,7 @@ fn backup_worker(
     };
 
     let group_count_full = group_list.len();
-    let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, &group_filters)).collect();
+    let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, group_filters)).collect();
     let group_count = list.len();
     task_log!(worker, "found {} groups (out of {} total)", group_count, group_count_full);
     (list, group_count)
@@ -96,7 +96,7 @@ pub async fn get_status(
     for (id, drive_status) in status.drives.iter().enumerate() {
         let mut state = None;
         if let Some(drive) = drive_map.get(&(id as u64)) {
-            state = get_tape_device_state(&config, &drive)?;
+            state = get_tape_device_state(&config, drive)?;
         }
         let entry = MtxStatusEntry {
             entry_kind: MtxEntryKind::Drive,
@@ -231,7 +231,7 @@ const SUBDIRS: SubdirMap = &[
 
 const ITEM_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
-    .subdirs(&SUBDIRS);
+    .subdirs(SUBDIRS);
 
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_CHANGERS)
@@ -542,7 +542,7 @@ fn write_media_label(
     let media_id = if let Some(ref pool) = pool {
         // assign media to pool by writing special media set label
         task_log!(worker, "Label media '{}' for pool '{}'", label.label_text, pool);
-        let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);
+        let set = MediaSetLabel::with_data(pool, [0u8; 16].into(), 0, label.ctime, None);
 
         drive.write_media_set_label(&set, None)?;
 
@@ -1473,7 +1473,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([
 
 const ITEM_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
-    .subdirs(&SUBDIRS);
+    .subdirs(SUBDIRS);
 
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_DRIVES)
@@ -138,7 +138,7 @@ fn check_datastore_privs(
     auth_id: &Authid,
     owner: &Option<Authid>,
 ) -> Result<(), Error> {
-    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
     if (privs & PRIV_DATASTORE_BACKUP) == 0 {
         bail!("no permissions on /datastore/{}", store);
     }
@@ -220,7 +220,7 @@ pub fn restore(
     }
 
     for store in used_datastores.iter() {
-        check_datastore_privs(&user_info, &store, &auth_id, &owner)?;
+        check_datastore_privs(&user_info, store, &auth_id, &owner)?;
     }
 
     let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
@@ -448,7 +448,7 @@ fn restore_list_worker(
     })?;
 
     let (owner, _group_lock) =
-        datastore.create_locked_backup_group(backup_dir.group(), &restore_owner)?;
+        datastore.create_locked_backup_group(backup_dir.group(), restore_owner)?;
     if restore_owner != &owner {
         // only the owner is allowed to create additional snapshots
         bail!(
@@ -460,7 +460,7 @@ fn restore_list_worker(
    }
 
     let (media_id, file_num) = if let Some((media_uuid, file_num)) =
-        catalog.lookup_snapshot(&source_datastore, &snapshot)
+        catalog.lookup_snapshot(source_datastore, snapshot)
     {
         let media_id = inventory.lookup_media(media_uuid).unwrap();
         (media_id, file_num)
@@ -516,7 +516,7 @@ fn restore_list_worker(
         let (drive, info) = request_and_load_media(
             &worker,
             &drive_config,
-            &drive_name,
+            drive_name,
             &media_id.label,
             &email,
         )?;
@@ -568,7 +568,7 @@ fn restore_list_worker(
         let (mut drive, _info) = request_and_load_media(
             &worker,
             &drive_config,
-            &drive_name,
+            drive_name,
             &media_id.label,
             &email,
         )?;
@@ -591,7 +591,7 @@ fn restore_list_worker(
         let backup_dir: BackupDir = snapshot.parse()?;
 
         let datastore = store_map
-            .get_datastore(&source_datastore)
+            .get_datastore(source_datastore)
             .ok_or_else(|| format_err!("unexpected source datastore: {}", source_datastore))?;
 
         let mut tmp_path = base_path.clone();
@@ -646,7 +646,7 @@ fn get_media_set_catalog(
        }
        Some(media_uuid) => {
            let media_id = inventory.lookup_media(media_uuid).unwrap();
-            let media_catalog = MediaCatalog::open(status_path, &media_id, false, false)?;
+            let media_catalog = MediaCatalog::open(status_path, media_id, false, false)?;
            catalog.append_catalog(media_catalog)?;
        }
    }
@@ -899,7 +899,7 @@ pub fn request_and_restore_media(
         Some(ref set) => &set.uuid,
     };
 
-    let (mut drive, info) = request_and_load_media(&worker, &drive_config, &drive_name, &media_id.label, email)?;
+    let (mut drive, info) = request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?;
 
     match info.media_set_label {
         None => {
@@ -923,7 +923,7 @@ pub fn request_and_restore_media(
         worker,
         &mut drive,
         &info,
-        Some((&store_map, restore_owner)),
+        Some((store_map, restore_owner)),
         checked_chunks_map,
         false,
     )