commit d543587d34
Merge branch 'master' of ssh://proxdev.maurer-it.com/rust/proxmox-backup
@@ -72,7 +72,7 @@ fn extract_acl_node_data(
 }
 }
 for (group, roles) in &node.groups {
-if let Some(_) = token_user {
+if token_user.is_some() {
 continue;
 }

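Note: this hunk, and many below, apply the same clippy `redundant_pattern_matching` fix — binding a value only to discard it (`if let Some(_) = x`) becomes `Option::is_some()`. A minimal sketch of the idiom (the function and names here are hypothetical, not from the patch):

    fn has_token_user(token_user: Option<&str>) -> bool {
        // if let Some(_) = token_user { true } else { false }  // before
        token_user.is_some() // after: same check, no throwaway binding
    }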
@@ -210,7 +210,7 @@ pub fn update_acl(

 let top_level_privs = user_info.lookup_privs(&current_auth_id, &["access", "acl"]);
 if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
-if let Some(_) = group {
+if group.is_some() {
 bail!("Unprivileged users are not allowed to create group ACL item.");
 }
@@ -46,7 +46,7 @@ fn list_roles() -> Result<Value, Error> {
 let mut priv_list = Vec::new();
 for (name, privilege) in PRIVILEGES.iter() {
 if privs & privilege > 0 {
-priv_list.push(name.clone());
+priv_list.push(name);
 }
 }
 list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
@@ -331,13 +331,11 @@ fn list_tfa(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<TfaUser>, Error> {
 entries: to_data(data),
 });
 }
-} else {
-if let Some(data) = { tfa_data }.remove(authid.user()) {
-out.push(TfaUser {
-userid: authid.into(),
-entries: to_data(data),
-});
-}
+} else if let Some(data) = { tfa_data }.remove(authid.user()) {
+out.push(TfaUser {
+userid: authid.into(),
+entries: to_data(data),
+});
 }

 Ok(out)
@@ -169,7 +169,7 @@ pub fn list_users(
 })
 .collect()
 } else {
-iter.map(|user: user::User| UserWithTokens::new(user))
+iter.map(UserWithTokens::new)
 .collect()
 };

@@ -230,7 +230,7 @@ pub fn create_user(

 let (mut config, _digest) = user::config()?;

-if let Some(_) = config.sections.get(user.userid.as_str()) {
+if config.sections.get(user.userid.as_str()).is_some() {
 bail!("user '{}' already exists.", user.userid);
 }

@@ -595,7 +595,7 @@ pub fn generate_token(
 let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
 let tokenid_string = tokenid.to_string();

-if let Some(_) = config.sections.get(&tokenid_string) {
+if config.sections.get(&tokenid_string).is_some() {
 bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
 }

@@ -603,7 +603,7 @@ pub fn generate_token(
 token_shadow::set_secret(&tokenid, &secret)?;

 let token = user::ApiToken {
-tokenid: tokenid.clone(),
+tokenid,
 comment,
 enable,
 expire,
@@ -440,8 +440,8 @@ pub fn list_snapshots (
 let files = info
 .files
 .into_iter()
-.map(|x| BackupContent {
-filename: x.to_string(),
+.map(|filename| BackupContent {
+filename,
 size: None,
 crypt_mode: None,
 })
@@ -662,11 +662,11 @@ pub fn verify(
 _ => bail!("parameters do not specify a backup group or snapshot"),
 }

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 worker_type,
-Some(worker_id.clone()),
+Some(worker_id),
 auth_id.clone(),
 to_stdout,
 move |worker| {
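The `to_stdout` line is clippy's `needless_bool` pattern, repeated in many hunks below: an `if cond { true } else { false }` is just `cond`. A hypothetical standalone illustration:

    #[derive(PartialEq)]
    enum EnvType { Cli, Rest } // stand-in for RpcEnvironmentType

    fn to_stdout(env_type: EnvType) -> bool {
        // if env_type == EnvType::Cli { true } else { false }  // before
        env_type == EnvType::Cli // the comparison already yields the bool
    }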
@@ -711,7 +711,7 @@ pub fn verify(

 verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
 };
-if failed_dirs.len() > 0 {
+if !failed_dirs.is_empty() {
 worker.log("Failed to verify the following snapshots/groups:");
 for dir in failed_dirs {
 worker.log(format!("\t{}", dir));
@@ -855,7 +855,7 @@ fn prune(


 // We use a WorkerTask just to have a task log, but run synchrounously
-let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
+let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

 if keep_all {
 worker.log("No prune selection - keeping all files.");
@@ -935,7 +935,7 @@ fn start_garbage_collection(
 let job = Job::new("garbage_collection", &store)
 .map_err(|_| format_err!("garbage collection already running"))?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
 .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
@@ -1009,7 +1009,7 @@ fn get_datastore_list(
 }
 }

-Ok(list.into())
+Ok(list)
 }

 #[sortable]
@@ -1066,7 +1066,7 @@ fn download_file(
 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+.map_ok(|bytes| bytes.freeze())
 .map_err(move |err| {
 eprintln!("error during streaming of '{:?}' - {}", &path, err);
 err
@@ -1341,10 +1341,10 @@ fn catalog(

 if filepath != "root" {
 components = base64::decode(filepath)?;
-if components.len() > 0 && components[0] == '/' as u8 {
+if !components.is_empty() && components[0] == b'/' {
 components.remove(0);
 }
-for component in components.split(|c| *c == '/' as u8) {
+for component in components.split(|c| *c == b'/') {
 if let Some(entry) = catalog_reader.lookup(&current, component)? {
 current = entry;
 } else {
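Two idioms meet here: `b'/'` is the byte literal that `'/' as u8` spells with a cast, and `!v.is_empty()` replaces `v.len() > 0`. A self-contained sketch of the slash-stripping step:

    fn strip_leading_slash(components: &mut Vec<u8>) {
        // components.len() > 0 && components[0] == '/' as u8  // before
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
    }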
@@ -1357,7 +1357,7 @@ fn catalog(

 for direntry in catalog_reader.read_dir(&current)? {
 let mut components = components.clone();
-components.push('/' as u8);
+components.push(b'/');
 components.extend(&direntry.name);
 let path = base64::encode(components);
 let text = String::from_utf8_lossy(&direntry.name);
@@ -1487,13 +1487,13 @@ fn pxar_file_download(
 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

 let mut components = base64::decode(&filepath)?;
-if components.len() > 0 && components[0] == '/' as u8 {
+if !components.is_empty() && components[0] == b'/' {
 components.remove(0);
 }

-let mut split = components.splitn(2, |c| *c == '/' as u8);
+let mut split = components.splitn(2, |c| *c == b'/');
 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
-let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
 for file in files {
 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
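`ok_or(expr)` builds its error even on the `Some` path; `ok_or_else(|| expr)` defers the `format_err!` allocation until it is actually needed. A sketch using `anyhow` (assumed here only for illustration):

    use anyhow::{format_err, Error};

    fn second_component(s: &str) -> Result<&str, Error> {
        let mut split = s.splitn(2, '/');
        let _name = split.next().unwrap(); // splitn yields at least one item
        // the closure runs only when next() returns None
        split.next().ok_or_else(|| format_err!("path looks strange '{}'", s))
    }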
@@ -1520,7 +1520,7 @@ fn pxar_file_download(
 let root = decoder.open_root().await?;
 let file = root
 .lookup(OsStr::from_bytes(file_path)).await?
-.ok_or(format_err!("error opening '{:?}'", file_path))?;
+.ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;

 let body = match file.kind() {
 EntryKind::File { .. } => Body::wrap_stream(
@@ -58,7 +58,7 @@ pub fn list_sync_jobs(
 }
 })
 .filter(|job: &SyncJobStatus| {
-let as_config: SyncJobConfig = job.clone().into();
+let as_config: SyncJobConfig = job.into();
 check_sync_job_read_access(&user_info, &auth_id, &as_config)
 }).collect();

@@ -81,13 +81,13 @@ pub fn list_sync_jobs(
 job.last_run_state = state;
 job.last_run_endtime = endtime;

-let last = job.last_run_endtime.unwrap_or_else(|| starttime);
+let last = job.last_run_endtime.unwrap_or(starttime);

 job.next_run = (|| -> Option<i64> {
 let schedule = job.schedule.as_ref()?;
 let event = parse_calendar_event(&schedule).ok()?;
 // ignore errors
-compute_next_event(&event, last, false).unwrap_or_else(|_| None)
+compute_next_event(&event, last, false).unwrap_or(None)
 })();
 }

@@ -86,13 +86,13 @@ pub fn list_verification_jobs(
 job.last_run_state = state;
 job.last_run_endtime = endtime;

-let last = job.last_run_endtime.unwrap_or_else(|| starttime);
+let last = job.last_run_endtime.unwrap_or(starttime);

 job.next_run = (|| -> Option<i64> {
 let schedule = job.schedule.as_ref()?;
 let event = parse_calendar_event(&schedule).ok()?;
 // ignore errors
-compute_next_event(&event, last, false).unwrap_or(None)
+compute_next_event(&event, last, false).unwrap_or(None)
 })();
 }

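These job-listing hunks and the sync-job hunk further down move in opposite directions for the same reason: a value that already exists goes through `unwrap_or`, while work that should only happen on `None` (a `clone()`, an allocation) hides behind `unwrap_or_else`. Sketch with made-up values:

    fn defaults(last_end: Option<i64>, start: i64, owner: Option<String>) -> (i64, String) {
        // `start` is already computed, so a closure adds nothing:
        let last = last_end.unwrap_or(start);
        // the allocation below should only run when owner is None:
        let owner = owner.unwrap_or_else(|| String::from("root@pam"));
        (last, owner)
    }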
@@ -138,7 +138,7 @@ async move {
 }
 };

-let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
+let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

 let _last_guard = if let Some(last) = &last_backup {
 if backup_dir.backup_time() <= last.backup_dir.backup_time() {
@@ -465,7 +465,7 @@ impl BackupEnvironment {
 state.ensure_unfinished()?;

 // test if all writer are correctly closed
-if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
+if !state.dynamic_writers.is_empty() || !state.fixed_writers.is_empty() {
 bail!("found open index writer - unable to finish backup");
 }

@@ -120,11 +120,11 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {

 let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
+let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;

 let (mut config, _digest) = datastore::config()?;

-if let Some(_) = config.sections.get(&datastore.name) {
+if config.sections.get(&datastore.name).is_some() {
 bail!("datastore '{}' already exists.", datastore.name);
 }

@@ -96,13 +96,13 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

 let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-let mut data = param.clone();
+let mut data = param;
 data["password"] = Value::from(base64::encode(password.as_bytes()));
 let remote: remote::Remote = serde_json::from_value(data)?;

 let (mut config, _digest) = remote::config()?;

-if let Some(_) = config.sections.get(&remote.name) {
+if config.sections.get(&remote.name).is_some() {
 bail!("remote '{}' already exists.", remote.name);
 }

@@ -154,14 +154,14 @@ pub fn create_sync_job(

 let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
+let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?;
 if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
 bail!("permission check failed");
 }

 let (mut config, _digest) = sync::config()?;

-if let Some(_) = config.sections.get(&sync_job.id) {
+if config.sections.get(&sync_job.id).is_some() {
 bail!("job '{}' already exists.", sync_job.id);
 }

@@ -514,7 +514,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator

 // unless they have Datastore.Modify as well
 job.store = "localstore3".to_string();
-job.owner = Some(read_auth_id.clone());
+job.owner = Some(read_auth_id);
 assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
 job.owner = None;
 assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
@@ -98,7 +98,7 @@ pub fn create_verification_job(
 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;

-let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
+let verification_job: verify::VerificationJobConfig = serde_json::from_value(param)?;

 user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?;

@@ -106,7 +106,7 @@ pub fn create_verification_job(

 let (mut config, _digest) = verify::config()?;

-if let Some(_) = config.sections.get(&verification_job.id) {
+if config.sections.get(&verification_job.id).is_some() {
 bail!("job '{}' already exists.", verification_job.id);
 }

@@ -16,7 +16,7 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
 };

 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+.map_ok(|bytes| bytes.freeze());

 let body = Body::wrap_stream(payload);

@@ -121,7 +121,7 @@ async fn termproxy(
 )?;

 let mut command = Vec::new();
-match cmd.as_ref().map(|x| x.as_str()) {
+match cmd.as_deref() {
 Some("login") | None => {
 command.push("login");
 if userid == "root@pam" {
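`Option::as_deref()` converts `Option<String>` to `Option<&str>` in one step, replacing the `as_ref().map(|x| x.as_str())` chain while the literal patterns below it still match. Minimal illustration (hypothetical dispatcher):

    fn command_for(cmd: Option<String>) -> &'static str {
        match cmd.as_deref() {
            Some("login") | None => "login",
            Some(_) => "other",
        }
    }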
@@ -35,18 +35,15 @@ use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
 /// List available APT updates
 fn apt_update_available(_param: Value) -> Result<Value, Error> {

-match apt::pkg_cache_expired() {
-Ok(false) => {
-if let Ok(Some(cache)) = apt::read_pkg_state() {
-return Ok(json!(cache.package_status));
-}
-},
-_ => (),
+if let Ok(false) = apt::pkg_cache_expired() {
+if let Ok(Some(cache)) = apt::read_pkg_state() {
+return Ok(json!(cache.package_status));
+}
 }

 let cache = apt::update_cache()?;

-return Ok(json!(cache.package_status));
+Ok(json!(cache.package_status))
 }

 fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
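A `match` whose only interesting arm sits next to a `_ => ()` fallback is what `if let` expresses directly; chaining the two `Ok` patterns also removes two brace levels. The shape, with hypothetical helpers standing in for `apt::pkg_cache_expired`/`apt::read_pkg_state`:

    fn cached_value(expired: Result<bool, ()>, state: Result<Option<u32>, ()>) -> Option<u32> {
        if let Ok(false) = expired {
            if let Ok(Some(cached)) = state {
                return Some(cached);
            }
        }
        None // the real code falls through to a cache refresh here
    }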
@@ -90,8 +87,8 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
 type: bool,
 description: r#"Send notification mail about new package updates availanle to the
 email address configured for 'root@pam')."#,
-optional: true,
 default: false,
+optional: true,
 },
 quiet: {
 description: "Only produces output suitable for logging, omitting progress indicators.",
@@ -116,7 +113,7 @@ pub fn apt_update_database(
 ) -> Result<String, Error> {

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 // FIXME: change to non-option in signature and drop below once we have proxmox-api-macro 0.2.3
 let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
 let notify = notify.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_NOTIFY);
@@ -196,7 +193,7 @@ fn apt_get_changelog(
 }
 }, Some(&name));

-if pkg_info.len() == 0 {
+if pkg_info.is_empty() {
 bail!("Package '{}' not found", name);
 }

@@ -205,7 +202,7 @@ fn apt_get_changelog(
 if changelog_url.starts_with("http://download.proxmox.com/") {
 let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, None))
 .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
-return Ok(json!(changelog));
+Ok(json!(changelog))

 } else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
 let sub = match subscription::read_subscription()? {
@@ -229,7 +226,7 @@ fn apt_get_changelog(

 let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, Some(&auth_header)))
 .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
-return Ok(json!(changelog));
+Ok(json!(changelog))

 } else {
 let mut command = std::process::Command::new("apt-get");
@@ -237,7 +234,7 @@ fn apt_get_changelog(
 command.arg("-qq"); // don't display download progress
 command.arg(name);
 let output = crate::tools::run_command(command, None)?;
-return Ok(json!(output));
+Ok(json!(output))
 }
 }

@@ -138,7 +138,7 @@ pub fn initialize_disk(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -132,7 +132,7 @@ pub fn create_datastore_disk(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -164,7 +164,7 @@ pub fn create_datastore_disk(

 let manager = DiskManage::new();

-let disk = manager.clone().disk_by_name(&disk)?;
+let disk = manager.disk_by_name(&disk)?;

 let partition = create_single_linux_partition(&disk)?;
 create_file_system(&partition, filesystem)?;
@@ -212,8 +212,7 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
 let (config, _) = crate::config::datastore::config()?;
 let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
 let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
-.filter(|ds| ds.path == path)
-.next();
+.find(|ds| ds.path == path);

 if let Some(conflicting_datastore) = conflicting_datastore {
 bail!("Can't remove '{}' since it's required by datastore '{}'",
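`Iterator::find` is the fused form of `.filter(..).next()`: it stops at the first match instead of building a filtered iterator and then pulling one item. Sketch with hypothetical data:

    fn conflicting(paths: Vec<String>, needle: &str) -> Option<String> {
        // paths.into_iter().filter(|p| p.as_str() == needle).next()  // before
        paths.into_iter().find(|p| p.as_str() == needle)
    }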
@@ -254,7 +254,7 @@ pub fn create_zpool(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -137,7 +137,7 @@ pub fn set_subscription(

 let server_id = tools::get_hardware_address()?;

-let info = subscription::check_subscription(key, server_id.to_owned())?;
+let info = subscription::check_subscription(key, server_id)?;

 subscription::write_subscription(info)
 .map_err(|e| format_err!("Error writing subscription status - {}", e))?;
@@ -513,7 +513,7 @@ pub fn list_tasks(
 .collect();

 let mut count = result.len() + start as usize;
-if result.len() > 0 && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
+if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
 count += 1;
 }

@@ -88,7 +88,7 @@ pub fn do_sync_job(
 let worker_future = async move {

 let delete = sync_job.remove_vanished.unwrap_or(true);
-let sync_owner = sync_job.owner.unwrap_or(Authid::root_auth_id().clone());
+let sync_owner = sync_job.owner.unwrap_or_else(|| Authid::root_auth_id().clone());
 let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;

 worker.log(format!("Starting datastore sync job '{}'", job_id));
@@ -150,16 +150,16 @@ fn upgrade_to_backup_reader_protocol(
 }
 });
 let abort_future = abort_future
-.map(|_| Err(format_err!("task aborted")));
+.map(|_| -> Result<(), anyhow::Error> { Err(format_err!("task aborted")) });

 use futures::future::Either;
 futures::future::select(req_fut, abort_future)
 .map(move |res| {
 let _guard = _guard;
 match res {
-Either::Left((Ok(res), _)) => Ok(res),
+Either::Left((Ok(_), _)) => Ok(()),
 Either::Left((Err(err), _)) => Err(err),
-Either::Right((Ok(res), _)) => Ok(res),
+Either::Right((Ok(_), _)) => Ok(()),
 Either::Right((Err(err), _)) => Err(err),
 }
 })
@@ -127,49 +127,46 @@ fn datastore_status(
 rrd_mode,
 );

-match (total_res, used_res) {
-(Some((start, reso, total_list)), Some((_, _, used_list))) => {
+if let (Some((start, reso, total_list)), Some((_, _, used_list))) = (total_res, used_res) {
 let mut usage_list: Vec<f64> = Vec::new();
 let mut time_list: Vec<u64> = Vec::new();
 let mut history = Vec::new();

 for (idx, used) in used_list.iter().enumerate() {
 let total = if idx < total_list.len() {
 total_list[idx]
 } else {
 None
 };

 match (total, used) {
 (Some(total), Some(used)) if total != 0.0 => {
 time_list.push(start + (idx as u64)*reso);
 let usage = used/total;
 usage_list.push(usage);
 history.push(json!(usage));
 },
 _ => {
 history.push(json!(null))
 }
 }
 }

 entry["history-start"] = start.into();
 entry["history-delta"] = reso.into();
 entry["history"] = history.into();

 // we skip the calculation for datastores with not enough data
 if usage_list.len() >= 7 {
 if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
 if b != 0.0 {
 let estimate = (1.0 - a) / b;
 entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
 } else {
 entry["estimated-full-date"] = Value::from(0);
 }
 }
 }
-},
-_ => {},
 }

 list.push(entry);
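The same `match`-to-`if let` collapse works on tuples: pattern-matching `(Some(..), Some(..))` with an empty `_` arm becomes a single `if let` over the pair and un-indents the whole body one level. Minimal form:

    fn usage(total: Option<f64>, used: Option<f64>) -> Option<f64> {
        if let (Some(total), Some(used)) = (total, used) {
            if total != 0.0 {
                return Some(used / total);
            }
        }
        None
    }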
@@ -87,14 +87,14 @@ pub fn backup(
 // early check before starting worker
 check_drive_exists(&drive_config, &pool_config.drive)?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let eject_media = eject_media.unwrap_or(false);
 let export_media_set = export_media_set.unwrap_or(false);

 let upid_str = WorkerTask::new_thread(
 "tape-backup",
-Some(store.clone()),
+Some(store),
 auth_id,
 to_stdout,
 move |worker| {
@@ -226,7 +226,7 @@ pub fn erase_media(

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "erase-media",
@@ -267,7 +267,7 @@ pub fn rewind(

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "rewind-media",
@@ -353,7 +353,7 @@ pub fn label_media(

 let (config, _digest) = config::drive::config()?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "label-media",
@@ -595,7 +595,7 @@ pub fn clean_drive(

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "clean-drive",
@@ -722,7 +722,7 @@ pub fn update_inventory(

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "inventory-update",
@@ -735,7 +735,7 @@ pub fn update_inventory(

 let label_text_list = changer.online_media_label_texts()?;
 if label_text_list.is_empty() {
-worker.log(format!("changer device does not list any media labels"));
+worker.log("changer device does not list any media labels".to_string());
 }

 let state_path = Path::new(TAPE_STATUS_DIR);
@@ -752,11 +752,9 @@ pub fn update_inventory(

 let label_text = label_text.to_string();

-if !read_all_labels.unwrap_or(false) {
-if let Some(_) = inventory.find_media_by_label_text(&label_text) {
+if !read_all_labels.unwrap_or(false) && inventory.find_media_by_label_text(&label_text).is_some() {
 worker.log(format!("media '{}' already inventoried", label_text));
 continue;
 }
-}

 if let Err(err) = changer.load_media(&label_text) {
@@ -824,7 +822,7 @@ pub fn barcode_label_media(

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "barcode-label-media",
@@ -1002,7 +1000,7 @@ pub fn catalog_media(

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "catalog-media",
@@ -1060,10 +1058,8 @@ pub fn catalog_media(

 let _lock = MediaPool::lock(status_path, &pool)?;

-if MediaCatalog::exists(status_path, &media_id.label.uuid) {
-if !force {
+if MediaCatalog::exists(status_path, &media_id.label.uuid) && !force {
 bail!("media catalog exists (please use --force to overwrite)");
 }
-}

 restore_media(&worker, &mut drive, &media_id, None, verbose)?;
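Nested conditions with nothing between them collapse into one `&&` (clippy: `collapsible_if`), which also merges the two closing braces. Sketch:

    fn check_overwrite(exists: bool, force: bool) -> Result<(), String> {
        // if exists { if !force { ... } }  // before
        if exists && !force {
            return Err("catalog exists (use --force to overwrite)".into());
        }
        Ok(())
    }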
@@ -197,7 +197,6 @@ pub fn destroy_media(label_text: String, force: Option<bool>,) -> Result<(), Err
 }

 let uuid = media_id.label.uuid.clone();
-drop(media_id);

 inventory.remove_media(&uuid)?;

@@ -115,7 +115,7 @@ pub fn restore(
 // early check before starting worker
 check_drive_exists(&drive_config, &pool_config.drive)?;

-let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

 let upid_str = WorkerTask::new_thread(
 "tape-restore",
@@ -128,7 +128,7 @@ pub fn restore(

 let members = inventory.compute_media_set_members(&media_set_uuid)?;

-let media_list = members.media_list().clone();
+let media_list = members.media_list();

 let mut media_id_list = Vec::new();

@@ -234,7 +234,6 @@ pub fn restore_media(
 Some(reader) => reader,
 };

-let target = target.clone();
 restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
 }

@@ -344,36 +343,26 @@ fn restore_chunk_archive<'a>(
 let mut decoder = ChunkArchiveDecoder::new(reader);

 let result: Result<_, Error> = proxmox::try_block!({
-loop {
-match decoder.next_chunk()? {
-Some((digest, blob)) => {
-
+while let Some((digest, blob)) = decoder.next_chunk()? {
 if let Some(datastore) = datastore {
 let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
 if !chunk_exists {
 blob.verify_crc()?;

 if blob.crypt_mode()? == CryptMode::None {
 blob.decode(None, Some(&digest))?; // verify digest
 }
 if verbose {
 worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
 }
 datastore.insert_chunk(&blob, &digest)?;
-} else {
-if verbose {
+} else if verbose {
 worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
 }
-}
-} else {
-if verbose {
+} else if verbose {
 worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
 }
-}
 chunks.push(digest);
 }
-None => break,
-}
-}
 Ok(())
 });
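`loop { match next()? { Some(x) => ..., None => break } }` is the hand-rolled form of `while let`, and rewriting it removes the `match` plus two nesting levels; the `else { if .. }` arms likewise flatten to `else if`. A sketch with a hypothetical fallible producer:

    fn drain(mut next_chunk: impl FnMut() -> Result<Option<u8>, ()>) -> Result<Vec<u8>, ()> {
        let mut chunks = Vec::new();
        while let Some(byte) = next_chunk()? {
            chunks.push(byte);
        }
        Ok(chunks)
    }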
@@ -390,7 +379,7 @@ fn restore_chunk_archive<'a>(

 // check if this is an aborted stream without end marker
 if let Ok(false) = reader.has_end_marker() {
-worker.log(format!("missing stream end marker"));
+worker.log("missing stream end marker".to_string());
 return Ok(None);
 }

@@ -407,7 +396,7 @@ fn restore_snapshot_archive<'a>(

 let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
 match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
-Ok(()) => return Ok(true),
+Ok(()) => Ok(true),
 Err(err) => {
 let reader = decoder.input();

@@ -422,7 +411,7 @@ fn restore_snapshot_archive<'a>(
 }

 // else the archive is corrupt
-return Err(err);
+Err(err)
 }
 }
 }
@@ -1092,7 +1092,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
 ];

 for fingerprint in invalid_fingerprints.iter() {
-if let Ok(_) = parse_simple_value(fingerprint, &schema) {
+if parse_simple_value(fingerprint, &schema).is_ok() {
 bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
 }
 }
@@ -1133,7 +1133,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
 ];

 for name in invalid_user_ids.iter() {
-if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
+if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
 bail!("test userid '{}' failed - got Ok() while exception an error.", name);
 }
 }
@@ -277,7 +277,7 @@ impl PartialEq<&str> for RealmRef {

 impl PartialEq<RealmRef> for Realm {
 fn eq(&self, rhs: &RealmRef) -> bool {
-self.0 == &rhs.0
+self.0 == rhs.0
 }
 }

@@ -638,7 +638,7 @@ impl std::str::FromStr for Authid {
 .iter()
 .rposition(|&b| b == b'!')
 .map(|pos| if pos < name_len { id.len() } else { pos })
-.unwrap_or(id.len());
+.unwrap_or_else(|| id.len());

 if realm_end == id.len() - 1 {
 bail!("empty token name in userid");
@@ -670,7 +670,7 @@ impl TryFrom<String> for Authid {
 .iter()
 .rposition(|&b| b == b'!')
 .map(|pos| if pos < name_len { data.len() } else { pos })
-.unwrap_or(data.len());
+.unwrap_or_else(|| data.len());

 if realm_end == data.len() - 1 {
 bail!("empty token name in userid");
@@ -97,7 +97,7 @@ where
 let info = this
 .index
 .chunk_info(idx)
-.ok_or(io_format_err!("could not get digest"))?;
+.ok_or_else(|| io_format_err!("could not get digest"))?;

 this.current_chunk_offset = offset;
 this.current_chunk_idx = idx;
@@ -137,18 +137,12 @@ impl DirEntry {

 /// Check if DirEntry is a directory
 pub fn is_directory(&self) -> bool {
-match self.attr {
-DirEntryAttribute::Directory { .. } => true,
-_ => false,
-}
+matches!(self.attr, DirEntryAttribute::Directory { .. })
 }

 /// Check if DirEntry is a symlink
 pub fn is_symlink(&self) -> bool {
-match self.attr {
-DirEntryAttribute::Symlink { .. } => true,
-_ => false,
-}
+matches!(self.attr, DirEntryAttribute::Symlink { .. })
 }
 }

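`matches!` (stable since Rust 1.42) condenses a boolean `match` whose arms are `=> true` / `=> false` into one expression; the pattern syntax, including `{ .. }` struct patterns, is unchanged. Illustration with a made-up attribute enum:

    enum Attr { Directory { start: u64 }, Symlink, File }

    fn is_directory(attr: &Attr) -> bool {
        matches!(attr, Attr::Directory { .. })
    }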
@ -591,6 +585,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
|||||||
///
|
///
|
||||||
/// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
|
/// Stores 7 bits per byte, Bit 8 indicates the end of the sequence (when not set).
|
||||||
/// If the value is negative, we end with a zero byte (0x00).
|
/// If the value is negative, we end with a zero byte (0x00).
|
||||||
|
#[allow(clippy::neg_multiply)]
|
||||||
pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error> {
|
pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error> {
|
||||||
let mut enc = Vec::new();
|
let mut enc = Vec::new();
|
||||||
|
|
||||||
@ -611,7 +606,7 @@ pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error>
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
enc.push((128 | (d & 127)) as u8);
|
enc.push((128 | (d & 127)) as u8);
|
||||||
d = d >> 7;
|
d >>= 7;
|
||||||
}
|
}
|
||||||
writer.write_all(&enc)?;
|
writer.write_all(&enc)?;
|
||||||
|
|
||||||
@ -623,6 +618,7 @@ pub fn catalog_encode_i64<W: Write>(writer: &mut W, v: i64) -> Result<(), Error>
|
|||||||
/// We currently read maximal 11 bytes, which give a maximum of 70 bits + sign.
|
/// We currently read maximal 11 bytes, which give a maximum of 70 bits + sign.
|
||||||
/// this method is compatible with catalog_encode_u64 iff the
|
/// this method is compatible with catalog_encode_u64 iff the
|
||||||
/// value encoded is <= 2^63 (values > 2^63 cannot be represented in an i64)
|
/// value encoded is <= 2^63 (values > 2^63 cannot be represented in an i64)
|
||||||
|
#[allow(clippy::neg_multiply)]
|
||||||
pub fn catalog_decode_i64<R: Read>(reader: &mut R) -> Result<i64, Error> {
|
pub fn catalog_decode_i64<R: Read>(reader: &mut R) -> Result<i64, Error> {
|
||||||
|
|
||||||
let mut v: u64 = 0;
|
let mut v: u64 = 0;
|
||||||
@ -665,7 +661,7 @@ pub fn catalog_encode_u64<W: Write>(writer: &mut W, v: u64) -> Result<(), Error>
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
enc.push((128 | (d & 127)) as u8);
|
enc.push((128 | (d & 127)) as u8);
|
||||||
d = d >> 7;
|
d >>= 7;
|
||||||
}
|
}
|
||||||
writer.write_all(&enc)?;
|
writer.write_all(&enc)?;
|
||||||
|
|
||||||
|
@ -441,8 +441,7 @@ impl Shell {
|
|||||||
R: 'static,
|
R: 'static,
|
||||||
{
|
{
|
||||||
let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
|
let shell: &mut Shell = unsafe { std::mem::transmute(SHELL.unwrap()) };
|
||||||
let result = call(&mut *shell).await;
|
call(&mut *shell).await
|
||||||
result
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn shell(mut self) -> Result<(), Error> {
|
pub async fn shell(mut self) -> Result<(), Error> {
|
||||||
|
@ -18,7 +18,7 @@ impl <W: Write> ChecksumWriter<W> {
|
|||||||
let hasher = crc32fast::Hasher::new();
|
let hasher = crc32fast::Hasher::new();
|
||||||
let signer = match config {
|
let signer = match config {
|
||||||
Some(config) => {
|
Some(config) => {
|
||||||
let tied_signer = Tied::new(config.clone(), |config| {
|
let tied_signer = Tied::new(config, |config| {
|
||||||
Box::new(unsafe { (*config).data_signer() })
|
Box::new(unsafe { (*config).data_signer() })
|
||||||
});
|
});
|
||||||
Some(tied_signer)
|
Some(tied_signer)
|
||||||
|
@ -44,7 +44,7 @@ fn digest_to_prefix(digest: &[u8]) -> PathBuf {
|
|||||||
buf.push(HEX_CHARS[(digest[0] as usize) &0xf]);
|
buf.push(HEX_CHARS[(digest[0] as usize) &0xf]);
|
||||||
buf.push(HEX_CHARS[(digest[1] as usize) >> 4]);
|
buf.push(HEX_CHARS[(digest[1] as usize) >> 4]);
|
||||||
buf.push(HEX_CHARS[(digest[1] as usize) & 0xf]);
|
buf.push(HEX_CHARS[(digest[1] as usize) & 0xf]);
|
||||||
buf.push('/' as u8);
|
buf.push(b'/');
|
||||||
|
|
||||||
let path = unsafe { String::from_utf8_unchecked(buf)};
|
let path = unsafe { String::from_utf8_unchecked(buf)};
|
||||||
|
|
||||||
@ -80,7 +80,7 @@ impl ChunkStore {
|
|||||||
|
|
||||||
let default_options = CreateOptions::new();
|
let default_options = CreateOptions::new();
|
||||||
|
|
||||||
match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
|
match create_path(&base, Some(default_options), Some(options.clone())) {
|
||||||
Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
|
Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
|
||||||
Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
|
Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
|
||||||
}
|
}
|
||||||
@ -113,9 +113,8 @@ impl ChunkStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
|
fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
|
||||||
let base: PathBuf = base.into();
|
let mut lockfile_path: PathBuf = base.into();
|
||||||
|
|
||||||
let mut lockfile_path = base.clone();
|
|
||||||
lockfile_path.push(".lock");
|
lockfile_path.push(".lock");
|
||||||
|
|
||||||
lockfile_path
|
lockfile_path
|
||||||
@ -227,7 +226,7 @@ impl ChunkStore {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let bad = bytes.ends_with(".bad".as_bytes());
|
let bad = bytes.ends_with(b".bad");
|
||||||
return Some((Ok(entry), percentage, bad));
|
return Some((Ok(entry), percentage, bad));
|
||||||
}
|
}
|
||||||
Some(Err(err)) => {
|
Some(Err(err)) => {
|
||||||
@ -402,7 +401,7 @@ impl ChunkStore {
|
|||||||
file.write_all(raw_data)?;
|
file.write_all(raw_data)?;
|
||||||
|
|
||||||
if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
|
if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
|
||||||
if let Err(_) = std::fs::remove_file(&tmp_path) { /* ignore */ }
|
if std::fs::remove_file(&tmp_path).is_err() { /* ignore */ }
|
||||||
bail!(
|
bail!(
|
||||||
"Atomic rename on store '{}' failed for chunk {} - {}",
|
"Atomic rename on store '{}' failed for chunk {} - {}",
|
||||||
self.name,
|
self.name,
|
||||||
|
@ -59,7 +59,7 @@ where
|
|||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
this.scan_pos = 0;
|
this.scan_pos = 0;
|
||||||
if this.buffer.len() > 0 {
|
if !this.buffer.is_empty() {
|
||||||
return Poll::Ready(Some(Ok(this.buffer.split())));
|
return Poll::Ready(Some(Ok(this.buffer.split())));
|
||||||
} else {
|
} else {
|
||||||
return Poll::Ready(None);
|
return Poll::Ready(None);
|
||||||
@ -111,7 +111,7 @@ where
|
|||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
// last chunk can have any size
|
// last chunk can have any size
|
||||||
if this.buffer.len() > 0 {
|
if !this.buffer.is_empty() {
|
||||||
return Poll::Ready(Some(Ok(this.buffer.split())));
|
return Poll::Ready(Some(Ok(this.buffer.split())));
|
||||||
} else {
|
} else {
|
||||||
return Poll::Ready(None);
|
return Poll::Ready(None);
|
||||||
|
@ -36,7 +36,7 @@ impl <R: BufRead> CryptReader<R> {
|
|||||||
impl <R: BufRead> Read for CryptReader<R> {
|
impl <R: BufRead> Read for CryptReader<R> {
|
||||||
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
|
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
|
||||||
if self.small_read_buf.len() > 0 {
|
if !self.small_read_buf.is_empty() {
|
||||||
let max = if self.small_read_buf.len() > buf.len() { buf.len() } else { self.small_read_buf.len() };
|
let max = if self.small_read_buf.len() > buf.len() { buf.len() } else { self.small_read_buf.len() };
|
||||||
let rest = self.small_read_buf.split_off(max);
|
let rest = self.small_read_buf.split_off(max);
|
||||||
buf[..max].copy_from_slice(&self.small_read_buf);
|
buf[..max].copy_from_slice(&self.small_read_buf);
|
||||||
@ -50,7 +50,7 @@ impl <R: BufRead> Read for CryptReader<R> {
|
|||||||
if buf.len() <= 2*self.block_size {
|
if buf.len() <= 2*self.block_size {
|
||||||
let mut outbuf = [0u8; 1024];
|
let mut outbuf = [0u8; 1024];
|
||||||
|
|
||||||
let count = if data.len() == 0 { // EOF
|
let count = if data.is_empty() { // EOF
|
||||||
let written = self.crypter.finalize(&mut outbuf)?;
|
let written = self.crypter.finalize(&mut outbuf)?;
|
||||||
self.finalized = true;
|
self.finalized = true;
|
||||||
written
|
written
|
||||||
@ -72,7 +72,7 @@ impl <R: BufRead> Read for CryptReader<R> {
|
|||||||
buf[..count].copy_from_slice(&outbuf[..count]);
|
buf[..count].copy_from_slice(&outbuf[..count]);
|
||||||
Ok(count)
|
Ok(count)
|
||||||
}
|
}
|
||||||
} else if data.len() == 0 { // EOF
|
} else if data.is_empty() { // EOF
|
||||||
let rest = self.crypter.finalize(buf)?;
|
let rest = self.crypter.finalize(buf)?;
|
||||||
self.finalized = true;
|
self.finalized = true;
|
||||||
Ok(rest)
|
Ok(rest)
|
||||||
|
@ -408,9 +408,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
|
|||||||
chunk_size: usize,
|
chunk_size: usize,
|
||||||
compress: bool,
|
compress: bool,
|
||||||
) -> Result<(DataBlob, [u8; 32]), Error> {
|
) -> Result<(DataBlob, [u8; 32]), Error> {
|
||||||
|
let zero_bytes = vec![0; chunk_size];
|
||||||
let mut zero_bytes = Vec::with_capacity(chunk_size);
|
|
||||||
zero_bytes.resize(chunk_size, 0u8);
|
|
||||||
let mut chunk_builder = DataChunkBuilder::new(&zero_bytes).compress(compress);
|
let mut chunk_builder = DataChunkBuilder::new(&zero_bytes).compress(compress);
|
||||||
if let Some(ref crypt_config) = crypt_config {
|
if let Some(ref crypt_config) = crypt_config {
|
||||||
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
||||||
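`vec![0; chunk_size]` replaces the two-step with_capacity/resize above: one line, and the allocator can hand back zeroed memory directly. A trivial sketch:

// Allocate and zero-fill in a single step, instead of reserving
// capacity and then resizing with a fill value.
fn zero_chunk(chunk_size: usize) -> Vec<u8> {
    vec![0u8; chunk_size]
}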
|
@ -334,9 +334,7 @@ impl DataStore {
|
|||||||
auth_id: &Authid,
|
auth_id: &Authid,
|
||||||
) -> Result<(Authid, DirLockGuard), Error> {
|
) -> Result<(Authid, DirLockGuard), Error> {
|
||||||
// create intermediate path first:
|
// create intermediate path first:
|
||||||
let base_path = self.base_path();
|
let mut full_path = self.base_path();
|
||||||
|
|
||||||
let mut full_path = base_path.clone();
|
|
||||||
full_path.push(backup_group.backup_type());
|
full_path.push(backup_group.backup_type());
|
||||||
std::fs::create_dir_all(&full_path)?;
|
std::fs::create_dir_all(&full_path)?;
|
||||||
|
|
||||||
@ -392,7 +390,7 @@ impl DataStore {
|
|||||||
fn is_hidden(entry: &walkdir::DirEntry) -> bool {
|
fn is_hidden(entry: &walkdir::DirEntry) -> bool {
|
||||||
entry.file_name()
|
entry.file_name()
|
||||||
.to_str()
|
.to_str()
|
||||||
.map(|s| s.starts_with("."))
|
.map(|s| s.starts_with('.'))
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
let handle_entry_err = |err: walkdir::Error| {
|
let handle_entry_err = |err: walkdir::Error| {
|
||||||
@ -478,12 +476,11 @@ impl DataStore {
|
|||||||
let image_list = self.list_images()?;
|
let image_list = self.list_images()?;
|
||||||
let image_count = image_list.len();
|
let image_count = image_list.len();
|
||||||
|
|
||||||
let mut done = 0;
|
|
||||||
let mut last_percentage: usize = 0;
|
let mut last_percentage: usize = 0;
|
||||||
|
|
||||||
let mut strange_paths_count: u64 = 0;
|
let mut strange_paths_count: u64 = 0;
|
||||||
|
|
||||||
for img in image_list {
|
for (i, img) in image_list.into_iter().enumerate() {
|
||||||
|
|
||||||
worker.check_abort()?;
|
worker.check_abort()?;
|
||||||
tools::fail_on_shutdown()?;
|
tools::fail_on_shutdown()?;
|
||||||
@ -516,15 +513,14 @@ impl DataStore {
|
|||||||
Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
|
Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
|
||||||
Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
|
Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
|
||||||
}
|
}
|
||||||
done += 1;
|
|
||||||
|
|
||||||
let percentage = done*100/image_count;
|
let percentage = (i + 1) * 100 / image_count;
|
||||||
if percentage > last_percentage {
|
if percentage > last_percentage {
|
||||||
crate::task_log!(
|
crate::task_log!(
|
||||||
worker,
|
worker,
|
||||||
"marked {}% ({} of {} index files)",
|
"marked {}% ({} of {} index files)",
|
||||||
percentage,
|
percentage,
|
||||||
done,
|
i + 1,
|
||||||
image_count,
|
image_count,
|
||||||
);
|
);
|
||||||
last_percentage = percentage;
|
last_percentage = percentage;
|
||||||
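The marking loop above trades a hand-maintained `done` counter for `enumerate()`; a minimal sketch of the progress arithmetic (item type is illustrative):

// enumerate() supplies the running index, so `(i + 1) * 100 / total`
// is the percentage after finishing item i; printing only on change
// keeps the task log short.
fn report_progress(items: &[&str]) {
    let total = items.len();
    let mut last_percentage = 0;
    for (i, _item) in items.iter().enumerate() {
        let percentage = (i + 1) * 100 / total;
        if percentage > last_percentage {
            println!("marked {}% ({} of {} index files)", percentage, i + 1, total);
            last_percentage = percentage;
        }
    }
}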
@ -548,7 +544,7 @@ impl DataStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn garbage_collection_running(&self) -> bool {
|
pub fn garbage_collection_running(&self) -> bool {
|
||||||
if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
|
!matches!(self.gc_mutex.try_lock(), Ok(_))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {
|
pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {
|
||||||
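garbage_collection_running above probes the GC mutex: if try_lock fails, another task holds it and a collection is in progress. A standalone sketch (the gc_mutex field type is assumed):

use std::sync::Mutex;

// try_lock() as a non-blocking "is it running?" probe; the guard from a
// successful lock is dropped at the end of the expression, so nothing
// stays locked.
fn garbage_collection_running(gc_mutex: &Mutex<()>) -> bool {
    !matches!(gc_mutex.try_lock(), Ok(_))
}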
|
@ -194,7 +194,7 @@ impl IndexFile for DynamicIndexReader {
|
|||||||
if pos >= self.index.len() {
|
if pos >= self.index.len() {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some(unsafe { std::mem::transmute(self.chunk_digest(pos).as_ptr()) })
|
Some(unsafe { &*(self.chunk_digest(pos).as_ptr() as *const [u8; 32]) })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
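Both index readers in this commit replace mem::transmute with an explicit pointer cast when viewing a digest as &[u8; 32]; a sketch of the cast and its safety contract:

// The cast names both source and target types, unlike transmute, which
// infers them. The caller must guarantee `ptr` is non-null, aligned,
// and points at 32 readable bytes for the lifetime 'a.
unsafe fn digest_ref<'a>(ptr: *const u8) -> &'a [u8; 32] {
    &*(ptr as *const [u8; 32])
}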
@ -229,7 +229,7 @@ impl IndexFile for DynamicIndexReader {
|
|||||||
|
|
||||||
Some(ChunkReadInfo {
|
Some(ChunkReadInfo {
|
||||||
range: start..end,
|
range: start..end,
|
||||||
digest: self.index[pos].digest.clone(),
|
digest: self.index[pos].digest,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -63,11 +63,11 @@ pub struct EncryptedDataBlobHeader {
|
|||||||
///
|
///
|
||||||
/// Panics on unknown magic numbers.
|
/// Panics on unknown magic numbers.
|
||||||
pub fn header_size(magic: &[u8; 8]) -> usize {
|
pub fn header_size(magic: &[u8; 8]) -> usize {
|
||||||
match magic {
|
match *magic {
|
||||||
&UNCOMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
|
UNCOMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
|
||||||
&COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
|
COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
|
||||||
&ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
|
ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
|
||||||
&ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
|
ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
|
||||||
_ => panic!("unknown blob magic"),
|
_ => panic!("unknown blob magic"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
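header_size above dereferences the scrutinee so each arm is a plain constant pattern rather than a reference pattern; the same shape with illustrative magic constants:

const MAGIC_A: [u8; 8] = *b"AAAAAAAA";
const MAGIC_B: [u8; 8] = *b"BBBBBBBB";

// `match *magic` copies the 8-byte array and lets the arms name the
// constants directly, dropping the `&CONST` noise in every arm.
fn header_kind(magic: &[u8; 8]) -> &'static str {
    match *magic {
        MAGIC_A => "type A",
        MAGIC_B => "type B",
        _ => panic!("unknown blob magic"),
    }
}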
|
@ -60,7 +60,7 @@ impl FixedIndexReader {
|
|||||||
pub fn open(path: &Path) -> Result<Self, Error> {
|
pub fn open(path: &Path) -> Result<Self, Error> {
|
||||||
File::open(path)
|
File::open(path)
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(|file| Self::new(file))
|
.and_then(Self::new)
|
||||||
.map_err(|err| format_err!("Unable to open fixed index {:?} - {}", path, err))
|
.map_err(|err| format_err!("Unable to open fixed index {:?} - {}", path, err))
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -126,7 +126,7 @@ impl FixedIndexReader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn unmap(&mut self) -> Result<(), Error> {
|
fn unmap(&mut self) -> Result<(), Error> {
|
||||||
if self.index == std::ptr::null_mut() {
|
if self.index.is_null() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -166,7 +166,7 @@ impl IndexFile for FixedIndexReader {
|
|||||||
if pos >= self.index_length {
|
if pos >= self.index_length {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some(unsafe { std::mem::transmute(self.index.add(pos * 32)) })
|
Some(unsafe { &*(self.index.add(pos * 32) as *const [u8; 32]) })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -324,7 +324,7 @@ impl FixedIndexWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn unmap(&mut self) -> Result<(), Error> {
|
fn unmap(&mut self) -> Result<(), Error> {
|
||||||
if self.index == std::ptr::null_mut() {
|
if self.index.is_null() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -342,7 +342,7 @@ impl FixedIndexWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn close(&mut self) -> Result<[u8; 32], Error> {
|
pub fn close(&mut self) -> Result<[u8; 32], Error> {
|
||||||
if self.index == std::ptr::null_mut() {
|
if self.index.is_null() {
|
||||||
bail!("cannot close already closed index file.");
|
bail!("cannot close already closed index file.");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -437,7 +437,7 @@ impl FixedIndexWriter {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
if self.index == std::ptr::null_mut() {
|
if self.index.is_null() {
|
||||||
bail!("cannot write to closed index file.");
|
bail!("cannot write to closed index file.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -336,7 +336,7 @@ pub fn rsa_decrypt_key_config(
|
|||||||
let decrypted = rsa
|
let decrypted = rsa
|
||||||
.private_decrypt(key, &mut buffer, openssl::rsa::Padding::PKCS1)
|
.private_decrypt(key, &mut buffer, openssl::rsa::Padding::PKCS1)
|
||||||
.map_err(|err| format_err!("failed to decrypt KeyConfig using RSA - {}", err))?;
|
.map_err(|err| format_err!("failed to decrypt KeyConfig using RSA - {}", err))?;
|
||||||
decrypt_key(&mut buffer[..decrypted], passphrase)
|
decrypt_key(&buffer[..decrypted], passphrase)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -372,9 +372,9 @@ fn encrypt_decrypt_test() -> Result<(), Error> {
|
|||||||
hint: None,
|
hint: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let encrypted = rsa_encrypt_key_config(public.clone(), &key).expect("encryption failed");
|
let encrypted = rsa_encrypt_key_config(public, &key).expect("encryption failed");
|
||||||
let (decrypted, created, fingerprint) =
|
let (decrypted, created, fingerprint) =
|
||||||
rsa_decrypt_key_config(private.clone(), &encrypted, &passphrase)
|
rsa_decrypt_key_config(private, &encrypted, &passphrase)
|
||||||
.expect("decryption failed");
|
.expect("decryption failed");
|
||||||
|
|
||||||
assert_eq!(key.created, created);
|
assert_eq!(key.created, created);
|
||||||
|
@ -186,7 +186,7 @@ impl BackupManifest {
|
|||||||
manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
|
manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
|
let manifest = serde_json::to_string_pretty(&manifest).unwrap();
|
||||||
Ok(manifest)
|
Ok(manifest)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,7 +8,7 @@ enum PruneMark { Keep, KeepPartial, Remove }
|
|||||||
|
|
||||||
fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
||||||
mark: &mut HashMap<PathBuf, PruneMark>,
|
mark: &mut HashMap<PathBuf, PruneMark>,
|
||||||
list: &Vec<BackupInfo>,
|
list: &[BackupInfo],
|
||||||
keep: usize,
|
keep: usize,
|
||||||
select_id: F,
|
select_id: F,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -26,7 +26,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
|||||||
|
|
||||||
for info in list {
|
for info in list {
|
||||||
let backup_id = info.backup_dir.relative_path();
|
let backup_id = info.backup_dir.relative_path();
|
||||||
if let Some(_) = mark.get(&backup_id) { continue; }
|
if mark.get(&backup_id).is_some() { continue; }
|
||||||
let sel_id: String = select_id(&info)?;
|
let sel_id: String = select_id(&info)?;
|
||||||
|
|
||||||
if already_included.contains(&sel_id) { continue; }
|
if already_included.contains(&sel_id) { continue; }
|
||||||
@ -45,7 +45,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
|||||||
|
|
||||||
fn remove_incomplete_snapshots(
|
fn remove_incomplete_snapshots(
|
||||||
mark: &mut HashMap<PathBuf, PruneMark>,
|
mark: &mut HashMap<PathBuf, PruneMark>,
|
||||||
list: &Vec<BackupInfo>,
|
list: &[BackupInfo],
|
||||||
) {
|
) {
|
||||||
|
|
||||||
let mut keep_unfinished = true;
|
let mut keep_unfinished = true;
|
||||||
|
@ -342,7 +342,7 @@ pub fn verify_backup_dir_with_lock(
|
|||||||
};
|
};
|
||||||
|
|
||||||
if let Some(filter) = filter {
|
if let Some(filter) = filter {
|
||||||
if filter(&manifest) == false {
|
if !filter(&manifest) {
|
||||||
task_log!(
|
task_log!(
|
||||||
worker,
|
worker,
|
||||||
"SKIPPED: verify {}:{} (recently verified)",
|
"SKIPPED: verify {}:{} (recently verified)",
|
||||||
|
@ -898,7 +898,7 @@ async fn create_backup(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
|
let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);
|
||||||
|
|
||||||
let client = connect(&repo)?;
|
let client = connect(&repo)?;
|
||||||
record_repository(&repo);
|
record_repository(&repo);
|
||||||
@ -917,7 +917,7 @@ async fn create_backup(
|
|||||||
let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
|
let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
|
||||||
println!("Encryption key fingerprint: {}", fingerprint);
|
println!("Encryption key fingerprint: {}", fingerprint);
|
||||||
|
|
||||||
let crypt_config = CryptConfig::new(key.clone())?;
|
let crypt_config = CryptConfig::new(key)?;
|
||||||
|
|
||||||
match key::find_master_pubkey()? {
|
match key::find_master_pubkey()? {
|
||||||
Some(ref path) if path.exists() => {
|
Some(ref path) if path.exists() => {
|
||||||
@ -1464,7 +1464,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
|
|||||||
if quiet {
|
if quiet {
|
||||||
let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
|
let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
|
||||||
item["keep"].as_bool() == Some(false)
|
item["keep"].as_bool() == Some(false)
|
||||||
}).map(|v| v.clone()).collect();
|
}).cloned().collect();
|
||||||
data = list.into();
|
data = list.into();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4,7 +4,7 @@ use std::os::unix::io::AsRawFd;
|
|||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use futures::*;
|
use futures::*;
|
||||||
use hyper;
|
|
||||||
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
|
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
|
||||||
use tokio_stream::wrappers::ReceiverStream;
|
use tokio_stream::wrappers::ReceiverStream;
|
||||||
|
|
||||||
@ -218,10 +218,8 @@ fn accept_connections(
|
|||||||
|
|
||||||
match result {
|
match result {
|
||||||
Ok(Ok(())) => {
|
Ok(Ok(())) => {
|
||||||
if let Err(_) = sender.send(Ok(stream)).await {
|
if sender.send(Ok(stream)).await.is_err() && debug {
|
||||||
if debug {
|
eprintln!("detect closed connection channel");
|
||||||
eprintln!("detect closed connection channel");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(Err(err)) => {
|
Ok(Err(err)) => {
|
||||||
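The accept loop above collapses a nested `if debug` into a single condition; evaluation order preserves the semantics, since the send still runs when debug is false. A sketch with a hypothetical tokio channel:

// The send() on the left always executes; `&& debug` only gates the
// logging, so collapsing the nested if does not change behavior.
async fn forward(sender: tokio::sync::mpsc::Sender<u32>, value: u32, debug: bool) {
    if sender.send(value).await.is_err() && debug {
        eprintln!("detect closed connection channel");
    }
}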
@ -583,16 +581,16 @@ async fn schedule_task_log_rotate() {
|
|||||||
false,
|
false,
|
||||||
move |worker| {
|
move |worker| {
|
||||||
job.start(&worker.upid().to_string())?;
|
job.start(&worker.upid().to_string())?;
|
||||||
worker.log(format!("starting task log rotation"));
|
worker.log("starting task log rotation".to_string());
|
||||||
|
|
||||||
let result = try_block!({
|
let result = try_block!({
|
||||||
let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
|
let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
|
||||||
let max_files = 20; // times twenty files gives > 100000 task entries
|
let max_files = 20; // times twenty files gives > 100000 task entries
|
||||||
let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
|
let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
|
||||||
if has_rotated {
|
if has_rotated {
|
||||||
worker.log(format!("task log archive was rotated"));
|
worker.log("task log archive was rotated".to_string());
|
||||||
} else {
|
} else {
|
||||||
worker.log(format!("task log archive was not rotated"));
|
worker.log("task log archive was not rotated".to_string());
|
||||||
}
|
}
|
||||||
|
|
||||||
let max_size = 32 * 1024 * 1024 - 1;
|
let max_size = 32 * 1024 * 1024 - 1;
|
||||||
@ -603,18 +601,18 @@ async fn schedule_task_log_rotate() {
|
|||||||
if logrotate.rotate(max_size, None, Some(max_files))? {
|
if logrotate.rotate(max_size, None, Some(max_files))? {
|
||||||
println!("rotated access log, telling daemons to re-open log file");
|
println!("rotated access log, telling daemons to re-open log file");
|
||||||
proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
|
proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
|
||||||
worker.log(format!("API access log was rotated"));
|
worker.log("API access log was rotated".to_string());
|
||||||
} else {
|
} else {
|
||||||
worker.log(format!("API access log was not rotated"));
|
worker.log("API access log was not rotated".to_string());
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
|
let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
|
||||||
.ok_or_else(|| format_err!("could not get API auth log file names"))?;
|
.ok_or_else(|| format_err!("could not get API auth log file names"))?;
|
||||||
|
|
||||||
if logrotate.rotate(max_size, None, Some(max_files))? {
|
if logrotate.rotate(max_size, None, Some(max_files))? {
|
||||||
worker.log(format!("API authentication log was rotated"));
|
worker.log("API authentication log was rotated".to_string());
|
||||||
} else {
|
} else {
|
||||||
worker.log(format!("API authentication log was not rotated"));
|
worker.log("API authentication log was not rotated".to_string());
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -751,7 +749,7 @@ async fn generate_host_stats(save: bool) {
|
|||||||
match datastore::config() {
|
match datastore::config() {
|
||||||
Ok((config, _)) => {
|
Ok((config, _)) => {
|
||||||
let datastore_list: Vec<datastore::DataStoreConfig> =
|
let datastore_list: Vec<datastore::DataStoreConfig> =
|
||||||
config.convert_to_typed_array("datastore").unwrap_or(Vec::new());
|
config.convert_to_typed_array("datastore").unwrap_or_default();
|
||||||
|
|
||||||
for config in datastore_list {
|
for config in datastore_list {
|
||||||
|
|
||||||
|
@ -601,16 +601,14 @@ fn debug_scan(param: Value) -> Result<(), Error> {
|
|||||||
Ok(header) => {
|
Ok(header) => {
|
||||||
if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
|
if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
|
||||||
println!("got MediaContentHeader with wrong magic: {:?}", header.magic);
|
println!("got MediaContentHeader with wrong magic: {:?}", header.magic);
|
||||||
|
} else if let Some(name) = PROXMOX_BACKUP_CONTENT_NAME.get(&header.content_magic) {
|
||||||
|
println!("got content header: {}", name);
|
||||||
|
println!(" uuid: {}", header.content_uuid());
|
||||||
|
println!(" ctime: {}", strftime_local("%c", header.ctime)?);
|
||||||
|
println!(" hsize: {}", HumanByte::from(header.size as usize));
|
||||||
|
println!(" part: {}", header.part_number);
|
||||||
} else {
|
} else {
|
||||||
if let Some(name) = PROXMOX_BACKUP_CONTENT_NAME.get(&header.content_magic) {
|
println!("got unknown content header: {:?}", header.content_magic);
|
||||||
println!("got content header: {}", name);
|
|
||||||
println!(" uuid: {}", header.content_uuid());
|
|
||||||
println!(" ctime: {}", strftime_local("%c", header.ctime)?);
|
|
||||||
println!(" hsize: {}", HumanByte::from(header.size as usize));
|
|
||||||
println!(" part: {}", header.part_number);
|
|
||||||
} else {
|
|
||||||
println!("got unknown content header: {:?}", header.content_magic);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
|
@ -293,7 +293,7 @@ fn test_crypt_speed(
|
|||||||
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
benchmark_result.sha256.speed = Some(speed);
|
benchmark_result.sha256.speed = Some(speed);
|
||||||
|
|
||||||
eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000_.0);
|
eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000.0);
|
||||||
|
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
let start_time = std::time::Instant::now();
|
||||||
@ -308,7 +308,7 @@ fn test_crypt_speed(
|
|||||||
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
benchmark_result.compress.speed = Some(speed);
|
benchmark_result.compress.speed = Some(speed);
|
||||||
|
|
||||||
eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000_.0);
|
eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000.0);
|
||||||
|
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
let start_time = std::time::Instant::now();
|
||||||
@ -328,7 +328,7 @@ fn test_crypt_speed(
|
|||||||
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
benchmark_result.decompress.speed = Some(speed);
|
benchmark_result.decompress.speed = Some(speed);
|
||||||
|
|
||||||
eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000_.0);
|
eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000.0);
|
||||||
|
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
let start_time = std::time::Instant::now();
|
||||||
@ -343,7 +343,7 @@ fn test_crypt_speed(
|
|||||||
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
benchmark_result.aes256_gcm.speed = Some(speed);
|
benchmark_result.aes256_gcm.speed = Some(speed);
|
||||||
|
|
||||||
eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000_.0);
|
eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);
|
||||||
|
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
let start_time = std::time::Instant::now();
|
||||||
@ -361,7 +361,7 @@ fn test_crypt_speed(
|
|||||||
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||||
benchmark_result.verify.speed = Some(speed);
|
benchmark_result.verify.speed = Some(speed);
|
||||||
|
|
||||||
eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000_.0);
|
eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000.0);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -189,12 +189,12 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let server_archive_name = if archive_name.ends_with(".pxar") {
|
let server_archive_name = if archive_name.ends_with(".pxar") {
|
||||||
if let None = target {
|
if target.is_none() {
|
||||||
bail!("use the 'mount' command to mount pxar archives");
|
bail!("use the 'mount' command to mount pxar archives");
|
||||||
}
|
}
|
||||||
format!("{}.didx", archive_name)
|
format!("{}.didx", archive_name)
|
||||||
} else if archive_name.ends_with(".img") {
|
} else if archive_name.ends_with(".img") {
|
||||||
if let Some(_) = target {
|
if target.is_some() {
|
||||||
bail!("use the 'map' command to map drive images");
|
bail!("use the 'map' command to map drive images");
|
||||||
}
|
}
|
||||||
format!("{}.fidx", archive_name)
|
format!("{}.fidx", archive_name)
|
||||||
|
@ -239,7 +239,7 @@ async fn get_status(
|
|||||||
}
|
}
|
||||||
let text = value.as_str().unwrap().to_string();
|
let text = value.as_str().unwrap().to_string();
|
||||||
if text.is_empty() {
|
if text.is_empty() {
|
||||||
return Ok(String::from("--FULL--"));
|
Ok(String::from("--FULL--"))
|
||||||
} else {
|
} else {
|
||||||
Ok(text)
|
Ok(text)
|
||||||
}
|
}
|
||||||
|
@ -56,7 +56,7 @@ fn get_tape_handle(param: &Value) -> Result<LinuxTapeHandle, Error> {
|
|||||||
let file = unsafe { File::from_raw_fd(fd) };
|
let file = unsafe { File::from_raw_fd(fd) };
|
||||||
check_tape_is_linux_tape_device(&file)?;
|
check_tape_is_linux_tape_device(&file)?;
|
||||||
LinuxTapeHandle::new(file)
|
LinuxTapeHandle::new(file)
|
||||||
} else if let Some(name) = std::env::var("PROXMOX_TAPE_DRIVE").ok() {
|
} else if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
|
||||||
let (config, _digest) = config::drive::config()?;
|
let (config, _digest) = config::drive::config()?;
|
||||||
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
|
let drive: LinuxTapeDrive = config.lookup("linux", &name)?;
|
||||||
eprintln!("using device {}", drive.path);
|
eprintln!("using device {}", drive.path);
|
||||||
@ -292,13 +292,11 @@ fn main() -> Result<(), Error> {
|
|||||||
bail!("this program needs to be run with setuid root");
|
bail!("this program needs to be run with setuid root");
|
||||||
}
|
}
|
||||||
|
|
||||||
if !running_uid.is_root() {
|
if !running_uid.is_root() && (running_uid != backup_uid || running_gid != backup_gid) {
|
||||||
if running_uid != backup_uid || running_gid != backup_gid {
|
bail!(
|
||||||
bail!(
|
"Not running as backup user or group (got uid {} gid {})",
|
||||||
"Not running as backup user or group (got uid {} gid {})",
|
running_uid, running_gid,
|
||||||
running_uid, running_gid,
|
);
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let cmd_def = CliCommandMap::new()
|
let cmd_def = CliCommandMap::new()
|
||||||
|
@ -74,12 +74,14 @@ pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
|||||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Audit can view configuration and status information, but not modify it.
|
/// Audit can view configuration and status information, but not modify it.
|
||||||
pub const ROLE_AUDIT: u64 = 0
|
pub const ROLE_AUDIT: u64 = 0
|
||||||
| PRIV_SYS_AUDIT
|
| PRIV_SYS_AUDIT
|
||||||
| PRIV_DATASTORE_AUDIT;
|
| PRIV_DATASTORE_AUDIT;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Datastore.Admin can do anything on the datastore.
|
/// Datastore.Admin can do anything on the datastore.
|
||||||
pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
||||||
| PRIV_DATASTORE_AUDIT
|
| PRIV_DATASTORE_AUDIT
|
||||||
@ -90,6 +92,7 @@ pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
|||||||
| PRIV_DATASTORE_PRUNE;
|
| PRIV_DATASTORE_PRUNE;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Datastore.Reader can read/verify datastore content and do restore
|
/// Datastore.Reader can read/verify datastore content and do restore
|
||||||
pub const ROLE_DATASTORE_READER: u64 = 0
|
pub const ROLE_DATASTORE_READER: u64 = 0
|
||||||
| PRIV_DATASTORE_AUDIT
|
| PRIV_DATASTORE_AUDIT
|
||||||
@ -97,27 +100,32 @@ pub const ROLE_DATASTORE_READER: u64 = 0
|
|||||||
| PRIV_DATASTORE_READ;
|
| PRIV_DATASTORE_READ;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Datastore.Backup can do backup and restore, but no prune.
|
/// Datastore.Backup can do backup and restore, but no prune.
|
||||||
pub const ROLE_DATASTORE_BACKUP: u64 = 0
|
pub const ROLE_DATASTORE_BACKUP: u64 = 0
|
||||||
| PRIV_DATASTORE_BACKUP;
|
| PRIV_DATASTORE_BACKUP;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Datastore.PowerUser can do backup, restore, and prune.
|
/// Datastore.PowerUser can do backup, restore, and prune.
|
||||||
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
|
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
|
||||||
| PRIV_DATASTORE_PRUNE
|
| PRIV_DATASTORE_PRUNE
|
||||||
| PRIV_DATASTORE_BACKUP;
|
| PRIV_DATASTORE_BACKUP;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Datastore.Audit can audit the datastore.
|
/// Datastore.Audit can audit the datastore.
|
||||||
pub const ROLE_DATASTORE_AUDIT: u64 = 0
|
pub const ROLE_DATASTORE_AUDIT: u64 = 0
|
||||||
| PRIV_DATASTORE_AUDIT;
|
| PRIV_DATASTORE_AUDIT;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Remote.Audit can audit the remote
|
/// Remote.Audit can audit the remote
|
||||||
pub const ROLE_REMOTE_AUDIT: u64 = 0
|
pub const ROLE_REMOTE_AUDIT: u64 = 0
|
||||||
| PRIV_REMOTE_AUDIT;
|
| PRIV_REMOTE_AUDIT;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Remote.Admin can do anything on the remote.
|
/// Remote.Admin can do anything on the remote.
|
||||||
pub const ROLE_REMOTE_ADMIN: u64 = 0
|
pub const ROLE_REMOTE_ADMIN: u64 = 0
|
||||||
| PRIV_REMOTE_AUDIT
|
| PRIV_REMOTE_AUDIT
|
||||||
@ -125,6 +133,7 @@ pub const ROLE_REMOTE_ADMIN: u64 = 0
|
|||||||
| PRIV_REMOTE_READ;
|
| PRIV_REMOTE_READ;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
/// Remote.SyncOperator can do read and prune on the remote.
|
/// Remote.SyncOperator can do read and prune on the remote.
|
||||||
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
|
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
|
||||||
| PRIV_REMOTE_AUDIT
|
| PRIV_REMOTE_AUDIT
|
||||||
@ -363,6 +372,7 @@ impl AclTreeNode {
|
|||||||
fn extract_group_roles(&self, _user: &Userid, leaf: bool) -> HashMap<String, bool> {
|
fn extract_group_roles(&self, _user: &Userid, leaf: bool) -> HashMap<String, bool> {
|
||||||
let mut map = HashMap::new();
|
let mut map = HashMap::new();
|
||||||
|
|
||||||
|
#[allow(clippy::for_kv_map)]
|
||||||
for (_group, roles) in &self.groups {
|
for (_group, roles) in &self.groups {
|
||||||
let is_member = false; // fixme: check if user is member of the group
|
let is_member = false; // fixme: check if user is member of the group
|
||||||
if !is_member {
|
if !is_member {
|
||||||
@ -402,7 +412,7 @@ impl AclTreeNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn insert_group_role(&mut self, group: String, role: String, propagate: bool) {
|
fn insert_group_role(&mut self, group: String, role: String, propagate: bool) {
|
||||||
let map = self.groups.entry(group).or_insert_with(|| HashMap::new());
|
let map = self.groups.entry(group).or_insert_with(HashMap::new);
|
||||||
if role == ROLE_NAME_NO_ACCESS {
|
if role == ROLE_NAME_NO_ACCESS {
|
||||||
map.clear();
|
map.clear();
|
||||||
map.insert(role, propagate);
|
map.insert(role, propagate);
|
||||||
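Several hunks here pass a constructor path to or_insert_with instead of wrapping it in a closure; one representative sketch:

use std::collections::HashMap;

// or_insert_with accepts any FnOnce() -> V, so HashMap::new can be
// passed by path; `|| HashMap::new()` is a redundant closure.
fn insert_role(
    groups: &mut HashMap<String, HashMap<String, bool>>,
    group: String,
    role: String,
    propagate: bool,
) {
    let map = groups.entry(group).or_insert_with(HashMap::new);
    map.insert(role, propagate);
}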
@ -413,7 +423,7 @@ impl AclTreeNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
|
fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
|
||||||
let map = self.users.entry(auth_id).or_insert_with(|| HashMap::new());
|
let map = self.users.entry(auth_id).or_insert_with(HashMap::new);
|
||||||
if role == ROLE_NAME_NO_ACCESS {
|
if role == ROLE_NAME_NO_ACCESS {
|
||||||
map.clear();
|
map.clear();
|
||||||
map.insert(role, propagate);
|
map.insert(role, propagate);
|
||||||
@ -435,7 +445,7 @@ impl AclTree {
|
|||||||
/// Iterates over the tree looking for a node matching `path`.
|
/// Iterates over the tree looking for a node matching `path`.
|
||||||
pub fn find_node(&mut self, path: &str) -> Option<&mut AclTreeNode> {
|
pub fn find_node(&mut self, path: &str) -> Option<&mut AclTreeNode> {
|
||||||
let path = split_acl_path(path);
|
let path = split_acl_path(path);
|
||||||
return self.get_node(&path);
|
self.get_node(&path)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_node(&mut self, path: &[&str]) -> Option<&mut AclTreeNode> {
|
fn get_node(&mut self, path: &[&str]) -> Option<&mut AclTreeNode> {
|
||||||
@ -455,7 +465,7 @@ impl AclTree {
|
|||||||
node = node
|
node = node
|
||||||
.children
|
.children
|
||||||
.entry(String::from(*comp))
|
.entry(String::from(*comp))
|
||||||
.or_insert_with(|| AclTreeNode::new());
|
.or_insert_with(AclTreeNode::new);
|
||||||
}
|
}
|
||||||
node
|
node
|
||||||
}
|
}
|
||||||
@ -521,12 +531,12 @@ impl AclTree {
|
|||||||
if *propagate {
|
if *propagate {
|
||||||
role_ug_map1
|
role_ug_map1
|
||||||
.entry(role)
|
.entry(role)
|
||||||
.or_insert_with(|| BTreeSet::new())
|
.or_insert_with(BTreeSet::new)
|
||||||
.insert(auth_id);
|
.insert(auth_id);
|
||||||
} else {
|
} else {
|
||||||
role_ug_map0
|
role_ug_map0
|
||||||
.entry(role)
|
.entry(role)
|
||||||
.or_insert_with(|| BTreeSet::new())
|
.or_insert_with(BTreeSet::new)
|
||||||
.insert(auth_id);
|
.insert(auth_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -538,12 +548,12 @@ impl AclTree {
|
|||||||
if *propagate {
|
if *propagate {
|
||||||
role_ug_map1
|
role_ug_map1
|
||||||
.entry(role)
|
.entry(role)
|
||||||
.or_insert_with(|| BTreeSet::new())
|
.or_insert_with(BTreeSet::new)
|
||||||
.insert(group);
|
.insert(group);
|
||||||
} else {
|
} else {
|
||||||
role_ug_map0
|
role_ug_map0
|
||||||
.entry(role)
|
.entry(role)
|
||||||
.or_insert_with(|| BTreeSet::new())
|
.or_insert_with(BTreeSet::new)
|
||||||
.insert(group);
|
.insert(group);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -563,7 +573,7 @@ impl AclTree {
|
|||||||
});
|
});
|
||||||
result_map
|
result_map
|
||||||
.entry(item_list)
|
.entry(item_list)
|
||||||
.or_insert_with(|| BTreeSet::new())
|
.or_insert_with(BTreeSet::new)
|
||||||
.insert(item.to_string());
|
.insert(item.to_string());
|
||||||
}
|
}
|
||||||
result_map
|
result_map
|
||||||
@ -651,8 +661,7 @@ impl AclTree {
|
|||||||
if !ROLE_NAMES.contains_key(role) {
|
if !ROLE_NAMES.contains_key(role) {
|
||||||
bail!("unknown role '{}'", role);
|
bail!("unknown role '{}'", role);
|
||||||
}
|
}
|
||||||
if user_or_group.starts_with('@') {
|
if let Some(group) = user_or_group.strip_prefix('@') {
|
||||||
let group = &user_or_group[1..];
|
|
||||||
node.insert_group_role(group.to_string(), role.to_string(), propagate);
|
node.insert_group_role(group.to_string(), role.to_string(), propagate);
|
||||||
} else {
|
} else {
|
||||||
node.insert_user_role(user_or_group.parse()?, role.to_string(), propagate);
|
node.insert_user_role(user_or_group.parse()?, role.to_string(), propagate);
|
||||||
|
@ -98,7 +98,7 @@ impl CachedUserInfo {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn check_privs(
|
pub fn check_privs(
|
||||||
|
@ -135,8 +135,8 @@ pub const DATASTORE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.datastore.lck";
|
|||||||
|
|
||||||
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?;
|
let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
|
||||||
let content = content.unwrap_or(String::from(""));
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
let digest = openssl::sha::sha256(content.as_bytes());
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;
|
let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;
|
||||||
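The config() hunks in this commit all chain the `?` onto the read and defer the empty-string fallback; a sketch of why the closure form is preferred (the reader function is illustrative):

use std::fs;

// With unwrap_or(String::from("")) the fallback is constructed eagerly
// on every call; unwrap_or_else only runs the closure when the Option
// is actually None.
fn read_optional(path: &str) -> String {
    fs::read_to_string(path).ok().unwrap_or_else(|| "".to_string())
}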
|
@ -68,8 +68,8 @@ pub fn lock() -> Result<std::fs::File, Error> {
|
|||||||
|
|
||||||
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
let content = proxmox::tools::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?;
|
let content = proxmox::tools::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?
|
||||||
let content = content.unwrap_or(String::from(""));
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
let digest = openssl::sha::sha256(content.as_bytes());
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
let data = CONFIG.parse(DRIVE_CFG_FILENAME, &content)?;
|
let data = CONFIG.parse(DRIVE_CFG_FILENAME, &content)?;
|
||||||
|
@ -43,8 +43,8 @@ fn init() -> SectionConfig {
|
|||||||
config
|
config
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const MEDIA_POOL_CFG_FILENAME: &'static str = "/etc/proxmox-backup/media-pool.cfg";
|
pub const MEDIA_POOL_CFG_FILENAME: &str = "/etc/proxmox-backup/media-pool.cfg";
|
||||||
pub const MEDIA_POOL_CFG_LOCKFILE: &'static str = "/etc/proxmox-backup/.media-pool.lck";
|
pub const MEDIA_POOL_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.media-pool.lck";
|
||||||
|
|
||||||
pub fn lock() -> Result<std::fs::File, Error> {
|
pub fn lock() -> Result<std::fs::File, Error> {
|
||||||
open_file_locked(MEDIA_POOL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)
|
open_file_locked(MEDIA_POOL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)
|
||||||
@ -52,8 +52,8 @@ pub fn lock() -> Result<std::fs::File, Error> {
|
|||||||
|
|
||||||
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
let content = proxmox::tools::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?;
|
let content = proxmox::tools::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?
|
||||||
let content = content.unwrap_or(String::from(""));
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
let digest = openssl::sha::sha256(content.as_bytes());
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
let data = CONFIG.parse(MEDIA_POOL_CFG_FILENAME, &content)?;
|
let data = CONFIG.parse(MEDIA_POOL_CFG_FILENAME, &content)?;
|
||||||
|
@ -386,9 +386,9 @@ impl NetworkConfig {
|
|||||||
pub fn check_mtu(&self, parent_name: &str, child_name: &str) -> Result<(), Error> {
|
pub fn check_mtu(&self, parent_name: &str, child_name: &str) -> Result<(), Error> {
|
||||||
|
|
||||||
let parent = self.interfaces.get(parent_name)
|
let parent = self.interfaces.get(parent_name)
|
||||||
.ok_or(format_err!("check_mtu - missing parent interface '{}'", parent_name))?;
|
.ok_or_else(|| format_err!("check_mtu - missing parent interface '{}'", parent_name))?;
|
||||||
let child = self.interfaces.get(child_name)
|
let child = self.interfaces.get(child_name)
|
||||||
.ok_or(format_err!("check_mtu - missing child interface '{}'", child_name))?;
|
.ok_or_else(|| format_err!("check_mtu - missing child interface '{}'", child_name))?;
|
||||||
|
|
||||||
let child_mtu = match child.mtu {
|
let child_mtu = match child.mtu {
|
||||||
Some(mtu) => mtu,
|
Some(mtu) => mtu,
|
||||||
@ -515,7 +515,7 @@ pub fn config() -> Result<(NetworkConfig, [u8;32]), Error> {
|
|||||||
Some(content) => content,
|
Some(content) => content,
|
||||||
None => {
|
None => {
|
||||||
let content = proxmox::tools::fs::file_get_optional_contents(NETWORK_INTERFACES_FILENAME)?;
|
let content = proxmox::tools::fs::file_get_optional_contents(NETWORK_INTERFACES_FILENAME)?;
|
||||||
content.unwrap_or(Vec::new())
|
content.unwrap_or_default()
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -577,8 +577,8 @@ pub fn complete_port_list(arg: &str, _param: &HashMap<String, String>) -> Vec<St
|
|||||||
Err(_) => return vec![],
|
Err(_) => return vec![],
|
||||||
};
|
};
|
||||||
|
|
||||||
let arg = arg.clone().trim();
|
let arg = arg.trim();
|
||||||
let prefix = if let Some(idx) = arg.rfind(",") { &arg[..idx+1] } else { "" };
|
let prefix = if let Some(idx) = arg.rfind(',') { &arg[..idx+1] } else { "" };
|
||||||
ports.iter().map(|port| format!("{}{}", prefix, port)).collect()
|
ports.iter().map(|port| format!("{}{}", prefix, port)).collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -51,6 +51,7 @@ pub static IPV4_REVERSE_MASK: &[&str] = &[
|
|||||||
lazy_static! {
|
lazy_static! {
|
||||||
pub static ref IPV4_MASK_HASH_LOCALNET: HashMap<&'static str, u8> = {
|
pub static ref IPV4_MASK_HASH_LOCALNET: HashMap<&'static str, u8> = {
|
||||||
let mut map = HashMap::new();
|
let mut map = HashMap::new();
|
||||||
|
#[allow(clippy::needless_range_loop)]
|
||||||
for i in 8..32 {
|
for i in 8..32 {
|
||||||
map.insert(IPV4_REVERSE_MASK[i], i as u8);
|
map.insert(IPV4_REVERSE_MASK[i], i as u8);
|
||||||
}
|
}
|
||||||
@ -61,22 +62,23 @@ lazy_static! {
|
|||||||
pub fn parse_cidr(cidr: &str) -> Result<(String, u8, bool), Error> {
|
pub fn parse_cidr(cidr: &str) -> Result<(String, u8, bool), Error> {
|
||||||
let (address, mask, is_v6) = parse_address_or_cidr(cidr)?;
|
let (address, mask, is_v6) = parse_address_or_cidr(cidr)?;
|
||||||
if let Some(mask) = mask {
|
if let Some(mask) = mask {
|
||||||
return Ok((address, mask, is_v6));
|
Ok((address, mask, is_v6))
|
||||||
} else {
|
} else {
|
||||||
bail!("missing netmask in '{}'", cidr);
|
bail!("missing netmask in '{}'", cidr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
|
pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
|
||||||
if is_v6 {
|
let (ver, min, max) = if is_v6 {
|
||||||
if !(mask >= 1 && mask <= 128) {
|
("IPv6", 1, 128)
|
||||||
bail!("IPv6 mask '{}' is out of range (1..128).", mask);
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
if !(mask > 0 && mask <= 32) {
|
("IPv4", 1, 32)
|
||||||
bail!("IPv4 mask '{}' is out of range (1..32).", mask);
|
};
|
||||||
}
|
|
||||||
|
if !(mask >= min && mask <= max) {
|
||||||
|
bail!("{} mask '{}' is out of range ({}..{}).", ver, mask, min, max);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
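check_netmask above folds the v4/v6 branches into one range test by selecting (label, min, max) up front; the refactored shape, restated as a compilable sketch:

use anyhow::{bail, Error};

// Pick the family-specific bounds once, then validate once: a single
// bail! site instead of two near-identical branches.
pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
    let (ver, min, max) = if is_v6 { ("IPv6", 1, 128) } else { ("IPv4", 1, 32) };
    if !(mask >= min && mask <= max) {
        bail!("{} mask '{}' is out of range ({}..{}).", ver, mask, min, max);
    }
    Ok(())
}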
@ -97,18 +99,18 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
|
|||||||
if let Some(mask) = caps.get(2) {
|
if let Some(mask) = caps.get(2) {
|
||||||
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
||||||
check_netmask(mask, false)?;
|
check_netmask(mask, false)?;
|
||||||
return Ok((address.to_string(), Some(mask), false));
|
Ok((address.to_string(), Some(mask), false))
|
||||||
} else {
|
} else {
|
||||||
return Ok((address.to_string(), None, false));
|
Ok((address.to_string(), None, false))
|
||||||
}
|
}
|
||||||
} else if let Some(caps) = CIDR_V6_REGEX.captures(&cidr) {
|
} else if let Some(caps) = CIDR_V6_REGEX.captures(&cidr) {
|
||||||
let address = &caps[1];
|
let address = &caps[1];
|
||||||
if let Some(mask) = caps.get(2) {
|
if let Some(mask) = caps.get(2) {
|
||||||
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
||||||
check_netmask(mask, true)?;
|
check_netmask(mask, true)?;
|
||||||
return Ok((address.to_string(), Some(mask), true));
|
Ok((address.to_string(), Some(mask), true))
|
||||||
} else {
|
} else {
|
||||||
return Ok((address.to_string(), None, true));
|
Ok((address.to_string(), None, true))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
bail!("invalid address/mask '{}'", cidr);
|
bail!("invalid address/mask '{}'", cidr);
|
||||||
|
@ -74,9 +74,9 @@ impl <R: BufRead> Lexer<R> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn split_line(line: &str) -> VecDeque<(Token, String)> {
|
fn split_line(line: &str) -> VecDeque<(Token, String)> {
|
||||||
if line.starts_with("#") {
|
if let Some(comment) = line.strip_prefix('#') {
|
||||||
let mut res = VecDeque::new();
|
let mut res = VecDeque::new();
|
||||||
res.push_back((Token::Comment, line[1..].trim().to_string()));
|
res.push_back((Token::Comment, comment.trim().to_string()));
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
let mut list: VecDeque<(Token, String)> = line.split_ascii_whitespace().map(|text| {
|
let mut list: VecDeque<(Token, String)> = line.split_ascii_whitespace().map(|text| {
|
||||||
@ -114,14 +114,14 @@ impl <R: BufRead> Iterator for Lexer<R> {
|
|||||||
Some(ref mut cur_line) => {
|
Some(ref mut cur_line) => {
|
||||||
if cur_line.is_empty() {
|
if cur_line.is_empty() {
|
||||||
self.cur_line = None;
|
self.cur_line = None;
|
||||||
return Some(Ok((Token::Newline, String::from("\n"))));
|
Some(Ok((Token::Newline, String::from("\n"))))
|
||||||
} else {
|
} else {
|
||||||
let (token, text) = cur_line.pop_front().unwrap();
|
let (token, text) = cur_line.pop_front().unwrap();
|
||||||
return Some(Ok((token, text)));
|
Some(Ok((token, text)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
return None;
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -29,7 +29,7 @@ impl <R: BufRead> NetworkParser<R> {
|
|||||||
bail!("input error - {}", err);
|
bail!("input error - {}", err);
|
||||||
}
|
}
|
||||||
Some(Ok((token, _))) => {
|
Some(Ok((token, _))) => {
|
||||||
return Ok(*token);
|
Ok(*token)
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
bail!("got unexpected end of stream (inside peek)");
|
bail!("got unexpected end of stream (inside peek)");
|
||||||
@ -44,7 +44,7 @@ impl <R: BufRead> NetworkParser<R> {
|
|||||||
}
|
}
|
||||||
Some(Ok((token, text))) => {
|
Some(Ok((token, text))) => {
|
||||||
if token == Token::Newline { self.line_nr += 1; }
|
if token == Token::Newline { self.line_nr += 1; }
|
||||||
return Ok((token, text));
|
Ok((token, text))
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
bail!("got unexpected end of stream (inside peek)");
|
bail!("got unexpected end of stream (inside peek)");
|
||||||
@ -215,12 +215,12 @@ impl <R: BufRead> NetworkParser<R> {
|
|||||||
Token::Comment => {
|
Token::Comment => {
|
||||||
let comment = self.eat(Token::Comment)?;
|
let comment = self.eat(Token::Comment)?;
|
||||||
if !address_family_v4 && address_family_v6 {
|
if !address_family_v4 && address_family_v6 {
|
||||||
let mut comments = interface.comments6.take().unwrap_or(String::new());
|
let mut comments = interface.comments6.take().unwrap_or_default();
|
||||||
if !comments.is_empty() { comments.push('\n'); }
|
if !comments.is_empty() { comments.push('\n'); }
|
||||||
comments.push_str(&comment);
|
comments.push_str(&comment);
|
||||||
interface.comments6 = Some(comments);
|
interface.comments6 = Some(comments);
|
||||||
} else {
|
} else {
|
||||||
let mut comments = interface.comments.take().unwrap_or(String::new());
|
let mut comments = interface.comments.take().unwrap_or_default();
|
||||||
if !comments.is_empty() { comments.push('\n'); }
|
if !comments.is_empty() { comments.push('\n'); }
|
||||||
comments.push_str(&comment);
|
comments.push_str(&comment);
|
||||||
interface.comments = Some(comments);
|
interface.comments = Some(comments);
|
||||||
|
@ -92,8 +92,8 @@ pub const REMOTE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.remote.lck";
|
|||||||
|
|
||||||
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
let content = proxmox::tools::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?;
|
let content = proxmox::tools::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?
|
||||||
let content = content.unwrap_or(String::from(""));
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
let digest = openssl::sha::sha256(content.as_bytes());
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
let data = CONFIG.parse(REMOTE_CFG_FILENAME, &content)?;
|
let data = CONFIG.parse(REMOTE_CFG_FILENAME, &content)?;
|
||||||
|
@ -79,7 +79,7 @@ impl From<&SyncJobStatus> for SyncJobConfig {
|
|||||||
owner: job_status.owner.clone(),
|
owner: job_status.owner.clone(),
|
||||||
remote: job_status.remote.clone(),
|
remote: job_status.remote.clone(),
|
||||||
remote_store: job_status.remote_store.clone(),
|
remote_store: job_status.remote_store.clone(),
|
||||||
remove_vanished: job_status.remove_vanished.clone(),
|
remove_vanished: job_status.remove_vanished,
|
||||||
comment: job_status.comment.clone(),
|
comment: job_status.comment.clone(),
|
||||||
schedule: job_status.schedule.clone(),
|
schedule: job_status.schedule.clone(),
|
||||||
}
|
}
|
||||||
@ -183,8 +183,8 @@ pub const SYNC_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.sync.lck";
|
|||||||
|
|
||||||
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
let content = proxmox::tools::fs::file_read_optional_string(SYNC_CFG_FILENAME)?;
|
let content = proxmox::tools::fs::file_read_optional_string(SYNC_CFG_FILENAME)?
|
||||||
let content = content.unwrap_or(String::from(""));
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
let digest = openssl::sha::sha256(content.as_bytes());
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
let data = CONFIG.parse(SYNC_CFG_FILENAME, &content)?;
|
let data = CONFIG.parse(SYNC_CFG_FILENAME, &content)?;
|
||||||
|
@ -53,7 +53,7 @@ pub struct EncryptionKeyInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn compute_tape_key_fingerprint(key: &[u8; 32]) -> Result<Fingerprint, Error> {
|
pub fn compute_tape_key_fingerprint(key: &[u8; 32]) -> Result<Fingerprint, Error> {
|
||||||
let crypt_config = CryptConfig::new(key.clone())?;
|
let crypt_config = CryptConfig::new(*key)?;
|
||||||
Ok(crypt_config.fingerprint())
|
Ok(crypt_config.fingerprint())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -193,7 +193,7 @@ pub fn insert_key(key: [u8;32], key_config: KeyConfig, force: bool) -> Result<()
|
|||||||
};
|
};
|
||||||
|
|
||||||
if !force {
|
if !force {
|
||||||
if let Some(_) = config_map.get(&fingerprint) {
|
if config_map.get(&fingerprint).is_some() {
|
||||||
bail!("encryption key '{}' already exists.", fingerprint);
|
bail!("encryption key '{}' already exists.", fingerprint);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1380,14 +1380,14 @@ impl std::str::FromStr for TfaResponse {
|
|||||||
type Err = Error;
|
type Err = Error;
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Error> {
|
fn from_str(s: &str) -> Result<Self, Error> {
|
||||||
Ok(if s.starts_with("totp:") {
|
Ok(if let Some(totp) = s.strip_prefix("totp:") {
|
||||||
TfaResponse::Totp(s[5..].to_string())
|
TfaResponse::Totp(totp.to_string())
|
||||||
} else if s.starts_with("u2f:") {
|
} else if let Some(u2f) = s.strip_prefix("u2f:") {
|
||||||
TfaResponse::U2f(serde_json::from_str(&s[4..])?)
|
TfaResponse::U2f(serde_json::from_str(u2f)?)
|
||||||
} else if s.starts_with("webauthn:") {
|
} else if let Some(webauthn) = s.strip_prefix("webauthn:") {
|
||||||
TfaResponse::Webauthn(serde_json::from_str(&s[9..])?)
|
TfaResponse::Webauthn(serde_json::from_str(webauthn)?)
|
||||||
} else if s.starts_with("recovery:") {
|
} else if let Some(recovery) = s.strip_prefix("recovery:") {
|
||||||
TfaResponse::Recovery(s[9..].to_string())
|
TfaResponse::Recovery(recovery.to_string())
|
||||||
} else {
|
} else {
|
||||||
bail!("invalid tfa response");
|
bail!("invalid tfa response");
|
||||||
})
|
})
|
||||||
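The TfaResponse parser above is one of several hunks moving to strip_prefix, which tests and removes the prefix in one step so the slice offset can never drift from the prefix length; a small sketch:

// strip_prefix returns Some(rest) on a match: no more hand-counted
// `&s[5..]` offsets sitting next to a starts_with("totp:") check.
fn parse_tag(s: &str) -> Option<(&'static str, &str)> {
    if let Some(rest) = s.strip_prefix("totp:") {
        Some(("totp", rest))
    } else if let Some(rest) = s.strip_prefix("u2f:") {
        Some(("u2f", rest))
    } else {
        None
    }
}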
|
@ -157,8 +157,8 @@ pub const USER_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.user.lck";
|
|||||||
|
|
||||||
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
let content = proxmox::tools::fs::file_read_optional_string(USER_CFG_FILENAME)?;
|
let content = proxmox::tools::fs::file_read_optional_string(USER_CFG_FILENAME)?
|
||||||
let content = content.unwrap_or(String::from(""));
|
.unwrap_or_else(|| "".to_string());
|
||||||
|
|
||||||
let digest = openssl::sha::sha256(content.as_bytes());
|
let digest = openssl::sha::sha256(content.as_bytes());
|
||||||
let mut data = CONFIG.parse(USER_CFG_FILENAME, &content)?;
|
let mut data = CONFIG.parse(USER_CFG_FILENAME, &content)?;
|
||||||
|
@ -40,8 +40,7 @@ fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
|
|||||||
pub fn is_virtual_file_system(magic: i64) -> bool {
|
pub fn is_virtual_file_system(magic: i64) -> bool {
|
||||||
use proxmox::sys::linux::magic::*;
|
use proxmox::sys::linux::magic::*;
|
||||||
|
|
||||||
match magic {
|
matches!(magic, BINFMTFS_MAGIC |
|
||||||
BINFMTFS_MAGIC |
|
|
||||||
CGROUP2_SUPER_MAGIC |
|
CGROUP2_SUPER_MAGIC |
|
||||||
CGROUP_SUPER_MAGIC |
|
CGROUP_SUPER_MAGIC |
|
||||||
CONFIGFS_MAGIC |
|
CONFIGFS_MAGIC |
|
||||||
@ -58,9 +57,7 @@ pub fn is_virtual_file_system(magic: i64) -> bool {
|
|||||||
SECURITYFS_MAGIC |
|
SECURITYFS_MAGIC |
|
||||||
SELINUX_MAGIC |
|
SELINUX_MAGIC |
|
||||||
SMACK_MAGIC |
|
SMACK_MAGIC |
|
||||||
SYSFS_MAGIC => true,
|
SYSFS_MAGIC)
|
||||||
_ => false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -228,7 +228,7 @@ impl Extractor {
|
|||||||
allow_existing_dirs,
|
allow_existing_dirs,
|
||||||
feature_flags,
|
feature_flags,
|
||||||
current_path: Arc::new(Mutex::new(OsString::new())),
|
current_path: Arc::new(Mutex::new(OsString::new())),
|
||||||
on_error: Box::new(|err| Err(err)),
|
on_error: Box::new(Err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -480,11 +480,11 @@ impl SessionImpl {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn lookup<'a>(
|
async fn lookup(
|
||||||
&'a self,
|
&'_ self,
|
||||||
parent: u64,
|
parent: u64,
|
||||||
file_name: &OsStr,
|
file_name: &OsStr,
|
||||||
) -> Result<(EntryParam, LookupRef<'a>), Error> {
|
) -> Result<(EntryParam, LookupRef<'_>), Error> {
|
||||||
let dir = self.open_dir(parent).await?;
|
let dir = self.open_dir(parent).await?;
|
||||||
|
|
||||||
let entry = match { dir }.lookup(file_name).await? {
|
let entry = match { dir }.lookup(file_name).await? {
|
||||||
@ -519,10 +519,10 @@ impl SessionImpl {
|
|||||||
to_stat(inode, &entry)
|
to_stat(inode, &entry)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn readdirplus<'a>(
|
async fn readdirplus(
|
||||||
&'a self,
|
&'_ self,
|
||||||
request: &mut requests::ReaddirPlus,
|
request: &mut requests::ReaddirPlus,
|
||||||
) -> Result<Vec<LookupRef<'a>>, Error> {
|
) -> Result<Vec<LookupRef<'_>>, Error> {
|
||||||
let mut lookups = Vec::new();
|
let mut lookups = Vec::new();
|
||||||
let offset = usize::try_from(request.offset)
|
let offset = usize::try_from(request.offset)
|
||||||
.map_err(|_| io_format_err!("directory offset out of range"))?;
|
.map_err(|_| io_format_err!("directory offset out of range"))?;
|
||||||
|
@ -345,10 +345,7 @@ fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Resul
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
|
pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
|
||||||
match errno {
|
matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
|
||||||
Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
|
|
||||||
_ => false,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
|
fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
|
||||||
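errno_is_unsupported above shows the matches! rewrite: a match whose arms only produce true/false becomes a single expression. Restated (the nix crate's Errno type is assumed):

use nix::errno::Errno;

// Identical behavior to the explicit two-arm match: true for the listed
// "filesystem doesn't support this" errnos, false otherwise.
pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
    matches!(
        errno,
        Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL
    )
}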
|
@ -128,25 +128,20 @@ impl RRA {
|
|||||||
// derive counter value
|
// derive counter value
|
||||||
if self.flags.intersects(RRAFlags::DST_DERIVE | RRAFlags::DST_COUNTER) {
|
if self.flags.intersects(RRAFlags::DST_DERIVE | RRAFlags::DST_COUNTER) {
|
||||||
let time_diff = time - self.last_update;
|
let time_diff = time - self.last_update;
|
||||||
|
let is_counter = self.flags.contains(RRAFlags::DST_COUNTER);
|
||||||
|
|
||||||
let diff = if self.counter_value.is_nan() {
|
let diff = if self.counter_value.is_nan() {
|
||||||
0.0
|
0.0
|
||||||
|
} else if is_counter && value < 0.0 {
|
||||||
|
eprintln!("rrdb update failed - got negative value for counter");
|
||||||
|
return;
|
||||||
|
} else if is_counter && value < self.counter_value {
|
||||||
|
// Note: We do not try automatic overflow corrections
|
||||||
|
self.counter_value = value;
|
||||||
|
eprintln!("rrdb update failed - conter overflow/reset detected");
|
||||||
|
return;
|
||||||
} else {
|
} else {
|
||||||
if self.flags.contains(RRAFlags::DST_COUNTER) { // check for overflow
|
value - self.counter_value
|
||||||
if value < 0.0 {
|
|
||||||
eprintln!("rrdb update failed - got negative value for counter");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
// Note: We do not try automatic overflow corrections
|
|
||||||
if value < self.counter_value { // overflow or counter reset
|
|
||||||
self.counter_value = value;
|
|
||||||
eprintln!("rrdb update failed - conter overflow/reset detected");
|
|
||||||
return;
|
|
||||||
} else {
|
|
||||||
value - self.counter_value
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
value - self.counter_value
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
self.counter_value = value;
|
self.counter_value = value;
|
||||||
value = diff/time_diff;
|
value = diff/time_diff;
|
||||||
|
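For DERIVE/COUNTER sources the stored value is a rate: the difference between two successive readings divided by the elapsed time. A simplified model of the update path above, ignoring the NaN bootstrap (this is a sketch, not the crate's actual function):

    /// Simplified DERIVE-style rate: rate = delta_value / delta_time.
    /// Returns None on counter reset (new value smaller than the last one),
    /// mirroring the early returns in the real update path.
    fn derive_rate(last_value: f64, value: f64, last_update: i64, time: i64) -> Option<f64> {
        if value < last_value {
            return None; // counter overflow/reset detected
        }
        let time_diff = (time - last_update) as f64;
        Some((value - last_value) / time_diff)
    }

    fn main() {
        // 500 units in 10 seconds -> 50 units/s
        assert_eq!(derive_rate(1000.0, 1500.0, 0, 10), Some(50.0));
        // reset detected
        assert_eq!(derive_rate(1500.0, 100.0, 10, 20), None);
    }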
@@ -127,13 +127,13 @@ pub async fn send_command<P>(
     if rx.read_line(&mut data).await? == 0 {
         bail!("no response");
     }
-    if data.starts_with("OK: ") {
-        match data[4..].parse::<Value>() {
+    if let Some(res) = data.strip_prefix("OK: ") {
+        match res.parse::<Value>() {
             Ok(v) => Ok(v),
             Err(err) => bail!("unable to parse json response - {}", err),
         }
-    } else if data.starts_with("ERROR: ") {
-        bail!("{}", &data[7..]);
+    } else if let Some(err) = data.strip_prefix("ERROR: ") {
+        bail!("{}", err);
     } else {
         bail!("unable to parse response: {}", data);
     }
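`str::strip_prefix` (stable since Rust 1.45) combines the `starts_with` test and the manual slice into one step, removing the risk of the hard-coded index and the prefix length drifting apart. A self-contained sketch of the same shape:

    fn parse_response(data: &str) -> Result<&str, String> {
        // old style: if data.starts_with("OK: ") { Ok(&data[4..]) } ...
        if let Some(res) = data.strip_prefix("OK: ") {
            Ok(res)
        } else if let Some(err) = data.strip_prefix("ERROR: ") {
            Err(err.to_string())
        } else {
            Err(format!("unable to parse response: {}", data))
        }
    }

    fn main() {
        assert_eq!(parse_response("OK: 42"), Ok("42"));
        assert_eq!(parse_response("ERROR: boom"), Err("boom".to_string()));
    }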
@@ -57,9 +57,9 @@ impl ApiConfig {
             prefix.push_str(components[0]);
             if let Some(subdir) = self.aliases.get(&prefix) {
                 filename.push(subdir);
-                for i in 1..comp_len { filename.push(components[i]) }
+                components.iter().skip(1).for_each(|comp| filename.push(comp));
             } else {
-                for i in 0..comp_len { filename.push(components[i]) }
+                components.iter().for_each(|comp| filename.push(comp));
             }
         }
         filename
@@ -376,7 +376,7 @@ fn get_server_url() -> (String, usize) {
 }

 pub fn send_updates_available(
-    updates: &Vec<&APTUpdateInfo>,
+    updates: &[&APTUpdateInfo],
 ) -> Result<(), Error> {
     // update mails always go to the root@pam configured email..
     if let Some(email) = lookup_user_email(Userid::root_userid()) {
@@ -403,7 +403,7 @@ fn lookup_user_email(userid: &Userid) -> Option<String> {

     if let Ok(user_config) = user::cached_config() {
         if let Ok(user) = user_config.lookup::<User>("user", userid.as_str()) {
-            return user.email.clone();
+            return user.email;
         }
     }

@@ -434,7 +434,7 @@ pub fn lookup_datastore_notify_settings(
         None => lookup_user_email(Userid::root_userid()),
     };

-    let notify_str = config.notify.unwrap_or(String::new());
+    let notify_str = config.notify.unwrap_or_default();

     if let Ok(value) = parse_property_string(&notify_str, &DatastoreNotify::API_SCHEMA) {
         if let Ok(notify) = serde_json::from_value(value) {
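`unwrap_or_default()` replaces `unwrap_or(String::new())` without changing behavior, since `String::default()` is the empty string. Sketch:

    fn main() {
        let notify: Option<String> = None;

        let a = notify.clone().unwrap_or(String::new());
        let b = notify.unwrap_or_default(); // same result, reads clearer

        assert_eq!(a, b);
        assert!(b.is_empty());
    }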
@@ -456,7 +456,7 @@ fn handlebars_humam_bytes_helper(
 ) -> HelperResult {
     let param = h.param(0).map(|v| v.value().as_u64())
         .flatten()
-        .ok_or(RenderError::new("human-bytes: param not found"))?;
+        .ok_or_else(|| RenderError::new("human-bytes: param not found"))?;

     out.write(&HumanByte::from(param).to_string())?;

@@ -472,10 +472,10 @@ fn handlebars_relative_percentage_helper(
 ) -> HelperResult {
     let param0 = h.param(0).map(|v| v.value().as_f64())
         .flatten()
-        .ok_or(RenderError::new("relative-percentage: param0 not found"))?;
+        .ok_or_else(|| RenderError::new("relative-percentage: param0 not found"))?;
     let param1 = h.param(1).map(|v| v.value().as_f64())
         .flatten()
-        .ok_or(RenderError::new("relative-percentage: param1 not found"))?;
+        .ok_or_else(|| RenderError::new("relative-percentage: param1 not found"))?;

     if param1 == 0.0 {
         out.write("-")?;
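The `ok_or` → `ok_or_else` changes in both helpers are about evaluation order: the argument to `ok_or` is built eagerly even when the `Option` is `Some`, while `ok_or_else` only runs its closure in the `None` case. A sketch of the difference, using a counter instead of `RenderError` to keep it self-contained:

    use std::cell::Cell;

    fn main() {
        let constructed = Cell::new(0);
        let make_err = || {
            constructed.set(constructed.get() + 1);
            "param not found".to_string()
        };

        let present: Option<u64> = Some(7);

        // eager: the fallback error is built even though it is discarded
        let _ = present.ok_or(make_err());
        // lazy: the closure never runs for Some(_)
        let _ = present.ok_or_else(make_err);

        assert_eq!(constructed.get(), 1);
    }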
@@ -48,6 +48,6 @@ impl RpcEnvironment for RestEnvironment {
     }

     fn get_client_ip(&self) -> Option<std::net::SocketAddr> {
-        self.client_ip.clone()
+        self.client_ip
     }
 }
@@ -39,13 +39,12 @@ pub fn do_garbage_collection_job(

             let status = worker.create_state(&result);

-            match job.finish(status) {
-                Err(err) => eprintln!(
+            if let Err(err) = job.finish(status) {
+                eprintln!(
                     "could not finish job state for {}: {}",
                     job.jobtype().to_string(),
                     err
-                ),
-                Ok(_) => (),
+                );
             }

             if let Some(email) = email {
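When only the `Err` arm of a `match` does anything, `if let Err(err) = …` expresses the same control flow without the empty `Ok(_) => ()` arm (clippy: `single_match`). Sketch:

    fn finish(ok: bool) -> Result<(), String> {
        if ok { Ok(()) } else { Err("could not finish job state".to_string()) }
    }

    fn main() {
        // match finish(false) {
        //     Err(err) => eprintln!("{}", err),
        //     Ok(_) => (),
        // }
        if let Err(err) = finish(false) {
            eprintln!("{}", err);
        }
    }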
@@ -97,7 +97,7 @@ impl <E: RpcEnvironment + Clone> tower_service::Service<Request<Body>> for H2Ser
         let method = req.method().clone();
         let worker = self.worker.clone();

-        std::pin::Pin::from(self.handle_request(req))
+        self.handle_request(req)
             .map(move |result| match result {
                 Ok(res) => {
                     Self::log_response(worker, method, &path, &res);
@@ -207,11 +207,8 @@ impl Job {
     /// Start the job and update the statefile accordingly
     /// Fails if the job was already started
     pub fn start(&mut self, upid: &str) -> Result<(), Error> {
-        match self.state {
-            JobState::Started { .. } => {
-                bail!("cannot start job that is started!");
-            }
-            _ => {}
+        if let JobState::Started { .. } = self.state {
+            bail!("cannot start job that is started!");
         }

         self.state = JobState::Started {
@@ -39,7 +39,7 @@ fn function_calls() -> Vec<(&'static str, fn() -> String)> {
             };

             let mut list = Vec::new();
-            for (store, _) in &config.sections {
+            for store in config.sections.keys() {
                 list.push(store.as_str());
             }
             list.join(", ")
@@ -147,7 +147,7 @@ fn log_response(
         let now = proxmox::tools::time::epoch_i64();
         // time format which apache/nginx use (by default), copied from pve-http-server
         let datetime = proxmox::tools::time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
-            .unwrap_or("-".into());
+            .unwrap_or_else(|_| "-".to_string());

         logfile
             .lock()
@@ -161,7 +161,7 @@ fn log_response(
                 path,
                 status.as_str(),
                 resp.body().size_hint().lower(),
-                user_agent.unwrap_or("-".into()),
+                user_agent.unwrap_or_else(|| "-".to_string()),
             ));
     }
 }
@@ -517,7 +517,7 @@ async fn chuncked_static_file_download(filename: PathBuf) -> Result<Response<Bod
         .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+        .map_ok(|bytes| bytes.freeze());
     let body = Body::wrap_stream(payload);

     // fixme: set other headers ?
@@ -68,8 +68,8 @@ impl std::str::FromStr for ApiTicket {
     type Err = Error;

     fn from_str(s: &str) -> Result<Self, Error> {
-        if s.starts_with("!tfa!") {
-            Ok(ApiTicket::Partial(serde_json::from_str(&s[5..])?))
+        if let Some(tfa_ticket) = s.strip_prefix("!tfa!") {
+            Ok(ApiTicket::Partial(serde_json::from_str(tfa_ticket)?))
         } else {
             Ok(ApiTicket::Full(s.parse()?))
         }
@@ -23,7 +23,7 @@ pub fn do_verification_job(

     let datastore = DataStore::lookup_datastore(&verification_job.store)?;

-    let outdated_after = verification_job.outdated_after.clone();
+    let outdated_after = verification_job.outdated_after;
     let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);

     let filter = move |manifest: &BackupManifest| {
@@ -33,7 +33,7 @@ pub fn do_verification_job(

         let raw_verify_state = manifest.unprotected["verify_state"].clone();
         match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
-            Err(_) => return true, // no last verification, always include
+            Err(_) => true, // no last verification, always include
             Ok(last_verify) => {
                 match outdated_after {
                     None => false, // never re-verify if ignored and no max age
@@ -83,13 +83,12 @@ pub fn do_verification_job(

             let status = worker.create_state(&job_result);

-            match job.finish(status) {
-                Err(err) => eprintln!(
+            if let Err(err) = job.finish(status) {
+                eprintln!(
                     "could not finish job state for {}: {}",
                     job.jobtype().to_string(),
                     err
-                ),
-                Ok(_) => (),
+                );
             }

             if let Some(email) = email {
@@ -48,7 +48,7 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
         return Ok(WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id));
     }

-    if !procfs::check_process_running_pstart(upid.pid, upid.pstart).is_some() {
+    if procfs::check_process_running_pstart(upid.pid, upid.pstart).is_none() {
         return Ok(false);
     }

@@ -191,7 +191,7 @@ pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
     file.read_to_end(&mut data)?;

     // task logs should end with newline, we do not want it here
-    if data.len() > 0 && data[data.len()-1] == b'\n' {
+    if !data.is_empty() && data[data.len()-1] == b'\n' {
         data.pop();
     }

@@ -267,11 +267,11 @@ impl TaskState {
             Ok(TaskState::Unknown { endtime })
         } else if s == "OK" {
             Ok(TaskState::OK { endtime })
-        } else if s.starts_with("WARNINGS: ") {
-            let count: u64 = s[10..].parse()?;
+        } else if let Some(warnings) = s.strip_prefix("WARNINGS: ") {
+            let count: u64 = warnings.parse()?;
             Ok(TaskState::Warning{ count, endtime })
-        } else if s.len() > 0 {
-            let message = if s.starts_with("ERROR: ") { &s[7..] } else { s }.to_string();
+        } else if !s.is_empty() {
+            let message = if let Some(err) = s.strip_prefix("ERROR: ") { err } else { s }.to_string();
             Ok(TaskState::Error{ message, endtime })
         } else {
             bail!("unable to parse Task Status '{}'", s);
@@ -330,7 +330,7 @@ pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: O
     let _lock = lock_task_list_files(true)?;

     let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress)
-        .ok_or(format_err!("could not get archive file names"))?;
+        .ok_or_else(|| format_err!("could not get archive file names"))?;

     logrotate.rotate(size_threshold, None, max_files)
 }
@@ -362,8 +362,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
             if !worker_is_active_local(&info.upid) {
                 // println!("Detected stopped task '{}'", &info.upid_str);
                 let now = proxmox::tools::time::epoch_i64();
-                let status = upid_read_status(&info.upid)
-                    .unwrap_or_else(|_| TaskState::Unknown { endtime: now });
+                let status = upid_read_status(&info.upid).unwrap_or(TaskState::Unknown { endtime: now });
                 finish_list.push(TaskListInfo {
                     upid: info.upid,
                     upid_str: info.upid_str,
@@ -187,11 +187,9 @@ pub trait MediaChange {
                 if let ElementStatus::Empty = element_status {
                     to = Some(i as u64 + 1);
                 }
-            } else {
-                if let ElementStatus::VolumeTag(ref tag) = element_status {
-                    if tag == label_text {
-                        from = Some(i as u64 + 1);
-                    }
-                }
+            } else if let ElementStatus::VolumeTag(ref tag) = element_status {
+                if tag == label_text {
+                    from = Some(i as u64 + 1);
+                }
             }
         }
     }
@@ -58,13 +58,12 @@ fn parse_drive_status(i: &str) -> IResult<&str, DriveStatus> {

     let mut loaded_slot = None;

-    if i.starts_with("Empty") {
-        return Ok((&i[5..], DriveStatus { loaded_slot, status: ElementStatus::Empty }));
+    if let Some(empty) = i.strip_suffix("Empty") {
+        return Ok((empty, DriveStatus { loaded_slot, status: ElementStatus::Empty }));
     }
     let (mut i, _) = tag("Full (")(i)?;

-    if i.starts_with("Storage Element ") {
-        let n = &i[16..];
+    if let Some(n) = i.strip_prefix("Storage Element ") {
         let (n, id) = parse_u64(n)?;
         loaded_slot = Some(id);
         let (n, _) = tag(" Loaded")(n)?;
@@ -76,8 +75,7 @@ fn parse_drive_status(i: &str) -> IResult<&str, DriveStatus> {

     let (i, _) = tag(")")(i)?;

-    if i.starts_with(":VolumeTag = ") {
-        let i = &i[13..];
+    if let Some(i) = i.strip_prefix(":VolumeTag = ") {
         let (i, tag) = take_while(|c| !(c == ' ' || c == ':' || c == '\n'))(i)?;
         let (i, _) = take_while(|c| c != '\n')(i)?; // skip to eol
         return Ok((i, DriveStatus { loaded_slot, status: ElementStatus::VolumeTag(tag.to_string()) }));
@@ -89,14 +87,11 @@ fn parse_drive_status(i: &str) -> IResult<&str, DriveStatus> {
 }

 fn parse_slot_status(i: &str) -> IResult<&str, ElementStatus> {
-    if i.starts_with("Empty") {
-        return Ok((&i[5..], ElementStatus::Empty));
+    if let Some(empty) = i.strip_prefix("Empty") {
+        return Ok((empty, ElementStatus::Empty));
     }
-    if i.starts_with("Full ") {
-        let mut n = &i[5..];
-
-        if n.starts_with(":VolumeTag=") {
-            n = &n[11..];
+    if let Some(n) = i.strip_prefix("Full ") {
+        if let Some(n) = n.strip_prefix(":VolumeTag=") {
             let (n, tag) = take_while(|c| !(c == ' ' || c == ':' || c == '\n'))(n)?;
             let (n, _) = take_while(|c| c != '\n')(n)?; // skip to eol
             return Ok((n, ElementStatus::VolumeTag(tag.to_string())));
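This parser mixes `strip_prefix` with nom combinators (`tag`, `take_while`). A minimal standalone parser in the same shape as `parse_slot_status`, assuming the nom crate, version 6 or later, in Cargo.toml (the label string here is made up):

    use nom::bytes::complete::{tag, take_while};
    use nom::IResult;

    // parse one "Full :VolumeTag=<label>" slot line, returning the label
    fn parse_volume_tag(i: &str) -> IResult<&str, &str> {
        let (i, _) = tag("Full :VolumeTag=")(i)?;
        let (i, label) = take_while(|c| !(c == ' ' || c == ':' || c == '\n'))(i)?;
        Ok((i, label))
    }

    fn main() {
        let (_rest, label) = parse_volume_tag("Full :VolumeTag=CLN001CU 12").unwrap();
        assert_eq!(label, "CLN001CU");
    }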
@@ -62,15 +62,11 @@ impl <'a> ChunkArchiveWriter<'a> {
     }

     fn write_all(&mut self, data: &[u8]) -> Result<bool, std::io::Error> {
-        let result = match self.writer {
-            Some(ref mut writer) => {
-                let leom = writer.write_all(data)?;
-                Ok(leom)
-            }
+        match self.writer {
+            Some(ref mut writer) => writer.write_all(data),
             None => proxmox::io_bail!(
                 "detected write after archive finished - internal error"),
-        };
-        result
+        }
     }

     /// Write chunk into archive.
@@ -24,10 +24,7 @@ pub fn has_encryption<F: AsRawFd>(
         Ok(data) => data,
         Err(_) => return false,
     };
-    match decode_spin_data_encryption_caps(&data) {
-        Ok(_) => true,
-        Err(_) => false,
-    }
+    decode_spin_data_encryption_caps(&data).is_ok()
 }

 /// Set or clear encryption key
@@ -85,12 +85,12 @@ pub fn linux_tape_changer_list() -> Vec<TapeDeviceInfo> {
         let vendor = device.property_value("ID_VENDOR")
             .map(std::ffi::OsString::from)
             .and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
-            .unwrap_or(String::from("unknown"));
+            .unwrap_or_else(|| String::from("unknown"));

         let model = device.property_value("ID_MODEL")
             .map(std::ffi::OsString::from)
             .and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
-            .unwrap_or(String::from("unknown"));
+            .unwrap_or_else(|| String::from("unknown"));

         let dev_path = format!("/dev/tape/by-id/scsi-{}", serial);

@@ -166,12 +166,12 @@ pub fn linux_tape_device_list() -> Vec<TapeDeviceInfo> {
         let vendor = device.property_value("ID_VENDOR")
             .map(std::ffi::OsString::from)
             .and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
-            .unwrap_or(String::from("unknown"));
+            .unwrap_or_else(|| String::from("unknown"));

         let model = device.property_value("ID_MODEL")
             .map(std::ffi::OsString::from)
             .and_then(|s| if let Ok(s) = s.into_string() { Some(s) } else { None })
-            .unwrap_or(String::from("unknown"));
+            .unwrap_or_else(|| String::from("unknown"));

         let dev_path = format!("/dev/tape/by-id/scsi-{}-nst", serial);

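The shape of these udev property chains: convert an `OsString` to `String` where possible and fall back to "unknown" lazily, so the fallback string is only allocated when a property is missing. A standalone sketch, with a plain `Option<OsString>` standing in for `device.property_value` (note that `.ok()` is equivalent to the `if let Ok(s) … Some(s) … None` closure in the hunks above):

    use std::ffi::OsString;

    fn to_string_or_unknown(prop: Option<OsString>) -> String {
        prop.and_then(|s| s.into_string().ok()) // non-UTF-8 data becomes None
            .unwrap_or_else(|| String::from("unknown")) // fallback allocated lazily
    }

    fn main() {
        assert_eq!(to_string_or_unknown(Some(OsString::from("QUANTUM"))), "QUANTUM");
        assert_eq!(to_string_or_unknown(None), "unknown");
    }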
@@ -98,16 +98,14 @@ impl LinuxTapeDrive {

         if drive_status.blocksize == 0 {
             // device is variable block size - OK
-        } else {
-            if drive_status.blocksize != PROXMOX_TAPE_BLOCK_SIZE as u32 {
-                eprintln!("device is in fixed block size mode with wrong size ({} bytes)", drive_status.blocksize);
-                eprintln!("trying to set variable block size mode...");
-                if handle.set_block_size(0).is_err() {
-                    bail!("set variable block size mod failed - device uses wrong blocksize.");
-                }
-            } else {
-                // device is in fixed block size mode with correct block size
+        } else if drive_status.blocksize != PROXMOX_TAPE_BLOCK_SIZE as u32 {
+            eprintln!("device is in fixed block size mode with wrong size ({} bytes)", drive_status.blocksize);
+            eprintln!("trying to set variable block size mode...");
+            if handle.set_block_size(0).is_err() {
+                bail!("set variable block size mod failed - device uses wrong blocksize.");
             }
+        } else {
+            // device is in fixed block size mode with correct block size
         }

         // Only root can set driver options, so we cannot
@@ -528,7 +526,7 @@ impl TapeDriver for LinuxTapeHandle {
         let result: Result<u64, String> = serde_json::from_str(&output)?;
         result
             .map_err(|err| format_err!("{}", err))
-            .map(|bits| TapeAlertFlags::from_bits_truncate(bits))
+            .map(TapeAlertFlags::from_bits_truncate)
     }

     /// Set or clear encryption key
@@ -32,7 +32,7 @@ enum MamFormat {
     DEC,
 }

-static MAM_ATTRIBUTES: &'static [ (u16, u16, MamFormat, &'static str) ] = &[
+static MAM_ATTRIBUTES: &[ (u16, u16, MamFormat, &str) ] = &[
     (0x00_00, 8, MamFormat::DEC, "Remaining Capacity In Partition"),
     (0x00_01, 8, MamFormat::DEC, "Maximum Capacity In Partition"),
     (0x00_02, 8, MamFormat::DEC, "Tapealert Flags"),
@@ -258,13 +258,13 @@ pub fn required_media_changer(
 ) -> Result<(Box<dyn MediaChange>, String), Error> {
     match media_changer(config, drive) {
         Ok(Some(result)) => {
-            return Ok(result);
+            Ok(result)
         }
         Ok(None) => {
             bail!("drive '{}' has no associated changer device", drive);
         },
         Err(err) => {
-            return Err(err);
+            Err(err)
         }
     }
 }
@@ -339,7 +339,7 @@ pub fn request_and_load_media(

             let media_id = check_label(handle.as_mut(), &label.uuid)?;

-            return Ok((handle, media_id));
+            Ok((handle, media_id))
         }
         "linux" => {
             let drive_config = LinuxTapeDrive::deserialize(config)?;
@@ -390,20 +390,18 @@ pub fn request_and_load_media(
                             media_id.label.uuid.to_string(),
                         ));
                         return Ok((Box::new(handle), media_id));
-                    } else {
-                        if Some(media_id.label.uuid.clone()) != last_media_uuid {
-                            worker.log(format!(
-                                "wrong media label {} ({})",
-                                media_id.label.label_text,
-                                media_id.label.uuid.to_string(),
-                            ));
-                            last_media_uuid = Some(media_id.label.uuid);
-                        }
+                    } else if Some(media_id.label.uuid.clone()) != last_media_uuid {
+                        worker.log(format!(
+                            "wrong media label {} ({})",
+                            media_id.label.label_text,
+                            media_id.label.uuid.to_string(),
+                        ));
+                        last_media_uuid = Some(media_id.label.uuid);
                     }
                 }
                 Ok((None, _)) => {
                     if last_media_uuid.is_some() {
-                        worker.log(format!("found empty media without label (please label all tapes first)"));
+                        worker.log("found empty media without label (please label all tapes first)".to_string());
                         last_media_uuid = None;
                     }
                 }
@@ -17,6 +17,7 @@ bitflags::bitflags!{
     ///
     /// See LTO SCSI Reference LOG_SENSE - LP 2Eh: TapeAlerts
     pub struct TapeAlertFlags: u64 {
+        #[allow(clippy::eq_op)]
         const READ_WARNING = 1 << (0x0001 -1);
         const WRITE_WARNING = 1 << (0x0002 -1);
         const HARD_ERROR = 1 << (0x0003 -1);
@@ -168,8 +168,8 @@ impl VirtualTapeHandle {
             if path.is_file() && path.extension() == Some(std::ffi::OsStr::new("json")) {
                 if let Some(name) = path.file_stem() {
                     if let Some(name) = name.to_str() {
-                        if name.starts_with("tape-") {
-                            list.push(name[5..].to_string());
+                        if let Some(label) = name.strip_prefix("tape-") {
+                            list.push(label.to_string());
                         }
                     }
                 }
@@ -95,19 +95,16 @@ fn decode_volume_statistics(data: &[u8]) -> Result<Lp17VolumeStatistics, Error>

     let read_be_counter = |reader: &mut &[u8], len: u8| {
         let len = len as usize;

         if len == 0 || len > 8 {
             bail!("invalid conter size '{}'", len);
         }
         let mut buffer = [0u8; 8];
         reader.read_exact(&mut buffer[..len])?;

-        let mut value: u64 = 0;
-
-        for i in 0..len {
-            value = value << 8;
-            value = value | buffer[i] as u64;
-        }
+        let value = buffer
+            .iter()
+            .take(len)
+            .fold(0, |value, curr| (value << 8) | *curr as u64);

         Ok(value)
     };
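The fold above is a standard big-endian accumulator: shift the running value left one byte, then or in the next byte. A self-contained check of the same expression:

    fn be_counter(buffer: &[u8], len: usize) -> u64 {
        buffer
            .iter()
            .take(len)
            .fold(0, |value, curr| (value << 8) | *curr as u64)
    }

    fn main() {
        // two bytes 0x01 0x02 decode to 0x0102; bytes past `len` are ignored
        assert_eq!(be_counter(&[0x01, 0x02, 0xff, 0xff], 2), 0x0102);
        // full 8-byte counter
        assert_eq!(be_counter(&[0, 0, 0, 0, 0, 0, 0x01, 0x00], 8), 256);
    }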
@@ -81,10 +81,8 @@ impl <R: Read> BlockedReader<R> {

         if size > buffer.payload.len() {
             proxmox::io_bail!("detected tape block with wrong payload size ({} > {}", size, buffer.payload.len());
-        } else if size == 0 {
-            if !found_end_marker{
-                proxmox::io_bail!("detected tape block with zero payload size");
-            }
+        } else if size == 0 && !found_end_marker {
+            proxmox::io_bail!("detected tape block with zero payload size");
         }

@@ -179,7 +177,7 @@ impl <R: Read> Read for BlockedReader<R> {
         }

         if rest <= 0 {
-            return Ok(0);
+            Ok(0)
         } else {
             let copy_len = if (buffer.len() as isize) < rest {
                 buffer.len()
@@ -189,7 +187,7 @@ impl <R: Read> Read for BlockedReader<R> {
             buffer[..copy_len].copy_from_slice(
                 &self.buffer.payload[self.read_pos..(self.read_pos + copy_len)]);
             self.read_pos += copy_len;
-            return Ok(copy_len);
+            Ok(copy_len)
         }
     }
 }
@@ -77,7 +77,7 @@ impl <W: Write> BlockedWriter<W> {
             self.bytes_written += BlockHeader::SIZE;

         } else {
-            self.buffer_pos = self.buffer_pos + bytes;
+            self.buffer_pos += bytes;
         }

         Ok(bytes)
@@ -50,7 +50,7 @@ impl SnapshotReader {
             }
         };

-        let mut client_log_path = snapshot_path.clone();
+        let mut client_log_path = snapshot_path;
         client_log_path.push(CLIENT_LOG_BLOB_NAME);

         let mut file_list = Vec::new();
@@ -215,12 +215,13 @@ impl Inventory {

     /// find media by label_text
     pub fn find_media_by_label_text(&self, label_text: &str) -> Option<&MediaId> {
-        for (_uuid, entry) in &self.map {
+        self.map.values().find_map(|entry| {
             if entry.id.label.label_text == label_text {
-                return Some(&entry.id);
+                Some(&entry.id)
+            } else {
+                None
             }
-        }
-        None
+        })
     }

     /// Lookup media pool
@@ -245,7 +246,7 @@ impl Inventory {
     pub fn list_pool_media(&self, pool: &str) -> Vec<MediaId> {
         let mut list = Vec::new();

-        for (_uuid, entry) in &self.map {
+        for entry in self.map.values() {
             match entry.id.media_set_label {
                 None => continue, // not assigned to any pool
                 Some(ref set) => {
@@ -272,7 +273,7 @@ impl Inventory {
     pub fn list_used_media(&self) -> Vec<MediaId> {
         let mut list = Vec::new();

-        for (_uuid, entry) in &self.map {
+        for entry in self.map.values() {
             match entry.id.media_set_label {
                 None => continue, // not assigned to any pool
                 Some(ref set) => {
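`find_map` stops at the first entry for which the closure returns `Some`, so it is a drop-in for the explicit loop-with-early-return it replaces above. Sketch with a plain `HashMap` (the tuple entries stand in for `MediaId`):

    use std::collections::HashMap;

    fn main() {
        let mut map: HashMap<u32, (&str, &str)> = HashMap::new();
        map.insert(1, ("tape-a", "pool0"));
        map.insert(2, ("tape-b", "pool1"));

        let label = "tape-b";
        // loop version:
        // for entry in map.values() {
        //     if entry.0 == label { return Some(entry); }
        // }
        // None
        let found = map.values().find_map(|entry| {
            if entry.0 == label {
                Some(entry)
            } else {
                None
            }
        });

        assert_eq!(found, Some(&("tape-b", "pool1")));
    }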
@@ -288,19 +289,17 @@ impl Inventory {

     /// List media not assigned to any pool
     pub fn list_unassigned_media(&self) -> Vec<MediaId> {
-        let mut list = Vec::new();
-
-        for (_uuid, entry) in &self.map {
+        self.map.values().filter_map(|entry|
             if entry.id.media_set_label.is_none() {
-                list.push(entry.id.clone());
+                Some(entry.id.clone())
+            } else {
+                None
             }
-        }
-
-        list
+        ).collect()
     }

     pub fn media_set_start_time(&self, media_set_uuid: &Uuid) -> Option<i64> {
-        self.media_set_start_times.get(media_set_uuid).map(|t| *t)
+        self.media_set_start_times.get(media_set_uuid).copied()
     }

     /// Lookup media set pool
@@ -383,7 +382,7 @@ impl Inventory {

         let set_list = self.map.values()
             .filter_map(|entry| entry.id.media_set_label.as_ref())
-            .filter(|set| &set.pool == &pool && set.uuid.as_ref() != [0u8;16]);
+            .filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8;16]);

         for set in set_list {
             match last_set {
@@ -406,7 +405,7 @@ impl Inventory {
         // consistency check - must be the only set with that ctime
         let set_list = self.map.values()
             .filter_map(|entry| entry.id.media_set_label.as_ref())
-            .filter(|set| &set.pool == &pool && set.uuid.as_ref() != [0u8;16]);
+            .filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8;16]);

         for set in set_list {
             if set.uuid != uuid && set.ctime >= ctime { // should not happen
@@ -437,7 +436,7 @@ impl Inventory {

         let set_list = self.map.values()
             .filter_map(|entry| entry.id.media_set_label.as_ref())
-            .filter(|set| (&set.uuid != media_set_uuid) && (&set.pool == &pool));
+            .filter(|set| (&set.uuid != media_set_uuid) && (set.pool == pool));

         let mut next_ctime = None;

@@ -522,7 +521,7 @@ impl Inventory {
     ) -> Result<String, Error> {

         if let Some(ctime) = self.media_set_start_time(media_set_uuid) {
-            let mut template = template.unwrap_or(String::from("%c"));
+            let mut template = template.unwrap_or_else(|| String::from("%c"));
             template = template.replace("%id%", &media_set_uuid.to_string());
             proxmox::tools::time::strftime_local(&template, ctime)
         } else {
@@ -675,20 +674,18 @@ impl Inventory {
         for (uuid, entry) in self.map.iter_mut() {
             if let Some(changer_name) = online_map.lookup_changer(uuid) {
                 entry.location = Some(MediaLocation::Online(changer_name.to_string()));
-            } else {
-                if let Some(MediaLocation::Online(ref changer_name)) = entry.location {
-                    match online_map.online_map(changer_name) {
-                        None => {
-                            // no such changer device
-                            entry.location = Some(MediaLocation::Offline);
-                        }
-                        Some(None) => {
-                            // got no info - do nothing
-                        }
-                        Some(Some(_)) => {
-                            // media changer changed
-                            entry.location = Some(MediaLocation::Offline);
-                        }
+            } else if let Some(MediaLocation::Online(ref changer_name)) = entry.location {
+                match online_map.online_map(changer_name) {
+                    None => {
+                        // no such changer device
+                        entry.location = Some(MediaLocation::Offline);
+                    }
+                    Some(None) => {
+                        // got no info - do nothing
+                    }
+                    Some(Some(_)) => {
+                        // media changer changed
+                        entry.location = Some(MediaLocation::Offline);
                     }
                 }
             }
@@ -323,7 +323,7 @@ impl MediaCatalog {

     /// Returns the chunk archive file number
     pub fn lookup_snapshot(&self, snapshot: &str) -> Option<u64> {
-        self.snapshot_index.get(snapshot).map(|n| *n)
+        self.snapshot_index.get(snapshot).copied()
    }

     /// Test if the catalog already contain a chunk
@@ -333,7 +333,7 @@ impl MediaCatalog {

     /// Returns the chunk archive file number
     pub fn lookup_chunk(&self, digest: &[u8;32]) -> Option<u64> {
-        self.chunk_index.get(digest).map(|n| *n)
+        self.chunk_index.get(digest).copied()
     }

     fn check_register_label(&self, file_number: u64) -> Result<(), Error> {
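`Option<&u64>` to `Option<u64>`: `.copied()` is the idiomatic replacement for `.map(|n| *n)` whenever the pointee is `Copy`. Sketch:

    use std::collections::HashMap;

    fn main() {
        let mut snapshot_index: HashMap<&str, u64> = HashMap::new();
        snapshot_index.insert("vm/100/2020-12-01", 3);

        let a: Option<u64> = snapshot_index.get("vm/100/2020-12-01").map(|n| *n);
        let b: Option<u64> = snapshot_index.get("vm/100/2020-12-01").copied();

        assert_eq!(a, b);
        assert_eq!(b, Some(3));
    }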
Some files were not shown because too many files have changed in this diff.