tree-wide: fix needless borrows

found and fixed via clippy

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
parent a0c69902c8
commit 9a37bd6c84
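For context: every hunk below is the same mechanical change, driven by clippy's `needless_borrow` lint. The lint fires when code takes a reference to an expression that is already a reference (or that the compiler would auto-deref anyway), producing a `&&T` that gets immediately dereferenced back to `&T`. A minimal sketch of the pattern and its fix follows; `Config`, `write_config`, and `save` are illustrative stand-ins, not items from this repository:

// Sketch of the clippy::needless_borrow pattern fixed tree-wide in this
// commit. All names here are hypothetical examples.
struct Config;

fn write_config(_config: &Config) {}

fn save(config: &Config) {
    // Needless borrow: `config` is already a `&Config`, so `&config` is a
    // `&&Config` that the compiler must auto-deref on the call below.
    // write_config(&config);

    // Fixed form, matching the `-`/`+` pairs in the diff:
    write_config(config);
}

fn main() {
    save(&Config);
}

On recent toolchains, `cargo clippy --fix` can apply most of these rewrites automatically.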
@ -556,10 +556,7 @@ impl Authid {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn tokenname(&self) -> Option<&TokennameRef> {
|
pub fn tokenname(&self) -> Option<&TokennameRef> {
|
||||||
match &self.tokenname {
|
self.tokenname.as_deref()
|
||||||
Some(name) => Some(&name),
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the "root@pam" auth id.
|
/// Get the "root@pam" auth id.
|
||||||
|
|
|
@ -37,7 +37,7 @@ impl BackupRepository {
|
||||||
return auth_id;
|
return auth_id;
|
||||||
}
|
}
|
||||||
|
|
||||||
&Authid::root_auth_id()
|
Authid::root_auth_id()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn user(&self) -> &Userid {
|
pub fn user(&self) -> &Userid {
|
||||||
|
|
|
@ -321,7 +321,7 @@ impl BackupWriter {
|
||||||
self.h2.clone(),
|
self.h2.clone(),
|
||||||
wid,
|
wid,
|
||||||
stream,
|
stream,
|
||||||
&prefix,
|
prefix,
|
||||||
known_chunks.clone(),
|
known_chunks.clone(),
|
||||||
if options.encrypt {
|
if options.encrypt {
|
||||||
self.crypt_config.clone()
|
self.crypt_config.clone()
|
||||||
|
|
|
@ -529,7 +529,7 @@ impl Shell {
|
||||||
};
|
};
|
||||||
|
|
||||||
let new_stack =
|
let new_stack =
|
||||||
Self::lookup(&stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
|
Self::lookup(stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
|
||||||
|
|
||||||
*stack = new_stack;
|
*stack = new_stack;
|
||||||
|
|
||||||
|
@ -993,7 +993,7 @@ impl Shell {
|
||||||
&mut self.catalog,
|
&mut self.catalog,
|
||||||
dir_stack,
|
dir_stack,
|
||||||
extractor,
|
extractor,
|
||||||
&match_list,
|
match_list,
|
||||||
&self.accessor,
|
&self.accessor,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
@ -1118,7 +1118,7 @@ impl<'a> ExtractorState<'a> {
|
||||||
self.path_len_stack.push(self.path_len);
|
self.path_len_stack.push(self.path_len);
|
||||||
self.path_len = self.path.len();
|
self.path_len = self.path.len();
|
||||||
|
|
||||||
Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
|
Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
|
||||||
let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
|
let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
|
||||||
let dir_meta = dir_pxar.entry().metadata().clone();
|
let dir_meta = dir_pxar.entry().metadata().clone();
|
||||||
let create = self.matches && match_result != Some(MatchType::Exclude);
|
let create = self.matches && match_result != Some(MatchType::Exclude);
|
||||||
|
@ -1141,7 +1141,7 @@ impl<'a> ExtractorState<'a> {
|
||||||
}
|
}
|
||||||
(true, DirEntryAttribute::File { .. }) => {
|
(true, DirEntryAttribute::File { .. }) => {
|
||||||
self.dir_stack.push(PathStackEntry::new(entry));
|
self.dir_stack.push(PathStackEntry::new(entry));
|
||||||
let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
|
let file = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
|
||||||
self.extract_file(file).await?;
|
self.extract_file(file).await?;
|
||||||
self.dir_stack.pop();
|
self.dir_stack.pop();
|
||||||
}
|
}
|
||||||
|
@ -1153,7 +1153,7 @@ impl<'a> ExtractorState<'a> {
|
||||||
| (true, DirEntryAttribute::Hardlink) => {
|
| (true, DirEntryAttribute::Hardlink) => {
|
||||||
let attr = entry.attr.clone();
|
let attr = entry.attr.clone();
|
||||||
self.dir_stack.push(PathStackEntry::new(entry));
|
self.dir_stack.push(PathStackEntry::new(entry));
|
||||||
let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
|
let file = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
|
||||||
self.extract_special(file, attr).await?;
|
self.extract_special(file, attr).await?;
|
||||||
self.dir_stack.pop();
|
self.dir_stack.pop();
|
||||||
}
|
}
|
||||||
|
|
|
@ -547,7 +547,7 @@ impl Archiver {
|
||||||
None => return Ok(()),
|
None => return Ok(()),
|
||||||
};
|
};
|
||||||
|
|
||||||
let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags)?;
|
let metadata = get_metadata(fd.as_raw_fd(), stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags)?;
|
||||||
|
|
||||||
if self
|
if self
|
||||||
.patterns
|
.patterns
|
||||||
|
@ -629,14 +629,14 @@ impl Archiver {
|
||||||
catalog.lock().unwrap().add_block_device(c_file_name)?;
|
catalog.lock().unwrap().add_block_device(c_file_name)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.add_device(encoder, file_name, &metadata, &stat).await
|
self.add_device(encoder, file_name, &metadata, stat).await
|
||||||
}
|
}
|
||||||
mode::IFCHR => {
|
mode::IFCHR => {
|
||||||
if let Some(ref catalog) = self.catalog {
|
if let Some(ref catalog) = self.catalog {
|
||||||
catalog.lock().unwrap().add_char_device(c_file_name)?;
|
catalog.lock().unwrap().add_char_device(c_file_name)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.add_device(encoder, file_name, &metadata, &stat).await
|
self.add_device(encoder, file_name, &metadata, stat).await
|
||||||
}
|
}
|
||||||
other => bail!(
|
other => bail!(
|
||||||
"encountered unknown file type: 0x{:x} (0o{:o})",
|
"encountered unknown file type: 0x{:x} (0o{:o})",
|
||||||
|
@ -656,7 +656,7 @@ impl Archiver {
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let dir_name = OsStr::from_bytes(dir_name.to_bytes());
|
let dir_name = OsStr::from_bytes(dir_name.to_bytes());
|
||||||
|
|
||||||
let mut encoder = encoder.create_directory(dir_name, &metadata).await?;
|
let mut encoder = encoder.create_directory(dir_name, metadata).await?;
|
||||||
|
|
||||||
let old_fs_magic = self.fs_magic;
|
let old_fs_magic = self.fs_magic;
|
||||||
let old_fs_feature_flags = self.fs_feature_flags;
|
let old_fs_feature_flags = self.fs_feature_flags;
|
||||||
|
@ -820,17 +820,17 @@ fn get_xattr_fcaps_acl(
|
||||||
};
|
};
|
||||||
|
|
||||||
for attr in &xattrs {
|
for attr in &xattrs {
|
||||||
if xattr::is_security_capability(&attr) {
|
if xattr::is_security_capability(attr) {
|
||||||
get_fcaps(meta, fd, flags, fs_feature_flags)?;
|
get_fcaps(meta, fd, flags, fs_feature_flags)?;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if xattr::is_acl(&attr) {
|
if xattr::is_acl(attr) {
|
||||||
get_acl(meta, proc_path, flags, fs_feature_flags)?;
|
get_acl(meta, proc_path, flags, fs_feature_flags)?;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if !xattr::is_valid_xattr_name(&attr) {
|
if !xattr::is_valid_xattr_name(attr) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -649,7 +649,7 @@ impl SessionImpl {
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
|
fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
|
||||||
to_entry_param(to_inode(&entry), &entry)
|
to_entry_param(to_inode(entry), entry)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
|
|
|
@ -50,7 +50,7 @@ impl RemoteChunkReader {
|
||||||
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
|
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
|
||||||
|
|
||||||
self.client
|
self.client
|
||||||
.download_chunk(&digest, &mut chunk_data)
|
.download_chunk(digest, &mut chunk_data)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
|
let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
|
||||||
|
|
|
@ -110,7 +110,7 @@ pub async fn view_task_result(
|
||||||
display_task_log(client, upid, true).await?;
|
display_task_log(client, upid, true).await?;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
format_and_print_result(&data, &output_format);
|
format_and_print_result(data, output_format);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
|
|
@ -319,7 +319,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
|
||||||
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||||
complete_server_file_name(arg, param)
|
complete_server_file_name(arg, param)
|
||||||
.iter()
|
.iter()
|
||||||
.map(|v| pbs_tools::format::strip_server_file_extension(&v).to_owned())
|
.map(|v| pbs_tools::format::strip_server_file_extension(v).to_owned())
|
||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -109,7 +109,7 @@ impl CachedUserInfo {
|
||||||
required_privs: u64,
|
required_privs: u64,
|
||||||
partial: bool,
|
partial: bool,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let privs = self.lookup_privs(&auth_id, path);
|
let privs = self.lookup_privs(auth_id, path);
|
||||||
let allowed = if partial {
|
let allowed = if partial {
|
||||||
(privs & required_privs) != 0
|
(privs & required_privs) != 0
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -45,7 +45,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(DATASTORE_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(DATASTORE_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(DATASTORE_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(DATASTORE_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -46,7 +46,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(DOMAINS_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(DOMAINS_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -81,7 +81,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
/// Save the configuration file
|
/// Save the configuration file
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(DRIVE_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(DRIVE_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(DRIVE_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(DRIVE_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -40,7 +40,7 @@ impl KeyDerivationConfig {
|
||||||
// estimated scrypt memory usage is 128*r*n*p
|
// estimated scrypt memory usage is 128*r*n*p
|
||||||
openssl::pkcs5::scrypt(
|
openssl::pkcs5::scrypt(
|
||||||
passphrase,
|
passphrase,
|
||||||
&salt,
|
salt,
|
||||||
*n, *r, *p,
|
*n, *r, *p,
|
||||||
1025*1024*1024,
|
1025*1024*1024,
|
||||||
&mut key,
|
&mut key,
|
||||||
|
@ -52,7 +52,7 @@ impl KeyDerivationConfig {
|
||||||
|
|
||||||
openssl::pkcs5::pbkdf2_hmac(
|
openssl::pkcs5::pbkdf2_hmac(
|
||||||
passphrase,
|
passphrase,
|
||||||
&salt,
|
salt,
|
||||||
*iter,
|
*iter,
|
||||||
openssl::hash::MessageDigest::sha256(),
|
openssl::hash::MessageDigest::sha256(),
|
||||||
&mut key,
|
&mut key,
|
||||||
|
@ -235,10 +235,10 @@ impl KeyConfig {
|
||||||
openssl::symm::decrypt_aead(
|
openssl::symm::decrypt_aead(
|
||||||
cipher,
|
cipher,
|
||||||
&derived_key,
|
&derived_key,
|
||||||
Some(&iv),
|
Some(iv),
|
||||||
b"",
|
b"",
|
||||||
&enc_data,
|
enc_data,
|
||||||
&tag,
|
tag,
|
||||||
).map_err(|err| {
|
).map_err(|err| {
|
||||||
match self.hint {
|
match self.hint {
|
||||||
Some(ref hint) => {
|
Some(ref hint) => {
|
||||||
|
|
|
@ -59,7 +59,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
/// Save the configuration file
|
/// Save the configuration file
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(MEDIA_POOL_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(MEDIA_POOL_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(MEDIA_POOL_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(MEDIA_POOL_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -95,7 +95,7 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
|
||||||
).unwrap();
|
).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(caps) = CIDR_V4_REGEX.captures(&cidr) {
|
if let Some(caps) = CIDR_V4_REGEX.captures(cidr) {
|
||||||
let address = &caps[1];
|
let address = &caps[1];
|
||||||
if let Some(mask) = caps.get(2) {
|
if let Some(mask) = caps.get(2) {
|
||||||
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
||||||
|
@ -104,7 +104,7 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
|
||||||
} else {
|
} else {
|
||||||
Ok((address.to_string(), None, false))
|
Ok((address.to_string(), None, false))
|
||||||
}
|
}
|
||||||
} else if let Some(caps) = CIDR_V6_REGEX.captures(&cidr) {
|
} else if let Some(caps) = CIDR_V6_REGEX.captures(cidr) {
|
||||||
let address = &caps[1];
|
let address = &caps[1];
|
||||||
if let Some(mask) = caps.get(2) {
|
if let Some(mask) = caps.get(2) {
|
||||||
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
let mask = u8::from_str_radix(mask.as_str(), 10)?;
|
||||||
|
|
|
@ -46,7 +46,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(REMOTE_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(REMOTE_CFG_FILENAME, config)?;
|
||||||
crate::replace_backup_config(REMOTE_CFG_FILENAME, raw.as_bytes())
|
crate::replace_backup_config(REMOTE_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -47,7 +47,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(SYNC_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(SYNC_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(SYNC_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(SYNC_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -45,7 +45,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(TAPE_JOB_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(TAPE_JOB_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(TAPE_JOB_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(TAPE_JOB_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -58,7 +58,7 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
|
||||||
let data = read_file()?;
|
let data = read_file()?;
|
||||||
match data.get(tokenid) {
|
match data.get(tokenid) {
|
||||||
Some(hashed_secret) => {
|
Some(hashed_secret) => {
|
||||||
proxmox_sys::crypt::verify_crypt_pw(secret, &hashed_secret)
|
proxmox_sys::crypt::verify_crypt_pw(secret, hashed_secret)
|
||||||
},
|
},
|
||||||
None => bail!("invalid API token"),
|
None => bail!("invalid API token"),
|
||||||
}
|
}
|
||||||
|
|
|
@ -54,7 +54,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
|
|
||||||
/// Save the configuration file
|
/// Save the configuration file
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(TRAFFIC_CONTROL_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(TRAFFIC_CONTROL_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(TRAFFIC_CONTROL_CFG_FILENAME, raw.as_bytes())?;
|
replace_backup_config(TRAFFIC_CONTROL_CFG_FILENAME, raw.as_bytes())?;
|
||||||
|
|
||||||
// increase traffic control version
|
// increase traffic control version
|
||||||
|
@ -88,7 +88,7 @@ mod test {
|
||||||
timeframe mon..wed 8:00-16:30
|
timeframe mon..wed 8:00-16:30
|
||||||
timeframe fri 9:00-12:00
|
timeframe fri 9:00-12:00
|
||||||
";
|
";
|
||||||
let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, &content)?;
|
let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, content)?;
|
||||||
eprintln!("GOT {:?}", data);
|
eprintln!("GOT {:?}", data);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
|
|
@ -117,7 +117,7 @@ pub fn cached_config() -> Result<Arc<SectionConfigData>, Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(USER_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(USER_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(USER_CFG_FILENAME, raw.as_bytes())?;
|
replace_backup_config(USER_CFG_FILENAME, raw.as_bytes())?;
|
||||||
|
|
||||||
// increase user version
|
// increase user version
|
||||||
|
|
|
@ -46,7 +46,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||||
let raw = CONFIG.write(VERIFICATION_CFG_FILENAME, &config)?;
|
let raw = CONFIG.write(VERIFICATION_CFG_FILENAME, config)?;
|
||||||
replace_backup_config(VERIFICATION_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(VERIFICATION_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -160,7 +160,7 @@ impl BackupGroup {
|
||||||
|
|
||||||
pub fn matches(&self, filter: &GroupFilter) -> bool {
|
pub fn matches(&self, filter: &GroupFilter) -> bool {
|
||||||
match filter {
|
match filter {
|
||||||
GroupFilter::Group(backup_group) => match BackupGroup::from_str(&backup_group) {
|
GroupFilter::Group(backup_group) => match BackupGroup::from_str(backup_group) {
|
||||||
Ok(group) => &group == self,
|
Ok(group) => &group == self,
|
||||||
Err(_) => false, // shouldn't happen if value is schema-checked
|
Err(_) => false, // shouldn't happen if value is schema-checked
|
||||||
},
|
},
|
||||||
|
|
|
@ -506,7 +506,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||||
if let Some(entry) = self.lookup(¤t, comp)? {
|
if let Some(entry) = self.lookup(¤t, comp)? {
|
||||||
current = entry;
|
current = entry;
|
||||||
} else {
|
} else {
|
||||||
bail!("path {:?} not found in catalog", String::from_utf8_lossy(&path));
|
bail!("path {:?} not found in catalog", String::from_utf8_lossy(path));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(current)
|
Ok(current)
|
||||||
|
@ -612,7 +612,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||||
file_path.extend(&e.name);
|
file_path.extend(&e.name);
|
||||||
match match_list.matches(&file_path, e.get_file_mode()) {
|
match match_list.matches(&file_path, e.get_file_mode()) {
|
||||||
Some(MatchType::Exclude) => continue,
|
Some(MatchType::Exclude) => continue,
|
||||||
Some(MatchType::Include) => callback(&file_path)?,
|
Some(MatchType::Include) => callback(file_path)?,
|
||||||
None => (),
|
None => (),
|
||||||
}
|
}
|
||||||
if is_dir {
|
if is_dir {
|
||||||
|
|
|
@ -123,7 +123,7 @@ impl DataBlob {
|
||||||
raw_data.write_le_value(dummy_head)?;
|
raw_data.write_le_value(dummy_head)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let (iv, tag) = Self::encrypt_to(&config, data, &mut raw_data)?;
|
let (iv, tag) = Self::encrypt_to(config, data, &mut raw_data)?;
|
||||||
|
|
||||||
let head = EncryptedDataBlobHeader {
|
let head = EncryptedDataBlobHeader {
|
||||||
head: DataBlobHeader { magic, crc: [0; 4] }, iv, tag,
|
head: DataBlobHeader { magic, crc: [0; 4] }, iv, tag,
|
||||||
|
@ -491,7 +491,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
|
||||||
|
|
||||||
fn compute_digest(&mut self) {
|
fn compute_digest(&mut self) {
|
||||||
if !self.digest_computed {
|
if !self.digest_computed {
|
||||||
if let Some(ref config) = self.config {
|
if let Some(config) = self.config {
|
||||||
self.digest = config.compute_digest(self.orig_data);
|
self.digest = config.compute_digest(self.orig_data);
|
||||||
} else {
|
} else {
|
||||||
self.digest = openssl::sha::sha256(self.orig_data);
|
self.digest = openssl::sha::sha256(self.orig_data);
|
||||||
|
@ -531,7 +531,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
|
||||||
) -> Result<(DataBlob, [u8; 32]), Error> {
|
) -> Result<(DataBlob, [u8; 32]), Error> {
|
||||||
let zero_bytes = vec![0; chunk_size];
|
let zero_bytes = vec![0; chunk_size];
|
||||||
let mut chunk_builder = DataChunkBuilder::new(&zero_bytes).compress(compress);
|
let mut chunk_builder = DataChunkBuilder::new(&zero_bytes).compress(compress);
|
||||||
if let Some(ref crypt_config) = crypt_config {
|
if let Some(crypt_config) = crypt_config {
|
||||||
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -839,7 +839,7 @@ impl DataStore {
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let _guard = self.lock_manifest(backup_dir)?;
|
let _guard = self.lock_manifest(backup_dir)?;
|
||||||
let (mut manifest, _) = self.load_manifest(&backup_dir)?;
|
let (mut manifest, _) = self.load_manifest(backup_dir)?;
|
||||||
|
|
||||||
update_fn(&mut manifest);
|
update_fn(&mut manifest);
|
||||||
|
|
||||||
|
@ -919,7 +919,7 @@ impl DataStore {
|
||||||
}
|
}
|
||||||
|
|
||||||
// sorting by inode improves data locality, which makes it lots faster on spinners
|
// sorting by inode improves data locality, which makes it lots faster on spinners
|
||||||
chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));
|
chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b));
|
||||||
|
|
||||||
Ok(chunk_list)
|
Ok(chunk_list)
|
||||||
}
|
}
|
||||||
|
|
|
@ -214,7 +214,7 @@ impl BackupManifest {
|
||||||
let json: Value = serde_json::from_slice(data)?;
|
let json: Value = serde_json::from_slice(data)?;
|
||||||
let signature = json["signature"].as_str().map(String::from);
|
let signature = json["signature"].as_str().map(String::from);
|
||||||
|
|
||||||
if let Some(ref crypt_config) = crypt_config {
|
if let Some(crypt_config) = crypt_config {
|
||||||
if let Some(signature) = signature {
|
if let Some(signature) = signature {
|
||||||
let expected_signature = hex::encode(&Self::json_signature(&json, crypt_config)?);
|
let expected_signature = hex::encode(&Self::json_signature(&json, crypt_config)?);
|
||||||
|
|
||||||
|
|
|
@ -53,7 +53,7 @@ pub fn generate_paper_key<W: Write>(
|
||||||
|
|
||||||
(lines, true)
|
(lines, true)
|
||||||
} else {
|
} else {
|
||||||
match serde_json::from_str::<KeyConfig>(&data) {
|
match serde_json::from_str::<KeyConfig>(data) {
|
||||||
Ok(key_config) => {
|
Ok(key_config) => {
|
||||||
let lines = serde_json::to_string_pretty(&key_config)?
|
let lines = serde_json::to_string_pretty(&key_config)?
|
||||||
.lines()
|
.lines()
|
||||||
|
@ -216,7 +216,7 @@ fn paperkey_text<W: Write>(
|
||||||
}
|
}
|
||||||
writeln!(output, "-----END PROXMOX BACKUP KEY-----")?;
|
writeln!(output, "-----END PROXMOX BACKUP KEY-----")?;
|
||||||
|
|
||||||
let qr_code = generate_qr_code("utf8i", &lines)?;
|
let qr_code = generate_qr_code("utf8i", lines)?;
|
||||||
let qr_code = String::from_utf8(qr_code)
|
let qr_code = String::from_utf8(qr_code)
|
||||||
.map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
|
.map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
|
||||||
|
|
||||||
|
|
|
@ -44,7 +44,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
||||||
for info in list {
|
for info in list {
|
||||||
let backup_id = info.backup_dir.relative_path();
|
let backup_id = info.backup_dir.relative_path();
|
||||||
if let Some(PruneMark::Keep) = mark.get(&backup_id) {
|
if let Some(PruneMark::Keep) = mark.get(&backup_id) {
|
||||||
let sel_id: String = select_id(&info)?;
|
let sel_id: String = select_id(info)?;
|
||||||
already_included.insert(sel_id);
|
already_included.insert(sel_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -56,7 +56,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
||||||
mark.insert(backup_id, PruneMark::Protected);
|
mark.insert(backup_id, PruneMark::Protected);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
let sel_id: String = select_id(&info)?;
|
let sel_id: String = select_id(info)?;
|
||||||
|
|
||||||
if already_included.contains(&sel_id) { continue; }
|
if already_included.contains(&sel_id) { continue; }
|
||||||
|
|
||||||
|
|
|
@ -89,7 +89,7 @@ impl SnapshotReader {
|
||||||
|
|
||||||
/// Returns an iterator for all used chunks.
|
/// Returns an iterator for all used chunks.
|
||||||
pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> {
|
pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> {
|
||||||
SnapshotChunkIterator::new(&self)
|
SnapshotChunkIterator::new(self)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -62,14 +62,14 @@ fn get_tape_handle(param: &Value) -> Result<SgTape, Error> {
|
||||||
|
|
||||||
if let Some(name) = param["drive"].as_str() {
|
if let Some(name) = param["drive"].as_str() {
|
||||||
let (config, _digest) = pbs_config::drive::config()?;
|
let (config, _digest) = pbs_config::drive::config()?;
|
||||||
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
|
let drive: LtoTapeDrive = config.lookup("lto", name)?;
|
||||||
eprintln!("using device {}", drive.path);
|
eprintln!("using device {}", drive.path);
|
||||||
return SgTape::new(open_lto_tape_device(&drive.path)?);
|
return SgTape::new(open_lto_tape_device(&drive.path)?);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(device) = param["device"].as_str() {
|
if let Some(device) = param["device"].as_str() {
|
||||||
eprintln!("using device {}", device);
|
eprintln!("using device {}", device);
|
||||||
return SgTape::new(open_lto_tape_device(&device)?);
|
return SgTape::new(open_lto_tape_device(device)?);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
|
if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
|
||||||
|
@ -94,7 +94,7 @@ fn get_tape_handle(param: &Value) -> Result<SgTape, Error> {
|
||||||
|
|
||||||
if drive_names.len() == 1 {
|
if drive_names.len() == 1 {
|
||||||
let name = drive_names[0];
|
let name = drive_names[0];
|
||||||
let drive: LtoTapeDrive = config.lookup("lto", &name)?;
|
let drive: LtoTapeDrive = config.lookup("lto", name)?;
|
||||||
eprintln!("using device {}", drive.path);
|
eprintln!("using device {}", drive.path);
|
||||||
return SgTape::new(open_lto_tape_device(&drive.path)?);
|
return SgTape::new(open_lto_tape_device(&drive.path)?);
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,7 +36,7 @@ fn get_changer_handle(param: &Value) -> Result<File, Error> {
|
||||||
|
|
||||||
if let Some(name) = param["changer"].as_str() {
|
if let Some(name) = param["changer"].as_str() {
|
||||||
let (config, _digest) = pbs_config::drive::config()?;
|
let (config, _digest) = pbs_config::drive::config()?;
|
||||||
let changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
|
let changer_config: ScsiTapeChanger = config.lookup("changer", name)?;
|
||||||
eprintln!("using device {}", changer_config.path);
|
eprintln!("using device {}", changer_config.path);
|
||||||
return sg_pt_changer::open(&changer_config.path);
|
return sg_pt_changer::open(&changer_config.path);
|
||||||
}
|
}
|
||||||
|
|
|
@ -312,7 +312,7 @@ impl MtxStatus {
|
||||||
let mut export_slots: HashSet<u64> = HashSet::new();
|
let mut export_slots: HashSet<u64> = HashSet::new();
|
||||||
|
|
||||||
if let Some(slots) = &config.export_slots {
|
if let Some(slots) = &config.export_slots {
|
||||||
let slots: Value = SLOT_ARRAY_SCHEMA.parse_property_string(&slots)?;
|
let slots: Value = SLOT_ARRAY_SCHEMA.parse_property_string(slots)?;
|
||||||
export_slots = slots
|
export_slots = slots
|
||||||
.as_array()
|
.as_array()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
|
|
@ -86,7 +86,7 @@ fn execute_scsi_command<F: AsRawFd>(
|
||||||
let mut timeout = std::time::Duration::new(5, 0); // short timeout by default
|
let mut timeout = std::time::Duration::new(5, 0); // short timeout by default
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
match sg_raw.do_command(&cmd) {
|
match sg_raw.do_command(cmd) {
|
||||||
Ok(data) => return Ok(data.to_vec()),
|
Ok(data) => return Ok(data.to_vec()),
|
||||||
Err(err) if !retry => bail!("{} failed: {}", error_prefix, err),
|
Err(err) if !retry => bail!("{} failed: {}", error_prefix, err),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
|
@ -487,7 +487,7 @@ pub fn status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {
|
||||||
let mut status = read_element_status(&mut file)
|
let mut status = read_element_status(&mut file)
|
||||||
.map_err(|err| format_err!("error reading element status: {}", err))?;
|
.map_err(|err| format_err!("error reading element status: {}", err))?;
|
||||||
|
|
||||||
status.mark_import_export_slots(&config)?;
|
status.mark_import_export_slots(config)?;
|
||||||
|
|
||||||
Ok(status)
|
Ok(status)
|
||||||
}
|
}
|
||||||
|
@ -827,7 +827,7 @@ mod test {
|
||||||
element_type: u8,
|
element_type: u8,
|
||||||
) -> Vec<u8> {
|
) -> Vec<u8> {
|
||||||
let descs: Vec<Vec<u8>> = descriptors.iter().map(|desc| {
|
let descs: Vec<Vec<u8>> = descriptors.iter().map(|desc| {
|
||||||
build_storage_descriptor(&desc, trailing)
|
build_storage_descriptor(desc, trailing)
|
||||||
}).collect();
|
}).collect();
|
||||||
|
|
||||||
let (desc_len, address) = if let Some(el) = descs.get(0) {
|
let (desc_len, address) = if let Some(el) = descs.get(0) {
|
||||||
|
|
|
@ -46,7 +46,7 @@ impl CertInfo {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn from_pem(cert_pem: &[u8]) -> Result<Self, Error> {
|
pub fn from_pem(cert_pem: &[u8]) -> Result<Self, Error> {
|
||||||
let x509 = openssl::x509::X509::from_pem(&cert_pem)?;
|
let x509 = openssl::x509::X509::from_pem(cert_pem)?;
|
||||||
Ok(Self{
|
Ok(Self{
|
||||||
x509
|
x509
|
||||||
})
|
})
|
||||||
|
@ -87,11 +87,11 @@ impl CertInfo {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn not_before_unix(&self) -> Result<i64, Error> {
|
pub fn not_before_unix(&self) -> Result<i64, Error> {
|
||||||
asn1_time_to_unix(&self.not_before())
|
asn1_time_to_unix(self.not_before())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn not_after_unix(&self) -> Result<i64, Error> {
|
pub fn not_after_unix(&self) -> Result<i64, Error> {
|
||||||
asn1_time_to_unix(&self.not_after())
|
asn1_time_to_unix(self.not_after())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if the certificate is expired at or after a specific unix epoch.
|
/// Check if the certificate is expired at or after a specific unix epoch.
|
||||||
|
|
|
@ -64,7 +64,7 @@ pub fn json_object_to_query(data: Value) -> Result<String, Error> {
|
||||||
query.append_pair(key, &n.to_string());
|
query.append_pair(key, &n.to_string());
|
||||||
}
|
}
|
||||||
Value::String(s) => {
|
Value::String(s) => {
|
||||||
query.append_pair(key, &s);
|
query.append_pair(key, s);
|
||||||
}
|
}
|
||||||
Value::Array(arr) => {
|
Value::Array(arr) => {
|
||||||
for element in arr {
|
for element in arr {
|
||||||
|
@ -76,7 +76,7 @@ pub fn json_object_to_query(data: Value) -> Result<String, Error> {
|
||||||
query.append_pair(key, &n.to_string());
|
query.append_pair(key, &n.to_string());
|
||||||
}
|
}
|
||||||
Value::String(s) => {
|
Value::String(s) => {
|
||||||
query.append_pair(key, &s);
|
query.append_pair(key, s);
|
||||||
}
|
}
|
||||||
_ => bail!(
|
_ => bail!(
|
||||||
"json_object_to_query: unable to handle complex array data types."
|
"json_object_to_query: unable to handle complex array data types."
|
||||||
|
@ -121,14 +121,14 @@ pub fn required_integer_property(param: &Value, name: &str) -> Result<i64, Error
|
||||||
|
|
||||||
pub fn required_array_param<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
|
pub fn required_array_param<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
|
||||||
match param[name].as_array() {
|
match param[name].as_array() {
|
||||||
Some(s) => Ok(&s),
|
Some(s) => Ok(s),
|
||||||
None => bail!("missing parameter '{}'", name),
|
None => bail!("missing parameter '{}'", name),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
|
pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
|
||||||
match param[name].as_array() {
|
match param[name].as_array() {
|
||||||
Some(s) => Ok(&s),
|
Some(s) => Ok(s),
|
||||||
None => bail!("missing property '{}'", name),
|
None => bail!("missing property '{}'", name),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -97,8 +97,8 @@ where
|
||||||
write!(
|
write!(
|
||||||
f,
|
f,
|
||||||
"{}:{}:{:08X}",
|
"{}:{}:{:08X}",
|
||||||
percent_encode(self.prefix.as_bytes(), &TICKET_ASCIISET),
|
percent_encode(self.prefix.as_bytes(), TICKET_ASCIISET),
|
||||||
percent_encode(self.data.as_bytes(), &TICKET_ASCIISET),
|
percent_encode(self.data.as_bytes(), TICKET_ASCIISET),
|
||||||
self.time,
|
self.time,
|
||||||
)
|
)
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
|
@ -107,7 +107,7 @@ where
|
||||||
/// Write additional authentication data to the verifier.
|
/// Write additional authentication data to the verifier.
|
||||||
fn write_aad(f: &mut dyn io::Write, aad: Option<&str>) -> Result<(), Error> {
|
fn write_aad(f: &mut dyn io::Write, aad: Option<&str>) -> Result<(), Error> {
|
||||||
if let Some(aad) = aad {
|
if let Some(aad) = aad {
|
||||||
write!(f, ":{}", percent_encode(aad.as_bytes(), &TICKET_ASCIISET))?;
|
write!(f, ":{}", percent_encode(aad.as_bytes(), TICKET_ASCIISET))?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -122,7 +122,7 @@ where
|
||||||
/// Sign the ticket.
|
/// Sign the ticket.
|
||||||
pub fn sign(&mut self, keypair: &PKey<Private>, aad: Option<&str>) -> Result<String, Error> {
|
pub fn sign(&mut self, keypair: &PKey<Private>, aad: Option<&str>) -> Result<String, Error> {
|
||||||
let mut output = Vec::<u8>::new();
|
let mut output = Vec::<u8>::new();
|
||||||
let mut signer = Signer::new(MessageDigest::sha256(), &keypair)
|
let mut signer = Signer::new(MessageDigest::sha256(), keypair)
|
||||||
.map_err(|err| format_err!("openssl error creating signer for ticket: {}", err))?;
|
.map_err(|err| format_err!("openssl error creating signer for ticket: {}", err))?;
|
||||||
|
|
||||||
self.write_data(&mut output)
|
self.write_data(&mut output)
|
||||||
|
@ -179,14 +179,14 @@ where
|
||||||
bail!("invalid ticket - expired");
|
bail!("invalid ticket - expired");
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut verifier = Verifier::new(MessageDigest::sha256(), &keypair)?;
|
let mut verifier = Verifier::new(MessageDigest::sha256(), keypair)?;
|
||||||
|
|
||||||
self.write_data(&mut verifier)
|
self.write_data(&mut verifier)
|
||||||
.and_then(|()| Self::write_aad(&mut verifier, aad))
|
.and_then(|()| Self::write_aad(&mut verifier, aad))
|
||||||
.map_err(|err| format_err!("error verifying ticket: {}", err))?;
|
.map_err(|err| format_err!("error verifying ticket: {}", err))?;
|
||||||
|
|
||||||
let is_valid: bool = verifier
|
let is_valid: bool = verifier
|
||||||
.verify(&signature)
|
.verify(signature)
|
||||||
.map_err(|err| format_err!("openssl error verifying ticket: {}", err))?;
|
.map_err(|err| format_err!("openssl error verifying ticket: {}", err))?;
|
||||||
|
|
||||||
if !is_valid {
|
if !is_valid {
|
||||||
|
|
|
@ -90,8 +90,8 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
|
||||||
client,
|
client,
|
||||||
crypt_config.clone(),
|
crypt_config.clone(),
|
||||||
repo.store(),
|
repo.store(),
|
||||||
&snapshot.group().backup_type(),
|
snapshot.group().backup_type(),
|
||||||
&snapshot.group().backup_id(),
|
snapshot.group().backup_id(),
|
||||||
snapshot.backup_time(),
|
snapshot.backup_time(),
|
||||||
true,
|
true,
|
||||||
).await?;
|
).await?;
|
||||||
|
@ -103,7 +103,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
|
||||||
|
|
||||||
let most_used = index.find_most_used_chunks(8);
|
let most_used = index.find_most_used_chunks(8);
|
||||||
|
|
||||||
let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
|
let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
|
||||||
|
|
||||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
|
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
|
||||||
|
|
||||||
|
@ -232,7 +232,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
let most_used = index.find_most_used_chunks(8);
|
let most_used = index.find_most_used_chunks(8);
|
||||||
|
|
||||||
let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
|
let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
|
||||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
|
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
|
||||||
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
|
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||||
let mut catalogfile = std::fs::OpenOptions::new()
|
let mut catalogfile = std::fs::OpenOptions::new()
|
||||||
|
|
|
@ -654,7 +654,7 @@ async fn create_backup(
|
||||||
|
|
||||||
let crypto = crypto_parameters(¶m)?;
|
let crypto = crypto_parameters(¶m)?;
|
||||||
|
|
||||||
let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox_sys::nodename());
|
let backup_id = param["backup-id"].as_str().unwrap_or(proxmox_sys::nodename());
|
||||||
|
|
||||||
let backup_type = param["backup-type"].as_str().unwrap_or("host");
|
let backup_type = param["backup-type"].as_str().unwrap_or("host");
|
||||||
|
|
||||||
|
@ -794,7 +794,7 @@ async fn create_backup(
|
||||||
crypt_config.clone(),
|
crypt_config.clone(),
|
||||||
repo.store(),
|
repo.store(),
|
||||||
backup_type,
|
backup_type,
|
||||||
&backup_id,
|
backup_id,
|
||||||
backup_time,
|
backup_time,
|
||||||
verbose,
|
verbose,
|
||||||
false
|
false
|
||||||
|
@ -1003,7 +1003,7 @@ async fn dump_image<W: Write>(
|
||||||
|
|
||||||
for pos in 0..index.index_count() {
|
for pos in 0..index.index_count() {
|
||||||
let digest = index.index_digest(pos).unwrap();
|
let digest = index.index_digest(pos).unwrap();
|
||||||
let raw_data = chunk_reader.read_chunk(&digest).await?;
|
let raw_data = chunk_reader.read_chunk(digest).await?;
|
||||||
writer.write_all(&raw_data)?;
|
writer.write_all(&raw_data)?;
|
||||||
bytes += raw_data.len();
|
bytes += raw_data.len();
|
||||||
if verbose {
|
if verbose {
|
||||||
|
|
|
@ -259,7 +259,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
|
||||||
|
|
||||||
let session = pbs_client::pxar::fuse::Session::mount(
|
let session = pbs_client::pxar::fuse::Session::mount(
|
||||||
decoder,
|
decoder,
|
||||||
&options,
|
options,
|
||||||
false,
|
false,
|
||||||
Path::new(target.unwrap()),
|
Path::new(target.unwrap()),
|
||||||
)
|
)
|
||||||
|
|
|
@ -325,7 +325,7 @@ impl BlockRestoreDriver for QemuBlockDriver {
|
||||||
match VMStateMap::load_read_only() {
|
match VMStateMap::load_read_only() {
|
||||||
Ok(state) => state
|
Ok(state) => state
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(&name).ok())
|
.filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(name).ok())
|
||||||
.collect(),
|
.collect(),
|
||||||
Err(_) => Vec::new(),
|
Err(_) => Vec::new(),
|
||||||
}
|
}
|
||||||
|
|
|
@ -175,8 +175,8 @@ async fn list(
|
||||||
client,
|
client,
|
||||||
crypt_config.clone(),
|
crypt_config.clone(),
|
||||||
repo.store(),
|
repo.store(),
|
||||||
&snapshot.group().backup_type(),
|
snapshot.group().backup_type(),
|
||||||
&snapshot.group().backup_id(),
|
snapshot.group().backup_id(),
|
||||||
snapshot.backup_time(),
|
snapshot.backup_time(),
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
|
@ -209,7 +209,7 @@ async fn list(
|
||||||
.download_dynamic_index(&manifest, CATALOG_NAME)
|
.download_dynamic_index(&manifest, CATALOG_NAME)
|
||||||
.await?;
|
.await?;
|
||||||
let most_used = index.find_most_used_chunks(8);
|
let most_used = index.find_most_used_chunks(8);
|
||||||
let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
|
let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
|
||||||
let chunk_reader = RemoteChunkReader::new(
|
let chunk_reader = RemoteChunkReader::new(
|
||||||
client.clone(),
|
client.clone(),
|
||||||
crypt_config,
|
crypt_config,
|
||||||
|
@ -348,8 +348,8 @@ async fn extract(
|
||||||
client,
|
client,
|
||||||
crypt_config.clone(),
|
crypt_config.clone(),
|
||||||
repo.store(),
|
repo.store(),
|
||||||
&snapshot.group().backup_type(),
|
snapshot.group().backup_type(),
|
||||||
&snapshot.group().backup_id(),
|
snapshot.group().backup_id(),
|
||||||
snapshot.backup_time(),
|
snapshot.backup_time(),
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
|
|
|
@ -241,7 +241,7 @@ fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
|
||||||
static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
|
static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
|
||||||
}
|
}
|
||||||
let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
|
let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
|
||||||
let capture = RE.captures(&forwarded)?;
|
let capture = RE.captures(forwarded)?;
|
||||||
let rhost = capture.get(1)?.as_str();
|
let rhost = capture.get(1)?.as_str();
|
||||||
|
|
||||||
rhost.parse().ok()
|
rhost.parse().ok()
|
||||||
|
|
|
@ -151,7 +151,7 @@ impl WorkerTaskSetup {
|
||||||
|
|
||||||
finish_list.sort_unstable_by(|a, b| {
|
finish_list.sort_unstable_by(|a, b| {
|
||||||
match (&a.state, &b.state) {
|
match (&a.state, &b.state) {
|
||||||
(Some(s1), Some(s2)) => s1.cmp(&s2),
|
(Some(s1), Some(s2)) => s1.cmp(s2),
|
||||||
(Some(_), None) => std::cmp::Ordering::Less,
|
(Some(_), None) => std::cmp::Ordering::Less,
|
||||||
(None, Some(_)) => std::cmp::Ordering::Greater,
|
(None, Some(_)) => std::cmp::Ordering::Greater,
|
||||||
_ => a.upid.starttime.cmp(&b.upid.starttime),
|
_ => a.upid.starttime.cmp(&b.upid.starttime),
|
||||||
|
@ -170,7 +170,7 @@ impl WorkerTaskSetup {
|
||||||
false,
|
false,
|
||||||
)?;
|
)?;
|
||||||
for info in &finish_list {
|
for info in &finish_list {
|
||||||
writer.write_all(render_task_line(&info).as_bytes())?;
|
writer.write_all(render_task_line(info).as_bytes())?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -580,7 +580,7 @@ fn render_task_line(info: &TaskListInfo) -> String {
|
||||||
fn render_task_list(list: &[TaskListInfo]) -> String {
|
fn render_task_list(list: &[TaskListInfo]) -> String {
|
||||||
let mut raw = String::new();
|
let mut raw = String::new();
|
||||||
for info in list {
|
for info in list {
|
||||||
raw.push_str(&render_task_line(&info));
|
raw.push_str(&render_task_line(info));
|
||||||
}
|
}
|
||||||
raw
|
raw
|
||||||
}
|
}
|
||||||
|
@ -980,7 +980,7 @@ pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
|
||||||
|
|
||||||
/// Request abort of a local worker (if existing and running)
|
/// Request abort of a local worker (if existing and running)
|
||||||
pub fn abort_local_worker(upid: UPID) {
|
pub fn abort_local_worker(upid: UPID) {
|
||||||
if let Some(ref worker) = WORKER_TASK_LIST.lock().unwrap().get(&upid.task_id) {
|
if let Some(worker) = WORKER_TASK_LIST.lock().unwrap().get(&upid.task_id) {
|
||||||
worker.request_abort();
|
worker.request_abort();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -146,7 +146,7 @@ fn list(
|
||||||
let param_path_buf = Path::new(path_str);
|
let param_path_buf = Path::new(path_str);
|
||||||
|
|
||||||
let mut disk_state = crate::DISK_STATE.lock().unwrap();
|
let mut disk_state = crate::DISK_STATE.lock().unwrap();
|
||||||
let query_result = disk_state.resolve(¶m_path_buf)?;
|
let query_result = disk_state.resolve(param_path_buf)?;
|
||||||
|
|
||||||
match query_result {
|
match query_result {
|
||||||
ResolveResult::Path(vm_path) => {
|
ResolveResult::Path(vm_path) => {
|
||||||
|
@ -275,7 +275,7 @@ fn extract(
|
||||||
|
|
||||||
let query_result = {
|
let query_result = {
|
||||||
let mut disk_state = crate::DISK_STATE.lock().unwrap();
|
let mut disk_state = crate::DISK_STATE.lock().unwrap();
|
||||||
disk_state.resolve(&path)?
|
disk_state.resolve(path)?
|
||||||
};
|
};
|
||||||
|
|
||||||
let vm_path = match query_result {
|
let vm_path = match query_result {
|
||||||
|
|
|
@ -398,7 +398,7 @@ impl DiskState {
|
||||||
|
|
||||||
// attempt to mount device directly
|
// attempt to mount device directly
|
||||||
let dev_node = format!("/dev/{}", name);
|
let dev_node = format!("/dev/{}", name);
|
||||||
let size = Self::make_dev_node(&dev_node, &sys_path)?;
|
let size = Self::make_dev_node(&dev_node, sys_path)?;
|
||||||
let mut dfs_bucket = Bucket::RawFs(PartitionBucketData {
|
let mut dfs_bucket = Bucket::RawFs(PartitionBucketData {
|
||||||
dev_node: dev_node.clone(),
|
dev_node: dev_node.clone(),
|
||||||
number: 0,
|
number: 0,
|
||||||
|
@ -755,7 +755,7 @@ impl DiskState {
|
||||||
fn make_dev_node(devnode: &str, sys_path: &str) -> Result<u64, Error> {
|
fn make_dev_node(devnode: &str, sys_path: &str) -> Result<u64, Error> {
|
||||||
let dev_num_str = fs::file_read_firstline(&format!("{}/dev", sys_path))?;
|
let dev_num_str = fs::file_read_firstline(&format!("{}/dev", sys_path))?;
|
||||||
let (major, minor) = dev_num_str.split_at(dev_num_str.find(':').unwrap());
|
let (major, minor) = dev_num_str.split_at(dev_num_str.find(':').unwrap());
|
||||||
Self::mknod_blk(&devnode, major.parse()?, minor[1..].trim_end().parse()?)?;
|
Self::mknod_blk(devnode, major.parse()?, minor[1..].trim_end().parse()?)?;
|
||||||
|
|
||||||
// this *always* contains the number of 512-byte sectors, regardless of the true
|
// this *always* contains the number of 512-byte sectors, regardless of the true
|
||||||
// blocksize of this disk - which should always be 512 here anyway
|
// blocksize of this disk - which should always be 512 here anyway
|
||||||
|
|
|
@ -393,10 +393,10 @@ fn commit_journal_impl(
|
||||||
// save all RRDs - we only need a read lock here
|
// save all RRDs - we only need a read lock here
|
||||||
// Note: no fsync here (we do it afterwards)
|
// Note: no fsync here (we do it afterwards)
|
||||||
for rel_path in files.iter() {
|
for rel_path in files.iter() {
|
||||||
let parent_dir = rrd_parent_dir(&config.basedir, &rel_path);
|
let parent_dir = rrd_parent_dir(&config.basedir, rel_path);
|
||||||
dir_set.insert(parent_dir);
|
dir_set.insert(parent_dir);
|
||||||
rrd_file_count += 1;
|
rrd_file_count += 1;
|
||||||
if let Err(err) = rrd_map.read().unwrap().flush_rrd_file(&rel_path) {
|
if let Err(err) = rrd_map.read().unwrap().flush_rrd_file(rel_path) {
|
||||||
errors += 1;
|
errors += 1;
|
||||||
log::error!("unable to save rrd {}: {}", rel_path, err);
|
log::error!("unable to save rrd {}: {}", rel_path, err);
|
||||||
}
|
}
|
||||||
|
|
|
@ -317,7 +317,7 @@ impl RRD {
|
||||||
}
|
}
|
||||||
|
|
||||||
let rrd = if raw[0..8] == rrd_v1::PROXMOX_RRD_MAGIC_1_0 {
|
let rrd = if raw[0..8] == rrd_v1::PROXMOX_RRD_MAGIC_1_0 {
|
||||||
let v1 = rrd_v1::RRDv1::from_raw(&raw)?;
|
let v1 = rrd_v1::RRDv1::from_raw(raw)?;
|
||||||
v1.to_rrd_v2()
|
v1.to_rrd_v2()
|
||||||
.map_err(|err| format_err!("unable to convert from old V1 format - {}", err))?
|
.map_err(|err| format_err!("unable to convert from old V1 format - {}", err))?
|
||||||
} else if raw[0..8] == PROXMOX_RRD_MAGIC_2_0 {
|
} else if raw[0..8] == PROXMOX_RRD_MAGIC_2_0 {
|
||||||
|
|
|
@ -195,7 +195,7 @@ fn extract_archive(
|
||||||
let mut reader = stdin.lock();
|
let mut reader = stdin.lock();
|
||||||
extract_archive_from_reader(
|
extract_archive_from_reader(
|
||||||
&mut reader,
|
&mut reader,
|
||||||
&target,
|
target,
|
||||||
feature_flags,
|
feature_flags,
|
||||||
verbose,
|
verbose,
|
||||||
options,
|
options,
|
||||||
|
@ -208,7 +208,7 @@ fn extract_archive(
|
||||||
let mut reader = std::io::BufReader::new(file);
|
let mut reader = std::io::BufReader::new(file);
|
||||||
extract_archive_from_reader(
|
extract_archive_from_reader(
|
||||||
&mut reader,
|
&mut reader,
|
||||||
&target,
|
target,
|
||||||
feature_flags,
|
feature_flags,
|
||||||
verbose,
|
verbose,
|
||||||
options,
|
options,
|
||||||
|
@ -409,7 +409,7 @@ async fn mount_archive(
|
||||||
let mountpoint = Path::new(&mountpoint);
|
let mountpoint = Path::new(&mountpoint);
|
||||||
let options = OsStr::new("ro,default_permissions");
|
let options = OsStr::new("ro,default_permissions");
|
||||||
|
|
||||||
let session = fuse::Session::mount_path(&archive, &options, verbose, mountpoint)
|
let session = fuse::Session::mount_path(archive, options, verbose, mountpoint)
|
||||||
.await
|
.await
|
||||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||||
|
|
||||||
|
|
|
@ -233,7 +233,7 @@ impl AcmeClient {
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let request = account.post_request(&account.location, &nonce, data)?;
|
let request = account.post_request(&account.location, nonce, data)?;
|
||||||
match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
|
match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
|
||||||
Ok(response) => break response,
|
Ok(response) => break response,
|
||||||
Err(err) if err.is_bad_nonce() => continue,
|
Err(err) if err.is_bad_nonce() => continue,
|
||||||
|
@ -402,7 +402,7 @@ impl AcmeClient {
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let request = revocation.request(&directory, nonce)?;
|
let request = revocation.request(directory, nonce)?;
|
||||||
match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
|
match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
|
||||||
Ok(_response) => return Ok(()),
|
Ok(_response) => return Ok(()),
|
||||||
Err(err) if err.is_bad_nonce() => continue,
|
Err(err) if err.is_bad_nonce() => continue,
|
||||||
|
|
|
@ -270,7 +270,7 @@ impl AcmePlugin for StandaloneServer {
|
||||||
let token = challenge
|
let token = challenge
|
||||||
.token()
|
.token()
|
||||||
.ok_or_else(|| format_err!("missing token in challenge"))?;
|
.ok_or_else(|| format_err!("missing token in challenge"))?;
|
||||||
let key_auth = Arc::new(client.key_authorization(&token)?);
|
let key_auth = Arc::new(client.key_authorization(token)?);
|
||||||
let path = Arc::new(format!("/.well-known/acme-challenge/{}", token));
|
let path = Arc::new(format!("/.well-known/acme-challenge/{}", token));
|
||||||
|
|
||||||
let service = make_service_fn(move |_| {
|
let service = make_service_fn(move |_| {
|
||||||
|
|
|
@ -121,7 +121,7 @@ pub fn read_acl(
|
||||||
let mut list: Vec<AclListItem> = Vec::new();
|
let mut list: Vec<AclListItem> = Vec::new();
|
||||||
if let Some(path) = &path {
|
if let Some(path) = &path {
|
||||||
if let Some(node) = &tree.find_node(path) {
|
if let Some(node) = &tree.find_node(path) {
|
||||||
extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
|
extract_acl_node_data(node, path, &mut list, exact, &auth_id_filter);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
|
extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
|
||||||
|
|
|
@ -118,7 +118,7 @@ fn authenticate_2nd(
|
||||||
challenge_ticket: &str,
|
challenge_ticket: &str,
|
||||||
response: &str,
|
response: &str,
|
||||||
) -> Result<AuthResult, Error> {
|
) -> Result<AuthResult, Error> {
|
||||||
let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(&challenge_ticket)?
|
let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(challenge_ticket)?
|
||||||
.verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
|
.verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
|
||||||
.require_partial()?;
|
.require_partial()?;
|
||||||
|
|
||||||
|
|
|
@ -83,7 +83,7 @@ fn check_priv_or_backup_owner(
|
||||||
required_privs: u64,
|
required_privs: u64,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
 let user_info = CachedUserInfo::new()?;
-let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
+let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);

 if privs & required_privs == 0 {
 let owner = store.get_owner(group)?;
@@ -125,7 +125,7 @@ fn get_all_snapshot_files(
 info: &BackupInfo,
 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

-let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
+let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;

 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
 acc.insert(item.filename.clone());
@@ -536,7 +536,7 @@ pub fn list_snapshots (
 snapshots.extend(
 group_backups
 .into_iter()
-.map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
+.map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
 );

 Ok(snapshots)
@@ -549,7 +549,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu

 groups.iter()
 .filter(|group| {
-let owner = match store.get_owner(&group) {
+let owner = match store.get_owner(group) {
 Ok(owner) => owner,
 Err(err) => {
 eprintln!("Failed to get owner of group '{}/{}' - {}",
@@ -1071,7 +1071,7 @@ pub fn get_datastore_list(
 let mut list = Vec::new();

 for (store, (_, data)) in &config.sections {
-let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
 if allowed {
 list.push(
@@ -1401,7 +1401,7 @@ pub fn catalog(
 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

 let (csum, size) = index.compute_csum();
-manifest.verify_file(&file_name, &csum, size)?;
+manifest.verify_file(file_name, &csum, size)?;

 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
 let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -1446,7 +1446,7 @@ pub fn pxar_file_download(

 async move {
 let store = required_string_param(&param, "store")?;
-let datastore = DataStore::lookup_datastore(&store)?;
+let datastore = DataStore::lookup_datastore(store)?;

 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -1483,7 +1483,7 @@ pub fn pxar_file_download(
 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

 let (csum, size) = index.compute_csum();
-manifest.verify_file(&pxar_name, &csum, size)?;
+manifest.verify_file(pxar_name, &csum, size)?;

 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
 let reader = BufferedDynamicReader::new(index, chunk_reader);
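For readers skimming the hunks above: clippy's `needless_borrow` lint fires when an expression is borrowed even though it already has (or auto-derefs to) the reference type the callee expects, so the extra `&` only creates a reference that the compiler immediately dereferences again. A minimal standalone sketch of the pattern (function and variable names are illustrative, not taken from the Proxmox sources):

    fn lookup(name: &str) -> usize {
        name.len()
    }

    fn main() {
        let name: &str = "datastore";
        // `name` is already a `&str`; borrowing it again yields a `&&str`
        // that deref coercion must immediately peel back off.
        let _ = lookup(&name); // flagged by clippy::needless_borrow
        let _ = lookup(name);  // the fixed form, identical behavior
    }

Both calls compile to the same machine code; the fix is purely cosmetic, which is why it can be applied tree-wide without behavioral risk.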
@@ -73,7 +73,7 @@ pub fn list_sync_jobs(
 }
 })
 .filter(|job: &SyncJobConfig| {
-check_sync_job_read_access(&user_info, &auth_id, &job)
+check_sync_job_read_access(&user_info, &auth_id, job)
 });

 let mut list = Vec::new();

@@ -95,7 +95,7 @@ pub fn update_webauthn_config(
 let digest = <[u8; 32]>::from_hex(digest)?;
 crate::tools::detect_modified_configuration_file(
 &digest,
-&crate::config::tfa::webauthn_config_digest(&wa)?,
+&crate::config::tfa::webauthn_config_digest(wa)?,
 )?;
 }

@@ -524,7 +524,7 @@ pub fn list_plugins(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginCon
 rpcenv["digest"] = hex::encode(&digest).into();
 Ok(plugins
 .iter()
-.map(|(id, (ty, data))| modify_cfg_for_api(&id, &ty, data))
+.map(|(id, (ty, data))| modify_cfg_for_api(id, ty, data))
 .collect())
 }

@@ -546,7 +546,7 @@ pub fn get_plugin(id: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<Plu
 rpcenv["digest"] = hex::encode(&digest).into();

 match plugins.get(&id) {
-Some((ty, data)) => Ok(modify_cfg_for_api(&id, &ty, &data)),
+Some((ty, data)) => Ok(modify_cfg_for_api(&id, ty, data)),
 None => http_bail!(NOT_FOUND, "no such plugin"),
 }
 }

@@ -20,12 +20,12 @@ pub fn check_sync_job_read_access(
 auth_id: &Authid,
 job: &SyncJobConfig,
 ) -> bool {
-let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
 if datastore_privs & PRIV_DATASTORE_AUDIT == 0 {
 return false;
 }

-let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote]);
+let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
 remote_privs & PRIV_REMOTE_AUDIT != 0
 }

@@ -35,7 +35,7 @@ pub fn check_sync_job_modify_access(
 auth_id: &Authid,
 job: &SyncJobConfig,
 ) -> bool {
-let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
 if datastore_privs & PRIV_DATASTORE_BACKUP == 0 {
 return false;
 }
@@ -62,7 +62,7 @@ pub fn check_sync_job_modify_access(
 return false;
 }

-let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote, &job.remote_store]);
+let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
 remote_privs & PRIV_REMOTE_READ != 0
 }

@@ -96,7 +96,7 @@ pub fn list_sync_jobs(

 let list = list
 .into_iter()
-.filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, &sync_job))
+.filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
 .collect();
 Ok(list)
 }
@@ -429,8 +429,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
 };

 // should work without ACLs
-assert_eq!(check_sync_job_read_access(&user_info, &root_auth_id, &job), true);
-assert_eq!(check_sync_job_modify_access(&user_info, &root_auth_id, &job), true);
+assert_eq!(check_sync_job_read_access(&user_info, root_auth_id, &job), true);
+assert_eq!(check_sync_job_modify_access(&user_info, root_auth_id, &job), true);

 // user without permissions must fail
 assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
@@ -330,7 +330,7 @@ async fn order_certificate(

 for auth_url in &order.data.authorizations {
 task_log!(worker, "Getting authorization details from '{}'", auth_url);
-let mut auth = acme.get_authorization(&auth_url).await?;
+let mut auth = acme.get_authorization(auth_url).await?;

 let domain = match &mut auth.identifier {
 Identifier::Dns(domain) => domain.to_ascii_lowercase(),
@@ -442,7 +442,7 @@ async fn request_validation(
 validation_url: &str,
 ) -> Result<(), Error> {
 task_log!(worker, "Triggering validation");
-acme.request_challenge_validation(&validation_url).await?;
+acme.request_challenge_validation(validation_url).await?;

 task_log!(worker, "Sleeping for 5 seconds");
 tokio::time::sleep(Duration::from_secs(5)).await;
@@ -450,7 +450,7 @@ async fn request_validation(
 loop {
 use proxmox_acme_rs::authorization::Status;

-let auth = acme.get_authorization(&auth_url).await?;
+let auth = acme.get_authorization(auth_url).await?;
 match auth.status {
 Status::Pending => {
 task_log!(worker, "Status is still 'pending', trying again in 10 seconds");

@@ -282,7 +282,7 @@ fn create_datastore_mount_unit(
 what: &str,
 ) -> Result<String, Error> {

-let mut mount_unit_name = proxmox_sys::systemd::escape_unit(&mount_point, true);
+let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
 mount_unit_name.push_str(".mount");

 let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);

@@ -55,9 +55,9 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {

 for line in data.lines() {

-if let Some(caps) = DOMAIN_REGEX.captures(&line) {
+if let Some(caps) = DOMAIN_REGEX.captures(line) {
 result["search"] = Value::from(&caps[1]);
-} else if let Some(caps) = SERVER_REGEX.captures(&line) {
+} else if let Some(caps) = SERVER_REGEX.captures(line) {
 nscount += 1;
 if nscount > 3 { continue };
 let nameserver = &caps[1];

@@ -121,7 +121,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu

 let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?.sign(
 private_auth_key(),
-Some(&tools::ticket::term_aad(&userid, &path, port)),
+Some(&tools::ticket::term_aad(userid, path, port)),
 )?;

 let mut command = Vec::new();
@@ -161,7 +161,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
 arguments.push(&fd_string);
 arguments.extend_from_slice(&[
 "--path",
-&path,
+path,
 "--perm",
 "Sys.Console",
 "--authport",
@@ -293,7 +293,7 @@ fn upgrade_to_websocket(
 Ticket::<Empty>::parse(ticket)?.verify(
 crate::auth_helpers::public_auth_key(),
 ticket::TERM_PREFIX,
-Some(&tools::ticket::term_aad(&userid, "/system", port)),
+Some(&tools::ticket::term_aad(userid, "/system", port)),
 )?;

 let (ws, response) = WebSocket::new(parts.headers.clone())?;
@@ -17,7 +17,7 @@ use pbs_config::network::{self, NetworkConfig};
 use proxmox_rest_server::WorkerTask;

 fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
-let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(&list)?;
+let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
 Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
 }

@@ -176,9 +176,9 @@ fn get_service_state(
 bail!("unknown service name '{}'", service);
 }

-let status = get_full_service_state(&service)?;
+let status = get_full_service_state(service)?;

-Ok(json_service_state(&service, status))
+Ok(json_service_state(service, status))
 }

 fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {

@@ -24,9 +24,9 @@ use pbs_config::CachedUserInfo;
 fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
 match (upid.worker_type.as_str(), &upid.worker_id) {
 ("verificationjob", Some(workerid)) => {
-if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
 if let Some(store) = captures.get(1) {
-return user_info.check_privs(&auth_id,
+return user_info.check_privs(auth_id,
 &["datastore", store.as_str()],
 PRIV_DATASTORE_VERIFY,
 true);
@@ -34,7 +34,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
 }
 },
 ("syncjob", Some(workerid)) => {
-if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
 let remote = captures.get(1);
 let remote_store = captures.get(2);
 let local_store = captures.get(3);
@@ -42,7 +42,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
 if let (Some(remote), Some(remote_store), Some(local_store)) =
 (remote, remote_store, local_store) {

-return check_pull_privs(&auth_id,
+return check_pull_privs(auth_id,
 local_store.as_str(),
 remote.as_str(),
 remote_store.as_str(),
@@ -51,15 +51,15 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
 }
 },
 ("garbage_collection", Some(workerid)) => {
-return user_info.check_privs(&auth_id,
-&["datastore", &workerid],
+return user_info.check_privs(auth_id,
+&["datastore", workerid],
 PRIV_DATASTORE_MODIFY,
 true)
 },
 ("prune", Some(workerid)) => {
-return user_info.check_privs(&auth_id,
+return user_info.check_privs(auth_id,
 &["datastore",
-&workerid],
+workerid],
 PRIV_DATASTORE_MODIFY,
 true);
 },
@@ -73,7 +73,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
 fn check_job_store(upid: &UPID, store: &str) -> bool {
 match (upid.worker_type.as_str(), &upid.worker_id) {
 (workertype, Some(workerid)) if workertype.starts_with("verif") => {
-if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
 if let Some(jobstore) = captures.get(1) {
 return store == jobstore.as_str();
 }
@@ -82,7 +82,7 @@ fn check_job_store(upid: &UPID, store: &str) -> bool {
 }
 }
 ("syncjob", Some(workerid)) => {
-if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
 if let Some(local_store) = captures.get(3) {
 return store == local_store.as_str();
 }
@@ -112,7 +112,7 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
 // or task == job which the user/token could have configured/manually executed

 user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
-.or_else(|_| check_job_privs(&auth_id, &user_info, upid))
+.or_else(|_| check_job_privs(auth_id, &user_info, upid))
 .or_else(|_| bail!("task access not allowed"))
 }
 }
@@ -250,7 +250,7 @@ async fn get_task_status(

 fn extract_upid(param: &Value) -> Result<UPID, Error> {

-let upid_str = pbs_tools::json::required_string_param(&param, "upid")?;
+let upid_str = pbs_tools::json::required_string_param(param, "upid")?;

 upid_str.parse::<UPID>()
 }
@@ -569,7 +569,7 @@ const UPID_API_SUBDIRS: SubdirMap = &sorted!([
 pub const UPID_API_ROUTER: Router = Router::new()
 .get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
 .delete(&API_METHOD_STOP_TASK)
-.subdirs(&UPID_API_SUBDIRS);
+.subdirs(UPID_API_SUBDIRS);

 pub const ROUTER: Router = Router::new()
 .get(&API_METHOD_LIST_TASKS)
@@ -91,13 +91,13 @@ pub fn datastore_status(
 let mut list = Vec::new();

 for (store, (_, _)) in &config.sections {
-let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
 if !allowed {
 continue;
 }

-let datastore = match DataStore::lookup_datastore(&store) {
+let datastore = match DataStore::lookup_datastore(store) {
 Ok(datastore) => datastore,
 Err(err) => {
 list.push(json!({

@@ -182,7 +182,7 @@ pub fn do_tape_backup_job(
 Some(lock_tape_device(&drive_config, &setup.drive)?)
 };

-let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
 let email = lookup_user_email(notify_user);

 let upid_str = WorkerTask::new_thread(
@@ -363,7 +363,7 @@ pub fn backup(

 let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

-let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
 let email = lookup_user_email(notify_user);

 let upid_str = WorkerTask::new_thread(
@@ -423,7 +423,7 @@ fn backup_worker(
 task_log!(worker, "update media online status");
 let changer_name = update_media_online_status(&setup.drive)?;

-let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
+let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;

 let mut pool_writer = PoolWriter::new(
 pool,
@@ -443,7 +443,7 @@ fn backup_worker(
 };

 let group_count_full = group_list.len();
-let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, &group_filters)).collect();
+let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, group_filters)).collect();
 let group_count = list.len();
 task_log!(worker, "found {} groups (out of {} total)", group_count, group_count_full);
 (list, group_count)

@@ -96,7 +96,7 @@ pub async fn get_status(
 for (id, drive_status) in status.drives.iter().enumerate() {
 let mut state = None;
 if let Some(drive) = drive_map.get(&(id as u64)) {
-state = get_tape_device_state(&config, &drive)?;
+state = get_tape_device_state(&config, drive)?;
 }
 let entry = MtxStatusEntry {
 entry_kind: MtxEntryKind::Drive,
@@ -231,7 +231,7 @@ const SUBDIRS: SubdirMap = &[

 const ITEM_ROUTER: Router = Router::new()
 .get(&list_subdirs_api_method!(SUBDIRS))
-.subdirs(&SUBDIRS);
+.subdirs(SUBDIRS);

 pub const ROUTER: Router = Router::new()
 .get(&API_METHOD_LIST_CHANGERS)

@@ -542,7 +542,7 @@ fn write_media_label(
 let media_id = if let Some(ref pool) = pool {
 // assign media to pool by writing special media set label
 task_log!(worker, "Label media '{}' for pool '{}'", label.label_text, pool);
-let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);
+let set = MediaSetLabel::with_data(pool, [0u8; 16].into(), 0, label.ctime, None);

 drive.write_media_set_label(&set, None)?;

@@ -1473,7 +1473,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([

 const ITEM_ROUTER: Router = Router::new()
 .get(&list_subdirs_api_method!(SUBDIRS))
-.subdirs(&SUBDIRS);
+.subdirs(SUBDIRS);

 pub const ROUTER: Router = Router::new()
 .get(&API_METHOD_LIST_DRIVES)
@@ -138,7 +138,7 @@ fn check_datastore_privs(
 auth_id: &Authid,
 owner: &Option<Authid>,
 ) -> Result<(), Error> {
-let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
 if (privs & PRIV_DATASTORE_BACKUP) == 0 {
 bail!("no permissions on /datastore/{}", store);
 }
@@ -220,7 +220,7 @@ pub fn restore(
 }

 for store in used_datastores.iter() {
-check_datastore_privs(&user_info, &store, &auth_id, &owner)?;
+check_datastore_privs(&user_info, store, &auth_id, &owner)?;
 }

 let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
@@ -448,7 +448,7 @@ fn restore_list_worker(
 })?;

 let (owner, _group_lock) =
-datastore.create_locked_backup_group(backup_dir.group(), &restore_owner)?;
+datastore.create_locked_backup_group(backup_dir.group(), restore_owner)?;
 if restore_owner != &owner {
 // only the owner is allowed to create additional snapshots
 bail!(
@@ -460,7 +460,7 @@ fn restore_list_worker(
 }

 let (media_id, file_num) = if let Some((media_uuid, file_num)) =
-catalog.lookup_snapshot(&source_datastore, &snapshot)
+catalog.lookup_snapshot(source_datastore, snapshot)
 {
 let media_id = inventory.lookup_media(media_uuid).unwrap();
 (media_id, file_num)
@@ -516,7 +516,7 @@ fn restore_list_worker(
 let (drive, info) = request_and_load_media(
 &worker,
 &drive_config,
-&drive_name,
+drive_name,
 &media_id.label,
 &email,
 )?;
@@ -568,7 +568,7 @@ fn restore_list_worker(
 let (mut drive, _info) = request_and_load_media(
 &worker,
 &drive_config,
-&drive_name,
+drive_name,
 &media_id.label,
 &email,
 )?;
@@ -591,7 +591,7 @@ fn restore_list_worker(
 let backup_dir: BackupDir = snapshot.parse()?;

 let datastore = store_map
-.get_datastore(&source_datastore)
+.get_datastore(source_datastore)
 .ok_or_else(|| format_err!("unexpected source datastore: {}", source_datastore))?;

 let mut tmp_path = base_path.clone();
@@ -646,7 +646,7 @@ fn get_media_set_catalog(
 }
 Some(media_uuid) => {
 let media_id = inventory.lookup_media(media_uuid).unwrap();
-let media_catalog = MediaCatalog::open(status_path, &media_id, false, false)?;
+let media_catalog = MediaCatalog::open(status_path, media_id, false, false)?;
 catalog.append_catalog(media_catalog)?;
 }
 }
@@ -899,7 +899,7 @@ pub fn request_and_restore_media(
 Some(ref set) => &set.uuid,
 };

-let (mut drive, info) = request_and_load_media(&worker, &drive_config, &drive_name, &media_id.label, email)?;
+let (mut drive, info) = request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?;

 match info.media_set_label {
 None => {
@@ -923,7 +923,7 @@ pub fn request_and_restore_media(
 worker,
 &mut drive,
 &info,
-Some((&store_map, restore_owner)),
+Some((store_map, restore_owner)),
 checked_chunks_map,
 false,
 )

@@ -301,7 +301,7 @@ pub fn verify_backup_dir(
 filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
 let snap_lock = lock_dir_noblock_shared(
-&verify_worker.datastore.snapshot_path(&backup_dir),
+&verify_worker.datastore.snapshot_path(backup_dir),
 "snapshot",
 "locked by another operation",
 );
@@ -330,7 +330,7 @@ pub fn verify_backup_dir_with_lock(
 filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 _snap_lock: Dir,
 ) -> Result<bool, Error> {
-let manifest = match verify_worker.datastore.load_manifest(&backup_dir) {
+let manifest = match verify_worker.datastore.load_manifest(backup_dir) {
 Ok((manifest, _)) => manifest,
 Err(err) => {
 task_log!(
@@ -365,10 +365,10 @@ pub fn verify_backup_dir_with_lock(
 let result = proxmox_lang::try_block!({
 task_log!(verify_worker.worker, " check {}", info.filename);
 match archive_type(&info.filename)? {
-ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
-ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, &backup_dir, info),
+ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
+ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
 ArchiveType::Blob => {
-verify_blob(verify_worker.datastore.clone(), &backup_dir, info)
+verify_blob(verify_worker.datastore.clone(), backup_dir, info)
 }
 }
 });
@@ -397,7 +397,7 @@ pub fn verify_backup_dir_with_lock(
 let verify_state = serde_json::to_value(verify_state)?;
 verify_worker
 .datastore
-.update_manifest(&backup_dir, |manifest| {
+.update_manifest(backup_dir, |manifest| {
 manifest.unprotected["verify_state"] = verify_state;
 })
 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
@@ -270,7 +270,7 @@ fn dump_api_method_schema(

 data["parameters"] = dump_property_schema(&api_method.parameters);

-let mut returns = dump_schema(&api_method.returns.schema);
+let mut returns = dump_schema(api_method.returns.schema);
 if api_method.returns.optional {
 returns["optional"] = 1.into();
 }

@@ -730,7 +730,7 @@ async fn schedule_datastore_verify_jobs() {
 let worker_type = "verificationjob";
 let auth_id = Authid::root_auth_id().clone();
 if check_schedule(worker_type, &event_str, &job_id) {
-let job = match Job::new(&worker_type, &job_id) {
+let job = match Job::new(worker_type, &job_id) {
 Ok(job) => job,
 Err(_) => continue, // could not get lock
 };
@@ -766,7 +766,7 @@ async fn schedule_tape_backup_jobs() {
 let worker_type = "tape-backup-job";
 let auth_id = Authid::root_auth_id().clone();
 if check_schedule(worker_type, &event_str, &job_id) {
-let job = match Job::new(&worker_type, &job_id) {
+let job = match Job::new(worker_type, &job_id) {
 Ok(job) => job,
 Err(_) => continue, // could not get lock
 };
@@ -1033,7 +1033,7 @@ fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
 }
 };

-let last = match jobstate::last_run_time(worker_type, &id) {
+let last = match jobstate::last_run_time(worker_type, id) {
 Ok(time) => time,
 Err(err) => {
 eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);

@@ -94,7 +94,7 @@ async fn get_child_links(
 path: &str,
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<String>, Error> {
-let (path, components) = normalize_uri_path(&path)?;
+let (path, components) = normalize_uri_path(path)?;

 let info = &proxmox_backup::api2::ROUTER
 .find_route(&components, &mut HashMap::new())
@@ -132,7 +132,7 @@ fn get_api_method(
 _ => unreachable!(),
 };
 let mut uri_param = HashMap::new();
-let (path, components) = normalize_uri_path(&path)?;
+let (path, components) = normalize_uri_path(path)?;
 if let Some(method) =
 &proxmox_backup::api2::ROUTER.find_method(&components, method.clone(), &mut uri_param)
 {
@@ -446,7 +446,7 @@ async fn ls(path: Option<String>, mut param: Value, rpcenv: &mut dyn RpcEnvironm
 &mut serde_json::to_value(res)?,
 &proxmox_schema::ReturnType {
 optional: false,
-schema: &LS_SCHEMA,
+schema: LS_SCHEMA,
 },
 &output_format,
 &options,
@@ -51,7 +51,7 @@ fn decode_blob(

 if blob.is_encrypted() && key_file.is_some() {
 let (key, _created, _fingerprint) =
-load_and_decrypt_key(&key_file.unwrap(), &get_encryption_key_password)?;
+load_and_decrypt_key(key_file.unwrap(), &get_encryption_key_password)?;
 crypt_conf = CryptConfig::new(key)?;
 crypt_conf_opt = Some(&crypt_conf);
 }

@@ -72,7 +72,7 @@ fn recover_index(

 let crypt_conf_opt = if let Some(key_file_path) = key_file_path {
 let (key, _created, _fingerprint) =
-load_and_decrypt_key(&key_file_path, &get_encryption_key_password)?;
+load_and_decrypt_key(key_file_path, &get_encryption_key_password)?;
 Some(CryptConfig::new(key)?)
 } else {
 None

@@ -55,7 +55,7 @@ fn list_acls(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Err
 pub fn acl_commands() -> CommandLineInterface {

 let cmd_def = CliCommandMap::new()
-.insert("list", CliCommand::new(&&API_METHOD_LIST_ACLS))
+.insert("list", CliCommand::new(&API_METHOD_LIST_ACLS))
 .insert(
 "update",
 CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)

@@ -93,7 +93,7 @@ async fn create_datastore(mut param: Value) -> Result<Value, Error> {

 let mut client = connect_to_localhost()?;

-let result = client.post(&"api2/json/config/datastore", Some(param)).await?;
+let result = client.post("api2/json/config/datastore", Some(param)).await?;

 view_task_result(&mut client, result, &output_format).await?;

@@ -73,8 +73,8 @@ fn show_openid_realm(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Va
 pub fn openid_commands() -> CommandLineInterface {

 let cmd_def = CliCommandMap::new()
-.insert("list", CliCommand::new(&&API_METHOD_LIST_OPENID_REALMS))
-.insert("show", CliCommand::new(&&API_METHOD_SHOW_OPENID_REALM)
+.insert("list", CliCommand::new(&API_METHOD_LIST_OPENID_REALMS))
+.insert("show", CliCommand::new(&API_METHOD_SHOW_OPENID_REALM)
 .arg_param(&["realm"])
 .completion_cb("realm", pbs_config::domains::complete_openid_realm_name)
 )

@@ -75,7 +75,7 @@ fn show_remote(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, E
 pub fn remote_commands() -> CommandLineInterface {

 let cmd_def = CliCommandMap::new()
-.insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
+.insert("list", CliCommand::new(&API_METHOD_LIST_REMOTES))
 .insert(
 "show",
 CliCommand::new(&API_METHOD_SHOW_REMOTE)

@@ -94,7 +94,7 @@ async fn show_current_traffic(param: Value) -> Result<Value, Error> {

 let client = connect_to_localhost()?;

-let mut result = client.get(&"api2/json/admin/traffic-control", None).await?;
+let mut result = client.get("api2/json/admin/traffic-control", None).await?;

 let mut data = result["data"].take();

@@ -171,7 +171,7 @@ fn list_permissions(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Val
 pub fn user_commands() -> CommandLineInterface {

 let cmd_def = CliCommandMap::new()
-.insert("list", CliCommand::new(&&API_METHOD_LIST_USERS))
+.insert("list", CliCommand::new(&API_METHOD_LIST_USERS))
 .insert(
 "create",
 // fixme: howto handle password parameter?
@@ -192,7 +192,7 @@ pub fn user_commands() -> CommandLineInterface {
 )
 .insert(
 "list-tokens",
-CliCommand::new(&&API_METHOD_LIST_TOKENS)
+CliCommand::new(&API_METHOD_LIST_TOKENS)
 .arg_param(&["userid"])
 .completion_cb("userid", pbs_config::user::complete_userid)
 )
@@ -211,7 +211,7 @@ pub fn user_commands() -> CommandLineInterface {
 )
 .insert(
 "permissions",
-CliCommand::new(&&API_METHOD_LIST_PERMISSIONS)
+CliCommand::new(&API_METHOD_LIST_PERMISSIONS)
 .arg_param(&["auth-id"])
 .completion_cb("auth-id", pbs_config::user::complete_authid)
 .completion_cb("path", pbs_config::datastore::complete_acl_path)
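The `CliCommand::new(&&API_METHOD_…)` hunks above remove a double borrow rather than a single one: the `API_METHOD_*` constants were referenced once too often, and deref coercion silently papered over it. A rough illustration of why the doubled `&` still type-checks (the types and names below are made up for the example, not the real proxmox-router API):

    struct ApiMethod;
    static API_METHOD_LIST: ApiMethod = ApiMethod;

    fn new_command(_m: &ApiMethod) {}

    fn main() {
        // `&&API_METHOD_LIST` is a `&&ApiMethod`; the compiler coerces it
        // down to `&ApiMethod`, so this compiles but clippy flags it.
        new_command(&&API_METHOD_LIST);
        // a single borrow is all the signature asks for:
        new_command(&API_METHOD_LIST);
    }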
@@ -34,12 +34,12 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

 let handle = if let Some(name) = param["drive"].as_str() {
 let (config, _digest) = pbs_config::drive::config()?;
-let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+let drive: LtoTapeDrive = config.lookup("lto", name)?;
 eprintln!("using device {}", drive.path);
 open_lto_tape_drive(&drive)?
 } else if let Some(device) = param["device"].as_str() {
 eprintln!("using device {}", device);
-LtoTapeHandle::new(open_lto_tape_device(&device)?)?
+LtoTapeHandle::new(open_lto_tape_device(device)?)?
 } else if let Some(true) = param["stdin"].as_bool() {
 eprintln!("using stdin");
 let fd = std::io::stdin().as_raw_fd();
@@ -62,7 +62,7 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

 if drive_names.len() == 1 {
 let name = drive_names[0];
-let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+let drive: LtoTapeDrive = config.lookup("lto", name)?;
 eprintln!("using device {}", drive.path);
 open_lto_tape_drive(&drive)?
 } else {

@@ -185,7 +185,7 @@ pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(
 create_configdir()?;
 pbs_config::replace_backup_config(&key_path, key_pem)
 .map_err(|err| format_err!("error writing certificate private key - {}", err))?;
-pbs_config::replace_backup_config(&cert_path, &cert_pem)
+pbs_config::replace_backup_config(&cert_path, cert_pem)
 .map_err(|err| format_err!("error writing certificate file - {}", err))?;

 Ok(())

@@ -141,7 +141,7 @@ impl NodeConfig {
 /// Returns the parsed ProxyConfig
 pub fn http_proxy(&self) -> Option<ProxyConfig> {
 if let Some(http_proxy) = &self.http_proxy {
-match ProxyConfig::parse_proxy_url(&http_proxy) {
+match ProxyConfig::parse_proxy_url(http_proxy) {
 Ok(proxy) => Some(proxy),
 Err(_) => None,
 }

@@ -78,7 +78,7 @@ pub async fn check_pbs_auth(
 verify_csrf_prevention_token(
 csrf_secret(),
 &userid,
-&csrf_token,
+csrf_token,
 -300,
 ticket_lifetime,
 )?;

@@ -245,8 +245,8 @@ fn send_job_status_mail(

 sendmail(
 &[email],
-&subject,
-Some(&text),
+subject,
+Some(text),
 Some(&html),
 None,
 Some(&author),

@@ -438,7 +438,7 @@ async fn pull_snapshot(
 &mut chunk_reader,
 tgt_store.clone(),
 snapshot,
-&item,
+item,
 downloaded_chunks.clone(),
 )
 .await?;
@@ -465,7 +465,7 @@ pub async fn pull_snapshot_from(
 snapshot: &BackupDir,
 downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), Error> {
-let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;
+let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(snapshot)?;

 if is_new {
 task_log!(worker, "sync snapshot {:?}", snapshot.relative_path());
@@ -474,12 +474,12 @@ pub async fn pull_snapshot_from(
 worker,
 reader,
 tgt_store.clone(),
-&snapshot,
+snapshot,
 downloaded_chunks,
 )
 .await
 {
-if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
+if let Err(cleanup_err) = tgt_store.remove_backup_dir(snapshot, true) {
 task_log!(worker, "cleanup error - {}", cleanup_err);
 }
 return Err(err);
@@ -491,7 +491,7 @@ pub async fn pull_snapshot_from(
 worker,
 reader,
 tgt_store.clone(),
-&snapshot,
+snapshot,
 downloaded_chunks,
 )
 .await?;
@@ -713,7 +713,7 @@ pub async fn pull_store(
 let list:Vec<BackupGroup> = list
 .into_iter()
 .filter(|group| {
-apply_filters(&group, group_filter)
+apply_filters(group, group_filter)
 })
 .collect();
 task_log!(worker, "found {} groups to sync (out of {} total)", list.len(), unfiltered_count);
@@ -265,9 +265,9 @@ impl ScsiMediaChange for ScsiTapeChanger {
 }

 let status = if USE_MTX {
-mtx::mtx_status(&self)
+mtx::mtx_status(self)
 } else {
-sg_pt_changer::status(&self)
+sg_pt_changer::status(self)
 };

 match &status {

@@ -21,7 +21,7 @@ pub fn mtx_status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {

 let mut status = parse_mtx_status(&output)?;

-status.mark_import_export_slots(&config)?;
+status.mark_import_export_slots(config)?;

 Ok(status)
 }

@@ -203,7 +203,7 @@ Data Transfer Element 1:Empty
 Storage Element 24 IMPORT/EXPORT:Empty
 "###;

-let _ = parse_mtx_status(&output)?;
+let _ = parse_mtx_status(output)?;

 Ok(())
 }

@@ -192,11 +192,11 @@ pub fn update_changer_online_status(
 let mut online_map = OnlineStatusMap::new(drive_config)?;
 let mut online_set = HashSet::new();
 for label_text in label_text_list.iter() {
-if let Some(media_id) = inventory.find_media_by_label_text(&label_text) {
+if let Some(media_id) = inventory.find_media_by_label_text(label_text) {
 online_set.insert(media_id.label.uuid.clone());
 }
 }
-online_map.update_online_status(&changer_name, online_set)?;
+online_map.update_online_status(changer_name, online_set)?;
 inventory.update_online_status(&online_map)?;

 Ok(())

@@ -827,7 +827,7 @@ pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap<String, String>)
 None => return Vec::new(),
 };
 let status_path = Path::new(TAPE_STATUS_DIR);
-let inventory = match Inventory::load(&status_path) {
+let inventory = match Inventory::load(status_path) {
 Ok(inventory) => inventory,
 Err(_) => return Vec::new(),
 };

@@ -413,7 +413,7 @@ impl MediaCatalog {

 let uuid = &media_id.label.uuid;

-let me = Self::create_temporary_database(base_path, &media_id, log_to_stdout)?;
+let me = Self::create_temporary_database(base_path, media_id, log_to_stdout)?;

 Self::finish_temporary_database(base_path, uuid, true)?;

@@ -289,7 +289,7 @@ impl MediaPool {
 create_new_set = Some(String::from("policy is AlwaysCreate"));
 }
 MediaSetPolicy::CreateAt(event) => {
-if let Some(set_start_time) = self.inventory.media_set_start_time(&self.current_media_set.uuid()) {
+if let Some(set_start_time) = self.inventory.media_set_start_time(self.current_media_set.uuid()) {
 if let Ok(Some(alloc_time)) = event.compute_next_event(set_start_time as i64) {
 if current_time >= alloc_time {
 create_new_set = Some(String::from("policy CreateAt event triggered"));
@@ -407,7 +407,7 @@ impl MediaPool {

 for media_id in media_list {

-let (status, location) = self.compute_media_state(&media_id);
+let (status, location) = self.compute_media_state(media_id);
 if media_id.media_set_label.is_some() { continue; } // should not happen

 if !self.location_is_available(&location) {
@@ -478,7 +478,7 @@ impl MediaPool {
 continue;
 }

-if !self.media_is_expired(&media, current_time) {
+if !self.media_is_expired(media, current_time) {
 continue;
 }
@@ -63,7 +63,7 @@ impl CatalogSet {
 }

 // remove read-only version from set (in case it is there)
-self.media_set_catalog.remove_catalog(&new_catalog.uuid());
+self.media_set_catalog.remove_catalog(new_catalog.uuid());

 self.catalog = Some(new_catalog);

@@ -117,7 +117,7 @@ impl PoolWriter {

 /// Set media status to FULL (persistent - stores pool status)
 pub fn set_media_status_full(&mut self, uuid: &Uuid) -> Result<(), Error> {
-self.pool.set_media_status_full(&uuid)?;
+self.pool.set_media_status_full(uuid)?;
 Ok(())
 }

@@ -556,7 +556,7 @@ fn write_chunk_archive<'a>(

 //println!("CHUNK {} size {}", hex::encode(digest), blob.raw_size());

-match writer.try_write_chunk(&digest, &blob) {
+match writer.try_write_chunk(digest, blob) {
 Ok(true) => {
 chunk_list.push(*digest);
 chunk_iter.next(); // consume
@@ -627,7 +627,7 @@ fn update_media_set_label(
 if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint {
 bail!("detected changed encryption fingerprint - internal error");
 }
-media_catalog = MediaCatalog::open(status_path, &media_id, true, false)?;
+media_catalog = MediaCatalog::open(status_path, media_id, true, false)?;

 // todo: verify last content/media_catalog somehow?

@@ -53,7 +53,7 @@ impl NewChunksIterator {
 continue;
 }

-if catalog_set.lock().unwrap().contains_chunk(&datastore_name, &digest) {
+if catalog_set.lock().unwrap().contains_chunk(datastore_name, &digest) {
 continue;
 };

@@ -25,7 +25,7 @@ pub struct PkgState {
 pub fn write_pkg_cache(state: &PkgState) -> Result<(), Error> {
 let serialized_state = serde_json::to_string(state)?;

-replace_file(APT_PKG_STATE_FN, &serialized_state.as_bytes(), CreateOptions::new(), false)
+replace_file(APT_PKG_STATE_FN, serialized_state.as_bytes(), CreateOptions::new(), false)
 .map_err(|err| format_err!("Error writing package cache - {}", err))?;
 Ok(())
 }
@@ -206,7 +206,7 @@ pub fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
 drop(cache_iter);
 // also loop through missing dependencies, as they would be installed
 for pkg in depends.iter() {
-let mut iter = cache.find_by_name(&pkg);
+let mut iter = cache.find_by_name(pkg);
 let view = match iter.next() {
 Some(view) => view,
 None => continue // package not found, ignore
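As the commit message says, these spots were found via clippy. Assuming a standard Rust toolchain with the clippy component installed, `cargo clippy -- -W clippy::needless_borrow` lists findings of this kind, and `cargo clippy --fix` can rewrite the mechanical cases automatically; the exact invocation is an assumption here, not taken from this repository's tooling.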