typo fixes all over the place

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>

parent 1610c45a86
commit add5861e8d

@@ -107,7 +107,7 @@ async move {
 }

 let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
-if !is_new { bail!("backup directorty already exists."); }
+if !is_new { bail!("backup directory already exists."); }

 WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
 let mut env = BackupEnvironment::new(

@@ -151,7 +151,7 @@ async move {

 match (res, env.ensure_finished()) {
 (Ok(_), Ok(())) => {
-env.log("backup finished sucessfully");
+env.log("backup finished successfully");
 Ok(())
 },
 (Err(err), Ok(())) => {

@@ -378,7 +378,7 @@ fn dynamic_append (

 env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

-env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
 }

 Ok(Value::Null)

@@ -443,7 +443,7 @@ fn fixed_append (

 env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

-env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
 }

 Ok(Value::Null)

@@ -498,7 +498,7 @@ fn close_dynamic_index (

 env.dynamic_writer_close(wid, chunk_count, size, csum)?;

-env.log(format!("sucessfully closed dynamic index {}", wid));
+env.log(format!("successfully closed dynamic index {}", wid));

 Ok(Value::Null)
 }

@@ -552,7 +552,7 @@ fn close_fixed_index (

 env.fixed_writer_close(wid, chunk_count, size, csum)?;

-env.log(format!("sucessfully closed fixed index {}", wid));
+env.log(format!("successfully closed fixed index {}", wid));

 Ok(Value::Null)
 }

@@ -566,7 +566,7 @@ fn finish_backup (
 let env: &BackupEnvironment = rpcenv.as_ref();

 env.finish_backup()?;
-env.log("sucessfully finished backup");
+env.log("successfully finished backup");

 Ok(Value::Null)
 }

@@ -52,7 +52,7 @@ struct FixedWriterState {
 struct SharedBackupState {
 finished: bool,
 uid_counter: usize,
-file_counter: usize, // sucessfully uploaded files
+file_counter: usize, // successfully uploaded files
 dynamic_writers: HashMap<usize, DynamicWriterState>,
 fixed_writers: HashMap<usize, FixedWriterState>,
 known_chunks: HashMap<[u8;32], u32>,

@@ -338,7 +338,7 @@ pub enum DeletableProperty {
 autostart,
 /// Delete bridge ports (set to 'none')
 bridge_ports,
-/// Delet bridge-vlan-aware flag
+/// Delete bridge-vlan-aware flag
 bridge_vlan_aware,
 /// Delete bond-slaves (set to 'none')
 slaves,

@@ -256,7 +256,7 @@ fn stop_service(
 _param: Value,
 ) -> Result<Value, Error> {

-log::info!("stoping service {}", service);
+log::info!("stopping service {}", service);

 run_service_command(&service, "stop")
 }

@@ -131,7 +131,7 @@ fn upgrade_to_backup_reader_protocol(
 Either::Right((Ok(res), _)) => Ok(res),
 Either::Right((Err(err), _)) => Err(err),
 })
-.map_ok(move |_| env.log("reader finished sucessfully"))
+.map_ok(move |_| env.log("reader finished successfully"))
 })?;

 let response = Response::builder()

@@ -822,7 +822,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

 for fingerprint in invalid_fingerprints.iter() {
 if let Ok(_) = parse_simple_value(fingerprint, &schema) {
-bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint);
+bail!("test fingerprint '{}' failed - got Ok() while expecting an error.", fingerprint);
 }
 }


@@ -866,7 +866,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {

 for name in invalid_user_ids.iter() {
 if let Ok(_) = parse_simple_value(name, &schema) {
-bail!("test userid '{}' failed - got Ok() while expection an error.", name);
+bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
 }
 }


@@ -311,7 +311,7 @@ impl DataBlob {
 /// Verify digest and data length for unencrypted chunks.
 ///
 /// To do that, we need to decompress data first. Please note that
-/// this is noth possible for encrypted chunks.
+/// this is not possible for encrypted chunks.
 pub fn verify_unencrypted(
 &self,
 expected_chunk_size: usize,

@@ -49,7 +49,7 @@ fn hello_command(
 }

 #[api(input: { properties: {} })]
-/// Quit command. Exit the programm.
+/// Quit command. Exit the program.
 ///
 /// Returns: nothing
 fn quit_command() -> Result<(), Error> {

@@ -16,7 +16,7 @@ use std::io::Write;
 // tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros

 // # pxar create test.pxar ./dyntest1/
-// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
+// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)

 fn create_large_file(path: PathBuf) {


@@ -2000,7 +2000,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {

 if let Some(pipe) = pipe {
 nix::unistd::chdir(Path::new("/")).unwrap();
-// Finish creation of deamon by redirecting filedescriptors.
+// Finish creation of daemon by redirecting filedescriptors.
 let nullfd = nix::fcntl::open(
 "/dev/null",
 nix::fcntl::OFlag::O_RDWR,

@@ -17,7 +17,7 @@ fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error
 }

 #[api]
-/// Diplay node certificate information.
+/// Display node certificate information.
 fn cert_info() -> Result<(), Error> {

 let cert_path = PathBuf::from(configdir!("/proxy.pem"));

@@ -138,7 +138,7 @@ impl BackupReader {

 /// Download a .blob file
 ///
-/// This creates a temorary file in /tmp (using O_TMPFILE). The data is verified using
+/// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
 /// the provided manifest.
 pub async fn download_blob(
 &self,

@@ -164,7 +164,7 @@ impl BackupReader {

 /// Download dynamic index file
 ///
-/// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
 /// the provided manifest.
 pub async fn download_dynamic_index(
 &self,

@@ -192,7 +192,7 @@ impl BackupReader {

 /// Download fixed index file
 ///
-/// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
 /// the provided manifest.
 pub async fn download_fixed_index(
 &self,

@@ -343,7 +343,7 @@ impl HttpClient {

 /// Login
 ///
-/// Login is done on demand, so this is onyl required if you need
+/// Login is done on demand, so this is only required if you need
 /// access to authentication data in 'AuthInfo'.
 pub async fn login(&self) -> Result<AuthInfo, Error> {
 self.auth.listen().await

@@ -123,12 +123,12 @@ async fn try_client_log_download(
 .read(true)
 .open(&tmp_path)?;

-// Note: be silent if there is no log - only log sucessful download
+// Note: be silent if there is no log - only log successful download
 if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
 if let Err(err) = std::fs::rename(&tmp_path, &path) {
 bail!("Atomic rename file {:?} failed - {}", path, err);
 }
-worker.log(format!("got bakup log file {:?}", CLIENT_LOG_BLOB_NAME));
+worker.log(format!("got backup log file {:?}", CLIENT_LOG_BLOB_NAME));
 }

 Ok(())
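
The hunk above also shows the write-to-temporary-then-rename idiom that makes the downloaded log appear atomically. A minimal standalone sketch of that pattern, using only the standard library (names are illustrative, not code from this commit):

    use std::fs::{self, OpenOptions};
    use std::io::Write;

    /// Write `data` so that readers never observe a partially written file.
    fn atomic_write(path: &str, data: &[u8]) -> std::io::Result<()> {
        let tmp_path = format!("{}.tmp", path);
        let mut tmp = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .open(&tmp_path)?;
        tmp.write_all(data)?;
        tmp.sync_all()?; // flush before the rename makes the file visible
        fs::rename(&tmp_path, path) // atomic when both paths share a file system
    }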

@@ -149,7 +149,7 @@ impl Interface {
 Ok(())
 }

-/// Write attributes not dependening on address family
+/// Write attributes not depending on address family
 fn write_iface_attributes(&self, w: &mut dyn Write) -> Result<(), Error> {

 static EMPTY_LIST: Vec<String> = Vec::new();

@@ -187,7 +187,7 @@ impl Interface {
 Ok(())
 }

-/// Write attributes dependening on address family inet (IPv4)
+/// Write attributes depending on address family inet (IPv4)
 fn write_iface_attributes_v4(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
 if method == NetworkConfigMethod::Static {
 if let Some(address) = &self.cidr {

@@ -211,7 +211,7 @@ impl Interface {
 Ok(())
 }

-/// Write attributes dependening on address family inet6 (IPv6)
+/// Write attributes depending on address family inet6 (IPv6)
 fn write_iface_attributes_v6(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
 if method == NetworkConfigMethod::Static {
 if let Some(address) = &self.cidr6 {

@@ -4,7 +4,7 @@
 //! format used in the [casync](https://github.com/systemd/casync)
 //! toolkit (we are not 100\% binary compatible). It is a file archive
 //! format defined by 'Lennart Poettering', specially defined for
-//! efficent deduplication.
+//! efficient deduplication.

 //! Every archive contains items in the following order:
 //! * `ENTRY` -- containing general stat() data and related bits

@@ -61,7 +61,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
 }
 }

-/// This function calls the provided `copy_func()` with the permutaion
+/// This function calls the provided `copy_func()` with the permutation
 /// info.
 ///
 /// ```

@@ -71,7 +71,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
 /// });
 /// ```
 ///
-/// This will produce the folowing output:
+/// This will produce the following output:
 ///
 /// ```no-compile
 /// Copy 3 to 0

@@ -81,7 +81,7 @@ fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
 /// Copy 4 to 2
 /// ```
 ///
-/// So this generates the following permuation: `[3,1,4,0,2]`.
+/// So this generates the following permutation: `[3,1,4,0,2]`.

 pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
 n: usize,
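
For context, a sketch reproducing the worked example from the doc comment above for n = 5, assuming only the two-argument callback signature visible in the hunk (the parameter name is not shown there):

    fn main() {
        let n = 5;
        let mut permutation = vec![0usize; n];
        // copy_binary_search_tree() reports (source, destination) pairs;
        // recording them reproduces the permutation from the doc comment.
        copy_binary_search_tree(n, |src, dst| {
            println!("Copy {} to {}", src, dst);
            permutation[dst] = src;
        });
        assert_eq!(permutation, vec![3, 1, 4, 0, 2]);
    }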

@@ -1117,7 +1117,7 @@ impl<'a, W: Write, C: BackupCatalogWriter> Encoder<'a, W, C> {
 if pos != size {
 // Note:: casync format cannot handle that
 bail!(
-"detected shrinked file {:?} ({} < {})",
+"detected shrunk file {:?} ({} < {})",
 self.full_path(),
 pos,
 size
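
For context on the check above: the encoder records a file's size up front and streams the payload afterwards, so if fewer bytes arrive than promised the archive entry would be corrupt and it bails out. A standalone sketch of the same guard (illustrative only, not code from the commit):

    use std::fs::File;
    use std::io::Read;

    /// Detect a file that shrank between stat() and read, as the encoder does.
    fn read_with_size_check(path: &str, expected_size: u64) -> Result<Vec<u8>, String> {
        let mut buf = Vec::new();
        let pos = File::open(path)
            .and_then(|mut f| f.read_to_end(&mut buf))
            .map_err(|e| e.to_string())? as u64;
        if pos != expected_size {
            return Err(format!("detected shrunk file {:?} ({} < {})", path, pos, expected_size));
        }
        Ok(buf)
    }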

@@ -29,7 +29,7 @@ pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b;
 /// Marks item as hardlink
 /// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
 pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
-/// Marks the beginnig of the payload (actual content) of regular files
+/// Marks the beginning of the payload (actual content) of regular files
 pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
 /// Marks item as entry of goodbye table
 pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;

@@ -124,7 +124,7 @@ impl MatchPattern {
 Ok(Some((match_pattern, content_buffer, stat)))
 }

-/// Interprete a byte buffer as a sinlge line containing a valid
+/// Interpret a byte buffer as a sinlge line containing a valid
 /// `MatchPattern`.
 /// Pattern starting with `#` are interpreted as comments, returning `Ok(None)`.
 /// Pattern starting with '!' are interpreted as negative match pattern.

@@ -84,7 +84,7 @@ impl<R: Read> SequentialDecoder<R> {

 pub(crate) fn read_link(&mut self, size: u64) -> Result<PathBuf, Error> {
 if size < (HEADER_SIZE + 2) {
-bail!("dectected short link target.");
+bail!("detected short link target.");
 }
 let target_len = size - HEADER_SIZE;


@@ -104,7 +104,7 @@ impl<R: Read> SequentialDecoder<R> {

 pub(crate) fn read_hardlink(&mut self, size: u64) -> Result<(PathBuf, u64), Error> {
 if size < (HEADER_SIZE + 8 + 2) {
-bail!("dectected short hardlink header.");
+bail!("detected short hardlink header.");
 }
 let offset: u64 = self.read_item()?;
 let target = self.read_link(size - 8)?;

@@ -121,7 +121,7 @@ impl<R: Read> SequentialDecoder<R> {

 pub(crate) fn read_filename(&mut self, size: u64) -> Result<OsString, Error> {
 if size < (HEADER_SIZE + 2) {
-bail!("dectected short filename");
+bail!("detected short filename");
 }
 let name_len = size - HEADER_SIZE;


@@ -277,7 +277,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
 } else {
 match state {
 None => {
-println!("Detected stoped UPID {}", upid_str);
+println!("Detected stopped UPID {}", upid_str);
 let status = upid_read_status(&upid)
 .unwrap_or_else(|_| String::from("unknown"));
 finish_list.push(TaskListInfo {

@@ -127,7 +127,7 @@ pub fn lock_file<F: AsRawFd>(
 }

 /// Open or create a lock file (append mode). Then try to
-/// aquire a lock using `lock_file()`.
+/// acquire a lock using `lock_file()`.
 pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
 let path = path.as_ref();
 let mut file = match OpenOptions::new().create(true).append(true).open(path) {

@@ -136,7 +136,7 @@ pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<Fi
 };
 match lock_file(&mut file, true, Some(timeout)) {
 Ok(_) => Ok(file),
-Err(err) => bail!("Unable to aquire lock {:?} - {}", path, err),
+Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
 }
 }

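
As a usage note for the function above: `open_file_locked()` hands back the open `File`, whose lifetime holds the lock, so a caller keeps the handle alive for the critical section. A minimal sketch; the lock file path and timeout are made-up values, not from the commit:

    use std::time::Duration;

    fn update_shared_state() -> Result<(), anyhow::Error> {
        // Hypothetical lock file; dropping the guard releases the lock.
        let guard = open_file_locked("/run/example/myapp.lck", Duration::from_secs(10))?;
        // ... read-modify-write the protected resource here ...
        drop(guard);
        Ok(())
    }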

@@ -441,7 +441,7 @@ pub fn join(data: &Vec<String>, sep: char) -> String {

 /// Detect modified configuration files
 ///
-/// This function fails with a resonable error message if checksums do not match.
+/// This function fails with a reasonable error message if checksums do not match.
 pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
 if digest1 != digest2 {
 bail!("detected modified configuration - file changed by other user? Try again.");
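
A possible caller of the function above would compare the checksum a client saw when it read the configuration against a freshly computed one before writing. A sketch; the sha256 helper here is an assumption, though the code base does use openssl elsewhere:

    /// Hypothetical save path guarded by detect_modified_configuration_file().
    fn save_config(expected_digest: &[u8; 32], current_raw: &[u8]) -> Result<(), anyhow::Error> {
        let current_digest: [u8; 32] = openssl::sha::sha256(current_raw);
        // Bails with a readable message if someone else changed the file meanwhile.
        detect_modified_configuration_file(expected_digest, &current_digest)?;
        // ... safe to write the updated configuration now ...
        Ok(())
    }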

@@ -149,14 +149,14 @@ fn test_broadcast_future() {
 .map_ok(|res| {
 CHECKSUM.fetch_add(res, Ordering::SeqCst);
 })
-.map_err(|err| { panic!("got errror {}", err); })
+.map_err(|err| { panic!("got error {}", err); })
 .map(|_| ());

 let receiver2 = sender.listen()
 .map_ok(|res| {
 CHECKSUM.fetch_add(res*2, Ordering::SeqCst);
 })
-.map_err(|err| { panic!("got errror {}", err); })
+.map_err(|err| { panic!("got error {}", err); })
 .map(|_| ());

 let mut rt = tokio::runtime::Runtime::new().unwrap();

@@ -4,7 +4,7 @@ use std::io::Write;

 /// Log messages with timestamps into files
 ///
-/// Logs messages to file, and optionaly to standart output.
+/// Logs messages to file, and optionally to standard output.
 ///
 ///
 /// #### Example:

@@ -107,7 +107,7 @@ pub fn read_subdir<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> nix::Res
 }

 /// Scan through a directory with a regular expression. This is simply a shortcut filtering the
-/// results of `read_subdir`. Non-UTF8 comaptible file names are silently ignored.
+/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored.
 pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
 dirfd: RawFd,
 path: &P,

@@ -1,6 +1,6 @@
 //! Inter-process reader-writer lock builder.
 //!
-//! This implemenation uses fcntl record locks with non-blocking
+//! This implementation uses fcntl record locks with non-blocking
 //! F_SETLK command (never blocks).
 //!
 //! We maintain a map of shared locks with time stamps, so you can get

@@ -127,9 +127,9 @@ impl ProcessLocker {
 Ok(())
 }

-/// Try to aquire a shared lock
+/// Try to acquire a shared lock
 ///
-/// On sucess, this makes sure that no other process can get an exclusive lock for the file.
+/// On success, this makes sure that no other process can get an exclusive lock for the file.
 pub fn try_shared_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockSharedGuard, Error> {

 let mut data = locker.lock().unwrap();

@@ -168,7 +168,7 @@ impl ProcessLocker {
 result
 }

-/// Try to aquire a exclusive lock
+/// Try to acquire a exclusive lock
 ///
 /// Make sure the we are the only process which has locks for this file (shared or exclusive).
 pub fn try_exclusive_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockExclusiveGuard, Error> {
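
How the two ProcessLocker entry points above fit together, as a sketch; the signatures come from the hunks, the surrounding setup is assumed:

    use std::sync::{Arc, Mutex};

    fn example(locker: Arc<Mutex<ProcessLocker>>) -> Result<(), anyhow::Error> {
        // Shared: many processes may hold this at once, but it prevents
        // anyone from taking the exclusive lock while the guard lives.
        let shared = ProcessLocker::try_shared_lock(locker.clone())?;
        drop(shared);

        // Exclusive: succeeds only if no other shared or exclusive lock
        // is currently held on the file.
        let _exclusive = ProcessLocker::try_exclusive_lock(locker)?;
        Ok(())
    }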

@@ -1,4 +1,4 @@
-//! Generate and verify Authentification tickets
+//! Generate and verify Authentication tickets

 use anyhow::{bail, Error};
 use base64;

@@ -77,7 +77,7 @@ Ext.define('PBS.DataStoreContent', {
 } else if (btype === 'host') {
 cls = 'fa-building';
 } else {
-console.warn(`got unkown backup-type '${btype}'`);
+console.warn(`got unknown backup-type '${btype}'`);
 continue; // FIXME: auto render? what do?
 }
