replace Userid with Authid in most generic places

This is accompanied by a change in RpcEnvironment to purposefully break
existing call sites.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
parent e10c5c74f6
commit e6dc35acb8
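The note about purposefully breaking existing call sites refers to the renamed RpcEnvironment accessors: get_user()/set_user() become get_auth_id()/set_auth_id() (see the BackupEnvironment hunk below), so every handler that still asks the environment for a "user" stops compiling and has to be ported to Authid explicitly. A minimal sketch of the call-site migration, using a stripped-down stand-in for the trait (illustrative only, not the real proxmox API):

// Illustrative sketch only, not part of the commit.
trait RpcEnvironment {
    // renamed from get_user() / set_user() by this commit
    fn get_auth_id(&self) -> Option<String>;
    fn set_auth_id(&mut self, auth_id: Option<String>);
}

// Typical call-site change seen throughout the diff:
//   old: let userid: Userid = rpcenv.get_user().unwrap().parse()?;
//   new: let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
// Endpoints that intentionally stay user-only parse a Userid first and
// derive the Authid from it:
//   let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
//   let auth_id = Authid::from(userid.clone());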
@@ -31,7 +31,8 @@ fn authenticate_user(
 ) -> Result<bool, Error> {
 let user_info = CachedUserInfo::new()?;

-if !user_info.is_active_user(&userid) {
+let auth_id = Authid::from(userid.clone());
+if !user_info.is_active_auth_id(&auth_id) {
 bail!("user account disabled or expired.");
 }

@@ -69,8 +70,7 @@ fn authenticate_user(
 path_vec.push(part);
 }
 }
-
-user_info.check_privs(userid, &path_vec, *privilege, false)?;
+user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
 return Ok(false);
 }
 }
@@ -213,9 +213,10 @@ fn change_password(
 ) -> Result<Value, Error> {

 let current_user: Userid = rpcenv
-.get_user()
+.get_auth_id()
 .ok_or_else(|| format_err!("unknown user"))?
 .parse()?;
+let current_auth = Authid::from(current_user.clone());

 let mut allowed = userid == current_user;

@@ -223,7 +224,7 @@ fn change_password(

 if !allowed {
 let user_info = CachedUserInfo::new()?;
-let privs = user_info.lookup_privs(&current_user, &[]);
+let privs = user_info.lookup_privs(&current_auth, &[]);
 if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
 }

@@ -140,9 +140,9 @@ pub fn read_acl(
 optional: true,
 schema: ACL_PROPAGATE_SCHEMA,
 },
-userid: {
+auth_id: {
 optional: true,
-type: Userid,
+type: Authid,
 },
 group: {
 optional: true,
@@ -168,7 +168,7 @@ pub fn update_acl(
 path: String,
 role: String,
 propagate: Option<bool>,
-userid: Option<Userid>,
+auth_id: Option<Authid>,
 group: Option<String>,
 delete: Option<bool>,
 digest: Option<String>,
@@ -190,11 +190,12 @@ pub fn update_acl(

 if let Some(ref _group) = group {
 bail!("parameter 'group' - groups are currently not supported.");
-} else if let Some(ref userid) = userid {
+} else if let Some(ref auth_id) = auth_id {
 if !delete { // Note: we allow to delete non-existent users
 let user_cfg = crate::config::user::cached_config()?;
-if user_cfg.sections.get(&userid.to_string()).is_none() {
-bail!("no such user.");
+if user_cfg.sections.get(&auth_id.to_string()).is_none() {
+bail!(format!("no such {}.",
+if auth_id.is_token() { "API token" } else { "user" }));
 }
 }
 } else {
@@ -205,11 +206,11 @@ pub fn update_acl(
 acl::check_acl_path(&path)?;
 }

-if let Some(userid) = userid {
+if let Some(auth_id) = auth_id {
 if delete {
-tree.delete_user_role(&path, &userid, &role);
+tree.delete_user_role(&path, &auth_id, &role);
 } else {
-tree.insert_user_role(&path, &userid, &role, propagate);
+tree.insert_user_role(&path, &auth_id, &role, propagate);
 }
 } else if let Some(group) = group {
 if delete {
@@ -39,10 +39,13 @@ pub fn list_users(

 let (config, digest) = user::config()?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+// intentionally user only for now
+let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
+let auth_id = Authid::from(userid.clone());
+
 let user_info = CachedUserInfo::new()?;

-let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
+let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
 let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;

 let filter_by_privs = |user: &user::User| {
@@ -47,11 +47,11 @@ use crate::config::acl::{
 fn check_backup_owner(
 store: &DataStore,
 group: &BackupGroup,
-userid: &Userid,
+auth_id: &Authid,
 ) -> Result<(), Error> {
 let owner = store.get_owner(group)?;
-if &owner != userid {
-bail!("backup owner check failed ({} != {})", userid, owner);
+if &owner != auth_id {
+bail!("backup owner check failed ({} != {})", auth_id, owner);
 }
 Ok(())
 }
@@ -149,9 +149,9 @@ fn list_groups(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<GroupListItem>, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let datastore = DataStore::lookup_datastore(&store)?;

@@ -171,7 +171,7 @@ fn list_groups(

 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
 let owner = datastore.get_owner(group)?;
-if !list_all && owner != userid {
+if !list_all && owner != auth_id {
 continue;
 }

@@ -230,16 +230,16 @@ pub fn list_snapshot_files(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<BackupContent>, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let datastore = DataStore::lookup_datastore(&store)?;

 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
-if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }

 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

@@ -282,16 +282,16 @@ fn delete_snapshot(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

 let datastore = DataStore::lookup_datastore(&store)?;

 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }

 datastore.remove_backup_dir(&snapshot, false)?;

@@ -338,9 +338,9 @@ pub fn list_snapshots (
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SnapshotListItem>, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let datastore = DataStore::lookup_datastore(&store)?;

@@ -362,7 +362,7 @@ pub fn list_snapshots (
 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
 let owner = datastore.get_owner(group)?;

-if !list_all && owner != userid {
+if !list_all && owner != auth_id {
 continue;
 }

@@ -570,13 +570,13 @@ pub fn verify(
 _ => bail!("parameters do not specify a backup group or snapshot"),
 }

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

 let upid_str = WorkerTask::new_thread(
 worker_type,
 Some(worker_id.clone()),
-userid,
+auth_id,
 to_stdout,
 move |worker| {
 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
@@ -705,9 +705,9 @@ fn prune(
 let backup_type = tools::required_string_param(&param, "backup-type")?;
 let backup_id = tools::required_string_param(&param, "backup-id")?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let dry_run = param["dry-run"].as_bool().unwrap_or(false);

@@ -716,7 +716,7 @@ fn prune(
 let datastore = DataStore::lookup_datastore(&store)?;

 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
+if !allowed { check_backup_owner(&datastore, &group, &auth_id)?; }

 let prune_options = PruneOptions {
 keep_last: param["keep-last"].as_u64(),
@@ -758,7 +758,7 @@ fn prune(


 // We use a WorkerTask just to have a task log, but run synchrounously
-let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
+let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;

 if keep_all {
 worker.log("No prune selection - keeping all files.");
@@ -833,6 +833,7 @@ fn start_garbage_collection(
 ) -> Result<Value, Error> {

 let datastore = DataStore::lookup_datastore(&store)?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 println!("Starting garbage collection on store {}", store);

@@ -841,7 +842,7 @@ fn start_garbage_collection(
 let upid_str = WorkerTask::new_thread(
 "garbage_collection",
 Some(store.clone()),
-Userid::root_userid().clone(),
+auth_id.clone(),
 to_stdout,
 move |worker| {
 worker.log(format!("starting garbage collection on store {}", store));
@@ -911,13 +912,13 @@ fn get_datastore_list(

 let (config, _digest) = datastore::config()?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;

 let mut list = Vec::new();

 for (store, (_, data)) in &config.sections {
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
 if allowed {
 let mut entry = json!({ "store": store });
@@ -962,9 +963,9 @@ fn download_file(
 let store = tools::required_string_param(&param, "store")?;
 let datastore = DataStore::lookup_datastore(store)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -975,7 +976,7 @@ fn download_file(
 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }

 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

@@ -1035,9 +1036,9 @@ fn download_file_decoded(
 let store = tools::required_string_param(&param, "store")?;
 let datastore = DataStore::lookup_datastore(store)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -1048,7 +1049,7 @@ fn download_file_decoded(
 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }

 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
 for file in files {
@@ -1160,8 +1161,8 @@ fn upload_backup_log(

 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
-check_backup_owner(&datastore, backup_dir.group(), &userid)?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+check_backup_owner(&datastore, backup_dir.group(), &auth_id)?;

 let mut path = datastore.base_path();
 path.push(backup_dir.relative_path());
@@ -1230,14 +1231,14 @@ fn catalog(
 ) -> Result<Value, Error> {
 let datastore = DataStore::lookup_datastore(&store)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }

 let file_name = CATALOG_NAME;

@@ -1401,9 +1402,9 @@ fn pxar_file_download(
 let store = tools::required_string_param(&param, "store")?;
 let datastore = DataStore::lookup_datastore(&store)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

@@ -1414,7 +1415,7 @@ fn pxar_file_download(
 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }

 let mut components = base64::decode(&filepath)?;
 if components.len() > 0 && components[0] == '/' as u8 {
@@ -1580,14 +1581,14 @@ fn get_notes(
 ) -> Result<String, Error> {
 let datastore = DataStore::lookup_datastore(&store)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }

 let (manifest, _) = datastore.load_manifest(&backup_dir)?;

@@ -1633,14 +1634,14 @@ fn set_notes(
 ) -> Result<(), Error> {
 let datastore = DataStore::lookup_datastore(&store)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }

 datastore.update_manifest(&backup_dir,|manifest| {
 manifest.unprotected["notes"] = notes.into();
@@ -1662,7 +1663,7 @@ fn set_notes(
 schema: BACKUP_ID_SCHEMA,
 },
 "new-owner": {
-type: Userid,
+type: Authid,
 },
 },
 },
@@ -1675,7 +1676,7 @@ fn set_backup_owner(
 store: String,
 backup_type: String,
 backup_id: String,
-new_owner: Userid,
+new_owner: Authid,
 _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {

@@ -1685,8 +1686,14 @@ fn set_backup_owner(

 let user_info = CachedUserInfo::new()?;

-if !user_info.is_active_user(&new_owner) {
-bail!("user '{}' is inactive or non-existent", new_owner);
+if !user_info.is_active_auth_id(&new_owner) {
+bail!("{} '{}' is inactive or non-existent",
+if new_owner.is_token() {
+"API token".to_string()
+} else {
+"user".to_string()
+},
+new_owner);
 }

 datastore.set_owner(&backup_group, &new_owner, true)?;
@@ -101,11 +101,11 @@ fn run_sync_job(
 let (config, _digest) = sync::config()?;
 let sync_job: SyncJobConfig = config.lookup("sync", &id)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 let job = Job::new("syncjob", &id)?;

-let upid_str = do_sync_job(job, sync_job, &userid, None)?;
+let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;

 Ok(upid_str)
 }
@@ -101,11 +101,11 @@ fn run_verification_job(
 let (config, _digest) = verify::config()?;
 let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 let job = Job::new("verificationjob", &id)?;

-let upid_str = do_verification_job(job, verification_job, &userid, None)?;
+let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;

 Ok(upid_str)
 }
@@ -59,12 +59,12 @@ async move {
 let debug = param["debug"].as_bool().unwrap_or(false);
 let benchmark = param["benchmark"].as_bool().unwrap_or(false);

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 let store = tools::required_string_param(&param, "store")?.to_owned();

 let user_info = CachedUserInfo::new()?;
-user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

 let datastore = DataStore::lookup_datastore(&store)?;

@@ -105,12 +105,12 @@ async move {
 };

 // lock backup group to only allow one backup per group at a time
-let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
+let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;

 // permission check
-if owner != userid && worker_type != "benchmark" {
+if owner != auth_id && worker_type != "benchmark" {
 // only the owner is allowed to create additional snapshots
-bail!("backup owner check failed ({} != {})", userid, owner);
+bail!("backup owner check failed ({} != {})", auth_id, owner);
 }

 let last_backup = {
@@ -153,9 +153,9 @@ async move {
 if !is_new { bail!("backup directory already exists."); }


-WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
+WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
 let mut env = BackupEnvironment::new(
-env_type, userid, worker.clone(), datastore, backup_dir);
+env_type, auth_id, worker.clone(), datastore, backup_dir);

 env.debug = debug;
 env.last_backup = last_backup;
@@ -10,7 +10,7 @@ use proxmox::tools::digest_to_hex;
 use proxmox::tools::fs::{replace_file, CreateOptions};
 use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
 use crate::backup::*;
 use crate::server::WorkerTask;
 use crate::server::formatter::*;
@@ -104,7 +104,7 @@ impl SharedBackupState {
 pub struct BackupEnvironment {
 env_type: RpcEnvironmentType,
 result_attributes: Value,
-user: Userid,
+auth_id: Authid,
 pub debug: bool,
 pub formatter: &'static OutputFormatter,
 pub worker: Arc<WorkerTask>,
@@ -117,7 +117,7 @@ pub struct BackupEnvironment {
 impl BackupEnvironment {
 pub fn new(
 env_type: RpcEnvironmentType,
-user: Userid,
+auth_id: Authid,
 worker: Arc<WorkerTask>,
 datastore: Arc<DataStore>,
 backup_dir: BackupDir,
@@ -137,7 +137,7 @@ impl BackupEnvironment {
 Self {
 result_attributes: json!({}),
 env_type,
-user,
+auth_id,
 worker,
 datastore,
 debug: false,
@@ -518,7 +518,7 @@ impl BackupEnvironment {
 WorkerTask::new_thread(
 "verify",
 Some(worker_id),
-self.user.clone(),
+self.auth_id.clone(),
 false,
 move |worker| {
 worker.log("Automatically verifying newly added snapshot");
@@ -599,12 +599,12 @@ impl RpcEnvironment for BackupEnvironment {
 self.env_type
 }

-fn set_user(&mut self, _user: Option<String>) {
-panic!("unable to change user");
+fn set_auth_id(&mut self, _auth_id: Option<String>) {
+panic!("unable to change auth_id");
 }

-fn get_user(&self) -> Option<String> {
-Some(self.user.to_string())
+fn get_auth_id(&self) -> Option<String> {
+Some(self.auth_id.to_string())
 }
 }
@@ -35,14 +35,14 @@ pub fn list_datastores(

 let (config, digest) = datastore::config()?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;

 rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

 let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
 let filter_by_privs = |store: &DataStoreConfig| {
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
 (user_privs & PRIV_DATASTORE_AUDIT) != 0
 };

@@ -66,7 +66,7 @@ pub fn list_remotes(
 default: 8007,
 },
 userid: {
-type: Userid,
+type: Authid,
 },
 password: {
 schema: remote::REMOTE_PASSWORD_SCHEMA,
@@ -167,7 +167,7 @@ pub enum DeletableProperty {
 },
 userid: {
 optional: true,
-type: Userid,
+type: Authid,
 },
 password: {
 optional: true,
@@ -91,10 +91,12 @@ async fn termproxy(
 cmd: Option<String>,
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
+// intentionally user only for now
 let userid: Userid = rpcenv
-.get_user()
+.get_auth_id()
 .ok_or_else(|| format_err!("unknown user"))?
 .parse()?;
+let auth_id = Authid::from(userid.clone());

 if userid.realm() != "pam" {
 bail!("only pam users can use the console");
@@ -137,7 +139,7 @@ async fn termproxy(
 let upid = WorkerTask::spawn(
 "termproxy",
 None,
-userid,
+auth_id,
 false,
 move |worker| async move {
 // move inside the worker so that it survives and does not close the port
@@ -272,7 +274,8 @@ fn upgrade_to_websocket(
 rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
 async move {
-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+// intentionally user only for now
+let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
 let ticket = tools::required_string_param(&param, "vncticket")?;
 let port: u16 = tools::required_integer_param(&param, "port")? as u16;

@@ -12,7 +12,7 @@ use crate::server::WorkerTask;
 use crate::tools::http;

 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
-use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
+use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};

 const_regex! {
 VERSION_EPOCH_REGEX = r"^\d+:";
@@ -351,11 +351,11 @@ pub fn apt_update_database(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
 let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);

-let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
+let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
 if !quiet { worker.log("starting apt-get update") }

 // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
@@ -13,7 +13,7 @@ use crate::tools::disks::{
 };
 use crate::server::WorkerTask;

-use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
+use crate::api2::types::{Authid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};

 pub mod directory;
 pub mod zfs;
@@ -140,7 +140,7 @@ pub fn initialize_disk(

 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 let info = get_disk_usage_info(&disk, true)?;

@@ -149,7 +149,7 @@ pub fn initialize_disk(
 }

 let upid_str = WorkerTask::new_thread(
-"diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
+"diskinit", Some(disk.clone()), auth_id, to_stdout, move |worker|
 {
 worker.log(format!("initialize disk {}", disk));

@@ -134,7 +134,7 @@ pub fn create_datastore_disk(

 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 let info = get_disk_usage_info(&disk, true)?;

@@ -143,7 +143,7 @@ pub fn create_datastore_disk(
 }

 let upid_str = WorkerTask::new_thread(
-"dircreate", Some(name.clone()), userid, to_stdout, move |worker|
+"dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
 {
 worker.log(format!("create datastore '{}' on disk {}", name, disk));

@@ -256,7 +256,7 @@ pub fn create_zpool(

 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 let add_datastore = add_datastore.unwrap_or(false);

@@ -316,7 +316,7 @@ pub fn create_zpool(
 }

 let upid_str = WorkerTask::new_thread(
-"zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
+"zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
 {
 worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));

@@ -684,9 +684,9 @@ pub async fn reload_network_config(

 network::assert_ifupdown2_installed()?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
+let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id, true, |_worker| async {

 let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);

@@ -182,7 +182,7 @@ fn get_service_state(
 Ok(json_service_state(&service, status))
 }

-fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {
+fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {

 let workerid = format!("srv{}", &cmd);

@@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value
 let upid = WorkerTask::new_thread(
 &workerid,
 Some(service.clone()),
-userid,
+auth_id,
 false,
 move |_worker| {

@@ -244,11 +244,11 @@ fn start_service(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 log::info!("starting service {}", service);

-run_service_command(&service, "start", userid)
+run_service_command(&service, "start", auth_id)
 }

 #[api(
@@ -274,11 +274,11 @@ fn stop_service(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 log::info!("stopping service {}", service);

-run_service_command(&service, "stop", userid)
+run_service_command(&service, "stop", auth_id)
 }

 #[api(
@@ -304,15 +304,15 @@ fn restart_service(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 log::info!("re-starting service {}", service);

 if &service == "proxmox-backup-proxy" {
 // special case, avoid aborting running tasks
-run_service_command(&service, "reload", userid)
+run_service_command(&service, "reload", auth_id)
 } else {
-run_service_command(&service, "restart", userid)
+run_service_command(&service, "restart", auth_id)
 }
 }

@@ -339,11 +339,11 @@ fn reload_service(
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

 log::info!("reloading service {}", service);

-run_service_command(&service, "reload", userid)
+run_service_command(&service, "reload", auth_id)
 }

@@ -7,7 +7,7 @@ use crate::tools;
 use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
 use crate::config::acl::{PRIV_SYS_AUDIT,PRIV_SYS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;
-use crate::api2::types::{NODE_SCHEMA, Userid};
+use crate::api2::types::{NODE_SCHEMA, Authid};

 #[api(
 input: {
@@ -100,9 +100,9 @@ fn get_subscription(
 },
 };

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &[]);
+let user_privs = user_info.lookup_privs(&auth_id, &[]);

 if (user_privs & PRIV_SYS_AUDIT) == 0 {
 // not enough privileges for full state
@@ -84,11 +84,11 @@ async fn get_task_status(

 let upid = extract_upid(&param)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-if userid != upid.userid {
+if auth_id != upid.auth_id {
 let user_info = CachedUserInfo::new()?;
-user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
 }

 let mut result = json!({
@@ -99,7 +99,7 @@ async fn get_task_status(
 "starttime": upid.starttime,
 "type": upid.worker_type,
 "id": upid.worker_id,
-"user": upid.userid,
+"user": upid.auth_id,
 });

 if crate::server::worker_is_active(&upid).await? {
@@ -161,11 +161,11 @@ async fn read_task_log(

 let upid = extract_upid(&param)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-if userid != upid.userid {
+if auth_id != upid.auth_id {
 let user_info = CachedUserInfo::new()?;
-user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
 }

 let test_status = param["test-status"].as_bool().unwrap_or(false);
@@ -234,11 +234,11 @@ fn stop_task(

 let upid = extract_upid(&param)?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-if userid != upid.userid {
+if auth_id != upid.auth_id {
 let user_info = CachedUserInfo::new()?;
-user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
+user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
 }

 server::abort_worker_async(upid);
@@ -308,9 +308,9 @@ pub fn list_tasks(
 mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
+let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);

 let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@@ -326,10 +326,10 @@ pub fn list_tasks(
 Err(_) => return None,
 };

-if !list_all && info.upid.userid != userid { return None; }
+if !list_all && info.upid.auth_id != auth_id { return None; }

-if let Some(userid) = &userfilter {
-if !info.upid.userid.as_str().contains(userid) { return None; }
+if let Some(needle) = &userfilter {
+if !info.upid.auth_id.to_string().contains(needle) { return None; }
 }

 if let Some(store) = store {
@@ -20,7 +20,7 @@ use crate::config::{


 pub fn check_pull_privs(
-userid: &Userid,
+auth_id: &Authid,
 store: &str,
 remote: &str,
 remote_store: &str,
@@ -29,11 +29,11 @@ pub fn check_pull_privs(

 let user_info = CachedUserInfo::new()?;

-user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
-user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;

 if delete {
-user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
+user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
 }

 Ok(())
@@ -68,19 +68,19 @@ pub async fn get_pull_parameters(
 pub fn do_sync_job(
 mut job: Job,
 sync_job: SyncJobConfig,
-userid: &Userid,
+auth_id: &Authid,
 schedule: Option<String>,
 ) -> Result<String, Error> {

 let job_id = job.jobname().to_string();
 let worker_type = job.jobtype().to_string();

-let email = crate::server::lookup_user_email(userid);
+let email = crate::server::lookup_user_email(auth_id.user());

 let upid_str = WorkerTask::spawn(
 &worker_type,
 Some(job.jobname().to_string()),
-userid.clone(),
+auth_id.clone(),
 false,
 move |worker| async move {

@@ -101,7 +101,9 @@ pub fn do_sync_job(
 worker.log(format!("Sync datastore '{}' from '{}/{}'",
 sync_job.store, sync_job.remote, sync_job.remote_store));

-crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
+let backup_auth_id = Authid::backup_auth_id();
+
+crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, backup_auth_id.clone()).await?;

 worker.log(format!("sync job '{}' end", &job_id));

@@ -173,19 +175,19 @@ async fn pull (
 rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 let delete = remove_vanished.unwrap_or(true);

-check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;
+check_pull_privs(&auth_id, &store, &remote, &remote_store, delete)?;

 let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

 // fixme: set to_stdout to false?
-let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {
+let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.clone(), true, move |worker| async move {

 worker.log(format!("sync datastore '{}' start", store));

-let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
+let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, auth_id);
 let future = select!{
 success = pull_future.fuse() => success,
 abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,

@@ -55,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
async move {
let debug = param["debug"].as_bool().unwrap_or(false);

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();

let user_info = CachedUserInfo::new()?;
-let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;

@@ -94,7 +94,7 @@ fn upgrade_to_backup_reader_protocol(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?;
-if owner != userid {
+if owner != auth_id {
bail!("backup owner check failed!");
}
}

@@ -110,10 +110,10 @@ fn upgrade_to_backup_reader_protocol(

let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());

-WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
+WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = ReaderEnvironment::new(
env_type,
-userid,
+auth_id,
worker.clone(),
datastore,
backup_dir,

@@ -5,7 +5,7 @@ use serde_json::{json, Value};

use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;

@@ -17,7 +17,7 @@ use crate::server::WorkerTask;
pub struct ReaderEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
-user: Userid,
+auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,

@@ -29,7 +29,7 @@ pub struct ReaderEnvironment {
impl ReaderEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
-user: Userid,
+auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,

@@ -39,7 +39,7 @@ impl ReaderEnvironment {
Self {
result_attributes: json!({}),
env_type,
-user,
+auth_id,
worker,
datastore,
debug: false,

@@ -82,12 +82,12 @@ impl RpcEnvironment for ReaderEnvironment {
self.env_type
}

-fn set_user(&mut self, _user: Option<String>) {
+fn set_auth_id(&mut self, _auth_id: Option<String>) {
-panic!("unable to change user");
+panic!("unable to change auth_id");
}

-fn get_user(&self) -> Option<String> {
+fn get_auth_id(&self) -> Option<String> {
-Some(self.user.to_string())
+Some(self.auth_id.to_string())
}
}

@@ -16,9 +16,9 @@ use crate::api2::types::{
DATASTORE_SCHEMA,
RRDMode,
RRDTimeFrameResolution,
+Authid,
TaskListItem,
TaskStateType,
-Userid,
};

use crate::server;

@@ -87,13 +87,13 @@ fn datastore_status(

let (config, _digest) = datastore::config()?;

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;

let mut list = Vec::new();

for (store, (_, _)) in &config.sections {
-let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
continue;

@@ -221,9 +221,9 @@ pub fn list_tasks(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

-let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
-let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
+let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);

let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
let since = since.unwrap_or_else(|| 0);

@@ -238,7 +238,7 @@ pub fn list_tasks(
.filter_map(|info| {
match info {
Ok(info) => {
-if list_all || info.upid.userid == userid {
+if list_all || info.upid.auth_id == auth_id {
if let Some(filter) = &typefilter {
if !info.upid.worker_type.contains(filter) {
return None;

@@ -376,7 +376,7 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
},
},
owner: {
-type: Userid,
+type: Authid,
optional: true,
},
},

@@ -394,7 +394,7 @@ pub struct GroupListItem {
pub files: Vec<String>,
/// The owner of group
#[serde(skip_serializing_if="Option::is_none")]
-pub owner: Option<Userid>,
+pub owner: Option<Authid>,
}

#[api()]

@@ -452,7 +452,7 @@ pub struct SnapshotVerifyState {
},
},
owner: {
-type: Userid,
+type: Authid,
optional: true,
},
},

@@ -477,7 +477,7 @@ pub struct SnapshotListItem {
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if="Option::is_none")]
-pub owner: Option<Userid>,
+pub owner: Option<Authid>,
}

#[api(

@@ -692,7 +692,7 @@ pub struct DataStoreStatus {
#[api(
properties: {
upid: { schema: UPID_SCHEMA },
-user: { type: Userid },
+userid: { type: Authid },
},
)]
#[derive(Serialize, Deserialize)]

@@ -711,8 +711,8 @@ pub struct TaskListItem {
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
-/// The user who started the task
+/// The authenticated entity who started the task
-pub user: Userid,
+pub userid: Authid,
/// The task end time (Epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>,

@@ -735,7 +735,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
starttime: info.upid.starttime,
worker_type: info.upid.worker_type,
worker_id: info.upid.worker_id,
-user: info.upid.userid,
+userid: info.upid.auth_id,
endtime,
status,
}

@@ -23,7 +23,7 @@ use crate::task::TaskState;
use crate::tools;
use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
-use crate::api2::types::{GarbageCollectionStatus, Userid};
+use crate::api2::types::{Authid, GarbageCollectionStatus};
use crate::server::UPID;

lazy_static! {

@@ -276,8 +276,8 @@ impl DataStore {

/// Returns the backup owner.
///
-/// The backup owner is the user who first created the backup group.
+/// The backup owner is the entity who first created the backup group.
-pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
+pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");

@@ -289,7 +289,7 @@ impl DataStore {
pub fn set_owner(
&self,
backup_group: &BackupGroup,
-userid: &Userid,
+auth_id: &Authid,
force: bool,
) -> Result<(), Error> {
let mut path = self.base_path();

@@ -309,7 +309,7 @@ impl DataStore {
let mut file = open_options.open(&path)
.map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

-writeln!(file, "{}", userid)
+writeln!(file, "{}", auth_id)
.map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

Ok(())

@@ -324,8 +324,8 @@ impl DataStore {
pub fn create_locked_backup_group(
&self,
backup_group: &BackupGroup,
-userid: &Userid,
+auth_id: &Authid,
-) -> Result<(Userid, DirLockGuard), Error> {
+) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let base_path = self.base_path();

@@ -339,7 +339,7 @@ impl DataStore {
match std::fs::create_dir(&full_path) {
Ok(_) => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
-self.set_owner(backup_group, userid, false)?;
+self.set_owner(backup_group, auth_id, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure
Ok((owner, guard))
}

@@ -36,7 +36,7 @@ use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
-use proxmox_backup::config::user::complete_user_name;
+use proxmox_backup::config::user::complete_userid;
use proxmox_backup::backup::{
archive_type,
decrypt_key,

@@ -425,7 +425,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
description: "Backup group.",
},
"new-owner": {
-type: Userid,
+type: Authid,
},
}
}

@@ -2013,7 +2013,7 @@ fn main() {
let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
.arg_param(&["group", "new-owner"])
.completion_cb("group", complete_backup_group)
-.completion_cb("new-owner", complete_user_name)
+.completion_cb("new-owner", complete_userid)
.completion_cb("repository", complete_repository);

let cmd_def = CliCommandMap::new()

@@ -388,7 +388,7 @@ fn main() {


let mut rpcenv = CliEnvironment::new();
-rpcenv.set_user(Some(String::from("root@pam")));
+rpcenv.set_auth_id(Some(String::from("root@pam")));

proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
}

@@ -30,7 +30,7 @@ use proxmox_backup::{
};


-use proxmox_backup::api2::types::Userid;
+use proxmox_backup::api2::types::{Authid, Userid};
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;

@@ -334,7 +334,7 @@ async fn schedule_datastore_garbage_collection() {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
-Userid::backup_userid().clone(),
+Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;

@@ -463,7 +463,7 @@ async fn schedule_datastore_prune() {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
-Userid::backup_userid().clone(),
+Authid::backup_auth_id().clone(),
false,
move |worker| {

@@ -579,9 +579,9 @@ async fn schedule_datastore_sync_jobs() {
Err(_) => continue, // could not get lock
};

-let userid = Userid::backup_userid();
+let auth_id = Authid::backup_auth_id();

-if let Err(err) = do_sync_job(job, job_config, userid, Some(event_str)) {
+if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
}
}

@@ -642,8 +642,8 @@ async fn schedule_datastore_verify_jobs() {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
-let userid = Userid::backup_userid().clone();
+let auth_id = Authid::backup_auth_id();
-if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
+if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
}
}

@@ -704,7 +704,7 @@ async fn schedule_task_log_rotate() {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(job_id.to_string()),
-Userid::backup_userid().clone(),
+Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;

@@ -60,7 +60,7 @@ pub fn acl_commands() -> CommandLineInterface {
"update",
CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
.arg_param(&["path", "role"])
-.completion_cb("userid", config::user::complete_user_name)
+.completion_cb("userid", config::user::complete_userid)
.completion_cb("path", config::datastore::complete_acl_path)

);

@@ -62,13 +62,13 @@ pub fn user_commands() -> CommandLineInterface {
"update",
CliCommand::new(&api2::access::user::API_METHOD_UPDATE_USER)
.arg_param(&["userid"])
-.completion_cb("userid", config::user::complete_user_name)
+.completion_cb("userid", config::user::complete_userid)
)
.insert(
"remove",
CliCommand::new(&api2::access::user::API_METHOD_DELETE_USER)
.arg_param(&["userid"])
-.completion_cb("userid", config::user::complete_user_name)
+.completion_cb("userid", config::user::complete_userid)
);

cmd_def.into()

@@ -491,7 +491,7 @@ pub async fn pull_store(
src_repo: &BackupRepository,
tgt_store: Arc<DataStore>,
delete: bool,
-userid: Userid,
+auth_id: Authid,
) -> Result<(), Error> {

// explicit create shared lock to prevent GC on newly created chunks

@@ -524,11 +524,11 @@ pub async fn pull_store(
for (groups_done, item) in list.into_iter().enumerate() {
let group = BackupGroup::new(&item.backup_type, &item.backup_id);

-let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
+let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &auth_id)?;
// permission check
-if userid != owner { // only the owner is allowed to create additional snapshots
+if auth_id != owner { // only the owner is allowed to create additional snapshots
worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
-item.backup_type, item.backup_id, userid, owner));
+item.backup_type, item.backup_id, auth_id, owner));
errors = true; // do not stop here, instead continue

} else if let Err(err) = pull_group(

@@ -15,7 +15,7 @@ use proxmox::tools::{fs::replace_file, fs::CreateOptions};
use proxmox::constnamedbitmap;
use proxmox::api::{api, schema::*};

-use crate::api2::types::Userid;
+use crate::api2::types::{Authid,Userid};

// define Privilege bitfield

@@ -231,7 +231,7 @@ pub struct AclTree {
}

pub struct AclTreeNode {
-pub users: HashMap<Userid, HashMap<String, bool>>,
+pub users: HashMap<Authid, HashMap<String, bool>>,
pub groups: HashMap<String, HashMap<String, bool>>,
pub children: BTreeMap<String, AclTreeNode>,
}

@@ -246,21 +246,21 @@ impl AclTreeNode {
}
}

-pub fn extract_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
+pub fn extract_roles(&self, auth_id: &Authid, all: bool) -> HashSet<String> {
-let user_roles = self.extract_user_roles(user, all);
+let user_roles = self.extract_user_roles(auth_id, all);
if !user_roles.is_empty() {
// user privs always override group privs
return user_roles
};

-self.extract_group_roles(user, all)
+self.extract_group_roles(auth_id.user(), all)
}

-pub fn extract_user_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
+pub fn extract_user_roles(&self, auth_id: &Authid, all: bool) -> HashSet<String> {

let mut set = HashSet::new();

-let roles = match self.users.get(user) {
+let roles = match self.users.get(auth_id) {
Some(m) => m,
None => return set,
};

@@ -312,8 +312,8 @@ impl AclTreeNode {
roles.remove(role);
}

-pub fn delete_user_role(&mut self, userid: &Userid, role: &str) {
+pub fn delete_user_role(&mut self, auth_id: &Authid, role: &str) {
-let roles = match self.users.get_mut(userid) {
+let roles = match self.users.get_mut(auth_id) {
Some(r) => r,
None => return,
};

@@ -331,8 +331,8 @@ impl AclTreeNode {
}
}

-pub fn insert_user_role(&mut self, user: Userid, role: String, propagate: bool) {
+pub fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
-let map = self.users.entry(user).or_insert_with(|| HashMap::new());
+let map = self.users.entry(auth_id).or_insert_with(|| HashMap::new());
if role == ROLE_NAME_NO_ACCESS {
map.clear();
map.insert(role, propagate);

@@ -383,13 +383,13 @@ impl AclTree {
node.delete_group_role(group, role);
}

-pub fn delete_user_role(&mut self, path: &str, userid: &Userid, role: &str) {
+pub fn delete_user_role(&mut self, path: &str, auth_id: &Authid, role: &str) {
let path = split_acl_path(path);
let node = match self.get_node(&path) {
Some(n) => n,
None => return,
};
-node.delete_user_role(userid, role);
+node.delete_user_role(auth_id, role);
}

pub fn insert_group_role(&mut self, path: &str, group: &str, role: &str, propagate: bool) {

@@ -398,10 +398,10 @@ impl AclTree {
node.insert_group_role(group.to_string(), role.to_string(), propagate);
}

-pub fn insert_user_role(&mut self, path: &str, user: &Userid, role: &str, propagate: bool) {
+pub fn insert_user_role(&mut self, path: &str, auth_id: &Authid, role: &str, propagate: bool) {
let path = split_acl_path(path);
let node = self.get_or_insert_node(&path);
-node.insert_user_role(user.to_owned(), role.to_string(), propagate);
+node.insert_user_role(auth_id.to_owned(), role.to_string(), propagate);
}

fn write_node_config(

@@ -413,18 +413,18 @@ impl AclTree {
let mut role_ug_map0 = HashMap::new();
let mut role_ug_map1 = HashMap::new();

-for (user, roles) in &node.users {
+for (auth_id, roles) in &node.users {
// no need to save, because root is always 'Administrator'
-if user == "root@pam" { continue; }
+if !auth_id.is_token() && auth_id.user() == "root@pam" { continue; }
for (role, propagate) in roles {
let role = role.as_str();
-let user = user.to_string();
+let auth_id = auth_id.to_string();
if *propagate {
role_ug_map1.entry(role).or_insert_with(|| BTreeSet::new())
-.insert(user);
+.insert(auth_id);
} else {
role_ug_map0.entry(role).or_insert_with(|| BTreeSet::new())
-.insert(user);
+.insert(auth_id);
}
}
}

@@ -576,10 +576,10 @@ impl AclTree {
Ok(tree)
}

-pub fn roles(&self, userid: &Userid, path: &[&str]) -> HashSet<String> {
+pub fn roles(&self, auth_id: &Authid, path: &[&str]) -> HashSet<String> {

let mut node = &self.root;
-let mut role_set = node.extract_roles(userid, path.is_empty());
+let mut role_set = node.extract_roles(auth_id, path.is_empty());

for (pos, comp) in path.iter().enumerate() {
let last_comp = (pos + 1) == path.len();

@@ -587,7 +587,7 @@ impl AclTree {
Some(n) => n,
None => return role_set, // path not found
};
-let new_set = node.extract_roles(userid, last_comp);
+let new_set = node.extract_roles(auth_id, last_comp);
if !new_set.is_empty() {
// overwrite previous settings
role_set = new_set;

@@ -675,22 +675,22 @@ mod test {
use anyhow::{Error};
use super::AclTree;

-use crate::api2::types::Userid;
+use crate::api2::types::Authid;

fn check_roles(
tree: &AclTree,
-user: &Userid,
+auth_id: &Authid,
path: &str,
expected_roles: &str,
) {

let path_vec = super::split_acl_path(path);
-let mut roles = tree.roles(user, &path_vec)
+let mut roles = tree.roles(auth_id, &path_vec)
.iter().map(|v| v.clone()).collect::<Vec<String>>();
roles.sort();
let roles = roles.join(",");

-assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", user, path);
+assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", auth_id, path);
}

#[test]

@@ -721,13 +721,13 @@ acl:1:/storage:user1@pbs:Admin
acl:1:/storage/store1:user1@pbs:DatastoreBackup
acl:1:/storage/store2:user2@pbs:DatastoreBackup
"###)?;
-let user1: Userid = "user1@pbs".parse()?;
+let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "");
check_roles(&tree, &user1, "/storage", "Admin");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
check_roles(&tree, &user1, "/storage/store2", "Admin");

-let user2: Userid = "user2@pbs".parse()?;
+let user2: Authid = "user2@pbs".parse()?;
check_roles(&tree, &user2, "/", "");
check_roles(&tree, &user2, "/storage", "");
check_roles(&tree, &user2, "/storage/store1", "");

@@ -744,7 +744,7 @@ acl:1:/:user1@pbs:Admin
acl:1:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?;
-let user1: Userid = "user1@pbs".parse()?;
+let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "Admin");
check_roles(&tree, &user1, "/storage", "NoAccess");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");

@@ -770,7 +770,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup

let mut tree = AclTree::new();

-let user1: Userid = "user1@pbs".parse()?;
+let user1: Authid = "user1@pbs".parse()?;

tree.insert_user_role("/", &user1, "Admin", true);
tree.insert_user_role("/", &user1, "Audit", true);

@@ -794,7 +794,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup

let mut tree = AclTree::new();

-let user1: Userid = "user1@pbs".parse()?;
+let user1: Authid = "user1@pbs".parse()?;

tree.insert_user_role("/storage", &user1, "NoAccess", true);

@@ -9,10 +9,10 @@ use lazy_static::lazy_static;
use proxmox::api::UserInformation;

use super::acl::{AclTree, ROLE_NAMES, ROLE_ADMIN};
-use super::user::User;
+use super::user::{ApiToken, User};
-use crate::api2::types::Userid;
+use crate::api2::types::{Authid, Userid};

-/// Cache User/Group/Acl configuration data for fast permission tests
+/// Cache User/Group/Token/Acl configuration data for fast permission tests
pub struct CachedUserInfo {
user_cfg: Arc<SectionConfigData>,
acl_tree: Arc<AclTree>,

@@ -57,8 +57,10 @@ impl CachedUserInfo {
Ok(config)
}

-/// Test if a user account is enabled and not expired
+/// Test if a authentication id is enabled and not expired
-pub fn is_active_user(&self, userid: &Userid) -> bool {
+pub fn is_active_auth_id(&self, auth_id: &Authid) -> bool {
+let userid = auth_id.user();

if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
if !info.enable.unwrap_or(true) {
return false;

@@ -68,24 +70,41 @@ impl CachedUserInfo {
return false;
}
}
-return true;
} else {
return false;
}

+if auth_id.is_token() {
+if let Ok(info) = self.user_cfg.lookup::<ApiToken>("token", &auth_id.to_string()) {
+if !info.enable.unwrap_or(true) {
+return false;
+}
+if let Some(expire) = info.expire {
+if expire > 0 && expire <= now() {
+return false;
+}
+}
+return true;
+} else {
+return false;
+}
+}

+return true;
}

pub fn check_privs(
&self,
-userid: &Userid,
+auth_id: &Authid,
path: &[&str],
required_privs: u64,
partial: bool,
) -> Result<(), Error> {
-let user_privs = self.lookup_privs(&userid, path);
+let privs = self.lookup_privs(&auth_id, path);
let allowed = if partial {
-(user_privs & required_privs) != 0
+(privs & required_privs) != 0
} else {
-(user_privs & required_privs) == required_privs
+(privs & required_privs) == required_privs
};
if !allowed {
// printing the path doesn't leaks any information as long as we

@@ -95,27 +114,33 @@ impl CachedUserInfo {
Ok(())
}

-pub fn is_superuser(&self, userid: &Userid) -> bool {
+pub fn is_superuser(&self, auth_id: &Authid) -> bool {
-userid == "root@pam"
+!auth_id.is_token() && auth_id.user() == "root@pam"
}

pub fn is_group_member(&self, _userid: &Userid, _group: &str) -> bool {
false
}

-pub fn lookup_privs(&self, userid: &Userid, path: &[&str]) -> u64 {
+pub fn lookup_privs(&self, auth_id: &Authid, path: &[&str]) -> u64 {
+if self.is_superuser(auth_id) {
-if self.is_superuser(userid) {
return ROLE_ADMIN;
}

-let roles = self.acl_tree.roles(userid, path);
+let roles = self.acl_tree.roles(auth_id, path);
let mut privs: u64 = 0;
for role in roles {
if let Some((role_privs, _)) = ROLE_NAMES.get(role.as_str()) {
privs |= role_privs;
}
}

+if auth_id.is_token() {
+// limit privs to that of owning user
+let user_auth_id = Authid::from(auth_id.user().clone());
+privs &= self.lookup_privs(&user_auth_id, path);
+}

privs
}
}

@@ -129,9 +154,9 @@ impl UserInformation for CachedUserInfo {
false
}

-fn lookup_privs(&self, userid: &str, path: &[&str]) -> u64 {
+fn lookup_privs(&self, auth_id: &str, path: &[&str]) -> u64 {
-match userid.parse::<Userid>() {
+match auth_id.parse::<Authid>() {
-Ok(userid) => Self::lookup_privs(self, &userid, path),
+Ok(auth_id) => Self::lookup_privs(self, &auth_id, path),
Err(_) => 0,
}
}
}

@@ -45,7 +45,7 @@ pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth t
type: u16,
},
userid: {
-type: Userid,
+type: Authid,
},
password: {
schema: REMOTE_PASSWORD_SCHEMA,

@@ -52,6 +52,36 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
.max_length(64)
.schema();

+#[api(
+properties: {
+tokenid: {
+schema: PROXMOX_TOKEN_ID_SCHEMA,
+},
+comment: {
+optional: true,
+schema: SINGLE_LINE_COMMENT_SCHEMA,
+},
+enable: {
+optional: true,
+schema: ENABLE_USER_SCHEMA,
+},
+expire: {
+optional: true,
+schema: EXPIRE_USER_SCHEMA,
+},
+}
+)]
+#[derive(Serialize,Deserialize)]
+/// ApiToken properties.
+pub struct ApiToken {
+pub tokenid: Authid,
+#[serde(skip_serializing_if="Option::is_none")]
+pub comment: Option<String>,
+#[serde(skip_serializing_if="Option::is_none")]
+pub enable: Option<bool>,
+#[serde(skip_serializing_if="Option::is_none")]
+pub expire: Option<i64>,
+}

#[api(
properties: {

@@ -103,15 +133,21 @@ pub struct User {
}

fn init() -> SectionConfig {
-let obj_schema = match User::API_SCHEMA {
+let mut config = SectionConfig::new(&Authid::API_SCHEMA);
-Schema::Object(ref obj_schema) => obj_schema,
+let user_schema = match User::API_SCHEMA {
+Schema::Object(ref user_schema) => user_schema,
_ => unreachable!(),
};
+let user_plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), user_schema);
+config.register_plugin(user_plugin);

-let plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), obj_schema);
+let token_schema = match ApiToken::API_SCHEMA {
-let mut config = SectionConfig::new(&Userid::API_SCHEMA);
+Schema::Object(ref token_schema) => token_schema,
+_ => unreachable!(),
-config.register_plugin(plugin);
+};
+let token_plugin = SectionConfigPlugin::new("token".to_string(), Some("tokenid".to_string()), token_schema);
+config.register_plugin(token_plugin);

config
}

@@ -206,9 +242,26 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
}

// shell completion helper
-pub fn complete_user_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_userid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
-Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+Ok((data, _digest)) => {
+data.sections.iter()
+.filter_map(|(id, (section_type, _))| {
+if section_type == "user" {
+Some(id.to_string())
+} else {
+None
+}
+}).collect()
+},
Err(_) => return vec![],
}
}

+// shell completion helper
+pub fn complete_authid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+match config() {
+Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+Err(_) => vec![],
+}
+}

@@ -6,7 +6,7 @@ use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
pub struct RestEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
-user: Option<String>,
+auth_id: Option<String>,
client_ip: Option<std::net::SocketAddr>,
}

@@ -14,7 +14,7 @@ impl RestEnvironment {
pub fn new(env_type: RpcEnvironmentType) -> Self {
Self {
result_attributes: json!({}),
-user: None,
+auth_id: None,
client_ip: None,
env_type,
}

@@ -35,12 +35,12 @@ impl RpcEnvironment for RestEnvironment {
self.env_type
}

-fn set_user(&mut self, user: Option<String>) {
+fn set_auth_id(&mut self, auth_id: Option<String>) {
-self.user = user;
+self.auth_id = auth_id;
}

-fn get_user(&self) -> Option<String> {
+fn get_auth_id(&self) -> Option<String> {
-self.user.clone()
+self.auth_id.clone()
}

fn set_client_ip(&mut self, client_ip: Option<std::net::SocketAddr>) {

@@ -42,7 +42,7 @@ use super::formatter::*;
use super::ApiConfig;

use crate::auth_helpers::*;
-use crate::api2::types::Userid;
+use crate::api2::types::{Authid, Userid};
use crate::tools;
use crate::tools::FileLogger;
use crate::tools::ticket::Ticket;

@@ -138,9 +138,9 @@ fn log_response(
log::error!("{} {}: {} {}: [client {}] {}", method.as_str(), path, status.as_str(), reason, peer, message);
}
if let Some(logfile) = logfile {
-let user = match resp.extensions().get::<Userid>() {
+let auth_id = match resp.extensions().get::<Authid>() {
-Some(userid) => userid.as_str(),
+Some(auth_id) => auth_id.to_string(),
-None => "-",
+None => "-".to_string(),
};
let now = proxmox::tools::time::epoch_i64();
// time format which apache/nginx use (by default), copied from pve-http-server

@@ -153,7 +153,7 @@ fn log_response(
.log(format!(
"{} - {} [{}] \"{} {}\" {} {} {}",
peer.ip(),
-user,
+auth_id,
datetime,
method.as_str(),
path,

@@ -441,7 +441,7 @@ fn get_index(
.unwrap();

if let Some(userid) = userid {
-resp.extensions_mut().insert(userid);
+resp.extensions_mut().insert(Authid::from((userid, None)));
}

resp

@@ -555,14 +555,15 @@ fn check_auth(
ticket: &Option<String>,
csrf_token: &Option<String>,
user_info: &CachedUserInfo,
-) -> Result<Userid, Error> {
+) -> Result<Authid, Error> {
let ticket_lifetime = tools::ticket::TICKET_LIFETIME;

let ticket = ticket.as_ref().map(String::as_str);
let userid: Userid = Ticket::parse(&ticket.ok_or_else(|| format_err!("missing ticket"))?)?
.verify_with_time_frame(public_auth_key(), "PBS", None, -300..ticket_lifetime)?;

-if !user_info.is_active_user(&userid) {
+let auth_id = Authid::from(userid.clone());
+if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}

@@ -574,7 +575,7 @@ fn check_auth(
}
}

-Ok(userid)
+Ok(Authid::from(userid))
}

async fn handle_request(

@@ -632,7 +633,7 @@ async fn handle_request(
if auth_required {
let (ticket, csrf_token, _) = extract_auth_data(&parts.headers);
match check_auth(&method, &ticket, &csrf_token, &user_info) {
-Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
+Ok(authid) => rpcenv.set_auth_id(Some(authid.to_string())),
Err(err) => {
// always delay unauthorized calls by 3 seconds (from start of request)
let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);

@@ -648,8 +649,8 @@ async fn handle_request(
return Ok((formatter.format_error)(err));
}
Some(api_method) => {
-let user = rpcenv.get_user();
+let auth_id = rpcenv.get_auth_id();
-if !check_api_permission(api_method.access.permission, user.as_deref(), &uri_param, user_info.as_ref()) {
+if !check_api_permission(api_method.access.permission, auth_id.as_deref(), &uri_param, user_info.as_ref()) {
let err = http_err!(FORBIDDEN, "permission check failed");
tokio::time::delay_until(Instant::from_std(access_forbidden_time)).await;
return Ok((formatter.format_error)(err));

@@ -666,9 +667,9 @@ async fn handle_request(
Err(err) => (formatter.format_error)(err),
};

-if let Some(user) = user {
+if let Some(auth_id) = auth_id {
-let userid: Userid = user.parse()?;
+let auth_id: Authid = auth_id.parse()?;
-response.extensions_mut().insert(userid);
+response.extensions_mut().insert(auth_id);
}

return Ok(response);

@@ -687,9 +688,10 @@ async fn handle_request(
let (ticket, csrf_token, language) = extract_auth_data(&parts.headers);
if ticket != None {
match check_auth(&method, &ticket, &csrf_token, &user_info) {
-Ok(userid) => {
+Ok(auth_id) => {
-let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
+let userid = auth_id.user();
-return Ok(get_index(Some(userid), Some(new_csrf_token), language, &api, parts));
+let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), userid);
+return Ok(get_index(Some(userid.clone()), Some(new_csrf_token), language, &api, parts));
}
_ => {
tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;

@@ -6,7 +6,7 @@ use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
use proxmox::const_regex;
use proxmox::sys::linux::procfs;

-use crate::api2::types::Userid;
+use crate::api2::types::Authid;

/// Unique Process/Task Identifier
///

@@ -34,8 +34,8 @@ pub struct UPID {
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
-/// The user who started the task
+/// The authenticated entity who started the task
-pub userid: Userid,
+pub auth_id: Authid,
/// The node name.
pub node: String,
}

@@ -47,7 +47,7 @@ const_regex! {
pub PROXMOX_UPID_REGEX = concat!(
r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
-r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<userid>[^:\s]+):$"
+r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
);
}

@@ -65,7 +65,7 @@ impl UPID {
pub fn new(
worker_type: &str,
worker_id: Option<String>,
-userid: Userid,
+auth_id: Authid,
) -> Result<Self, Error> {

let pid = unsafe { libc::getpid() };

@@ -87,7 +87,7 @@ impl UPID {
task_id,
worker_type: worker_type.to_owned(),
worker_id,
-userid,
+auth_id,
node: proxmox::tools::nodename().to_owned(),
})
}

@@ -122,7 +122,7 @@ impl std::str::FromStr for UPID {
task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
worker_type: cap["wtype"].to_string(),
worker_id,
-userid: cap["userid"].parse()?,
+auth_id: cap["authid"].parse()?,
node: cap["node"].to_string(),
})
} else {

@@ -146,6 +146,6 @@ impl std::fmt::Display for UPID {
// more that 8 characters for pstart

write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
-self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.userid)
+self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
}
}

@@ -17,7 +17,7 @@ use crate::{
pub fn do_verification_job(
mut job: Job,
verification_job: VerificationJobConfig,
-userid: &Userid,
+auth_id: &Authid,
schedule: Option<String>,
) -> Result<String, Error> {

@@ -48,14 +48,14 @@ pub fn do_verification_job(
}
};

-let email = crate::server::lookup_user_email(userid);
+let email = crate::server::lookup_user_email(auth_id.user());

let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
let upid_str = WorkerTask::new_thread(
&worker_type,
Some(job.jobname().to_string()),
-userid.clone(),
+auth_id.clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
@ -21,7 +21,7 @@ use super::UPID;
|
||||||
|
|
||||||
use crate::tools::logrotate::{LogRotate, LogRotateFiles};
|
use crate::tools::logrotate::{LogRotate, LogRotateFiles};
|
||||||
use crate::tools::{FileLogger, FileLogOptions};
|
use crate::tools::{FileLogger, FileLogOptions};
|
||||||
use crate::api2::types::Userid;
|
use crate::api2::types::Authid;
|
||||||
|
|
||||||
macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
||||||
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
|
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
|
||||||
|
@@ -611,10 +611,10 @@ impl Drop for WorkerTask {

 impl WorkerTask {

-    pub fn new(worker_type: &str, worker_id: Option<String>, userid: Userid, to_stdout: bool) -> Result<Arc<Self>, Error> {
+    pub fn new(worker_type: &str, worker_id: Option<String>, auth_id: Authid, to_stdout: bool) -> Result<Arc<Self>, Error> {
         println!("register worker");

-        let upid = UPID::new(worker_type, worker_id, userid)?;
+        let upid = UPID::new(worker_type, worker_id, auth_id)?;
         let task_id = upid.task_id;

         let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);
@@ -664,14 +664,14 @@ impl WorkerTask {
     pub fn spawn<F, T>(
         worker_type: &str,
         worker_id: Option<String>,
-        userid: Userid,
+        auth_id: Authid,
         to_stdout: bool,
         f: F,
     ) -> Result<String, Error>
         where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
               T: Send + 'static + Future<Output = Result<(), Error>>,
     {
-        let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
+        let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
         let upid_str = worker.upid.to_string();
         let f = f(worker.clone());
         tokio::spawn(async move {
@@ -686,7 +686,7 @@ impl WorkerTask {
     pub fn new_thread<F>(
         worker_type: &str,
         worker_id: Option<String>,
-        userid: Userid,
+        auth_id: Authid,
         to_stdout: bool,
         f: F,
     ) -> Result<String, Error>
@@ -694,7 +694,7 @@ impl WorkerTask {
     {
         println!("register worker thread");

-        let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
+        let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
         let upid_str = worker.upid.to_string();

         let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {
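After the rename, every worker, whether spawned as a task or a thread, carries the Authid it runs as. A self-contained sketch of the thread variant's shape (assumed, simplified types; new_thread() and Authid here are stand-ins written for this example, not the real WorkerTask API):

use std::thread;

#[derive(Clone, Debug)]
struct Authid(String); // e.g. "admin@pbs" or "admin@pbs!automation"

fn new_thread<F>(
    worker_type: &str,
    worker_id: Option<String>,
    auth_id: Authid,
    f: F,
) -> thread::JoinHandle<()>
where
    F: FnOnce(Authid) + Send + 'static,
{
    // The worker thread owns the auth id; everything it does runs "as" that identity.
    let name = format!("{}:{}", worker_type, worker_id.unwrap_or_default());
    thread::Builder::new()
        .name(name)
        .spawn(move || f(auth_id))
        .expect("failed to spawn worker thread")
}

fn main() {
    let handle = new_thread(
        "garbage_collection",
        Some("store1".to_string()),
        Authid("admin@pbs!automation".to_string()),
        |auth_id| println!("worker running as {:?}", auth_id),
    );
    handle.join().unwrap();
}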
@@ -57,7 +57,7 @@ fn worker_task_abort() -> Result<(), Error> {
     let res = server::WorkerTask::new_thread(
         "garbage_collection",
         None,
-        proxmox_backup::api2::types::Userid::root_userid().clone(),
+        proxmox_backup::api2::types::Authid::root_auth_id().clone(),
         true,
         move |worker| {
             println!("WORKER {}", worker);
@@ -53,7 +53,7 @@ Ext.define('PBS.config.ACLView', {
                 'delete': 1,
                 path: rec.data.path,
                 role: rec.data.roleid,
-                userid: rec.data.ugid,
+                auth_id: rec.data.ugid,
             },
             callback: function() {
                 me.reload();
@@ -40,7 +40,7 @@ Ext.define('PBS.window.ACLEdit', {
         {
             xtype: 'pbsUserSelector',
             fieldLabel: gettext('User'),
-            name: 'userid',
+            name: 'auth_id',
             allowBlank: false,
         },
         {