replace Userid with Authid

in most generic places. This is accompanied by a change in
RpcEnvironment that purposely breaks existing call sites, so remaining
Userid-based callers have to be adapted.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fabian Grünbichler
2020-10-23 13:33:21 +02:00
committed by Wolfgang Bumiller
parent e10c5c74f6
commit e6dc35acb8
43 changed files with 400 additions and 303 deletions
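
An Authid identifies either a user or one of that user's API tokens (hence the is_token()/user() helpers used in the hunks below). For orientation, here is a minimal sketch of the RpcEnvironment accessor rename the message refers to, reconstructed only from the trait implementations visible further down; the real trait in the proxmox crate has additional items:

// Sketch only, not the complete trait definition.
pub trait RpcEnvironment {
    // previously: fn set_user(&mut self, user: Option<String>);
    /// Set the authentication id (user or API token) of the current request.
    fn set_auth_id(&mut self, auth_id: Option<String>);

    // previously: fn get_user(&self) -> Option<String>;
    /// Get the authentication id of the current request, if any.
    fn get_auth_id(&self) -> Option<String>;
}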

View File

@ -31,7 +31,8 @@ fn authenticate_user(
) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
if !user_info.is_active_user(&userid) {
let auth_id = Authid::from(userid.clone());
if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}
@ -69,8 +70,7 @@ fn authenticate_user(
path_vec.push(part);
}
}
user_info.check_privs(userid, &path_vec, *privilege, false)?;
user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
return Ok(false);
}
}
@ -213,9 +213,10 @@ fn change_password(
) -> Result<Value, Error> {
let current_user: Userid = rpcenv
.get_user()
.get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
let current_auth = Authid::from(current_user.clone());
let mut allowed = userid == current_user;
@ -223,7 +224,7 @@ fn change_password(
if !allowed {
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&current_user, &[]);
let privs = user_info.lookup_privs(&current_auth, &[]);
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
}
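
The remaining files repeat this call-site migration over and over; condensed into one hedged sketch (check_request_privs, the ACL path and the privilege bit are illustrative placeholders, not code from this commit):

// Illustrative only -- shows the recurring pattern, not actual commit code.
fn check_request_privs(rpcenv: &dyn RpcEnvironment, privilege: u64) -> Result<(), Error> {
    // old: let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    // placeholder ACL path and privilege bit
    user_info.check_privs(&auth_id, &["datastore", "example-store"], privilege, false)?;
    Ok(())
}

// Endpoints that still need a plain Userid (e.g. the console handlers)
// parse the Userid first and widen it afterwards:
//     let auth_id = Authid::from(userid.clone());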

View File

@ -140,9 +140,9 @@ pub fn read_acl(
optional: true,
schema: ACL_PROPAGATE_SCHEMA,
},
userid: {
auth_id: {
optional: true,
type: Userid,
type: Authid,
},
group: {
optional: true,
@ -168,7 +168,7 @@ pub fn update_acl(
path: String,
role: String,
propagate: Option<bool>,
userid: Option<Userid>,
auth_id: Option<Authid>,
group: Option<String>,
delete: Option<bool>,
digest: Option<String>,
@ -190,11 +190,12 @@ pub fn update_acl(
if let Some(ref _group) = group {
bail!("parameter 'group' - groups are currently not supported.");
} else if let Some(ref userid) = userid {
} else if let Some(ref auth_id) = auth_id {
if !delete { // Note: we allow to delete non-existent users
let user_cfg = crate::config::user::cached_config()?;
if user_cfg.sections.get(&userid.to_string()).is_none() {
bail!("no such user.");
if user_cfg.sections.get(&auth_id.to_string()).is_none() {
bail!(format!("no such {}.",
if auth_id.is_token() { "API token" } else { "user" }));
}
}
} else {
@ -205,11 +206,11 @@ pub fn update_acl(
acl::check_acl_path(&path)?;
}
if let Some(userid) = userid {
if let Some(auth_id) = auth_id {
if delete {
tree.delete_user_role(&path, &userid, &role);
tree.delete_user_role(&path, &auth_id, &role);
} else {
tree.insert_user_role(&path, &userid, &role, propagate);
tree.insert_user_role(&path, &auth_id, &role, propagate);
}
} else if let Some(group) = group {
if delete {

View File

@ -39,10 +39,13 @@ pub fn list_users(
let (config, digest) = user::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
// intentionally user only for now
let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
let auth_id = Authid::from(userid.clone());
let user_info = CachedUserInfo::new()?;
let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
let filter_by_privs = |user: &user::User| {

View File

@ -47,11 +47,11 @@ use crate::config::acl::{
fn check_backup_owner(
store: &DataStore,
group: &BackupGroup,
userid: &Userid,
auth_id: &Authid,
) -> Result<(), Error> {
let owner = store.get_owner(group)?;
if &owner != userid {
bail!("backup owner check failed ({} != {})", userid, owner);
if &owner != auth_id {
bail!("backup owner check failed ({} != {})", auth_id, owner);
}
Ok(())
}
@ -149,9 +149,9 @@ fn list_groups(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
@ -171,7 +171,7 @@ fn list_groups(
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
if !list_all && owner != userid {
if !list_all && owner != auth_id {
continue;
}
@ -230,16 +230,16 @@ pub fn list_snapshot_files(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
@ -282,16 +282,16 @@ fn delete_snapshot(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }
datastore.remove_backup_dir(&snapshot, false)?;
@ -338,9 +338,9 @@ pub fn list_snapshots (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
@ -362,7 +362,7 @@ pub fn list_snapshots (
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
if !list_all && owner != userid {
if !list_all && owner != auth_id {
continue;
}
@ -570,13 +570,13 @@ pub fn verify(
_ => bail!("parameters do not specify a backup group or snapshot"),
}
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread(
worker_type,
Some(worker_id.clone()),
userid,
auth_id,
to_stdout,
move |worker| {
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
@ -705,9 +705,9 @@ fn prune(
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
@ -716,7 +716,7 @@ fn prune(
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
if !allowed { check_backup_owner(&datastore, &group, &auth_id)?; }
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
@ -758,7 +758,7 @@ fn prune(
// We use a WorkerTask just to have a task log, but run synchrounously
let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
if keep_all {
worker.log("No prune selection - keeping all files.");
@ -833,6 +833,7 @@ fn start_garbage_collection(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
println!("Starting garbage collection on store {}", store);
@ -841,7 +842,7 @@ fn start_garbage_collection(
let upid_str = WorkerTask::new_thread(
"garbage_collection",
Some(store.clone()),
Userid::root_userid().clone(),
auth_id.clone(),
to_stdout,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store));
@ -911,13 +912,13 @@ fn get_datastore_list(
let (config, _digest) = datastore::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
let mut entry = json!({ "store": store });
@ -962,9 +963,9 @@ fn download_file(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@ -975,7 +976,7 @@ fn download_file(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
@ -1035,9 +1036,9 @@ fn download_file_decoded(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@ -1048,7 +1049,7 @@ fn download_file_decoded(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
@ -1160,8 +1161,8 @@ fn upload_backup_log(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &auth_id)?;
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
@ -1230,14 +1231,14 @@ fn catalog(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let file_name = CATALOG_NAME;
@ -1401,9 +1402,9 @@ fn pxar_file_download(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
@ -1414,7 +1415,7 @@ fn pxar_file_download(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
@ -1580,14 +1581,14 @@ fn get_notes(
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let (manifest, _) = datastore.load_manifest(&backup_dir)?;
@ -1633,14 +1634,14 @@ fn set_notes(
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
datastore.update_manifest(&backup_dir,|manifest| {
manifest.unprotected["notes"] = notes.into();
@ -1662,7 +1663,7 @@ fn set_notes(
schema: BACKUP_ID_SCHEMA,
},
"new-owner": {
type: Userid,
type: Authid,
},
},
},
@ -1675,7 +1676,7 @@ fn set_backup_owner(
store: String,
backup_type: String,
backup_id: String,
new_owner: Userid,
new_owner: Authid,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
@ -1685,8 +1686,14 @@ fn set_backup_owner(
let user_info = CachedUserInfo::new()?;
if !user_info.is_active_user(&new_owner) {
bail!("user '{}' is inactive or non-existent", new_owner);
if !user_info.is_active_auth_id(&new_owner) {
bail!("{} '{}' is inactive or non-existent",
if new_owner.is_token() {
"API token".to_string()
} else {
"user".to_string()
},
new_owner);
}
datastore.set_owner(&backup_group, &new_owner, true)?;

View File

@ -101,11 +101,11 @@ fn run_sync_job(
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let job = Job::new("syncjob", &id)?;
let upid_str = do_sync_job(job, sync_job, &userid, None)?;
let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;
Ok(upid_str)
}

View File

@ -101,11 +101,11 @@ fn run_verification_job(
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let job = Job::new("verificationjob", &id)?;
let upid_str = do_verification_job(job, verification_job, &userid, None)?;
let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;
Ok(upid_str)
}

View File

@ -59,12 +59,12 @@ async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
@ -105,12 +105,12 @@ async move {
};
// lock backup group to only allow one backup per group at a time
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
// permission check
if owner != userid && worker_type != "benchmark" {
if owner != auth_id && worker_type != "benchmark" {
// only the owner is allowed to create additional snapshots
bail!("backup owner check failed ({} != {})", userid, owner);
bail!("backup owner check failed ({} != {})", auth_id, owner);
}
let last_backup = {
@ -153,9 +153,9 @@ async move {
if !is_new { bail!("backup directory already exists."); }
WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
env_type, userid, worker.clone(), datastore, backup_dir);
env_type, auth_id, worker.clone(), datastore, backup_dir);
env.debug = debug;
env.last_backup = last_backup;

View File

@ -10,7 +10,7 @@ use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::api2::types::Userid;
use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;
@ -104,7 +104,7 @@ impl SharedBackupState {
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: Userid,
auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@ -117,7 +117,7 @@ pub struct BackupEnvironment {
impl BackupEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
user: Userid,
auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@ -137,7 +137,7 @@ impl BackupEnvironment {
Self {
result_attributes: json!({}),
env_type,
user,
auth_id,
worker,
datastore,
debug: false,
@ -518,7 +518,7 @@ impl BackupEnvironment {
WorkerTask::new_thread(
"verify",
Some(worker_id),
self.user.clone(),
self.auth_id.clone(),
false,
move |worker| {
worker.log("Automatically verifying newly added snapshot");
@ -599,12 +599,12 @@ impl RpcEnvironment for BackupEnvironment {
self.env_type
}
fn set_user(&mut self, _user: Option<String>) {
panic!("unable to change user");
fn set_auth_id(&mut self, _auth_id: Option<String>) {
panic!("unable to change auth_id");
}
fn get_user(&self) -> Option<String> {
Some(self.user.to_string())
fn get_auth_id(&self) -> Option<String> {
Some(self.auth_id.to_string())
}
}

View File

@ -35,14 +35,14 @@ pub fn list_datastores(
let (config, digest) = datastore::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
};

View File

@ -66,7 +66,7 @@ pub fn list_remotes(
default: 8007,
},
userid: {
type: Userid,
type: Authid,
},
password: {
schema: remote::REMOTE_PASSWORD_SCHEMA,
@ -167,7 +167,7 @@ pub enum DeletableProperty {
},
userid: {
optional: true,
type: Userid,
type: Authid,
},
password: {
optional: true,

View File

@ -91,10 +91,12 @@ async fn termproxy(
cmd: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
// intentionally user only for now
let userid: Userid = rpcenv
.get_user()
.get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
let auth_id = Authid::from(userid.clone());
if userid.realm() != "pam" {
bail!("only pam users can use the console");
@ -137,7 +139,7 @@ async fn termproxy(
let upid = WorkerTask::spawn(
"termproxy",
None,
userid,
auth_id,
false,
move |worker| async move {
// move inside the worker so that it survives and does not close the port
@ -272,7 +274,8 @@ fn upgrade_to_websocket(
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
// intentionally user only for now
let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
let ticket = tools::required_string_param(&param, "vncticket")?;
let port: u16 = tools::required_integer_param(&param, "port")? as u16;

View File

@ -12,7 +12,7 @@ use crate::server::WorkerTask;
use crate::tools::http;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
const_regex! {
VERSION_EPOCH_REGEX = r"^\d+:";
@ -351,11 +351,11 @@ pub fn apt_update_database(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
if !quiet { worker.log("starting apt-get update") }
// TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE

View File

@ -13,7 +13,7 @@ use crate::tools::disks::{
};
use crate::server::WorkerTask;
use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
use crate::api2::types::{Authid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
pub mod directory;
pub mod zfs;
@ -140,7 +140,7 @@ pub fn initialize_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
@ -149,7 +149,7 @@ pub fn initialize_disk(
}
let upid_str = WorkerTask::new_thread(
"diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
"diskinit", Some(disk.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("initialize disk {}", disk));

View File

@ -134,7 +134,7 @@ pub fn create_datastore_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
@ -143,7 +143,7 @@ pub fn create_datastore_disk(
}
let upid_str = WorkerTask::new_thread(
"dircreate", Some(name.clone()), userid, to_stdout, move |worker|
"dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create datastore '{}' on disk {}", name, disk));

View File

@ -256,7 +256,7 @@ pub fn create_zpool(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let add_datastore = add_datastore.unwrap_or(false);
@ -316,7 +316,7 @@ pub fn create_zpool(
}
let upid_str = WorkerTask::new_thread(
"zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
"zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));

View File

@ -684,9 +684,9 @@ pub async fn reload_network_config(
network::assert_ifupdown2_installed()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id, true, |_worker| async {
let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);

View File

@ -182,7 +182,7 @@ fn get_service_state(
Ok(json_service_state(&service, status))
}
fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {
fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
let workerid = format!("srv{}", &cmd);
@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value
let upid = WorkerTask::new_thread(
&workerid,
Some(service.clone()),
userid,
auth_id,
false,
move |_worker| {
@ -244,11 +244,11 @@ fn start_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("starting service {}", service);
run_service_command(&service, "start", userid)
run_service_command(&service, "start", auth_id)
}
#[api(
@ -274,11 +274,11 @@ fn stop_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("stopping service {}", service);
run_service_command(&service, "stop", userid)
run_service_command(&service, "stop", auth_id)
}
#[api(
@ -304,15 +304,15 @@ fn restart_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("re-starting service {}", service);
if &service == "proxmox-backup-proxy" {
// special case, avoid aborting running tasks
run_service_command(&service, "reload", userid)
run_service_command(&service, "reload", auth_id)
} else {
run_service_command(&service, "restart", userid)
run_service_command(&service, "restart", auth_id)
}
}
@ -339,11 +339,11 @@ fn reload_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("reloading service {}", service);
run_service_command(&service, "reload", userid)
run_service_command(&service, "reload", auth_id)
}

View File

@ -7,7 +7,7 @@ use crate::tools;
use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
use crate::config::acl::{PRIV_SYS_AUDIT,PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
use crate::api2::types::{NODE_SCHEMA, Userid};
use crate::api2::types::{NODE_SCHEMA, Authid};
#[api(
input: {
@ -100,9 +100,9 @@ fn get_subscription(
},
};
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &[]);
let user_privs = user_info.lookup_privs(&auth_id, &[]);
if (user_privs & PRIV_SYS_AUDIT) == 0 {
// not enough privileges for full state

View File

@ -84,11 +84,11 @@ async fn get_task_status(
let upid = extract_upid(&param)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
if userid != upid.userid {
if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let mut result = json!({
@ -99,7 +99,7 @@ async fn get_task_status(
"starttime": upid.starttime,
"type": upid.worker_type,
"id": upid.worker_id,
"user": upid.userid,
"user": upid.auth_id,
});
if crate::server::worker_is_active(&upid).await? {
@ -161,11 +161,11 @@ async fn read_task_log(
let upid = extract_upid(&param)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
if userid != upid.userid {
if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let test_status = param["test-status"].as_bool().unwrap_or(false);
@ -234,11 +234,11 @@ fn stop_task(
let upid = extract_upid(&param)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
if userid != upid.userid {
if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
}
server::abort_worker_async(upid);
@ -308,9 +308,9 @@ pub fn list_tasks(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
@ -326,10 +326,10 @@ pub fn list_tasks(
Err(_) => return None,
};
if !list_all && info.upid.userid != userid { return None; }
if !list_all && info.upid.auth_id != auth_id { return None; }
if let Some(userid) = &userfilter {
if !info.upid.userid.as_str().contains(userid) { return None; }
if let Some(needle) = &userfilter {
if !info.upid.auth_id.to_string().contains(needle) { return None; }
}
if let Some(store) = store {

View File

@ -20,7 +20,7 @@ use crate::config::{
pub fn check_pull_privs(
userid: &Userid,
auth_id: &Authid,
store: &str,
remote: &str,
remote_store: &str,
@ -29,11 +29,11 @@ pub fn check_pull_privs(
let user_info = CachedUserInfo::new()?;
user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
if delete {
user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
}
Ok(())
@ -68,19 +68,19 @@ pub async fn get_pull_parameters(
pub fn do_sync_job(
mut job: Job,
sync_job: SyncJobConfig,
userid: &Userid,
auth_id: &Authid,
schedule: Option<String>,
) -> Result<String, Error> {
let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
let email = crate::server::lookup_user_email(userid);
let email = crate::server::lookup_user_email(auth_id.user());
let upid_str = WorkerTask::spawn(
&worker_type,
Some(job.jobname().to_string()),
userid.clone(),
auth_id.clone(),
false,
move |worker| async move {
@ -101,7 +101,9 @@ pub fn do_sync_job(
worker.log(format!("Sync datastore '{}' from '{}/{}'",
sync_job.store, sync_job.remote, sync_job.remote_store));
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
let backup_auth_id = Authid::backup_auth_id();
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, backup_auth_id.clone()).await?;
worker.log(format!("sync job '{}' end", &job_id));
@ -173,19 +175,19 @@ async fn pull (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let delete = remove_vanished.unwrap_or(true);
check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;
check_pull_privs(&auth_id, &store, &remote, &remote_store, delete)?;
let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;
// fixme: set to_stdout to false?
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.clone(), true, move |worker| async move {
worker.log(format!("sync datastore '{}' start", store));
let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, auth_id);
let future = select!{
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,

View File

@ -55,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
@ -94,7 +94,7 @@ fn upgrade_to_backup_reader_protocol(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?;
if owner != userid {
if owner != auth_id {
bail!("backup owner check failed!");
}
}
@ -110,10 +110,10 @@ fn upgrade_to_backup_reader_protocol(
let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = ReaderEnvironment::new(
env_type,
userid,
auth_id,
worker.clone(),
datastore,
backup_dir,

View File

@ -5,7 +5,7 @@ use serde_json::{json, Value};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::api2::types::Userid;
use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;
@ -17,7 +17,7 @@ use crate::server::WorkerTask;
pub struct ReaderEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
user: Userid,
auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@ -29,7 +29,7 @@ pub struct ReaderEnvironment {
impl ReaderEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
user: Userid,
auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@ -39,7 +39,7 @@ impl ReaderEnvironment {
Self {
result_attributes: json!({}),
env_type,
user,
auth_id,
worker,
datastore,
debug: false,
@ -82,12 +82,12 @@ impl RpcEnvironment for ReaderEnvironment {
self.env_type
}
fn set_user(&mut self, _user: Option<String>) {
panic!("unable to change user");
fn set_auth_id(&mut self, _auth_id: Option<String>) {
panic!("unable to change auth_id");
}
fn get_user(&self) -> Option<String> {
Some(self.user.to_string())
fn get_auth_id(&self) -> Option<String> {
Some(self.auth_id.to_string())
}
}

View File

@ -16,9 +16,9 @@ use crate::api2::types::{
DATASTORE_SCHEMA,
RRDMode,
RRDTimeFrameResolution,
Authid,
TaskListItem,
TaskStateType,
Userid,
};
use crate::server;
@ -87,13 +87,13 @@ fn datastore_status(
let (config, _digest) = datastore::config()?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, _)) in &config.sections {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
continue;
@ -221,9 +221,9 @@ pub fn list_tasks(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
let since = since.unwrap_or_else(|| 0);
@ -238,7 +238,7 @@ pub fn list_tasks(
.filter_map(|info| {
match info {
Ok(info) => {
if list_all || info.upid.userid == userid {
if list_all || info.upid.auth_id == auth_id {
if let Some(filter) = &typefilter {
if !info.upid.worker_type.contains(filter) {
return None;

View File

@ -376,7 +376,7 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
},
},
owner: {
type: Userid,
type: Authid,
optional: true,
},
},
@ -394,7 +394,7 @@ pub struct GroupListItem {
pub files: Vec<String>,
/// The owner of group
#[serde(skip_serializing_if="Option::is_none")]
pub owner: Option<Userid>,
pub owner: Option<Authid>,
}
#[api()]
@ -452,7 +452,7 @@ pub struct SnapshotVerifyState {
},
},
owner: {
type: Userid,
type: Authid,
optional: true,
},
},
@ -477,7 +477,7 @@ pub struct SnapshotListItem {
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if="Option::is_none")]
pub owner: Option<Userid>,
pub owner: Option<Authid>,
}
#[api(
@ -692,7 +692,7 @@ pub struct DataStoreStatus {
#[api(
properties: {
upid: { schema: UPID_SCHEMA },
user: { type: Userid },
userid: { type: Authid },
},
)]
#[derive(Serialize, Deserialize)]
@ -711,8 +711,8 @@ pub struct TaskListItem {
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
/// The user who started the task
pub user: Userid,
/// The authenticated entity who started the task
pub userid: Authid,
/// The task end time (Epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>,
@ -735,7 +735,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
starttime: info.upid.starttime,
worker_type: info.upid.worker_type,
worker_id: info.upid.worker_id,
user: info.upid.userid,
userid: info.upid.auth_id,
endtime,
status,
}