clippy: remove unnecessary clones

and from::<T>(T)

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fabian Grünbichler
2021-01-15 14:38:27 +01:00
parent 47ea98e0e3
commit 4428818412
35 changed files with 47 additions and 51 deletions
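
The fixes fall into two patterns: redundant clones, where a value is cloned even though the original is never used again and could simply be moved, and conversions of a value into its own type ("from::<T>(T)"), such as the Ok(list.into()) -> Ok(list) and hyper::body::Bytes::from(bytes.freeze()) -> bytes.freeze() hunks below. These correspond to clippy's redundant_clone and useless_conversion lints (the lint names are an annotation here; the commit message does not name them). A minimal standalone sketch of both patterns, with hypothetical names, not code from this repository:

    // Hypothetical example; the marked lines are what the two lints warn about.
    struct Token { id: String }

    fn describe(token: Token) -> String {
        // redundant_clone: `token.id` is never used again, so the value
        // can be moved instead of cloned.
        let id = token.id.clone();  // fix: `let id = token.id;`

        // useless_conversion / from::<T>(T): `id` is already a `String`,
        // so `String::from(id)` converts the value into its own type.
        String::from(id)            // fix: return `id` directly
    }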

@@ -46,7 +46,7 @@ fn list_roles() -> Result<Value, Error> {
     let mut priv_list = Vec::new();
     for (name, privilege) in PRIVILEGES.iter() {
         if privs & privilege > 0 {
-            priv_list.push(name.clone());
+            priv_list.push(name);
         }
     }
     list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));

@@ -603,7 +603,7 @@ pub fn generate_token(
     token_shadow::set_secret(&tokenid, &secret)?;
     let token = user::ApiToken {
-        tokenid: tokenid.clone(),
+        tokenid,
         comment,
         enable,
         expire,

@@ -440,8 +440,8 @@ pub fn list_snapshots (
     let files = info
         .files
         .into_iter()
-        .map(|x| BackupContent {
-            filename: x.to_string(),
+        .map(|filename| BackupContent {
+            filename,
             size: None,
             crypt_mode: None,
         })
@@ -666,7 +666,7 @@ pub fn verify(
     let upid_str = WorkerTask::new_thread(
         worker_type,
-        Some(worker_id.clone()),
+        Some(worker_id),
         auth_id.clone(),
         to_stdout,
         move |worker| {
@@ -855,7 +855,7 @@ fn prune(
     // We use a WorkerTask just to have a task log, but run synchrounously
-    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
+    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;
     if keep_all {
         worker.log("No prune selection - keeping all files.");
@@ -1009,7 +1009,7 @@ fn get_datastore_list(
         }
     }
-    Ok(list.into())
+    Ok(list)
 }
 #[sortable]
@@ -1066,7 +1066,7 @@ fn download_file(
         .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+        .map_ok(|bytes| bytes.freeze())
         .map_err(move |err| {
             eprintln!("error during streaming of '{:?}' - {}", &path, err);
             err

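The .map_ok(|bytes| bytes.freeze()) change above (and the identical one in create_download_response further down) is a from::<T>(T) case: BytesCodec yields BytesMut chunks, and BytesMut::freeze() already returns Bytes, the type that hyper::body::Bytes re-exports, so the outer Bytes::from(...) converted a value into its own type. A minimal standalone sketch of the types involved, not the repository's code:

    use bytes::{Bytes, BytesMut};

    // Standalone sketch: `freeze()` already produces `Bytes`, so wrapping
    // the result in `Bytes::from(...)` would be a no-op self-conversion.
    fn finish(buf: BytesMut) -> Bytes {
        buf.freeze()
    }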

@@ -58,7 +58,7 @@ pub fn list_sync_jobs(
             }
         })
         .filter(|job: &SyncJobStatus| {
-            let as_config: SyncJobConfig = job.clone().into();
+            let as_config: SyncJobConfig = job.into();
             check_sync_job_read_access(&user_info, &auth_id, &as_config)
         }).collect();

@@ -138,7 +138,7 @@ async move {
         }
     };
-    let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
+    let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
     let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {

@@ -120,7 +120,7 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
     let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
-    let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
+    let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;
     let (mut config, _digest) = datastore::config()?;

@@ -96,7 +96,7 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
     let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
-    let mut data = param.clone();
+    let mut data = param;
     data["password"] = Value::from(base64::encode(password.as_bytes()));
     let remote: remote::Remote = serde_json::from_value(data)?;

@@ -154,7 +154,7 @@ pub fn create_sync_job(
     let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
-    let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
+    let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?;
     if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
         bail!("permission check failed");
     }
@@ -514,7 +514,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
     // unless they have Datastore.Modify as well
     job.store = "localstore3".to_string();
-    job.owner = Some(read_auth_id.clone());
+    job.owner = Some(read_auth_id);
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
     job.owner = None;
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);

@@ -98,7 +98,7 @@ pub fn create_verification_job(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
+    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param)?;
     user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?;

@@ -16,7 +16,7 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
     };
     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+        .map_ok(|bytes| bytes.freeze());
     let body = Body::wrap_stream(payload);

@@ -164,7 +164,7 @@ pub fn create_datastore_disk(
     let manager = DiskManage::new();
-    let disk = manager.clone().disk_by_name(&disk)?;
+    let disk = manager.disk_by_name(&disk)?;
     let partition = create_single_linux_partition(&disk)?;
     create_file_system(&partition, filesystem)?;

@@ -137,7 +137,7 @@ pub fn set_subscription(
     let server_id = tools::get_hardware_address()?;
-    let info = subscription::check_subscription(key, server_id.to_owned())?;
+    let info = subscription::check_subscription(key, server_id)?;
     subscription::write_subscription(info)
         .map_err(|e| format_err!("Error writing subscription status - {}", e))?;

@@ -94,7 +94,7 @@ pub fn backup(
     let upid_str = WorkerTask::new_thread(
         "tape-backup",
-        Some(store.clone()),
+        Some(store),
         auth_id,
         to_stdout,
         move |worker| {

@@ -128,7 +128,7 @@ pub fn restore(
     let members = inventory.compute_media_set_members(&media_set_uuid)?;
-    let media_list = members.media_list().clone();
+    let media_list = members.media_list();
     let mut media_id_list = Vec::new();
@@ -234,7 +234,6 @@ pub fn restore_media(
         Some(reader) => reader,
     };
-    let target = target.clone();
     restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
 }