GC scheduling: avoid triggering operation tracking error for upfront checks
Without that, one gets a "failed to lookup datastore X" error in the log for every datastore that is in read-only or offline maintenance mode, even if it isn't scheduled for GC anyway. Avoid that by first opening the datastore through a Lookup operation, and only re-opening it as a Write op once we know that GC actually needs to get scheduled for it.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent 0408f60b58
commit e22ad28302
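The idea, sketched below in a minimal, self-contained form: do the cheap upfront checks (is GC already running, is a run due at all) with a Lookup handle, and only re-open the datastore with a Write operation once GC is actually going to be started. All names in the sketch (DataStore::lookup, MaintenanceMode, schedule_gc, gc_due) are simplified stand-ins assumed for illustration, not the actual pbs-datastore API; only the overall flow mirrors the patch below.

// Minimal, self-contained sketch of the lookup-then-write pattern described
// above. All types here are simplified stand-ins for illustration only; the
// real DataStore::lookup_datastore(), Operation and maintenance-mode handling
// live in the pbs-datastore crate and look different.

#[derive(Clone, Copy, PartialEq)]
enum Operation {
    Lookup, // cheap handle, permitted even in read-only/offline maintenance mode
    Write,  // rejected while the datastore is in maintenance mode
}

#[derive(Clone, Copy, PartialEq)]
enum MaintenanceMode {
    Normal,
    ReadOnly,
    Offline,
}

struct DataStore;

impl DataStore {
    /// Stand-in for DataStore::lookup_datastore(): fails when the requested
    /// operation is not allowed by the current maintenance mode.
    fn lookup(name: &str, mode: MaintenanceMode, op: Operation) -> Result<Self, String> {
        if op == Operation::Write && mode != MaintenanceMode::Normal {
            return Err(format!("failed to lookup datastore '{name}' (maintenance mode)"));
        }
        Ok(DataStore)
    }

    fn garbage_collection_running(&self) -> bool {
        false // placeholder for the real per-datastore GC status check
    }
}

fn schedule_gc(name: &str, mode: MaintenanceMode, gc_due: bool) {
    // Upfront checks only need a Lookup handle; keep it in its own scope so it
    // is released again right after the check (see the second hunk below).
    {
        let datastore = match DataStore::lookup(name, mode, Operation::Lookup) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {err}");
                return;
            }
        };
        if datastore.garbage_collection_running() {
            return;
        }
    }

    if !gc_due {
        // Not scheduled for GC: the old code already logged a spurious
        // "failed to lookup datastore" here for maintenance-mode datastores,
        // because it opened them with Operation::Write up front.
        return;
    }

    // Only now, when GC really has to run, re-open the datastore as a Write op.
    match DataStore::lookup(name, mode, Operation::Write) {
        Ok(_datastore) => println!("running GC on {name}"),
        Err(err) => eprintln!("skipping scheduled GC on {name} - {err}"),
    }
}

fn main() {
    schedule_gc("store-a", MaintenanceMode::ReadOnly, false); // quiet now, no error logged
    schedule_gc("store-b", MaintenanceMode::Normal, true); // GC runs with a Write handle
}

With this split, a datastore in read-only or offline maintenance mode never sees a failed Write lookup unless a GC run is really due for it.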
@@ -572,14 +572,6 @@ async fn schedule_datastore_garbage_collection() {
     };
 
     for (store, (_, store_config)) in config.sections {
-        let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Write)) {
-            Ok(datastore) => datastore,
-            Err(err) => {
-                eprintln!("lookup_datastore failed - {}", err);
-                continue;
-            }
-        };
-
         let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
             Ok(c) => c,
             Err(err) => {
@@ -601,8 +593,18 @@
             }
         };
 
-        if datastore.garbage_collection_running() {
-            continue;
+        { // limit datastore scope due to Op::Lookup
+            let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Lookup)) {
+                Ok(datastore) => datastore,
+                Err(err) => {
+                    eprintln!("lookup_datastore failed - {}", err);
+                    continue;
+                }
+            };
+
+            if datastore.garbage_collection_running() {
+                continue;
+            }
         }
 
         let worker_type = "garbage_collection";
@@ -610,10 +612,7 @@
         let last = match jobstate::last_run_time(worker_type, &store) {
             Ok(time) => time,
             Err(err) => {
-                eprintln!(
-                    "could not get last run time of {} {}: {}",
-                    worker_type, store, err
-                );
+                eprintln!("could not get last run time of {worker_type} {store}: {err}");
                 continue;
             }
         };
@@ -638,6 +637,14 @@
             Err(_) => continue, // could not get lock
         };
 
+        let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Write)) {
+            Ok(datastore) => datastore,
+            Err(err) => {
+                log::warn!("skipping scheduled GC on {store}, could look it up - {err}");
+                continue;
+            }
+        };
+
         let auth_id = Authid::root_auth_id();
 
         if let Err(err) = crate::server::do_garbage_collection_job(
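The new "{ // limit datastore scope due to Op::Lookup }" block in the second hunk is what keeps the two handles from overlapping: the Lookup handle presumably stays registered in the datastore's operation tracking until it is dropped, so scoping it ensures it is released before the Write handle is requested after the job lock is taken. A hypothetical, self-contained illustration of that scope-limited pattern (TrackedHandle is an invented type, not part of proxmox-backup):

// Invented TrackedHandle type illustrating scope-limited operation tracking;
// the real tracking lives inside pbs-datastore's DataStore handles.
struct TrackedHandle(&'static str);

impl Drop for TrackedHandle {
    fn drop(&mut self) {
        println!("releasing {} operation", self.0);
    }
}

fn main() {
    {
        let _lookup = TrackedHandle("lookup"); // upfront checks use this handle
    } // dropped here, so the lookup operation is no longer counted

    let _write = TrackedHandle("write"); // taken only once GC actually runs
    println!("running GC");
}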