From 8268c9d1619a8ae55ca2f8b1bae8feff39946582 Mon Sep 17 00:00:00 2001
From: Stefan Reiter
Date: Tue, 23 Jun 2020 14:43:09 +0200
Subject: [PATCH] fix overflow panic during upload

If *only* data chunks are registered (a high chance during incremental
backup), then chunk_count might be one lower than upload_stat.count,
because the zero chunk is unconditionally uploaded but not used. Thus,
when subtracting the two, an overflow would occur.

In general, don't let the client make the server panic; instead just
set duplicates to 0.

Signed-off-by: Stefan Reiter
---
 src/api2/backup/environment.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 16f21324..735798ac 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -310,7 +310,13 @@ impl BackupEnvironment {
 
         self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));
 
-        let client_side_duplicates = chunk_count - upload_stat.count;
+        // account for zero chunk, which might be uploaded but never used
+        let client_side_duplicates = if chunk_count < upload_stat.count {
+            0
+        } else {
+            chunk_count - upload_stat.count
+        };
+
         let server_side_duplicates = upload_stat.duplicates;
 
         if (client_side_duplicates + server_side_duplicates) > 0 {
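
Note (not part of the patch): a minimal standalone sketch of the failure mode
and the guard. The free function client_side_duplicates() and the plain u64
counters are illustrative assumptions, not the actual BackupEnvironment
fields; saturating_sub() from the standard library would have the same effect
as the explicit comparison.

    // Sketch only: u64 counters and the free function are illustrative.
    fn client_side_duplicates(chunk_count: u64, uploaded_count: u64) -> u64 {
        // Mirrors the patched logic: if the zero chunk was uploaded but never
        // registered, the upload count exceeds the registered chunk count;
        // report 0 duplicates instead of underflowing.
        if chunk_count < uploaded_count {
            0
        } else {
            chunk_count - uploaded_count
        }
        // Equivalent alternative: chunk_count.saturating_sub(uploaded_count)
    }

    fn main() {
        // Incremental backup case: only data chunks registered, zero chunk
        // still uploaded. The old `chunk_count - upload_stat.count` expression
        // would panic in a debug build ("attempt to subtract with overflow")
        // and wrap around in a default release build.
        assert_eq!(client_side_duplicates(99, 100), 0);

        // Normal case: some chunks were already known client-side and were
        // not re-uploaded.
        assert_eq!(client_side_duplicates(100, 90), 10);
    }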