Compare commits

...

296 Commits

Author SHA1 Message Date
a329324139 bump version to 0.8.19-1 2020-09-22 13:30:52 +02:00
a83e2ffeab src/api2/reader.rs: use std::fs::read instead of tokio::fs::read
Because it is about 10% faster this way.
2020-09-22 13:27:23 +02:00
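A minimal sketch of the idea, with an illustrative function name (not the actual reader code): for the small files served here, a synchronous std::fs::read inside the async handler avoids the per-call overhead of tokio::fs::read.

    async fn load_blob(path: std::path::PathBuf) -> std::io::Result<Vec<u8>> {
        // previously: tokio::fs::read(&path).await
        // the files are small, so the blocking read is cheap and measurably faster
        std::fs::read(&path)
    }
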
5d7449a121 bump version to 0.8.18-1 2020-09-22 12:39:47 +02:00
ebbe4958c6 src/client/pull.rs: avoid duplicate downloads using in memory HashSet 2020-09-22 12:34:06 +02:00
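A minimal sketch of the dedup idea, with simplified types:

    use std::collections::HashSet;

    /// Returns true if the chunk still has to be downloaded during this pull.
    fn needs_download(downloaded: &mut HashSet<[u8; 32]>, digest: [u8; 32]) -> bool {
        // insert() returns false if the digest was already in the set
        downloaded.insert(digest)
    }
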
73b2cc4977 src/client/pull.rs: allow up to 20 concurrent download streams 2020-09-22 11:39:31 +02:00
7ecfde8150 remote_chunk_reader.rs: use Arc for cache_hint to make clone faster 2020-09-22 11:39:31 +02:00
796480a38b docs: add version and date to HTML index
Similar to the PDF output or the Proxmox VE docs.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-22 09:00:12 +02:00
4ae6aede60 bump version to 0.8.17-1 2020-09-21 14:09:20 +02:00
e0085e6612 src/client/pull.rs: remove temporary manifest 2020-09-21 14:03:01 +02:00
194da6f867 src/client/pull.rs: open temporary manifest with truncate(true)
To delete any data if the file already exists.
2020-09-21 13:53:35 +02:00
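A minimal sketch of the open call, with an illustrative helper name:

    use std::fs::{File, OpenOptions};
    use std::path::Path;

    fn open_tmp_manifest(path: &Path) -> std::io::Result<File> {
        OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true) // discard data left over from an earlier, interrupted pull
            .open(path)
    }
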
3fade35260 bump proxmox version to 0.4.1 2020-09-21 13:51:33 +02:00
5e39918fe1 fix #3017: check array boundaries before using
else we panic here

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-21 09:22:06 +02:00
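A minimal sketch of the pattern, with illustrative names: use a checked access instead of direct indexing, which panics on an out-of-bounds index.

    fn nth_field<'a>(fields: &[&'a str], i: usize) -> Result<&'a str, anyhow::Error> {
        fields
            .get(i)
            .copied()
            .ok_or_else(|| anyhow::format_err!("missing field at position {}", i))
    }
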
f4dc47a805 debian/control: update 2020-09-19 16:22:56 +02:00
12c65bacf1 src/backup/chunk_store.rs: disable debug output 2020-09-19 15:26:21 +02:00
ba37f3562d src/backup/datastore.rs - open_with_path: use Path instead of str 2020-09-19 10:01:57 +02:00
fce4659388 src/backup/datastore.rs: new method open_with_path
To make testing easier.
2020-09-19 09:55:21 +02:00
0a15870a82 depend on proxmox 0.4.0 2020-09-19 06:40:44 +02:00
9866de5e3d datastore/prune schedules: use JobState for tracking of schedules
like the sync jobs, so that if an admin configures a schedule it
really starts the next time that point in time is reached, not immediately

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-19 06:24:37 +02:00
9d3f183ba9 Admin Guide: Add some more detailed info throughout
- Mention config files for: datastores, users, acl,
  remotes, syncjobs
- Expand a little bit on SMART and smartmontools package
- Explain acl config
- Include line in network stating why a bond would be set up
- Note the use of ifupdown2 for network config, and the potential
  need to install it on other systems
- Add note to PVE integration, specifying where to refer to for VM and
  CT backups

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-18 15:51:21 +02:00
fe233f3b3d Small formatting fix up
- Fix permission image.
- Change alt text for ZFS
- Change note block to match the others

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-18 15:50:36 +02:00
be3bd0f90b fix #3015: allow user self-service
listing, updating or deleting a user is now possible for the user
itself, in addition to higher-privileged users that have appropriate
privileges on '/access/users'.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-18 15:45:11 +02:00
3c053adbb5 role api: fix description
wrongly copy-pasted at some point

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-18 14:55:00 +02:00
c040ec22f7 add verification scheduling to proxmox-backup-proxy
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-09-18 12:14:05 +02:00
43f627ba92 ui: add verify-schedule field to edit datastore form
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-09-18 12:13:09 +02:00
2b67de2e3f api2: make verify_schedule deletable
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-09-18 12:12:29 +02:00
477859662a api2: add optional verify-schedule field to create/update datastore endpoint
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-09-18 12:12:16 +02:00
ccd7241e2f add verify_schedule field to DataStoreConfig
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-09-18 12:11:55 +02:00
f37ef25bdd api2: add VERIFY_SCHEDULE_SCHEMA
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-09-18 12:11:39 +02:00
b93bbab454 fix #3014: allow DataStoreAdmins to list DS config
filtered by those they are privileged enough to read individually. this
allows such users to configure prune/GC schedules via the GUI (the API
already allowed it previously).

permission-wise, a user with this privilege could already:
- list all stores they have access to (returns just name/comment)
- read the config of each store they have access to individually
(returns the full config of that datastore + digest of the whole config)

this change merely combines them into:
- read the configs of all datastores they have access to (returns the full
config of those datastores + digest of the whole config)

users that have AUDIT on just /datastore without propagate can now no
longer read all configurations (this could be added back, it just
seems to make little sense to me).

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-18 12:09:13 +02:00
9cebc837d5 depend on pxar 0.6.1 2020-09-18 12:02:17 +02:00
1bc1d81a00 move compute_file_csum to src/tools.rs 2020-09-17 10:27:04 +02:00
dda72456d7 depend on proxmox 0.3.9 2020-09-17 08:49:50 +02:00
8f2f3dd710 fix #2942: implement lacp bond mode and bond_xmit_hash_policy
this was not yet implemented, should be compatible with pve and the gui

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-17 08:36:25 +02:00
85959a99ea api2/network: add bond-primary parameter
needed for 'active-backup' bond mode

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-17 08:36:14 +02:00
36700a0a87 api2/pull: make pull worker abortable
by selecting between the pull_future and the abort future

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-17 06:11:33 +02:00
dd4b42bac1 fix #2870: renew tickets in HttpClient
by packing the auth into a RwLock and starting a background
future that renews the ticket every 15 minutes

we still use the BroadcastFuture for the first ticket and only
if that is finished we start the scheduled future

we have to store an abort handle for the renewal future and abort it when
the http client is dropped, so we do not request new tickets forever

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-17 06:09:54 +02:00
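A minimal sketch of the renewal mechanism, with simplified types (the real client stores more than just the ticket string):

    use std::sync::Arc;
    use std::time::Duration;
    use futures::future::{AbortHandle, Abortable};
    use tokio::sync::RwLock;

    struct AuthInfo {
        ticket: String,
    }

    async fn renew_ticket() -> String {
        String::from("new-ticket") // placeholder for the real login request
    }

    fn spawn_renewal(auth: Arc<RwLock<AuthInfo>>) -> AbortHandle {
        let (handle, registration) = AbortHandle::new_pair();
        let renewal = Abortable::new(
            async move {
                let mut interval = tokio::time::interval(Duration::from_secs(15 * 60));
                interval.tick().await; // the first tick fires immediately, skip it
                loop {
                    interval.tick().await;
                    auth.write().await.ticket = renew_ticket().await;
                }
            },
            registration,
        );
        // keep the handle so the task can be aborted when the client is dropped
        tokio::spawn(renewal);
        handle
    }
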
9626c28619 always allow retrieving (censored) subscription info
like we do for PVE. this is visible on the dashboard, and caused 403 on
each update which bothers me when looking at the dev console.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-17 06:03:25 +02:00
463c03462a fix #2957: allow Sys.Audit access to node RRD
this is the same privilege needed to query the node status.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-17 06:03:25 +02:00
a086427a7d docs: fix epilogs fixme comment
reStructuredText comment syntax, i.e., everything it cannot parse is
a comment, is a real PITA!

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-16 16:36:54 +02:00
4d431383d3 src/backup/data_blob.rs: expose verify_crc again 2020-09-16 10:43:42 +02:00
d10332a15d SnapshotVerifyState: use enum for state
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-15 13:06:04 +02:00
43772efc6e backup: check all referenced chunks actually exist
A client can omit uploading chunks in the "known_chunks" list, those
then also won't be written on the server side. Check all those chunks
mentioned in the index but not uploaded for existence and report an
error if they don't exist instead of marking a potentially broken backup
as "successful".

This is only important if the base snapshot references corrupted chunks,
but has not been negatively verified. Also, it is important to only
verify this at the end, *after* all index writers are closed, since only
then can it be guaranteed that no GC will sweep referenced chunks away.

If a chunk is found missing, also mark the previous backup with a
verification failure, since we know the missing chunk has to be referenced
in it (the only way it could have been inserted into known_chunks with
checked=false). This has the benefit of automatically doing a
full-upload backup if the user attempts to retry after seeing the new
error, instead of requiring a manual verify or forget.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-15 10:00:05 +02:00
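A minimal sketch of the final check, with simplified types: after all index writers are closed, verify that every chunk the client marked as known (and therefore did not upload) actually exists in the chunk store.

    use std::collections::HashSet;

    fn verify_known_chunks(
        known_but_not_uploaded: &HashSet<[u8; 32]>,
        chunk_exists: impl Fn(&[u8; 32]) -> bool,
    ) -> Result<(), anyhow::Error> {
        for digest in known_but_not_uploaded {
            if !chunk_exists(digest) {
                anyhow::bail!("backup references a chunk that is missing on the server");
            }
        }
        Ok(())
    }
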
0af2da0437 backup: check verify state of previous backup before allowing reuse
Do not allow clients to reuse chunks from the previous backup if it has
a failed validation result. This would result in a new "successful"
backup that potentially references broken chunks.

If the previous backup has not been verified, assume it is fine and
continue on.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-15 09:59:29 +02:00
d09db6c2e9 rename BackupDir::new_with_group to BackupDir::with_group 2020-09-15 09:40:03 +02:00
bc871bd19d src/backup/backup_info.rs: new BackupDir::with_rfc3339 2020-09-15 09:34:46 +02:00
b11a6a029d debian/control: update 2020-09-15 09:33:38 +02:00
6a7be83efe avoid chrono dependency, depend on proxmox 0.3.8
- remove chrono dependency

- depend on proxmox 0.3.8

- remove epoch_now, epoch_now_u64 and epoch_now_f64

- remove tm_editor (moved to proxmox crate)

- use new helpers from proxmox 0.3.8
  * epoch_i64 and epoch_f64
  * parse_rfc3339
  * epoch_to_rfc3339_utc
  * strftime_local

- BackupDir changes:
  * store epoch and rfc3339 string instead of DateTime
  * backup_time_to_string now return a Result
  * remove unnecessary TryFrom<(BackupGroup, i64)> for BackupDir

- DynamicIndexHeader: change ctime to i64

- FixedIndexHeader: change ctime to i64
2020-09-15 07:12:57 +02:00
58169da46a www/OnlineHelpInfo.js: update for syncjobs 2020-09-12 15:10:08 +02:00
158f49e246 debian/control: update hyper dependency 2020-09-11 16:03:38 +02:00
3e4a67f350 bump version to 0.8.16-1 2020-09-11 15:55:37 +02:00
e0e5b4426a BackupDir: make constructor fallible
since converting from i64 epoch timestamp to DateTime is not always
possible. previously, passing invalid backup-time from client to server
(or vice-versa) panicked the corresponding tokio task. now we get proper
error messages including the invalid timestamp.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:49:35 +02:00
7158b304f5 handle invalid mtime when formatting entries
otherwise operations like catalog shell panic when viewing pxar archives
containing such entries, e.g. with mtime very far ahead into the future.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:48:43 +02:00
833eca6d2f use non-panicky timestamp_opt where appropriate
by either printing the original, out-of-range timestamp as-is, or
bailing with a proper error message instead of panicking.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:48:24 +02:00
151acf5d96 don't truncate DateTime nanoseconds
where we don't care about them anyway..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:48:10 +02:00
4a363fb4a7 catalog dump: preserve original mtime
even if it can't be handled by chrono. silently replacing it with epoch
0 is confusing..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:43:54 +02:00
229adeb746 ui/docs: add onlineHelp button for syncjobs
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:54 +02:00
1eff9a1e89 docs: add section for calendar events
and move the info defined in 'Schedules' there,
the explanation of calendar events is inspired by the systemd.time
manpage and the pve docs (especially the examples are mostly
copied/adapted from there)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:42 +02:00
ed4f0a0edc ui: fix calendarevent examples
*/x is valid syntax for us, but not for systemd, so to not confuse users
write it the way systemd would accept it

also, a timespec must at least have hours and minutes

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:32 +02:00
13bed6226e tools/systemd/parse_time: enable */x syntax for calendar events
we support this in pve, so also support it here to have a more
consistent syntax

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:22 +02:00
d937daedb3 docs: set html img width limitation through css
avoid hardcoding width in the docs itself, so that other render
outputs can choose another size.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:10:08 +02:00
8cce51135c docs: do not render TODOs in release builds
they are not useful for end users...

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:09:00 +02:00
0cfe1b3f13 docs: set GmbH as copyright holder
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:08:36 +02:00
05c16a6e59 docs: use alabaster theme
It's not all perfect (yet) but way cleaner and simpler to use than
the sphinx one.

Do the scrolling for the fixed sidebar ourselves and make some other
slight adjustments.

Main issue for now is that the "Developer Appendix" is always shown
in the navigation tree, but we only include that toctree for
devbuilds...

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:08:13 +02:00
3294b516d3 faq: fix typo
In note block:
    Proxmox Packup Server -> Proxmox Backup Server

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-10 15:18:13 +02:00
139bcedc53 benchmark: update TLS reference speed
We are now faster with recent patches.
2020-09-10 12:55:43 +02:00
cf9ea3c4c7 server: set http2 max frame size
else we get the default of 16k, which is quite low for our use case.
this improves the TLS upload benchmark speed by about 30-40% for me.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-10 12:43:51 +02:00
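A minimal sketch, assuming hyper's low-level server connection builder; the 4 MiB value is illustrative, not necessarily the value used here.

    fn make_http() -> hyper::server::conn::Http {
        let mut http = hyper::server::conn::Http::new();
        let frame_size: u32 = 4 * 1024 * 1024; // the default would be 16k
        http.http2_max_frame_size(frame_size);
        http
    }
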
e84fde3e14 docs: faq: spell out PBS
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-10 12:35:04 +02:00
1de47507ff Add section "FAQ"
Adds an FAQ to the docs, based on questions that have
been appearing on the forum.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-10 11:33:00 +02:00
1a9948a488 examples/upload-speed.rs: pass new benchmark parameter 2020-09-10 09:34:51 +02:00
04c2731349 bump version to 0.8.15-1 2020-09-10 09:26:16 +02:00
5656888cc9 verify: fix done count
We need to filter out the benchmark group earlier
2020-09-10 09:06:33 +02:00
5fdc5a6f3d verify: skip benchmark directory 2020-09-10 08:44:18 +02:00
61d7b5013c add benchmark flag to backup creation for proper cleanup when running a benchmark
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
2020-09-10 08:25:24 +02:00
871181d984 mount: fix mount subcommand
fixes the error, "manifest does not contain
file 'X.pxar'", that occurs when trying to mount
a pxar archive with 'proxmox-backup-client mount'.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-10 07:21:16 +02:00
02939e178d ui: only mark backup encrypted if there are any files
if we have a stale backup without a manifest, we do not count
the remaining files in the backup dir anymore, but this means
we now have to check here if there really are any encrypted ones

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-10 07:18:51 +02:00
3be308b949 improve server->client tcp performance for high latency links
similar to the other fix, if we do not set the buffer size manually,
we get better performance for high latency connections

restore benchmark from f.gruenbicher:

no delay, without patch: ~50MB/s
no delay, with patch: ~50MB/s
25ms delay, without patch: ~11MB/s
25ms delay, with patch: ~50MB/s

my own restore benchmark:

no delay, without patch: ~1.5GiB/s
no delay, with patch: ~1.5GiB/s
25ms delay, without patch: 30MiB/s
25ms delay, with patch: ~950MiB/s

for some more details about those benchmarks see
https://lists.proxmox.com/pipermail/pbs-devel/2020-September/000600.html

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-10 07:15:25 +02:00
83088644da fix #2983: improve tcp performance
by leaving the buffer sizes on default, we get much better tcp performance
for high latency links

throughput is still impacted by latency, but much less so when
leaving the sizes at default.
the disadvantage is slightly higher memory usage of the server
(details below)

my local benchmarks (proxmox-backup-client benchmark):

pbs client:
PVE Host
Epyc 7351P (16core/32thread)
64GB Memory

pbs server:
VM on Host
1 Socket, 4 Cores (Host CPU type)
4GB Memory

average of 3 runs, rounded to MB/s
                    | no delay |     1ms |     5ms |     10ms |    25ms |
without this patch  |  230MB/s |  55MB/s |  13MB/s |    7MB/s |   3MB/s |
with this patch     |  293MB/s | 293MB/s | 249MB/s |  241MB/s | 104MB/s |

memory usage (resident memory) of proxmox-backup-proxy:

                    | peak during benchmarks | after benchmarks |
without this patch  |                  144MB |            100MB |
with this patch     |                  145MB |            130MB |

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-10 07:15:12 +02:00
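A minimal sketch of what the change amounts to on the accept path (the nodelay call is illustrative): stop forcing fixed socket buffer sizes so the kernel can auto-tune the TCP window for high-latency links.

    use tokio::net::TcpStream;

    fn tune_socket(socket: &TcpStream) -> std::io::Result<()> {
        socket.set_nodelay(true)?;
        // removed: fixed sizes cap the window on high-latency links
        // socket.set_recv_buffer_size(1024 * 1024)?;
        // socket.set_send_buffer_size(1024 * 1024)?;
        Ok(())
    }
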
14db8b52dc src/backup/chunk_store.rs: use ? instead of unwrap 2020-09-10 06:37:37 +02:00
597427afaf clean up .bad file handling in sweep_unused_chunks
Code cleanup, no functional change intended.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-10 06:31:22 +02:00
3cddfb29be backup: ensure no fixed index writers are left over either
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-10 06:29:38 +02:00
e15b76369a buildsys: upload client packages also to PMG repo
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-08 16:48:31 +02:00
d7c1251435 ui: calendar event: disable matchFieldWidth for picker
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-08 16:48:31 +02:00
ea3ce82a74 ui: calendar event: enable more complex examples again
now that they (should) work.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-08 16:48:31 +02:00
092378ba92 Change "data store" to "datastore" throughout docs
Before, there were mixed usages of "data store" and
"datastore" throughout the docs.
This improves consistency in the docs by using only
"datastore" throughout.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 13:12:01 +02:00
068e526862 backup: touch all chunks, even if they exist
We need to update the atime of chunk files if they already exist,
otherwise a concurrently running GC could sweep them away.

This is protected with ChunkStore.mutex, so the fstat/unlink does not
race with touching.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-08 12:51:03 +02:00
a9767cf7de gc: remove .bad files on garbage collect
The iterator of get_chunk_iterator is extended with a third parameter
indicating whether the current file is a chunk (false) or a .bad file
(true).

Count their sizes toward the total of removed bytes, since removing them also frees
disk space.

.bad files are only deleted if the corresponding chunk exists, i.e. has
been rewritten. Otherwise we might delete data only marked bad because
of transient errors.

While at it, also clean up and use nix::unistd::unlinkat instead of
unsafe libc calls.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-08 12:43:13 +02:00
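A minimal sketch of the .bad file removal, with illustrative names, using nix::unistd::unlinkat relative to the open chunk directory fd:

    use std::os::unix::io::RawFd;
    use nix::unistd::{unlinkat, UnlinkatFlags};

    fn remove_bad_file(dirfd: RawFd, filename: &str) -> Result<(), nix::Error> {
        unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir)
    }
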
aadcc2815c cleanup rename_corrupted_chunk: avoid duplicate format macro 2020-09-08 12:29:53 +02:00
0f3b7efa84 verify: rename corrupted chunks with .bad extension
This ensures that following backups will always upload the chunk,
thereby replacing it with a correct version again.

Format for renaming is <digest>.<counter>.bad where <counter> is used if
a chunk is found to be bad again before a GC cleans it up.

Care has been taken to deliberately only rename a chunk in conditions
where it is guaranteed to be an error in the chunk itself. Otherwise a
broken index file could lead to an unwanted mass-rename of chunks.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-08 12:20:57 +02:00
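A minimal sketch of the rename, with simplified path handling (the counter limit is illustrative):

    use std::path::{Path, PathBuf};

    fn rename_corrupt_chunk(chunk_path: &Path, digest_hex: &str) -> std::io::Result<PathBuf> {
        let dir = chunk_path.parent().unwrap_or_else(|| Path::new("."));
        for counter in 0..9 {
            let candidate = dir.join(format!("{}.{}.bad", digest_hex, counter));
            if !candidate.exists() {
                std::fs::rename(chunk_path, &candidate)?;
                return Ok(candidate);
            }
        }
        Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            "too many .bad copies of this chunk",
        ))
    }
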
7c77e2f94a verify: fix log units
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-08 12:10:19 +02:00
abd4c4cb8c ui: add translation support
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-08 08:54:33 +02:00
09f12d1cf3 tools: rename extract_auth_cookie to extract_cookie
It does nothing specific to authentication..

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-08 08:54:33 +02:00
1db4cfb308 tools/systemd/time: add tests for multivalue fields
we did this wrong earlier, so it makes sense to add regression tests

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-08 07:09:43 +02:00
a4c1143664 server/worker_task: fix upid_read_status
a range from high to low in rust results in an empty range
(see std::ops::Range documentation)
so we need to generate the range from 0..data.len() and then reverse it

also, the task log contains a newline at the end, so we have to remove
that (should it exist)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-08 07:06:22 +02:00
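A minimal sketch of the fixed scan (illustrative helper): iterate 0..data.len() and reverse it, since data.len()..0 is an empty range, and strip the trailing newline first.

    fn last_line(data: &[u8]) -> &[u8] {
        // strip a single trailing newline, should one exist
        let data = if data.ends_with(b"\n") { &data[..data.len() - 1] } else { data };
        let mut start = 0;
        for i in (0..data.len()).rev() {
            if data[i] == b'\n' {
                start = i + 1;
                break;
            }
        }
        &data[start..]
    }
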
0623674f44 Edit section "Network Management"
Following changes made:
    * Remove empty column "method6" from network list output,
      so the table fits in a console code-block
    * Walk through a bond, rather than a bridge, as it may be a more
      common setup case

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:30:42 +02:00
2dd58db792 PVE integration: Add note about hiding password
Add a note to section "Proxmox VE integration" explaining
how to avoid passing password as plain text when using the
pvesm command.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:19:20 +02:00
e11cfb93c0 change order of "Image Archives" and "File Archives"
Change the order of the "Image Archives" and "File
Archives" subsections, so that they match the order
which they are introduced in, in the section "Backup
Content" (minor readability improvement).

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:19:09 +02:00
bc0608955e Sync Jobs: add screenshots and explanation
Add screenshots of sync jobs panel in web interface
and explain how to carry out related tasks from it.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:18:50 +02:00
36be19218e Network Config: Add screenshots and explanation
Add screenshots for network configuration and explain
how to carry out related tasks using the web interface.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:18:15 +02:00
9fa39a46ba User Management: Add screenshots and explanation
Add screenshots for user management section in web
interface and explain how to carry out relevant tasks
using it.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:18:01 +02:00
ff30b912a0 Datastore Config: add screenshots and explanation
Add screenshots from the datastore section of the
web interface and explain how to carry out tasks using
the web interface.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:17:50 +02:00
b0c10a88a3 Disk Management: Add screenshots and explanation
This adds screenshots from the web interface for the
sections related to disk management and adds explanation
of how to carry out tasks using the web interface.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:12:54 +02:00
ccbe6547a7 Add screenshots of web interface
Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-08 06:12:17 +02:00
32afd60336 src/tools/systemd/time.rs: derive Clone 2020-09-07 12:37:08 +02:00
02e47b8d6e SYSTEMD_CALENDAR_EVENT_SCHEMA: fix wrong schema description 2020-09-07 09:07:55 +02:00
44055cac4d tools/systemd/time: enable dates for calendarevents
this implements parsing and calculating calendarevents that have a
basic date component (year-mon-day) with the usual syntax options
(*, ranges, lists)

and some special events:
monthly
yearly/annually (like systemd)
quarterly
semiannually,semi-annually (like systemd)

includes some regression tests

the ~ syntax for days (the last x days of the month) is not yet
implemented

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:36:29 +02:00
1dfc09cb6b tools/systemd/time: fix signed conversion
instead of using 'as' and silently converting incorrectly,
use the TryInto trait and raise an error if we cannot convert

this should only happen if we have a negative year,
but this is expected (we do not want schedules from before the year 0)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:35:38 +02:00
48c56024aa tools/systemd/tm_editor: add setter/getter for months/years/days
add_* are modeled after add_days

subtract one for set_mon to have a consistent interface for all fields
(i.e. getter/setter return/expect the 'real' number, not the ones
in the tm struct)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:34:27 +02:00
cf103266b3 tools/systemd/tm_editor: move conversion of the year into getter and setter
the tm struct contains the year - 1900, but we previously added that offset

if we want to use the libc normalization correctly, the tm struct
must have the correct year in it, else the computations for timezones,
etc. fail

instead add a getter that adds the years and a setter that subtracts it again

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:34:04 +02:00
d5cf8f606c tools/systemd/time: fix selection for multiple options
if we give multiple options/ranges for a value, e.g.
2,4,8
we always choose the biggest, instead of the smallest that is next

this happens because in DateTimeValue::find_next(value)
'next' can be set multiple times and we set it when the new
value was *bigger* than the last found 'next' value, when in reality
we have to choose the *smallest* next we can find

reverse the comparison operator to fix this

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:33:42 +02:00
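A minimal sketch of the corrected selection (simplified to a slice of allowed values): among all allowed values greater than the current one, keep the smallest.

    fn find_next(allowed: &[u32], value: u32) -> Option<u32> {
        let mut next: Option<u32> = None;
        for &candidate in allowed {
            if candidate > value {
                match next {
                    // only replace the best match if the candidate is smaller
                    Some(current) if candidate >= current => {}
                    _ => next = Some(candidate),
                }
            }
        }
        next
    }
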
ce7ab28cfa tools/systemd/parse_time: error out on invalid ranges
if the range is reverse (bigger..smaller) we will never find a value,
so error out during parsing

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:28:48 +02:00
07ca6f6e66 tools/systemd/tm_editor: remove reset_time from add_days and document it
we never passed 'false' to it anyway so remove it
(we can add it again if we should ever need it)

also remove the adding of wday (gets normalized anyway)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:28:24 +02:00
15ec790a40 tools/systemd/time: convert the resulting timestamp into an option
we want to use dates for the calendarspec, and with that there are some
impossible combinations that cannot be detected during parsing
(e.g. some datetimes do not exist in some timezones, and the timezone
can change after setting the schedule)

so finding no timestamp is not an error anymore but a valid result

we omit logging in that case (since it is not an error anymore)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:28:05 +02:00
cb73b2d69c tools/systemd/time: move continue out of the if/else
will be called anyway

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:27:20 +02:00
c931c87173 tools/systemd/time: let libc normalize time for us
mktime/gmtime can normalize time and can even handle special timezone
cases, like the fact that the time 2:30 does not exist on specific
day/timezone combos

we have to convert the signature of all functions that use
normalize_time since mktime/gmtime can return an EOVERFLOW
but if this happens there is no way we can find a good time anyway

since normalize_time will always set wday according to the rest of the
time, remove set_wday

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:26:40 +02:00
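A minimal sketch, assuming the libc crate: hand an out-of-range struct tm to mktime() and let it do the carrying (day 32 rolls over into the next month), bailing if it reports an overflow.

    fn normalize_time(tm: &mut libc::tm) -> Result<i64, anyhow::Error> {
        let epoch = unsafe { libc::mktime(tm) };
        if epoch == -1 {
            anyhow::bail!("mktime failed to normalize the given time");
        }
        Ok(epoch as i64)
    }
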
28a0a9343c tools/systemd/tm_editor: remove TMChanges optimization
while it was correct, there was no measurable speed gain
(a benchmark yielded 2.8 ms for a spec that did not find a timestamp either way)
so remove it for simpler code

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-04 15:26:04 +02:00
56b666458c server/worker_task: fix 'unknown' status for some big task logs
when trying to parse the task status, we seek 8k from the end
which may be into the middle of a line, so the datetime parsing
can fail (when the log message contains ': ')

This patch does a fast search for the last line, and avoids the
'lines' iterator.
2020-09-04 10:41:13 +02:00
cd6ddb5a69 depend on proxmox 0.3.5 2020-09-04 08:11:53 +02:00
ecd55041a2 fix #2978: allow non-root to view datastore usage
for datastores where the requesting user has read or write permissions,
since the API method itself filters by that already. this is the same
permission setting and filtering that the datastore list API endpoint
does.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-04 06:18:20 +02:00
e7e8e6d5f7 online help: use a phony target and regenerate
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 14:41:03 +02:00
49df8ac115 docs: add prototype sphinx extension for online help
goes through the sections in the documents and creates the
OnlineHelpInfo.js file from the explicitly defined section labels which
are used in the js files with the 'onlineHelp' variable.
2020-09-02 14:38:27 +02:00
7397f4a390 bump version to 0.8.14-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 10:41:42 +02:00
8317873c06 gc: improve percentage done logs 2020-09-02 10:04:18 +02:00
deef63699e verify: also fail on server shutdown 2020-09-02 09:50:17 +02:00
c6e07769e9 ui: datastore content: eslint fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 09:30:57 +02:00
423df9b1f4 ui: datastore: show more granular verify state
Allows differentiating between the following situations:
* some snapshots in a group were not verified
* how many snapshots failed to verify in a group
* all snapshots verified but last verification task was over 30 days
  ago

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 09:30:57 +02:00
c879e5af11 ui: datastore: mark row invalid if last snapshot verification failed
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 09:12:05 +02:00
63d9aca96f verify: log progress 2020-09-02 07:43:28 +02:00
c3b1da9e41 datastore content: search: set emptytext to searched columns
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-01 18:30:54 +02:00
46388e6aef datastore content: reduce count column width
Using 75 as width we can display up to 9999999 which would allow
displaying over 19 years of snapshots done each minute, so quite
enough for the common cases.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-01 18:28:14 +02:00
484d439a7c datastore content: reload after verify
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-01 18:27:30 +02:00
ab6615134c d/postinst: always fixup termproxy user id and for all users
Anyone with a PAM account and Sys.Console access could have started a
termproxy session, adapt the regex.

Always test for broken entries and run the sed expression to make sure
eventually all occurrences of the broken syntax are fixed.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-01 18:02:11 +02:00
b1149ebb36 ui: DataStoreContent.js: fix wrong comma
should be semicolon

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-01 15:33:55 +02:00
1bfdae7933 ui: DataStoreContent: improve encrypted column
do not count files where we do not have any information

such files exist in the backup dir, but are not in the manifest
so we cannot use those files for determining if the backups are
encrypted or not

this marks encrypted/signed backups with unencrypted client.log.blob files as
encrypted/signed (respectively) instead of 'Mixed'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-01 15:33:55 +02:00
4f09d31085 src/backup/verify.rs: use global hashes (instead of per group)
This makes verify more predictable.
2020-09-01 13:33:04 +02:00
58d73ddb1d src/backup/data_blob.rs: avoid useless &, data is already a reference 2020-09-01 12:56:25 +02:00
6b809ff59b src/backup/verify.rs: use separate thread to load data 2020-09-01 12:56:25 +02:00
afe08d2755 debian/control: fix versions 2020-09-01 10:19:40 +02:00
a7bc5d4eaf depend on proxmox 0.3.4 2020-08-28 06:32:33 +02:00
97cd0a2a6d bump version to 0.8.13-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-27 16:15:31 +02:00
49a92084a9 gc: use human readable units for summary
and avoid the "percentage done: X %" phrase

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-27 16:06:35 +02:00
9bdeecaee4 bump pxar dep to 0.6.0
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-27 12:16:21 +02:00
843880f008 bin/backup-proxy: assert that daemon runs as backup user/group
Because if not, the backups it creates have bogus permissions and may
seem like they got broken once the daemon is started again with the
correct user/group.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-26 10:30:15 +02:00
a6ed5e1273 backup: add BACKUP_GROUP_NAME const and backup_group helper
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-26 10:27:47 +02:00
74f94d0678 bin/backup-proxy: remove outdated perl comments
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-26 10:27:47 +02:00
946c3e8a81 bin/backup-proxy: return error directly in main
anyhow makes this a nice error message, similar to the manual
wrapping used.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-26 10:27:47 +02:00
7b212c1f79 ui: datastore content: show last verify result from a snapshot
Double-click on the verify grid-cell of a specific snapshot (not the
group) opens the relevant task log.

The date of the last verify is shown as tool-tip.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-26 07:36:16 +02:00
3b2046d263 save last verify result in snapshot manifest
Save the state ("ok" or "failed") and the UPID of the respective
verify task. With this we can easily allow to open the relevant task
log and show when the last verify happened.

As we already load the manifest when listing the snapshots, just add
it there directly.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-26 07:35:13 +02:00
1ffe030123 various typo fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-25 18:52:31 +02:00
5255e641fa SnapshotListItem: add comment field also to schema
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-25 16:24:36 +02:00
c86b6f40d7 tools/format: implement from u64 for HumanByte helper type
Could be problematic for systems where usize is 32 bit, but we do not
really support those.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-25 14:18:49 +02:00
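A minimal sketch of the conversion (the struct is illustrative, not the real tools/format type); the u64-to-usize cast is exactly what makes 32-bit usize systems a theoretical concern.

    struct HumanByte {
        b: usize,
    }

    impl From<u64> for HumanByte {
        fn from(v: u64) -> Self {
            HumanByte { b: v as usize } // truncates on targets with a 32-bit usize
        }
    }
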
5a718dce17 api datastore: fix typo in error message
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-25 14:16:40 +02:00
1b32750644 update d/control for pxar 0.5.0
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-25 12:37:11 +02:00
5aa103c3c3 bump pxar dep to 0.5.0
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-25 12:37:11 +02:00
fd3f690104 Add section "Garbage Collection"
Add the section "Garbage Collection" to section "Backup Server
Management". This briefly explains the "garbage-collection"
subcommand of "proxmox-backup-manager"

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-25 09:38:03 +02:00
24b638bd9f Add section "Network Management"
Add the section "Network Management", which explains the
"network" subcommand of "proxmox-backup-manager"

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-25 09:37:41 +02:00
9624c5eecb add note about TLS benchmark test. 2020-08-25 09:36:12 +02:00
503dd339a8 Add further explanation to benchmarking
Adds a note explaining the percentages shown in the output
of the benchmark

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-25 09:33:23 +02:00
36ea5df444 administration-guide.rst: remove debug output from code examples 2020-08-25 09:29:52 +02:00
dce9dd6f70 Add section "Disk Management"
Add the section "Disk Management" to the admin guide, explaining
the use of the "disk" subcommand of "proxmox-backup-manager"

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-25 09:27:48 +02:00
88e28e15e4 debian/control: update for new pxar 0.4 dependency 2020-08-25 09:09:37 +02:00
399e48a1ed bump version to 0.8.12-1 2020-08-25 08:57:12 +02:00
7ae571e7cb verify: speedup - only verify chunks once
We need to do the check before we load the chunk.
2020-08-25 08:52:24 +02:00
4264c5023b verify: sort backup groups 2020-08-25 08:38:47 +02:00
82b7adf90b bump pxar dep to 0.4.0
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-24 11:56:01 +02:00
71c4a3138f docs: fix PBS wiki link
rst/sphinx and comments are a PITA...

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-21 11:09:41 +02:00
52991f239f bump version to 0.8.11-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-19 19:20:22 +02:00
3435f5491b Fix typo in program output
Change "comptation" -> "computation"

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-19 09:06:27 +02:00
aafe8609e5 d/postinst: fixup userid for older termproxy tasks
At the time when we can fix this up the new (and possibly an old)
server daemon process is running, so use the flock CLI tool from
util-linux to ensure we do the same locking as the server and thus we
avoid a race condition.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-08-19 07:26:58 +02:00
a8d69fcf05 Add "Benchmarking" section
This adds the "Benchmarking" section which discusses
the proxmox-backup-client benchmark subcommand.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-18 14:24:08 +02:00
1e68497c03 Add section describing acl tool
This adds a section on how to use the acl subcommand
to manage user access control

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-18 14:24:08 +02:00
74fc844787 Correct erroneous instructions and add clarity
This patch changes the following:
- Provide extra clarity to instruction and information where
  appropriate.
- Fix examples and content that would lead to erroneous behavior
  in a command.
- Insert section about installing on Debian into a caution block

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-18 14:24:08 +02:00
4cda7603c4 minor language and formatting fixup
this fixes minor grammatical errors throughout the pbs docs
and rewords certain sections for improved readability.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-18 14:24:08 +02:00
11e1e27a42 turn UPID into an API type
It's a string-type.
Implement Serialize via Display, Deserialize via FromStr and
add an API_SCHEMA so that it can be used as a type within
the #[api] macro.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-18 11:54:30 +02:00
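A minimal sketch of the string-type pattern with an illustrative stand-in (the real UPID has structured fields and is wired up through the proxmox serde helpers): Display gives the serialized form, FromStr the parser.

    use std::fmt;
    use std::str::FromStr;

    struct Upid(String);

    impl fmt::Display for Upid {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "{}", self.0)
        }
    }

    impl FromStr for Upid {
        type Err = anyhow::Error;

        fn from_str(s: &str) -> Result<Self, Self::Err> {
            if !s.starts_with("UPID:") {
                anyhow::bail!("not a valid UPID string");
            }
            Ok(Upid(s.to_owned()))
        }
    }
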
4ea831bfa1 style fixups
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-18 08:50:14 +02:00
c1d7d708d4 remove map_struct helper
if we ever need this it should be marked as unsafe!

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-17 11:53:02 +02:00
3fa2b983c1 add methods to allocate a DynamicIndexHeader
to avoid `map_struct` which is actually unsafe because it
does not verify alignment constraints at all

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-17 11:50:32 +02:00
a1e9c05738 api2/node/services: turn service api calls into workers
to be in line with pve/pmg and be able to show the progress in the gui

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-14 12:37:17 +02:00
934deeff2d fix #2904: zpool status: parse vdevs with state but without statistics
some vdevs (e.g. spares) have a 'state' (e.g. AVAIL), but
not statistics like READ/WRITE/etc.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-14 11:41:32 +02:00
c162df60c8 zfs status: add test with spares
this will fail for now, fixed in the next commit

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-14 11:41:32 +02:00
98161fddb5 cleanup last patch 2020-08-14 07:30:05 +02:00
be614c625f api2/node/../disks/directory: added DELETE endpoint for removal of mount-units
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
2020-08-14 07:06:10 +02:00
87c4cb7419 Fix #2926: parse_iface_attributes: always break on non-{attribute, comment} token
There is no requirement to have at least
a blank line, attribute or comment in between two
interface definitions, e.g.
iface lo inet loopback
iface lo inet6 loopback

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2020-08-14 06:57:07 +02:00
93bb51fe7e config/jobstate: replace Job:load with create_state_file
it really is not necessary, since the only time we are interested in
loading the state from the file is when we list it, and there
we use JobState::load directly to avoid the lock

we still need to create the file on syncjob creation though, so
that we have the correct time for the schedule

to do this we add a new create_state_file that overwrites it on creation
of a syncjob

for safety, we subtract 30 seconds from the in-memory state in case
the statefile is missing

since we call create_state_file from proxmox-backup-api,
we have to chown the lock file to the backup user after creating it,
else the sync job scheduling cannot acquire the lock

also we remove the lock file on statefile removal

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-14 06:38:02 +02:00
713b66b6ed cleanup: replace id from do_sync_job with info from job
we already have it inside the job itself

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-14 06:36:43 +02:00
77bd2a469c cleanup: merge endtime into TaskState
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-14 06:36:19 +02:00
97af919530 ui: syncjob: make some columns smaller
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:51:47 +02:00
c91602316b ui: syncjob: improve task text rendering
to also have the correct icons for warnings and unknown tasks

the text here is "ERROR: ..." now, so leave the 'Error' out

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:51:35 +02:00
a13573c24a syncjob: use do_sync_job also for scheduled sync jobs
and determine the last runtime with the jobstate

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:51:20 +02:00
02543a5c7f api2/pull: extend do_sync_job to also handle schedule and jobstate
so that we can log if it was triggered by a schedule, and write to a jobstate file
it also now correctly polls the abort_future of the worker, so that
users can stop a sync

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:49:28 +02:00
42b68f72e6 api/{pull, sync}: refactor to do_sync_job
and move the pull parameters into the worker, so that the task log
contains the error if there is one

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:40:52 +02:00
664d8a2765 api2/admin/sync: use JobState for faster access to state info
and delete the statefile again on syncjob removal

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:40:00 +02:00
e6263c2662 config: add JobState helper
this is intended to be a generic helper to (de)serialize job states
(e.g., sync, verify, and so on)

writes a json file into '/var/lib/proxmox-backup/jobstates/TYPE-ID.json'

the api creates the directory with the correct permissions, like
the rrd directory

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:36:10 +02:00
ae197dda23 server/worker_task: let upid_read_status also return the endtime
the endtime should be the timestamp of the last log line
or if there is no log at all, the starttime

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:35:44 +02:00
4c116bafb8 server: change status of a task from a string to an enum
representing a state via an enum makes more sense in this case
we also implement FromStr and Display to make it easy to convert from/to
a string

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-13 11:35:19 +02:00
df30017ff8 remove unused import
rustc doesn't warn about this kind of import, however,
clippy does

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-13 09:05:15 +02:00
3f3ae19d63 formatting fixups
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-12 14:30:03 +02:00
72dc68323c replace and remove old ticket functions
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-12 14:28:21 +02:00
593f917742 introduce Ticket struct
and add tests and compatibility tests

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-12 14:28:21 +02:00
639419b049 worker_task: new_thread() - remove unused tokio channel 2020-08-12 08:43:09 +02:00
c5ac2b9ddd bump version to 0.8.10-1
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-11 15:47:30 +02:00
81f293513e backup: lock base snapshot and ensure existance on finish
To prevent forgetting the base snapshot of a running backup, and catch
the case when it still happens (e.g. via manual rm) to at least error
out instead of storing a potentially invalid backup.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-11 11:04:47 +02:00
8b5f72b176 Revert "backup: ensure base snapshots are still available after backup"
This reverts commit d53fbe2474.

The HashSet and "register" function are unnecessary, as we already know
which backup is the one we need to check: the last one, stored as
'last_backup'.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-11 11:03:53 +02:00
f23f75433f backup: flock snapshot on backup start
An flock on the snapshot dir itself is used in addition to the group dir
lock. The lock is used to avoid races with forget and prune, while
having more granularity than the group lock (i.e. the group lock is
necessary to prevent more than one backup per group, but the snapshot
lock still allows backups unrelated to the currently running to be
forgotten/pruned).

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-11 11:02:21 +02:00
6d6b4e72d3 datastore: prevent in-use deletion with locks instead of heuristic
Attempt to lock the backup directory to be deleted, if it works keep the
lock until the deletion is complete. This way we ensure that no other
locking operation (e.g. using a snapshot as base for another backup) can
happen concurrently.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-11 11:00:29 +02:00
e434258592 src/backup/backup_info.rs: remove BackupGroup lock()
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-11 10:58:35 +02:00
3dc1a2d5b6 src/tools/fs.rs: new helper lock_dir_noblock
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-11 10:57:48 +02:00
5d95558bae Makefile: build target - do not fail if control file does not exist
This can happen if a previous build failed ...
2020-08-11 10:47:23 +02:00
882c082369 mark signed manifests as such
for less-confusing display in the web interface

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-11 09:56:53 +02:00
9a38fa29c2 verify: also check chunk CryptMode
and in-line verify_stored_chunk to avoid double-loading each chunk.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-11 09:56:20 +02:00
14f6c9cb8b chunk readers: ensure chunk/index CryptMode matches
an encrypted Index should never reference a plain-text chunk, and an
unencrypted Index should never reference an encrypted chunk.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-11 09:54:22 +02:00
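A minimal sketch with a simplified CryptMode enum: reject any combination where the index and the chunk disagree about encryption.

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum CryptMode {
        None,
        Encrypt,
        SignOnly,
    }

    fn check_crypt_mode(expected: CryptMode, chunk_mode: CryptMode) -> Result<(), anyhow::Error> {
        match (expected, chunk_mode) {
            // an encrypted index must never reference a plain-text chunk,
            // and an unencrypted index must never reference an encrypted chunk
            (CryptMode::Encrypt, CryptMode::None) | (CryptMode::None, CryptMode::Encrypt) => {
                anyhow::bail!("chunk CryptMode {:?} does not match index", chunk_mode)
            }
            _ => Ok(()),
        }
    }
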
2d55beeca0 datastore api: verify blob/index csum from manifest
when downloading decoded files.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-11 09:52:45 +02:00
9238cdf50d datastore api: only decode unencrypted indices
these checks were already in place for regular downloading of backed up
files, also do them when attempting to decode a catalog, or when
downloading decoded files referenced by a pxar index.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-11 09:51:20 +02:00
5d30f03826 impl PartialEq between Realm and RealmRef
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-10 12:23:36 +02:00
14263ef989 assert that Username does not impl PartialEq
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-10 12:21:12 +02:00
e7cb4dc50d introduce Username, Realm and Userid api types
and begin splitting up types.rs as it has grown quite large
already

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-10 12:05:01 +02:00
27d864210a d/control: proxmox 0.3.3
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-10 12:05:01 +02:00
f667f49dab bump proxmox dependency to 0.3.3 for serde helpers
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-10 11:32:01 +02:00
866c556faf move types.rs to types/mod.rs
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-10 10:32:31 +02:00
90d515c97d config.rs: sort modules
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-10 08:33:38 +02:00
4dbe129284 backup: only allow finished backups as base snapshot
If the datastore holds broken backups for some reason, do not attempt to
base following snapshots on those. This would lead to an error on
/previous, leaving the client no choice but to upload all chunks, even
though there might be potential for incremental savings.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-07 07:32:56 +02:00
747c3bc087 administration-guide.rst: move Encryption headline up one level 2020-08-07 07:10:12 +02:00
c23e257c5a administration-guide.rst: fix headline (avoid compile error) 2020-08-07 06:56:58 +02:00
16a18dadba admin-guide: add section explaining master keys
Adds a section under encryption which goes into detail on how to
use a master key to store and recover backup encryption keys.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-07 06:54:37 +02:00
5f76ac37b5 fix: master-key: upload RSA encoded key with backup
When uploading an RSA encoded key alongside the backup,
the backup would fail with the error message: "wrong blob
file extension".
Adding the '.blob' extension to rsa-encrypted.key before the
call to upload_blob_from_data(), rather than after, fixes
the issue.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-08-06 09:34:01 +02:00
d74edc3d89 finish_backup: mark backup as finished only after checks have passed
Commit 9fa55e09 "finish_backup: test/verify manifest at server side"
moved the finished-marking above some checks, which means if those fail
the backup would still be marked as successful on the server.

Revert that part and comment the line for the future.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-08-06 06:39:34 +02:00
2f57a433b1 fix #2909: handle missing chunks gracefully in garbage collection
instead of bailing and stopping the entire GC process, warn about the
missing chunks and continue.

this results in "TASK WARNINGS: X" as the status.

Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
2020-08-06 06:36:48 +02:00
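A minimal sketch of the phase-1 behavior (eprintln! stands in for the real task logger): count and warn about a missing chunk, then continue instead of aborting the whole GC run.

    fn touch_chunk(path: &std::path::Path, warnings: &mut u64) -> Result<(), anyhow::Error> {
        if let Err(err) = std::fs::metadata(path) {
            if err.kind() == std::io::ErrorKind::NotFound {
                *warnings += 1;
                eprintln!("WARN: chunk {:?} is referenced but missing", path);
                return Ok(()); // continue with the next chunk
            }
            return Err(err.into());
        }
        // the real code updates the chunk's atime here
        Ok(())
    }
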
df7f04364b d/control: bump proxmox to 0.3.2
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-04 11:34:58 +02:00
98c259b4c1 remove timer and lock functions, fix building with proxmox 0.3.2
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-04 11:33:02 +02:00
799b3d88bc bump proxmox dependency to 0.3.2 for timer / file locking
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-04 11:27:44 +02:00
db22e6b270 build: properly regenerate d/control
and commit the latest change

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-04 11:16:11 +02:00
16f0afbfb5 gui: user: fix #2898 add dialog to set password
Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-08-04 10:21:00 +02:00
d3d566f7bd GC: use time pre phase1 to calculate min_atime in phase2
Used chunks are marked in phase1 of the garbage collection process by
using the atime property. Each used chunk gets touched so that the atime
gets updated (if older than 24h, see relatime).

Should there ever be a situation in which the phase1 in the GC run needs
a very long time to finish, it could happen that the grace period
calculated in phase2 is not long enough and thus the marking of the
chunks (atime) becomes invalid. This would result in the removal of
needed chunks.

Even though the likelihood of this happening is very low, using the
timestamp from right before phase1 is started, to calculate the grace
period in phase2 should avoid this situation.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
2020-08-04 10:19:05 +02:00
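A minimal sketch of the timing change (grace period values illustrative): record the timestamp before phase 1 starts and derive the phase-2 atime cutoff from that, so a long phase 1 cannot shrink the grace period.

    use std::time::{SystemTime, UNIX_EPOCH};

    fn gc_min_atime() -> u64 {
        let phase1_start = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock before 1970")
            .as_secs();

        // ... phase 1 runs here, possibly for a long time ...

        // phase 2: chunks with an atime older than this are removal candidates
        phase1_start - 3600 * 24 - 60 * 5
    }
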
c96b0de48f datastore: allow browsing signed pxar files
just because we can't verify the signature does not mean the contents
are not accessible. it might make sense to make it obvious with a hint
or click-through warning that no signature verification can take place,
both for this and for downloading.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-04 07:27:56 +02:00
2ce159343b sync: verify size and checksum of pulled archives
and not just of previously synced ones.

we can't use BackupManifest::verify_file as the archive is still stored
under the tmp path at this point.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-04 07:27:56 +02:00
9e496ff6f1 sync: verify chunk size and digest, if possible
for encrypted chunks this is currently not possible, as we need the key
to decode the chunk.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-04 07:27:56 +02:00
8819d1f2f5 blobs: attempt to verify on decode when possible
regular chunks are only decoded when their contents are accessed, in
which case we need to have the key anyway and want to verify the digest.

for blobs we need to verify beforehand, since their checksums are always
calculated based on their raw content, and stored in the manifest.

manifests are also stored as blobs, but don't have a digest in the
traditional sense (they might have a signature covering parts of their
contents, but that is verified already when loading the manifest).

this commit does not cover pull/sync code which copies blobs and chunks
as-is without decoding them.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-08-04 07:27:56 +02:00
0f9218079a pxar/extract: fixup path stack for errors
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-03 12:20:30 +02:00
1cafbdc70d more whitespace fixups
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-03 12:02:19 +02:00
a3eb7b2cea whitespace fixup
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-03 12:00:59 +02:00
d9b8e2c795 pxar: better error handling on extract
Errors while applying metadata will not be considered fatal
by default using `pxar extract` unless `--strict` was passed
in which case it'll bail out immediately.

It'll still return an error exit status if something had
failed along the way.

Note that most other errors will still cause it to bail out
(eg. errors creating files, or I/O errors while writing
the contents).

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-08-03 09:40:55 +02:00
4bd2a9e42d worker_task: add getter for upid
sometimes we need the upid inside the worker itself, so give a
possibility to get it

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-03 08:26:17 +02:00
cef03f4149 worker_task: refactor log text generator
we will need this elsewhere, so pull it out

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-03 08:23:13 +02:00
eeb19aeb2d systemd/time: fix weekday wrapping on month
the weekday does not change depending on the month, so remove that wrapping

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-03 08:18:42 +02:00
6c96ec418d systemd/time: add tests for weekday month wrapping
this will fail for now, gets fixed in the next commit

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-08-03 08:15:26 +02:00
5e4b32706c depend on proxmox 0.3.1 2020-08-02 12:02:21 +02:00
30c3c5d66c pxar: create: attempt to use O_NOATIME
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-31 11:46:53 +02:00
e51be33807 pxar: create: move common O_ flags to open_file
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-31 11:42:15 +02:00
70030b43d0 list_snapshots: Returns new "comment" property (first line from notes) 2020-07-31 11:34:42 +02:00
724de093dd build: track generated d/control in git
to track changes and allow bootstrap-installation of build dependencies.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-07-31 11:18:33 +02:00
ff86ef00a7 cleanup: manifest is always CryptMode::None 2020-07-31 10:25:30 +02:00
912b3f5bc9 src/api2/admin/datastore.rs: add API to get/set Notes for backups 2020-07-31 10:17:35 +02:00
a4acb6ef84 lock_file: return std::io::Error 2020-07-31 08:53:00 +02:00
d7ee07d838 src/api2/backup/environment.rs: remove debug code 2020-07-31 07:48:53 +02:00
53705acece src/api2/backup/environment.rs: remove debug code 2020-07-31 07:47:08 +02:00
c8fff67d88 finish_backup: add chunk_upload_stats to manifest 2020-07-31 07:45:47 +02:00
9fa55e09a7 finish_backup: test/verify manifest at server side
We want to make sure that the client uploaded a readable manifest.
2020-07-31 07:45:47 +02:00
e443902583 src/backup/datastore.rs: add helpers to load/store manifest
We want this to modify the manifest "unprotected" data, for example
to add upload statistics, notes, ...
2020-07-31 07:45:47 +02:00
32dc4c4604 introduction: language improvement (fix typos, grammar, wording)
Fix typos and grammatical errors.
Reword some sentences for better readability.
Clean up the list found under "Software Stack", so that it maintains a consistent
style throughout.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-30 12:02:54 +02:00
f39a900722 api2/node/termproxy: fix user in worker task
'username' here is without realm, but we really want to use user@realm

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-30 11:57:43 +02:00
1fc82c41f2 src/api2/backup.rs: acquire backup lock earlier in create_locked_backup_group() 2020-07-30 11:03:05 +02:00
d2b0c78e23 api2/node/termproxy: fix zombies on worker abort
tokio's kill_on_drop sometimes leaves zombies around, especially
when no other tokio::process::Command is spawned afterwards.

So instead of relying on the 'kill_on_drop' feature, we explicitly
kill the child on a worker abort. To be able to do this we have to
use 'tokio::select' instead of 'futures::select', since the latter
requires the future to be fused, which consumes the child handle,
leaving us no possibility to kill it after fusing. (tokio::select
does not need the futures to be fused, so we can reuse the child
future after the select again; see the sketch below.)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-30 10:38:14 +02:00
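The sketch below shows this pattern in a hedged form, written against current tokio (1.x) rather than the tokio 0.2 in use at the time; the oneshot receiver stands in for the worker's abort_future, and none of this is the actual termproxy code.

use tokio::process::Child;
use tokio::sync::oneshot;

async fn run_child_with_abort(
    mut child: Child,
    abort_future: oneshot::Receiver<()>,
) -> std::io::Result<()> {
    tokio::select! {
        // Worker abort: kill the child explicitly instead of relying on
        // kill_on_drop, so no zombie process is left behind.
        _ = abort_future => {
            child.kill().await?;
        }
        // Child exited on its own.
        status = child.wait() => {
            let _ = status?;
        }
    }
    Ok(())
}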
adfdc36936 verify: keep track and log which dirs failed the verification
so that we can print a list at the end of the worker showing which
backups are corrupt.

this is useful if there are many snapshots and some in between had an
error. Before this patch, the task log simply said to 'look in the logs',
but if the log is very long it is hard to see what exactly failed.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-30 09:39:37 +02:00
d8594d87f1 verify: also keep track of corrupt chunks
so that we do not have to verify a corrupt chunk multiple times

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-30 09:39:37 +02:00
f66f537da9 verify: check all chunks of an index, even if we encounter a corrupt one
this makes it easier to see which chunks are corrupt
(and enables us in the future to build a 'complete' list of
corrupt chunks)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-30 09:39:37 +02:00
d44185c4a1 fix #2873: if --pattern is used, default to not extracting
The extraction algorithm has a state (bool) indicating
whether we're currently in a positive or negative match
which has always been initialized to true at the beginning,
but when the user provides a `--pattern` argument we need to
start out with a negative match.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-30 09:33:30 +02:00
d53fbe2474 backup: ensure base snapshots are still available after backup
This should never trigger if everything else works correctly, but it is
still a very cheap check to avoid wrongly marking a backup as "OK" when
in fact some chunks might be missing.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-07-30 08:28:54 +02:00
95bda2f25d backup: use flock on backup group to forbid multiple backups at once
Multiple backups within one backup group don't really make sense, but
break all sorts of guarantees (e.g. a second backup started after a
first would use a "known-chunks" list from the previous unfinished one,
which would be empty - but using the list from the last finished one is
not a fix either, as that one could be deleted or pruned once the first
simultaneous backup is finished).

Fix it by only allowing one backup per backup group at one time. This is
done via a flock on the backup group directory, thus remaining intact
even after a reload.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-07-30 08:26:26 +02:00
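As a hedged illustration of the per-group locking described above (not the actual proxmox-backup code), the following Rust sketch takes an exclusive, non-blocking flock on the backup group directory via libc and keeps the opened File around as the lock guard.

use anyhow::{bail, Context, Result};
use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::Path;

fn lock_backup_group(group_dir: &Path) -> Result<File> {
    let file = File::open(group_dir)
        .with_context(|| format!("cannot open backup group dir {:?}", group_dir))?;

    // LOCK_NB makes this fail immediately if another backup already holds
    // the lock on this group.
    let res = unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_EX | libc::LOCK_NB) };
    if res != 0 {
        bail!("backup group {:?} is locked by another running backup", group_dir);
    }

    // Keep the returned File alive for the duration of the backup; dropping
    // it releases the lock.
    Ok(file)
}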
c9756b40d1 datastore: prevent deletion of snaps in use as "previous backup"
To prevent a race with a background GC operation, do not allow deletion
of backups whose index might currently be referenced as the "known chunk
list" for successive backups. Otherwise the GC could delete chunks it
thinks are no longer referenced, while at the same time telling the
client that it doesn't need to upload said chunks because they already
exist.

Additionally, prevent deletion of whole backup groups, if there are
snapshots contained that appear to be currently in-progress. This is
currently unlikely to trigger, as that function is only used for sync
jobs, but it's a useful safeguard either way.

Deleting a single snapshot has a 'force' parameter, which is necessary
to allow deleting incomplete snapshots on an aborted backup. Pruning
also sets force=true to avoid the check, since it calculates which
snapshots to keep on its own.

To avoid code duplication, the is_finished method is factored out.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-07-30 08:26:01 +02:00
8cd29fb24a tools: add nonblocking mode to lock_file
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-07-30 08:18:10 +02:00
505c5f0f76 fix typo: avgerage to average
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-07-30 07:08:08 +02:00
2aaae9705e src/backup/verify.rs: try to verify chunks only once
We use a HashSet (per BackupGroup) to track already verified chunks; a rough sketch of this bookkeeping follows below.
2020-07-29 13:29:13 +02:00
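A hedged sketch of that bookkeeping, combining the ideas from the verify commits above (check every chunk of an index, remember verified and corrupt chunks); the types and the check callback are assumptions, not the real verify.rs interface.

use std::collections::HashSet;

type ChunkDigest = [u8; 32];

struct VerifyState {
    verified_chunks: HashSet<ChunkDigest>,
    corrupt_chunks: HashSet<ChunkDigest>,
}

impl VerifyState {
    fn new() -> Self {
        Self { verified_chunks: HashSet::new(), corrupt_chunks: HashSet::new() }
    }

    /// Returns an error count for this index instead of bailing on the first
    /// corrupt chunk, so all chunks get checked.
    fn verify_index(
        &mut self,
        chunks: &[ChunkDigest],
        mut check: impl FnMut(&ChunkDigest) -> bool,
    ) -> usize {
        let mut errors = 0;
        for digest in chunks {
            if self.verified_chunks.contains(digest) {
                continue; // already known good
            }
            if self.corrupt_chunks.contains(digest) {
                errors += 1; // already known bad, count but don't re-read
                continue;
            }
            if check(digest) {
                self.verified_chunks.insert(*digest);
            } else {
                self.corrupt_chunks.insert(*digest);
                errors += 1;
            }
        }
        errors
    }
}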
8aa67ee758 bump proxmox to 0.3, cleanup http_err macro usage
Also swap the order of a couple of `.map_err().await` to
`.await.map_err()` since that's generally more efficient.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-29 09:38:36 +02:00
3865e27e96 src/api2/node.rs: 'mod' statement cleanup
split them into groups: `pub`, `pub(crate)` and non-pub

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-29 09:19:57 +02:00
f6c6e09a8a update to pxar 0.3 to support negative timestamps
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-07-29 08:31:37 +02:00
71282dd988 ui: fix in-progress snapshots always showing as "Encrypted"
We can't know if they are encrypted or not when they're not even
finished yet.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-07-29 07:13:25 +02:00
80db161e05 ui: fix error when reloading DataStoreContent
...when an entry is selected that no longer exists after the reload.

E.g. when one selects a file within a snapshot and then clicks the
delete icon for said snapshot, focusRow would fail and the loading
mask would stay on until a reload.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-07-29 07:13:12 +02:00
be10cdb122 fix #2856: also check whole device for device mapper
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-28 11:03:45 +02:00
7fde1a71ca upload_chunk: allow upload of empty blobs
a blob can be empty (e.g. an empty pct fw conf), so we
have to set the minimum size to the header size

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-28 11:03:36 +02:00
a83674ad48 administration-guide: fix typo that breaks example command
The ' ' (space) between 'etc/ **/*.txt' resulted in the example command's output
not matching the given example output. Removing this space fixes the command.
2020-07-28 10:59:53 +02:00
02f82148cf docs: pxar create: update docs to match current behavior
This removes parts of the previous explanation of the tool that are no longer
correct, and adds an explanation of '--exclude' parameter, instead.

Adds more clarity to the command, by use of '/path/to/source' to signify
source directory.

Specify that the pattern matching style of the exclude parameter is that of
gitignore's syntax.
2020-07-28 10:59:42 +02:00
39f18b30b6 src/backup/data_blob.rs: new load_from_reader(), which verifies the CRC
And make verify_crc private for now. We always call load_from_reader() to
verify the CRC.

Also add load_chunk() to datastore.rs (from chunk_store::read_chunk())
2020-07-28 10:23:16 +02:00
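A hedged sketch of the load-and-verify idea (not the actual DataBlob implementation); where the CRC is stored and how the payload is framed are assumptions, the point is only that the CRC32 is always checked on load, using the crc32fast crate that is already in the dependency list.

use anyhow::{bail, Result};
use std::io::Read;

fn load_and_verify_crc<R: Read>(mut reader: R, expected_crc: u32) -> Result<Vec<u8>> {
    let mut data = Vec::new();
    reader.read_to_end(&mut data)?;

    // Recompute the CRC32 over the payload and compare it to the stored value
    // before handing the data out.
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&data);
    if hasher.finalize() != expected_crc {
        bail!("blob CRC32 checksum mismatch");
    }
    Ok(data)
}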
69d970a658 ui: DataStoreContent: keep selection and expansion on reload
when clicking reload, we keep the existing selection
(if it still exists), and the previous expanded elements expanded

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-27 12:51:34 +02:00
6d55603dcc ui: add search box to DataStore content
which searches the whole tree (name & owner)

we do this by traversing the tree and marking elements as matches,
then afterwards applying a simple filter that matches on that boolean

the worst-case cost of this is O(2n), since we have to traverse the
tree (in the worst case) one time, and the filter function does it again

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-27 12:51:11 +02:00
3e395378bc ui: rework DataStore content Panel
instead of having the files as a column, put the files into the tree
as a third level

with this, we can move the actions into an action column and remove
the top buttons (except reload)

clicking the download action now downloads directly, so we would
not need the download window anymore

clicking the browse action, opens the pxar browser like before,
but expands and selects (&focus) the selected pxar file

also changes the icon of 'signed' to a lock icon,
but color codes them (signed => greyed out, encrypted => green),
similar to what browsers do/did for certificates

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-27 12:47:51 +02:00
bccdc5fa04 src/backup/manifest.rs: cleanup - again, avoid recursive call to write_canonical_json
And use re-borrow instead of dyn trait casting.
2020-07-27 10:31:34 +02:00
0bf7ba6c92 src/backup/manifest.rs: cleanup - avoid recursive call to write_canonical_json 2020-07-27 08:48:11 +02:00
e6b599aa6c services: make reload safer and default to it in gui
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-25 20:23:12 +02:00
d757021f4c ui: acl: add improved permission selector
taken mostly from PVE, with adaptation to how PBS does things.
Main difference is that we do not have a resource store singleton
here which we can use, but for datastores we can already use the
always present datastore-list store. Register it to the store manager
with a "storeId" property (vs. our internal storeid one).

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-25 20:10:11 +02:00
ee15af6bb8 api: service command: fix test for essential service
makes no sense to disallow reload or start (even if start cannot
really happen)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 19:35:19 +02:00
3da9b7e0dd followup: server/state: rename task_count to internal_task_count
so that the relation with spawn_internal_task is made more clear

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 12:11:39 +02:00
beaa683a52 bump version to 0.8.9-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 11:24:56 +02:00
33a88dafb9 server/state: add spawn_internal_task and use it for websockets
is a helper to spawn an internal tokio task without it showing up
in the task list

it is still tracked for reload and notifies the last_worker_listeners

this enables the console to survive a reload of proxmox-backup-proxy

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-24 11:17:33 +02:00
224c65f8de termproxy: let users stop the termproxy task
for that we have to do a select on the worker's abort_future

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-24 11:17:33 +02:00
f2b4b4b9fe fix 2885: bail on duplicate backup target
Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
2020-07-24 11:08:56 +02:00
ea9e559fc4 client: log archive upload duration more accurately, fix grammar
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 10:15:28 +02:00
0cf14984cc client: avoid division by zero in avg speed calculation, be more accurate
using micros vs. as_secs_f64 allows the speed to be calculated in whole
bytes (usize), which is easier to handle - this was also how it was done
when the calculation still lived in upload_chunk_info_stream

Co-authored-by: Stoiko Ivanov <s.ivanov@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 10:14:40 +02:00
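A hedged sketch of the speed calculation described here, assuming a plain helper rather than the actual client code: elapsed microseconds are used instead of whole seconds, with a floor of one microsecond to rule out a division by zero, and the result is an integer bytes-per-second value.

use std::time::Duration;

fn average_speed_bytes_per_sec(bytes: usize, elapsed: Duration) -> usize {
    // Use microseconds and clamp to at least 1 to avoid dividing by zero
    // for extremely short uploads.
    let micros = elapsed.as_micros().max(1);
    ((bytes as u128 * 1_000_000) / micros) as usize
}

fn main() {
    let speed = average_speed_bytes_per_sec(512 * 1024 * 1024, Duration::from_millis(2500));
    println!("average speed: {:.2} MiB/s", speed as f64 / (1024.0 * 1024.0));
}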
156 changed files with 6600 additions and 2653 deletions

View File

@ -1,6 +1,6 @@
[package] [package]
name = "proxmox-backup" name = "proxmox-backup"
version = "0.8.8" version = "0.8.19"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"] authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018" edition = "2018"
license = "AGPL-3" license = "AGPL-3"
@ -18,7 +18,6 @@ apt-pkg-native = "0.3.1" # custom patched version
base64 = "0.12" base64 = "0.12"
bitflags = "1.2.1" bitflags = "1.2.1"
bytes = "0.5" bytes = "0.5"
chrono = "0.4" # Date and time library for Rust
crc32fast = "1" crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] } endian_trait = { version = "0.6", features = ["arrays"] }
anyhow = "1.0" anyhow = "1.0"
@ -26,7 +25,7 @@ futures = "0.3"
h2 = { version = "0.2", features = ["stream"] } h2 = { version = "0.2", features = ["stream"] }
handlebars = "3.0" handlebars = "3.0"
http = "0.2" http = "0.2"
hyper = "0.13" hyper = "0.13.6"
lazy_static = "1.4" lazy_static = "1.4"
libc = "0.2" libc = "0.2"
log = "0.4" log = "0.4"
@ -39,11 +38,11 @@ pam-sys = "0.5"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-utils = "0.1.0" pin-utils = "0.1.0"
pathpatterns = "0.1.2" pathpatterns = "0.1.2"
proxmox = { version = "0.2.1", features = [ "sortable-macro", "api-macro", "websocket" ] } proxmox = { version = "0.4.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
#proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] } #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] } #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox-fuse = "0.1.0" proxmox-fuse = "0.1.0"
pxar = { version = "0.2.1", features = [ "tokio-io", "futures-io" ] } pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] } #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
regex = "1.2" regex = "1.2"
rustyline = "6" rustyline = "6"

View File

@ -69,10 +69,12 @@ doc:
.PHONY: build .PHONY: build
build: build:
rm -rf build rm -rf build
rm -f debian/control
debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//') debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
cat build/debian/control.src build/debian/control.in > build/debian/control cat build/debian/control.src build/debian/control.in > build/debian/control
rm build/debian/control.in build/debian/control.src rm build/debian/control.in build/debian/control.src
cp build/debian/control debian/control
rm build/Cargo.lock rm build/Cargo.lock
find build/debian -name "*.hint" -delete find build/debian -name "*.hint" -delete
$(foreach i,$(SUBDIRS), \ $(foreach i,$(SUBDIRS), \
@ -148,4 +150,4 @@ upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
# check if working directory is clean # check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve,pmg" --dist buster

239
debian/changelog vendored
View File

@ -1,3 +1,241 @@
rust-proxmox-backup (0.8.19-1) unstable; urgency=medium
* src/api2/reader.rs: use std::fs::read instead of tokio::fs::read
-- Proxmox Support Team <support@proxmox.com> Tue, 22 Sep 2020 13:30:27 +0200
rust-proxmox-backup (0.8.18-1) unstable; urgency=medium
* src/client/pull.rs: allow up to 20 concurrent download streams
* docs: add version and date to HTML index
-- Proxmox Support Team <support@proxmox.com> Tue, 22 Sep 2020 12:39:26 +0200
rust-proxmox-backup (0.8.17-1) unstable; urgency=medium
* src/client/pull.rs: open temporary manifest with truncate(true)
* depend on proxmox 0.4.1
* fix #3017: check array boundaries before using
* datastore/prune schedules: use JobState for tracking of schedules
* improve docs
* fix #3015: allow user self-service
* add verification scheduling to proxmox-backup-proxy
* fix #3014: allow DataStoreAdmins to list DS config
* depend on pxar 0.6.1
* fix #2942: implement lacp bond mode and bond_xmit_hash_policy
* api2/pull: make pull worker abortable
* fix #2870: renew tickets in HttpClient
* always allow retrieving (censored) subscription info
* fix #2957: allow Sys.Audit access to node RRD
* backup: check all referenced chunks actually exist
* backup: check verify state of previous backup before allowing reuse
* avoid chrono dependency
-- Proxmox Support Team <support@proxmox.com> Mon, 21 Sep 2020 14:08:32 +0200
rust-proxmox-backup (0.8.16-1) unstable; urgency=medium
* BackupDir: make constructor fallible
* handle invalid mtime when formatting entries
* ui/docs: add onlineHelp button for syncjobs
* docs: add section for calendar events
* tools/systemd/parse_time: enable */x syntax for calendar events
* docs: set html img width limitation through css
* docs: use alabaster theme
* server: set http2 max frame size
* doc: Add section "FAQ"
-- Proxmox Support Team <support@proxmox.com> Fri, 11 Sep 2020 15:54:57 +0200
rust-proxmox-backup (0.8.15-1) unstable; urgency=medium
* verify: skip benchmark directory
* add benchmark flag to backup creation for proper cleanup when running
a benchmark
* mount: fix mount subcommand
* ui: only mark backup encrypted if there are any files
* fix #2983: improve tcp performance
* improve ui and docs
* verify: rename corrupted chunks with .bad extension
* gc: remove .bad files on garbage collect
* ui: add translation support
* server/worker_task: fix upid_read_status
* tools/systemd/time: enable dates for calendarevents
* server/worker_task: fix 'unknown' status for some big task logs
-- Proxmox Support Team <support@proxmox.com> Thu, 10 Sep 2020 09:25:59 +0200
rust-proxmox-backup (0.8.14-1) unstable; urgency=medium
* verify speed up: use separate IO thread, use datastore-wide cache (instead
of per group)
* ui: datastore content: improve encrypted column
* ui: datastore content: show more granular verify state, especially for
backup group rows
* verify: log progress in percent
-- Proxmox Support Team <support@proxmox.com> Wed, 02 Sep 2020 09:36:47 +0200
rust-proxmox-backup (0.8.13-1) unstable; urgency=medium
* improve and add to documentation
* save last verify result in snapshot manifest and show it in the GUI
* gc: use human readable units for summary in task log
-- Proxmox Support Team <support@proxmox.com> Thu, 27 Aug 2020 16:12:07 +0200
rust-proxmox-backup (0.8.12-1) unstable; urgency=medium
* verify: speedup - only verify chunks once
* verify: sort backup groups
* bump pxar dep to 0.4.0
-- Proxmox Support Team <support@proxmox.com> Tue, 25 Aug 2020 08:55:52 +0200
rust-proxmox-backup (0.8.11-1) unstable; urgency=medium
* improve sync jobs, allow to stop them and better logging
* fix #2926: make network interfaces parser more flexible
* fix #2904: zpool status: parse also those vdevs without READ/WRITE/...
statistics
* api2/node/services: turn service api calls into workers
* docs: add sections describing ACL related commands and describing
benchmarking
* docs: general grammar, wording and typo improvements
-- Proxmox Support Team <support@proxmox.com> Wed, 19 Aug 2020 19:20:03 +0200
rust-proxmox-backup (0.8.10-1) unstable; urgency=medium
* ui: acl: add improved permission selector
* services: make reload safer and default to it in gui
* ui: rework DataStore content Panel
* ui: add search box to DataStore content
* ui: DataStoreContent: keep selection and expansion on reload
* upload_chunk: allow upload of empty blobs
* fix #2856: also check whole device for device mapper
* ui: fix error when reloading DataStoreContent
* ui: fix in-progress snapshots always showing as "Encrypted"
* update to pxar 0.3 to support negative timestamps
* fix #2873: if --pattern is used, default to not extracting
* finish_backup: test/verify manifest at server side
* finish_backup: add chunk_upload_stats to manifest
* src/api2/admin/datastore.rs: add API to get/set Notes for backups
* list_snapshots: Returns new "comment" property (first line from notes)
* pxar: create: attempt to use O_NOATIME
* systemd/time: fix weekday wrapping on month
* pxar: better error handling on extract
* pxar/extract: fixup path stack for errors
* datastore: allow browsing signed pxar files
* GC: use time pre phase1 to calculate min_atime in phase2
* gui: user: fix #2898 add dialog to set password
* fix #2909: handle missing chunks gracefully in garbage collection
* finish_backup: mark backup as finished only after checks have passed
* fix: master-key: upload RSA encoded key with backup
* admin-guide: add section explaining master keys
* backup: only allow finished backups as base snapshot
* datastore api: only decode unencrypted indices
* datastore api: verify blob/index csum from manifest
* sync, blobs and chunk readers: add more checks and verification
* verify: add more checks, don't fail on first error
* mark signed manifests as such
* backup/prune/forget: improve locking
* backup: ensure base snapshots are still available after backup
-- Proxmox Support Team <support@proxmox.com> Tue, 11 Aug 2020 15:37:29 +0200
rust-proxmox-backup (0.8.9-1) unstable; urgency=medium
* improve termproxy (console) behavior on updating proxmox-backup-server and
other daemon restarts
* client: improve upload log output and speed calculation
* fix #2885: client upload: bail on duplicate backup targets
-- Proxmox Support Team <support@proxmox.com> Fri, 24 Jul 2020 11:24:07 +0200
rust-proxmox-backup (0.8.8-1) unstable; urgency=medium rust-proxmox-backup (0.8.8-1) unstable; urgency=medium
* pxar: .pxarexclude: match behavior from absolute paths to the one described * pxar: .pxarexclude: match behavior from absolute paths to the one described
@ -308,4 +546,3 @@ proxmox-backup (0.1-1) unstable; urgency=medium
* first try * first try
-- Proxmox Support Team <support@proxmox.com> Fri, 30 Nov 2018 13:03:28 +0100 -- Proxmox Support Team <support@proxmox.com> Fri, 30 Nov 2018 13:03:28 +0100

132
debian/control vendored Normal file
View File

@ -0,0 +1,132 @@
Source: rust-proxmox-backup
Section: admin
Priority: optional
Build-Depends: debhelper (>= 11),
dh-cargo (>= 18),
cargo:native,
rustc:native,
libstd-rust-dev,
librust-anyhow-1+default-dev,
librust-apt-pkg-native-0.3+default-dev (>= 0.3.1-~~),
librust-base64-0.12+default-dev,
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-0.5+default-dev,
librust-crc32fast-1+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.2+default-dev,
librust-h2-0.2+stream-dev,
librust-handlebars-3+default-dev,
librust-http-0.2+default-dev,
librust-hyper-0.13+default-dev (>= 0.13.6-~~),
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev,
librust-nix-0.16+default-dev,
librust-nom-5+default-dev (>= 5.1-~~),
librust-num-traits-0.2+default-dev,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
librust-openssl-0.10+default-dev,
librust-pam-0.7+default-dev,
librust-pam-sys-0.5+default-dev,
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-utils-0.1+default-dev,
librust-proxmox-0.4+api-macro-dev (>= 0.4.1-~~),
librust-proxmox-0.4+default-dev (>= 0.4.1-~~),
librust-proxmox-0.4+sortable-macro-dev (>= 0.4.1-~~),
librust-proxmox-0.4+websocket-dev (>= 0.4.1-~~),
librust-proxmox-fuse-0.1+default-dev,
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
librust-pxar-0.6+tokio-io-dev (>= 0.6.1-~~),
librust-regex-1+default-dev (>= 1.2-~~),
librust-rustyline-6+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-json-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-4+default-dev,
librust-tokio-0.2+blocking-dev (>= 0.2.9-~~),
librust-tokio-0.2+default-dev (>= 0.2.9-~~),
librust-tokio-0.2+dns-dev (>= 0.2.9-~~),
librust-tokio-0.2+fs-dev (>= 0.2.9-~~),
librust-tokio-0.2+io-util-dev (>= 0.2.9-~~),
librust-tokio-0.2+macros-dev (>= 0.2.9-~~),
librust-tokio-0.2+process-dev (>= 0.2.9-~~),
librust-tokio-0.2+rt-threaded-dev (>= 0.2.9-~~),
librust-tokio-0.2+signal-dev (>= 0.2.9-~~),
librust-tokio-0.2+stream-dev (>= 0.2.9-~~),
librust-tokio-0.2+tcp-dev (>= 0.2.9-~~),
librust-tokio-0.2+time-dev (>= 0.2.9-~~),
librust-tokio-0.2+uds-dev (>= 0.2.9-~~),
librust-tokio-openssl-0.4+default-dev,
librust-tokio-util-0.3+codec-dev,
librust-tokio-util-0.3+default-dev,
librust-tower-service-0.3+default-dev,
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
librust-walkdir-2+default-dev,
librust-xdg-2+default-dev (>= 2.2-~~),
librust-zstd-0.4+bindgen-dev,
librust-zstd-0.4+default-dev,
libacl1-dev,
libfuse3-dev,
libsystemd-dev,
uuid-dev,
debhelper (>= 12~),
bash-completion,
python3-docutils,
python3-pygments,
rsync,
fonts-dejavu-core <!nodoc>,
fonts-lato <!nodoc>,
fonts-open-sans <!nodoc>,
graphviz <!nodoc>,
latexmk <!nodoc>,
python3-sphinx <!nodoc>,
texlive-fonts-extra <!nodoc>,
texlive-fonts-recommended <!nodoc>,
texlive-xetex <!nodoc>,
xindy <!nodoc>
Maintainer: Proxmox Support Team <support@proxmox.com>
Standards-Version: 4.4.1
Vcs-Git:
Vcs-Browser:
Homepage: https://www.proxmox.com
Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1),
libzstd1 (>= 1.3.8),
lvm2,
pbs-i18n,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.2-4),
pve-xtermjs (>= 4.7.0-1),
smartmontools,
${misc:Depends},
${shlibs:Depends},
Recommends: zfsutils-linux,
Description: Proxmox Backup Server daemon with tools and GUI
This package contains the Proxmox Backup Server daemons and related
tools. This includes a web-based graphical user interface.
Package: proxmox-backup-client
Architecture: any
Depends: ${misc:Depends}, ${shlibs:Depends}
Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc
Depends: libjs-extjs,
${misc:Depends},
Architecture: all
Description: Proxmox Backup Documentation
This package contains the Proxmox Backup Documentation files.

1
debian/control.in vendored
View File

@ -4,6 +4,7 @@ Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1), libjs-extjs (>= 6.0.1),
libzstd1 (>= 1.3.8), libzstd1 (>= 1.3.8),
lvm2, lvm2,
pbs-i18n,
proxmox-backup-docs, proxmox-backup-docs,
proxmox-mini-journalreader, proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.2-4), proxmox-widget-toolkit (>= 2.2-4),

6
debian/postinst vendored
View File

@ -14,6 +14,12 @@ case "$1" in
_dh_action=start _dh_action=start
fi fi
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
echo "Fixing up termproxy user id in task log..."
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
fi
;; ;;
abort-upgrade|abort-remove|abort-deconfigure) abort-upgrade|abort-remove|abort-deconfigure)

View File

@ -28,7 +28,6 @@ COMPILEDIR := ../target/debug
SPHINXOPTS += -t devbuild SPHINXOPTS += -t devbuild
endif endif
# Sphinx internal variables. # Sphinx internal variables.
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) . ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
@ -68,9 +67,17 @@ proxmox-backup-manager.1: proxmox-backup-manager/man1.rst proxmox-backup-manage
proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
rst2man $< >$@ rst2man $< >$@
.PHONY: onlinehelpinfo
onlinehelpinfo:
@echo "Generating OnlineHelpInfo.js..."
$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
.PHONY: html .PHONY: html
html: ${GENERATED_SYNOPSIS} html: ${GENERATED_SYNOPSIS}
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
cp custom.css $(BUILDDIR)/html/_static/
@echo @echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html." @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

View File

@ -0,0 +1,133 @@
#!/usr/bin/env python3
# debugging stuff
from pprint import pprint
from typing import cast
import json
import re
import os
import io
from docutils import nodes
from sphinx.builders import Builder
from sphinx.util import logging
logger = logging.getLogger(__name__)
# refs are added in the following manner before the title of a section (note underscore and newline before title):
# .. _my-label:
#
# Section to ref
# --------------
#
#
# then referred to like (note missing underscore):
# "see :ref:`my-label`"
#
# the benefit of using this is if a label is explicitly set for a section,
# we can refer to it with this anchor #my-label in the html,
# even if the section name changes.
#
# see https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-ref
def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize later
js_files = []
used_anchors = []
logger.info("scanning extjs files for onlineHelp definitions")
for root, dirs, files in os.walk("{}".format(wwwdir)):
#print(root, dirs, files)
for filename in files:
if filename.endswith('.js'):
js_files.append(os.path.join(root, filename))
for js_file in js_files:
fd = open(js_file).read()
match = re.search("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd) # match object is tuple
if match:
anchor = match.groups()[0]
anchor = re.sub('_', '-', anchor) # normalize labels
logger.info("found onlineHelp: {} in {}".format(anchor, js_file))
used_anchors.append(anchor)
return used_anchors
def setup(app):
logger.info('Mapping reference labels...')
app.add_builder(ReflabelMapper)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
class ReflabelMapper(Builder):
name = 'proxmox-scanrefs'
def init(self):
self.docnames = []
self.env.online_help = {}
self.env.online_help['pbs_documentation_index'] = {
'link': '/docs/index.html',
'title': 'Proxmox Backup Server Documentation Index',
}
self.env.used_anchors = scan_extjs_files()
if not os.path.isdir(self.outdir):
os.mkdir(self.outdir)
self.output_filename = os.path.join(self.outdir, 'OnlineHelpInfo.js')
self.output = io.open(self.output_filename, 'w', encoding='UTF-8')
def write_doc(self, docname, doctree):
for node in doctree.traverse(nodes.section):
#pprint(vars(node))
if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
filename = self.env.doc2path(docname)
filename_html = re.sub('.rst', '.html', filename)
labelid = node['ids'][1] # [0] is predefined by sphinx, we need [1] for explicit ones
title = cast(nodes.title, node[0])
logger.info('traversing section {}'.format(title.astext()))
ref_name = getattr(title, 'rawsource', title.astext())
self.env.online_help[labelid] = {'link': '', 'title': ''}
self.env.online_help[labelid]['link'] = "/docs/" + os.path.basename(filename_html) + "#{}".format(labelid)
self.env.online_help[labelid]['title'] = ref_name
return
def get_outdated_docs(self):
return 'all documents'
def prepare_writing(self, docnames):
return
def get_target_uri(self, docname, typ=None):
return ''
def validate_anchors(self):
#pprint(self.env.online_help)
to_remove = []
for anchor in self.env.used_anchors:
if anchor not in self.env.online_help:
logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
for anchor in self.env.online_help:
if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
logger.info("[*] anchor {} not used! deleting...".format(anchor))
to_remove.append(anchor)
for anchor in to_remove:
self.env.online_help.pop(anchor, None)
return
def finish(self):
# generate OnlineHelpInfo.js output
self.validate_anchors()
self.output.write("const proxmoxOnlineHelpInfo = ")
self.output.write(json.dumps(self.env.online_help, indent=2))
self.output.write(";\n")
self.output.close()
return

View File

@ -24,6 +24,13 @@ good deduplication rates for file archives.
The Proxmox Backup Server supports both strategies. The Proxmox Backup Server supports both strategies.
Image Archives: ``<name>.img``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This is used for virtual machine images and other large binary
data. Content is split into fixed-sized chunks.
File Archives: ``<name>.pxar`` File Archives: ``<name>.pxar``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -34,13 +41,6 @@ the :ref:`pxar-format`, split into variable-sized chunks. The format
is optimized to achieve good deduplication rates. is optimized to achieve good deduplication rates.
Image Archives: ``<name>.img``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This is used for virtual machine images and other large binary
data. Content is split into fixed-sized chunks.
Binary Data (BLOBs) Binary Data (BLOBs)
^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
@ -132,12 +132,13 @@ The command line tool to configure and manage the backup server is called
:term:`DataStore` :term:`DataStore`
~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~
A datastore is a place where backups are stored. The current implementation A datastore refers to a location at which backups are stored. The current
uses a directory inside a standard unix file system (``ext4``, ``xfs`` implementation uses a directory inside a standard unix file system (``ext4``,
or ``zfs``) to store the backup data. ``xfs`` or ``zfs``) to store the backup data.
Datastores are identified by a simple *ID*. You can configure it Datastores are identified by a simple *ID*. You can configure this
when setting up the backup server. when setting up the datastore. The configuration information for datastores
is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
.. note:: The `File Layout`_ requires the file system to support at least *65538* .. note:: The `File Layout`_ requires the file system to support at least *65538*
subdirectories per directory. That number comes from the 2\ :sup:`16` subdirectories per directory. That number comes from the 2\ :sup:`16`
@ -146,26 +147,132 @@ when setting up the backup server.
filesystem configuration from being supported for a datastore. For example, filesystem configuration from being supported for a datastore. For example,
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled. ``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
Disk Management
~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-disks.png
:align: right
:alt: List of disks
Proxmox Backup Server comes with a set of disk utilities, which are
accessed using the ``disk`` subcommand. This subcommand allows you to initialize
disks, create various filesystems, and get information about the disks.
To view the disks connected to the system, navigate to **Administration ->
Disks** in the web interface or use the ``list`` subcommand of
``disk``:
.. code-block:: console
# proxmox-backup-manager disk list
┌──────┬────────┬─────┬───────────┬─────────────┬───────────────┬─────────┬────────┐
│ name │ used │ gpt │ disk-type │ size │ model │ wearout │ status │
╞══════╪════════╪═════╪═══════════╪═════════════╪═══════════════╪═════════╪════════╡
│ sda │ lvm │ 1 │ hdd │ 34359738368 │ QEMU_HARDDISK │ - │ passed │
├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
│ sdb │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
│ sdc │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
└──────┴────────┴─────┴───────────┴─────────────┴───────────────┴─────────┴────────┘
To initialize a disk with a new GPT, use the ``initialize`` subcommand:
.. code-block:: console
# proxmox-backup-manager disk initialize sdX
.. image:: images/screenshots/pbs-gui-disks-dir-create.png
:align: right
:alt: Create a directory
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
create``, or by navigating to **Administration -> Disks -> Directory** in the
web interface and creating one from there. The following command creates an
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
automatically create a datastore on the disk (in this case ``sdd``). This will
create a datastore at the location ``/mnt/datastore/store1``:
.. code-block:: console
# proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
.. image:: images/screenshots/pbs-gui-disks-zfs-create.png
:align: right
:alt: Create ZFS
You can also create a ``zpool`` with various raid levels from **Administration
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
mounts it on the root directory (default):
.. code-block:: console
# proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
.. note:: You can also pass the ``--add-datastore`` parameter here, to automatically
create a datastore from the disk.
You can use ``disk fs list`` and ``disk zpool list`` to keep track of your
filesystems and zpools respectively.
Proxmox Backup Server uses the package smartmontools. This is a set of tools
used to monitor and control the S.M.A.R.T. system for local hard disks. If a
disk supports S.M.A.R.T. capability, and you have this enabled, you can
display S.M.A.R.T. attributes from the web interface or by using the command:
.. code-block:: console
# proxmox-backup-manager disk smart-attributes sdX
.. note:: This functionality may also be accessed directly through the use of
the ``smartctl`` command, which comes as part of the smartmontools package
(see ``man smartctl`` for more details).
Datastore Configuration Datastore Configuration
~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-datastore.png
:align: right
:alt: Datastore Overview
You can configure multiple datastores. Minimum one datastore needs to be You can configure multiple datastores. Minimum one datastore needs to be
configured. The datastore is identified by a simple `name` and points to a configured. The datastore is identified by a simple *name* and points to a
directory on the filesystem. Each datastore also has associated retention directory on the filesystem. Each datastore also has associated retention
settings of how many backup snapshots for each interval of ``hourly``, settings of how many backup snapshots for each interval of ``hourly``,
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent ``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
number of backups to keep in that store. :ref:`Pruning <pruning>` and number of backups to keep in that store. :ref:`Pruning <pruning>` and
:ref:`garbage collection <garbage-collection>` can also be configured to run :ref:`garbage collection <garbage-collection>` can also be configured to run
periodically based on a configured :term:`schedule` per datastore. periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1` Creating a Datastore
^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
:align: right
:alt: Create a datastore
You can create a new datastore from the web GUI, by navigating to **Datastore** in
the menu tree and clicking **Create**. Here:
* *Name* refers to the name of the datastore
* *Backing Path* is the path to the directory upon which you want to create the
datastore
* *GC Schedule* refers to the time and intervals at which garbage collection
runs
* *Prune Schedule* refers to the frequency at which pruning takes place
* *Prune Options* set the amount of backups which you would like to keep (see :ref:`Pruning <pruning>`).
Alternatively you can create a new datastore from the command line. The
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
.. code-block:: console .. code-block:: console
# proxmox-backup-manager datastore create store1 /backup/disk1/store1 # proxmox-backup-manager datastore create store1 /backup/disk1/store1
To list existing datastores run: Managing Datastores
^^^^^^^^^^^^^^^^^^^
To list existing datastores from the command line run:
.. code-block:: console .. code-block:: console
@ -176,13 +283,15 @@ To list existing datastores run:
│ store1 │ /backup/disk1/store1 │ This is my default storage. │ │ store1 │ /backup/disk1/store1 │ This is my default storage. │
└────────┴──────────────────────┴─────────────────────────────┘ └────────┴──────────────────────┴─────────────────────────────┘
You can change settings of a datastore, for example to set a prune and garbage You can change the garbage collection and prune settings of a datastore, by
collection schedule or retention settings using ``update`` subcommand and view editing the datastore from the GUI or by using the ``update`` subcommand. For
a datastore with the ``show`` subcommand: example, the below command changes the garbage collection schedule using the
``update`` subcommand and prints the properties of the datastore with the
``show`` subcommand:
.. code-block:: console .. code-block:: console
# proxmox-backup-manager datastore update store1 --keep-last 7 --prune-schedule daily --gc-schedule 'Tue 04:27' # proxmox-backup-manager datastore update store1 --gc-schedule 'Tue 04:27'
# proxmox-backup-manager datastore show store1 # proxmox-backup-manager datastore show store1
┌────────────────┬─────────────────────────────┐ ┌────────────────┬─────────────────────────────┐
│ Name │ Value │ │ Name │ Value │
@ -260,6 +369,10 @@ directories will store the chunked data after a backup operation has been execut
User Management User Management
~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-user-management.png
:align: right
:alt: User management
Proxmox Backup Server supports several authentication realms, and you need to Proxmox Backup Server supports several authentication realms, and you need to
choose the realm when you add a new user. Possible realms are: choose the realm when you add a new user. Possible realms are:
@ -271,7 +384,8 @@ choose the realm when you add a new user. Possible realms are:
``/etc/proxmox-backup/shadow.json``. ``/etc/proxmox-backup/shadow.json``.
After installation, there is a single user ``root@pam``, which After installation, there is a single user ``root@pam``, which
corresponds to the Unix superuser. You can use the corresponds to the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the
``proxmox-backup-manager`` command line tool to list or manipulate ``proxmox-backup-manager`` command line tool to list or manipulate
users: users:
@ -284,19 +398,21 @@ users:
│ root@pam │ 1 │ │ │ │ │ Superuser │ │ root@pam │ 1 │ │ │ │ │ Superuser │
└─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘ └─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
:align: right
:alt: Add a new user
The superuser has full administration rights on everything, so you The superuser has full administration rights on everything, so you
normally want to add other users with less privileges: normally want to add other users with less privileges. You can create a new
user with the ``user create`` subcommand or through the web interface, under
**Configuration -> User Management**. The ``create`` subcommand lets you specify
many options like ``--email`` or ``--password``. You can update or change any
user properties using the ``update`` subcommand later (**Edit** in the GUI):
.. code-block:: console .. code-block:: console
# proxmox-backup-manager user create john@pbs --email john@example.com # proxmox-backup-manager user create john@pbs --email john@example.com
The create command lets you specify many options like ``--email`` or
``--password``. You can update or change any of them using the
update command later:
.. code-block:: console
# proxmox-backup-manager user update john@pbs --firstname John --lastname Smith # proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
# proxmox-backup-manager user update john@pbs --comment "An example user." # proxmox-backup-manager user update john@pbs --comment "An example user."
@ -344,10 +460,10 @@ following roles exist:
Disable Access - nothing is allowed. Disable Access - nothing is allowed.
**Admin** **Admin**
The Administrator can do anything. Can do anything.
**Audit** **Audit**
An Auditor can view things, but is not allowed to change settings. Can view things, but is not allowed to change settings.
**DatastoreAdmin** **DatastoreAdmin**
Can do anything on datastores. Can do anything on datastores.
@ -356,10 +472,10 @@ following roles exist:
Can view datastore settings and list content. But Can view datastore settings and list content. But
is not allowed to read the actual data. is not allowed to read the actual data.
**DataStoreReader** **DatastoreReader**
Can Inspect datastore content and can do restores. Can Inspect datastore content and can do restores.
**DataStoreBackup** **DatastoreBackup**
Can backup and restore owned backups. Can backup and restore owned backups.
**DatastorePowerUser** **DatastorePowerUser**
@ -374,24 +490,173 @@ following roles exist:
**RemoteSyncOperator** **RemoteSyncOperator**
Is allowed to read data from a remote. Is allowed to read data from a remote.
.. image:: images/screenshots/pbs-gui-permissions-add.png
:align: right
:alt: Add permissions for user
Access permission information is stored in ``/etc/proxmox-backup/acl.cfg``. The
file contains 5 fields, separated using a colon (':') as a delimiter. A typical
entry takes the form:
``acl:1:/datastore:john@pbs:DatastoreBackup``
The data represented in each field is as follows:
#. ``acl`` identifier
#. A ``1`` or ``0``, representing whether propagation is enabled or disabled,
respectively
#. The object on which the permission is set. This can be a specific object
(single datastore, remote, etc.) or a top level object which, with
propagation enabled, also represents all children of the object.
#. The user for which the permission is set
#. The role being set
You can manage datastore permissions from **Configuration -> Permissions** in the
web interface. Likewise, you can use the ``acl`` subcommand to manage and
monitor user permissions from the command line. For example, the command below
will add the user ``john@pbs`` as a **DatastoreAdmin** for the datastore
``store1``, located at ``/backup/disk1/store1``:
.. code-block:: console
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
You can monitor the roles of each user using the following command:
.. code-block:: console
# proxmox-backup-manager acl list
┌──────────┬──────────────────┬───────────┬────────────────┐
│ ugid │ path │ propagate │ roleid │
╞══════════╪══════════════════╪═══════════╪════════════════╡
│ john@pbs │ /datastore/disk1 │ 1 │ DatastoreAdmin │
└──────────┴──────────────────┴───────────┴────────────────┘
A single user can be assigned multiple permission sets for different datastores.
.. Note::
Naming convention is important here. For datastores on the host,
you must use the convention ``/datastore/{storename}``. For example, to set
permissions for a datastore mounted at ``/mnt/backup/disk4/store2``, you would use
``/datastore/store2`` for the path. For remote stores, use the convention
``/remote/{remote}/{storename}``, where ``{remote}`` signifies the name of the
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
the remote.
Network Management
~~~~~~~~~~~~~~~~~~
Proxmox Backup Server provides both a web interface and a command line tool for
network configuration. You can find the configuration options in the web
interface under the **Network Interfaces** section of the **Configuration** menu
tree item. The command line tool is accessed via the ``network`` subcommand.
These interfaces allow you to carry out some basic network management tasks,
such as adding, configuring, and removing network interfaces.
.. note:: Any changes made to the network configuration are not
applied until you click on **Apply Configuration** or enter the ``network
reload`` command. This allows you to make many changes at once. It also allows
you to ensure that your changes are correct before applying them, as making a
mistake here can render the server inaccessible over the network.
To get a list of available interfaces, use the following command:
.. code-block:: console
# proxmox-backup-manager network list
┌───────┬────────┬───────────┬────────┬─────────────┬──────────────┬──────────────┐
│ name │ type │ autostart │ method │ address │ gateway │ ports/slaves │
╞═══════╪════════╪═══════════╪════════╪═════════════╪══════════════╪══════════════╡
│ bond0 │ bond │ 1 │ static │ x.x.x.x/x │ x.x.x.x │ ens18 ens19 │
├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
│ ens18 │ eth │ 1 │ manual │ │ │ │
├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
│ ens19 │ eth │ 1 │ manual │ │ │ │
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
.. image:: images/screenshots/pbs-gui-network-create-bond.png
:align: right
:alt: Add a network interface
To add a new network interface, use the ``create`` subcommand with the relevant
parameters. For example, you may want to set up a bond, for the purpose of
network redundancy. The following command shows a template for creating the bond shown
in the list above:
.. code-block:: console
# proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x
You can make changes to the configuration of a network interface with the
``update`` subcommand:
.. code-block:: console
# proxmox-backup-manager network update bond0 --cidr y.y.y.y/y
You can also remove a network interface:
.. code-block:: console
# proxmox-backup-manager network remove bond0
The pending changes for the network configuration file will appear at the bottom of the
web interface. You can also view these changes by using the command:
.. code-block:: console
# proxmox-backup-manager network changes
If you would like to cancel all changes at this point, you can either click on
the **Revert** button or use the following command:
.. code-block:: console
# proxmox-backup-manager network revert
If you are happy with the changes and would like to write them into the
configuration file, select **Apply Configuration**. The corresponding command
is:
.. code-block:: console
# proxmox-backup-manager network reload
.. note:: This command and the corresponding GUI button rely on the ``ifreload``
command from the package ``ifupdown2``. This package is included with the
Proxmox Backup Server installation; however, you may have to install it yourself
if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.
You can also configure DNS settings, from the **DNS** section
of **Configuration** or by using the ``dns`` subcommand of
``proxmox-backup-manager``.
:term:`Remote` :term:`Remote`
~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~
A remote refers to a separate Proxmox Backup Server installation and a user on that A remote refers to a separate Proxmox Backup Server installation and a user on that
installation, from which you can `sync` datastores to a local datastore with a installation, from which you can `sync` datastores to a local datastore with a
`Sync Job`. `Sync Job`. You can configure remotes in the web interface, under **Configuration
-> Remotes**. Alternatively, you can use the ``remote`` subcommand. The
configuration information for remotes is stored in the file
``/etc/proxmox-backup/remote.cfg``.
.. image:: images/screenshots/pbs-gui-remote-add.png
:align: right
:alt: Add a remote
To add a remote, you need its hostname or ip, a userid and password on the To add a remote, you need its hostname or ip, a userid and password on the
remote, and its certificate fingerprint. To get the fingerprint, use the remote, and its certificate fingerprint. To get the fingerprint, use the
``proxmox-backup-manager cert info`` command on the remote. ``proxmox-backup-manager cert info`` command on the remote, or navigate to
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
.. code-block:: console .. code-block:: console
# proxmox-backup-manager cert info |grep Fingerprint # proxmox-backup-manager cert info |grep Fingerprint
Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
Using the information specified above, add the remote with: Using the information specified above, you can add a remote from the **Remotes**
configuration panel, or by using the command:
.. code-block:: console .. code-block:: console
@ -411,14 +676,23 @@ Use the ``list``, ``show``, ``update``, ``remove`` subcommands of
└──────┴──────────────┴──────────┴───────────────────────────────────────────┴─────────┘ └──────┴──────────────┴──────────┴───────────────────────────────────────────┴─────────┘
# proxmox-backup-manager remote remove pbs2 # proxmox-backup-manager remote remove pbs2
.. _syncjobs:
Sync Jobs Sync Jobs
~~~~~~~~~ ~~~~~~~~~
Sync jobs are configured to pull the contents of a datastore on a `Remote` to a .. image:: images/screenshots/pbs-gui-syncjob-add.png
local datastore. You can either start the sync job manually on the GUI or :align: right
provide it with a :term:`schedule` to run regularly. The :alt: Add a Sync Job
``proxmox-backup-manager sync-job`` command is used to manage sync jobs:
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
a local datastore. You can manage sync jobs under **Configuration -> Sync Jobs**
in the web interface, or using the ``proxmox-backup-manager sync-job`` command.
The configuration information for sync jobs is stored at
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually on the GUI or provide it with a schedule (see
:ref:`calendar-events`) to run regularly.
.. code-block:: console .. code-block:: console
@ -433,6 +707,15 @@ provide it with a :term:`schedule` to run regularly. The
# proxmox-backup-manager sync-job remove pbs2-local # proxmox-backup-manager sync-job remove pbs2-local
Garbage Collection
~~~~~~~~~~~~~~~~~~
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
Proxmox Backup Server using the ``garbage-collection`` subcommand of
``proxmox-backup-manager``. Use the ``start`` subcommand to manually start garbage
collection on an entire datastore, and the ``status`` subcommand to see
attributes relating to the garbage collection.
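For example, for a datastore named ``store1`` (the name here is only an illustration),
this might look like:

.. code-block:: console

  # proxmox-backup-manager garbage-collection start store1
  # proxmox-backup-manager garbage-collection status store1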
Backup Client usage
-------------------
@ -543,7 +826,9 @@ This will prompt you for a password and then uploads a file archive named
The ``--repository`` option can get quite long and is used by all
commands. You can avoid having to enter this value by setting the
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
over multiple sessions, you should instead add the below line to your
``.bashrc`` file.
.. code-block:: console
@ -578,7 +863,7 @@ Excluding files/folders from a backup
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes it is desired to exclude certain files or folders from a backup archive.
To tell the Proxmox Backup client when and how to ignore files and directories,
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
Whenever the backup client encounters such a file in a directory, it interprets
each line as glob match patterns for files and directories that are to be excluded
@ -660,7 +945,7 @@ Restoring this backup will result in:
. .. file2
Encryption
~~~~~~~~~~
Proxmox Backup supports client-side encryption with AES-256 in GCM_
mode. To set this up, you first need to create an encryption key:
@ -677,6 +962,8 @@ extra protection, you can also create it without a password:
# proxmox-backup-client key create /path/to/my-backup.key --kdf none
Having created this key, it is now possible to create an encrypted backup by
passing the ``--keyfile`` parameter with the path to the key file.
.. code-block:: console
@ -685,12 +972,97 @@ extra protection, you can also create it without a password:
Encryption Key Password: **************
...
.. Note:: If you do not specify the name of the backup key, the key will be
created in the default location
``~/.config/proxmox-backup/encryption-key.json``. ``proxmox-backup-client``
will also search this location by default, in case the ``--keyfile``
parameter is not specified.
You can avoid entering the passwords by setting the environment
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
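For example, you could export both variables in your shell before invoking the
client (the values below are placeholders):

.. code-block:: console

  # export PBS_PASSWORD='backup-server-password'
  # export PBS_ENCRYPTION_PASSWORD='encryption-key-password'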
Using a master key to store and recover encryption keys
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can also use ``proxmox-backup-client key`` to create an RSA public/private
key pair, which can be used to store an encrypted version of the symmetric
backup encryption key alongside each backup and recover it later.
To set up a master key:
1. Create an encryption key for the backup:
.. code-block:: console
# proxmox-backup-client key create
creating default key at: "~/.config/proxmox-backup/encryption-key.json"
Encryption Key Password: **********
...
The resulting file will be saved to ``~/.config/proxmox-backup/encryption-key.json``.
2. Create an RSA public/private key pair:
.. code-block:: console
# proxmox-backup-client key create-master-key
Master Key Password: *********
...
This will create two files in your current directory, ``master-public.pem``
and ``master-private.pem``.
3. Import the newly created ``master-public.pem`` public certificate, so that
``proxmox-backup-client`` can find and use it upon backup.
.. code-block:: console
# proxmox-backup-client key import-master-pubkey /path/to/master-public.pem
Imported public master key to "~/.config/proxmox-backup/master-public.pem"
4. With all these files in place, run a backup job:
.. code-block:: console
# proxmox-backup-client backup etc.pxar:/etc
The key will be stored in your backup, under the name ``rsa-encrypted.key``.
.. Note:: The ``--keyfile`` parameter can be excluded if the encryption key
is in the default path. If you specified another path upon creation, you
must pass the ``--keyfile`` parameter.
5. To test that everything worked, you can restore the key from the backup:
.. code-block:: console
# proxmox-backup-client restore /path/to/backup/ rsa-encrypted.key /path/to/target
.. Note:: You should not need an encryption key to extract this file. However, if
a key exists at the default location
(``~/.config/proxmox-backup/encryption-key.json``) the program will prompt
you for an encryption key password. Simply moving ``encryption-key.json``
out of this directory will fix this issue.
6. Then, use the previously generated master key to decrypt the file:
.. code-block:: console
# openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
Enter pass phrase for ./master-private.pem: *********
7. The target file will now contain the encryption key information in plain
text. You can confirm that recovery was successful by passing the resulting ``json``
file, with the ``--keyfile`` parameter, when decrypting files from the backup.
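A minimal sketch of that check, reusing the example snapshot shown in the restore
section below and assuming the recovered key was written to ``/path/to/target``:

.. code-block:: console

  # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z etc.pxar /restore/etc --keyfile /path/to/target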
.. warning:: Without their key, backed up files will be inaccessible. Thus, you should
keep keys ordered and in a place that is separate from the contents being
backed up. It can happen, for example, that you back up an entire system, using
a key on that system. If the system then becomes inaccessible for any reason
and needs to be restored, this will not be possible as the encryption key will be
lost along with the broken system. In preparation for the worst case scenario,
you should consider keeping a paper copy of this key locked away in
a safe place.
Restoring Data
~~~~~~~~~~~~~~
@ -733,7 +1105,7 @@ backup.
# proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/
To get the contents of any archive, you can restore the ``index.json`` file in the
repository to the target path '-'. This will dump the contents to the standard output.
.. code-block:: console
@ -777,7 +1149,7 @@ For example:
.. code-block:: console
pxar:/ > find etc/**/*.txt --select
"/etc/X11/rgb.txt"
pxar:/ > list-selected
etc/**/*.txt
@ -815,8 +1187,8 @@ file archive as a read-only filesystem to a mountpoint on your host.
.. code-block:: console
# proxmox-backup-client mount host/backup-client/2020-01-29T11:29:22Z root.pxar /mnt/mountpoint
# ls /mnt/mountpoint
bin dev home lib32 libx32 media opt root sbin sys usr
boot etc lib lib64 lost+found mnt proc run srv tmp var
@ -831,7 +1203,7 @@ To unmount the filesystem use the ``umount`` command on the mountpoint:
.. code-block:: console
# umount /mnt/mountpoint
Login and Logout
~~~~~~~~~~~~~~~~
@ -874,8 +1246,8 @@ command:
snapshot. They will be inaccessible and unrecoverable.
Although manual removal is sometimes required, the ``prune``
command is normally used to systematically delete older backups. Prune lets
you specify which backup snapshots you want to keep. The
following retention options are available:
@ -950,7 +1322,7 @@ Garbage Collection
~~~~~~~~~~~~~~~~~~
The ``prune`` command removes only the backup index files, not the data
from the datastore. This task is left to the garbage collection
command. It is recommended to carry out garbage collection on a regular basis.
The garbage collection works in two phases. In the first phase, all
@ -995,6 +1367,42 @@ unused data blocks are removed.
.. todo:: howto run garbage-collection at regular intervals (cron)
Benchmarking
~~~~~~~~~~~~
The backup client also comes with a benchmarking tool. This tool measures
various metrics relating to compression and encryption speeds. You can run a
benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
.. code-block:: console
# proxmox-backup-client benchmark
Uploaded 656 chunks in 5 seconds.
Time per request: 7659 microseconds.
TLS speed: 547.60 MB/s
SHA256 speed: 585.76 MB/s
Compression speed: 1923.96 MB/s
Decompress speed: 7885.24 MB/s
AES256/GCM speed: 3974.03 MB/s
┌───────────────────────────────────┬─────────────────────┐
│ Name │ Value │
╞═══════════════════════════════════╪═════════════════════╡
│ TLS (maximal backup upload speed) │ 547.60 MB/s (93%) │
├───────────────────────────────────┼─────────────────────┤
│ SHA256 checksum computation speed │ 585.76 MB/s (28%) │
├───────────────────────────────────┼─────────────────────┤
│ ZStd level 1 compression speed │ 1923.96 MB/s (89%) │
├───────────────────────────────────┼─────────────────────┤
│ ZStd level 1 decompression speed │ 7885.24 MB/s (98%) │
├───────────────────────────────────┼─────────────────────┤
│ AES256 GCM encryption speed │ 3974.03 MB/s (104%) │
└───────────────────────────────────┴─────────────────────┘
.. note:: The percentages given in the output table correspond to a
comparison against a Ryzen 7 2700X. The TLS test connects to the
local host, so there is no network involved.
You can also pass the ``--output-format`` parameter to output stats in ``json``,
rather than the default table format.
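For example:

.. code-block:: console

  # proxmox-backup-client benchmark --output-format json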
.. _pve-integration:
@ -1011,13 +1419,17 @@ as ``user1@pbs``.
# pvesm add pbs store2 --server localhost --datastore store2
# pvesm set store2 --username user1@pbs --password <secret>
.. note:: If you would rather not pass your password as plain text, you can pass
the ``--password`` parameter, without any arguments. This will cause the
program to prompt you for a password upon entering the command.
If your backup server uses a self signed certificate, you need to add
the certificate fingerprint to the configuration. You can get the
fingerprint by running the following command on the backup server:
.. code-block:: console
# proxmox-backup-manager cert info | grep Fingerprint
Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
Please add that fingerprint to your configuration to establish a trust
@ -1035,6 +1447,10 @@ After that you should be able to see storage status with:
Name Type Status Total Used Available %
store2 pbs active 3905109820 1336687816 2568422004 34.23%
Having added the PBS datastore to `Proxmox VE`_, you can back up VMs and
containers in the same way you would for any other storage device within the
environment (see `PVE Admin Guide: Backup and Restore
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_).
.. include:: command-line-tools.rst
docs/calendarevents.rst Normal file
@ -0,0 +1,100 @@
.. _calendar-events:
Calendar Events
===============
Introduction and Format
-----------------------
Certain tasks, for example pruning and garbage collection, need to be
performed on a regular basis. Proxmox Backup Server uses a format inspired
by the systemd Time and Date Specification (see `systemd.time manpage`_)
called `calendar events` for its schedules.
`Calendar events` are expressions to specify one or more points in time.
They are mostly compatible with systemd's calendar events.
The general format is as follows:
.. code-block:: console
:caption: Calendar event
[WEEKDAY] [[YEARS-]MONTHS-DAYS] [HOURS:MINUTES[:SECONDS]]
Note that there has to be at least a weekday, date or time part.
If the weekday or date part is omitted, all (week)days are included.
If the time part is omitted, the time 00:00:00 is implied.
(e.g. '2020-01-01' refers to '2020-01-01 00:00:00')
Weekdays are specified with the abbreviated English version:
`mon, tue, wed, thu, fri, sat, sun`.
Each field can contain multiple values in the following formats:
* comma-separated: e.g., 01,02,03
* as a range: e.g., 01..10
* as a repetition: e.g., 05/10 (means starting at 5 every 10)
* and a combination of the above: e.g., 01,05..10,12/02
* or a `*` for every possible value: e.g., \*:00
There are some special values that have specific meaning:
================================= ==============================
Value Syntax
================================= ==============================
`minutely` `*-*-* *:*:00`
`hourly` `*-*-* *:00:00`
`daily` `*-*-* 00:00:00`
`weekly` `mon *-*-* 00:00:00`
`monthly` `*-*-01 00:00:00`
`yearly` or `annually` `*-01-01 00:00:00`
`quarterly` `*-01,04,07,10-01 00:00:00`
`semiannually` or `semi-annually` `*-01,07-01 00:00:00`
================================= ==============================
Here is a table with some useful examples:
======================== ============================= ===================================
Example Alternative Explanation
======================== ============================= ===================================
`mon,tue,wed,thu,fri` `mon..fri` Every working day at 00:00
`sat,sun` `sat..sun` Only on weekends at 00:00
`mon,wed,fri` -- Monday, Wednesday, Friday at 00:00
`12:05` -- Every day at 12:05 PM
`*:00/5` `0/1:0/5` Every five minutes
`mon..wed *:30/10` `mon,tue,wed *:30/10` Monday, Tuesday, Wednesday 30, 40 and 50 minutes after every full hour
`mon..fri 8..17,22:0/15` -- Every working day every 15 minutes between 8 AM and 6 PM and between 10 PM and 11 PM
`fri 12..13:5/20` `fri 12,13:5/20` Friday at 12:05, 12:25, 12:45, 13:05, 13:25 and 13:45
`12,14,16,18,20,22:5` `12/2:5` Every day starting at 12:05 until 22:05, every 2 hours
`*:*` `0/1:0/1` Every minute (minimum interval)
`*-05` -- On the 5th day of every Month
`Sat *-1..7 15:00` -- First Saturday each Month at 15:00
`2015-10-21` -- 21st October 2015 at 00:00
======================== ============================= ===================================
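As a sketch of how such an event is put to use, a schedule can be attached to a
datastore's garbage collection; the ``--gc-schedule`` option shown here is an
assumption based on the datastore configuration, not something defined on this page:

.. code-block:: console

  # proxmox-backup-manager datastore update store1 --gc-schedule 'sat 03:00'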
Differences to systemd
----------------------
Not all features of systemd calendar events are implemented:
* no unix timestamps (e.g. `@12345`): instead use date and time to specify
a specific point in time
* no timezone: all schedules use the set timezone on the server
* no sub-second resolution
* no reverse day syntax (e.g. 2020-03~01)
* no repetition of ranges (e.g. 1..10/2)
Notes on scheduling
-------------------
In `Proxmox Backup`_, scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks every minute whether
any job schedule is due. This means that even though
`calendar events` can contain seconds, schedules are only checked
once a minute.
Also, all schedules will be checked against the timezone set
in the `Proxmox Backup`_ server.
@ -18,9 +18,12 @@
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# custom extensions
sys.path.append(os.path.abspath("./_ext"))
# -- Implement custom formatter for code-blocks ---------------------------
#
# * use smaller font
@ -46,7 +49,7 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo", "proxmox-scanrefs"]
todo_link_only = True
@ -71,7 +74,7 @@ rst_epilog = epilog_file.read()
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2020, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'
# The version info for the project you're documenting, acts as replacement for
@ -94,12 +97,10 @@ language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%A, %d %B %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@ -145,7 +146,7 @@ pygments_style = 'sphinx'
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = not tags.has('release')
# -- Options for HTML output ----------------------------------------------
@ -153,13 +154,32 @@ todo_include_todos = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'fixed_sidebar': True,
#'sidebar_includehidden': False,
'sidebar_collapse': False, # FIXME: documented, but does not work?!
'show_relbar_bottom': True, # FIXME: documented, but does not work?!
'show_powered_by': False,
'logo': 'proxmox-logo.svg',
'logo_name': True, # show project name below logo
#'logo_text_align': 'center',
#'description': 'Fast, Secure & Efficient.',
'sidebar_width': '300px',
'page_width': '1280px',
# font styles
'head_font_family': 'Lato, sans-serif',
'caption_font_family': 'Lato, sans-serif',
'caption_font_size': '20px',
'font_family': 'Open Sans, sans-serif',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
@ -176,7 +196,7 @@ html_theme = 'sphinxdoc'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
#html_logo = 'images/proxmox-logo.svg' # replaced by html_theme_options.logo
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
@ -229,7 +249,7 @@ html_static_path = ['_static']
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
docs/custom.css Normal file
@ -0,0 +1,15 @@
div.sphinxsidebar {
height: calc(100% - 20px);
overflow: auto;
}
h1.logo-name {
font-size: 24px;
}
div.body img {
width: 250px;
}
pre {
padding: 5px 10px;
}
@ -13,7 +13,8 @@
.. _Proxmox: https://www.proxmox.com
.. _Proxmox Community Forum: https://forum.proxmox.com
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
.. FIXME
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
.. _Rust: https://www.rust-lang.org/
@ -37,3 +38,6 @@
.. _RFC3399: https://tools.ietf.org/html/rfc3339
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
docs/faq.rst Normal file
@ -0,0 +1,71 @@
FAQ
===
What distribution is Proxmox Backup Server (PBS) based on?
----------------------------------------------------------
Proxmox Backup Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.
Which platforms are supported as a backup source (client)?
----------------------------------------------------------
The client tool works on most modern Linux systems, meaning you are not limited
to Debian-based backups.
Will Proxmox Backup Server run on a 32-bit processor?
-----------------------------------------------------
Proxmox Backup Server only supports 64-bit CPUs (AMD or Intel). There are no
future plans to support 32-bit processors.
How long will my Proxmox Backup Server version be supported?
------------------------------------------------------------
+-----------------------+--------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+====================+===============+============+====================+
|Proxmox Backup 1.x | Debian 10 (Buster) | tba | tba | tba |
+-----------------------+--------------------+---------------+------------+--------------------+
Can I copy or synchronize my datastore to another location?
-----------------------------------------------------------
Proxmox Backup Server allows you to copy or synchronize datastores to other
locations, through the use of *Remotes* and *Sync Jobs*. *Remote* is the term
given to a separate server, which has a datastore that can be synced to a local store.
A *Sync Job* is the process which is used to pull the contents of a datastore from
a *Remote* to a local datastore.
Can Proxmox Backup Server verify data integrity of a backup archive?
--------------------------------------------------------------------
Proxmox Backup Server uses a built-in SHA-256 checksum algorithm to ensure
data integrity. Within each backup, a manifest file (index.json) is created,
which contains a list of all the backup files, along with their sizes and
checksums. This manifest file is used to verify the integrity of each backup.
When backing up to remote servers, do I have to trust the remote server?
------------------------------------------------------------------------
Proxmox Backup Server supports client-side encryption, meaning your data is
encrypted before it reaches the server. Thus, in the event that an attacker
gains access to the server, they will not be able to read the data.
.. note:: Encryption is not enabled by default. To set up encryption, see the
`Encryption
<https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
of the Proxmox Backup Server Administration Guide.
Is the backup incremental/deduplicated?
---------------------------------------
With Proxmox Backup Server, backups are sent incrementally and data is
deduplicated on the server.
This minimizes both the storage consumed and the network impact.
@ -51,14 +51,3 @@ Glossary
A remote Proxmox Backup Server installation and credentials for a user on it.
You can pull datastores from a remote to a local datastore in order to
have redundant backups.
Schedule
Certain tasks, for example pruning and garbage collection, need to be
performed on a regular basis. Proxmox Backup Server uses a subset of the
`systemd Time and Date Specification
<https://www.freedesktop.org/software/systemd/man/systemd.time.html#>`_.
The subset currently supports time of day specifications and weekdays, in
addition to the shorthand expressions 'minutely', 'hourly', 'daily'.
There is no support for specifying timezones, the tasks are run in the
timezone configured on the server.

(11 binary image files added, not shown)
@ -2,8 +2,8 @@
Welcome to the Proxmox Backup documentation!
============================================
| Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
| Version |version| -- |today|
Permission is granted to copy, distribute and/or modify this document under the
terms of the GNU Free Documentation License, Version 1.3 or any later version
@ -24,6 +24,7 @@ in the section entitled "GNU Free Documentation License".
installation.rst
administration-guide.rst
sysadmin.rst
faq.rst
.. raw:: latex
@ -36,6 +37,7 @@ in the section entitled "GNU Free Documentation License".
command-syntax.rst
file-formats.rst
backup-protocol.rst
calendarevents.rst
glossary.rst
GFDL.rst
@ -49,4 +51,3 @@ in the section entitled "GNU Free Documentation License".
* :ref:`genindex`
@ -19,9 +19,9 @@ for various management tasks such as disk management.
The disk image (ISO file) provided by Proxmox includes a complete Debian system
("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.
The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configurations
(e.g. timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
@ -36,11 +36,11 @@ It includes the following:
* The `Proxmox Backup`_ server installer, which partitions the local
disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
system
* Complete operating system (Debian Linux, 64-bit)
* Our Linux kernel with ZFS support
* Complete tool-set to administer backups and all necessary resources
@ -54,7 +54,7 @@ Install `Proxmox Backup`_ server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages which can be installed on top of a
standard Debian installation. After configuring the
:ref:`sysadmin_package_repositories`, you need to run:
.. code-block:: console
@ -76,12 +76,11 @@ does, please use the following:
This will install all required packages, the Proxmox kernel with ZFS_
support, and a set of common and useful packages.
.. caution:: Installing `Proxmox Backup`_ on top of an existing Debian_
installation looks easy, but it assumes that the base system and local
storage have been set up correctly. In general this is not trivial, especially
when LVM_ or ZFS_ is used. The network configuration is completely up to you
as well.
.. note:: You can access the webinterface of the Proxmox Backup Server with
your web browser, using HTTPS on port 8007. For example at
@ -103,9 +102,9 @@ After configuring the
server to store backups. Should the hypervisor server fail, you can
still access the backups.
.. note::
You can access the webinterface of the Proxmox Backup Server with your web
browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``
Client installation
-------------------
@ -15,15 +15,15 @@ encryption (AE_). Using :term:`Rust` as the implementation language guarantees h
performance, low resource usage, and a safe, high-quality codebase.
It features strong client-side encryption. Thus, it's possible to
backup data to targets that are not fully trusted.
Architecture
------------
Proxmox Backup Server uses a `client-server model`_. The server stores the
backup data and provides an API to create and manage datastores. With the
API, it's also possible to manage disks and other server-side resources.
The backup client uses this API to access the backed up data. With the command
line tool ``proxmox-backup-client`` you can create backups and restore data.
@ -32,7 +32,7 @@ For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
A single backup is allowed to contain several archives. For example, when you
backup a :term:`virtual machine`, each disk is stored as a separate archive
inside that backup. The VM configuration itself is stored as an extra file.
This way, it's easy to access and restore only important parts of the backup,
without the need to scan the whole backup.
@ -44,29 +44,29 @@ Main Features
:term:`container`\ s.
:Performance: The whole software stack is written in :term:`Rust`,
in order to provide high speed and memory efficiency.
:Deduplication: Periodic backups produce large amounts of duplicate
data. The deduplication layer avoids redundancy and minimizes the storage
space used.
:Incremental backups: Changes between backups are typically low. Reading and
sending only the delta reduces the storage and network impact of backups.
:Data Integrity: The built-in `SHA-256`_ checksum algorithm ensures accuracy and
consistency in your backups.
:Remote Sync: It is possible to efficiently synchronize data to remote
sites. Only deltas containing new data are transferred.
:Compression: The ultra-fast Zstandard_ compression is able to compress
several gigabytes of data per second.
:Encryption: Backups can be encrypted on the client-side, using AES-256 in
Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
provides very high performance on modern hardware.
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
user interface.
:Open Source: No secrets. Proxmox Backup Server is free and open-source
@ -80,11 +80,11 @@ Reasons for Data Backup?
------------------------
The main purpose of a backup is to protect against data loss. Data loss can be
caused by both faulty hardware and human error.
A common mistake is to accidentally delete a file or folder which is still
required. Virtualization can even amplify this problem, as deleting a whole
virtual machine can be as easy as pressing a single button.
For administrators, backups can serve as a useful toolkit for temporarily
storing data. For example, it is common practice to perform full backups before
@ -104,16 +104,16 @@ Software Stack
Proxmox Backup Server consists of multiple components:
* A server-daemon providing, among other things, a RESTful API, super-fast
asynchronous tasks, lightweight usage statistic collection, scheduling
events, strict separation of privileged and unprivileged execution
environments
* A JavaScript management web interface
* A management CLI tool for the server (`proxmox-backup-manager`)
* A client CLI tool (`proxmox-backup-client`) to access the server easily from
any `Linux amd64` environment
Aside from the web interface, everything is written in the Rust programming
language.
"The Rust programming language helps you write faster, more reliable software.
@ -143,6 +143,7 @@ Mailing Lists
Proxmox Backup Server is fully open-source and contributions are welcome! Here
is the primary communication channel for developers:
:Mailing list for developers: `PBS Development List`_
Bug Tracker
@ -1,3 +1,6 @@
.. _chapter-zfs:
ZFS on Linux
------------
@ -3,8 +3,8 @@
Debian Package Repositories
---------------------------
All Debian based systems use APT_ as a package management tool. The lists of
repositories are defined in ``/etc/apt/sources.list`` and the ``.list`` files found
in the ``/etc/apt/sources.list.d/`` directory. Updates can be installed directly
with the ``apt`` command line tool, or via the GUI.
@ -26,11 +26,10 @@ update``.
.. FIXME for 7.0: change security update suite to bullseye-security
In addition, you need a package repository from Proxmox to get Proxmox Backup updates.
During the Proxmox Backup beta phase, only one repository (pbstest) will be
available. Once released, an Enterprise repository for production use and a
no-subscription repository will be provided.
SecureApt
@ -39,8 +38,8 @@ SecureApt
The `Release` files in the repositories are signed with GnuPG. APT is using
these signatures to verify that all packages are from a trusted source.
If you install Proxmox Backup Server from an official ISO image, the
verification key is already installed.
If you install Proxmox Backup Server on top of Debian, download and install the
key with the following commands:
@ -136,17 +135,17 @@ During the public beta, there is a repository called ``pbstest``. This one
contains the latest packages and is heavily used by developers to test new
features.
.. .. warning:: the ``pbstest`` repository should (as the name implies)
only be used to test new features or bug fixes.
You can access this repository by adding the following line to
``/etc/apt/sources.list``:
.. code-block:: sources.list
:caption: sources.list entry for ``pbstest``
deb http://download.proxmox.com/debian/pbs buster pbstest
If you installed Proxmox Backup Server from the official beta ISO, you should
have this repository already configured in
``/etc/apt/sources.list.d/pbstest-beta.list``
@ -9,7 +9,7 @@ which caters to a similar use-case.
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
Backup Server, for example, efficient storage of hardlinks.
The format is designed to reduce storage space needed on the server by achieving
a high level of deduplication.
Creating an Archive
^^^^^^^^^^^^^^^^^^^
@ -18,7 +18,7 @@ Run the following command to create an archive of a folder named ``source``:
.. code-block:: console
# pxar create archive.pxar /path/to/source
This will create a new archive called ``archive.pxar`` with the contents of the
``source`` folder.
@ -29,45 +29,44 @@ This will create a new archive called ``archive.pxar`` with the contents of the
By default, ``pxar`` will skip certain mountpoints and will not follow device
boundaries. This design decision is based on the primary use case of creating
archives for backups. It makes sense to not back up the contents of certain
temporary or system specific files.
To alter this behavior and follow device boundaries, use the
``--all-file-systems`` flag.
It is possible to exclude certain files and/or folders from the archive by
passing the ``--exclude`` parameter with ``gitignore``\-style match patterns.
For example, you can exclude all files ending in ``.txt`` from the archive
by running:
.. code-block:: console
# pxar create archive.pxar /path/to/source --exclude '**/*.txt'
Be aware that the shell itself will try to expand all of the glob patterns before
invoking ``pxar``.
In order to avoid this, all globs have to be quoted correctly.
It is possible to pass the ``--exclude`` parameter multiple times, in order to
match more than one pattern. This allows you to use more complex
file exclusion/inclusion behavior. However, it is recommended to use
``.pxarexclude`` files instead for such cases.
For example you might want to exclude all ``.txt`` files except for a specific
one from the archive. This is achieved via the negated match pattern, prefixed
by ``!``.
All the glob patterns are relative to the ``source`` directory.
.. code-block:: console
# pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
.. NOTE:: The order of the glob match patterns matters as later ones override
previous ones. Permutations of the same patterns lead to different results.
``pxar`` will store the list of glob match patterns passed as parameters via the
command line, in a file called ``.pxarexclude-cli`` at the root of
the archive.
If a file with this name is already present in the source folder during archive
creation, this file is not included in the archive and the file containing the
@ -86,23 +85,23 @@ The behavior is the same as described in :ref:`creating-backups`.
Extracting an Archive
^^^^^^^^^^^^^^^^^^^^^
An existing archive, ``archive.pxar``, is extracted to a ``target`` directory
with the following command:
.. code-block:: console
# pxar extract archive.pxar /path/to/target
If no target is provided, the content of the archive is extracted to the current
working directory.
In order to restore only parts of an archive, single files, and/or folders,
it is possible to pass the corresponding glob match patterns as additional
parameters or to use the patterns stored in a file:
.. code-block:: console
# pxar extract etc.pxar /restore/target/etc --pattern '**/*.conf'
The above example restores all ``.conf`` files encountered in any of the
sub-folders in the archive ``etc.pxar`` to the target ``/restore/target/etc``.
@ -2,8 +2,7 @@ use std::io::Write;
use anyhow::{Error};
use proxmox_backup::api2::types::Userid;
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
pub struct DummyWriter {
@ -27,7 +26,7 @@ async fn run() -> Result<(), Error> {
let host = "localhost";
let username = Userid::root_userid();
let options = HttpClientOptions::new()
.interactive(true)
@ -35,7 +34,7 @@ async fn run() -> Result<(), Error> {
let client = HttpClient::new(host, username, options)?;
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
.await?;
@ -1,5 +1,6 @@
use anyhow::{Error};
use proxmox_backup::api2::types::Userid;
use proxmox_backup::client::*;
async fn upload_speed() -> Result<f64, Error> {
@ -7,7 +8,7 @@ async fn upload_speed() -> Result<f64, Error> {
let host = "localhost";
let datastore = "store2";
let username = Userid::root_userid();
let options = HttpClientOptions::new()
.interactive(true)
@ -15,9 +16,9 @@ async fn upload_speed() -> Result<f64, Error> {
let client = HttpClient::new(host, username, options)?;
let backup_time = proxmox::tools::time::epoch_i64();
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
println!("start upload speed test");
let res = client.upload_speedtest(true).await?;
@ -2,13 +2,12 @@ use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use proxmox::api::{api, RpcEnvironment, Permission};
use proxmox::api::router::{Router, SubdirMap};
use proxmox::{sortable, identity};
use proxmox::{http_err, list_subdirs_api_method};
use crate::tools::ticket::{self, Empty, Ticket};
use crate::auth_helpers::*;
use crate::api2::types::*;
@ -23,7 +22,7 @@ pub mod role;
/// returns Ok(true) if a ticket has to be created
/// and Ok(false) if not
fn authenticate_user(
userid: &Userid,
password: &str,
path: Option<String>,
privs: Option<String>,
@ -31,31 +30,35 @@ fn authenticate_user(
) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
if !user_info.is_active_user(&userid) {
bail!("user account disabled or expired.");
}
if password.starts_with("PBS:") {
if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
.and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
{
if *userid == ticket_userid {
return Ok(true);
}
bail!("ticket login failed - wrong userid");
}
} else if password.starts_with("PBSTERM:") {
if path.is_none() || privs.is_none() || port.is_none() {
bail!("cannot check termnal ticket without path, priv and port");
}
let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
let privilege_name = privs
.ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;
if let Ok(Empty) = Ticket::parse(password)
.and_then(|ticket| ticket.verify(
public_auth_key(),
ticket::TERM_PREFIX,
Some(&ticket::term_aad(userid, &path, port)),
))
{
for (name, privilege) in PRIVILEGES {
if *name == privilege_name {
@ -66,7 +69,7 @@ fn authenticate_user(
} }
} }
user_info.check_privs(username, &path_vec, *privilege, false)?; user_info.check_privs(userid, &path_vec, *privilege, false)?;
return Ok(false); return Ok(false);
} }
} }
@ -75,7 +78,7 @@ fn authenticate_user(
} }
} }
let _ = crate::auth::authenticate_user(username, password)?; let _ = crate::auth::authenticate_user(userid, password)?;
Ok(true) Ok(true)
} }
@ -83,7 +86,7 @@ fn authenticate_user(
input: { input: {
properties: { properties: {
username: { username: {
schema: PROXMOX_USER_ID_SCHEMA, type: Userid,
}, },
password: { password: {
schema: PASSWORD_SCHEMA, schema: PASSWORD_SCHEMA,
@ -130,7 +133,7 @@ fn authenticate_user(
/// ///
/// Returns: An authentication ticket with additional infos. /// Returns: An authentication ticket with additional infos.
fn create_ticket( fn create_ticket(
username: String, username: Userid,
password: String, password: String,
path: Option<String>, path: Option<String>,
privs: Option<String>, privs: Option<String>,
@ -138,7 +141,7 @@ fn create_ticket(
) -> Result<Value, Error> { ) -> Result<Value, Error> {
match authenticate_user(&username, &password, path, privs, port) { match authenticate_user(&username, &password, path, privs, port) {
Ok(true) => { Ok(true) => {
let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some(&username), None)?; let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
let token = assemble_csrf_prevention_token(csrf_secret(), &username); let token = assemble_csrf_prevention_token(csrf_secret(), &username);
@ -156,7 +159,7 @@ fn create_ticket(
Err(err) => { Err(err) => {
let client_ip = "unknown"; // $rpcenv->get_client_ip() || ''; let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string()); log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
Err(http_err!(UNAUTHORIZED, "permission check failed.".into())) Err(http_err!(UNAUTHORIZED, "permission check failed."))
} }
} }
} }
@ -165,7 +168,7 @@ fn create_ticket(
input: { input: {
properties: { properties: {
userid: { userid: {
schema: PROXMOX_USER_ID_SCHEMA, type: Userid,
}, },
password: { password: {
schema: PASSWORD_SCHEMA, schema: PASSWORD_SCHEMA,
@ -183,13 +186,15 @@ fn create_ticket(
/// Each user is allowed to change his own password. Superuser /// Each user is allowed to change his own password. Superuser
/// can change all passwords. /// can change all passwords.
fn change_password( fn change_password(
userid: String, userid: Userid,
password: String, password: String,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let current_user = rpcenv.get_user() let current_user: Userid = rpcenv
.ok_or_else(|| format_err!("unknown user"))?; .get_user()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
let mut allowed = userid == current_user; let mut allowed = userid == current_user;
@ -205,9 +210,8 @@ fn change_password(
bail!("you are not authorized to change the password."); bail!("you are not authorized to change the password.");
} }
let (username, realm) = crate::auth::parse_userid(&userid)?; let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
let authenticator = crate::auth::lookup_authenticator(&realm)?; authenticator.store_password(userid.name(), &password)?;
authenticator.store_password(&username, &password)?;
Ok(Value::Null) Ok(Value::Null)
} }
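
Taken together, the hunks above swap the old assemble_rsa_ticket/verify_rsa_ticket helpers for the new Ticket type. A minimal sketch of that sign/verify round-trip, assuming the crate-internal Ticket, Userid, private_auth_key() and public_auth_key() items exactly as they appear in this diff:

    // sketch only: Ticket, Userid, private_auth_key() and public_auth_key()
    // are proxmox-backup internals, used here the same way as in the diff above
    fn ticket_roundtrip(userid: &Userid) -> Result<(), anyhow::Error> {
        // create_ticket(): build and sign a "PBS" ticket for the authenticated user
        let signed = Ticket::new("PBS", userid)?.sign(private_auth_key(), None)?;

        // authenticate_user(): parse the presented ticket, check its signature,
        // then compare the embedded userid against the login name
        let ticket_userid: Userid = Ticket::<Userid>::parse(&signed)?
            .verify(public_auth_key(), "PBS", None)?;
        assert_eq!(userid, &ticket_userid);
        Ok(())
    }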

View File

@@ -2,6 +2,7 @@ use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment, Permission};
+use proxmox::tools::fs::open_file_locked;

use crate::api2::types::*;
use crate::config::acl;
@@ -141,7 +142,7 @@ pub fn read_acl(
        },
        userid: {
            optional: true,
-            schema: PROXMOX_USER_ID_SCHEMA,
+            type: Userid,
        },
        group: {
            optional: true,
@@ -167,14 +168,14 @@ pub fn update_acl(
    path: String,
    role: String,
    propagate: Option<bool>,
-    userid: Option<String>,
+    userid: Option<Userid>,
    group: Option<String>,
    delete: Option<bool>,
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut tree, expected_digest) = acl::config()?;
@@ -192,7 +193,7 @@ pub fn update_acl(
    } else if let Some(ref userid) = userid {
        if !delete { // Note: we allow to delete non-existent users
            let user_cfg = crate::config::user::cached_config()?;
-            if user_cfg.sections.get(userid).is_none() {
+            if user_cfg.sections.get(&userid.to_string()).is_none() {
                bail!("no such user.");
            }
        }

View File

@@ -14,7 +14,7 @@ use crate::config::acl::{Role, ROLE_NAMES, PRIVILEGES};
    type: Array,
    items: {
        type: Object,
-        description: "User name with description.",
+        description: "Role with description and privileges.",
        properties: {
            roleid: {
                type: Role,

View File

@@ -3,10 +3,12 @@ use serde_json::Value;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::{Schema, StringSchema};
+use proxmox::tools::fs::open_file_locked;

use crate::api2::types::*;
use crate::config::user;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
+use crate::config::cached_user_info::CachedUserInfo;

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
@@ -24,10 +26,11 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
        items: { type: user::User },
    },
    access: {
-        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
+        permission: &Permission::Anybody,
+        description: "Returns all or just the logged-in user, depending on privileges.",
    },
)]
-/// List all users
+/// List users
pub fn list_users(
    _param: Value,
    _info: &ApiMethod,
@@ -36,11 +39,21 @@ pub fn list_users(
    let (config, digest) = user::config()?;

-    let list = config.convert_to_typed_array("user")?;
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
+    let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
+
+    let filter_by_privs = |user: &user::User| {
+        top_level_allowed || user.userid == userid
+    };
+
+    let list:Vec<user::User> = config.convert_to_typed_array("user")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

-    Ok(list)
+    Ok(list.into_iter().filter(filter_by_privs).collect())
}

#[api(
@@ -48,7 +61,7 @@ pub fn list_users(
    input: {
        properties: {
            userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
            },
            comment: {
                schema: SINGLE_LINE_COMMENT_SCHEMA,
@@ -87,25 +100,24 @@ pub fn list_users(
/// Create new user.
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let user: user::User = serde_json::from_value(param)?;

    let (mut config, _digest) = user::config()?;

-    if let Some(_) = config.sections.get(&user.userid) {
+    if let Some(_) = config.sections.get(user.userid.as_str()) {
        bail!("user '{}' already exists.", user.userid);
    }

-    let (username, realm) = crate::auth::parse_userid(&user.userid)?;
-    let authenticator = crate::auth::lookup_authenticator(&realm)?;
+    let authenticator = crate::auth::lookup_authenticator(&user.userid.realm())?;

-    config.set_data(&user.userid, "user", &user)?;
+    config.set_data(user.userid.as_str(), "user", &user)?;

    user::save_config(&config)?;

    if let Some(password) = password {
-        authenticator.store_password(&username, &password)?;
+        authenticator.store_password(user.userid.name(), &password)?;
    }

    Ok(())
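
The user and ACL handlers above all follow the same lock-read-modify-write pattern for configuration files. A rough sketch of that flow, reusing only the crate-internal calls visible in this diff (open_file_locked, user::config, set_data, save_config):

    // sketch of the locked update pattern shared by create_user/update_user/delete_user
    fn locked_user_update(user: &user::User) -> Result<(), anyhow::Error> {
        // take the lock file first, so concurrent API calls serialize
        let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

        // read the current config (and its digest) while holding the lock
        let (mut config, _digest) = user::config()?;

        // modify the in-memory section data ...
        config.set_data(user.userid.as_str(), "user", user)?;

        // ... and write it back; the lock guard is released when _lock drops
        user::save_config(&config)?;
        Ok(())
    }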
@ -115,7 +127,7 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
input: { input: {
properties: { properties: {
userid: { userid: {
schema: PROXMOX_USER_ID_SCHEMA, type: Userid,
}, },
}, },
}, },
@ -124,13 +136,16 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
type: user::User, type: user::User,
}, },
access: { access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false), permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
&Permission::UserParam("userid"),
]),
}, },
)] )]
/// Read user configuration data. /// Read user configuration data.
pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> { pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
let (config, digest) = user::config()?; let (config, digest) = user::config()?;
let user = config.lookup("user", &userid)?; let user = config.lookup("user", userid.as_str())?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into(); rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(user) Ok(user)
} }
@ -140,7 +155,7 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
input: { input: {
properties: { properties: {
userid: { userid: {
schema: PROXMOX_USER_ID_SCHEMA, type: Userid,
}, },
comment: { comment: {
optional: true, optional: true,
@ -177,12 +192,15 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
}, },
}, },
access: { access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false), permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
}, },
)] )]
/// Update user configuration. /// Update user configuration.
pub fn update_user( pub fn update_user(
userid: String, userid: Userid,
comment: Option<String>, comment: Option<String>,
enable: Option<bool>, enable: Option<bool>,
expire: Option<i64>, expire: Option<i64>,
@ -193,7 +211,7 @@ pub fn update_user(
digest: Option<String>, digest: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = user::config()?; let (mut config, expected_digest) = user::config()?;
@ -202,7 +220,7 @@ pub fn update_user(
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
} }
let mut data: user::User = config.lookup("user", &userid)?; let mut data: user::User = config.lookup("user", userid.as_str())?;
if let Some(comment) = comment { if let Some(comment) = comment {
let comment = comment.trim().to_string(); let comment = comment.trim().to_string();
@ -222,9 +240,8 @@ pub fn update_user(
} }
if let Some(password) = password { if let Some(password) = password {
let (username, realm) = crate::auth::parse_userid(&userid)?; let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
let authenticator = crate::auth::lookup_authenticator(&realm)?; authenticator.store_password(userid.name(), &password)?;
authenticator.store_password(&username, &password)?;
} }
if let Some(firstname) = firstname { if let Some(firstname) = firstname {
@ -238,7 +255,7 @@ pub fn update_user(
data.email = if email.is_empty() { None } else { Some(email) }; data.email = if email.is_empty() { None } else { Some(email) };
} }
config.set_data(&userid, "user", &data)?; config.set_data(userid.as_str(), "user", &data)?;
user::save_config(&config)?; user::save_config(&config)?;
@ -250,7 +267,7 @@ pub fn update_user(
input: { input: {
properties: { properties: {
userid: { userid: {
schema: PROXMOX_USER_ID_SCHEMA, type: Userid,
}, },
digest: { digest: {
optional: true, optional: true,
@ -259,13 +276,16 @@ pub fn update_user(
}, },
}, },
access: { access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false), permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
&Permission::UserParam("userid"),
]),
}, },
)] )]
/// Remove a user from the configuration file. /// Remove a user from the configuration file.
pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error> { pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = user::config()?; let (mut config, expected_digest) = user::config()?;
@ -274,8 +294,8 @@ pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error>
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
} }
match config.sections.get(&userid) { match config.sections.get(userid.as_str()) {
Some(_) => { config.sections.remove(&userid); }, Some(_) => { config.sections.remove(userid.as_str()); },
None => bail!("user '{}' does not exist.", userid), None => bail!("user '{}' does not exist.", userid),
} }

View File

@ -1,6 +1,7 @@
use std::collections::{HashSet, HashMap}; use std::collections::{HashSet, HashMap};
use std::ffi::OsStr; use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt; use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
@ -10,7 +11,8 @@ use serde_json::{json, Value};
use proxmox::api::{ use proxmox::api::{
api, ApiResponseFuture, ApiHandler, ApiMethod, Router, api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
RpcEnvironment, RpcEnvironmentType, Permission, UserInformation}; RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap; use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*; use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions}; use proxmox::tools::fs::{replace_file, CreateOptions};
@@ -36,7 +38,11 @@ use crate::config::acl::{
    PRIV_DATASTORE_BACKUP,
};

-fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
+fn check_backup_owner(
+    store: &DataStore,
+    group: &BackupGroup,
+    userid: &Userid,
+) -> Result<(), Error> {
    let owner = store.get_owner(group)?;
    if &owner != userid {
        bail!("backup owner check failed ({} != {})", userid, owner);
@@ -44,9 +50,12 @@ fn check_backup_owner(
    Ok(())
}

-fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
+fn read_backup_index(
+    store: &DataStore,
+    backup_dir: &BackupDir,
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

-    let (manifest, manifest_crypt_mode, index_size) = store.load_manifest(backup_dir)?;
+    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
@@ -59,18 +68,22 @@ fn read_backup_index(
    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
-        crypt_mode: Some(manifest_crypt_mode),
+        crypt_mode: match manifest.signature {
+            Some(_) => Some(CryptMode::SignOnly),
+            None => Some(CryptMode::None),
+        },
        size: Some(index_size),
    });

-    Ok(result)
+    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
-) -> Result<Vec<BackupContent>, Error> {
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

-    let mut files = read_backup_index(&store, &info.backup_dir)?;
+    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
@@ -86,7 +99,7 @@ fn get_all_snapshot_files(
    });
    }

-    Ok(files)
+    Ok((manifest, files))
}
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> { fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
@ -130,9 +143,9 @@ fn list_groups(
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> { ) -> Result<Vec<GroupListItem>, Error> {
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
@ -153,13 +166,13 @@ fn list_groups(
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?; let owner = datastore.get_owner(group)?;
if !list_all { if !list_all {
if owner != username { continue; } if owner != userid { continue; }
} }
let result_item = GroupListItem { let result_item = GroupListItem {
backup_type: group.backup_type().to_string(), backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(), backup_id: group.backup_id().to_string(),
last_backup: info.backup_dir.backup_time().timestamp(), last_backup: info.backup_dir.backup_time(),
backup_count: list.len() as u64, backup_count: list.len() as u64,
files: info.files.clone(), files: info.files.clone(),
owner: Some(owner), owner: Some(owner),
@ -211,20 +224,22 @@ pub fn list_snapshot_files(
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> { ) -> Result<Vec<BackupContent>, Error> {
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time); let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0; let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; } if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
let info = BackupInfo::new(&datastore.base_path(), snapshot)?; let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
get_all_snapshot_files(&datastore, &info) let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
Ok(files)
} }
#[api( #[api(
@ -261,18 +276,18 @@ fn delete_snapshot(
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time); let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0; let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; } if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
datastore.remove_backup_dir(&snapshot)?; datastore.remove_backup_dir(&snapshot, false)?;
Ok(Value::Null) Ok(Value::Null)
} }
@ -317,9 +332,9 @@ pub fn list_snapshots (
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> { ) -> Result<Vec<SnapshotListItem>, Error> {
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
@@ -342,34 +357,55 @@ pub fn list_snapshots (
        let owner = datastore.get_owner(group)?;
        if !list_all {
-            if owner != username { continue; }
+            if owner != userid { continue; }
        }

        let mut size = None;

-        let files = match get_all_snapshot_files(&datastore, &info) {
-            Ok(files) => {
+        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
+            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
-                files
+                // extract the first line from notes
+                let comment: Option<String> = manifest.unprotected["notes"]
+                    .as_str()
+                    .and_then(|notes| notes.lines().next())
+                    .map(String::from);
+
+                let verify = manifest.unprotected["verify_state"].clone();
+                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
+                    Ok(verify) => verify,
+                    Err(err) => {
+                        eprintln!("error parsing verification state : '{}'", err);
+                        None
+                    }
+                };
+
+                (comment, verify, files)
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
-                info
-                    .files
-                    .iter()
-                    .map(|x| BackupContent {
-                        filename: x.to_string(),
-                        size: None,
-                        crypt_mode: None,
-                    })
-                    .collect()
+                (
+                    None,
+                    None,
+                    info
+                        .files
+                        .iter()
+                        .map(|x| BackupContent {
+                            filename: x.to_string(),
+                            size: None,
+                            crypt_mode: None,
+                        })
+                        .collect()
+                )
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
-            backup_time: info.backup_dir.backup_time().timestamp(),
+            backup_time: info.backup_dir.backup_time(),
+            comment,
+            verification,
            files,
            size,
            owner: Some(owner),
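
The new comment and verification fields are read straight out of the manifest's unprotected JSON section. A small self-contained sketch of that extraction with serde_json, where SnapshotVerifyState is only a stand-in for the crate type:

    use serde::Deserialize;
    use serde_json::json;

    // stand-in for the crate's SnapshotVerifyState; the real type carries more fields
    #[derive(Deserialize, Debug)]
    struct SnapshotVerifyState { state: String }

    fn main() {
        let unprotected = json!({
            "notes": "nightly backup\nsecond line is not part of the comment",
            "verify_state": { "state": "ok" },
        });

        // the first line of the notes becomes the snapshot comment
        let comment: Option<String> = unprotected["notes"]
            .as_str()
            .and_then(|notes| notes.lines().next())
            .map(String::from);

        // a missing or malformed verify_state simply yields None
        let verification: Option<SnapshotVerifyState> =
            serde_json::from_value(unprotected["verify_state"].clone()).unwrap_or(None);

        println!("comment={:?} verification={:?}", comment, verification);
    }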
@ -454,7 +490,7 @@ pub fn verify(
match (backup_type, backup_id, backup_time) { match (backup_type, backup_id, backup_time) {
(Some(backup_type), Some(backup_id), Some(backup_time)) => { (Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time); worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
let dir = BackupDir::new(backup_type, backup_id, backup_time); let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
backup_dir = Some(dir); backup_dir = Some(dir);
} }
(Some(backup_type), Some(backup_id), None) => { (Some(backup_type), Some(backup_id), None) => {
@@ -465,27 +501,50 @@ pub fn verify(
        (None, None, None) => {
            worker_id = store.clone();
        }
-        _ => bail!("parameters do not spefify a backup group or snapshot"),
+        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let upid_str = WorkerTask::new_thread(
-        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
-        {
-            let success = if let Some(backup_dir) = backup_dir {
-                verify_backup_dir(&datastore, &backup_dir, &worker)?
+        "verify",
+        Some(worker_id.clone()),
+        userid,
+        to_stdout,
+        move |worker| {
+            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
+            let failed_dirs = if let Some(backup_dir) = backup_dir {
+                let mut res = Vec::new();
+                if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
+                    res.push(backup_dir.to_string());
+                }
+                res
            } else if let Some(backup_group) = backup_group {
-                verify_backup_group(&datastore, &backup_group, &worker)?
+                let (_count, failed_dirs) = verify_backup_group(
+                    datastore,
+                    &backup_group,
+                    verified_chunks,
+                    corrupt_chunks,
+                    None,
+                    worker.clone(),
+                )?;
+                failed_dirs
            } else {
-                verify_all_backups(&datastore, &worker)?
+                verify_all_backups(datastore, worker.clone())?
            };
-            if !success {
-                bail!("verfication failed - please check the log for details");
+            if failed_dirs.len() > 0 {
+                worker.log("Failed to verify following snapshots:");
+                for dir in failed_dirs {
+                    worker.log(format!("\t{}", dir));
+                }
+                bail!("verification failed - please check the log for details");
            }
            Ok(())
-        })?;
+        },
+    )?;

    Ok(json!(upid_str))
}
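
The verify worker now shares one set of already-verified and one set of corrupt chunk digests across all snapshots of a run, so chunks referenced by several snapshots are only checked once. A minimal, self-contained sketch of that sharing pattern:

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    fn main() {
        // digests checked once are remembered for the whole verification run
        let verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>> =
            Arc::new(Mutex::new(HashSet::with_capacity(1024 * 16)));
        let corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>> =
            Arc::new(Mutex::new(HashSet::with_capacity(64)));

        let digest = [0u8; 32];
        if !corrupt_chunks.lock().unwrap().contains(&digest)
            && verified_chunks.lock().unwrap().insert(digest)
        {
            // first time this chunk is seen in the run: do the expensive check here
        }

        // each snapshot verification gets its own clone of the shared sets
        let _per_snapshot = Arc::clone(&verified_chunks);
        let _ = corrupt_chunks;
    }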
@ -570,9 +629,9 @@ fn prune(
let backup_type = tools::required_string_param(&param, "backup-type")?; let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?; let backup_id = tools::required_string_param(&param, "backup-id")?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let dry_run = param["dry-run"].as_bool().unwrap_or(false); let dry_run = param["dry-run"].as_bool().unwrap_or(false);
@ -581,7 +640,7 @@ fn prune(
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0; let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, &group, &username)?; } if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
let prune_options = PruneOptions { let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(), keep_last: param["keep-last"].as_u64(),
@ -614,7 +673,7 @@ fn prune(
prune_result.push(json!({ prune_result.push(json!({
"backup-type": group.backup_type(), "backup-type": group.backup_type(),
"backup-id": group.backup_id(), "backup-id": group.backup_id(),
"backup-time": backup_time.timestamp(), "backup-time": backup_time,
"keep": keep, "keep": keep,
})); }));
} }
@ -623,7 +682,7 @@ fn prune(
// We use a WorkerTask just to have a task log, but run synchrounously // We use a WorkerTask just to have a task log, but run synchrounously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?; let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
let result = try_block! { let result = try_block! {
if keep_all { if keep_all {
@ -638,7 +697,7 @@ fn prune(
if keep_all { keep = true; } if keep_all { keep = true; }
let backup_time = info.backup_dir.backup_time(); let backup_time = info.backup_dir.backup_time();
let timestamp = BackupDir::backup_time_to_string(backup_time); let timestamp = info.backup_dir.backup_time_string();
let group = info.backup_dir.group(); let group = info.backup_dir.group();
@ -655,12 +714,12 @@ fn prune(
prune_result.push(json!({ prune_result.push(json!({
"backup-type": group.backup_type(), "backup-type": group.backup_type(),
"backup-id": group.backup_id(), "backup-id": group.backup_id(),
"backup-time": backup_time.timestamp(), "backup-time": backup_time,
"keep": keep, "keep": keep,
})); }));
if !(dry_run || keep) { if !(dry_run || keep) {
datastore.remove_backup_dir(&info.backup_dir)?; datastore.remove_backup_dir(&info.backup_dir, true)?;
} }
} }
@ -705,11 +764,15 @@ fn start_garbage_collection(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread( let upid_str = WorkerTask::new_thread(
"garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker| "garbage_collection",
{ Some(store.clone()),
Userid::root_userid().clone(),
to_stdout,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store)); worker.log(format!("starting garbage collection on store {}", store));
datastore.garbage_collection(&worker) datastore.garbage_collection(&worker)
})?; },
)?;
Ok(json!(upid_str)) Ok(json!(upid_str))
} }
@ -773,13 +836,13 @@ fn get_datastore_list(
let (config, _digest) = datastore::config()?; let (config, _digest) = datastore::config()?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let mut list = Vec::new(); let mut list = Vec::new();
for (store, (_, data)) in &config.sections { for (store, (_, data)) in &config.sections {
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0; let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if allowed { if allowed {
let mut entry = json!({ "store": store }); let mut entry = json!({ "store": store });
@ -824,9 +887,9 @@ fn download_file(
let store = tools::required_string_param(&param, "store")?; let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?; let datastore = DataStore::lookup_datastore(store)?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned(); let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@ -834,10 +897,10 @@ fn download_file(
let backup_id = tools::required_string_param(&param, "backup-id")?; let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?; let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time); let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0; let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; } if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name); println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
@ -846,8 +909,8 @@ fn download_file(
path.push(&file_name); path.push(&file_name);
let file = tokio::fs::File::open(&path) let file = tokio::fs::File::open(&path)
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err))) .await
.await?; .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new()) let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze())) .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
@ -897,9 +960,9 @@ fn download_file_decoded(
let store = tools::required_string_param(&param, "store")?; let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?; let datastore = DataStore::lookup_datastore(store)?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned(); let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@ -907,12 +970,12 @@ fn download_file_decoded(
let backup_id = tools::required_string_param(&param, "backup-id")?; let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?; let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time); let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0; let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; } if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let files = read_backup_index(&datastore, &backup_dir)?; let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files { for file in files {
if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
bail!("cannot decode '{}' - is encrypted", file_name); bail!("cannot decode '{}' - is encrypted", file_name);
@ -931,8 +994,10 @@ fn download_file_decoded(
"didx" => { "didx" => {
let index = DynamicIndexReader::open(&path) let index = DynamicIndexReader::open(&path)
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None); let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = AsyncIndexReader::new(index, chunk_reader); let reader = AsyncIndexReader::new(index, chunk_reader);
Body::wrap_stream(AsyncReaderStream::new(reader) Body::wrap_stream(AsyncReaderStream::new(reader)
.map_err(move |err| { .map_err(move |err| {
@ -944,7 +1009,10 @@ fn download_file_decoded(
let index = FixedIndexReader::open(&path) let index = FixedIndexReader::open(&path)
.map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?; .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
let chunk_reader = LocalChunkReader::new(datastore, None); let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = AsyncIndexReader::new(index, chunk_reader); let reader = AsyncIndexReader::new(index, chunk_reader);
Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024) Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
.map_err(move |err| { .map_err(move |err| {
@ -954,7 +1022,9 @@ fn download_file_decoded(
}, },
"blob" => { "blob" => {
let file = std::fs::File::open(&path) let file = std::fs::File::open(&path)
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?; .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
// FIXME: load full blob to verify index checksum?
Body::wrap_stream( Body::wrap_stream(
WrappedReaderStream::new(DataBlobReader::new(file, None)?) WrappedReaderStream::new(DataBlobReader::new(file, None)?)
@ -1013,10 +1083,10 @@ fn upload_backup_log(
let backup_id = tools::required_string_param(&param, "backup-id")?; let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?; let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time); let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &username)?; check_backup_owner(&datastore, backup_dir.group(), &userid)?;
let mut path = datastore.base_path(); let mut path = datastore.base_path();
path.push(backup_dir.relative_path()); path.push(backup_dir.relative_path());
@ -1027,7 +1097,7 @@ fn upload_backup_log(
} }
println!("Upload backup log to {}/{}/{}/{}/{}", store, println!("Upload backup log to {}/{}/{}/{}/{}", store,
backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name); backup_type, backup_id, backup_dir.backup_time_string(), file_name);
let data = req_body let data = req_body
.map_err(Error::from) .map_err(Error::from)
@ -1037,11 +1107,10 @@ fn upload_backup_log(
}) })
.await?; .await?;
let blob = DataBlob::from_raw(data)?; // always verify blob/CRC at server side
// always verify CRC at server side let blob = DataBlob::load_from_reader(&mut &data[..])?;
blob.verify_crc()?;
let raw_data = blob.raw_data(); replace_file(&path, blob.raw_data(), CreateOptions::new())?;
replace_file(&path, raw_data, CreateOptions::new())?;
// fixme: use correct formatter // fixme: use correct formatter
Ok(crate::server::formatter::json_response(Ok(Value::Null))) Ok(crate::server::formatter::json_response(Ok(Value::Null)))
@ -1086,23 +1155,35 @@ fn catalog(
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time); let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0; let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; } if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let file_name = CATALOG_NAME;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
bail!("cannot decode '{}' - is encrypted", file_name);
}
}
let mut path = datastore.base_path(); let mut path = datastore.base_path();
path.push(backup_dir.relative_path()); path.push(backup_dir.relative_path());
path.push(CATALOG_NAME); path.push(file_name);
let index = DynamicIndexReader::open(&path) let index = DynamicIndexReader::open(&path)
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let chunk_reader = LocalChunkReader::new(datastore, None); let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = BufferedDynamicReader::new(index, chunk_reader); let reader = BufferedDynamicReader::new(index, chunk_reader);
let mut catalog_reader = CatalogReader::new(reader); let mut catalog_reader = CatalogReader::new(reader);
@ -1158,7 +1239,7 @@ fn catalog(
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new( pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&pxar_file_download), &ApiHandler::AsyncHttp(&pxar_file_download),
&ObjectSchema::new( &ObjectSchema::new(
"Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.", "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
&sorted!([ &sorted!([
("store", false, &DATASTORE_SCHEMA), ("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA), ("backup-type", false, &BACKUP_TYPE_SCHEMA),
@ -1185,9 +1266,9 @@ fn pxar_file_download(
let store = tools::required_string_param(&param, "store")?; let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]); let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let filepath = tools::required_string_param(&param, "filepath")?.to_owned(); let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
@ -1195,13 +1276,10 @@ fn pxar_file_download(
let backup_id = tools::required_string_param(&param, "backup-id")?; let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?; let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time); let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0; let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; } if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
let mut components = base64::decode(&filepath)?; let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 { if components.len() > 0 && components[0] == '/' as u8 {
@ -1209,15 +1287,26 @@ fn pxar_file_download(
} }
let mut split = components.splitn(2, |c| *c == '/' as u8); let mut split = components.splitn(2, |c| *c == '/' as u8);
let pxar_name = split.next().unwrap(); let pxar_name = std::str::from_utf8(split.next().unwrap())?;
let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?; let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
bail!("cannot decode '{}' - is encrypted", pxar_name);
}
}
path.push(OsStr::from_bytes(&pxar_name)); let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
path.push(pxar_name);
let index = DynamicIndexReader::open(&path) let index = DynamicIndexReader::open(&path)
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let chunk_reader = LocalChunkReader::new(datastore, None); let (csum, size) = index.compute_csum();
manifest.verify_file(&pxar_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = BufferedDynamicReader::new(index, chunk_reader); let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size(); let archive_size = reader.archive_size();
let reader = LocalDynamicReadAt::new(reader); let reader = LocalDynamicReadAt::new(reader);
@ -1293,6 +1382,108 @@ fn get_rrd_stats(
) )
} }
#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
},
)]
/// Get "notes" for a specific backup
fn get_notes(
store: String,
backup_type: String,
backup_id: String,
backup_time: i64,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let manifest = datastore.load_manifest_json(&backup_dir)?;
let notes = manifest["unprotected"]["notes"]
.as_str()
.unwrap_or("");
Ok(String::from(notes))
}
#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
},
notes: {
description: "A multiline text.",
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
},
)]
/// Set "notes" for a specific backup
fn set_notes(
store: String,
backup_type: String,
backup_id: String,
backup_time: i64,
notes: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
let mut manifest = datastore.load_manifest_json(&backup_dir)?;
manifest["unprotected"]["notes"] = notes.into();
datastore.store_manifest(&backup_dir, manifest)?;
Ok(())
}
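
get_notes/set_notes keep the text in the manifest's unprotected section, so editing notes does not touch the signed part of the manifest. A small serde_json sketch of the same read/update, with plain JSON standing in for the datastore's load_manifest_json/store_manifest calls:

    use serde_json::json;

    fn main() {
        // the manifest is plain JSON; "unprotected" is not covered by its signature
        let mut manifest = json!({ "unprotected": {} });

        // set_notes(): store the text under unprotected.notes
        manifest["unprotected"]["notes"] = "maintenance window backup".into();

        // get_notes(): missing notes fall back to an empty string
        let notes = manifest["unprotected"]["notes"].as_str().unwrap_or("");
        println!("{}", notes);
    }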
#[sortable] #[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
( (
@ -1326,6 +1517,12 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
&Router::new() &Router::new()
.get(&API_METHOD_LIST_GROUPS) .get(&API_METHOD_LIST_GROUPS)
), ),
(
"notes",
&Router::new()
.get(&API_METHOD_GET_NOTES)
.put(&API_METHOD_SET_NOTES)
),
( (
"prune", "prune",
&Router::new() &Router::new()

View File

@@ -1,15 +1,15 @@
-use anyhow::{Error};
+use anyhow::{format_err, Error};
use serde_json::Value;
-use std::collections::HashMap;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};

use crate::api2::types::*;
-use crate::api2::pull::{get_pull_parameters};
+use crate::api2::pull::do_sync_job;
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
-use crate::server::{self, TaskListInfo, WorkerTask};
+use crate::server::UPID;
+use crate::config::jobstate::{Job, JobState};
use crate::tools::systemd::time::{
    parse_calendar_event, compute_next_event};
@@ -33,38 +33,32 @@ pub fn list_sync_jobs(

    let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;

-    let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
-    let tasks = server::read_task_list()?;
-
-    for info in tasks.iter() {
-        let worker_id = match &info.upid.worker_id {
-            Some(id) => id,
-            _ => { continue; },
-        };
-        if let Some(last) = last_tasks.get(worker_id) {
-            if last.upid.starttime < info.upid.starttime {
-                last_tasks.insert(worker_id.to_string(), &info);
-            }
-        } else {
-            last_tasks.insert(worker_id.to_string(), &info);
-        }
-    }
-
    for job in &mut list {
-        let mut last = 0;
-        if let Some(task) = last_tasks.get(&job.id) {
-            job.last_run_upid = Some(task.upid_str.clone());
-            if let Some((endtime, status)) = &task.state {
-                job.last_run_state = Some(String::from(status));
-                job.last_run_endtime = Some(*endtime);
-                last = *endtime;
-            }
-        }
+        let last_state = JobState::load("syncjob", &job.id)
+            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
+        let (upid, endtime, state, starttime) = match last_state {
+            JobState::Created { time } => (None, None, None, time),
+            JobState::Started { upid } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), None, None, parsed_upid.starttime)
+            },
+            JobState::Finished { upid, state } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
+            },
+        };
+
+        job.last_run_upid = upid;
+        job.last_run_state = state;
+        job.last_run_endtime = endtime;
+
+        let last = job.last_run_endtime.unwrap_or_else(|| starttime);

        job.next_run = (|| -> Option<i64> {
            let schedule = job.schedule.as_ref()?;
            let event = parse_calendar_event(&schedule).ok()?;
-            compute_next_event(&event, last, false).ok()
+            // ignore errors
+            compute_next_event(&event, last, false).unwrap_or_else(|_| None)
        })();
    }
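
Instead of scanning the whole task list, each sync job's last run is now read from a per-job state file and matched on three states. A self-contained sketch of that match; JobState here is only a simplified stand-in for the crate type (the real Started/Finished variants carry a UPID string from which the start time is parsed):

    // simplified stand-in for proxmox-backup's JobState
    enum JobState {
        Created { time: i64 },
        Started { upid: String, starttime: i64 },
        Finished { upid: String, endtime: i64, state: String },
    }

    // returns (last_run_upid, last_run_endtime, last_run_state, reference time for next_run)
    fn last_run_info(js: JobState) -> (Option<String>, Option<i64>, Option<String>, i64) {
        match js {
            JobState::Created { time } => (None, None, None, time),
            JobState::Started { upid, starttime } => (Some(upid), None, None, starttime),
            JobState::Finished { upid, endtime, state } => (Some(upid), Some(endtime), Some(state), endtime),
        }
    }

    fn main() {
        let (upid, endtime, state, last) = last_run_info(JobState::Finished {
            upid: "UPID:demo".into(),
            endtime: 1_600_000_000,
            state: "OK".into(),
        });
        println!("{:?} {:?} {:?} {}", upid, endtime, state, last);
    }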
@ -83,7 +77,7 @@ pub fn list_sync_jobs(
} }
)] )]
/// Runs the sync jobs manually. /// Runs the sync jobs manually.
async fn run_sync_job( fn run_sync_job(
id: String, id: String,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
@ -92,21 +86,11 @@ async fn run_sync_job(
let (config, _digest) = sync::config()?; let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?; let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let delete = sync_job.remove_vanished.unwrap_or(true); let job = Job::new("syncjob", &id)?;
let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move { let upid_str = do_sync_job(job, sync_job, &userid, None)?;
worker.log(format!("sync job '{}' start", &id));
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;
worker.log(format!("sync job '{}' end", &id));
Ok(())
})?;
Ok(upid_str) Ok(upid_str)
} }

View File

@ -16,6 +16,7 @@ use crate::backup::*;
use crate::api2::types::*; use crate::api2::types::*;
use crate::config::acl::PRIV_DATASTORE_BACKUP; use crate::config::acl::PRIV_DATASTORE_BACKUP;
use crate::config::cached_user_info::CachedUserInfo; use crate::config::cached_user_info::CachedUserInfo;
use crate::tools::fs::lock_dir_noblock;
mod environment; mod environment;
use environment::*; use environment::*;
@ -37,6 +38,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
("backup-id", false, &BACKUP_ID_SCHEMA), ("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA), ("backup-time", false, &BACKUP_TIME_SCHEMA),
("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()), ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
]), ]),
) )
).access( ).access(
@ -55,13 +57,14 @@ fn upgrade_to_backup_protocol(
async move { async move {
let debug = param["debug"].as_bool().unwrap_or(false); let debug = param["debug"].as_bool().unwrap_or(false);
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned(); let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?; user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?; let datastore = DataStore::lookup_datastore(&store)?;
@ -88,35 +91,76 @@ async move {
let env_type = rpcenv.env_type(); let env_type = rpcenv.env_type();
let backup_group = BackupGroup::new(backup_type, backup_id); let backup_group = BackupGroup::new(backup_type, backup_id);
let owner = datastore.create_backup_group(&backup_group, &username)?;
let worker_type = if backup_type == "host" && backup_id == "benchmark" {
if !benchmark {
bail!("unable to run benchmark without --benchmark flags");
}
"benchmark"
} else {
if benchmark {
bail!("benchmark flags is only allowed on 'host/benchmark'");
}
"backup"
};
// lock backup group to only allow one backup per group at a time
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
// permission check
if owner != username { // only the owner is allowed to create additional snapshots
    bail!("backup owner check failed ({} != {})", username, owner);

if owner != userid && worker_type != "benchmark" {
    // only the owner is allowed to create additional snapshots
    bail!("backup owner check failed ({} != {})", userid, owner);
}

let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
let backup_dir = BackupDir::new_with_group(backup_group, backup_time);

let last_backup = {
    let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
if let Some(info) = info {
let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
let verify = manifest.unprotected["verify_state"].clone();
match serde_json::from_value::<SnapshotVerifyState>(verify) {
Ok(verify) => {
match verify.state {
VerifyState::Ok => Some(info),
VerifyState::Failed => None,
}
},
Err(_) => {
// no verify state found, treat as valid
Some(info)
}
}
} else {
None
}
};
if let Some(last) = &last_backup {

let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;

let _last_guard = if let Some(last) = &last_backup {
    if backup_dir.backup_time() <= last.backup_dir.backup_time() {
        bail!("backup timestamp is older than last backup.");
    }

    // fixme: abort if last backup is still running - howto test?
    // Idea: write upid into a file inside snapshot dir. then test if
    // it is still running here.
}

let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;

    // lock last snapshot to prevent forgetting/pruning it during backup
    let full_path = datastore.snapshot_path(&last.backup_dir);
    Some(lock_dir_noblock(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
} else {
    None
};

let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;

if !is_new { bail!("backup directory already exists."); }
WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
let mut env = BackupEnvironment::new( let mut env = BackupEnvironment::new(
env_type, username.clone(), worker.clone(), datastore, backup_dir); env_type, userid, worker.clone(), datastore, backup_dir);
env.debug = debug; env.debug = debug;
env.last_backup = last_backup; env.last_backup = last_backup;
env.log(format!("starting new backup on datastore '{}': {:?}", store, path)); env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));
let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug); let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);
@ -136,6 +180,7 @@ async move {
let window_size = 32*1024*1024; // max = (1 << 31) - 2 let window_size = 32*1024*1024; // max = (1 << 31) - 2
http.http2_initial_stream_window_size(window_size); http.http2_initial_stream_window_size(window_size);
http.http2_initial_connection_window_size(window_size); http.http2_initial_connection_window_size(window_size);
http.http2_max_frame_size(4*1024*1024);
http.serve_connection(conn, service) http.serve_connection(conn, service)
.map_err(Error::from) .map_err(Error::from)
@ -144,11 +189,20 @@ async move {
.map(|_| Err(format_err!("task aborted"))); .map(|_| Err(format_err!("task aborted")));
async move { async move {
// keep flock until task ends
let _group_guard = _group_guard;
let _snap_guard = _snap_guard;
let _last_guard = _last_guard;
let res = select!{ let res = select!{
req = req_fut => req, req = req_fut => req,
abrt = abort_future => abrt, abrt = abort_future => abrt,
}; };
if benchmark {
env.log("benchmark finished successfully");
env.remove_backup()?;
return Ok(());
}
match (res, env.ensure_finished()) { match (res, env.ensure_finished()) {
(Ok(_), Ok(())) => { (Ok(_), Ok(())) => {
env.log("backup finished successfully"); env.log("backup finished successfully");
@ -322,7 +376,7 @@ fn create_fixed_index(
let last_backup = match &env.last_backup { let last_backup = match &env.last_backup {
Some(info) => info, Some(info) => info,
None => { None => {
bail!("cannot reuse index - no previous backup exists"); bail!("cannot reuse index - no valid previous backup exists");
} }
}; };
@ -637,7 +691,7 @@ fn download_previous(
let last_backup = match &env.last_backup { let last_backup = match &env.last_backup {
Some(info) => info, Some(info) => info,
None => bail!("no previous backup"), None => bail!("no valid previous backup"),
}; };
let mut path = env.datastore.snapshot_path(&last_backup.backup_dir); let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
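Note on the guards used throughout this hunk: `create_locked_backup_group`, `create_locked_backup_dir` and `lock_dir_noblock` all hand back a guard object that keeps an advisory lock on a directory until it is dropped, which is why the worker future explicitly moves `_group_guard`, `_snap_guard` and `_last_guard` into itself. A minimal sketch of the underlying idea, assuming plain flock(2) via the libc crate (the names below are illustrative, not the actual proxmox-backup helpers):

use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::Path;

/// Illustrative guard: the advisory lock is released when the inner File is dropped.
pub struct DirLockGuard(File);

/// Try to take an exclusive, non-blocking lock on a directory.
pub fn try_lock_dir(path: &Path) -> std::io::Result<DirLockGuard> {
    // On Linux a directory can be opened read-only and flock'ed like a regular file.
    let dir = File::open(path)?;
    let rc = unsafe { libc::flock(dir.as_raw_fd(), libc::LOCK_EX | libc::LOCK_NB) };
    if rc != 0 {
        // EWOULDBLOCK here means another backup/prune/forget currently holds the lock.
        return Err(std::io::Error::last_os_error());
    }
    Ok(DirLockGuard(dir))
}

Keeping the returned guard alive inside the worker future (the `let _group_guard = _group_guard;` lines above) is what extends the lock over the whole lifetime of the backup task.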

View File

@ -1,18 +1,21 @@
use anyhow::{bail, Error}; use anyhow::{bail, format_err, Error};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::collections::HashMap; use std::collections::HashMap;
use ::serde::{Serialize};
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox::tools::digest_to_hex; use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions}; use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType}; use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::server::WorkerTask; use crate::api2::types::{Userid, SnapshotVerifyState, VerifyState};
use crate::backup::*; use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*; use crate::server::formatter::*;
use hyper::{Body, Response}; use hyper::{Body, Response};
#[derive(Copy, Clone, Serialize)]
struct UploadStatistic { struct UploadStatistic {
count: u64, count: u64,
size: u64, size: u64,
@ -31,6 +34,19 @@ impl UploadStatistic {
} }
} }
impl std::ops::Add for UploadStatistic {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
count: self.count + other.count,
size: self.size + other.size,
compressed_size: self.compressed_size + other.compressed_size,
duplicates: self.duplicates + other.duplicates,
}
}
}
struct DynamicWriterState { struct DynamicWriterState {
name: String, name: String,
index: DynamicIndexWriter, index: DynamicIndexWriter,
@ -50,13 +66,18 @@ struct FixedWriterState {
incremental: bool, incremental: bool,
} }
// key=digest, value=(length, existance checked)
type KnownChunksMap = HashMap<[u8;32], (u32, bool)>;
struct SharedBackupState { struct SharedBackupState {
finished: bool, finished: bool,
uid_counter: usize, uid_counter: usize,
file_counter: usize, // successfully uploaded files file_counter: usize, // successfully uploaded files
dynamic_writers: HashMap<usize, DynamicWriterState>, dynamic_writers: HashMap<usize, DynamicWriterState>,
fixed_writers: HashMap<usize, FixedWriterState>, fixed_writers: HashMap<usize, FixedWriterState>,
known_chunks: HashMap<[u8;32], u32>, known_chunks: KnownChunksMap,
backup_size: u64, // sums up size of all files
backup_stat: UploadStatistic,
} }
impl SharedBackupState { impl SharedBackupState {
@ -82,7 +103,7 @@ impl SharedBackupState {
pub struct BackupEnvironment { pub struct BackupEnvironment {
env_type: RpcEnvironmentType, env_type: RpcEnvironmentType,
result_attributes: Value, result_attributes: Value,
user: String, user: Userid,
pub debug: bool, pub debug: bool,
pub formatter: &'static OutputFormatter, pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>, pub worker: Arc<WorkerTask>,
@ -95,7 +116,7 @@ pub struct BackupEnvironment {
impl BackupEnvironment { impl BackupEnvironment {
pub fn new( pub fn new(
env_type: RpcEnvironmentType, env_type: RpcEnvironmentType,
user: String, user: Userid,
worker: Arc<WorkerTask>, worker: Arc<WorkerTask>,
datastore: Arc<DataStore>, datastore: Arc<DataStore>,
backup_dir: BackupDir, backup_dir: BackupDir,
@ -108,6 +129,8 @@ impl BackupEnvironment {
dynamic_writers: HashMap::new(), dynamic_writers: HashMap::new(),
fixed_writers: HashMap::new(), fixed_writers: HashMap::new(),
known_chunks: HashMap::new(), known_chunks: HashMap::new(),
backup_size: 0,
backup_stat: UploadStatistic::new(),
}; };
Self { Self {
@ -133,7 +156,7 @@ impl BackupEnvironment {
state.ensure_unfinished()?; state.ensure_unfinished()?;
state.known_chunks.insert(digest, length); state.known_chunks.insert(digest, (length, false));
Ok(()) Ok(())
} }
@ -175,7 +198,7 @@ impl BackupEnvironment {
if is_duplicate { data.upload_stat.duplicates += 1; } if is_duplicate { data.upload_stat.duplicates += 1; }
// register chunk // register chunk
state.known_chunks.insert(digest, size); state.known_chunks.insert(digest, (size, true));
Ok(()) Ok(())
} }
@ -208,7 +231,7 @@ impl BackupEnvironment {
if is_duplicate { data.upload_stat.duplicates += 1; } if is_duplicate { data.upload_stat.duplicates += 1; }
// register chunk // register chunk
state.known_chunks.insert(digest, size); state.known_chunks.insert(digest, (size, true));
Ok(()) Ok(())
} }
@ -217,7 +240,7 @@ impl BackupEnvironment {
let state = self.state.lock().unwrap(); let state = self.state.lock().unwrap();
match state.known_chunks.get(digest) { match state.known_chunks.get(digest) {
Some(len) => Some(*len), Some((len, _)) => Some(*len),
None => None, None => None,
} }
} }
@ -353,7 +376,6 @@ impl BackupEnvironment {
let expected_csum = data.index.close()?; let expected_csum = data.index.close()?;
println!("server checksum {:?} client: {:?}", expected_csum, csum);
if csum != expected_csum { if csum != expected_csum {
bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name); bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
} }
@ -361,6 +383,8 @@ impl BackupEnvironment {
self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat); self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);
state.file_counter += 1; state.file_counter += 1;
state.backup_size += size;
state.backup_stat = state.backup_stat + data.upload_stat;
Ok(()) Ok(())
} }
@ -395,7 +419,6 @@ impl BackupEnvironment {
let uuid = data.index.uuid; let uuid = data.index.uuid;
let expected_csum = data.index.close()?; let expected_csum = data.index.close()?;
println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
if csum != expected_csum { if csum != expected_csum {
bail!("fixed writer '{}' close failed - got unexpected checksum", data.name); bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
} }
@ -403,6 +426,8 @@ impl BackupEnvironment {
self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat); self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);
state.file_counter += 1; state.file_counter += 1;
state.backup_size += size;
state.backup_stat = state.backup_stat + data.upload_stat;
Ok(()) Ok(())
} }
@ -416,9 +441,8 @@ impl BackupEnvironment {
let blob_len = data.len(); let blob_len = data.len();
let orig_len = data.len(); // fixme: let orig_len = data.len(); // fixme:
let blob = DataBlob::from_raw(data)?;
// always verify CRC at server side
blob.verify_crc()?;

// always verify blob/CRC at server side
let blob = DataBlob::load_from_reader(&mut &data[..])?;
let raw_data = blob.raw_data(); let raw_data = blob.raw_data();
replace_file(&path, raw_data, CreateOptions::new())?; replace_file(&path, raw_data, CreateOptions::new())?;
@ -427,6 +451,49 @@ impl BackupEnvironment {
let mut state = self.state.lock().unwrap(); let mut state = self.state.lock().unwrap();
state.file_counter += 1; state.file_counter += 1;
state.backup_size += orig_len as u64;
state.backup_stat.size += blob_len as u64;
Ok(())
}
/// Ensure all chunks referenced in this backup actually exist.
/// Only call *after* all writers have been closed, to avoid race with GC.
/// In case of error, mark the previous backup as 'verify failed'.
fn verify_chunk_existance(&self, known_chunks: &KnownChunksMap) -> Result<(), Error> {
for (digest, (_, checked)) in known_chunks.iter() {
if !checked && !self.datastore.chunk_path(digest).0.exists() {
let mark_msg = if let Some(ref last_backup) = self.last_backup {
let last_dir = &last_backup.backup_dir;
let verify_state = SnapshotVerifyState {
state: VerifyState::Failed,
upid: self.worker.upid().clone(),
};
let res = proxmox::try_block!{
let (mut manifest, _) = self.datastore.load_manifest(last_dir)?;
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
self.datastore.store_manifest(last_dir, serde_json::to_value(manifest)?)
};
if let Err(err) = res {
format!("tried marking previous snapshot as bad, \
but got error accessing manifest: {}", err)
} else {
"marked previous snapshot as bad, please use \
'verify' for a detailed check".to_owned()
}
} else {
"internal error: no base backup registered to mark invalid".to_owned()
};
bail!(
"chunk '{}' was attempted to be reused but doesn't exist - {}",
digest_to_hex(digest),
mark_msg
);
}
}
Ok(()) Ok(())
} }
@ -434,11 +501,11 @@ impl BackupEnvironment {
/// Mark backup as finished /// Mark backup as finished
pub fn finish_backup(&self) -> Result<(), Error> { pub fn finish_backup(&self) -> Result<(), Error> {
let mut state = self.state.lock().unwrap(); let mut state = self.state.lock().unwrap();
// test if all writer are correctly closed
state.ensure_unfinished()?; state.ensure_unfinished()?;
if state.dynamic_writers.len() != 0 { // test if all writer are correctly closed
if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
bail!("found open index writer - unable to finish backup"); bail!("found open index writer - unable to finish backup");
} }
@ -446,6 +513,30 @@ impl BackupEnvironment {
bail!("backup does not contain valid files (file count == 0)"); bail!("backup does not contain valid files (file count == 0)");
} }
// check manifest
let mut manifest = self.datastore.load_manifest_json(&self.backup_dir)
.map_err(|err| format_err!("unable to load manifest blob - {}", err))?;
let stats = serde_json::to_value(state.backup_stat)?;
manifest["unprotected"]["chunk_upload_stats"] = stats;
self.datastore.store_manifest(&self.backup_dir, manifest)
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
if let Some(base) = &self.last_backup {
let path = self.datastore.snapshot_path(&base.backup_dir);
if !path.exists() {
bail!(
"base snapshot {} was removed during backup, cannot finish as chunks might be missing",
base.backup_dir
);
}
}
self.verify_chunk_existance(&state.known_chunks)?;
// marks the backup as successful
state.finished = true; state.finished = true;
Ok(()) Ok(())
@ -480,7 +571,7 @@ impl BackupEnvironment {
let mut state = self.state.lock().unwrap(); let mut state = self.state.lock().unwrap();
state.finished = true; state.finished = true;
self.datastore.remove_backup_dir(&self.backup_dir)?; self.datastore.remove_backup_dir(&self.backup_dir, true)?;
Ok(()) Ok(())
} }
@ -505,7 +596,7 @@ impl RpcEnvironment for BackupEnvironment {
} }
fn get_user(&self) -> Option<String> { fn get_user(&self) -> Option<String> {
Some(self.user.clone()) Some(self.user.to_string())
} }
} }
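The verify_state handling in this file writes a small JSON object into the manifest's `unprotected` section when a reused chunk turns out to be missing; the backup API shown earlier reads the same field back when deciding whether a previous snapshot may serve as an incremental base. A rough sketch of that round trip, assuming serde_json and lowercase serialization of the state enum (both are assumptions; the real types live in `api2::types`):

use serde::{Deserialize, Serialize};
use serde_json::{json, Value};

#[derive(Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum VerifyState { Ok, Failed }

#[derive(Serialize, Deserialize)]
struct SnapshotVerifyState { state: VerifyState, upid: String }

/// Mark a manifest's unprotected section as "verify failed" (illustrative).
fn mark_verify_failed(manifest: &mut Value, upid: &str) -> Result<(), serde_json::Error> {
    manifest["unprotected"]["verify_state"] = serde_json::to_value(SnapshotVerifyState {
        state: VerifyState::Failed,
        upid: upid.to_string(),
    })?;
    Ok(())
}

/// Decide whether a previous snapshot may serve as an incremental base:
/// a missing or unparsable verify_state is treated as valid, Failed is not.
fn usable_as_base(manifest: &Value) -> bool {
    match serde_json::from_value::<SnapshotVerifyState>(manifest["unprotected"]["verify_state"].clone()) {
        Ok(verify) => verify.state == VerifyState::Ok,
        Err(_) => true,
    }
}

fn main() {
    let mut manifest = json!({ "unprotected": {} });
    assert!(usable_as_base(&manifest));
    mark_verify_failed(&mut manifest, "UPID:...").unwrap();
    assert!(!usable_as_base(&manifest));
}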

View File

@ -243,7 +243,7 @@ pub const API_METHOD_UPLOAD_BLOB: ApiMethod = ApiMethod::new(
&sorted!([ &sorted!([
("file-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA), ("file-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
("encoded-size", false, &IntegerSchema::new("Encoded blob size.") ("encoded-size", false, &IntegerSchema::new("Encoded blob size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1) .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize)) .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
.schema() .schema()
) )

View File

@ -5,9 +5,11 @@ use serde_json::Value;
use ::serde::{Deserialize, Serialize}; use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment, Permission}; use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*; use crate::api2::types::*;
use crate::backup::*; use crate::backup::*;
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA}; use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY}; use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
@ -21,7 +23,7 @@ use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
items: { type: datastore::DataStoreConfig }, items: { type: datastore::DataStoreConfig },
}, },
access: { access: {
permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false), permission: &Permission::Anybody,
}, },
)] )]
/// List all datastores /// List all datastores
@ -32,11 +34,18 @@ pub fn list_datastores(
let (config, digest) = datastore::config()?; let (config, digest) = datastore::config()?;
let list = config.convert_to_typed_array("datastore")?; let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into(); rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list) let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
};
Ok(list.into_iter().filter(filter_by_privs).collect())
} }
@ -66,6 +75,10 @@ pub fn list_datastores(
optional: true, optional: true,
schema: PRUNE_SCHEDULE_SCHEMA, schema: PRUNE_SCHEDULE_SCHEMA,
}, },
"verify-schedule": {
optional: true,
schema: VERIFY_SCHEDULE_SCHEMA,
},
"keep-last": { "keep-last": {
optional: true, optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST, schema: PRUNE_SCHEMA_KEEP_LAST,
@ -99,7 +112,7 @@ pub fn list_datastores(
/// Create new datastore config. /// Create new datastore config.
pub fn create_datastore(param: Value) -> Result<(), Error> { pub fn create_datastore(param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?; let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
@ -118,6 +131,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
datastore::save_config(&config)?; datastore::save_config(&config)?;
crate::config::jobstate::create_state_file("prune", &datastore.name)?;
Ok(()) Ok(())
} }
@ -162,6 +177,8 @@ pub enum DeletableProperty {
gc_schedule, gc_schedule,
/// Delete the prune job schedule. /// Delete the prune job schedule.
prune_schedule, prune_schedule,
/// Delete the verify schedule property
verify_schedule,
/// Delete the keep-last property /// Delete the keep-last property
keep_last, keep_last,
/// Delete the keep-hourly property /// Delete the keep-hourly property
@ -195,6 +212,10 @@ pub enum DeletableProperty {
optional: true, optional: true,
schema: PRUNE_SCHEDULE_SCHEMA, schema: PRUNE_SCHEDULE_SCHEMA,
}, },
"verify-schedule": {
optional: true,
schema: VERIFY_SCHEDULE_SCHEMA,
},
"keep-last": { "keep-last": {
optional: true, optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST, schema: PRUNE_SCHEMA_KEEP_LAST,
@ -243,6 +264,7 @@ pub fn update_datastore(
comment: Option<String>, comment: Option<String>,
gc_schedule: Option<String>, gc_schedule: Option<String>,
prune_schedule: Option<String>, prune_schedule: Option<String>,
verify_schedule: Option<String>,
keep_last: Option<u64>, keep_last: Option<u64>,
keep_hourly: Option<u64>, keep_hourly: Option<u64>,
keep_daily: Option<u64>, keep_daily: Option<u64>,
@ -253,7 +275,7 @@ pub fn update_datastore(
digest: Option<String>, digest: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
// pass/compare digest // pass/compare digest
let (mut config, expected_digest) = datastore::config()?; let (mut config, expected_digest) = datastore::config()?;
@ -271,6 +293,7 @@ pub fn update_datastore(
DeletableProperty::comment => { data.comment = None; }, DeletableProperty::comment => { data.comment = None; },
DeletableProperty::gc_schedule => { data.gc_schedule = None; }, DeletableProperty::gc_schedule => { data.gc_schedule = None; },
DeletableProperty::prune_schedule => { data.prune_schedule = None; }, DeletableProperty::prune_schedule => { data.prune_schedule = None; },
DeletableProperty::verify_schedule => { data.verify_schedule = None; },
DeletableProperty::keep_last => { data.keep_last = None; }, DeletableProperty::keep_last => { data.keep_last = None; },
DeletableProperty::keep_hourly => { data.keep_hourly = None; }, DeletableProperty::keep_hourly => { data.keep_hourly = None; },
DeletableProperty::keep_daily => { data.keep_daily = None; }, DeletableProperty::keep_daily => { data.keep_daily = None; },
@ -291,7 +314,12 @@ pub fn update_datastore(
} }
if gc_schedule.is_some() { data.gc_schedule = gc_schedule; } if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
if prune_schedule.is_some() { data.prune_schedule = prune_schedule; } let mut prune_schedule_changed = false;
if prune_schedule.is_some() {
prune_schedule_changed = true;
data.prune_schedule = prune_schedule;
}
if verify_schedule.is_some() { data.verify_schedule = verify_schedule; }
if keep_last.is_some() { data.keep_last = keep_last; } if keep_last.is_some() { data.keep_last = keep_last; }
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; } if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
@ -304,6 +332,12 @@ pub fn update_datastore(
datastore::save_config(&config)?; datastore::save_config(&config)?;
// we want to reset the statefile, to avoid an immediate sync in some cases
// (e.g. going from monthly to weekly in the second week of the month)
if prune_schedule_changed {
crate::config::jobstate::create_state_file("prune", &name)?;
}
Ok(()) Ok(())
} }
@ -327,7 +361,7 @@ pub fn update_datastore(
/// Remove a datastore configuration. /// Remove a datastore configuration.
pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> { pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = datastore::config()?; let (mut config, expected_digest) = datastore::config()?;
@ -343,6 +377,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Erro
datastore::save_config(&config)?; datastore::save_config(&config)?;
crate::config::jobstate::remove_state_file("prune", &name)?;
Ok(()) Ok(())
} }
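With the schema-level permission relaxed to `Permission::Anybody`, list_datastores now filters the result per entry instead. A compact sketch of that bitmask filter, with a placeholder privilege constant and a stripped-down config type (the real values come from `config::acl` and `config::datastore`):

/// Placeholder privilege bit; the real value is defined in config::acl.
const PRIV_DATASTORE_AUDIT: u64 = 1 << 4;

struct DataStoreConfig { name: String }

/// Keep only the datastores the requesting user is allowed to audit.
fn filter_by_privs(
    stores: Vec<DataStoreConfig>,
    lookup_privs: impl Fn(&str) -> u64,
) -> Vec<DataStoreConfig> {
    stores
        .into_iter()
        .filter(|store| lookup_privs(&store.name) & PRIV_DATASTORE_AUDIT != 0)
        .collect()
}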

View File

@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};
use base64; use base64;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission}; use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*; use crate::api2::types::*;
use crate::config::remote; use crate::config::remote;
@ -60,7 +61,7 @@ pub fn list_remotes(
schema: DNS_NAME_OR_IP_SCHEMA, schema: DNS_NAME_OR_IP_SCHEMA,
}, },
userid: { userid: {
schema: PROXMOX_USER_ID_SCHEMA, type: Userid,
}, },
password: { password: {
schema: remote::REMOTE_PASSWORD_SCHEMA, schema: remote::REMOTE_PASSWORD_SCHEMA,
@ -78,7 +79,7 @@ pub fn list_remotes(
/// Create new remote. /// Create new remote.
pub fn create_remote(password: String, param: Value) -> Result<(), Error> { pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let mut data = param.clone(); let mut data = param.clone();
data["password"] = Value::from(base64::encode(password.as_bytes())); data["password"] = Value::from(base64::encode(password.as_bytes()));
@ -154,7 +155,7 @@ pub enum DeletableProperty {
}, },
userid: { userid: {
optional: true, optional: true,
schema: PROXMOX_USER_ID_SCHEMA, type: Userid,
}, },
password: { password: {
optional: true, optional: true,
@ -187,14 +188,14 @@ pub fn update_remote(
name: String, name: String,
comment: Option<String>, comment: Option<String>,
host: Option<String>, host: Option<String>,
userid: Option<String>, userid: Option<Userid>,
password: Option<String>, password: Option<String>,
fingerprint: Option<String>, fingerprint: Option<String>,
delete: Option<Vec<DeletableProperty>>, delete: Option<Vec<DeletableProperty>>,
digest: Option<String>, digest: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = remote::config()?; let (mut config, expected_digest) = remote::config()?;
@ -255,7 +256,7 @@ pub fn update_remote(
/// Remove a remote from the configuration file. /// Remove a remote from the configuration file.
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> { pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = remote::config()?; let (mut config, expected_digest) = remote::config()?;

View File

@ -3,6 +3,7 @@ use serde_json::Value;
use ::serde::{Deserialize, Serialize}; use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment}; use proxmox::api::{api, Router, RpcEnvironment};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*; use crate::api2::types::*;
use crate::config::sync::{self, SyncJobConfig}; use crate::config::sync::{self, SyncJobConfig};
@ -68,7 +69,7 @@ pub fn list_sync_jobs(
/// Create a new sync job. /// Create a new sync job.
pub fn create_sync_job(param: Value) -> Result<(), Error> { pub fn create_sync_job(param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?; let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
@ -82,6 +83,8 @@ pub fn create_sync_job(param: Value) -> Result<(), Error> {
sync::save_config(&config)?; sync::save_config(&config)?;
crate::config::jobstate::create_state_file("syncjob", &sync_job.id)?;
Ok(()) Ok(())
} }
@ -184,7 +187,7 @@ pub fn update_sync_job(
digest: Option<String>, digest: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
// pass/compare digest // pass/compare digest
let (mut config, expected_digest) = sync::config()?; let (mut config, expected_digest) = sync::config()?;
@ -247,7 +250,7 @@ pub fn update_sync_job(
/// Remove a sync job configuration /// Remove a sync job configuration
pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> { pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = sync::config()?; let (mut config, expected_digest) = sync::config()?;
@ -263,6 +266,8 @@ pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error>
sync::save_config(&config)?; sync::save_config(&config)?;
crate::config::jobstate::remove_state_file("syncjob", &id)?;
Ok(()) Ok(())
} }

View File

@ -1,18 +1,19 @@
use std::path::PathBuf; use std::path::PathBuf;
use anyhow::Error; use anyhow::Error;
use futures::*; use futures::stream::TryStreamExt;
use hyper::{Body, Response, StatusCode, header}; use hyper::{Body, Response, StatusCode, header};
use proxmox::http_err;
use proxmox::http_bail;
pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
    let file = tokio::fs::File::open(path.clone())
        .map_err(move |err| {
            match err.kind() {
                std::io::ErrorKind::NotFound => http_err!(NOT_FOUND, format!("open file {:?} failed - not found", path.clone())),
                _ => http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)),
            }
        })
        .await?;

    let file = match tokio::fs::File::open(path.clone()).await {
        Ok(file) => file,
        Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
            http_bail!(NOT_FOUND, "open file {:?} failed - not found", path);
        }
        Err(err) => http_bail!(BAD_REQUEST, "open file {:?} failed: {}", path, err),
    };
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new()) let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze())); .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
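For reference, the rewritten create_download_response streams the file instead of buffering it. A self-contained variant of the same FramedRead + BytesCodec pattern, with error handling reduced to io::Error and hyper/tokio-util/futures assumed as dependencies (a sketch, not the project helper itself):

use futures::stream::TryStreamExt;
use hyper::{Body, Response};
use std::path::PathBuf;
use tokio_util::codec::{BytesCodec, FramedRead};

/// Stream a file back as an HTTP response body (sketch).
async fn stream_file(path: PathBuf) -> std::io::Result<Response<Body>> {
    let file = tokio::fs::File::open(path).await?;
    // FramedRead yields BytesMut chunks; freeze() converts them to Bytes for the Body.
    let payload = FramedRead::new(file, BytesCodec::new())
        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
    Ok(Response::builder()
        .status(200)
        .body(Body::wrap_stream(payload))
        .expect("valid static response parts"))
}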

View File

@ -2,10 +2,7 @@ use std::net::TcpListener;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use futures::{ use futures::future::{FutureExt, TryFutureExt};
future::{FutureExt, TryFutureExt},
try_join,
};
use hyper::body::Body; use hyper::body::Body;
use hyper::http::request::Parts; use hyper::http::request::Parts;
use hyper::upgrade::Upgraded; use hyper::upgrade::Upgraded;
@ -25,18 +22,21 @@ use crate::api2::types::*;
use crate::config::acl::PRIV_SYS_CONSOLE; use crate::config::acl::PRIV_SYS_CONSOLE;
use crate::server::WorkerTask; use crate::server::WorkerTask;
use crate::tools; use crate::tools;
use crate::tools::ticket::{self, Empty, Ticket};
pub mod disks; pub mod disks;
pub mod dns; pub mod dns;
mod journal;
pub mod network; pub mod network;
pub mod tasks;
pub(crate) mod rrd; pub(crate) mod rrd;
mod apt;
mod journal;
mod services; mod services;
mod status; mod status;
mod subscription; mod subscription;
mod apt;
mod syslog; mod syslog;
pub mod tasks;
mod time; mod time;
pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.") pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.")
@ -91,12 +91,12 @@ async fn termproxy(
cmd: Option<String>, cmd: Option<String>,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let userid = rpcenv
    .get_user()
    .ok_or_else(|| format_err!("unknown user"))?;
let (username, realm) = crate::auth::parse_userid(&userid)?;

let userid: Userid = rpcenv
    .get_user()
    .ok_or_else(|| format_err!("unknown user"))?
    .parse()?;

if realm != "pam" {
if userid.realm() != "pam" {
    bail!("only pam users can use the console");
}
@ -106,12 +106,11 @@ async fn termproxy(
let listener = TcpListener::bind("localhost:0")?; let listener = TcpListener::bind("localhost:0")?;
let port = listener.local_addr()?.port(); let port = listener.local_addr()?.port();
let ticket = tools::ticket::assemble_term_ticket(
    crate::auth_helpers::private_auth_key(),
    &userid,
    &path,
    port,
)?;

let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?
    .sign(
        crate::auth_helpers::private_auth_key(),
        Some(&ticket::term_aad(&userid, &path, port)),
    )?;
let mut command = Vec::new(); let mut command = Vec::new();
match cmd.as_ref().map(|x| x.as_str()) { match cmd.as_ref().map(|x| x.as_str()) {
@ -134,10 +133,11 @@ async fn termproxy(
_ => bail!("invalid command"), _ => bail!("invalid command"),
}; };
let username = userid.name().to_owned();
let upid = WorkerTask::spawn( let upid = WorkerTask::spawn(
"termproxy", "termproxy",
None, None,
&username, userid,
false, false,
move |worker| async move { move |worker| async move {
// move inside the worker so that it survives and does not close the port // move inside the worker so that it survives and does not close the port
@ -169,9 +169,9 @@ async fn termproxy(
let mut cmd = tokio::process::Command::new("/usr/bin/termproxy"); let mut cmd = tokio::process::Command::new("/usr/bin/termproxy");
cmd.args(&arguments); cmd.args(&arguments)
cmd.stdout(std::process::Stdio::piped()); .stdout(std::process::Stdio::piped())
cmd.stderr(std::process::Stdio::piped()); .stderr(std::process::Stdio::piped());
let mut child = cmd.spawn().expect("error executing termproxy"); let mut child = cmd.spawn().expect("error executing termproxy");
@ -184,7 +184,7 @@ async fn termproxy(
while let Some(line) = reader.next_line().await? { while let Some(line) = reader.next_line().await? {
worker_stdout.log(line); worker_stdout.log(line);
} }
Ok(()) Ok::<(), Error>(())
}; };
let worker_stderr = worker.clone(); let worker_stderr = worker.clone();
@ -193,21 +193,48 @@ async fn termproxy(
while let Some(line) = reader.next_line().await? { while let Some(line) = reader.next_line().await? {
worker_stderr.warn(line); worker_stderr.warn(line);
} }
Ok(()) Ok::<(), Error>(())
}; };
let (exit_code, _, _) = try_join!(child, stdout_fut, stderr_fut)?;
if !exit_code.success() {
    match exit_code.code() {
        Some(code) => bail!("termproxy exited with {}", code),
        None => bail!("termproxy exited by signal"),
    }
}

Ok(())

let mut needs_kill = false;
let res = tokio::select!{
    res = &mut child => {
        let exit_code = res?;
        if !exit_code.success() {
            match exit_code.code() {
                Some(code) => bail!("termproxy exited with {}", code),
                None => bail!("termproxy exited by signal"),
            }
        }
        Ok(())
    },
    res = stdout_fut => res,
    res = stderr_fut => res,
    res = worker.abort_future() => {
        needs_kill = true;
        res.map_err(Error::from)
    }
};

if needs_kill {
    if res.is_ok() {
        child.kill()?;
        child.await?;
        return Ok(());
    }

    if let Err(err) = child.kill() {
        worker.warn(format!("error killing termproxy: {}", err));
    } else if let Err(err) = child.await {
        worker.warn(format!("error awaiting termproxy: {}", err));
    }
}

res
}, },
)?; )?;
// FIXME: We're returning the user NAME only?
Ok(json!({ Ok(json!({
"user": username, "user": username,
"ticket": ticket, "ticket": ticket,
@ -245,22 +272,21 @@ fn upgrade_to_websocket(
rpcenv: Box<dyn RpcEnvironment>, rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture { ) -> ApiResponseFuture {
async move { async move {
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let ticket = tools::required_string_param(&param, "vncticket")?.to_owned(); let ticket = tools::required_string_param(&param, "vncticket")?;
let port: u16 = tools::required_integer_param(&param, "port")? as u16; let port: u16 = tools::required_integer_param(&param, "port")? as u16;
// will be checked again by termproxy // will be checked again by termproxy
tools::ticket::verify_term_ticket( Ticket::<Empty>::parse(ticket)?
crate::auth_helpers::public_auth_key(), .verify(
&username, crate::auth_helpers::public_auth_key(),
&"/system", ticket::TERM_PREFIX,
port, Some(&ticket::term_aad(&userid, "/system", port)),
&ticket, )?;
)?;
let (ws, response) = WebSocket::new(parts.headers)?; let (ws, response) = WebSocket::new(parts.headers)?;
tokio::spawn(async move { crate::server::spawn_internal_task(async move {
let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await { let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await {
Ok(upgraded) => upgraded, Ok(upgraded) => upgraded,
_ => bail!("error"), _ => bail!("error"),

View File

@ -9,7 +9,7 @@ use proxmox::api::router::{Router, SubdirMap};
use crate::server::WorkerTask; use crate::server::WorkerTask;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY}; use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA}; use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
const_regex! { const_regex! {
VERSION_EPOCH_REGEX = r"^\d+:"; VERSION_EPOCH_REGEX = r"^\d+:";
@ -233,11 +233,11 @@ pub fn apt_update_database(
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> { ) -> Result<String, Error> {
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET); let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
let upid_str = WorkerTask::new_thread("aptupdate", None, &username.clone(), to_stdout, move |worker| { let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
if !quiet { worker.log("starting apt-get update") } if !quiet { worker.log("starting apt-get update") }
// TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE

View File

@ -13,7 +13,7 @@ use crate::tools::disks::{
}; };
use crate::server::WorkerTask; use crate::server::WorkerTask;
use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA}; use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
pub mod directory; pub mod directory;
pub mod zfs; pub mod zfs;
@ -140,7 +140,7 @@ pub fn initialize_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?; let info = get_disk_usage_info(&disk, true)?;
@ -149,7 +149,7 @@ pub fn initialize_disk(
} }
let upid_str = WorkerTask::new_thread( let upid_str = WorkerTask::new_thread(
"diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker| "diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
{ {
worker.log(format!("initialize disk {}", disk)); worker.log(format!("initialize disk {}", disk));

View File

@ -16,6 +16,7 @@ use crate::tools::systemd::{self, types::*};
use crate::server::WorkerTask; use crate::server::WorkerTask;
use crate::api2::types::*; use crate::api2::types::*;
use crate::config::datastore::DataStoreConfig;
#[api( #[api(
properties: { properties: {
@ -133,7 +134,7 @@ pub fn create_datastore_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?; let info = get_disk_usage_info(&disk, true)?;
@ -142,7 +143,7 @@ pub fn create_datastore_disk(
} }
let upid_str = WorkerTask::new_thread( let upid_str = WorkerTask::new_thread(
"dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker| "dircreate", Some(name.clone()), userid, to_stdout, move |worker|
{ {
worker.log(format!("create datastore '{}' on disk {}", name, disk)); worker.log(format!("create datastore '{}' on disk {}", name, disk));
@ -175,9 +176,69 @@ pub fn create_datastore_disk(
Ok(upid_str) Ok(upid_str)
} }
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
name: {
schema: DATASTORE_SCHEMA,
},
}
},
access: {
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
},
)]
/// Remove a Filesystem mounted under '/mnt/datastore/<name>'.".
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
let path = format!("/mnt/datastore/{}", name);
// path of datastore cannot be changed
let (config, _) = crate::config::datastore::config()?;
let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
.filter(|ds| ds.path == path)
.next();
if let Some(conflicting_datastore) = conflicting_datastore {
bail!("Can't remove '{}' since it's required by datastore '{}'",
conflicting_datastore.path, conflicting_datastore.name);
}
// disable systemd mount-unit
let mut mount_unit_name = systemd::escape_unit(&path, true);
mount_unit_name.push_str(".mount");
systemd::disable_unit(&mount_unit_name)?;
// delete .mount-file
let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
let full_path = std::path::Path::new(&mount_unit_path);
log::info!("removing systemd mount unit {:?}", full_path);
std::fs::remove_file(&full_path)?;
// try to unmount, if that fails tell the user to reboot or unmount manually
let mut command = std::process::Command::new("umount");
command.arg(&path);
match crate::tools::run_command(command, None) {
Err(_) => bail!(
"Could not umount '{}' since it is busy. It will stay mounted \
until the next reboot or until unmounted manually!",
path
),
Ok(_) => Ok(())
}
}
const ITEM_ROUTER: Router = Router::new()
.delete(&API_METHOD_DELETE_DATASTORE_DISK);
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_DATASTORE_MOUNTS) .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
.post(&API_METHOD_CREATE_DATASTORE_DISK); .post(&API_METHOD_CREATE_DATASTORE_DISK)
.match_all("name", &ITEM_ROUTER);
fn create_datastore_mount_unit( fn create_datastore_mount_unit(

View File

@ -254,7 +254,7 @@ pub fn create_zpool(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let add_datastore = add_datastore.unwrap_or(false); let add_datastore = add_datastore.unwrap_or(false);
@ -314,7 +314,7 @@ pub fn create_zpool(
} }
let upid_str = WorkerTask::new_thread( let upid_str = WorkerTask::new_thread(
"zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker| "zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
{ {
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text)); worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));

View File

@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission}; use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::parse_property_string; use proxmox::api::schema::parse_property_string;
use proxmox::tools::fs::open_file_locked;
use crate::config::network::{self, NetworkConfig}; use crate::config::network::{self, NetworkConfig};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY}; use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
@ -197,6 +198,14 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
type: LinuxBondMode, type: LinuxBondMode,
optional: true, optional: true,
}, },
"bond-primary": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_xmit_hash_policy: {
type: BondXmitHashPolicy,
optional: true,
},
slaves: { slaves: {
schema: NETWORK_INTERFACE_LIST_SCHEMA, schema: NETWORK_INTERFACE_LIST_SCHEMA,
optional: true, optional: true,
@ -223,6 +232,8 @@ pub fn create_interface(
bridge_ports: Option<String>, bridge_ports: Option<String>,
bridge_vlan_aware: Option<bool>, bridge_vlan_aware: Option<bool>,
bond_mode: Option<LinuxBondMode>, bond_mode: Option<LinuxBondMode>,
bond_primary: Option<String>,
bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
slaves: Option<String>, slaves: Option<String>,
param: Value, param: Value,
) -> Result<(), Error> { ) -> Result<(), Error> {
@ -230,7 +241,7 @@ pub fn create_interface(
let interface_type = crate::tools::required_string_param(&param, "type")?; let interface_type = crate::tools::required_string_param(&param, "type")?;
let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?; let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;
let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, _digest) = network::config()?; let (mut config, _digest) = network::config()?;
@ -283,7 +294,23 @@ pub fn create_interface(
if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; } if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
} }
NetworkInterfaceType::Bond => { NetworkInterfaceType::Bond => {
if bond_mode.is_some() { interface.bond_mode = bond_mode; } if let Some(mode) = bond_mode {
interface.bond_mode = bond_mode;
if bond_primary.is_some() {
if mode != LinuxBondMode::active_backup {
bail!("bond-primary is only valid with Active/Backup mode");
}
interface.bond_primary = bond_primary;
}
if bond_xmit_hash_policy.is_some() {
if mode != LinuxBondMode::ieee802_3ad &&
mode != LinuxBondMode::balance_xor
{
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
}
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
}
}
if let Some(slaves) = slaves { if let Some(slaves) = slaves {
let slaves = split_interface_list(&slaves)?; let slaves = split_interface_list(&slaves)?;
interface.set_bond_slaves(slaves)?; interface.set_bond_slaves(slaves)?;
@ -342,6 +369,11 @@ pub enum DeletableProperty {
bridge_vlan_aware, bridge_vlan_aware,
/// Delete bond-slaves (set to 'none') /// Delete bond-slaves (set to 'none')
slaves, slaves,
/// Delete bond-primary
#[serde(rename = "bond-primary")]
bond_primary,
/// Delete bond transmit hash policy
bond_xmit_hash_policy,
} }
@ -419,6 +451,14 @@ pub enum DeletableProperty {
type: LinuxBondMode, type: LinuxBondMode,
optional: true, optional: true,
}, },
"bond-primary": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_xmit_hash_policy: {
type: BondXmitHashPolicy,
optional: true,
},
slaves: { slaves: {
schema: NETWORK_INTERFACE_LIST_SCHEMA, schema: NETWORK_INTERFACE_LIST_SCHEMA,
optional: true, optional: true,
@ -457,13 +497,15 @@ pub fn update_interface(
bridge_ports: Option<String>, bridge_ports: Option<String>,
bridge_vlan_aware: Option<bool>, bridge_vlan_aware: Option<bool>,
bond_mode: Option<LinuxBondMode>, bond_mode: Option<LinuxBondMode>,
bond_primary: Option<String>,
bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
slaves: Option<String>, slaves: Option<String>,
delete: Option<Vec<DeletableProperty>>, delete: Option<Vec<DeletableProperty>>,
digest: Option<String>, digest: Option<String>,
param: Value, param: Value,
) -> Result<(), Error> { ) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = network::config()?; let (mut config, expected_digest) = network::config()?;
@ -500,6 +542,8 @@ pub fn update_interface(
DeletableProperty::bridge_ports => { interface.set_bridge_ports(Vec::new())?; } DeletableProperty::bridge_ports => { interface.set_bridge_ports(Vec::new())?; }
DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; } DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
DeletableProperty::slaves => { interface.set_bond_slaves(Vec::new())?; } DeletableProperty::slaves => { interface.set_bond_slaves(Vec::new())?; }
DeletableProperty::bond_primary => { interface.bond_primary = None; }
DeletableProperty::bond_xmit_hash_policy => { interface.bond_xmit_hash_policy = None }
} }
} }
} }
@ -517,7 +561,23 @@ pub fn update_interface(
let slaves = split_interface_list(&slaves)?; let slaves = split_interface_list(&slaves)?;
interface.set_bond_slaves(slaves)?; interface.set_bond_slaves(slaves)?;
} }
if bond_mode.is_some() { interface.bond_mode = bond_mode; } if let Some(mode) = bond_mode {
interface.bond_mode = bond_mode;
if bond_primary.is_some() {
if mode != LinuxBondMode::active_backup {
bail!("bond-primary is only valid with Active/Backup mode");
}
interface.bond_primary = bond_primary;
}
if bond_xmit_hash_policy.is_some() {
if mode != LinuxBondMode::ieee802_3ad &&
mode != LinuxBondMode::balance_xor
{
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
}
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
}
}
if let Some(cidr) = cidr { if let Some(cidr) = cidr {
let (_, _, is_v6) = network::parse_cidr(&cidr)?; let (_, _, is_v6) = network::parse_cidr(&cidr)?;
@ -586,7 +646,7 @@ pub fn update_interface(
/// Remove network interface configuration. /// Remove network interface configuration.
pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> { pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?; let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = network::config()?; let (mut config, expected_digest) = network::config()?;
@ -624,9 +684,9 @@ pub async fn reload_network_config(
network::assert_ifupdown2_installed()?; network::assert_ifupdown2_installed()?;
let username = rpcenv.get_user().unwrap(); let userid: Userid = rpcenv.get_user().unwrap().parse()?;
let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), &username.clone(), true, |_worker| async { let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME); let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
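The bond handling in this file only accepts bond-primary for active-backup mode, and a transmit hash policy for 802.3ad or balance-xor. The rule itself is small enough to isolate; a sketch with enum variants mirroring the names used in the hunks above (the enum definition here is illustrative, the real one lives in api2::types):

use anyhow::bail;

#[allow(non_camel_case_types, dead_code)]
#[derive(PartialEq, Clone, Copy)]
enum LinuxBondMode { balance_rr, active_backup, balance_xor, broadcast, ieee802_3ad, balance_tlb, balance_alb }

/// Validate bond options against the selected mode (sketch of the rule in the hunk).
fn check_bond_options(
    mode: LinuxBondMode,
    bond_primary: Option<&str>,
    bond_xmit_hash_policy: Option<&str>,
) -> Result<(), anyhow::Error> {
    if bond_primary.is_some() && mode != LinuxBondMode::active_backup {
        bail!("bond-primary is only valid with Active/Backup mode");
    }
    if bond_xmit_hash_policy.is_some()
        && mode != LinuxBondMode::ieee802_3ad
        && mode != LinuxBondMode::balance_xor
    {
        bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
    }
    Ok(())
}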

View File

@ -1,10 +1,10 @@
use anyhow::Error; use anyhow::Error;
use serde_json::{Value, json}; use serde_json::{Value, json};
use proxmox::api::{api, Router}; use proxmox::api::{api, Permission, Router};
use crate::api2::types::*; use crate::api2::types::*;
use crate::tools::epoch_now_f64; use crate::config::acl::PRIV_SYS_AUDIT;
use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES}; use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
pub fn create_value_from_rrd( pub fn create_value_from_rrd(
@ -15,7 +15,7 @@ pub fn create_value_from_rrd(
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let mut result = Vec::new(); let mut result = Vec::new();
let now = epoch_now_f64()?; let now = proxmox::tools::time::epoch_f64();
for name in list { for name in list {
let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) { let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
@ -57,6 +57,9 @@ pub fn create_value_from_rrd(
}, },
}, },
}, },
access: {
permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
},
)] )]
/// Read node stats /// Read node stats
fn get_node_stats( fn get_node_stats(

View File

@ -4,12 +4,13 @@ use anyhow::{bail, Error};
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox::{sortable, identity, list_subdirs_api_method}; use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{api, Router, Permission}; use proxmox::api::{api, Router, Permission, RpcEnvironment};
use proxmox::api::router::SubdirMap; use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*; use proxmox::api::schema::*;
use crate::api2::types::*; use crate::api2::types::*;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY}; use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::server::WorkerTask;
static SERVICE_NAME_LIST: [&str; 7] = [ static SERVICE_NAME_LIST: [&str; 7] = [
"proxmox-backup", "proxmox-backup",
@ -181,30 +182,43 @@ fn get_service_state(
Ok(json_service_state(&service, status)) Ok(json_service_state(&service, status))
} }
fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {

    // fixme: run background worker (fork_worker) ???

    match cmd {
        "start"|"stop"|"restart"|"reload" => {},
        _ => bail!("unknown service command '{}'", cmd),
    }

    if service == "proxmox-backup" && cmd != "restart" {
        bail!("invalid service cmd '{} {}'", service, cmd);
    }

    let real_service_name = real_service_name(service);

    let status = Command::new("systemctl")
        .args(&[cmd, real_service_name])
        .status()?;

    if !status.success() {
        bail!("systemctl {} failed with {}", cmd, status);
    }

    Ok(Value::Null)
}

fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {

    let workerid = format!("srv{}", &cmd);

    let cmd = match cmd {
        "start"|"stop"|"restart"=> cmd.to_string(),
        "reload" => "try-reload-or-restart".to_string(), // some services do not implement reload
        _ => bail!("unknown service command '{}'", cmd),
    };

    let service = service.to_string();

    let upid = WorkerTask::new_thread(
        &workerid,
        Some(service.clone()),
        userid,
        false,
        move |_worker| {

            if service == "proxmox-backup" && cmd == "stop" {
                bail!("invalid service cmd '{} {}' cannot stop essential service!", service, cmd);
            }

            let real_service_name = real_service_name(&service);

            let status = Command::new("systemctl")
                .args(&[&cmd, real_service_name])
                .status()?;

            if !status.success() {
                bail!("systemctl {} failed with {}", cmd, status);
            }

            Ok(())
        }
    )?;

    Ok(upid.into())
}
#[api( #[api(
@ -227,11 +241,14 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
fn start_service( fn start_service(
service: String, service: String,
_param: Value, _param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
log::info!("starting service {}", service); log::info!("starting service {}", service);
run_service_command(&service, "start") run_service_command(&service, "start", userid)
} }
#[api( #[api(
@ -254,11 +271,14 @@ fn start_service(
fn stop_service( fn stop_service(
service: String, service: String,
_param: Value, _param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
log::info!("stopping service {}", service); log::info!("stopping service {}", service);
run_service_command(&service, "stop") run_service_command(&service, "stop", userid)
} }
#[api( #[api(
@ -281,15 +301,18 @@ fn stop_service(
fn restart_service( fn restart_service(
service: String, service: String,
_param: Value, _param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
log::info!("re-starting service {}", service); log::info!("re-starting service {}", service);
if &service == "proxmox-backup-proxy" { if &service == "proxmox-backup-proxy" {
// special case, avoid aborting running tasks // special case, avoid aborting running tasks
run_service_command(&service, "reload") run_service_command(&service, "reload", userid)
} else { } else {
run_service_command(&service, "restart") run_service_command(&service, "restart", userid)
} }
} }
@ -313,11 +336,14 @@ fn restart_service(
fn reload_service( fn reload_service(
service: String, service: String,
_param: Value, _param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
log::info!("reloading service {}", service); log::info!("reloading service {}", service);
run_service_command(&service, "reload") run_service_command(&service, "reload", userid)
} }
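Worth noting in run_service_command above: the user-facing "reload" command is translated to systemd's try-reload-or-restart, since not every unit implements a reload action, and restart_service deliberately routes the proxy itself through "reload" to avoid aborting running tasks. The mapping in isolation, as a small sketch:

/// Map an API service command to the systemctl verb actually executed (sketch).
fn systemctl_verb(cmd: &str) -> Result<String, String> {
    match cmd {
        "start" | "stop" | "restart" => Ok(cmd.to_string()),
        // some services do not implement reload, so fall back to a conditional restart
        "reload" => Ok("try-reload-or-restart".to_string()),
        other => Err(format!("unknown service command '{}'", other)),
    }
}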

View File

@@ -1,11 +1,12 @@
use anyhow::{Error};
use serde_json::{json, Value};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use crate::tools;
use crate::config::acl::PRIV_SYS_AUDIT;
use crate::config::cached_user_info::CachedUserInfo;
use crate::api2::types::{NODE_SCHEMA, Userid};
#[api(
    input: {
@@ -28,7 +29,7 @@ use crate::api2::types::NODE_SCHEMA;
        },
        serverid: {
            type: String,
            description: "The unique server ID, if permitted to access.",
        },
        url: {
            type: String,
@@ -37,18 +38,29 @@ use crate::api2::types::NODE_SCHEMA;
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Read subscription info.
fn get_subscription(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &[]);
    let server_id = if (user_privs & PRIV_SYS_AUDIT) != 0 {
        tools::get_hardware_address()?
    } else {
        "hidden".to_string()
    };
    let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
    Ok(json!({
        "status": "NotFound",
        "message": "There is no subscription key",
        "serverid": server_id,
        "url": url,
    }))
}

View File

@@ -4,13 +4,13 @@ use std::io::{BufRead, BufReader};
use anyhow::{Error};
use serde_json::{json, Value};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::{identity, list_subdirs_api_method, sortable};
use crate::tools;
use crate::api2::types::*;
use crate::server::{self, UPID, TaskState};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
@@ -84,11 +84,11 @@ async fn get_task_status(
    let upid = extract_upid(&param)?;
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }
    let mut result = json!({
@@ -99,15 +99,15 @@ async fn get_task_status(
        "starttime": upid.starttime,
        "type": upid.worker_type,
        "id": upid.worker_id,
        "user": upid.userid,
    });
    if crate::server::worker_is_active(&upid).await? {
        result["status"] = Value::from("running");
    } else {
        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(TaskState::Unknown { endtime: 0 });
        result["status"] = Value::from("stopped");
        result["exitstatus"] = Value::from(exitstatus.to_string());
    };
    Ok(result)
@@ -161,11 +161,11 @@ async fn read_task_log(
    let upid = extract_upid(&param)?;
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }
    let test_status = param["test-status"].as_bool().unwrap_or(false);
@@ -234,11 +234,11 @@ fn stop_task(
    let upid = extract_upid(&param)?;
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
    }
    server::abort_worker_async(upid);
@@ -281,7 +281,7 @@ fn stop_task(
            default: false,
        },
        userfilter: {
            optional: true,
            type: String,
            description: "Only list tasks from this user.",
        },
@@ -307,9 +307,9 @@ pub fn list_tasks(
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
@@ -324,11 +324,11 @@ pub fn list_tasks(
    let mut count = 0;
    for info in list {
        if !list_all && info.upid.userid != userid { continue; }
        if let Some(userid) = userfilter {
            if !info.upid.userid.as_str().contains(userid) { continue; }
        }
        if let Some(store) = store {
@@ -352,8 +352,9 @@ pub fn list_tasks(
        if let Some(ref state) = info.state {
            if running { continue; }
            match state {
                crate::server::TaskState::OK { .. } if errors => continue,
                _ => {},
            }
        }

View File

@@ -1,4 +1,3 @@
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
@@ -57,10 +56,11 @@ fn read_etc_localtime() -> Result<String, Error> {
)]
/// Read server time and time zone settings.
fn get_time(_param: Value) -> Result<Value, Error> {
    let time = proxmox::tools::time::epoch_i64();
    let tm = proxmox::tools::time::localtime(time)?;
    let offset = tm.tm_gmtoff;
    let localtime = time + offset;
    Ok(json!({
        "timezone": read_etc_localtime()?,

View File

@@ -2,6 +2,7 @@
use std::sync::{Arc};
use anyhow::{format_err, Error};
use futures::{select, future::FutureExt};
use proxmox::api::api;
use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
@@ -12,13 +13,15 @@ use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_
use crate::api2::types::*;
use crate::config::{
    remote,
    sync::SyncJobConfig,
    jobstate::Job,
    acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
    cached_user_info::CachedUserInfo,
};
pub fn check_pull_privs(
    userid: &Userid,
    store: &str,
    remote: &str,
    remote_store: &str,
@@ -27,11 +30,11 @@ pub fn check_pull_privs(
    let user_info = CachedUserInfo::new()?;
    user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
    user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
    if delete {
        user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
    }
    Ok(())
@@ -62,6 +65,68 @@ pub async fn get_pull_parameters(
    Ok((client, src_repo, tgt_store))
}
pub fn do_sync_job(
mut job: Job,
sync_job: SyncJobConfig,
userid: &Userid,
schedule: Option<String>,
) -> Result<String, Error> {
let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
let upid_str = WorkerTask::spawn(
&worker_type,
Some(job.jobname().to_string()),
userid.clone(),
false,
move |worker| async move {
job.start(&worker.upid().to_string())?;
let worker2 = worker.clone();
let worker_future = async move {
let delete = sync_job.remove_vanished.unwrap_or(true);
let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
worker.log(format!("Starting datastore sync job '{}'", job_id));
if let Some(event_str) = schedule {
worker.log(format!("task triggered by schedule '{}'", event_str));
}
worker.log(format!("Sync datastore '{}' from '{}/{}'",
sync_job.store, sync_job.remote, sync_job.remote_store));
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
worker.log(format!("sync job '{}' end", &job_id));
Ok(())
};
let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));
let res = select!{
worker = worker_future.fuse() => worker,
abort = abort_future => abort,
};
let status = worker2.create_state(&res);
match job.finish(status) {
Ok(_) => {},
Err(err) => {
eprintln!("could not finish job state: {}", err);
}
}
res
})?;
Ok(upid_str)
}
#[api(
    input: {
        properties: {
@@ -99,19 +164,25 @@ async fn pull (
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let delete = remove_vanished.unwrap_or(true);
    check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;
    let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;
    // fixme: set to_stdout to false?
    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {
        worker.log(format!("sync datastore '{}' start", store));
        let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
        let future = select!{
            success = pull_future.fuse() => success,
            abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
        };
        let _ = future?;
        worker.log(format!("sync datastore '{}' end", store));

View File

@@ -1,4 +1,3 @@
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{self, HeaderValue, UPGRADE};
@@ -55,11 +54,11 @@ fn upgrade_to_backup_reader_protocol(
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let store = tools::required_string_param(&param, "store")?.to_owned();
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
        let datastore = DataStore::lookup_datastore(&store)?;
@@ -83,16 +82,21 @@ fn upgrade_to_backup_reader_protocol(
        let env_type = rpcenv.env_type();
        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
        let path = datastore.base_path();
        //let files = BackupInfo::list_files(&path, &backup_dir)?;
        let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
        WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
            let mut env = ReaderEnvironment::new(
                env_type,
                userid,
                worker.clone(),
                datastore,
                backup_dir,
            );
            env.debug = debug;
@@ -116,6 +120,7 @@ fn upgrade_to_backup_reader_protocol(
            let window_size = 32*1024*1024; // max = (1 << 31) - 2
            http.http2_initial_stream_window_size(window_size);
            http.http2_initial_connection_window_size(window_size);
            http.http2_max_frame_size(4*1024*1024);
            http.serve_connection(conn, service)
                .map_err(Error::from)
@@ -224,9 +229,8 @@ fn download_chunk(
    env.debug(format!("download chunk {:?}", path));
    let data = tools::runtime::block_in_place(|| std::fs::read(path))
        .map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;
    let body = Body::from(data);
@@ -260,7 +264,7 @@ fn download_chunk_old(
    let path3 = path.clone();
    let response_future = tokio::fs::File::open(path)
        .map_err(move |err| http_err!(BAD_REQUEST, "open file {:?} failed: {}", path2, err))
        .and_then(move |file| {
            env2.debug(format!("download chunk {:?}", path3));
            let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())

View File

@@ -5,9 +5,10 @@ use serde_json::{json, Value};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
use crate::api2::types::Userid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;
//use proxmox::tools;
@@ -16,7 +17,7 @@ use crate::server::formatter::*;
pub struct ReaderEnvironment {
    env_type: RpcEnvironmentType,
    result_attributes: Value,
    user: Userid,
    pub debug: bool,
    pub formatter: &'static OutputFormatter,
    pub worker: Arc<WorkerTask>,
@@ -28,7 +29,7 @@ pub struct ReaderEnvironment {
impl ReaderEnvironment {
    pub fn new(
        env_type: RpcEnvironmentType,
        user: Userid,
        worker: Arc<WorkerTask>,
        datastore: Arc<DataStore>,
        backup_dir: BackupDir,
@@ -77,7 +78,7 @@ impl RpcEnvironment for ReaderEnvironment {
    }
    fn get_user(&self) -> Option<String> {
        Some(self.user.to_string())
    }
}

View File

@@ -10,20 +10,19 @@ use proxmox::api::{
    Router,
    RpcEnvironment,
    SubdirMap,
};
use crate::api2::types::{
    DATASTORE_SCHEMA,
    RRDMode,
    RRDTimeFrameResolution,
    TaskListItem,
    Userid,
};
use crate::server;
use crate::backup::{DataStore};
use crate::config::datastore;
use crate::tools::statistics::{linear_regression};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::acl::{
@@ -74,6 +73,9 @@ use crate::config::acl::{
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// List Datastore usages and estimates
fn datastore_status(
@@ -84,13 +86,13 @@ fn datastore_status(
    let (config, _digest) = datastore::config()?;
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let mut list = Vec::new();
    for (store, (_, _)) in &config.sections {
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
        if !allowed {
            continue;
@@ -107,7 +109,7 @@ fn datastore_status(
        });
        let rrd_dir = format!("datastore/{}", store);
        let now = proxmox::tools::time::epoch_f64();
        let rrd_resolution = RRDTimeFrameResolution::Month;
        let rrd_mode = RRDMode::Average;
@@ -202,9 +204,9 @@ pub fn list_tasks(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
@@ -212,7 +214,7 @@ pub fn list_tasks(
    let list: Vec<TaskListItem> = server::read_task_list()?
        .into_iter()
        .map(TaskListItem::from)
        .filter(|entry| list_all || entry.user == userid)
        .collect();
    Ok(list.into())

src/api2/types/macros.rs (new file, +4 lines)

@ -0,0 +1,4 @@
//! Macros exported from api2::types.
#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }

View File

@@ -1,11 +1,22 @@
use anyhow::bail;
use serde::{Deserialize, Serialize};
use proxmox::api::{api, schema::*};
use proxmox::const_regex;
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
use crate::backup::CryptMode;
use crate::server::UPID;
#[macro_use]
mod macros;
#[macro_use]
mod userid;
pub use userid::{Realm, RealmRef};
pub use userid::{Username, UsernameRef};
pub use userid::Userid;
pub use userid::PROXMOX_GROUP_ID_SCHEMA;
// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
@@ -21,19 +32,6 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!())) }
// removed here: the USER_NAME/GROUP_NAME/USER_ID regex macros, the PROXMOX_USER_ID/PROXMOX_GROUP_ID
// regexes and formats, and PROXMOX_SAFE_ID_REGEX_STR! (moved to src/api2/types/userid.rs and
// src/api2/types/macros.rs, both shown below)
macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
@@ -67,12 +65,8 @@ const_regex!{
    pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");
    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
    pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
    pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
@@ -115,12 +109,6 @@ pub const DNS_NAME_FORMAT: ApiStringFormat =
pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
pub const PASSWORD_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PASSWORD_REGEX);
@@ -314,6 +302,11 @@ pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .schema();
pub const VERIFY_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run verify job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
.schema();
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
@@ -343,24 +336,6 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr
    .format(&DNS_NAME_OR_IP_FORMAT)
    .schema();
// removed here: PROXMOX_AUTH_REALM_SCHEMA, PROXMOX_USER_ID_SCHEMA and PROXMOX_GROUP_ID_SCHEMA
// (these now live in src/api2/types/userid.rs, shown below)
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
    .format(&BLOCKDEVICE_NAME_FORMAT)
    .min_length(3)
@@ -388,6 +363,10 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
            schema: BACKUP_ARCHIVE_NAME_SCHEMA
        },
    },
    owner: {
        type: Userid,
        optional: true,
    },
},
)]
#[derive(Serialize, Deserialize)]
@@ -403,7 +382,37 @@ pub struct GroupListItem {
    pub files: Vec<String>,
    /// The owner of group
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Userid>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
/// Verification was successful
Ok,
/// Verification reported one or more errors
Failed,
}
#[api(
properties: {
upid: {
schema: UPID_SCHEMA
},
state: {
type: VerifyState
},
},
)]
#[derive(Serialize, Deserialize)]
/// Task properties.
pub struct SnapshotVerifyState {
/// UPID of the verify task
pub upid: UPID,
/// State of the verification. Enum.
pub state: VerifyState,
}
#[api(
@@ -417,11 +426,23 @@ pub struct GroupListItem {
    "backup-time": {
        schema: BACKUP_TIME_SCHEMA,
    },
comment: {
schema: SINGLE_LINE_COMMENT_SCHEMA,
optional: true,
},
verification: {
type: SnapshotVerifyState,
optional: true,
},
    files: {
        items: {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA
        },
    },
owner: {
type: Userid,
optional: true,
},
    },
)]
#[derive(Serialize, Deserialize)]
@@ -431,6 +452,12 @@ pub struct SnapshotListItem {
    pub backup_type: String, // enum
    pub backup_id: String,
    pub backup_time: i64,
/// The first line from manifest "notes"
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
/// The result of the last run verify task
#[serde(skip_serializing_if="Option::is_none")]
pub verification: Option<SnapshotVerifyState>,
    /// List of contained archive files.
    pub files: Vec<BackupContent>,
    /// Overall snapshot size (sum of all archive sizes).
@@ -438,7 +465,7 @@ pub struct SnapshotListItem {
    pub size: Option<u64>,
    /// The owner of the snapshots group
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Userid>,
}
#[api(
@@ -548,6 +575,8 @@ pub struct GarbageCollectionStatus {
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
}
impl Default for GarbageCollectionStatus {
@@ -562,6 +591,7 @@ impl Default for GarbageCollectionStatus {
            removed_chunks: 0,
            pending_bytes: 0,
            pending_chunks: 0,
            removed_bad: 0,
        }
    }
}
@@ -581,7 +611,8 @@ pub struct StorageStatus {
#[api(
    properties: {
        upid: { schema: UPID_SCHEMA },
        user: { type: Userid },
    },
)]
#[derive(Serialize, Deserialize)]
@@ -601,7 +632,7 @@ pub struct TaskListItem {
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The user who started the task
    pub user: Userid,
    /// The task end time (Epoch)
    #[serde(skip_serializing_if="Option::is_none")]
    pub endtime: Option<i64>,
@@ -614,7 +645,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
    fn from(info: crate::server::TaskListInfo) -> Self {
        let (endtime, status) = info
            .state
            .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));
        TaskListItem {
            upid: info.upid_str,
@@ -624,7 +655,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
            starttime: info.upid.starttime,
            worker_type: info.upid.worker_type,
            worker_id: info.upid.worker_id,
            user: info.upid.userid,
            endtime,
            status,
        }
@@ -673,7 +704,7 @@ pub enum LinuxBondMode {
    /// Broadcast policy
    broadcast = 3,
    /// IEEE 802.3ad Dynamic link aggregation
    #[serde(rename = "802.3ad")]
    ieee802_3ad = 4,
    /// Adaptive transmit load balancing
    balance_tlb = 5,
@@ -681,6 +712,23 @@ pub enum LinuxBondMode {
    balance_alb = 6,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
/// Layer 2
layer2 = 0,
/// Layer 2+3
#[serde(rename = "layer2+3")]
layer2_3 = 1,
/// Layer 3+4
#[serde(rename = "layer3+4")]
layer3_4 = 2,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
@@ -786,7 +834,15 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
    bond_mode: {
        type: LinuxBondMode,
        optional: true,
    },
"bond-primary": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_xmit_hash_policy: {
type: BondXmitHashPolicy,
optional: true,
},
    }
)]
#[derive(Debug, Serialize, Deserialize)]
@@ -843,6 +899,10 @@ pub struct Interface {
    pub slaves: Option<Vec<String>>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub bond_mode: Option<LinuxBondMode>,
#[serde(skip_serializing_if="Option::is_none")]
#[serde(rename = "bond-primary")]
pub bond_primary: Option<String>,
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}
// Regression tests
@@ -890,9 +950,6 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
#[test]
fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
    let invalid_user_ids = [
        "x", // too short
        "xx", // too short
@@ -906,7 +963,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
    ];
    for name in invalid_user_ids.iter() {
        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
            bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
        }
    }
@@ -920,7 +977,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
    ];
    for name in valid_user_ids.iter() {
        let v = match parse_simple_value(name, &Userid::API_SCHEMA) {
            Ok(v) => v,
            Err(err) => {
                bail!("unable to parse userid '{}' - {}", name, err);

src/api2/types/userid.rs (new file, +420 lines)

@ -0,0 +1,420 @@
//! Types for user handling.
//!
//! We have [`Username`]s and [`Realm`]s. To uniquely identify a user, they must be combined into a [`Userid`].
//!
//! Since they're all string types, they're organized as follows:
//!
//! * [`Username`]: an owned user name. Internally a `String`.
//! * [`UsernameRef`]: a borrowed user name. Pairs with a `Username` the same way a `str` pairs
//! with `String`, meaning you can only make references to it.
//! * [`Realm`]: an owned realm (`String` equivalent).
//! * [`RealmRef`]: a borrowed realm (`str` equivalent).
//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separate
//! borrowed type.
//!
//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
//! compared directly. If a direct comparison is really required, they can be compared as strings
//! via the `as_str()` method. [`Realm`]s and [`Userid`]s on the other hand can be compared with
//! each other, as in those two cases the comparison has meaning.
use std::borrow::Borrow;
use std::convert::TryFrom;
use std::fmt;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use proxmox::api::api;
use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
use proxmox::const_regex;
// we only allow a limited set of characters
// colon is not allowed, because we store usernames in
// colon separated lists)!
// slash is not allowed because it is used as pve API delimiter
// also see "man useradd"
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
const_regex! {
pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
}
pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX);
pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
.format(&PROXMOX_GROUP_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
StringSchema::new("Authentication domain ID")
.format(&super::PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32);
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
#[api(
type: String,
format: &PROXMOX_USER_NAME_FORMAT,
)]
/// The user name part of a user id.
///
/// This alone does NOT uniquely identify the user and therefore does not implement `Eq`. In order
/// to compare user names directly, they need to be explicitly compared as strings by calling
/// `.as_str()`.
///
/// ```compile_fail
/// fn test(a: Username, b: Username) -> bool {
/// a == b // illegal and does not compile
/// }
/// ```
#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
pub struct Username(String);
/// A reference to a user name part of a user id. This alone does NOT uniquely identify the user.
///
/// This is like a `str` to the `String` of a [`Username`].
#[derive(Debug, Hash)]
pub struct UsernameRef(str);
#[doc(hidden)]
/// ```compile_fail
/// let a: Username = unsafe { std::mem::zeroed() };
/// let b: Username = unsafe { std::mem::zeroed() };
/// let _ = <Username as PartialEq>::eq(&a, &b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
/// ```
struct _AssertNoEqImpl;
impl UsernameRef {
fn new(s: &str) -> &Self {
unsafe { &*(s as *const str as *const UsernameRef) }
}
pub fn as_str(&self) -> &str {
&self.0
}
}
impl std::ops::Deref for Username {
type Target = UsernameRef;
fn deref(&self) -> &UsernameRef {
self.borrow()
}
}
impl Borrow<UsernameRef> for Username {
fn borrow(&self) -> &UsernameRef {
UsernameRef::new(self.as_str())
}
}
impl AsRef<UsernameRef> for Username {
fn as_ref(&self) -> &UsernameRef {
UsernameRef::new(self.as_str())
}
}
impl ToOwned for UsernameRef {
type Owned = Username;
fn to_owned(&self) -> Self::Owned {
Username(self.0.to_owned())
}
}
impl TryFrom<String> for Username {
type Error = Error;
fn try_from(s: String) -> Result<Self, Error> {
if !PROXMOX_USER_NAME_REGEX.is_match(&s) {
bail!("invalid user name");
}
Ok(Self(s))
}
}
impl<'a> TryFrom<&'a str> for &'a UsernameRef {
type Error = Error;
fn try_from(s: &'a str) -> Result<&'a UsernameRef, Error> {
if !PROXMOX_USER_NAME_REGEX.is_match(s) {
bail!("invalid name in user id");
}
Ok(UsernameRef::new(s))
}
}
#[api(schema: PROXMOX_AUTH_REALM_SCHEMA)]
/// An authentication realm.
#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
pub struct Realm(String);
/// A reference to an authentication realm.
///
/// This is like a `str` to the `String` of a `Realm`.
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct RealmRef(str);
impl RealmRef {
fn new(s: &str) -> &Self {
unsafe { &*(s as *const str as *const RealmRef) }
}
pub fn as_str(&self) -> &str {
&self.0
}
}
impl std::ops::Deref for Realm {
type Target = RealmRef;
fn deref(&self) -> &RealmRef {
self.borrow()
}
}
impl Borrow<RealmRef> for Realm {
fn borrow(&self) -> &RealmRef {
RealmRef::new(self.as_str())
}
}
impl AsRef<RealmRef> for Realm {
fn as_ref(&self) -> &RealmRef {
RealmRef::new(self.as_str())
}
}
impl ToOwned for RealmRef {
type Owned = Realm;
fn to_owned(&self) -> Self::Owned {
Realm(self.0.to_owned())
}
}
impl TryFrom<String> for Realm {
type Error = Error;
fn try_from(s: String) -> Result<Self, Error> {
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&s)
.map_err(|_| format_err!("invalid realm"))?;
Ok(Self(s))
}
}
impl<'a> TryFrom<&'a str> for &'a RealmRef {
type Error = Error;
fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> {
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(s)
.map_err(|_| format_err!("invalid realm"))?;
Ok(RealmRef::new(s))
}
}
impl PartialEq<str> for Realm {
fn eq(&self, rhs: &str) -> bool {
self.0 == rhs
}
}
impl PartialEq<&str> for Realm {
fn eq(&self, rhs: &&str) -> bool {
self.0 == *rhs
}
}
impl PartialEq<str> for RealmRef {
fn eq(&self, rhs: &str) -> bool {
self.0 == *rhs
}
}
impl PartialEq<&str> for RealmRef {
fn eq(&self, rhs: &&str) -> bool {
self.0 == **rhs
}
}
impl PartialEq<RealmRef> for Realm {
fn eq(&self, rhs: &RealmRef) -> bool {
self.0 == &rhs.0
}
}
impl PartialEq<Realm> for RealmRef {
fn eq(&self, rhs: &Realm) -> bool {
self.0 == rhs.0
}
}
impl PartialEq<Realm> for &RealmRef {
fn eq(&self, rhs: &Realm) -> bool {
(*self).0 == rhs.0
}
}
/// A complete user id consisting of a user name and a realm.
#[derive(Clone, Debug, Hash)]
pub struct Userid {
data: String,
name_len: usize,
//name: Username,
//realm: Realm,
}
impl Userid {
pub const API_SCHEMA: Schema = StringSchema::new("User ID")
.format(&PROXMOX_USER_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
const fn new(data: String, name_len: usize) -> Self {
Self { data, name_len }
}
pub fn name(&self) -> &UsernameRef {
UsernameRef::new(&self.data[..self.name_len])
}
pub fn realm(&self) -> &RealmRef {
RealmRef::new(&self.data[(self.name_len + 1)..])
}
pub fn as_str(&self) -> &str {
&self.data
}
/// Get the "backup@pam" user id.
pub fn backup_userid() -> &'static Self {
&*BACKUP_USERID
}
/// Get the "root@pam" user id.
pub fn root_userid() -> &'static Self {
&*ROOT_USERID
}
}
lazy_static! {
pub static ref BACKUP_USERID: Userid = Userid::new("backup@pam".to_string(), 6);
pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4);
}
impl Eq for Userid {}
impl PartialEq for Userid {
fn eq(&self, rhs: &Self) -> bool {
self.data == rhs.data && self.name_len == rhs.name_len
}
}
impl From<(Username, Realm)> for Userid {
fn from(parts: (Username, Realm)) -> Self {
Self::from((parts.0.as_ref(), parts.1.as_ref()))
}
}
impl From<(&UsernameRef, &RealmRef)> for Userid {
fn from(parts: (&UsernameRef, &RealmRef)) -> Self {
let data = format!("{}@{}", parts.0.as_str(), parts.1.as_str());
let name_len = parts.0.as_str().len();
Self { data, name_len }
}
}
impl fmt::Display for Userid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.data.fmt(f)
}
}
impl std::str::FromStr for Userid {
type Err = Error;
fn from_str(id: &str) -> Result<Self, Error> {
let (name, realm) = match id.as_bytes().iter().rposition(|&b| b == b'@') {
Some(pos) => (&id[..pos], &id[(pos + 1)..]),
None => bail!("not a valid user id"),
};
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm)
.map_err(|_| format_err!("invalid realm in user id"))?;
Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm))))
}
}
impl TryFrom<String> for Userid {
type Error = Error;
fn try_from(data: String) -> Result<Self, Error> {
let name_len = data
.as_bytes()
.iter()
.rposition(|&b| b == b'@')
.ok_or_else(|| format_err!("not a valid user id"))?;
PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..])
.map_err(|_| format_err!("invalid realm in user id"))?;
Ok(Self { data, name_len })
}
}
impl PartialEq<str> for Userid {
fn eq(&self, rhs: &str) -> bool {
rhs.len() > self.name_len + 2 // make sure range access below is allowed
&& rhs.starts_with(self.name().as_str())
&& rhs.as_bytes()[self.name_len] == b'@'
&& &rhs[(self.name_len + 1)..] == self.realm().as_str()
}
}
impl PartialEq<&str> for Userid {
fn eq(&self, rhs: &&str) -> bool {
*self == **rhs
}
}
impl PartialEq<String> for Userid {
fn eq(&self, rhs: &String) -> bool {
self == rhs.as_str()
}
}
proxmox::forward_deserialize_to_from_str!(Userid);
proxmox::forward_serialize_to_display!(Userid);
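
A short usage sketch may help tie the types above together. This is an editor's illustration based only on the API shown in this file (parse(), name(), realm(), root_userid()); the function name and the example user id are hypothetical, not code from the repository:

fn userid_example() -> Result<(), anyhow::Error> {
    // Parsing splits on the last '@', so the realm is everything after it.
    let userid: Userid = "john@pbs".parse()?;
    assert_eq!(userid.name().as_str(), "john");
    assert_eq!(userid.realm(), "pbs");
    // Usernames deliberately do not implement Eq; compare them as strings.
    let is_root = userid.name().as_str() == Userid::root_userid().name().as_str();
    assert!(!is_root);
    Ok(())
}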

View File

@ -10,39 +10,54 @@ use base64;
use anyhow::{bail, format_err, Error};
use serde_json::json;
use crate::api2::types::{Userid, UsernameRef, RealmRef};
pub trait ProxmoxAuthenticator {
    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
}
pub struct PAM();
impl ProxmoxAuthenticator for PAM {
    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
        auth.get_handler().set_credentials(username.as_str(), password);
        auth.authenticate()?;
        return Ok(());
    }
    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let mut child = Command::new("passwd")
            .arg(username.as_str())
            .stdin(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .map_err(|err| format_err!(
                "unable to set password for '{}' - execute passwd failed: {}",
                username.as_str(),
                err,
            ))?;
        // Note: passwd reads password twice from stdin (for verify)
        writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;
        let output = child
            .wait_with_output()
            .map_err(|err| format_err!(
                "unable to set password for '{}' - wait failed: {}",
                username.as_str(),
                err,
            ))?;
        if !output.status.success() {
            bail!(
                "unable to set password for '{}' - {}",
                username.as_str(),
                String::from_utf8_lossy(&output.stderr),
            );
        }
        Ok(())
@@ -90,23 +105,23 @@ pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error>
    Ok(())
}
const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json");
impl ProxmoxAuthenticator for PBS {
    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        match data[username.as_str()].as_str() {
            None => bail!("no password set"),
            Some(enc_password) => verify_crypt_pw(password, enc_password)?,
        }
        Ok(())
    }
    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
        let enc_password = encrypt_pw(password)?;
        let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        data[username.as_str()] = enc_password.into();
        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
        let options = proxmox::tools::fs::CreateOptions::new()
@@ -121,28 +136,18 @@ impl ProxmoxAuthenticator for PBS {
    }
}
// removed here: the old parse_userid() helper; splitting a user id into name and realm now lives in api2::types::Userid
/// Lookup the authenticator for the specified realm
pub fn lookup_authenticator(realm: &RealmRef) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
    match realm.as_str() {
        "pam" => Ok(Box::new(PAM())),
        "pbs" => Ok(Box::new(PBS())),
        _ => bail!("unknown realm '{}'", realm.as_str()),
    }
}
/// Authenticate users
pub fn authenticate_user(userid: &Userid, password: &str) -> Result<(), Error> {
    lookup_authenticator(userid.realm())?
        .authenticate_user(userid.name(), password)
}

View File

@@ -10,16 +10,16 @@ use std::path::PathBuf;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block;
use crate::api2::types::Userid;
fn compute_csrf_secret_digest(
    timestamp: i64,
    secret: &[u8],
    userid: &Userid,
) -> String {
    let mut hasher = sha::Sha256::new();
    let data = format!("{:08X}:{}:", timestamp, userid);
    hasher.update(data.as_bytes());
    hasher.update(secret);
@@ -28,19 +28,19 @@ fn compute_csrf_secret_digest(
pub fn assemble_csrf_prevention_token(
    secret: &[u8],
    userid: &Userid,
) -> String {
    let epoch = proxmox::tools::time::epoch_i64();
    let digest = compute_csrf_secret_digest(epoch, secret, userid);
    format!("{:08X}:{}", epoch, digest)
}
pub fn verify_csrf_prevention_token(
    secret: &[u8],
    userid: &Userid,
    token: &str,
    min_age: i64,
    max_age: i64,
@@ -62,13 +62,13 @@ pub fn verify_csrf_prevention_token(
    let ttime = i64::from_str_radix(timestamp, 16).
        map_err(|err| format_err!("timestamp format error - {}", err))?;
    let digest = compute_csrf_secret_digest(ttime, secret, userid);
    if digest != sig {
        bail!("invalid signature.");
    }
    let now = proxmox::tools::time::epoch_i64();
    let age = now - ttime;
    if age < min_age {

View File

@@ -120,6 +120,8 @@ macro_rules! PROXMOX_BACKUP_READER_PROTOCOL_ID_V1 {
/// Unix system user used by proxmox-backup-proxy
pub const BACKUP_USER_NAME: &str = "backup";
/// Unix system group used by proxmox-backup-proxy
pub const BACKUP_GROUP_NAME: &str = "backup";
/// Return User info for the 'backup' user (``getpwnam_r(3)``)
pub fn backup_user() -> Result<nix::unistd::User, Error> {
@@ -129,6 +131,14 @@ pub fn backup_user() -> Result<nix::unistd::User, Error> {
    }
}
/// Return Group info for the 'backup' group (``getgrnam(3)``)
pub fn backup_group() -> Result<nix::unistd::Group, Error> {
match nix::unistd::Group::from_name(BACKUP_GROUP_NAME)? {
Some(group) => Ok(group),
None => bail!("Unable to lookup backup user."),
}
}
mod file_formats;
pub use file_formats::*;

View File

@@ -4,8 +4,6 @@ use anyhow::{bail, format_err, Error};
use regex::Regex;
use std::os::unix::io::RawFd;
use std::path::{PathBuf, Path};
use lazy_static::lazy_static;
@@ -45,6 +43,31 @@ pub struct BackupGroup {
    backup_id: String,
}
impl std::cmp::Ord for BackupGroup {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let type_order = self.backup_type.cmp(&other.backup_type);
if type_order != std::cmp::Ordering::Equal {
return type_order;
}
// try to compare IDs numerically
let id_self = self.backup_id.parse::<u64>();
let id_other = other.backup_id.parse::<u64>();
match (id_self, id_other) {
(Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
(Ok(_), Err(_)) => std::cmp::Ordering::Less,
(Err(_), Ok(_)) => std::cmp::Ordering::Greater,
_ => self.backup_id.cmp(&other.backup_id),
}
}
}
impl std::cmp::PartialOrd for BackupGroup {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
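// Editor's sketch (not from the repository): what the ordering above yields.
// The backup type is compared first; IDs that parse as u64 are compared
// numerically and sort before IDs that do not parse, assuming BackupGroup::new
// as defined below.
//
//     let a = BackupGroup::new("vm", "9");
//     let b = BackupGroup::new("vm", "100");
//     let c = BackupGroup::new("vm", "alpha");
//     assert!(a < b);   // 9 < 100 numerically, although "100" < "9" lexically
//     assert!(b < c);   // numeric IDs sort before non-numeric ones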
impl BackupGroup {
pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
@ -80,8 +103,7 @@ impl BackupGroup {
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
- let dt = backup_time.parse::<DateTime<Utc>>()?;
- let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp());
+ let backup_dir = BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
let files = list_backup_files(l2_fd, backup_time)?;
list.push(BackupInfo { backup_dir, files });
@ -91,7 +113,7 @@ impl BackupGroup {
Ok(list)
}
- pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<DateTime<Utc>>, Error> {
+ pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
let mut last = None;
@ -117,11 +139,11 @@ impl BackupGroup {
}
}
- let dt = backup_time.parse::<DateTime<Utc>>()?;
+ let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
- if let Some(last_dt) = last {
+ if let Some(last_timestamp) = last {
- if dt > last_dt { last = Some(dt); }
+ if timestamp > last_timestamp { last = Some(timestamp); }
} else {
- last = Some(dt);
+ last = Some(timestamp);
}
Ok(())
@ -173,50 +195,68 @@ impl std::str::FromStr for BackupGroup {
/// Uniquely identify a Backup (relative to data store)
///
/// We also call this a backup snapshot.
- #[derive(Debug, Clone)]
+ #[derive(Debug, Eq, PartialEq, Clone)]
pub struct BackupDir {
/// Backup group
group: BackupGroup,
/// Backup timestamp
- backup_time: DateTime<Utc>,
+ backup_time: i64,
+ // backup_time as rfc3339
+ backup_time_string: String
}
impl BackupDir {
- pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Self
+ pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
where
T: Into<String>,
U: Into<String>,
{
// Note: makes sure that nanoseconds is 0 let group = BackupGroup::new(backup_type.into(), backup_id.into());
Self { BackupDir::with_group(group, backup_time)
group: BackupGroup::new(backup_type.into(), backup_id.into()),
backup_time: Utc.timestamp(timestamp, 0),
}
} }
pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Self {
Self { group, backup_time: Utc.timestamp(timestamp, 0) } pub fn with_rfc3339<T,U,V>(backup_type: T, backup_id: U, backup_time_string: V) -> Result<Self, Error>
where
T: Into<String>,
U: Into<String>,
V: Into<String>,
{
let backup_time_string = backup_time_string.into();
let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
let group = BackupGroup::new(backup_type.into(), backup_id.into());
Ok(Self { group, backup_time, backup_time_string })
}
pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
let backup_time_string = Self::backup_time_to_string(backup_time)?;
Ok(Self { group, backup_time, backup_time_string })
} }
pub fn group(&self) -> &BackupGroup {
&self.group
}
- pub fn backup_time(&self) -> DateTime<Utc> {
+ pub fn backup_time(&self) -> i64 {
self.backup_time
}
+ pub fn backup_time_string(&self) -> &str {
+ &self.backup_time_string
+ }
pub fn relative_path(&self) -> PathBuf {
let mut relative_path = self.group.group_path();
- relative_path.push(Self::backup_time_to_string(self.backup_time));
+ relative_path.push(self.backup_time_string.clone());
relative_path
}
- pub fn backup_time_to_string(backup_time: DateTime<Utc>) -> String {
- backup_time.to_rfc3339_opts(SecondsFormat::Secs, true)
+ pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
+ // fixme: can this fail? (avoid unwrap)
+ proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
}
}
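With this change the snapshot timestamp is kept both as epoch seconds and as the RFC 3339 string used for the on-disk directory name. A small round-trip sketch, assuming the proxmox time helpers referenced in the diff above:

```rust
use anyhow::Error;

// Assumes proxmox::tools::time::{parse_rfc3339, epoch_to_rfc3339_utc} as used
// above. For the UTC ("...Z") form used in snapshot directory names the
// epoch <-> string round trip should be lossless, so paths stay stable.
fn roundtrip(backup_time_string: &str) -> Result<(), Error> {
    let epoch: i64 = proxmox::tools::time::parse_rfc3339(backup_time_string)?;
    let rendered = proxmox::tools::time::epoch_to_rfc3339_utc(epoch)?;
    assert_eq!(backup_time_string, rendered);
    Ok(())
}
```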
@ -230,9 +270,11 @@ impl std::str::FromStr for BackupDir {
let cap = SNAPSHOT_PATH_REGEX.captures(path)
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
- let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
- let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
- Ok(BackupDir::from((group, backup_time.timestamp())))
+ BackupDir::with_rfc3339(
+ cap.get(1).unwrap().as_str(),
+ cap.get(2).unwrap().as_str(),
+ cap.get(3).unwrap().as_str(),
+ )
}
}
@ -240,14 +282,7 @@ impl std::fmt::Display for BackupDir {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let backup_type = self.group.backup_type();
let id = self.group.backup_id();
- let time = Self::backup_time_to_string(self.backup_time);
- write!(f, "{}/{}/{}", backup_type, id, time)
+ write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
}
}
- impl From<(BackupGroup, i64)> for BackupDir {
- fn from((group, timestamp): (BackupGroup, i64)) -> Self {
- Self { group, backup_time: Utc.timestamp(timestamp, 0) }
- }
- }
@ -272,9 +307,13 @@ impl BackupInfo {
}
/// Finds the latest backup inside a backup group
- pub fn last_backup(base_path: &Path, group: &BackupGroup) -> Result<Option<BackupInfo>, Error> {
+ pub fn last_backup(base_path: &Path, group: &BackupGroup, only_finished: bool)
+ -> Result<Option<BackupInfo>, Error>
+ {
let backups = group.list_backups(base_path)?;
- Ok(backups.into_iter().max_by_key(|item| item.backup_dir.backup_time()))
+ Ok(backups.into_iter()
+ .filter(|item| !only_finished || item.is_finished())
+ .max_by_key(|item| item.backup_dir.backup_time()))
}
pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
@ -301,13 +340,12 @@ impl BackupInfo {
if file_type != nix::dir::Type::Directory { return Ok(()); }
tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
- tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
+ tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time_string, file_type| {
if file_type != nix::dir::Type::Directory { return Ok(()); }
- let dt = backup_time.parse::<DateTime<Utc>>()?;
- let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp());
+ let backup_dir = BackupDir::with_rfc3339(backup_type, backup_id, backup_time_string)?;
- let files = list_backup_files(l2_fd, backup_time)?;
+ let files = list_backup_files(l2_fd, backup_time_string)?;
list.push(BackupInfo { backup_dir, files });
@ -317,6 +355,11 @@ impl BackupInfo {
})?;
Ok(list)
}
+ pub fn is_finished(&self) -> bool {
+ // backup is considered unfinished if there is no manifest
+ self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
+ }
}
fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {

View File

@ -5,7 +5,6 @@ use std::io::{Read, Write, Seek, SeekFrom};
use std::os::unix::ffi::OsStrExt; use std::os::unix::ffi::OsStrExt;
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use chrono::offset::{TimeZone, Local};
use pathpatterns::{MatchList, MatchType}; use pathpatterns::{MatchList, MatchType};
use proxmox::tools::io::ReadExt; use proxmox::tools::io::ReadExt;
@ -533,17 +532,17 @@ impl <R: Read + Seek> CatalogReader<R> {
self.dump_dir(&path, pos)?; self.dump_dir(&path, pos)?;
} }
CatalogEntryType::File => { CatalogEntryType::File => {
let dt = Local let mut mtime_string = mtime.to_string();
.timestamp_opt(mtime as i64, 0) if let Ok(s) = proxmox::tools::time::strftime_local("%FT%TZ", mtime as i64) {
.single() // chrono docs say timestamp_opt can only be None or Single! mtime_string = s;
.unwrap_or_else(|| Local.timestamp(0, 0)); }
println!( println!(
"{} {:?} {} {}", "{} {:?} {} {}",
etype, etype,
path, path,
size, size,
dt.to_rfc3339_opts(chrono::SecondsFormat::Secs, false), mtime_string,
); );
} }
_ => { _ => {

View File

@ -3,7 +3,7 @@ use std::ffi::{CStr, CString, OsStr, OsString};
use std::future::Future; use std::future::Future;
use std::io::Write; use std::io::Write;
use std::mem; use std::mem;
use std::os::unix::ffi::OsStrExt; use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::pin::Pin; use std::pin::Pin;
@ -1073,6 +1073,7 @@ impl<'a> ExtractorState<'a> {
} }
self.path.extend(&entry.name); self.path.extend(&entry.name);
self.extractor.set_path(OsString::from_vec(self.path.clone()));
self.handle_entry(entry).await?; self.handle_entry(entry).await?;
} }

View File

@ -104,12 +104,11 @@ impl ChunkStore {
} }
let percentage = (i*100)/(64*1024); let percentage = (i*100)/(64*1024);
if percentage != last_percentage { if percentage != last_percentage {
eprintln!("Percentage done: {}", percentage); // eprintln!("ChunkStore::create {}%", percentage);
last_percentage = percentage; last_percentage = percentage;
} }
} }
Self::open(name, base) Self::open(name, base)
} }
@ -184,26 +183,10 @@ impl ChunkStore {
Ok(true) Ok(true)
} }
pub fn read_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let (chunk_path, digest_str) = self.chunk_path(digest);
let mut file = std::fs::File::open(&chunk_path)
.map_err(|err| {
format_err!(
"store '{}', unable to read chunk '{}' - {}",
self.name,
digest_str,
err,
)
})?;
DataBlob::load(&mut file)
}
pub fn get_chunk_iterator( pub fn get_chunk_iterator(
&self, &self,
) -> Result< ) -> Result<
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)> + std::iter::FusedIterator, impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
Error Error
> { > {
use nix::dir::Dir; use nix::dir::Dir;
@ -235,19 +218,21 @@ impl ChunkStore {
Some(Ok(entry)) => { Some(Ok(entry)) => {
// skip files if they're not a hash // skip files if they're not a hash
let bytes = entry.file_name().to_bytes(); let bytes = entry.file_name().to_bytes();
if bytes.len() != 64 { if bytes.len() != 64 && bytes.len() != 64 + ".0.bad".len() {
continue; continue;
} }
if !bytes.iter().all(u8::is_ascii_hexdigit) { if !bytes.iter().take(64).all(u8::is_ascii_hexdigit) {
continue; continue;
} }
return Some((Ok(entry), percentage));
let bad = bytes.ends_with(".bad".as_bytes());
return Some((Ok(entry), percentage, bad));
} }
Some(Err(err)) => { Some(Err(err)) => {
// stop after first error // stop after first error
done = true; done = true;
// and pass the error through: // and pass the error through:
return Some((Err(err), percentage)); return Some((Err(err), percentage, false));
} }
None => (), // open next directory None => (), // open next directory
} }
@ -277,7 +262,7 @@ impl ChunkStore {
// other errors are fatal, so end our iteration // other errors are fatal, so end our iteration
done = true; done = true;
// and pass the error through: // and pass the error through:
return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage)); return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage, false));
} }
} }
} }
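The chunk iterator now also reports quarantined chunks. A standalone sketch of the filename filter it applies (hypothetical helper mirroring the checks above):

```rust
/// Classify a chunk-store directory entry: None if it is not a chunk file,
/// Some(false) for a regular chunk (64 hex digits), Some(true) for a
/// quarantined copy named "<64 hex digits>.<n>.bad".
fn classify_chunk_name(name: &[u8]) -> Option<bool> {
    if name.len() != 64 && name.len() != 64 + ".0.bad".len() {
        return None;
    }
    if !name.iter().take(64).all(u8::is_ascii_hexdigit) {
        return None;
    }
    Some(name.ends_with(b".bad"))
}
```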
@ -291,14 +276,14 @@ impl ChunkStore {
pub fn sweep_unused_chunks( pub fn sweep_unused_chunks(
&self, &self,
oldest_writer: i64, oldest_writer: i64,
phase1_start_time: i64,
status: &mut GarbageCollectionStatus, status: &mut GarbageCollectionStatus,
worker: &WorkerTask, worker: &WorkerTask,
) -> Result<(), Error> { ) -> Result<(), Error> {
use nix::sys::stat::fstatat; use nix::sys::stat::fstatat;
use nix::unistd::{unlinkat, UnlinkatFlags};
let now = unsafe { libc::time(std::ptr::null_mut()) }; let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)
let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)
if oldest_writer < min_atime { if oldest_writer < min_atime {
min_atime = oldest_writer; min_atime = oldest_writer;
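The sweep cutoff is now derived from the phase-1 start time instead of the current time. A minimal sketch of the rule (assumed semantics, matching the lines above):

```rust
// A chunk may only be swept if its atime is older than both the GC phase-1
// start minus 24 hours (to stay safe with the relatime mount option) and the
// start time of the oldest still-running writer.
fn atime_cutoff(phase1_start_time: i64, oldest_writer: i64) -> i64 {
    std::cmp::min(phase1_start_time - 24 * 3600, oldest_writer)
}
```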
@ -309,10 +294,10 @@ impl ChunkStore {
let mut last_percentage = 0; let mut last_percentage = 0;
let mut chunk_count = 0; let mut chunk_count = 0;
for (entry, percentage) in self.get_chunk_iterator()? { for (entry, percentage, bad) in self.get_chunk_iterator()? {
if last_percentage != percentage { if last_percentage != percentage {
last_percentage = percentage; last_percentage = percentage;
worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count)); worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
} }
worker.fail_on_abort()?; worker.fail_on_abort()?;
@ -338,14 +323,47 @@ impl ChunkStore {
let lock = self.mutex.lock(); let lock = self.mutex.lock();
if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) { if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
if stat.st_atime < min_atime { if bad {
// filename validity checked in iterator
let orig_filename = std::ffi::CString::new(&filename.to_bytes()[..64])?;
match fstatat(
dirfd,
orig_filename.as_c_str(),
nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
{
Ok(_) => {
match unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
Err(err) =>
worker.warn(format!(
"unlinking corrupt chunk {:?} failed on store '{}' - {}",
filename,
self.name,
err,
)),
Ok(_) => {
status.removed_bad += 1;
status.removed_bytes += stat.st_size as u64;
}
}
},
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
// chunk hasn't been rewritten yet, keep .bad file
},
Err(err) => {
// some other error, warn user and keep .bad file around too
worker.warn(format!(
"error during stat on '{:?}' - {}",
orig_filename,
err,
));
}
}
} else if stat.st_atime < min_atime {
//let age = now - stat.st_atime; //let age = now - stat.st_atime;
//println!("UNLINK {} {:?}", age/(3600*24), filename); //println!("UNLINK {} {:?}", age/(3600*24), filename);
let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) }; if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
if res != 0 {
let err = nix::Error::last();
bail!( bail!(
"unlink chunk {:?} failed on store '{}' - {}", "unlinking chunk {:?} failed on store '{}' - {}",
filename, filename,
self.name, self.name,
err, err,
@ -383,6 +401,7 @@ impl ChunkStore {
if let Ok(metadata) = std::fs::metadata(&chunk_path) { if let Ok(metadata) = std::fs::metadata(&chunk_path) {
if metadata.is_file() { if metadata.is_file() {
self.touch_chunk(digest)?;
return Ok((true, metadata.len())); return Ok((true, metadata.len()));
} else { } else {
bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str); bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);

View File

@ -10,7 +10,6 @@
use std::io::Write; use std::io::Write;
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use chrono::{Local, TimeZone, DateTime};
use openssl::hash::MessageDigest; use openssl::hash::MessageDigest;
use openssl::pkcs5::pbkdf2_hmac; use openssl::pkcs5::pbkdf2_hmac;
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode}; use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
@ -216,10 +215,10 @@ impl CryptConfig {
pub fn generate_rsa_encoded_key( pub fn generate_rsa_encoded_key(
&self, &self,
rsa: openssl::rsa::Rsa<openssl::pkey::Public>, rsa: openssl::rsa::Rsa<openssl::pkey::Public>,
created: DateTime<Local>, created: i64,
) -> Result<Vec<u8>, Error> { ) -> Result<Vec<u8>, Error> {
let modified = Local.timestamp(Local::now().timestamp(), 0); let modified = proxmox::tools::time::epoch_i64();
let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() }; let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
let data = serde_json::to_string(&key_config)?.as_bytes().to_vec(); let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();

View File

@ -36,6 +36,11 @@ impl DataBlob {
&self.raw_data &self.raw_data
} }
/// Returns raw_data size
pub fn raw_size(&self) -> u64 {
self.raw_data.len() as u64
}
/// Consume self and returns raw_data /// Consume self and returns raw_data
pub fn into_inner(self) -> Vec<u8> { pub fn into_inner(self) -> Vec<u8> {
self.raw_data self.raw_data
@ -66,7 +71,7 @@ impl DataBlob {
hasher.finalize() hasher.finalize()
} }
/// verify the CRC32 checksum // verify the CRC32 checksum
pub fn verify_crc(&self) -> Result<(), Error> { pub fn verify_crc(&self) -> Result<(), Error> {
let expected_crc = self.compute_crc(); let expected_crc = self.compute_crc();
if expected_crc != self.crc() { if expected_crc != self.crc() {
@ -180,16 +185,23 @@ impl DataBlob {
} }
/// Decode blob data /// Decode blob data
pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> { pub fn decode(&self, config: Option<&CryptConfig>, digest: Option<&[u8; 32]>) -> Result<Vec<u8>, Error> {
let magic = self.magic(); let magic = self.magic();
if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 { if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
let data_start = std::mem::size_of::<DataBlobHeader>(); let data_start = std::mem::size_of::<DataBlobHeader>();
Ok(self.raw_data[data_start..].to_vec()) let data = self.raw_data[data_start..].to_vec();
if let Some(digest) = digest {
Self::verify_digest(&data, None, digest)?;
}
Ok(data)
} else if magic == &COMPRESSED_BLOB_MAGIC_1_0 { } else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
let data_start = std::mem::size_of::<DataBlobHeader>(); let data_start = std::mem::size_of::<DataBlobHeader>();
let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?; let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
if let Some(digest) = digest {
Self::verify_digest(&data, None, digest)?;
}
Ok(data) Ok(data)
} else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 { } else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
let header_len = std::mem::size_of::<EncryptedDataBlobHeader>(); let header_len = std::mem::size_of::<EncryptedDataBlobHeader>();
@ -203,6 +215,9 @@ impl DataBlob {
} else { } else {
config.decode_uncompressed_chunk(&self.raw_data[header_len..], &head.iv, &head.tag)? config.decode_uncompressed_chunk(&self.raw_data[header_len..], &head.iv, &head.tag)?
}; };
if let Some(digest) = digest {
Self::verify_digest(&data, Some(config), digest)?;
}
Ok(data) Ok(data)
} else { } else {
bail!("unable to decrypt blob - missing CryptConfig"); bail!("unable to decrypt blob - missing CryptConfig");
@ -212,13 +227,17 @@ impl DataBlob {
} }
} }
/// Load blob from ``reader`` /// Load blob from ``reader``, verify CRC
pub fn load(reader: &mut dyn std::io::Read) -> Result<Self, Error> { pub fn load_from_reader(reader: &mut dyn std::io::Read) -> Result<Self, Error> {
let mut data = Vec::with_capacity(1024*1024); let mut data = Vec::with_capacity(1024*1024);
reader.read_to_end(&mut data)?; reader.read_to_end(&mut data)?;
Self::from_raw(data) let blob = Self::from_raw(data)?;
blob.verify_crc()?;
Ok(blob)
} }
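Taken together with the digest-aware decode() above, reading an unencrypted blob now looks roughly like this (a sketch using the DataBlob methods introduced in this diff):

```rust
use anyhow::Error;

// Sketch: load_from_reader() already verifies the CRC32 of the on-disk blob,
// and passing the expected digest to decode() additionally verifies the
// SHA-256 of the decoded payload (no CryptConfig, i.e. unencrypted data).
fn read_verified(path: &std::path::Path, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
    let mut file = std::fs::File::open(path)?;
    let blob = DataBlob::load_from_reader(&mut file)?;
    let data = blob.decode(None, Some(digest))?;
    Ok(data)
}
```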
/// Create Instance from raw data /// Create Instance from raw data
@ -254,7 +273,7 @@ impl DataBlob {
/// To do that, we need to decompress data first. Please note that /// To do that, we need to decompress data first. Please note that
/// this is not possible for encrypted chunks. This function simply return Ok /// this is not possible for encrypted chunks. This function simply return Ok
/// for encrypted chunks. /// for encrypted chunks.
/// Note: This does not call verify_crc /// Note: This does not call verify_crc, because this is usually done in load
pub fn verify_unencrypted( pub fn verify_unencrypted(
&self, &self,
expected_chunk_size: usize, expected_chunk_size: usize,
@ -267,12 +286,26 @@ impl DataBlob {
return Ok(()); return Ok(());
} }
let data = self.decode(None)?; // verifies digest!
let data = self.decode(None, Some(expected_digest))?;
if expected_chunk_size != data.len() { if expected_chunk_size != data.len() {
bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len()); bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
} }
let digest = openssl::sha::sha256(&data);
Ok(())
}
fn verify_digest(
data: &[u8],
config: Option<&CryptConfig>,
expected_digest: &[u8; 32],
) -> Result<(), Error> {
let digest = match config {
Some(config) => config.compute_digest(data),
None => openssl::sha::sha256(data),
};
if &digest != expected_digest { if &digest != expected_digest {
bail!("detected chunk with wrong digest."); bail!("detected chunk with wrong digest.");
} }

View File

@ -6,7 +6,9 @@ use std::convert::TryFrom;
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static; use lazy_static::lazy_static;
use chrono::{DateTime, Utc}; use serde_json::Value;
use proxmox::tools::fs::{replace_file, CreateOptions};
use super::backup_info::{BackupGroup, BackupDir}; use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore; use super::chunk_store::ChunkStore;
@ -15,11 +17,12 @@ use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest}; use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*; use super::index::*;
use super::{DataBlob, ArchiveType, archive_type}; use super::{DataBlob, ArchiveType, archive_type};
use crate::backup::CryptMode;
use crate::config::datastore; use crate::config::datastore;
use crate::server::WorkerTask; use crate::server::WorkerTask;
use crate::tools; use crate::tools;
use crate::api2::types::GarbageCollectionStatus; use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
use crate::api2::types::{GarbageCollectionStatus, Userid};
lazy_static! { lazy_static! {
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new()); static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
@ -67,6 +70,10 @@ impl DataStore {
let path = store_config["path"].as_str().unwrap(); let path = store_config["path"].as_str().unwrap();
Self::open_with_path(store_name, Path::new(path))
}
pub fn open_with_path(store_name: &str, path: &Path) -> Result<Self, Error> {
let chunk_store = ChunkStore::open(store_name, path)?; let chunk_store = ChunkStore::open(store_name, path)?;
let gc_status = GarbageCollectionStatus::default(); let gc_status = GarbageCollectionStatus::default();
@ -81,7 +88,7 @@ impl DataStore {
pub fn get_chunk_iterator( pub fn get_chunk_iterator(
&self, &self,
) -> Result< ) -> Result<
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)>, impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)>,
Error Error
> { > {
self.chunk_store.get_chunk_iterator() self.chunk_store.get_chunk_iterator()
@ -197,6 +204,8 @@ impl DataStore {
let full_path = self.group_path(backup_group); let full_path = self.group_path(backup_group);
let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
log::info!("removing backup group {:?}", full_path); log::info!("removing backup group {:?}", full_path);
std::fs::remove_dir_all(&full_path) std::fs::remove_dir_all(&full_path)
.map_err(|err| { .map_err(|err| {
@ -211,10 +220,15 @@ impl DataStore {
} }
/// Remove a backup directory including all content /// Remove a backup directory including all content
pub fn remove_backup_dir(&self, backup_dir: &BackupDir) -> Result<(), Error> { pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {
let full_path = self.snapshot_path(backup_dir); let full_path = self.snapshot_path(backup_dir);
let _guard;
if !force {
_guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
}
log::info!("removing backup snapshot {:?}", full_path); log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path) std::fs::remove_dir_all(&full_path)
.map_err(|err| { .map_err(|err| {
@ -231,7 +245,7 @@ impl DataStore {
/// Returns the time of the last successful backup /// Returns the time of the last successful backup
/// ///
/// Or None if there is no backup in the group (or the group dir does not exist). /// Or None if there is no backup in the group (or the group dir does not exist).
pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<DateTime<Utc>>, Error> { pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
let base_path = self.base_path(); let base_path = self.base_path();
let mut group_path = base_path.clone(); let mut group_path = base_path.clone();
group_path.push(backup_group.group_path()); group_path.push(backup_group.group_path());
@ -246,16 +260,21 @@ impl DataStore {
/// Returns the backup owner. /// Returns the backup owner.
/// ///
/// The backup owner is the user who first created the backup group. /// The backup owner is the user who first created the backup group.
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> { pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
let mut full_path = self.base_path(); let mut full_path = self.base_path();
full_path.push(backup_group.group_path()); full_path.push(backup_group.group_path());
full_path.push("owner"); full_path.push("owner");
let owner = proxmox::tools::fs::file_read_firstline(full_path)?; let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
Ok(owner.trim_end().to_string()) // remove trailing newline Ok(owner.trim_end().parse()?) // remove trailing newline
} }
/// Set the backup owner. /// Set the backup owner.
pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> { pub fn set_owner(
&self,
backup_group: &BackupGroup,
userid: &Userid,
force: bool,
) -> Result<(), Error> {
let mut path = self.base_path(); let mut path = self.base_path();
path.push(backup_group.group_path()); path.push(backup_group.group_path());
path.push("owner"); path.push("owner");
@ -279,12 +298,17 @@ impl DataStore {
Ok(()) Ok(())
} }
/// Create a backup group if it does not already exists. /// Create (if it does not already exists) and lock a backup group
/// ///
/// And set the owner to 'userid'. If the group already exists, it returns the /// And set the owner to 'userid'. If the group already exists, it returns the
/// current owner (instead of setting the owner). /// current owner (instead of setting the owner).
pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> { ///
/// This also acquires an exclusive lock on the directory and returns the lock guard.
pub fn create_locked_backup_group(
&self,
backup_group: &BackupGroup,
userid: &Userid,
) -> Result<(Userid, DirLockGuard), Error> {
// create intermediate path first: // create intermediate path first:
let base_path = self.base_path(); let base_path = self.base_path();
@ -297,13 +321,15 @@ impl DataStore {
// create the last component now // create the last component now
match std::fs::create_dir(&full_path) { match std::fs::create_dir(&full_path) {
Ok(_) => { Ok(_) => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
self.set_owner(backup_group, userid, false)?; self.set_owner(backup_group, userid, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure let owner = self.get_owner(backup_group)?; // just to be sure
Ok(owner) Ok((owner, guard))
} }
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => { Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
let owner = self.get_owner(backup_group)?; // just to be sure let owner = self.get_owner(backup_group)?; // just to be sure
Ok(owner) Ok((owner, guard))
} }
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err), Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
} }
@ -312,15 +338,20 @@ impl DataStore {
/// Creates a new backup snapshot inside a BackupGroup /// Creates a new backup snapshot inside a BackupGroup
/// ///
/// The BackupGroup directory needs to exist. /// The BackupGroup directory needs to exist.
pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> { pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
-> Result<(PathBuf, bool, DirLockGuard), Error>
{
let relative_path = backup_dir.relative_path(); let relative_path = backup_dir.relative_path();
let mut full_path = self.base_path(); let mut full_path = self.base_path();
full_path.push(&relative_path); full_path.push(&relative_path);
let lock = ||
lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");
match std::fs::create_dir(&full_path) { match std::fs::create_dir(&full_path) {
Ok(_) => Ok((relative_path, true)), Ok(_) => Ok((relative_path, true, lock()?)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)), Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
Err(e) => Err(e) Err(e) => Err(e.into())
} }
} }
@ -391,8 +422,8 @@ impl DataStore {
tools::fail_on_shutdown()?; tools::fail_on_shutdown()?;
let digest = index.index_digest(pos).unwrap(); let digest = index.index_digest(pos).unwrap();
if let Err(err) = self.chunk_store.touch_chunk(digest) { if let Err(err) = self.chunk_store.touch_chunk(digest) {
bail!("unable to access chunk {}, required by {:?} - {}", worker.warn(&format!("warning: unable to access chunk {}, required by {:?} - {}",
proxmox::tools::digest_to_hex(digest), file_name, err); proxmox::tools::digest_to_hex(digest), file_name, err));
} }
} }
Ok(()) Ok(())
@ -402,6 +433,12 @@ impl DataStore {
let image_list = self.list_images()?; let image_list = self.list_images()?;
let image_count = image_list.len();
let mut done = 0;
let mut last_percentage: usize = 0;
for path in image_list { for path in image_list {
worker.fail_on_abort()?; worker.fail_on_abort()?;
@ -416,6 +453,14 @@ impl DataStore {
self.index_mark_used_chunks(index, &path, status, worker)?; self.index_mark_used_chunks(index, &path, status, worker)?;
} }
} }
done += 1;
let percentage = done*100/image_count;
if percentage > last_percentage {
worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
percentage, done, image_count));
last_percentage = percentage;
}
} }
Ok(()) Ok(())
@ -435,9 +480,8 @@ impl DataStore {
let _exclusive_lock = self.chunk_store.try_exclusive_lock()?; let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
let now = unsafe { libc::time(std::ptr::null_mut()) }; let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);
let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
let mut gc_status = GarbageCollectionStatus::default(); let mut gc_status = GarbageCollectionStatus::default();
gc_status.upid = Some(worker.to_string()); gc_status.upid = Some(worker.to_string());
@ -447,26 +491,29 @@ impl DataStore {
self.mark_used_chunks(&mut gc_status, &worker)?; self.mark_used_chunks(&mut gc_status, &worker)?;
worker.log("Start GC phase2 (sweep unused chunks)"); worker.log("Start GC phase2 (sweep unused chunks)");
self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?; self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;
worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes)); worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks)); worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
if gc_status.pending_bytes > 0 { if gc_status.pending_bytes > 0 {
worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks)); worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
}
if gc_status.removed_bad > 0 {
worker.log(&format!("Removed bad files: {}", gc_status.removed_bad));
} }
worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes)); worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));
if gc_status.index_data_bytes > 0 { if gc_status.index_data_bytes > 0 {
let comp_per = (gc_status.disk_bytes*100)/gc_status.index_data_bytes; let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
worker.log(&format!("Disk bytes: {} ({} %)", gc_status.disk_bytes, comp_per)); worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
} }
worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks)); worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));
if gc_status.disk_chunks > 0 { if gc_status.disk_chunks > 0 {
let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64); let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
worker.log(&format!("Average chunk size: {}", avg_chunk)); worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
} }
*self.last_gc_status.lock().unwrap() = gc_status; *self.last_gc_status.lock().unwrap() = gc_status;
@ -498,31 +545,69 @@ impl DataStore {
self.chunk_store.insert_chunk(chunk, digest) self.chunk_store.insert_chunk(chunk, digest)
} }
pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> { pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
let blob = self.chunk_store.read_chunk(digest)?;
blob.verify_crc()?;
blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
Ok(())
}
pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
let mut path = self.base_path(); let mut path = self.base_path();
path.push(backup_dir.relative_path()); path.push(backup_dir.relative_path());
path.push(filename); path.push(filename);
let raw_data = proxmox::tools::fs::file_get_contents(&path)?; proxmox::try_block!({
let raw_size = raw_data.len() as u64; let mut file = std::fs::File::open(&path)?;
let blob = DataBlob::from_raw(raw_data)?; DataBlob::load_from_reader(&mut file)
Ok((blob, raw_size)) }).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
} }
pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
proxmox::try_block!({
let mut file = std::fs::File::open(&chunk_path)?;
DataBlob::load_from_reader(&mut file)
}).map_err(|err| format_err!(
"store '{}', unable to load chunk '{}' - {}",
self.name(),
digest_str,
err,
))
}
pub fn load_manifest( pub fn load_manifest(
&self, &self,
backup_dir: &BackupDir, backup_dir: &BackupDir,
) -> Result<(BackupManifest, CryptMode, u64), Error> { ) -> Result<(BackupManifest, u64), Error> {
let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?; let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
let crypt_mode = blob.crypt_mode()?; let raw_size = blob.raw_size();
let manifest = BackupManifest::try_from(blob)?; let manifest = BackupManifest::try_from(blob)?;
Ok((manifest, crypt_mode, raw_size)) Ok((manifest, raw_size))
}
pub fn load_manifest_json(
&self,
backup_dir: &BackupDir,
) -> Result<Value, Error> {
let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
// no expected digest available
let manifest_data = blob.decode(None, None)?;
let manifest: Value = serde_json::from_slice(&manifest_data[..])?;
Ok(manifest)
}
pub fn store_manifest(
&self,
backup_dir: &BackupDir,
manifest: Value,
) -> Result<(), Error> {
let manifest = serde_json::to_string_pretty(&manifest)?;
let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
let raw_data = blob.raw_data();
let mut path = self.base_path();
path.push(backup_dir.relative_path());
path.push(MANIFEST_BLOB_NAME);
replace_file(&path, raw_data, CreateOptions::new())?;
Ok(())
} }
} }
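A hypothetical call site for the two new manifest helpers, e.g. to patch state into the unprotected part of a manifest (the "verify_state" key is only an example, not taken from this diff):

```rust
use anyhow::Error;
use serde_json::json;

// Sketch: read the manifest as plain JSON, modify only the unprotected
// section, and write it back via store_manifest().
fn update_unprotected(store: &DataStore, snapshot: &BackupDir) -> Result<(), Error> {
    let mut manifest = store.load_manifest_json(snapshot)?;
    manifest["unprotected"]["verify_state"] = json!("ok"); // example key/value
    store.store_manifest(snapshot, manifest)?;
    Ok(())
}
```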

View File

@ -11,7 +11,6 @@ use anyhow::{bail, format_err, Error};
use proxmox::tools::io::ReadExt; use proxmox::tools::io::ReadExt;
use proxmox::tools::uuid::Uuid; use proxmox::tools::uuid::Uuid;
use proxmox::tools::vec;
use proxmox::tools::mmap::Mmap; use proxmox::tools::mmap::Mmap;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
@ -22,14 +21,14 @@ use super::read_chunk::ReadChunk;
use super::Chunker; use super::Chunker;
use super::IndexFile; use super::IndexFile;
use super::{DataBlob, DataChunkBuilder}; use super::{DataBlob, DataChunkBuilder};
use crate::tools::{self, epoch_now_u64}; use crate::tools;
/// Header format definition for dynamic index files (`.dixd`) /// Header format definition for dynamic index files (`.dixd`)
#[repr(C)] #[repr(C)]
pub struct DynamicIndexHeader { pub struct DynamicIndexHeader {
pub magic: [u8; 8], pub magic: [u8; 8],
pub uuid: [u8; 16], pub uuid: [u8; 16],
pub ctime: u64, pub ctime: i64,
/// Sha256 over the index ``SHA256(offset1||digest1||offset2||digest2||...)`` /// Sha256 over the index ``SHA256(offset1||digest1||offset2||digest2||...)``
pub index_csum: [u8; 32], pub index_csum: [u8; 32],
reserved: [u8; 4032], // overall size is one page (4096 bytes) reserved: [u8; 4032], // overall size is one page (4096 bytes)
@ -41,6 +40,24 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
// pub data: DynamicIndexHeaderData, // pub data: DynamicIndexHeaderData,
// } // }
impl DynamicIndexHeader {
/// Convenience method to allocate a zero-initialized header struct.
pub fn zeroed() -> Box<Self> {
unsafe {
Box::from_raw(std::alloc::alloc_zeroed(std::alloc::Layout::new::<Self>()) as *mut Self)
}
}
pub fn as_bytes(&self) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
self as *const Self as *const u8,
std::mem::size_of::<Self>(),
)
}
}
}
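A short sketch of how the two helpers above are meant to be used when writing the 4096-byte #[repr(C)] header page (values are placeholders):

```rust
use std::io::Write;

// Allocate the header zero-initialized on the heap (avoids a large stack
// copy), fill in the fields, then write the raw little-endian page.
fn write_header<W: Write>(writer: &mut W, ctime: i64) -> std::io::Result<()> {
    let mut header = DynamicIndexHeader::zeroed();
    header.ctime = i64::to_le(ctime);
    // header.magic / header.uuid / header.index_csum are filled in by the caller
    writer.write_all(header.as_bytes())
}
```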
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
#[repr(C)] #[repr(C)]
pub struct DynamicEntry { pub struct DynamicEntry {
@ -60,7 +77,7 @@ pub struct DynamicIndexReader {
pub size: usize, pub size: usize,
index: Mmap<DynamicEntry>, index: Mmap<DynamicEntry>,
pub uuid: [u8; 16], pub uuid: [u8; 16],
pub ctime: u64, pub ctime: i64,
pub index_csum: [u8; 32], pub index_csum: [u8; 32],
} }
@ -90,7 +107,7 @@ impl DynamicIndexReader {
bail!("got unknown magic number"); bail!("got unknown magic number");
} }
let ctime = u64::from_le(header.ctime); let ctime = proxmox::tools::time::epoch_i64();
let rawfd = file.as_raw_fd(); let rawfd = file.as_raw_fd();
@ -463,7 +480,7 @@ pub struct DynamicIndexWriter {
tmp_filename: PathBuf, tmp_filename: PathBuf,
csum: Option<openssl::sha::Sha256>, csum: Option<openssl::sha::Sha256>,
pub uuid: [u8; 16], pub uuid: [u8; 16],
pub ctime: u64, pub ctime: i64,
} }
impl Drop for DynamicIndexWriter { impl Drop for DynamicIndexWriter {
@ -489,27 +506,16 @@ impl DynamicIndexWriter {
let mut writer = BufWriter::with_capacity(1024 * 1024, file); let mut writer = BufWriter::with_capacity(1024 * 1024, file);
let header_size = std::mem::size_of::<DynamicIndexHeader>(); let ctime = proxmox::tools::time::epoch_i64();
// todo: use static assertion when available in rust
if header_size != 4096 {
panic!("got unexpected header size");
}
let ctime = epoch_now_u64()?;
let uuid = Uuid::generate(); let uuid = Uuid::generate();
let mut buffer = vec::zeroed(header_size); let mut header = DynamicIndexHeader::zeroed();
let header = crate::tools::map_struct_mut::<DynamicIndexHeader>(&mut buffer)?;
header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0; header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
header.ctime = u64::to_le(ctime); header.ctime = i64::to_le(ctime);
header.uuid = *uuid.as_bytes(); header.uuid = *uuid.as_bytes();
// header.index_csum = [0u8; 32];
header.index_csum = [0u8; 32]; writer.write_all(header.as_bytes())?;
writer.write_all(&buffer)?;
let csum = Some(openssl::sha::Sha256::new()); let csum = Some(openssl::sha::Sha256::new());

View File

@ -4,9 +4,8 @@ use std::io::{Seek, SeekFrom};
use super::chunk_stat::*; use super::chunk_stat::*;
use super::chunk_store::*; use super::chunk_store::*;
use super::{IndexFile, ChunkReadInfo}; use super::{IndexFile, ChunkReadInfo};
use crate::tools::{self, epoch_now_u64}; use crate::tools;
use chrono::{Local, TimeZone};
use std::fs::File; use std::fs::File;
use std::io::Write; use std::io::Write;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
@ -23,7 +22,7 @@ use proxmox::tools::Uuid;
pub struct FixedIndexHeader { pub struct FixedIndexHeader {
pub magic: [u8; 8], pub magic: [u8; 8],
pub uuid: [u8; 16], pub uuid: [u8; 16],
pub ctime: u64, pub ctime: i64,
/// Sha256 over the index ``SHA256(digest1||digest2||...)`` /// Sha256 over the index ``SHA256(digest1||digest2||...)``
pub index_csum: [u8; 32], pub index_csum: [u8; 32],
pub size: u64, pub size: u64,
@ -41,7 +40,7 @@ pub struct FixedIndexReader {
index_length: usize, index_length: usize,
index: *mut u8, index: *mut u8,
pub uuid: [u8; 16], pub uuid: [u8; 16],
pub ctime: u64, pub ctime: i64,
pub index_csum: [u8; 32], pub index_csum: [u8; 32],
} }
@ -82,7 +81,7 @@ impl FixedIndexReader {
} }
let size = u64::from_le(header.size); let size = u64::from_le(header.size);
let ctime = u64::from_le(header.ctime); let ctime = i64::from_le(header.ctime);
let chunk_size = u64::from_le(header.chunk_size); let chunk_size = u64::from_le(header.chunk_size);
let index_length = ((size + chunk_size - 1) / chunk_size) as usize; let index_length = ((size + chunk_size - 1) / chunk_size) as usize;
@ -148,10 +147,13 @@ impl FixedIndexReader {
pub fn print_info(&self) { pub fn print_info(&self) {
println!("Size: {}", self.size); println!("Size: {}", self.size);
println!("ChunkSize: {}", self.chunk_size); println!("ChunkSize: {}", self.chunk_size);
println!(
"CTime: {}", let mut ctime_str = self.ctime.to_string();
Local.timestamp(self.ctime as i64, 0).format("%c") if let Ok(s) = proxmox::tools::time::strftime_local("%c",self.ctime) {
); ctime_str = s;
}
println!("CTime: {}", ctime_str);
println!("UUID: {:?}", self.uuid); println!("UUID: {:?}", self.uuid);
} }
} }
@ -228,7 +230,7 @@ pub struct FixedIndexWriter {
index_length: usize, index_length: usize,
index: *mut u8, index: *mut u8,
pub uuid: [u8; 16], pub uuid: [u8; 16],
pub ctime: u64, pub ctime: i64,
} }
// `index` is mmap()ed which cannot be thread-local so should be sendable // `index` is mmap()ed which cannot be thread-local so should be sendable
@ -271,7 +273,7 @@ impl FixedIndexWriter {
panic!("got unexpected header size"); panic!("got unexpected header size");
} }
let ctime = epoch_now_u64()?; let ctime = proxmox::tools::time::epoch_i64();
let uuid = Uuid::generate(); let uuid = Uuid::generate();
@ -279,7 +281,7 @@ impl FixedIndexWriter {
let header = unsafe { &mut *(buffer.as_ptr() as *mut FixedIndexHeader) }; let header = unsafe { &mut *(buffer.as_ptr() as *mut FixedIndexHeader) };
header.magic = super::FIXED_SIZED_CHUNK_INDEX_1_0; header.magic = super::FIXED_SIZED_CHUNK_INDEX_1_0;
header.ctime = u64::to_le(ctime); header.ctime = i64::to_le(ctime);
header.size = u64::to_le(size as u64); header.size = u64::to_le(size as u64);
header.chunk_size = u64::to_le(chunk_size as u64); header.chunk_size = u64::to_le(chunk_size as u64);
header.uuid = *uuid.as_bytes(); header.uuid = *uuid.as_bytes();

View File

@ -1,7 +1,6 @@
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Context, Error};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use chrono::{Local, TimeZone, DateTime};
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions}; use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block; use proxmox::try_block;
@ -61,10 +60,10 @@ impl KeyDerivationConfig {
#[derive(Deserialize, Serialize, Debug)] #[derive(Deserialize, Serialize, Debug)]
pub struct KeyConfig { pub struct KeyConfig {
pub kdf: Option<KeyDerivationConfig>, pub kdf: Option<KeyDerivationConfig>,
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")] #[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
pub created: DateTime<Local>, pub created: i64,
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")] #[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
pub modified: DateTime<Local>, pub modified: i64,
#[serde(with = "proxmox::tools::serde::bytes_as_base64")] #[serde(with = "proxmox::tools::serde::bytes_as_base64")]
pub data: Vec<u8>, pub data: Vec<u8>,
} }
@ -136,7 +135,7 @@ pub fn encrypt_key_with_passphrase(
enc_data.extend_from_slice(&tag); enc_data.extend_from_slice(&tag);
enc_data.extend_from_slice(&encrypted_key); enc_data.extend_from_slice(&encrypted_key);
let created = Local.timestamp(Local::now().timestamp(), 0); let created = proxmox::tools::time::epoch_i64();
Ok(KeyConfig { Ok(KeyConfig {
kdf: Some(kdf), kdf: Some(kdf),
@ -149,7 +148,7 @@ pub fn encrypt_key_with_passphrase(
pub fn load_and_decrypt_key( pub fn load_and_decrypt_key(
path: &std::path::Path, path: &std::path::Path,
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> { ) -> Result<([u8;32], i64), Error> {
do_load_and_decrypt_key(path, passphrase) do_load_and_decrypt_key(path, passphrase)
.with_context(|| format!("failed to load decryption key from {:?}", path)) .with_context(|| format!("failed to load decryption key from {:?}", path))
} }
@ -157,14 +156,14 @@ pub fn load_and_decrypt_key(
fn do_load_and_decrypt_key( fn do_load_and_decrypt_key(
path: &std::path::Path, path: &std::path::Path,
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> { ) -> Result<([u8;32], i64), Error> {
decrypt_key(&file_get_contents(&path)?, passphrase) decrypt_key(&file_get_contents(&path)?, passphrase)
} }
pub fn decrypt_key( pub fn decrypt_key(
mut keydata: &[u8], mut keydata: &[u8],
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> { ) -> Result<([u8;32], i64), Error> {
let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?; let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
let raw_data = key_config.data; let raw_data = key_config.data;

View File

@ -49,6 +49,20 @@ pub struct FileInfo {
pub csum: [u8; 32], pub csum: [u8; 32],
} }
impl FileInfo {
/// Return expected CryptMode of referenced chunks
///
/// Encrypted Indices should only reference encrypted chunks, while signed or plain indices
/// should only reference plain chunks.
pub fn chunk_crypt_mode (&self) -> CryptMode {
match self.crypt_mode {
CryptMode::Encrypt => CryptMode::Encrypt,
CryptMode::SignOnly | CryptMode::None => CryptMode::None,
}
}
}
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")] #[serde(rename_all="kebab-case")]
pub struct BackupManifest { pub struct BackupManifest {
@ -58,6 +72,7 @@ pub struct BackupManifest {
files: Vec<FileInfo>, files: Vec<FileInfo>,
#[serde(default="empty_value")] // to be compatible with < 0.8.0 backups #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
pub unprotected: Value, pub unprotected: Value,
pub signature: Option<String>,
} }
#[derive(PartialEq)] #[derive(PartialEq)]
@ -88,9 +103,10 @@ impl BackupManifest {
Self { Self {
backup_type: snapshot.group().backup_type().into(), backup_type: snapshot.group().backup_type().into(),
backup_id: snapshot.group().backup_id().into(), backup_id: snapshot.group().backup_id().into(),
backup_time: snapshot.backup_time().timestamp(), backup_time: snapshot.backup_time(),
files: Vec::new(), files: Vec::new(),
unprotected: json!({}), unprotected: json!({}),
signature: None,
} }
} }
@ -129,7 +145,7 @@ impl BackupManifest {
Ok(()) Ok(())
} }
- // Generate cannonical json
+ // Generate canonical json
fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> { fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
let mut data = Vec::new(); let mut data = Vec::new();
Self::write_canonical_json(value, &mut data)?; Self::write_canonical_json(value, &mut data)?;
@ -160,12 +176,12 @@ impl BackupManifest {
keys.sort(); keys.sort();
let mut iter = keys.into_iter(); let mut iter = keys.into_iter();
if let Some(key) = iter.next() { if let Some(key) = iter.next() {
Self::write_canonical_json(&key.into(), output)?; serde_json::to_writer(&mut *output, &key)?;
output.push(b':'); output.push(b':');
Self::write_canonical_json(&map[key], output)?; Self::write_canonical_json(&map[key], output)?;
for key in iter { for key in iter {
output.push(b','); output.push(b',');
Self::write_canonical_json(&key.into(), output)?; serde_json::to_writer(&mut *output, &key)?;
output.push(b':'); output.push(b':');
Self::write_canonical_json(&map[key], output)?; Self::write_canonical_json(&map[key], output)?;
} }
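For reference, a standalone sketch of the canonical form the manifest signature is computed over (object keys sorted, keys and strings emitted through serde_json so they are escaped consistently, no extra whitespace); this mirrors the logic above but is not the actual implementation:

```rust
use serde_json::Value;

fn write_canonical(value: &Value, output: &mut Vec<u8>) -> Result<(), serde_json::Error> {
    match value {
        Value::Object(map) => {
            output.push(b'{');
            let mut keys: Vec<&String> = map.keys().collect();
            keys.sort();
            for (i, key) in keys.iter().enumerate() {
                if i > 0 { output.push(b','); }
                serde_json::to_writer(&mut *output, key)?; // quoted, escaped key
                output.push(b':');
                write_canonical(&map[*key], output)?;
            }
            output.push(b'}');
        }
        Value::Array(list) => {
            output.push(b'[');
            for (i, item) in list.iter().enumerate() {
                if i > 0 { output.push(b','); }
                write_canonical(item, output)?;
            }
            output.push(b']');
        }
        _ => serde_json::to_writer(&mut *output, value)?,
    }
    Ok(())
}
```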
@ -238,7 +254,8 @@ impl TryFrom<super::DataBlob> for BackupManifest {
type Error = Error; type Error = Error;
fn try_from(blob: super::DataBlob) -> Result<Self, Error> { fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
let data = blob.decode(None) // no expected digest available
let data = blob.decode(None, None)
.map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?; .map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
let json: Value = serde_json::from_slice(&data[..]) let json: Value = serde_json::from_slice(&data[..])
.map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?; .map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;

View File

@ -2,18 +2,16 @@ use anyhow::{Error};
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::path::PathBuf; use std::path::PathBuf;
use chrono::{DateTime, Timelike, Datelike, Local}; use super::BackupInfo;
use super::{BackupDir, BackupInfo};
enum PruneMark { Keep, KeepPartial, Remove } enum PruneMark { Keep, KeepPartial, Remove }
fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> ( fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
mark: &mut HashMap<PathBuf, PruneMark>, mark: &mut HashMap<PathBuf, PruneMark>,
list: &Vec<BackupInfo>, list: &Vec<BackupInfo>,
keep: usize, keep: usize,
select_id: F, select_id: F,
) { ) -> Result<(), Error> {
let mut include_hash = HashSet::new(); let mut include_hash = HashSet::new();
@ -21,8 +19,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
for info in list { for info in list {
let backup_id = info.backup_dir.relative_path(); let backup_id = info.backup_dir.relative_path();
if let Some(PruneMark::Keep) = mark.get(&backup_id) { if let Some(PruneMark::Keep) = mark.get(&backup_id) {
let local_time = info.backup_dir.backup_time().with_timezone(&Local); let sel_id: String = select_id(&info)?;
let sel_id: String = select_id(local_time, &info);
already_included.insert(sel_id); already_included.insert(sel_id);
} }
} }
@ -30,8 +27,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
for info in list { for info in list {
let backup_id = info.backup_dir.relative_path(); let backup_id = info.backup_dir.relative_path();
if let Some(_) = mark.get(&backup_id) { continue; } if let Some(_) = mark.get(&backup_id) { continue; }
let local_time = info.backup_dir.backup_time().with_timezone(&Local); let sel_id: String = select_id(&info)?;
let sel_id: String = select_id(local_time, &info);
if already_included.contains(&sel_id) { continue; } if already_included.contains(&sel_id) { continue; }
@ -43,6 +39,8 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
mark.insert(backup_id, PruneMark::Remove); mark.insert(backup_id, PruneMark::Remove);
} }
} }
Ok(())
} }
fn remove_incomplete_snapshots( fn remove_incomplete_snapshots(
@ -53,7 +51,7 @@ fn remove_incomplete_snapshots(
let mut keep_unfinished = true; let mut keep_unfinished = true;
for info in list.iter() { for info in list.iter() {
// backup is considered unfinished if there is no manifest // backup is considered unfinished if there is no manifest
if info.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME) { if info.is_finished() {
// There is a new finished backup, so there is no need // There is a new finished backup, so there is no need
// to keep older unfinished backups. // to keep older unfinished backups.
keep_unfinished = false; keep_unfinished = false;
@ -182,44 +180,43 @@ pub fn compute_prune_info(
remove_incomplete_snapshots(&mut mark, &list); remove_incomplete_snapshots(&mut mark, &list);
if let Some(keep_last) = options.keep_last { if let Some(keep_last) = options.keep_last {
mark_selections(&mut mark, &list, keep_last as usize, |_local_time, info| { mark_selections(&mut mark, &list, keep_last as usize, |info| {
BackupDir::backup_time_to_string(info.backup_dir.backup_time()) Ok(info.backup_dir.backup_time_string().to_owned())
}); })?;
} }
use proxmox::tools::time::strftime_local;
if let Some(keep_hourly) = options.keep_hourly { if let Some(keep_hourly) = options.keep_hourly {
mark_selections(&mut mark, &list, keep_hourly as usize, |local_time, _info| { mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
format!("{}/{}/{}/{}", local_time.year(), local_time.month(), strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time())
local_time.day(), local_time.hour()) })?;
});
} }
if let Some(keep_daily) = options.keep_daily { if let Some(keep_daily) = options.keep_daily {
mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| { mark_selections(&mut mark, &list, keep_daily as usize, |info| {
format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day()) strftime_local("%Y/%m/%d", info.backup_dir.backup_time())
}); })?;
} }
if let Some(keep_weekly) = options.keep_weekly { if let Some(keep_weekly) = options.keep_weekly {
mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| { mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
let iso_week = local_time.iso_week(); // Note: Use iso-week year/week here. This year number
let week = iso_week.week(); // might not match the calendar year number.
// Note: This year number might not match the calendar year number. strftime_local("%G/%V", info.backup_dir.backup_time())
let iso_week_year = iso_week.year(); })?;
format!("{}/{}", iso_week_year, week)
});
} }
if let Some(keep_monthly) = options.keep_monthly { if let Some(keep_monthly) = options.keep_monthly {
mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| { mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
format!("{}/{}", local_time.year(), local_time.month()) strftime_local("%Y/%m", info.backup_dir.backup_time())
}); })?;
} }
if let Some(keep_yearly) = options.keep_yearly { if let Some(keep_yearly) = options.keep_yearly {
mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| { mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
format!("{}/{}", local_time.year(), local_time.year()) strftime_local("%Y", info.backup_dir.backup_time())
}); })?;
} }
let prune_info: Vec<(BackupInfo, bool)> = list.into_iter() let prune_info: Vec<(BackupInfo, bool)> = list.into_iter()
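The selection keys now come straight from strftime patterns; snapshots that render to the same key compete for the same keep slot. A small sketch (assuming the strftime_local helper imported above):

```rust
use anyhow::Error;
use proxmox::tools::time::strftime_local;

// Example keys for one snapshot timestamp: two snapshots taken in the same
// hour/day/ISO week/month/year map to identical strings and therefore count
// as a single kept "slot".
fn selection_keys(backup_time: i64) -> Result<Vec<String>, Error> {
    Ok(vec![
        strftime_local("%Y/%m/%d/%H", backup_time)?, // keep-hourly
        strftime_local("%Y/%m/%d", backup_time)?,    // keep-daily
        strftime_local("%G/%V", backup_time)?,       // keep-weekly (ISO week)
        strftime_local("%Y/%m", backup_time)?,       // keep-monthly
        strftime_local("%Y", backup_time)?,          // keep-yearly
    ])
}
```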

View File

@ -2,9 +2,9 @@ use std::future::Future;
use std::pin::Pin; use std::pin::Pin;
use std::sync::Arc; use std::sync::Arc;
use anyhow::Error; use anyhow::{bail, Error};
use super::crypt_config::CryptConfig; use super::crypt_config::{CryptConfig, CryptMode};
use super::data_blob::DataBlob; use super::data_blob::DataBlob;
use super::datastore::DataStore; use super::datastore::DataStore;
@ -21,33 +21,47 @@ pub trait ReadChunk {
pub struct LocalChunkReader { pub struct LocalChunkReader {
store: Arc<DataStore>, store: Arc<DataStore>,
crypt_config: Option<Arc<CryptConfig>>, crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
} }
impl LocalChunkReader { impl LocalChunkReader {
pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self { pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>, crypt_mode: CryptMode) -> Self {
Self { Self {
store, store,
crypt_config, crypt_config,
crypt_mode,
}
}
fn ensure_crypt_mode(&self, chunk_mode: CryptMode) -> Result<(), Error> {
match self.crypt_mode {
CryptMode::Encrypt => {
match chunk_mode {
CryptMode::Encrypt => Ok(()),
CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
}
},
CryptMode::SignOnly | CryptMode::None => {
match chunk_mode {
CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
CryptMode::SignOnly | CryptMode::None => Ok(()),
}
},
} }
} }
} }
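The check boils down to: encrypted indices must reference encrypted chunks, and plain or signed indices must reference plain chunks. A standalone sketch of that predicate (hypothetical helper, not the actual method):

```rust
// True when the chunk's crypt mode is acceptable for an index with the given
// crypt mode; SignOnly and None both count as "plain" on the chunk side.
fn crypt_modes_compatible(index_mode: CryptMode, chunk_mode: CryptMode) -> bool {
    match (index_mode, chunk_mode) {
        (CryptMode::Encrypt, CryptMode::Encrypt) => true,
        (CryptMode::Encrypt, _) => false,
        (_, CryptMode::Encrypt) => false,
        _ => true,
    }
}
```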
impl ReadChunk for LocalChunkReader { impl ReadChunk for LocalChunkReader {
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> { fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let (path, _) = self.store.chunk_path(digest); let chunk = self.store.load_chunk(digest)?;
let raw_data = proxmox::tools::fs::file_get_contents(&path)?; self.ensure_crypt_mode(chunk.crypt_mode()?)?;
let chunk = DataBlob::from_raw(raw_data)?;
chunk.verify_crc()?;
Ok(chunk) Ok(chunk)
} }
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> { fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
let chunk = ReadChunk::read_raw_chunk(self, digest)?; let chunk = ReadChunk::read_raw_chunk(self, digest)?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?; let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
// fixme: verify digest?
Ok(raw_data) Ok(raw_data)
} }
@ -76,8 +90,9 @@ impl AsyncReadChunk for LocalChunkReader {
let (path, _) = self.store.chunk_path(digest); let (path, _) = self.store.chunk_path(digest);
let raw_data = tokio::fs::read(&path).await?; let raw_data = tokio::fs::read(&path).await?;
let chunk = DataBlob::from_raw(raw_data)?;
chunk.verify_crc()?; let chunk = DataBlob::load_from_reader(&mut &raw_data[..])?;
self.ensure_crypt_mode(chunk.crypt_mode()?)?;
Ok(chunk) Ok(chunk)
}) })
@ -90,7 +105,7 @@ impl AsyncReadChunk for LocalChunkReader {
Box::pin(async move { Box::pin(async move {
let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?; let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?; let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
// fixme: verify digest? // fixme: verify digest?
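The new crypt_mode field lets the reader reject chunks whose encryption state disagrees with the index that references them. A self-contained restatement of that invariant, using a local enum rather than the crate's CryptMode:

    // Sketch: an encrypted index may only reference encrypted chunks, and vice versa.
    #[derive(Clone, Copy)]
    enum Mode { None, SignOnly, Encrypt }

    fn modes_match(index: Mode, chunk: Mode) -> bool {
        match (index, chunk) {
            (Mode::Encrypt, Mode::Encrypt) => true,          // both encrypted - fine
            (Mode::Encrypt, _) | (_, Mode::Encrypt) => false, // mixed - reject
            _ => true,                                        // None/SignOnly on both sides is fine
        }
    }

    fn main() {
        assert!(modes_match(Mode::Encrypt, Mode::Encrypt));
        assert!(!modes_match(Mode::Encrypt, Mode::None));
        assert!(!modes_match(Mode::SignOnly, Mode::Encrypt));
        assert!(modes_match(Mode::None, Mode::SignOnly));
    }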


@@ -1,58 +1,215 @@
-use anyhow::{bail, Error};
+use std::collections::HashSet;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{Ordering, AtomicUsize};
+use std::time::Instant;
+
+use anyhow::{bail, format_err, Error};

 use crate::server::WorkerTask;
+use crate::api2::types::*;

 use super::{
-    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
-    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
+    DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
+    CryptMode,
     FileInfo, ArchiveType, archive_type,
 };

-fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

-    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;
-    let csum = openssl::sha::sha256(blob.raw_data());
+    let blob = datastore.load_blob(backup_dir, &info.filename)?;

+    let raw_size = blob.raw_size();
     if raw_size != info.size {
         bail!("wrong size ({} != {})", info.size, raw_size);
     }

+    let csum = openssl::sha::sha256(blob.raw_data());
     if csum != info.csum {
         bail!("wrong index checksum");
     }

-    blob.verify_crc()?;
-
-    let magic = blob.magic();
-
-    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
-        return Ok(());
-    }
-
-    blob.decode(None)?;
-
-    Ok(())
-}
+    match blob.crypt_mode()? {
+        CryptMode::Encrypt => Ok(()),
+        CryptMode::None => {
+            // digest already verified above
+            blob.decode(None, None)?;
+            Ok(())
+        },
+        CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
+    }
+}
+
+fn rename_corrupted_chunk(
+    datastore: Arc<DataStore>,
+    digest: &[u8;32],
+    worker: Arc<WorkerTask>,
+) {
+    let (path, digest_str) = datastore.chunk_path(digest);
+
+    let mut counter = 0;
+    let mut new_path = path.clone();
+    loop {
+        new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
+        if new_path.exists() && counter < 9 { counter += 1; } else { break; }
+    }
+
+    match std::fs::rename(&path, &new_path) {
+        Ok(_) => {
+            worker.log(format!("corrupted chunk renamed to {:?}", &new_path));
+        },
+        Err(err) => {
+            match err.kind() {
+                std::io::ErrorKind::NotFound => { /* ignored */ },
+                _ => worker.log(format!("could not rename corrupted chunk {:?} - {}", &path, err))
+            }
+        }
+    };
+}
+
+// We use a separate thread to read/load chunks, so that we can do
+// load and verify in parallel to increase performance.
fn chunk_reader_thread(
datastore: Arc<DataStore>,
index: Box<dyn IndexFile + Send>,
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
errors: Arc<AtomicUsize>,
worker: Arc<WorkerTask>,
) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {
let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks
std::thread::spawn(move|| {
for pos in 0..index.index_count() {
let info = index.chunk_info(pos).unwrap();
let size = info.range.end - info.range.start;
if verified_chunks.lock().unwrap().contains(&info.digest) {
continue; // already verified
}
if corrupt_chunks.lock().unwrap().contains(&info.digest) {
let digest_str = proxmox::tools::digest_to_hex(&info.digest);
worker.log(format!("chunk {} was marked as corrupt", digest_str));
errors.fetch_add(1, Ordering::SeqCst);
continue;
}
match datastore.load_chunk(&info.digest) {
Err(err) => {
corrupt_chunks.lock().unwrap().insert(info.digest);
worker.log(format!("can't verify chunk, load failed - {}", err));
errors.fetch_add(1, Ordering::SeqCst);
rename_corrupted_chunk(datastore.clone(), &info.digest, worker.clone());
continue;
}
Ok(chunk) => {
if sender.send((chunk, info.digest, size)).is_err() {
break; // receiver gone - simply stop
}
}
}
}
});
receiver
}
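chunk_reader_thread pairs a loader thread with a bounded channel so chunk loading overlaps with verification. A std-only sketch of that producer/consumer shape, with stand-in data instead of real chunks:

    // Sketch: one thread loads, the caller verifies; sync_channel(3) bounds the backlog.
    use std::sync::mpsc::sync_channel;
    use std::thread;

    fn main() {
        let (sender, receiver) = sync_channel::<(Vec<u8>, usize)>(3); // buffer up to 3 chunks

        let loader = thread::spawn(move || {
            for i in 0..10usize {
                let chunk = vec![0u8; 64 * (i + 1)]; // stand-in for datastore.load_chunk()
                if sender.send((chunk, i)).is_err() {
                    break; // receiver gone - simply stop
                }
            }
        });

        // the "verify" side runs concurrently with loading
        for (chunk, idx) in receiver {
            println!("verifying chunk {} ({} bytes)", idx, chunk.len());
        }
        loader.join().unwrap();
    }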
 fn verify_index_chunks(
-    datastore: &DataStore,
-    index: Box<dyn IndexFile>,
-    worker: &WorkerTask,
+    datastore: Arc<DataStore>,
+    index: Box<dyn IndexFile + Send>,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    crypt_mode: CryptMode,
+    worker: Arc<WorkerTask>,
 ) -> Result<(), Error> {

-    for pos in 0..index.index_count() {
+    let errors = Arc::new(AtomicUsize::new(0));
+
+    let start_time = Instant::now();
+
+    let chunk_channel = chunk_reader_thread(
+        datastore.clone(),
+        index,
+        verified_chunks.clone(),
+        corrupt_chunks.clone(),
+        errors.clone(),
+        worker.clone(),
+    );
+
+    let mut read_bytes = 0;
+    let mut decoded_bytes = 0;
+
+    loop {
         worker.fail_on_abort()?;
+        crate::tools::fail_on_shutdown()?;

-        let info = index.chunk_info(pos).unwrap();
-        let size = info.range.end - info.range.start;
-        datastore.verify_stored_chunk(&info.digest, size)?;
+        let (chunk, digest, size) = match chunk_channel.recv() {
+            Ok(tuple) => tuple,
+            Err(std::sync::mpsc::RecvError) => break,
+        };
read_bytes += chunk.raw_size();
decoded_bytes += size;
let chunk_crypt_mode = match chunk.crypt_mode() {
Err(err) => {
corrupt_chunks.lock().unwrap().insert(digest);
worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
errors.fetch_add(1, Ordering::SeqCst);
continue;
},
Ok(mode) => mode,
};
if chunk_crypt_mode != crypt_mode {
worker.log(format!(
"chunk CryptMode {:?} does not match index CryptMode {:?}",
chunk_crypt_mode,
crypt_mode
));
errors.fetch_add(1, Ordering::SeqCst);
}
if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
corrupt_chunks.lock().unwrap().insert(digest);
worker.log(format!("{}", err));
errors.fetch_add(1, Ordering::SeqCst);
rename_corrupted_chunk(datastore.clone(), &digest, worker.clone());
} else {
verified_chunks.lock().unwrap().insert(digest);
}
}
let elapsed = start_time.elapsed().as_secs_f64();
let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
let read_speed = read_bytes_mib/elapsed;
let decode_speed = decoded_bytes_mib/elapsed;
let error_count = errors.load(Ordering::SeqCst);
worker.log(format!(" verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));
if errors.load(Ordering::SeqCst) > 0 {
bail!("chunks could not be verified");
} }
Ok(()) Ok(())
} }
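The MiB and MiB/s figures logged above are plain byte counts divided by elapsed wall-clock time; a tiny sketch of that arithmetic:

    // Sketch: throughput as logged at the end of verify_index_chunks.
    fn mib_per_sec(bytes: u64, elapsed_secs: f64) -> f64 {
        (bytes as f64) / (1024.0 * 1024.0) / elapsed_secs
    }

    fn main() {
        // 100 MiB read in 2 seconds -> 50 MiB/s
        assert!((mib_per_sec(100 * 1024 * 1024, 2.0) - 50.0).abs() < 1e-9);
    }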
-fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+fn verify_fixed_index(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
+) -> Result<(), Error> {
     let mut path = backup_dir.relative_path();
     path.push(&info.filename);

@@ -68,10 +225,18 @@ fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &File
         bail!("wrong index checksum");
     }

-    verify_index_chunks(datastore, Box::new(index), worker)
+    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
 }

-fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+fn verify_dynamic_index(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
+) -> Result<(), Error> {
     let mut path = backup_dir.relative_path();
     path.push(&info.filename);

@@ -86,7 +251,7 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
         bail!("wrong index checksum");
     }

-    verify_index_chunks(datastore, Box::new(index), worker)
+    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
 }
/// Verify a single backup snapshot /// Verify a single backup snapshot
@ -98,10 +263,16 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
/// - Ok(true) if verify is successful /// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors /// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted /// - Err(_) if task was aborted
-pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_backup_dir(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>
+) -> Result<bool, Error> {

-    let manifest = match datastore.load_manifest(&backup_dir) {
-        Ok((manifest, _crypt_mode, _)) => manifest,
+    let mut manifest = match datastore.load_manifest(&backup_dir) {
+        Ok((manifest, _)) => manifest,
         Err(err) => {
             worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
             return Ok(false);
@ -112,24 +283,52 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:
let mut error_count = 0; let mut error_count = 0;
let mut verify_result = VerifyState::Ok;
for info in manifest.files() { for info in manifest.files() {
let result = proxmox::try_block!({ let result = proxmox::try_block!({
worker.log(format!(" check {}", info.filename)); worker.log(format!(" check {}", info.filename));
             match archive_type(&info.filename)? {
-                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
-                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
-                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
+                ArchiveType::FixedIndex =>
+                    verify_fixed_index(
+                        datastore.clone(),
+                        &backup_dir,
+                        info,
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
+                    ),
+                ArchiveType::DynamicIndex =>
+                    verify_dynamic_index(
+                        datastore.clone(),
+                        &backup_dir,
+                        info,
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
+                    ),
+                ArchiveType::Blob => verify_blob(datastore.clone(), &backup_dir, info),
             }
}); });
worker.fail_on_abort()?; worker.fail_on_abort()?;
crate::tools::fail_on_shutdown()?;
if let Err(err) = result { if let Err(err) = result {
worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err)); worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
error_count += 1; error_count += 1;
verify_result = VerifyState::Failed;
} }
} }
let verify_state = SnapshotVerifyState {
state: verify_result,
upid: worker.upid().clone(),
};
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
Ok(error_count == 0) Ok(error_count == 0)
} }
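The verification result is written back into the manifest's unprotected JSON area. A minimal serde_json sketch of that pattern; the field names below are placeholders, not the exact SnapshotVerifyState layout:

    // Sketch: record a verify result in an "unprotected" JSON section.
    use serde_json::json;

    fn main() {
        let mut manifest = json!({ "files": [], "unprotected": {} });
        manifest["unprotected"]["verify_state"] = json!({
            "state": "ok",
            "upid": "UPID:example",
        });
        println!("{}", serde_json::to_string_pretty(&manifest).unwrap());
    }

Keeping the state outside the signed part of the manifest is what allows a later verify run to update it without re-signing the backup.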
@@ -138,31 +337,45 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(true) if verify is successful
-/// - Ok(false) if there were verification errors
+/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_backup_group(
+    datastore: Arc<DataStore>,
+    group: &BackupGroup,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    progress: Option<(usize, usize)>, // (done, snapshot_count)
+    worker: Arc<WorkerTask>,
+) -> Result<(usize, Vec<String>), Error> {
+
+    let mut errors = Vec::new();
     let mut list = match group.list_backups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
             worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
-            return Ok(false);
+            return Ok((0, errors));
         }
     };

     worker.log(format!("verify group {}:{}", datastore.name(), group));

-    let mut error_count = 0;
+    let (done, snapshot_count) = progress.unwrap_or((0, list.len()));
+    let mut count = 0;
     BackupInfo::sort_list(&mut list, false); // newest first
     for info in list {
-        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
-            error_count += 1;
+        count += 1;
+        if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
+            errors.push(info.backup_dir.to_string());
+        }
+        if snapshot_count != 0 {
+            let pos = done + count;
+            let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
+            worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
         }
     }

-    Ok(error_count == 0)
+    Ok((count, errors))
 }
 /// Verify all backups inside a datastore
@@ -170,27 +383,52 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(true) if verify is successful
-/// - Ok(false) if there were verification errors
+/// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {
-
-    let list = match BackupGroup::list_groups(&datastore.base_path()) {
-        Ok(list) => list,
+pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
+    let mut errors = Vec::new();
+
+    let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
+        Ok(list) => list
+            .into_iter()
+            .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+            .collect::<Vec<BackupGroup>>(),
         Err(err) => {
             worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
-            return Ok(false);
+            return Ok(errors);
         }
     };

-    worker.log(format!("verify datastore {}", datastore.name()));
-
-    let mut error_count = 0;
-
-    for group in list {
-        if !verify_backup_group(datastore, &group, worker)? {
-            error_count += 1;
-        }
-    }
-
-    Ok(error_count == 0)
+    list.sort_unstable();
+
+    let mut snapshot_count = 0;
+    for group in list.iter() {
+        snapshot_count += group.list_backups(&datastore.base_path())?.len();
+    }
+
+    // start with 16384 chunks (up to 65GB)
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
// start with 64 chunks since we assume there are few corrupt ones
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
let mut done = 0;
for group in list {
let (count, mut group_errors) = verify_backup_group(
datastore.clone(),
&group,
verified_chunks.clone(),
corrupt_chunks.clone(),
Some((done, snapshot_count)),
worker.clone(),
)?;
errors.append(&mut group_errors);
done += count;
}
Ok(errors)
} }


@@ -37,6 +37,7 @@ async fn run() -> Result<(), Error> {
     config::update_self_signed_cert(false)?;

     proxmox_backup::rrd::create_rrdb_dir()?;
+    proxmox_backup::config::jobstate::create_jobstate_dir()?;

     if let Err(err) = generate_auth_key() {
         bail!("unable to generate auth key - {}", err);


@@ -8,7 +8,6 @@ use std::sync::{Arc, Mutex};
 use std::task::Context;

 use anyhow::{bail, format_err, Error};
-use chrono::{Local, DateTime, Utc, TimeZone};
 use futures::future::FutureExt;
 use futures::stream::{StreamExt, TryStreamExt};
 use serde_json::{json, Value};
@@ -16,11 +15,20 @@ use tokio::sync::mpsc;
 use xdg::BaseDirectories;

 use pathpatterns::{MatchEntry, MatchType, PatternFlag};
-use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
-use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
-use proxmox::api::schema::*;
-use proxmox::api::cli::*;
-use proxmox::api::api;
+use proxmox::{
+    tools::{
+        time::{strftime_local, epoch_i64},
+        fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
+    },
+    api::{
+        api,
+        ApiHandler,
+        ApiMethod,
+        RpcEnvironment,
+        schema::*,
+        cli::*,
+    },
+};
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

 use proxmox_backup::tools;
@@ -184,7 +192,7 @@ pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<
     result
 }

-fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
+fn connect(server: &str, userid: &Userid) -> Result<HttpClient, Error> {

     let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

@@ -246,7 +254,7 @@ pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
     group: BackupGroup,
-) -> Result<(String, String, DateTime<Utc>), Error> {
+) -> Result<(String, String, i64), Error> {

     let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

@@ -257,7 +265,7 @@
     list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

-    let backup_time = Utc.timestamp(list[0].backup_time, 0);
+    let backup_time = list[0].backup_time;

     Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
 }
@ -373,7 +381,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> { let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?; let item: GroupListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup); let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned()) Ok(snapshot.relative_path().to_str().unwrap().to_owned())
}; };
@ -444,7 +452,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> { let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?; let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time); let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned()) Ok(snapshot.relative_path().to_str().unwrap().to_owned())
}; };
@ -502,7 +510,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
let result = client.delete(&path, Some(json!({ let result = client.delete(&path, Some(json!({
"backup-type": snapshot.group().backup_type(), "backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(), "backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time().timestamp(), "backup-time": snapshot.backup_time(),
}))).await?; }))).await?;
record_repository(&repo); record_repository(&repo);
@ -639,7 +647,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
let mut result = client.get(&path, Some(json!({ let mut result = client.get(&path, Some(json!({
"backup-type": snapshot.group().backup_type(), "backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(), "backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time().timestamp(), "backup-time": snapshot.backup_time(),
}))).await?; }))).await?;
record_repository(&repo); record_repository(&repo);
@ -935,12 +943,18 @@ async fn create_backup(
} }
let mut upload_list = vec![]; let mut upload_list = vec![];
let mut target_set = HashSet::new();
for backupspec in backupspec_list { for backupspec in backupspec_list {
let spec = parse_backup_specification(backupspec.as_str().unwrap())?; let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
let filename = &spec.config_string; let filename = &spec.config_string;
let target = &spec.archive_name; let target = &spec.archive_name;
if target_set.contains(target) {
bail!("got target twice: '{}'", target);
}
target_set.insert(target.to_string());
use std::os::unix::fs::FileTypeExt; use std::os::unix::fs::FileTypeExt;
let metadata = std::fs::metadata(filename) let metadata = std::fs::metadata(filename)
@ -980,18 +994,18 @@ async fn create_backup(
} }
} }
-    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
+    let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());

     let client = connect(repo.host(), repo.user())?;
     record_repository(&repo);

-    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
+    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);

     println!("Client name: {}", proxmox::tools::nodename());

-    let start_time = Local::now();
-    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+    let start_time = std::time::Instant::now();
+    println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);
let (crypt_config, rsa_encrypted_key) = match keydata { let (crypt_config, rsa_encrypted_key) = match keydata {
None => (None, None), None => (None, None),
@ -1020,6 +1034,7 @@ async fn create_backup(
&backup_id, &backup_id,
backup_time, backup_time,
verbose, verbose,
false
).await?; ).await?;
let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await { let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
@ -1028,7 +1043,7 @@ async fn create_backup(
None None
}; };
let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp()); let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let mut manifest = BackupManifest::new(snapshot); let mut manifest = BackupManifest::new(snapshot);
let mut catalog = None; let mut catalog = None;
@ -1114,12 +1129,12 @@ async fn create_backup(
} }
if let Some(rsa_encrypted_key) = rsa_encrypted_key { if let Some(rsa_encrypted_key) = rsa_encrypted_key {
let target = "rsa-encrypted.key"; let target = "rsa-encrypted.key.blob";
println!("Upload RSA encoded key to '{:?}' as {}", repo, target); println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
let stats = client let stats = client
.upload_blob_from_data(rsa_encrypted_key, target, false, false) .upload_blob_from_data(rsa_encrypted_key, target, false, false)
.await?; .await?;
manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, crypt_mode)?; manifest.add_file(target.to_string(), stats.size, stats.csum, crypt_mode)?;
// openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
/* /*
@ -1130,7 +1145,6 @@ async fn create_backup(
println!("TEST {} {:?}", len, buffer2); println!("TEST {} {:?}", len, buffer2);
*/ */
} }
// create manifest (index.json) // create manifest (index.json)
// manifests are never encrypted, but include a signature // manifests are never encrypted, but include a signature
let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref)) let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
@ -1144,11 +1158,11 @@ async fn create_backup(
client.finish().await?; client.finish().await?;
-    let end_time = Local::now();
-    let elapsed = end_time.signed_duration_since(start_time);
-    println!("Duration: {}", elapsed);
-    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+    let end_time = std::time::Instant::now();
+    let elapsed = end_time.duration_since(start_time);
+    println!("Duration: {:.2}s", elapsed.as_secs_f64());
+    println!("End Time: {}", strftime_local("%c", epoch_i64())?);
Ok(Value::Null) Ok(Value::Null)
} }
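The client now measures the backup duration with std::time::Instant instead of chrono date arithmetic; a minimal sketch of that pattern:

    // Sketch: monotonic duration measurement as used for the "Duration:" line.
    use std::time::Instant;

    fn main() {
        let start_time = Instant::now();
        // ... upload work would happen here ...
        let elapsed = start_time.elapsed();
        println!("Duration: {:.2}s", elapsed.as_secs_f64());
    }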
@ -1177,6 +1191,7 @@ fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<Str
async fn dump_image<W: Write>( async fn dump_image<W: Write>(
client: Arc<BackupReader>, client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>, crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
index: FixedIndexReader, index: FixedIndexReader,
mut writer: W, mut writer: W,
verbose: bool, verbose: bool,
@ -1184,7 +1199,7 @@ async fn dump_image<W: Write>(
let most_used = index.find_most_used_chunks(8); let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used); let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);
// Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
// and thus slows down reading. Instead, directly use RemoteChunkReader // and thus slows down reading. Instead, directly use RemoteChunkReader
@ -1335,7 +1350,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
.map_err(|err| format_err!("unable to pipe data - {}", err))?; .map_err(|err| format_err!("unable to pipe data - {}", err))?;
} }
} else if archive_type == ArchiveType::Blob { return Ok(Value::Null);
}
let file_info = manifest.lookup_file_info(&archive_name)?;
if archive_type == ArchiveType::Blob {
let mut reader = client.download_blob(&manifest, &archive_name).await?; let mut reader = client.download_blob(&manifest, &archive_name).await?;
@ -1360,7 +1380,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
let most_used = index.find_most_used_chunks(8); let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used); let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let mut reader = BufferedDynamicReader::new(index, chunk_reader); let mut reader = BufferedDynamicReader::new(index, chunk_reader);
@ -1369,6 +1389,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
pxar::decoder::Decoder::from_std(reader)?, pxar::decoder::Decoder::from_std(reader)?,
Path::new(target), Path::new(target),
&[], &[],
true,
proxmox_backup::pxar::Flags::DEFAULT, proxmox_backup::pxar::Flags::DEFAULT,
allow_existing_dirs, allow_existing_dirs,
|path| { |path| {
@ -1376,6 +1397,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
println!("{:?}", path); println!("{:?}", path);
} }
}, },
None,
) )
.map_err(|err| format_err!("error extracting archive - {}", err))?; .map_err(|err| format_err!("error extracting archive - {}", err))?;
} else { } else {
@ -1405,7 +1427,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
.map_err(|err| format_err!("unable to open /dev/stdout - {}", err))? .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
}; };
dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?; dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
} }
Ok(Value::Null) Ok(Value::Null)
@ -1478,7 +1500,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
let args = json!({ let args = json!({
"backup-type": snapshot.group().backup_type(), "backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(), "backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time().timestamp(), "backup-time": snapshot.backup_time(),
}); });
let body = hyper::Body::from(raw_data); let body = hyper::Body::from(raw_data);
@ -1546,7 +1568,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> { let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: PruneListItem = serde_json::from_value(record.to_owned())?; let item: PruneListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time); let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned()) Ok(snapshot.relative_path().to_str().unwrap().to_owned())
}; };
@ -1738,8 +1760,9 @@ async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<Str
if let (Some(backup_id), Some(backup_type), Some(backup_time)) = if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64()) (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
{ {
let snapshot = BackupDir::new(backup_type, backup_id, backup_time); if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
result.push(snapshot.relative_path().to_str().unwrap().to_owned()); result.push(snapshot.relative_path().to_str().unwrap().to_owned());
}
} }
} }
} }
@ -1773,7 +1796,7 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
let query = tools::json_object_to_query(json!({ let query = tools::json_object_to_query(json!({
"backup-type": snapshot.group().backup_type(), "backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(), "backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time().timestamp(), "backup-time": snapshot.backup_time(),
})).unwrap(); })).unwrap();
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query); let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);


@ -9,7 +9,7 @@ use proxmox_backup::tools;
use proxmox_backup::config; use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* }; use proxmox_backup::api2::{self, types::* };
use proxmox_backup::client::*; use proxmox_backup::client::*;
use proxmox_backup::tools::ticket::*; use proxmox_backup::tools::ticket::Ticket;
use proxmox_backup::auth_helpers::*; use proxmox_backup::auth_helpers::*;
mod proxmox_backup_manager; mod proxmox_backup_manager;
@ -59,12 +59,13 @@ fn connect() -> Result<HttpClient, Error> {
.verify_cert(false); // not required for connection to localhost .verify_cert(false); // not required for connection to localhost
let client = if uid.is_root() { let client = if uid.is_root() {
let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some("root@pam"), None)?; let ticket = Ticket::new("PBS", Userid::root_userid())?
.sign(private_auth_key(), None)?;
options = options.password(Some(ticket)); options = options.password(Some(ticket));
HttpClient::new("localhost", "root@pam", options)? HttpClient::new("localhost", Userid::root_userid(), options)?
} else { } else {
options = options.ticket_cache(true).interactive(true); options = options.ticket_cache(true).interactive(true);
HttpClient::new("localhost", "root@pam", options)? HttpClient::new("localhost", Userid::root_userid(), options)?
}; };
Ok(client) Ok(client)


@ -1,4 +1,4 @@
use std::sync::Arc; use std::sync::{Arc};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
@ -9,21 +9,30 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use proxmox::try_block; use proxmox::try_block;
use proxmox::api::RpcEnvironmentType; use proxmox::api::RpcEnvironmentType;
use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir; use proxmox_backup::configdir;
use proxmox_backup::buildcfg; use proxmox_backup::buildcfg;
use proxmox_backup::server; use proxmox_backup::server;
-use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
+use proxmox_backup::tools::daemon;
 use proxmox_backup::server::{ApiConfig, rest::*};
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };

-fn main() {
+use proxmox_backup::api2::pull::do_sync_job;
+
+fn main() -> Result<(), Error> {
     proxmox_backup::tools::setup_safe_path_env();

-    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
-        eprintln!("Error: {}", err);
-        std::process::exit(-1);
-    }
+    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
+    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
+    let running_uid = nix::unistd::Uid::effective();
+    let running_gid = nix::unistd::Gid::effective();
+
+    if running_uid != backup_uid || running_gid != backup_gid {
+        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
+    }
+
+    proxmox_backup::tools::runtime::main(run())
 }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
@ -40,15 +49,11 @@ async fn run() -> Result<(), Error> {
let mut config = ApiConfig::new( let mut config = ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?; buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
// add default dirs which includes jquery and bootstrap
// my $base = '/usr/share/libpve-http-server-perl';
// add_dirs($self->{dirs}, '/css/' => "$base/css/");
// add_dirs($self->{dirs}, '/js/' => "$base/js/");
// add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
config.add_alias("novnc", "/usr/share/novnc-pve"); config.add_alias("novnc", "/usr/share/novnc-pve");
config.add_alias("extjs", "/usr/share/javascript/extjs"); config.add_alias("extjs", "/usr/share/javascript/extjs");
config.add_alias("fontawesome", "/usr/share/fonts-font-awesome"); config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
config.add_alias("xtermjs", "/usr/share/pve-xtermjs"); config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
config.add_alias("locale", "/usr/share/pbs-i18n");
config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit"); config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
config.add_alias("css", "/usr/share/javascript/proxmox-backup/css"); config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
config.add_alias("docs", "/usr/share/doc/proxmox-backup/html"); config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
@ -82,8 +87,6 @@ async fn run() -> Result<(), Error> {
let acceptor = Arc::clone(&acceptor); let acceptor = Arc::clone(&acceptor);
async move { async move {
sock.set_nodelay(true).unwrap(); sock.set_nodelay(true).unwrap();
sock.set_send_buffer_size(1024*1024).unwrap();
sock.set_recv_buffer_size(1024*1024).unwrap();
Ok(tokio_openssl::accept(&acceptor, sock) Ok(tokio_openssl::accept(&acceptor, sock)
.await .await
.ok() // handshake errors aren't be fatal, so return None to filter .ok() // handshake errors aren't be fatal, so return None to filter
@ -141,11 +144,12 @@ fn start_task_scheduler() {
tokio::spawn(task.map(|_| ())); tokio::spawn(task.map(|_| ()));
} }
-use std::time:: {Instant, Duration};
+use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};

 fn next_minute() -> Result<Instant, Error> {
-    let epoch_now = epoch_now()?;
+    let now = SystemTime::now();
+    let epoch_now = now.duration_since(UNIX_EPOCH)?;
     let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
     Ok(Instant::now() + epoch_next - epoch_now)
 }
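next_minute() rounds the unix epoch up to the next full minute and turns that into an Instant. A std-only sketch of the same rounding, expressed as the Duration a scheduler loop could sleep for:

    // Sketch: how long until the next full minute, computed from the unix epoch.
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    fn duration_until_next_minute() -> Duration {
        let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH).expect("clock before unix epoch");
        Duration::from_secs((epoch_now.as_secs() / 60 + 1) * 60) - epoch_now
    }

    fn main() {
        // a scheduler loop would sleep this long, then re-run its schedule checks
        println!("next tick in {:?}", duration_until_next_minute());
    }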
@ -192,6 +196,7 @@ async fn schedule_tasks() -> Result<(), Error> {
schedule_datastore_garbage_collection().await; schedule_datastore_garbage_collection().await;
schedule_datastore_prune().await; schedule_datastore_prune().await;
schedule_datastore_verification().await;
schedule_datastore_sync_jobs().await; schedule_datastore_sync_jobs().await;
Ok(()) Ok(())
@ -297,20 +302,16 @@ async fn schedule_datastore_garbage_collection() {
}; };
         let next = match compute_next_event(&event, last, false) {
-            Ok(next) => next,
+            Ok(Some(next)) => next,
+            Ok(None) => continue,
             Err(err) => {
                 eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                 continue;
             }
         };

-        let now = match epoch_now_u64() {
-            Ok(epoch_now) => epoch_now as i64,
-            Err(err) => {
-                eprintln!("query system time failed - {}", err);
-                continue;
-            }
-        };
+        let now = proxmox::tools::time::epoch_i64();

         if next > now { continue; }
let store2 = store.clone(); let store2 = store.clone();
@ -318,7 +319,7 @@ async fn schedule_datastore_garbage_collection() {
if let Err(err) = WorkerTask::new_thread( if let Err(err) = WorkerTask::new_thread(
worker_type, worker_type,
Some(store.clone()), Some(store.clone()),
"backup@pam", Userid::backup_userid().clone(),
false, false,
move |worker| { move |worker| {
worker.log(format!("starting garbage collection on store {}", store)); worker.log(format!("starting garbage collection on store {}", store));
@ -334,9 +335,12 @@ async fn schedule_datastore_garbage_collection() {
 async fn schedule_datastore_prune() {
     use proxmox_backup::backup::{
-        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
+        PruneOptions, DataStore, BackupGroup, compute_prune_info};
     use proxmox_backup::server::{WorkerTask};
-    use proxmox_backup::config::datastore::{self, DataStoreConfig};
+    use proxmox_backup::config::{
+        jobstate::{self, Job},
+        datastore::{self, DataStoreConfig}
+    };
     use proxmox_backup::tools::systemd::time::{
         parse_calendar_event, compute_next_event};
@ -393,6 +397,135 @@ async fn schedule_datastore_prune() {
let worker_type = "prune"; let worker_type = "prune";
let last = match jobstate::last_run_time(worker_type, &store) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
};
let next = match compute_next_event(&event, last, false) {
Ok(Some(next)) => next,
Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
let now = proxmox::tools::time::epoch_i64();
if next > now { continue; }
let mut job = match Job::new(worker_type, &store) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
Userid::backup_userid().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
let result = {
worker.log(format!("Starting datastore prune on store \"{}\"", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
let base_path = datastore.base_path();
let groups = BackupGroup::list_groups(&base_path)?;
for group in groups {
let list = group.list_backups(&base_path)?;
let mut prune_info = compute_prune_info(list, &prune_options)?;
prune_info.reverse(); // delete older snapshots first
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, group.backup_type(), group.backup_id()));
for (info, keep) in prune_info {
worker.log(format!(
"{} {}/{}/{}",
if keep { "keep" } else { "remove" },
group.backup_type(), group.backup_id(),
info.backup_dir.backup_time_string()));
if !keep {
datastore.remove_backup_dir(&info.backup_dir, true)?;
}
}
}
Ok(())
};
let status = worker.create_state(&result);
if let Err(err) = job.finish(status) {
eprintln!("could not finish job state for {}: {}", worker_type, err);
}
result
}
) {
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
}
}
}
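Every scheduler in this file follows the same due-check: take the last run time, compute the next event, and skip the job unless that time has passed. A simplified sketch with a fixed interval standing in for the calendar-event handling done by compute_next_event:

    // Sketch: the "is this job due yet?" decision behind each `if next > now { continue; }`.
    fn is_due(last_run: i64, interval_secs: i64, now: i64) -> bool {
        let next = last_run + interval_secs;
        next <= now
    }

    fn main() {
        let now = 1_600_000_000;
        assert!(is_due(now - 3600, 3600, now));  // an hour has passed - run it
        assert!(!is_due(now - 100, 3600, now));  // not due yet - skip this tick
    }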
async fn schedule_datastore_verification() {
use proxmox_backup::backup::{DataStore, verify_all_backups};
use proxmox_backup::server::{WorkerTask};
use proxmox_backup::config::datastore::{self, DataStoreConfig};
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
let config = match datastore::config() {
Err(err) => {
eprintln!("unable to read datastore config - {}", err);
return;
}
Ok((config, _digest)) => config,
};
for (store, (_, store_config)) in config.sections {
let datastore = match DataStore::lookup_datastore(&store) {
Ok(datastore) => datastore,
Err(err) => {
eprintln!("lookup_datastore failed - {}", err);
continue;
}
};
let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
Ok(c) => c,
Err(err) => {
eprintln!("datastore config from_value failed - {}", err);
continue;
}
};
let event_str = match store_config.verify_schedule {
Some(event_str) => event_str,
None => continue,
};
let event = match parse_calendar_event(&event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
continue;
}
};
let worker_type = "verify";
     let last = match lookup_last_worker(worker_type, &store) {
         Ok(Some(upid)) => {
             if proxmox_backup::server::worker_is_active_local(&upid) {
@@ -408,62 +541,41 @@ async fn schedule_datastore_prune() {
     };

     let next = match compute_next_event(&event, last, false) {
-        Ok(next) => next,
+        Ok(Some(next)) => next,
+        Ok(None) => continue,
         Err(err) => {
             eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
             continue;
         }
     };

-    let now = match epoch_now_u64() {
-        Ok(epoch_now) => epoch_now as i64,
-        Err(err) => {
-            eprintln!("query system time failed - {}", err);
-            continue;
-        }
-    };
+    let now = proxmox::tools::time::epoch_i64();

     if next > now { continue; }

+    let worker_id = store.clone();
     let store2 = store.clone();

     if let Err(err) = WorkerTask::new_thread(
         worker_type,
-        Some(store.clone()),
-        "backup@pam",
+        Some(worker_id),
+        Userid::backup_userid().clone(),
         false,
         move |worker| {
-            worker.log(format!("Starting datastore prune on store \"{}\"", store));
+            worker.log(format!("starting verification on store {}", store2));
             worker.log(format!("task triggered by schedule '{}'", event_str));
-            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
-
-            let base_path = datastore.base_path();
-
-            let groups = BackupGroup::list_groups(&base_path)?;
-            for group in groups {
-                let list = group.list_backups(&base_path)?;
-                let mut prune_info = compute_prune_info(list, &prune_options)?;
-                prune_info.reverse(); // delete older snapshots first
-
-                worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
-                    store, group.backup_type(), group.backup_id()));
-
-                for (info, keep) in prune_info {
-                    worker.log(format!(
-                        "{} {}/{}/{}",
-                        if keep { "keep" } else { "remove" },
-                        group.backup_type(), group.backup_id(),
-                        BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
-
-                    if !keep {
-                        datastore.remove_backup_dir(&info.backup_dir)?;
-                    }
-                }
-            }
-
+            if let Ok(failed_dirs) = verify_all_backups(datastore, worker.clone()) {
+                if failed_dirs.len() > 0 {
+                    worker.log("Failed to verify following snapshots:");
+                    for dir in failed_dirs {
+                        worker.log(format!("\t{}", dir));
+                    }
+                    bail!("verification failed - please check the log for details");
+                }
+            }
             Ok(())
-        }
+        },
     ) {
-        eprintln!("unable to start datastore prune on store {} - {}", store2, err);
+        eprintln!("unable to start verification on store {} - {}", store, err);
     }
     }
 }
@ -471,10 +583,7 @@ async fn schedule_datastore_prune() {
async fn schedule_datastore_sync_jobs() { async fn schedule_datastore_sync_jobs() {
use proxmox_backup::{ use proxmox_backup::{
backup::DataStore, config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
server::{ WorkerTask },
config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
tools::systemd::time::{ parse_calendar_event, compute_next_event }, tools::systemd::time::{ parse_calendar_event, compute_next_event },
}; };
@ -486,14 +595,6 @@ async fn schedule_datastore_sync_jobs() {
Ok((config, _digest)) => config, Ok((config, _digest)) => config,
}; };
let remote_config = match remote::config() {
Err(err) => {
eprintln!("unable to read remote config - {}", err);
return;
}
Ok((config, _digest)) => config,
};
for (job_id, (_, job_config)) in config.sections { for (job_id, (_, job_config)) in config.sections {
let job_config: SyncJobConfig = match serde_json::from_value(job_config) { let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c, Ok(c) => c,
@ -518,88 +619,36 @@ async fn schedule_datastore_sync_jobs() {
let worker_type = "syncjob"; let worker_type = "syncjob";
let last = match lookup_last_worker(worker_type, &job_id) { let last = match jobstate::last_run_time(worker_type, &job_id) {
Ok(Some(upid)) => { Ok(time) => time,
if proxmox_backup::server::worker_is_active_local(&upid) {
continue;
}
upid.starttime
},
Ok(None) => 0,
Err(err) => { Err(err) => {
eprintln!("lookup_last_job_start failed: {}", err); eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
continue; continue;
} }
}; };
let next = match compute_next_event(&event, last, false) { let next = match compute_next_event(&event, last, false) {
Ok(next) => next, Ok(Some(next)) => next,
Ok(None) => continue,
Err(err) => { Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err); eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue; continue;
} }
}; };
let now = match epoch_now_u64() { let now = proxmox::tools::time::epoch_i64();
Ok(epoch_now) => epoch_now as i64,
Err(err) => {
eprintln!("query system time failed - {}", err);
continue;
}
};
if next > now { continue; } if next > now { continue; }
let job = match Job::new(worker_type, &job_id) {
let job_id2 = job_id.clone(); Ok(job) => job,
Err(_) => continue, // could not get lock
let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
Ok(datastore) => datastore,
Err(err) => {
eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
continue;
}
}; };
let remote: Remote = match remote_config.lookup("remote", &job_config.remote) { let userid = Userid::backup_userid().clone();
Ok(remote) => remote,
Err(err) => {
eprintln!("remote_config lookup failed: {}", err);
continue;
}
};
let username = String::from("backup@pam"); if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
let delete = job_config.remove_vanished.unwrap_or(true);
if let Err(err) = WorkerTask::spawn(
worker_type,
Some(job_id.clone()),
&username.clone(),
false,
move |worker| async move {
worker.log(format!("Starting datastore sync job '{}'", job_id));
worker.log(format!("task triggered by schedule '{}'", event_str));
worker.log(format!("Sync datastore '{}' from '{}/{}'",
job_config.store, job_config.remote, job_config.remote_store));
let options = HttpClientOptions::new()
.password(Some(remote.password.clone()))
.fingerprint(remote.fingerprint.clone());
let client = HttpClient::new(&remote.host, &remote.userid, options)?;
let _auth_info = client.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);
pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;
Ok(())
}
) {
eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
} }
} }
} }


@ -3,7 +3,6 @@ use std::sync::Arc;
use anyhow::{Error}; use anyhow::{Error};
use serde_json::Value; use serde_json::Value;
use chrono::{TimeZone, Utc};
use serde::Serialize; use serde::Serialize;
use proxmox::api::{ApiMethod, RpcEnvironment}; use proxmox::api::{ApiMethod, RpcEnvironment};
@ -68,7 +67,7 @@ struct Speed {
struct BenchmarkResult { struct BenchmarkResult {
/// TLS upload speed /// TLS upload speed
tls: Speed, tls: Speed,
/// SHA256 checksum comptation speed /// SHA256 checksum computation speed
sha256: Speed, sha256: Speed,
/// ZStd level 1 compression speed /// ZStd level 1 compression speed
compress: Speed, compress: Speed,
@ -82,7 +81,7 @@ struct BenchmarkResult {
static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult { static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
tls: Speed { tls: Speed {
speed: None, speed: None,
top: 1_000_000.0 * 590.0, // TLS to localhost, AMD Ryzen 7 2700X top: 1_000_000.0 * 690.0, // TLS to localhost, AMD Ryzen 7 2700X
}, },
sha256: Speed { sha256: Speed {
speed: None, speed: None,
@ -187,7 +186,7 @@ fn render_result(
.header("TLS (maximal backup upload speed)") .header("TLS (maximal backup upload speed)")
.right_align(false).renderer(render_speed)) .right_align(false).renderer(render_speed))
.column(ColumnConfig::new("sha256") .column(ColumnConfig::new("sha256")
.header("SHA256 checksum comptation speed") .header("SHA256 checksum computation speed")
.right_align(false).renderer(render_speed)) .right_align(false).renderer(render_speed))
.column(ColumnConfig::new("compress") .column(ColumnConfig::new("compress")
.header("ZStd level 1 compression speed") .header("ZStd level 1 compression speed")
@ -212,7 +211,7 @@ async fn test_upload_speed(
verbose: bool, verbose: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let backup_time = Utc.timestamp(Utc::now().timestamp(), 0); let backup_time = proxmox::tools::time::epoch_i64();
let client = connect(repo.host(), repo.user())?; let client = connect(repo.host(), repo.user())?;
record_repository(&repo); record_repository(&repo);
@ -226,6 +225,7 @@ async fn test_upload_speed(
"benchmark", "benchmark",
backup_time, backup_time,
false, false,
true
).await?; ).await?;
if verbose { eprintln!("Start TLS speed test"); } if verbose { eprintln!("Start TLS speed test"); }


@ -97,7 +97,9 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
let most_used = index.find_most_used_chunks(8); let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used); let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let mut reader = BufferedDynamicReader::new(index, chunk_reader); let mut reader = BufferedDynamicReader::new(index, chunk_reader);
@ -200,7 +202,9 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?; let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let most_used = index.find_most_used_chunks(8); let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
let file_info = manifest.lookup_file_info(&server_archive_name)?;
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
let reader = BufferedDynamicReader::new(index, chunk_reader); let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size(); let archive_size = reader.archive_size();
let reader: proxmox_backup::pxar::fuse::Reader = let reader: proxmox_backup::pxar::fuse::Reader =
@ -216,7 +220,9 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
manifest.verify_file(CATALOG_NAME, &csum, size)?; manifest.verify_file(CATALOG_NAME, &csum, size)?;
let most_used = index.find_most_used_chunks(8); let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let mut reader = BufferedDynamicReader::new(index, chunk_reader); let mut reader = BufferedDynamicReader::new(index, chunk_reader);
let mut catalogfile = std::fs::OpenOptions::new() let mut catalogfile = std::fs::OpenOptions::new()
.write(true) .write(true)


@ -1,7 +1,6 @@
use std::path::PathBuf; use std::path::PathBuf;
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use chrono::{Local, TimeZone};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use proxmox::api::api; use proxmox::api::api;
@ -112,7 +111,7 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
match kdf { match kdf {
Kdf::None => { Kdf::None => {
let created = Local.timestamp(Local::now().timestamp(), 0); let created = proxmox::tools::time::epoch_i64();
store_key_config( store_key_config(
&path, &path,
@ -180,7 +179,7 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
match kdf { match kdf {
Kdf::None => { Kdf::None => {
let modified = Local.timestamp(Local::now().timestamp(), 0); let modified = proxmox::tools::time::epoch_i64();
store_key_config( store_key_config(
&path, &path,


@ -141,10 +141,12 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
let (manifest, _) = client.download_manifest().await?; let (manifest, _) = client.download_manifest().await?;
let file_info = manifest.lookup_file_info(&server_archive_name)?;
if server_archive_name.ends_with(".didx") { if server_archive_name.ends_with(".didx") {
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?; let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let most_used = index.find_most_used_chunks(8); let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used); let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let reader = BufferedDynamicReader::new(index, chunk_reader); let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size(); let archive_size = reader.archive_size();
let reader: proxmox_backup::pxar::fuse::Reader = let reader: proxmox_backup::pxar::fuse::Reader =


@ -239,7 +239,7 @@ pub fn zpool_commands() -> CommandLineInterface {
.insert("create", .insert("create",
CliCommand::new(&API_METHOD_CREATE_ZPOOL) CliCommand::new(&API_METHOD_CREATE_ZPOOL)
.arg_param(&["name"]) .arg_param(&["name"])
.completion_cb("devices", complete_disk_name) // fixme: comlete the list .completion_cb("devices", complete_disk_name) // fixme: complete the list
); );
cmd_def.into() cmd_def.into()


@ -3,8 +3,10 @@ use std::ffi::OsStr;
use std::fs::OpenOptions; use std::fs::OpenOptions;
use std::os::unix::fs::OpenOptionsExt; use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use anyhow::{format_err, Error}; use anyhow::{bail, format_err, Error};
use futures::future::FutureExt; use futures::future::FutureExt;
use futures::select; use futures::select;
use tokio::signal::unix::{signal, SignalKind}; use tokio::signal::unix::{signal, SignalKind};
@ -24,11 +26,14 @@ fn extract_archive_from_reader<R: std::io::Read>(
allow_existing_dirs: bool, allow_existing_dirs: bool,
verbose: bool, verbose: bool,
match_list: &[MatchEntry], match_list: &[MatchEntry],
extract_match_default: bool,
on_error: Option<Box<dyn FnMut(Error) -> Result<(), Error> + Send>>,
) -> Result<(), Error> { ) -> Result<(), Error> {
proxmox_backup::pxar::extract_archive( proxmox_backup::pxar::extract_archive(
pxar::decoder::Decoder::from_std(reader)?, pxar::decoder::Decoder::from_std(reader)?,
Path::new(target), Path::new(target),
&match_list, &match_list,
extract_match_default,
feature_flags, feature_flags,
allow_existing_dirs, allow_existing_dirs,
|path| { |path| {
@ -36,6 +41,7 @@ fn extract_archive_from_reader<R: std::io::Read>(
println!("{:?}", path); println!("{:?}", path);
} }
}, },
on_error,
) )
} }
@ -102,6 +108,11 @@ fn extract_archive_from_reader<R: std::io::Read>(
optional: true, optional: true,
default: false, default: false,
}, },
strict: {
description: "Stop on errors. Otherwise most errors will simply warn.",
optional: true,
default: false,
},
}, },
}, },
)] )]
@ -119,6 +130,7 @@ fn extract_archive(
no_device_nodes: bool, no_device_nodes: bool,
no_fifos: bool, no_fifos: bool,
no_sockets: bool, no_sockets: bool,
strict: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut feature_flags = Flags::DEFAULT; let mut feature_flags = Flags::DEFAULT;
if no_xattrs { if no_xattrs {
@ -162,6 +174,22 @@ fn extract_archive(
); );
} }
let extract_match_default = match_list.is_empty();
let was_ok = Arc::new(AtomicBool::new(true));
let on_error = if strict {
// by default errors are propagated up
None
} else {
let was_ok = Arc::clone(&was_ok);
// otherwise we want to log them but not act on them
Some(Box::new(move |err| {
was_ok.store(false, Ordering::Release);
eprintln!("error: {}", err);
Ok(())
}) as Box<dyn FnMut(Error) -> Result<(), Error> + Send>)
};
if archive == "-" { if archive == "-" {
let stdin = std::io::stdin(); let stdin = std::io::stdin();
let mut reader = stdin.lock(); let mut reader = stdin.lock();
@ -172,6 +200,8 @@ fn extract_archive(
allow_existing_dirs, allow_existing_dirs,
verbose, verbose,
&match_list, &match_list,
extract_match_default,
on_error,
)?; )?;
} else { } else {
if verbose { if verbose {
@ -186,9 +216,15 @@ fn extract_archive(
allow_existing_dirs, allow_existing_dirs,
verbose, verbose,
&match_list, &match_list,
extract_match_default,
on_error,
)?; )?;
} }
if !was_ok.load(Ordering::Acquire) {
bail!("there were errors");
}
Ok(()) Ok(())
} }
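The new strict switch above decides whether extraction errors abort immediately (on_error = None, so they propagate) or are only logged while an Arc<AtomicBool> records that something went wrong, turning into a single "there were errors" failure at the end. A self-contained sketch of that wiring, assuming only std; extract() here is a hypothetical stand-in for the real pxar walk:

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

type ErrorHandler = Option<Box<dyn FnMut(String) -> Result<(), String> + Send>>;

// Hypothetical stand-in for the archive walk: every entry "fails" so the
// two error-handling modes can be compared.
fn extract(entries: &[&str], mut on_error: ErrorHandler) -> Result<(), String> {
    for entry in entries {
        let err = format!("failed to extract {}", entry);
        match on_error.as_mut() {
            Some(handler) => handler(err)?, // non-strict: let the handler decide
            None => return Err(err),        // strict: propagate immediately
        }
    }
    Ok(())
}

fn run(strict: bool) -> Result<(), String> {
    let was_ok = Arc::new(AtomicBool::new(true));
    let on_error: ErrorHandler = if strict {
        None // errors are propagated up
    } else {
        let was_ok = Arc::clone(&was_ok);
        // otherwise log them, remember that something went wrong, keep going
        Some(Box::new(move |err: String| {
            was_ok.store(false, Ordering::Release);
            eprintln!("error: {}", err);
            Ok::<(), String>(())
        }) as Box<dyn FnMut(String) -> Result<(), String> + Send>)
    };

    extract(&["a.txt", "b.txt"], on_error)?;

    if !was_ok.load(Ordering::Acquire) {
        return Err("there were errors".to_string());
    }
    Ok(())
}

fn main() {
    assert!(run(true).is_err());  // strict: aborted on the first error
    assert!(run(false).is_err()); // lenient: errors logged, reported at the end
}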


@ -1,16 +1,18 @@
use anyhow::{format_err, Error}; use anyhow::{format_err, Error};
use std::io::{Read, Write, Seek, SeekFrom}; use std::io::{Write, Seek, SeekFrom};
use std::fs::File; use std::fs::File;
use std::sync::Arc; use std::sync::Arc;
use std::os::unix::fs::OpenOptionsExt; use std::os::unix::fs::OpenOptionsExt;
use chrono::{DateTime, Utc};
use futures::future::AbortHandle; use futures::future::AbortHandle;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox::tools::digest_to_hex; use proxmox::tools::digest_to_hex;
use crate::backup::*; use crate::{
tools::compute_file_csum,
backup::*,
};
use super::{HttpClient, H2Client}; use super::{HttpClient, H2Client};
@ -41,14 +43,14 @@ impl BackupReader {
datastore: &str, datastore: &str,
backup_type: &str, backup_type: &str,
backup_id: &str, backup_id: &str,
backup_time: DateTime<Utc>, backup_time: i64,
debug: bool, debug: bool,
) -> Result<Arc<BackupReader>, Error> { ) -> Result<Arc<BackupReader>, Error> {
let param = json!({ let param = json!({
"backup-type": backup_type, "backup-type": backup_type,
"backup-id": backup_id, "backup-id": backup_id,
"backup-time": backup_time.timestamp(), "backup-time": backup_time,
"store": datastore, "store": datastore,
"debug": debug, "debug": debug,
}); });
@ -129,9 +131,9 @@ impl BackupReader {
let mut raw_data = Vec::with_capacity(64 * 1024); let mut raw_data = Vec::with_capacity(64 * 1024);
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?; self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
let blob = DataBlob::from_raw(raw_data)?; let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
blob.verify_crc()?; // no expected digest available
let data = blob.decode(None)?; let data = blob.decode(None, None)?;
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?; let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
@ -220,29 +222,3 @@ impl BackupReader {
Ok(index) Ok(index)
} }
} }
pub fn compute_file_csum(file: &mut File) -> Result<([u8; 32], u64), Error> {
file.seek(SeekFrom::Start(0))?;
let mut hasher = openssl::sha::Sha256::new();
let mut buffer = proxmox::tools::vec::undefined(256*1024);
let mut size: u64 = 0;
loop {
let count = match file.read(&mut buffer) {
Ok(count) => count,
Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => { continue; }
Err(err) => return Err(err.into()),
};
if count == 0 {
break;
}
size += count as u64;
hasher.update(&buffer[..count]);
}
let csum = hasher.finish();
Ok((csum, size))
}
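compute_file_csum is removed from this file but not from the crate: the new use crate::tools::compute_file_csum import at the top shows it moved into the shared tools module, where the pull code can reuse it to verify downloaded blobs. A roughly equivalent standalone sketch (openssl crate assumed as a dependency, and the buffer plainly zero-initialized instead of using proxmox::tools::vec::undefined):

use std::fs::File;
use std::io::{Read, Seek, SeekFrom};

/// SHA-256 over a whole file, returning (digest, file size).
fn compute_file_csum(file: &mut File) -> std::io::Result<([u8; 32], u64)> {
    file.seek(SeekFrom::Start(0))?;
    let mut hasher = openssl::sha::Sha256::new();
    let mut buffer = vec![0u8; 256 * 1024];
    let mut size: u64 = 0;
    loop {
        let count = match file.read(&mut buffer) {
            Ok(0) => break, // EOF
            Ok(count) => count,
            // read(2) may be interrupted by a signal; just retry
            Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(err) => return Err(err),
        };
        size += count as u64;
        hasher.update(&buffer[..count]);
    }
    Ok((hasher.finish(), size))
}

fn main() -> std::io::Result<()> {
    let path = std::env::args().nth(1).unwrap_or_else(|| "/etc/hostname".to_string());
    let mut file = File::open(&path)?;
    let (csum, size) = compute_file_csum(&mut file)?;
    let hex: String = csum.iter().map(|b| format!("{:02x}", b)).collect();
    println!("{}: {} bytes, sha256 {}", path, size, hex);
    Ok(())
}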


@ -1,3 +1,4 @@
use std::convert::TryFrom;
use std::fmt; use std::fmt;
use anyhow::{format_err, Error}; use anyhow::{format_err, Error};
@ -15,7 +16,7 @@ pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_RE
#[derive(Debug)] #[derive(Debug)]
pub struct BackupRepository { pub struct BackupRepository {
/// The user name used for Authentication /// The user name used for Authentication
user: Option<String>, user: Option<Userid>,
/// The host name or IP address /// The host name or IP address
host: Option<String>, host: Option<String>,
/// The name of the datastore /// The name of the datastore
@ -24,15 +25,15 @@ pub struct BackupRepository {
impl BackupRepository { impl BackupRepository {
pub fn new(user: Option<String>, host: Option<String>, store: String) -> Self { pub fn new(user: Option<Userid>, host: Option<String>, store: String) -> Self {
Self { user, host, store } Self { user, host, store }
} }
pub fn user(&self) -> &str { pub fn user(&self) -> &Userid {
if let Some(ref user) = self.user { if let Some(ref user) = self.user {
return user; return &user;
} }
"root@pam" Userid::root_userid()
} }
pub fn host(&self) -> &str { pub fn host(&self) -> &str {
@ -73,7 +74,7 @@ impl std::str::FromStr for BackupRepository {
.ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?; .ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;
Ok(Self { Ok(Self {
user: cap.get(1).map(|m| m.as_str().to_owned()), user: cap.get(1).map(|m| Userid::try_from(m.as_str().to_owned())).transpose()?,
host: cap.get(2).map(|m| m.as_str().to_owned()), host: cap.get(2).map(|m| m.as_str().to_owned()),
store: cap[3].to_owned(), store: cap[3].to_owned(),
}) })
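The FromStr change above now parses the optional user part of the repository URL into a Userid instead of storing a raw string; Option::transpose() turns the Option<Result<..>> produced by map into a Result<Option<..>>, so ? can reject an invalid user while an absent one stays None. A small self-contained illustration of that transpose pattern (UserId is a hypothetical stand-in for the crate's Userid):

use std::convert::TryFrom;

// Hypothetical stand-in for the crate's Userid type.
#[derive(Debug, PartialEq)]
struct UserId(String);

impl TryFrom<String> for UserId {
    type Error = String;
    fn try_from(s: String) -> Result<Self, Self::Error> {
        if s.contains('@') { Ok(UserId(s)) } else { Err(format!("'{}' is missing a realm", s)) }
    }
}

// cap: the optional user capture from the repository URL regex.
fn parse_user(cap: Option<&str>) -> Result<Option<UserId>, String> {
    // map:       Option<&str>          -> Option<Result<UserId, _>>
    // transpose: Option<Result<..>>    -> Result<Option<UserId>, _>, so `?` applies
    Ok(cap.map(|s| UserId::try_from(s.to_owned())).transpose()?)
}

fn main() {
    assert_eq!(parse_user(None).unwrap(), None);
    assert_eq!(parse_user(Some("backup@pbs")).unwrap(), Some(UserId("backup@pbs".into())));
    assert!(parse_user(Some("backup")).is_err());
}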


@ -4,7 +4,6 @@ use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use chrono::{DateTime, Utc};
use futures::*; use futures::*;
use futures::stream::Stream; use futures::stream::Stream;
use futures::future::AbortHandle; use futures::future::AbortHandle;
@ -51,16 +50,18 @@ impl BackupWriter {
datastore: &str, datastore: &str,
backup_type: &str, backup_type: &str,
backup_id: &str, backup_id: &str,
backup_time: DateTime<Utc>, backup_time: i64,
debug: bool, debug: bool,
benchmark: bool
) -> Result<Arc<BackupWriter>, Error> { ) -> Result<Arc<BackupWriter>, Error> {
let param = json!({ let param = json!({
"backup-type": backup_type, "backup-type": backup_type,
"backup-id": backup_id, "backup-id": backup_id,
"backup-time": backup_time.timestamp(), "backup-time": backup_time,
"store": datastore, "store": datastore,
"debug": debug "debug": debug,
"benchmark": benchmark
}); });
let req = HttpClient::request_builder( let req = HttpClient::request_builder(
@ -264,9 +265,9 @@ impl BackupWriter {
crate::tools::format::strip_server_file_expenstion(archive_name.clone()) crate::tools::format::strip_server_file_expenstion(archive_name.clone())
}; };
if archive_name != CATALOG_NAME { if archive_name != CATALOG_NAME {
let speed: HumanByte = (uploaded / (duration.as_secs() as usize)).into(); let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
let uploaded: HumanByte = uploaded.into(); let uploaded: HumanByte = uploaded.into();
println!("{}: had to upload {} from {} in {}s, avgerage speed {}/s).", archive, uploaded, vsize_h, duration.as_secs(), speed); println!("{}: had to upload {} of {} in {:.2}s, average speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
} else { } else {
println!("Uploaded backup catalog ({})", vsize_h); println!("Uploaded backup catalog ({})", vsize_h);
} }
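The reworked summary line above computes the average rate from microseconds rather than whole seconds, which stops the rate from being truncated and avoids a division by zero for uploads that finish in under a second. The arithmetic in isolation:

use std::time::Duration;

/// Average upload rate in bytes per second, computed from microseconds so
/// sub-second durations neither panic nor round the rate down to zero.
fn average_speed(uploaded_bytes: usize, duration: Duration) -> usize {
    (uploaded_bytes * 1_000_000) / (duration.as_micros() as usize)
}

fn main() {
    // 4 MiB uploaded in 250 ms -> ~16 MiB/s; the old seconds-based formula
    // would have divided by zero here.
    let speed = average_speed(4 * 1024 * 1024, Duration::from_millis(250));
    println!("{} bytes/s", speed);
}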
@ -479,9 +480,9 @@ impl BackupWriter {
let param = json!({ "archive-name": MANIFEST_BLOB_NAME }); let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
self.h2.download("previous", Some(param), &mut raw_data).await?; self.h2.download("previous", Some(param), &mut raw_data).await?;
let blob = DataBlob::from_raw(raw_data)?; let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
blob.verify_crc()?; // no expected digest available
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?; let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?; let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
@ -629,7 +630,7 @@ impl BackupWriter {
}) })
} }
/// Upload speed test - prints result ot stderr /// Upload speed test - prints result to stderr
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> { pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
let mut data = vec![]; let mut data = vec![];


@ -1,8 +1,8 @@
use std::io::Write; use std::io::Write;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use chrono::Utc;
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
use http::Uri; use http::Uri;
@ -24,12 +24,13 @@ use proxmox::{
}; };
use super::pipe_to_stream::PipeToSendStream; use super::pipe_to_stream::PipeToSendStream;
use crate::api2::types::Userid;
use crate::tools::async_io::EitherStream; use crate::tools::async_io::EitherStream;
use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET}; use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET};
#[derive(Clone)] #[derive(Clone)]
pub struct AuthInfo { pub struct AuthInfo {
pub username: String, pub userid: Userid,
pub ticket: String, pub ticket: String,
pub token: String, pub token: String,
} }
@ -99,12 +100,14 @@ pub struct HttpClient {
client: Client<HttpsConnector>, client: Client<HttpsConnector>,
server: String, server: String,
fingerprint: Arc<Mutex<Option<String>>>, fingerprint: Arc<Mutex<Option<String>>>,
auth: BroadcastFuture<AuthInfo>, first_auth: BroadcastFuture<()>,
auth: Arc<RwLock<AuthInfo>>,
ticket_abort: futures::future::AbortHandle,
_options: HttpClientOptions, _options: HttpClientOptions,
} }
/// Delete stored ticket data (logout) /// Delete stored ticket data (logout)
pub fn delete_ticket_info(prefix: &str, server: &str, username: &str) -> Result<(), Error> { pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Result<(), Error> {
let base = BaseDirectories::with_prefix(prefix)?; let base = BaseDirectories::with_prefix(prefix)?;
@ -116,7 +119,7 @@ pub fn delete_ticket_info(prefix: &str, server: &str, username: &str) -> Result<
let mut data = file_get_json(&path, Some(json!({})))?; let mut data = file_get_json(&path, Some(json!({})))?;
if let Some(map) = data[server].as_object_mut() { if let Some(map) = data[server].as_object_mut() {
map.remove(username); map.remove(username.as_str());
} }
replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?; replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
@ -198,7 +201,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
let mut data = file_get_json(&path, Some(json!({})))?; let mut data = file_get_json(&path, Some(json!({})))?;
let now = Utc::now().timestamp(); let now = proxmox::tools::time::epoch_i64();
data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token}); data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
@ -223,15 +226,15 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
Ok(()) Ok(())
} }
fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(String, String)> { fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(String, String)> {
let base = BaseDirectories::with_prefix(prefix).ok()?; let base = BaseDirectories::with_prefix(prefix).ok()?;
// usually /run/user/<uid>/... // usually /run/user/<uid>/...
let path = base.place_runtime_file("tickets").ok()?; let path = base.place_runtime_file("tickets").ok()?;
let data = file_get_json(&path, None).ok()?; let data = file_get_json(&path, None).ok()?;
let now = Utc::now().timestamp(); let now = proxmox::tools::time::epoch_i64();
let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60; let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
let uinfo = data[server][username].as_object()?; let uinfo = data[server][userid.as_str()].as_object()?;
let timestamp = uinfo["timestamp"].as_i64()?; let timestamp = uinfo["timestamp"].as_i64()?;
let age = now - timestamp; let age = now - timestamp;
@ -245,8 +248,11 @@ fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(Strin
} }
impl HttpClient { impl HttpClient {
pub fn new(
pub fn new(server: &str, username: &str, mut options: HttpClientOptions) -> Result<Self, Error> { server: &str,
userid: &Userid,
mut options: HttpClientOptions,
) -> Result<Self, Error> {
let verified_fingerprint = Arc::new(Mutex::new(None)); let verified_fingerprint = Arc::new(Mutex::new(None));
@ -288,7 +294,6 @@ impl HttpClient {
let mut httpc = hyper::client::HttpConnector::new(); let mut httpc = hyper::client::HttpConnector::new();
httpc.set_nodelay(true); // important for h2 download performance! httpc.set_nodelay(true); // important for h2 download performance!
httpc.set_recv_buffer_size(Some(1024*1024)); //important for h2 download performance!
httpc.enforce_http(false); // we want https... httpc.enforce_http(false); // we want https...
let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build()); let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build());
@ -306,30 +311,66 @@ impl HttpClient {
} else { } else {
let mut ticket_info = None; let mut ticket_info = None;
if use_ticket_cache { if use_ticket_cache {
ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, username); ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, userid);
} }
if let Some((ticket, _token)) = ticket_info { if let Some((ticket, _token)) = ticket_info {
ticket ticket
} else { } else {
Self::get_password(&username, options.interactive)? Self::get_password(userid, options.interactive)?
} }
}; };
let auth = Arc::new(RwLock::new(AuthInfo {
userid: userid.clone(),
ticket: password.clone(),
token: "".to_string(),
}));
let server2 = server.to_string();
let client2 = client.clone();
let auth2 = auth.clone();
let prefix2 = options.prefix.clone();
let renewal_future = async move {
loop {
tokio::time::delay_for(Duration::new(60*15, 0)).await; // 15 minutes
let (userid, ticket) = {
let authinfo = auth2.read().unwrap().clone();
(authinfo.userid, authinfo.ticket)
};
match Self::credentials(client2.clone(), server2.clone(), userid, ticket).await {
Ok(auth) => {
if use_ticket_cache && prefix2.is_some() {
let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.userid.to_string(), &auth.ticket, &auth.token);
}
*auth2.write().unwrap() = auth;
},
Err(err) => {
eprintln!("re-authentication failed: {}", err);
return;
}
}
}
};
let (renewal_future, ticket_abort) = futures::future::abortable(renewal_future);
let login_future = Self::credentials( let login_future = Self::credentials(
client.clone(), client.clone(),
server.to_owned(), server.to_owned(),
username.to_owned(), userid.to_owned(),
password, password.to_owned(),
).map_ok({ ).map_ok({
let server = server.to_string(); let server = server.to_string();
let prefix = options.prefix.clone(); let prefix = options.prefix.clone();
let authinfo = auth.clone();
move |auth| { move |auth| {
if use_ticket_cache && prefix.is_some() { if use_ticket_cache && prefix.is_some() {
let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.username, &auth.ticket, &auth.token); let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.userid.to_string(), &auth.ticket, &auth.token);
} }
*authinfo.write().unwrap() = auth;
auth tokio::spawn(renewal_future);
} }
}); });
@ -337,7 +378,9 @@ impl HttpClient {
client, client,
server: String::from(server), server: String::from(server),
fingerprint: verified_fingerprint, fingerprint: verified_fingerprint,
auth: BroadcastFuture::new(Box::new(login_future)), auth,
ticket_abort,
first_auth: BroadcastFuture::new(Box::new(login_future)),
_options: options, _options: options,
}) })
} }
@ -347,7 +390,9 @@ impl HttpClient {
/// Login is done on demand, so this is only required if you need /// Login is done on demand, so this is only required if you need
/// access to authentication data in 'AuthInfo'. /// access to authentication data in 'AuthInfo'.
pub async fn login(&self) -> Result<AuthInfo, Error> { pub async fn login(&self) -> Result<AuthInfo, Error> {
self.auth.listen().await self.first_auth.listen().await?;
let authinfo = self.auth.read().unwrap();
Ok(authinfo.clone())
} }
/// Returns the optional fingerprint passed to the new() constructor. /// Returns the optional fingerprint passed to the new() constructor.
@ -355,7 +400,7 @@ impl HttpClient {
(*self.fingerprint.lock().unwrap()).clone() (*self.fingerprint.lock().unwrap()).clone()
} }
fn get_password(username: &str, interactive: bool) -> Result<String, Error> { fn get_password(username: &Userid, interactive: bool) -> Result<String, Error> {
// If we're on a TTY, query the user for a password // If we're on a TTY, query the user for a password
if interactive && tty::stdin_isatty() { if interactive && tty::stdin_isatty() {
let msg = format!("Password for \"{}\": ", username); let msg = format!("Password for \"{}\": ", username);
@ -579,14 +624,14 @@ impl HttpClient {
async fn credentials( async fn credentials(
client: Client<HttpsConnector>, client: Client<HttpsConnector>,
server: String, server: String,
username: String, username: Userid,
password: String, password: String,
) -> Result<AuthInfo, Error> { ) -> Result<AuthInfo, Error> {
let data = json!({ "username": username, "password": password }); let data = json!({ "username": username, "password": password });
let req = Self::request_builder(&server, "POST", "/api2/json/access/ticket", Some(data)).unwrap(); let req = Self::request_builder(&server, "POST", "/api2/json/access/ticket", Some(data)).unwrap();
let cred = Self::api_request(client, req).await?; let cred = Self::api_request(client, req).await?;
let auth = AuthInfo { let auth = AuthInfo {
username: cred["data"]["username"].as_str().unwrap().to_owned(), userid: cred["data"]["username"].as_str().unwrap().parse()?,
ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(), ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(), token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
}; };
@ -664,6 +709,12 @@ impl HttpClient {
} }
} }
impl Drop for HttpClient {
fn drop(&mut self) {
self.ticket_abort.abort();
}
}
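Taken together, the HttpClient changes above replace the one-shot login future with a shared AuthInfo behind an RwLock plus a background task that re-authenticates every 15 minutes and is aborted once the client is dropped. A reduced standalone sketch of that spawn/abort wiring, assuming tokio 1.x (sleep instead of the delay_for used with tokio 0.2 here) and the futures crate; renew() stands in for the real /access/ticket request:

use std::sync::{Arc, RwLock};
use std::time::Duration;

#[derive(Clone, Debug)]
struct AuthInfo { userid: String, ticket: String }

// Stand-in for the real ticket renewal API call.
async fn renew(userid: &str, old_ticket: &str) -> Result<AuthInfo, String> {
    Ok(AuthInfo { userid: userid.to_owned(), ticket: format!("renewed-from-{}", old_ticket) })
}

struct Client {
    auth: Arc<RwLock<AuthInfo>>,
    ticket_abort: futures::future::AbortHandle,
}

impl Client {
    fn new(userid: &str, password: &str) -> Self {
        let auth = Arc::new(RwLock::new(AuthInfo {
            userid: userid.to_owned(),
            ticket: password.to_owned(),
        }));

        let auth2 = Arc::clone(&auth);
        let renewal_future = async move {
            loop {
                tokio::time::sleep(Duration::from_secs(60 * 15)).await; // every 15 minutes
                let (userid, ticket) = {
                    let info = auth2.read().unwrap().clone();
                    (info.userid, info.ticket)
                };
                match renew(&userid, &ticket).await {
                    Ok(new_auth) => *auth2.write().unwrap() = new_auth,
                    Err(err) => {
                        eprintln!("re-authentication failed: {}", err);
                        return;
                    }
                }
            }
        };
        // Wrap the loop so it can be cancelled; keep the handle in the client.
        let (renewal_future, ticket_abort) = futures::future::abortable(renewal_future);
        tokio::spawn(renewal_future);

        Client { auth, ticket_abort }
    }
}

impl Drop for Client {
    fn drop(&mut self) {
        // Stop renewing tickets once the client goes away.
        self.ticket_abort.abort();
    }
}

#[tokio::main]
async fn main() {
    let client = Client::new("root@pam", "initial-ticket");
    println!("ticket: {}", client.auth.read().unwrap().ticket);
}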
#[derive(Clone)] #[derive(Clone)]
pub struct H2Client { pub struct H2Client {


@ -3,15 +3,18 @@
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use serde_json::json; use serde_json::json;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::sync::Arc; use std::sync::{Arc, Mutex};
use std::collections::HashMap; use std::collections::{HashSet, HashMap};
use std::io::{Seek, SeekFrom}; use std::io::{Seek, SeekFrom};
use proxmox::api::error::{StatusCode, HttpError}; use proxmox::api::error::{StatusCode, HttpError};
use crate::server::{WorkerTask}; use crate::{
use crate::backup::*; tools::compute_file_csum,
use crate::api2::types::*; server::WorkerTask,
use super::*; backup::*,
api2::types::*,
client::*,
};
// fixme: implement filters // fixme: implement filters
@ -20,24 +23,54 @@ use super::*;
async fn pull_index_chunks<I: IndexFile>( async fn pull_index_chunks<I: IndexFile>(
_worker: &WorkerTask, _worker: &WorkerTask,
chunk_reader: &mut RemoteChunkReader, chunk_reader: RemoteChunkReader,
target: Arc<DataStore>, target: Arc<DataStore>,
index: I, index: I,
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
) -> Result<(), Error> { ) -> Result<(), Error> {
use futures::stream::{self, StreamExt, TryStreamExt};
for pos in 0..index.index_count() { let stream = stream::iter(
let digest = index.index_digest(pos).unwrap(); (0..index.index_count())
let chunk_exists = target.cond_touch_chunk(digest, false)?; .map(|pos| index.chunk_info(pos).unwrap())
if chunk_exists { .filter(|info| {
//worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest))); let mut guard = downloaded_chunks.lock().unwrap();
continue; let done = guard.contains(&info.digest);
} if !done {
//worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest))); // Note: We mark a chunk as downloaded before its actually downloaded
let chunk = chunk_reader.read_raw_chunk(&digest).await?; // to avoid duplicate downloads.
guard.insert(info.digest);
}
!done
})
);
target.insert_chunk(&chunk, &digest)?; stream
} .map(|info| {
let target = Arc::clone(&target);
let chunk_reader = chunk_reader.clone();
Ok::<_, Error>(async move {
let chunk_exists = crate::tools::runtime::block_in_place(|| target.cond_touch_chunk(&info.digest, false))?;
if chunk_exists {
//worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
return Ok::<_, Error>(());
}
//worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
crate::tools::runtime::block_in_place(|| {
chunk.verify_unencrypted(info.size() as usize, &info.digest)?;
target.insert_chunk(&chunk, &info.digest)?;
Ok(())
})
})
})
.try_buffer_unordered(20)
.try_for_each(|_res| futures::future::ok(()))
.await?;
Ok(()) Ok(())
} }
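The rewritten pull_index_chunks above turns the sequential chunk loop into a stream of fetch futures: digests already recorded in the shared HashSet are skipped (the set is shared across all archives of a group, which is what the "avoid duplicate downloads" commit is about), and up to 20 fetches run concurrently via try_buffer_unordered. A trimmed-down standalone sketch of that shape, assuming tokio and the futures crate; fetch_chunk() stands in for the remote read, verification and datastore insert:

use std::collections::HashSet;
use std::sync::{Arc, Mutex};

use futures::stream::{self, StreamExt, TryStreamExt};

type Digest = [u8; 32];

// Stand-in for "download chunk + verify + insert into the datastore".
async fn fetch_chunk(digest: Digest) -> Result<(), String> {
    println!("fetching chunk {:02x}..", digest[0]);
    Ok(())
}

async fn pull_index_chunks(
    digests: Vec<Digest>,
    downloaded: Arc<Mutex<HashSet<Digest>>>,
) -> Result<(), String> {
    stream::iter(
        digests
            .into_iter()
            // Mark a digest as downloaded *before* fetching it, so a second
            // index referencing the same chunk does not schedule it again.
            .filter(|digest| downloaded.lock().unwrap().insert(*digest))
    )
    .map(|digest| Ok::<_, String>(async move { fetch_chunk(digest).await }))
    .try_buffer_unordered(20) // at most 20 downloads in flight
    .try_for_each(|_| futures::future::ok(()))
    .await
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let downloaded = Arc::new(Mutex::new(HashSet::new()));
    // The second index repeats a digest; it is fetched only once.
    pull_index_chunks(vec![[1u8; 32], [2u8; 32]], Arc::clone(&downloaded)).await?;
    pull_index_chunks(vec![[2u8; 32], [3u8; 32]], downloaded).await?;
    Ok(())
}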
@ -50,6 +83,7 @@ async fn download_manifest(
let mut tmp_manifest_file = std::fs::OpenOptions::new() let mut tmp_manifest_file = std::fs::OpenOptions::new()
.write(true) .write(true)
.create(true) .create(true)
.truncate(true)
.read(true) .read(true)
.open(&filename)?; .open(&filename)?;
@ -60,15 +94,33 @@ async fn download_manifest(
Ok(tmp_manifest_file) Ok(tmp_manifest_file)
} }
fn verify_archive(
info: &FileInfo,
csum: &[u8; 32],
size: u64,
) -> Result<(), Error> {
if size != info.size {
bail!("wrong size for file '{}' ({} != {})", info.filename, info.size, size);
}
if csum != &info.csum {
bail!("wrong checksum for file '{}'", info.filename);
}
Ok(())
}
async fn pull_single_archive( async fn pull_single_archive(
worker: &WorkerTask, worker: &WorkerTask,
reader: &BackupReader, reader: &BackupReader,
chunk_reader: &mut RemoteChunkReader, chunk_reader: &mut RemoteChunkReader,
tgt_store: Arc<DataStore>, tgt_store: Arc<DataStore>,
snapshot: &BackupDir, snapshot: &BackupDir,
archive_name: &str, archive_info: &FileInfo,
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let archive_name = &archive_info.filename;
let mut path = tgt_store.base_path(); let mut path = tgt_store.base_path();
path.push(snapshot.relative_path()); path.push(snapshot.relative_path());
path.push(archive_name); path.push(archive_name);
@ -89,16 +141,23 @@ async fn pull_single_archive(
ArchiveType::DynamicIndex => { ArchiveType::DynamicIndex => {
let index = DynamicIndexReader::new(tmpfile) let index = DynamicIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read dynamic index {:?} - {}", tmp_path, err))?; .map_err(|err| format_err!("unable to read dynamic index {:?} - {}", tmp_path, err))?;
let (csum, size) = index.compute_csum();
verify_archive(archive_info, &csum, size)?;
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?; pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
} }
ArchiveType::FixedIndex => { ArchiveType::FixedIndex => {
let index = FixedIndexReader::new(tmpfile) let index = FixedIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err))?; .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err))?;
let (csum, size) = index.compute_csum();
verify_archive(archive_info, &csum, size)?;
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?; pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
}
ArchiveType::Blob => {
let (csum, size) = compute_file_csum(&mut tmpfile)?;
verify_archive(archive_info, &csum, size)?;
} }
ArchiveType::Blob => { /* nothing to do */ }
} }
if let Err(err) = std::fs::rename(&tmp_path, &path) { if let Err(err) = std::fs::rename(&tmp_path, &path) {
bail!("Atomic rename file {:?} failed - {}", path, err); bail!("Atomic rename file {:?} failed - {}", path, err);
@ -139,6 +198,7 @@ async fn pull_snapshot(
reader: Arc<BackupReader>, reader: Arc<BackupReader>,
tgt_store: Arc<DataStore>, tgt_store: Arc<DataStore>,
snapshot: &BackupDir, snapshot: &BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut manifest_name = tgt_store.base_path(); let mut manifest_name = tgt_store.base_path();
@ -174,16 +234,14 @@ async fn pull_snapshot(
}; };
}, },
}; };
let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?; let tmp_manifest_blob = DataBlob::load_from_reader(&mut tmp_manifest_file)?;
tmp_manifest_blob.verify_crc()?;
if manifest_name.exists() { if manifest_name.exists() {
let manifest_blob = proxmox::try_block!({ let manifest_blob = proxmox::try_block!({
let mut manifest_file = std::fs::File::open(&manifest_name) let mut manifest_file = std::fs::File::open(&manifest_name)
.map_err(|err| format_err!("unable to open local manifest {:?} - {}", manifest_name, err))?; .map_err(|err| format_err!("unable to open local manifest {:?} - {}", manifest_name, err))?;
let manifest_blob = DataBlob::load(&mut manifest_file)?; let manifest_blob = DataBlob::load_from_reader(&mut manifest_file)?;
manifest_blob.verify_crc()?;
Ok(manifest_blob) Ok(manifest_blob)
}).map_err(|err: Error| { }).map_err(|err: Error| {
format_err!("unable to read local manifest {:?} - {}", manifest_name, err) format_err!("unable to read local manifest {:?} - {}", manifest_name, err)
@ -194,14 +252,13 @@ async fn pull_snapshot(
try_client_log_download(worker, reader, &client_log_name).await?; try_client_log_download(worker, reader, &client_log_name).await?;
} }
worker.log("no data changes"); worker.log("no data changes");
let _ = std::fs::remove_file(&tmp_manifest_name);
return Ok(()); // nothing changed return Ok(()); // nothing changed
} }
} }
let manifest = BackupManifest::try_from(tmp_manifest_blob)?; let manifest = BackupManifest::try_from(tmp_manifest_blob)?;
let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, HashMap::new());
for item in manifest.files() { for item in manifest.files() {
let mut path = tgt_store.base_path(); let mut path = tgt_store.base_path();
path.push(snapshot.relative_path()); path.push(snapshot.relative_path());
@ -242,13 +299,16 @@ async fn pull_snapshot(
} }
} }
let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, item.chunk_crypt_mode(), HashMap::new());
pull_single_archive( pull_single_archive(
worker, worker,
&reader, &reader,
&mut chunk_reader, &mut chunk_reader,
tgt_store.clone(), tgt_store.clone(),
snapshot, snapshot,
&item.filename, &item,
downloaded_chunks.clone(),
).await?; ).await?;
} }
@ -271,15 +331,16 @@ pub async fn pull_snapshot_from(
reader: Arc<BackupReader>, reader: Arc<BackupReader>,
tgt_store: Arc<DataStore>, tgt_store: Arc<DataStore>,
snapshot: &BackupDir, snapshot: &BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (_path, is_new) = tgt_store.create_backup_dir(&snapshot)?; let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;
if is_new { if is_new {
worker.log(format!("sync snapshot {:?}", snapshot.relative_path())); worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));
if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await { if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await {
if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot) { if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
worker.log(format!("cleanup error - {}", cleanup_err)); worker.log(format!("cleanup error - {}", cleanup_err));
} }
return Err(err); return Err(err);
@ -287,7 +348,7 @@ pub async fn pull_snapshot_from(
worker.log(format!("sync snapshot {:?} done", snapshot.relative_path())); worker.log(format!("sync snapshot {:?} done", snapshot.relative_path()));
} else { } else {
worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path())); worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?; pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await?;
worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path())); worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path()));
} }
@ -322,8 +383,11 @@ pub async fn pull_group(
let mut remote_snapshots = std::collections::HashSet::new(); let mut remote_snapshots = std::collections::HashSet::new();
// start with 16384 chunks (up to 65GB)
let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*64)));
for item in list { for item in list {
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time); let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
// in-progress backups can't be synced // in-progress backups can't be synced
if let None = item.size { if let None = item.size {
@ -355,7 +419,7 @@ pub async fn pull_group(
true, true,
).await?; ).await?;
pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?; pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks.clone()).await?;
} }
if delete { if delete {
@ -364,7 +428,7 @@ pub async fn pull_group(
let backup_time = info.backup_dir.backup_time(); let backup_time = info.backup_dir.backup_time();
if remote_snapshots.contains(&backup_time) { continue; } if remote_snapshots.contains(&backup_time) { continue; }
worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path())); worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path()));
tgt_store.remove_backup_dir(&info.backup_dir)?; tgt_store.remove_backup_dir(&info.backup_dir, false)?;
} }
} }
@ -377,7 +441,7 @@ pub async fn pull_store(
src_repo: &BackupRepository, src_repo: &BackupRepository,
tgt_store: Arc<DataStore>, tgt_store: Arc<DataStore>,
delete: bool, delete: bool,
username: String, userid: Userid,
) -> Result<(), Error> { ) -> Result<(), Error> {
// explicit create shared lock to prevent GC on newly created chunks // explicit create shared lock to prevent GC on newly created chunks
@ -408,11 +472,11 @@ pub async fn pull_store(
for item in list { for item in list {
let group = BackupGroup::new(&item.backup_type, &item.backup_id); let group = BackupGroup::new(&item.backup_type, &item.backup_id);
let owner = tgt_store.create_backup_group(&group, &username)?; let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
// permission check // permission check
if owner != username { // only the owner is allowed to create additional snapshots if userid != owner { // only the owner is allowed to create additional snapshots
worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})", worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
item.backup_type, item.backup_id, username, owner)); item.backup_type, item.backup_id, userid, owner));
errors = true; errors = true;
continue; // do not stop here, instead continue continue; // do not stop here, instead continue
} }


@ -3,10 +3,10 @@ use std::collections::HashMap;
use std::pin::Pin; use std::pin::Pin;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use anyhow::Error; use anyhow::{bail, Error};
use super::BackupReader; use super::BackupReader;
use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk}; use crate::backup::{AsyncReadChunk, CryptConfig, CryptMode, DataBlob, ReadChunk};
use crate::tools::runtime::block_on; use crate::tools::runtime::block_on;
/// Read chunks from remote host using ``BackupReader`` /// Read chunks from remote host using ``BackupReader``
@ -14,7 +14,8 @@ use crate::tools::runtime::block_on;
pub struct RemoteChunkReader { pub struct RemoteChunkReader {
client: Arc<BackupReader>, client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>, crypt_config: Option<Arc<CryptConfig>>,
cache_hint: HashMap<[u8; 32], usize>, crypt_mode: CryptMode,
cache_hint: Arc<HashMap<[u8; 32], usize>>,
cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>, cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
} }
@ -25,16 +26,20 @@ impl RemoteChunkReader {
pub fn new( pub fn new(
client: Arc<BackupReader>, client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>, crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
cache_hint: HashMap<[u8; 32], usize>, cache_hint: HashMap<[u8; 32], usize>,
) -> Self { ) -> Self {
Self { Self {
client, client,
crypt_config, crypt_config,
cache_hint, crypt_mode,
cache_hint: Arc::new(cache_hint),
cache: Arc::new(Mutex::new(HashMap::new())), cache: Arc::new(Mutex::new(HashMap::new())),
} }
} }
/// Downloads raw chunk. This only verifies the (untrusted) CRC32, use
/// DataBlob::verify_unencrypted or DataBlob::decode before storing/processing further.
pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> { pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024); let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
@ -42,10 +47,22 @@ impl RemoteChunkReader {
.download_chunk(&digest, &mut chunk_data) .download_chunk(&digest, &mut chunk_data)
.await?; .await?;
let chunk = DataBlob::from_raw(chunk_data)?; let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
chunk.verify_crc()?;
Ok(chunk) match self.crypt_mode {
CryptMode::Encrypt => {
match chunk.crypt_mode()? {
CryptMode::Encrypt => Ok(chunk),
CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
}
},
CryptMode::SignOnly | CryptMode::None => {
match chunk.crypt_mode()? {
CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
CryptMode::SignOnly | CryptMode::None => Ok(chunk),
}
},
}
} }
} }
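The match above is the enforcement point for the crypt mode passed in from the manifest: an index flagged as encrypted must only yield encrypted chunks, and a plain or signed-only index must never yield an encrypted one. The same consistency check as a compact standalone function (CryptMode and the error handling here are simplified stand-ins):

#[derive(Clone, Copy, Debug)]
enum CryptMode { None, SignOnly, Encrypt }

/// Reject chunks whose crypt mode does not match what the index declared.
fn check_chunk_crypt_mode(expected: CryptMode, actual: CryptMode) -> Result<(), String> {
    match (expected, actual) {
        // encrypted index -> only encrypted chunks are acceptable
        (CryptMode::Encrypt, CryptMode::Encrypt) => Ok(()),
        // plain or signed-only index -> anything unencrypted is acceptable
        (CryptMode::None, CryptMode::None)
        | (CryptMode::None, CryptMode::SignOnly)
        | (CryptMode::SignOnly, CryptMode::None)
        | (CryptMode::SignOnly, CryptMode::SignOnly) => Ok(()),
        _ => Err("Index and chunk CryptMode don't match.".to_string()),
    }
}

fn main() {
    assert!(check_chunk_crypt_mode(CryptMode::Encrypt, CryptMode::Encrypt).is_ok());
    assert!(check_chunk_crypt_mode(CryptMode::None, CryptMode::Encrypt).is_err());
    assert!(check_chunk_crypt_mode(CryptMode::Encrypt, CryptMode::None).is_err());
}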
@ -61,9 +78,7 @@ impl ReadChunk for RemoteChunkReader {
let chunk = ReadChunk::read_raw_chunk(self, digest)?; let chunk = ReadChunk::read_raw_chunk(self, digest)?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?; let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
// fixme: verify digest?
let use_cache = self.cache_hint.contains_key(digest); let use_cache = self.cache_hint.contains_key(digest);
if use_cache { if use_cache {
@ -93,9 +108,7 @@ impl AsyncReadChunk for RemoteChunkReader {
let chunk = Self::read_raw_chunk(self, digest).await?; let chunk = Self::read_raw_chunk(self, digest).await?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?; let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
// fixme: verify digest?
let use_cache = self.cache_hint.contains_key(digest); let use_cache = self.cache_hint.contains_key(digest);
if use_cache { if use_cache {


@ -15,13 +15,14 @@ use proxmox::try_block;
use crate::buildcfg; use crate::buildcfg;
pub mod datastore;
pub mod remote;
pub mod user;
pub mod acl; pub mod acl;
pub mod cached_user_info; pub mod cached_user_info;
pub mod datastore;
pub mod jobstate;
pub mod network; pub mod network;
pub mod remote;
pub mod sync; pub mod sync;
pub mod user;
/// Check configuration directory permissions /// Check configuration directory permissions
/// ///


@ -15,6 +15,8 @@ use proxmox::tools::{fs::replace_file, fs::CreateOptions};
use proxmox::constnamemap; use proxmox::constnamemap;
use proxmox::api::{api, schema::*}; use proxmox::api::{api, schema::*};
use crate::api2::types::Userid;
// define Privilege bitfield // define Privilege bitfield
constnamemap! { constnamemap! {
@ -224,7 +226,7 @@ pub struct AclTree {
} }
pub struct AclTreeNode { pub struct AclTreeNode {
pub users: HashMap<String, HashMap<String, bool>>, pub users: HashMap<Userid, HashMap<String, bool>>,
pub groups: HashMap<String, HashMap<String, bool>>, pub groups: HashMap<String, HashMap<String, bool>>,
pub children: BTreeMap<String, AclTreeNode>, pub children: BTreeMap<String, AclTreeNode>,
} }
@ -239,7 +241,7 @@ impl AclTreeNode {
} }
} }
pub fn extract_roles(&self, user: &str, all: bool) -> HashSet<String> { pub fn extract_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
let user_roles = self.extract_user_roles(user, all); let user_roles = self.extract_user_roles(user, all);
if !user_roles.is_empty() { if !user_roles.is_empty() {
// user privs always override group privs // user privs always override group privs
@ -249,7 +251,7 @@ impl AclTreeNode {
self.extract_group_roles(user, all) self.extract_group_roles(user, all)
} }
pub fn extract_user_roles(&self, user: &str, all: bool) -> HashSet<String> { pub fn extract_user_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
let mut set = HashSet::new(); let mut set = HashSet::new();
@ -273,7 +275,7 @@ impl AclTreeNode {
set set
} }
pub fn extract_group_roles(&self, _user: &str, all: bool) -> HashSet<String> { pub fn extract_group_roles(&self, _user: &Userid, all: bool) -> HashSet<String> {
let mut set = HashSet::new(); let mut set = HashSet::new();
@ -305,7 +307,7 @@ impl AclTreeNode {
roles.remove(role); roles.remove(role);
} }
pub fn delete_user_role(&mut self, userid: &str, role: &str) { pub fn delete_user_role(&mut self, userid: &Userid, role: &str) {
let roles = match self.users.get_mut(userid) { let roles = match self.users.get_mut(userid) {
Some(r) => r, Some(r) => r,
None => return, None => return,
@ -324,7 +326,7 @@ impl AclTreeNode {
} }
} }
pub fn insert_user_role(&mut self, user: String, role: String, propagate: bool) { pub fn insert_user_role(&mut self, user: Userid, role: String, propagate: bool) {
let map = self.users.entry(user).or_insert_with(|| HashMap::new()); let map = self.users.entry(user).or_insert_with(|| HashMap::new());
if role == ROLE_NAME_NO_ACCESS { if role == ROLE_NAME_NO_ACCESS {
map.clear(); map.clear();
@ -376,7 +378,7 @@ impl AclTree {
node.delete_group_role(group, role); node.delete_group_role(group, role);
} }
pub fn delete_user_role(&mut self, path: &str, userid: &str, role: &str) { pub fn delete_user_role(&mut self, path: &str, userid: &Userid, role: &str) {
let path = split_acl_path(path); let path = split_acl_path(path);
let node = match self.get_node(&path) { let node = match self.get_node(&path) {
Some(n) => n, Some(n) => n,
@ -391,10 +393,10 @@ impl AclTree {
node.insert_group_role(group.to_string(), role.to_string(), propagate); node.insert_group_role(group.to_string(), role.to_string(), propagate);
} }
pub fn insert_user_role(&mut self, path: &str, user: &str, role: &str, propagate: bool) { pub fn insert_user_role(&mut self, path: &str, user: &Userid, role: &str, propagate: bool) {
let path = split_acl_path(path); let path = split_acl_path(path);
let node = self.get_or_insert_node(&path); let node = self.get_or_insert_node(&path);
node.insert_user_role(user.to_string(), role.to_string(), propagate); node.insert_user_role(user.to_owned(), role.to_string(), propagate);
} }
fn write_node_config( fn write_node_config(
@ -521,7 +523,7 @@ impl AclTree {
let group = &user_or_group[1..]; let group = &user_or_group[1..];
node.insert_group_role(group.to_string(), role.to_string(), propagate); node.insert_group_role(group.to_string(), role.to_string(), propagate);
} else { } else {
node.insert_user_role(user_or_group.to_string(), role.to_string(), propagate); node.insert_user_role(user_or_group.parse()?, role.to_string(), propagate);
} }
} }
} }
@ -569,7 +571,7 @@ impl AclTree {
Ok(tree) Ok(tree)
} }
pub fn roles(&self, userid: &str, path: &[&str]) -> HashSet<String> { pub fn roles(&self, userid: &Userid, path: &[&str]) -> HashSet<String> {
let mut node = &self.root; let mut node = &self.root;
let mut role_set = node.extract_roles(userid, path.is_empty()); let mut role_set = node.extract_roles(userid, path.is_empty());
@ -665,13 +667,14 @@ pub fn save_config(acl: &AclTree) -> Result<(), Error> {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use anyhow::{Error}; use anyhow::{Error};
use super::AclTree; use super::AclTree;
use crate::api2::types::Userid;
fn check_roles( fn check_roles(
tree: &AclTree, tree: &AclTree,
user: &str, user: &Userid,
path: &str, path: &str,
expected_roles: &str, expected_roles: &str,
) { ) {
@ -686,22 +689,23 @@ mod test {
} }
#[test] #[test]
fn test_acl_line_compression() -> Result<(), Error> { fn test_acl_line_compression() {
let tree = AclTree::from_raw(r###" let tree = AclTree::from_raw(
acl:0:/store/store2:user1:Admin "\
acl:0:/store/store2:user2:Admin acl:0:/store/store2:user1@pbs:Admin\n\
acl:0:/store/store2:user1:DatastoreBackup acl:0:/store/store2:user2@pbs:Admin\n\
acl:0:/store/store2:user2:DatastoreBackup acl:0:/store/store2:user1@pbs:DatastoreBackup\n\
"###)?; acl:0:/store/store2:user2@pbs:DatastoreBackup\n\
",
)
.expect("failed to parse acl tree");
let mut raw: Vec<u8> = Vec::new(); let mut raw: Vec<u8> = Vec::new();
tree.write_config(&mut raw)?; tree.write_config(&mut raw).expect("failed to write acl tree");
let raw = std::str::from_utf8(&raw)?; let raw = std::str::from_utf8(&raw).expect("acl tree is not valid utf8");
assert_eq!(raw, "acl:0:/store/store2:user1,user2:Admin,DatastoreBackup\n"); assert_eq!(raw, "acl:0:/store/store2:user1@pbs,user2@pbs:Admin,DatastoreBackup\n");
Ok(())
} }
#[test] #[test]
@ -712,15 +716,17 @@ acl:1:/storage:user1@pbs:Admin
acl:1:/storage/store1:user1@pbs:DatastoreBackup acl:1:/storage/store1:user1@pbs:DatastoreBackup
acl:1:/storage/store2:user2@pbs:DatastoreBackup acl:1:/storage/store2:user2@pbs:DatastoreBackup
"###)?; "###)?;
check_roles(&tree, "user1@pbs", "/", ""); let user1: Userid = "user1@pbs".parse()?;
check_roles(&tree, "user1@pbs", "/storage", "Admin"); check_roles(&tree, &user1, "/", "");
check_roles(&tree, "user1@pbs", "/storage/store1", "DatastoreBackup"); check_roles(&tree, &user1, "/storage", "Admin");
check_roles(&tree, "user1@pbs", "/storage/store2", "Admin"); check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
check_roles(&tree, &user1, "/storage/store2", "Admin");
check_roles(&tree, "user2@pbs", "/", ""); let user2: Userid = "user2@pbs".parse()?;
check_roles(&tree, "user2@pbs", "/storage", ""); check_roles(&tree, &user2, "/", "");
check_roles(&tree, "user2@pbs", "/storage/store1", ""); check_roles(&tree, &user2, "/storage", "");
check_roles(&tree, "user2@pbs", "/storage/store2", "DatastoreBackup"); check_roles(&tree, &user2, "/storage/store1", "");
check_roles(&tree, &user2, "/storage/store2", "DatastoreBackup");
Ok(()) Ok(())
} }
@ -733,22 +739,23 @@ acl:1:/:user1@pbs:Admin
acl:1:/storage:user1@pbs:NoAccess acl:1:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?; "###)?;
check_roles(&tree, "user1@pbs", "/", "Admin"); let user1: Userid = "user1@pbs".parse()?;
check_roles(&tree, "user1@pbs", "/storage", "NoAccess"); check_roles(&tree, &user1, "/", "Admin");
check_roles(&tree, "user1@pbs", "/storage/store1", "DatastoreBackup"); check_roles(&tree, &user1, "/storage", "NoAccess");
check_roles(&tree, "user1@pbs", "/storage/store2", "NoAccess"); check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
check_roles(&tree, "user1@pbs", "/system", "Admin"); check_roles(&tree, &user1, "/storage/store2", "NoAccess");
check_roles(&tree, &user1, "/system", "Admin");
let tree = AclTree::from_raw(r###" let tree = AclTree::from_raw(r###"
acl:1:/:user1@pbs:Admin acl:1:/:user1@pbs:Admin
acl:0:/storage:user1@pbs:NoAccess acl:0:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?; "###)?;
check_roles(&tree, "user1@pbs", "/", "Admin"); check_roles(&tree, &user1, "/", "Admin");
check_roles(&tree, "user1@pbs", "/storage", "NoAccess"); check_roles(&tree, &user1, "/storage", "NoAccess");
check_roles(&tree, "user1@pbs", "/storage/store1", "DatastoreBackup"); check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
check_roles(&tree, "user1@pbs", "/storage/store2", "Admin"); check_roles(&tree, &user1, "/storage/store2", "Admin");
check_roles(&tree, "user1@pbs", "/system", "Admin"); check_roles(&tree, &user1, "/system", "Admin");
Ok(()) Ok(())
} }
@ -758,13 +765,15 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup
let mut tree = AclTree::new(); let mut tree = AclTree::new();
tree.insert_user_role("/", "user1@pbs", "Admin", true); let user1: Userid = "user1@pbs".parse()?;
tree.insert_user_role("/", "user1@pbs", "Audit", true);
check_roles(&tree, "user1@pbs", "/", "Admin,Audit"); tree.insert_user_role("/", &user1, "Admin", true);
tree.insert_user_role("/", &user1, "Audit", true);
tree.insert_user_role("/", "user1@pbs", "NoAccess", true); check_roles(&tree, &user1, "/", "Admin,Audit");
check_roles(&tree, "user1@pbs", "/", "NoAccess");
tree.insert_user_role("/", &user1, "NoAccess", true);
check_roles(&tree, &user1, "/", "NoAccess");
let mut raw: Vec<u8> = Vec::new(); let mut raw: Vec<u8> = Vec::new();
tree.write_config(&mut raw)?; tree.write_config(&mut raw)?;
@ -780,20 +789,21 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup
let mut tree = AclTree::new(); let mut tree = AclTree::new();
tree.insert_user_role("/storage", "user1@pbs", "NoAccess", true); let user1: Userid = "user1@pbs".parse()?;
check_roles(&tree, "user1@pbs", "/storage", "NoAccess"); tree.insert_user_role("/storage", &user1, "NoAccess", true);
tree.insert_user_role("/storage", "user1@pbs", "Admin", true); check_roles(&tree, &user1, "/storage", "NoAccess");
tree.insert_user_role("/storage", "user1@pbs", "Audit", true);
check_roles(&tree, "user1@pbs", "/storage", "Admin,Audit"); tree.insert_user_role("/storage", &user1, "Admin", true);
tree.insert_user_role("/storage", &user1, "Audit", true);
tree.insert_user_role("/storage", "user1@pbs", "NoAccess", true); check_roles(&tree, &user1, "/storage", "Admin,Audit");
check_roles(&tree, "user1@pbs", "/storage", "NoAccess"); tree.insert_user_role("/storage", &user1, "NoAccess", true);
check_roles(&tree, &user1, "/storage", "NoAccess");
Ok(()) Ok(())
} }
} }

Some files were not shown because too many files have changed in this diff.