Compare commits

...

372 Commits

Author SHA1 Message Date
c76c7f8303 bump version to 0.2.2-1 2020-06-03 10:37:46 +02:00
c48aa39f3b src/bin/proxmox-backup-client.rs: implement quiet flag 2020-06-03 10:11:37 +02:00
2d32fe2c04 client restore: don't add server file ending if already specified
If one executes a client command like
 # proxmox-backup-client files <snapshot> --repository ...
the files shown already have the '.fidx' or '.blob' file ending, so
if a user just copy-pastes one of them, the client would always add
.blob, and the server would not find that file.

So avoid adding a file ending if it is already a known OK one.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-06-03 07:03:55 +02:00
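A minimal sketch of the described check in Rust (helper name and extension list are illustrative, not the actual client code):

    /// Append ".blob" only if the name does not already carry a
    /// known server-side file ending.
    fn server_archive_name(name: &str) -> String {
        const KNOWN_ENDINGS: &[&str] = &[".blob", ".fidx", ".didx"];
        if KNOWN_ENDINGS.iter().any(|e| name.ends_with(e)) {
            name.to_string()
        } else {
            format!("{}.blob", name)
        }
    }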
dc155e9bd7 client restore: factor out archive/type parsing
will be extended in an upcoming patch.

Also drop a dead else branch; it can never be hit, as we always add
.blob as a fallback

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-06-03 07:03:12 +02:00
4e14781aec fix typo 2020-06-03 06:59:43 +02:00
a595f0fee0 client: improve connection/new fingerprint query
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-06-02 10:40:31 +02:00
add5861e8d typo fixes all over the place
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-30 16:39:08 +02:00
1610c45a86 src/client/pull.rs: also download client.log.blob 2020-05-30 14:51:33 +02:00
b2387eaa45 avoid compiler warnings 2020-05-30 14:05:33 +02:00
96d65fbcd0 cleanup: define/use const for predefined blob file names. 2020-05-30 14:04:15 +02:00
7cc3473a4e src/client/backup_specification.rs: split code into extra file 2020-05-30 10:54:38 +02:00
4856a21836 src/client/pull.rs: more verbose logging 2020-05-30 08:12:43 +02:00
a0153b02c9 ui: use Proxmox.Utils.setAuthData
this uses different parameters, which we want to be the same for
all products (e.g. the secure cookie)

keep PBS.Utils.updateLoginData for the case that we want to do
something more here (as in PVE, for example)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-30 07:24:27 +02:00
04b0ca8b59 add owner to group and snapshot listings
while touching it, make columns and tbar in DataStoreContent.js
declarative members and remove the (now) unnecessary initComponent

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-30 07:24:12 +02:00
86e432b0b8 ui: add SyncView
shows a nice overview of sync jobs (incl. status of the last run,
estimated next run, etc.) with options to add/edit/remove them, and
also to show the log of the last run and manually run a job now

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:32:40 +02:00
f0ed6a218c ui: add SyncJobEdit window
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:32:13 +02:00
709584719d ui: add RemoteSelector and DataStoreSelector
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:31:54 +02:00
d43f86f3f3 api2: add admin/sync endpoint
this returns the list of syncjobs with status, as opposed to
config/sync (which is just the config)

also adds an api call where users can run the job manually under
/admin/sync/$ID/run

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:31:32 +02:00
997d7e19fc config/sync: add SyncJobStatus Struct/Schema
contains the config + status

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:29:39 +02:00
c67b1fa72f syncjob: change worker type for sync jobs
'sync' is used for manually pulling a remote datastore;
change it to 'syncjob' for scheduled syncs so that we can
differentiate between both types of sync

this also adds a separate task description for it

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:28:04 +02:00
268687ddf0 api2/pull: refactor priv checking and creating pull parameters
we want to reuse those in the api call for manually running a sync job

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:27:43 +02:00
426c1e353b api2/config/sync: fix id parameter
'name' is not the correct parameter for get/post

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:24:54 +02:00
2888b27f4c create SYNC_SCHEDULE_SCHEMA to adapt description for sync jobs
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:24:25 +02:00
f5d00373f3 ui: add missing comment field to remote model
when using a diffstore, we have to add all used columns to the model,
else they will not refresh on load

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 11:17:15 +02:00
934f5bb8ac src/bin/proxmox-backup-proxy.rs: cleanup, move code to src/tools/disks.rs
And simplify find_mounted_device by using stat.st_dev
2020-05-29 11:13:36 +02:00
9857472211 fix removing of remotes
we have to save the remote config after removing the section

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-29 10:48:26 +02:00
013fa7bbcb rrd: reduce io by saving data only once a minute 2020-05-29 09:16:13 +02:00
a8d7033cb2 src/bin/proxmox-backup-proxy.rs: add test if last prune job is still running 2020-05-29 08:06:48 +02:00
04ad7bc436 src/bin/proxmox-backup-proxy.rs: test if last sync job is still running 2020-05-29 08:06:48 +02:00
77ebbefc1a src/server/worker_task.rs: make worker_is_active_local pub 2020-05-29 08:06:48 +02:00
750252ba2f src/tools/systemd/time.rs: add test for "daily" schedule 2020-05-29 07:52:09 +02:00
dc58194ebe src/bin/proxmox-backup-proxy.rs: use correct id to lookup sync jobs 2020-05-29 07:50:59 +02:00
c6887a8a4d remote config gui: add comment field 2020-05-29 06:46:56 +02:00
090decbe76 BACKUP_REPO_URL_REGEX: move to api2::types and allow all valid data store names
The repo URL consists of
* an optional userid
* an optional host
* the datastore name

All three have a defined regex or format, but none of that was used,
so, for example, not all valid datastore names were accepted.

Move definition of the regex over to api2::types where we can access
all required regexes easily.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-29 06:29:23 +02:00
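The structure described above can be matched roughly like this (a sketch using the regex crate; the character classes here are assumptions — the real BACKUP_REPO_URL_REGEX is composed from the api2::types regexes):

    use regex::Regex;

    fn main() {
        // [[userid@]host:]datastore -- everything but the datastore
        // name is optional.
        let repo = Regex::new(
            r"^(?:(?:([\w@.]+)@)?([\w\-.]+):)?([A-Za-z0-9_][A-Za-z0-9._\-]*)$",
        )
        .unwrap();
        for input in ["backup@pbs.example.com:store1", "pbs.example.com:store1", "store1"] {
            assert!(repo.is_match(input));
        }
    }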
c32186595e api2::types: factor out USER_ID regex
allows for better reuse in an upcoming patch

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-29 06:27:38 +02:00
947f45252d www/ServerStatus.js: use term "IO wait" for CPU iowait
Because we already use "IO delay" for the storage statistics.
2020-05-29 06:12:49 +02:00
c94e1f655e rrd stats: improve io delay stats 2020-05-28 19:12:13 +02:00
d80d1f9a2b bump version to 0.2.1-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-28 17:39:41 +02:00
6161ac18a4 ui: remotes: fix remote remove buttons base url
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-28 17:29:54 +02:00
6bba120d14 ui: fix RemoteEdit password change
we have to remove the password from the submitvalues if it did not
change

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-28 17:24:06 +02:00
91e5bb49f5 src/bin/proxmox-backup-proxy.rs: simplify code
and gather all stats for the root disk
2020-05-28 12:30:54 +02:00
547e3c2f6c src/tools/disks/zfs.rs: use wtime + rtime (wait + run time) 2020-05-28 11:45:34 +02:00
4bf26be3bb www/DataStoreStatistic.js: add transfer rate 2020-05-28 10:20:29 +02:00
25c550bc28 src/bin/proxmox-backup-proxy.rs: gather zpool io stats 2020-05-28 10:09:13 +02:00
0146133b4b src/tools/disks/zfs.rs: helper to read zfs pool io stats 2020-05-28 10:07:52 +02:00
3eeba68785 depend on proxmox 0.1.38, use new fs helper functions 2020-05-28 10:06:44 +02:00
f5056656b2 use the sync id for the scheduled sync worker task
this way, multiple sync jobs with the same local store can get scheduled

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-28 06:26:03 +02:00
8c87743642 fix 'remove_vanished' cli arg again
since the target side wants this to be a boolean and
serde serializes a None value as 'null', we have to add this
only when it is really set via the CLI

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-28 06:25:30 +02:00
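The pattern, sketched with serde_json (parameter set abbreviated): the key is inserted only when the flag was actually given, so the target never sees a 'null':

    use serde_json::{json, Value};

    fn pull_params(remove_vanished: Option<bool>) -> Value {
        let mut args = json!({ "store": "store1" }); // illustrative target
        if let Some(flag) = remove_vanished {
            // only set when given on the CLI; serializing a None
            // would yield `null`, which the boolean schema rejects
            args["remove-vanished"] = Value::from(flag);
        }
        args
    }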
05d755b282 fix inserting of worker tasks
when starting a new task, we do two things to keep track of tasks
(in that order):
* updating the 'active' file with a list of tasks via
  'update_active_workers'
* updating the WORKER_TASK_LIST

the second also updates the status of running tasks in the file, by
checking against the WORKER_TASK_LIST whether each is still running

since those two things are not locked, it can happen that
we update the file, and, before updating the WORKER_TASK_LIST,
another thread calls update_active_workers and tries to
get the status from the task log, which won't have any data yet,
so the status is 'unknown'

(we never update that status afterwards, likely for performance
reasons, so we have to fix this here)

by switching the order of the two operations, we make sure that only
tasks which are inserted in the WORKER_TASK_LIST reach the 'active'
file

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-28 06:24:42 +02:00
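A condensed sketch of the reordering (the names follow the commit message, the bodies are stand-ins): the task is registered in the in-memory list before the 'active' file is touched, so a concurrent update always finds it:

    use std::sync::Mutex;

    // stand-in for the real WORKER_TASK_LIST
    static WORKER_TASK_LIST: Mutex<Vec<String>> = Mutex::new(Vec::new());

    fn update_active_workers() {
        // writes the 'active' file, resolving task status via
        // WORKER_TASK_LIST (stubbed here)
    }

    fn insert_worker_task(upid: String) {
        // register in the in-memory list *first* ...
        WORKER_TASK_LIST.lock().unwrap().push(upid);
        // ... then update the 'active' file: a concurrent
        // update_active_workers() can no longer see a task that is
        // missing from the list
        update_active_workers();
    }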
143b654550 src/tools.rs - command_output: add parameter to check exit code 2020-05-27 07:25:39 +02:00
97fab7aa11 src/tools.rs: new helper to handle command_output (std::process::Output) 2020-05-27 06:53:25 +02:00
ed216fd773 ui: acl view: only update if component is activated
Avoid triggering unneeded background updates while browsing a
datastore's content or statistics panels. They're not expensive, but I
do not like such behavior at all (having traveled on trains with
spotty network too often)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 18:58:21 +02:00
0f13623443 ui: tasks: add sync description
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 18:36:58 +02:00
dbd959d43f ui: tasks: render reader with full info
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 18:36:58 +02:00
f68ae22cc0 ui: factor out render_datetime_utc
will be reused in the next patch

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 18:36:48 +02:00
06c3dc8a8e ui: task: improve rendering of backup/prune worker entries
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 13:37:57 +02:00
a6fbbd03c8 depend on proxmox 0.1.37 2020-05-26 13:00:34 +02:00
26956d73a2 ui: datastore prune: remove debug logging
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 12:50:06 +02:00
3f98b34705 ui: rework datastore content panel controller
Mostly refactoring, but it actually fixes an issue where one could
seldom run into an undefined dereference, due to the store onLoad
callback getting triggered after some of the component was destroyed -
on quickly switching through the datastores.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 12:46:48 +02:00
40dc103103 fix cli pull api call
there is no 'delete' parameter, only 'remove-vanished', so fix that

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 12:39:19 +02:00
12710fd3c3 ui: add missing monStoreErrors
to actually show api errors on the list call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 12:38:57 +02:00
9e2a4653b4 ui: add crud for remotes
listing/adding/editing/removing

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 12:38:39 +02:00
de4db62c57 remotes: save passwords as base64
to avoid having arbitrary characters in the config (e.g. newlines);
note that this breaks existing configs

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 12:38:06 +02:00
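The encoding step, sketched with the base64 crate (0.12 API, as pinned in the Cargo.toml diff below; the helper names are made up): encode before writing to the section config, decode after reading, so newlines and quotes never end up in the file:

    fn encode_password(pw: &str) -> String {
        base64::encode(pw)
    }

    fn decode_password(encoded: &str) -> Result<String, anyhow::Error> {
        let bytes = base64::decode(encoded)?;
        Ok(String::from_utf8(bytes)?)
    }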
1a0d3d11d2 src/api2/admin/datastore.rs: add rrd api 2020-05-26 12:26:14 +02:00
8c03041a2c src/bin/proxmox-backup-proxy.rs: gather block device stats on datastore 2020-05-26 11:20:59 +02:00
3fcc4b4e5c src/tools/disks.rs: add helper to read block device stats 2020-05-26 11:20:22 +02:00
3ed07ed2cd src/tools/disks.rs: export read_sys 2020-05-26 09:49:13 +02:00
75410d65ef d/control: proxmox-backup-server: depend on proxmox-backup-docs
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-26 09:37:03 +02:00
83fd4b3b1b remote: try to use Struct for api
with a catch: the password is in the struct, but we do not want to
return it via the api, so we only 'serialize' it when the string is
not empty (this can only happen when the format is not checked by us,
i.e. when it is returned from the api), and set it manually to ""
when we return remotes from the api

this way we can still use the type but do not return the password

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 08:55:07 +02:00
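One way to express that rule with serde, as a sketch (field set abbreviated):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    pub struct Remote {
        pub name: String,
        pub host: String,
        pub userid: String,
        /// set to "" by the handler before a remote is returned;
        /// empty strings are then skipped on serialization, so the
        /// password never leaves via the api
        #[serde(skip_serializing_if = "String::is_empty")]
        pub password: String,
    }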
bfa0146c00 ui: acls: include roleid into id and sort by it
this fixes missing acls on the gui

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 08:49:59 +02:00
5dcdcea293 api2/config/remote: remove password from read_remote
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 08:49:12 +02:00
99f443c6ae api2/config/remote: lock and use digest for removal
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 08:48:45 +02:00
4f966d0592 api2/config/remote: use rpcenv for digest for read_remote
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 08:48:28 +02:00
db0c228719 config/remote: add 'name' to Remote struct
and use it as section id, like with User

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-26 08:48:05 +02:00
880fa939d1 gui: move system stat RRDs to ServerStatus panel. 2020-05-26 07:33:00 +02:00
052aaeb5e9 re-bump to 0.2.0-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 20:10:38 +02:00
5f249127b2 docs: sync version with the package versions
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 20:10:38 +02:00
8277f4ace5 ui: navigation: sort datastores entries
adding a new one after load will still append it at the end, though.
But datastores are not something which gets frequently added after the
initial setup, so we don't care about that for now.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 19:46:47 +02:00
9b1aa424b9 ui: add some task log description mappings
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 19:06:52 +02:00
fef2b3e04c css: fix load mask background image path
We're not using the exact same paths as in PVE/PMG here.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 18:41:14 +02:00
7cebe5a1f4 ui: system config: reorder big panel to bottom
Gives a better look and feel if the flex'd big panel is at the bottom

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 16:41:54 +02:00
309ef20d6d src/bin/proxmox-backup-proxy.rs: simplify code 2020-05-25 16:20:32 +02:00
d0833a70f7 src/bin/proxmox-backup-proxy.rs: gather datastore usage stats 2020-05-25 16:20:32 +02:00
dda246403c ui: index: load widget toolkit CSS
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 15:44:39 +02:00
16e0dd65f1 d/control: proxmox-widget-toolkit depend on 2.2-2
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-05-25 12:50:55 +02:00
c5a516918f bump version to 0.2.0 2020-05-25 12:48:07 +02:00
cc275e8f93 depend on proxmox 0.1.36 2020-05-25 12:13:45 +02:00
d8dc281992 www/DataStoreStatus.js: display loadavg stats 2020-05-25 11:54:15 +02:00
2c66a590c0 src/bin/proxmox-backup-proxy.rs: gather iowait stats 2020-05-25 11:54:15 +02:00
485841da2c src/bin/proxmox-backup-proxy.rs: gather loadavg stats 2020-05-25 11:40:20 +02:00
e8f5810aa1 depend on proxmox 0.1.35 2020-05-25 11:34:34 +02:00
3e930f2bdc www/DataStoreStatus.js: display root disk stats 2020-05-25 11:34:24 +02:00
dd15c0aa3b src/bin/proxmox-backup-proxy.rs: gather root disk stats 2020-05-25 11:10:07 +02:00
c1b24fbf0b www/DataStoreStatus.js: display swap stats 2020-05-25 10:39:54 +02:00
3f23b17298 src/rrd/rrd.rs: do not wrap error and return ErrorKind::NotFound 2020-05-25 10:30:04 +02:00
c25c9d8dd1 src/bin/proxmox-backup-proxy.rs: gather swap usage stats 2020-05-25 10:25:58 +02:00
84dc6adcc1 src/rrd/cache.rs: display/log error when RRD load fails 2020-05-25 10:18:53 +02:00
0c4344650d src/rrd/rrd.rs: store/verify magic number 2020-05-25 09:21:54 +02:00
4f9513996c src/bin/proxmox-backup-proxy.rs: use block_in_place for rrd update 2020-05-25 08:30:59 +02:00
736edc7a7e src/rrd/rrd.rs: implement DST_COUNTER 2020-05-25 08:14:30 +02:00
2b55de407e src/rrd/rrd.rs: correctly compute derived values
use f64 for time.
2020-05-25 07:02:04 +02:00
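The derive computation boils down to a rate over the sampling interval; a sketch (not the actual rrd.rs code), assuming the point of f64 time is to avoid integer truncation of the interval:

    /// rate of change of a monotonically increasing counter
    /// between two (time, value) samples
    fn derive(prev: (f64, f64), cur: (f64, f64)) -> Option<f64> {
        let (t0, v0) = prev;
        let (t1, v1) = cur;
        let dt = t1 - t0;
        if dt <= 0.0 {
            return None; // duplicate sample or clock jump
        }
        Some((v1 - v0) / dt)
    }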
a608806f65 www/DataStoreStatus.js: display netin/netout 2020-05-24 19:02:35 +02:00
8f0cec2642 src/bin/proxmox-backup-proxy.rs: gather netin/netout stats 2020-05-24 19:02:35 +02:00
0ed9a2b3ae src/config/network.rs: implement is_physical_nic() helper 2020-05-24 19:02:35 +02:00
58edd33d2b src/rrd/rrd.rs: implement DST_DERIVE 2020-05-24 19:02:35 +02:00
4fb05fde17 src/rrd/rrd.rs: restructure whole code 2020-05-24 16:51:28 +02:00
daca4f7888 src/rrd/rrd.rs: reduce size by using f64::NAN as UNKNOWN 2020-05-24 09:09:09 +02:00
4e6585839b src/rrd/rrd.rs: simplify and fix old value deletion 2020-05-24 06:44:06 +02:00
8c981ae379 rrd: fix display interval, try to avoid numeric errors 2020-05-23 16:03:43 +02:00
803ab12ad4 rrd: simplify code 2020-05-23 15:37:17 +02:00
a4a3f7ca5e rrd: pack multiple rrd values into the stat list 2020-05-23 14:03:44 +02:00
ba1c249eec www/DataStoreStatus.js: add test for RRD api 2020-05-23 11:52:26 +02:00
a2f862eed6 add experimental rrd api to get cpu stats 2020-05-23 11:50:53 +02:00
eaeda365e0 start gathering stats using new rrd module 2020-05-23 10:43:08 +02:00
6359dc891a add simple rrd implementation 2020-05-23 10:42:48 +02:00
07ad6470ca src/client/pull.rs: split out pull related code 2020-05-22 08:04:20 +02:00
a6160cdfeb src/bin/proxmox-backup-proxy.rs: schedule sync jobs 2020-05-22 07:50:59 +02:00
183125d576 src/api2/pull.rs: acquire try_shared_chunk_store_lock inside pull_store 2020-05-22 07:24:17 +02:00
a3016d6583 proxmox-backup-manager: add sync-job cli 2020-05-21 11:44:45 +02:00
b29d046e89 proxmox-backup-manager: split out cert.rs 2020-05-21 11:22:20 +02:00
380bd7df97 proxmox-backup-manager: split out datastore.rs 2020-05-21 11:14:34 +02:00
ea6f404e55 proxmox-backup-manager: split out dns.rs 2020-05-21 11:10:58 +02:00
a35a211d9e proxmox-backup-manager: split out network.rs 2020-05-21 11:08:38 +02:00
53e14507c1 proxmox-backup-manager: split out acl.rs 2020-05-21 10:56:46 +02:00
6fa39e53e0 proxmox-backup-manager: split out users.rs 2020-05-21 10:53:06 +02:00
a220a4564a proxmox-backup-manager: start splitting command into several files 2020-05-21 10:46:07 +02:00
6f652b1b3a rename 'job' to 'sync' 2020-05-21 10:29:25 +02:00
b1d4edc769 src/api2/config/job.rs: add job api 2020-05-21 10:16:35 +02:00
b4900286ce src/config/jobs.rs: use SectionConfig for jobs 2020-05-21 10:16:35 +02:00
c681885227 src/bin/proxmox-backup-manager.rs: format output of show commands 2020-05-20 16:47:37 +02:00
ee8b464466 src/tools/systemd.rs: avoid compiler warnings 2020-05-20 16:47:08 +02:00
51c63475e1 ui: add '.' to path regex
since we use the path for datastore ids, which can contain a '.'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 16:33:26 +02:00
ce55db66d6 proxmox-backup-manager: add show command for remote and datastore
to show the data for a single item

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 16:33:07 +02:00
2882c881e9 api2/access/acl: add path and exact parameter to list_acl
so that we can get only a subset of the acls, filtered by the
backend; also return the digest here

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:44:36 +02:00
c0ac207453 ui: add ACL panel to datastores
by introducing a datastorepanel (a TabPanel) which holds the content
and acl panel for now.

to be able to handle this in the router, we have to change the logic
of how the datastore is selected: from using the subpath to putting it
into the path (and extracting it when necessary)

if we need this again (e.g. possibly for remotes), we can further
refactor this logic to be more generic

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:27:13 +02:00
ee1458b61d fixup 2020-05-20 13:27:13 +02:00
0542cfdf4f ui: add ACL panel to Configuration
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:23:00 +02:00
12e3895399 api2/access/acl: make update_acl a protected api call
since we want to set the owner of the acl config to 'root'
which is only possible when using a protected api call

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:22:41 +02:00
11b6391c83 add 'exact' parameter to extract_acl_node_data
so that we can return acls for a single path

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:22:10 +02:00
2072aeaee6 ui: add UserSelector
this has to be different from pve for now, since the default of
'enabled' is inverted (pve: default disabled, pbs: default enabled)

if we decide to change this either here or in pve, we can refactor
it to the widget-toolkit

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:22:01 +02:00
b05672579e api2/roles: change return field of role to roleid
to be compatible with the pve api;
with this, we can reuse the ui parts (RoleSelector)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:21:47 +02:00
5160c0e986 api2/acl: add privs array to roles
so that an admin can see which roles have which privileges

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:21:37 +02:00
1ad9dd08f4 acls: use constnamemap macro for privileges
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 13:21:28 +02:00
4d3369cb9a depend on proxmox 0.1.34 2020-05-20 13:21:01 +02:00
25829a879b src/bin/proxmox-backup-proxy.rs: schedule prune jobs 2020-05-20 13:00:53 +02:00
872062ee9f src/config/datastore.rs: change prune types from i64 to u64 2020-05-20 13:00:13 +02:00
67f7ffd0db src/config/datastore.rs: add prune settings 2020-05-20 11:29:59 +02:00
0fafac2492 src/api2/access/user.rs: remove useless description
The description is not used at all if we refer to a type.
2020-05-20 11:27:58 +02:00
49ff10921c src/api2/types.rs: define PRUNE_SCHEMA_KEEP_* 2020-05-20 10:13:38 +02:00
479e4932b5 src/tools/systemd/parse_time.rs: improve error message 2020-05-20 09:43:16 +02:00
dd7a7eae8f src/bin/proxmox-backup-manager.rs: add completion helper for gc-schedule 2020-05-20 09:42:51 +02:00
8545480a31 src/bin/proxmox-backup-proxy.rs: add simple task scheduler for garbage collection 2020-05-20 08:59:45 +02:00
d6c28ddf84 src/tools/systemd/time.rs: export parse/verify 2020-05-20 08:38:39 +02:00
42fdbe5112 src/config/datastore.rs: add gc-schedule property 2020-05-20 08:38:10 +02:00
a67b70c154 depend on proxmox 0.1.33 2020-05-20 06:29:06 +02:00
9c5c383bff user: create default root user as typed struct
we added a userid attribute to the User struct, but missed that we
created the default user without that attribute via the json! macro,
which led to a runtime panic on deserialization

by using the struct directly, such errors will be caught by the compiler
in the future

with this change, we can remove the serde_json import here

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-20 06:09:08 +02:00
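The failure mode and the fix, sketched with an abbreviated field set:

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct User {
        userid: String, // the newly added attribute
        enable: bool,
    }

    fn default_root_user() -> User {
        // building the struct directly: forgetting `userid` is a
        // compile error, instead of a runtime deserialization panic
        // as with json!({ "enable": true })
        User {
            userid: "root@pam".to_string(),
            enable: true,
        }
    }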
7d4e362993 depend on proxmox 0.1.32, src/api2/access/user.rs: simplify code 2020-05-19 12:58:46 +02:00
88acc86129 ui: add UserManagement panel
to add/edit users

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-19 09:34:41 +02:00
1d8ef0dcf7 ui: use Logo/RealmComboBox from widget-toolkit
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-19 09:34:19 +02:00
522c0da0a0 use new 'id_property' for user::User and use it in api calls
this allows us to return a user::User (or Vec<> of it)
instead of a generic serde value

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-19 09:33:56 +02:00
16c75c580b adapt to changes of SectionConfigPlugin
it now requires an Option<String> for the optional id_property

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-05-19 09:28:45 +02:00
07ce44a633 avoid compiler warnings 2020-05-19 07:03:41 +02:00
6c5024b050 Cargo.toml: remove native-tls
it's not used anymore.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-05-18 13:52:56 +02:00
b2c9c793ad debcargo.toml: add missing doc build-dependencies
and mark them accordingly.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-05-18 13:48:16 +02:00
79166b3935 debcargo.toml: reflow dependencies
to make changes easier to track

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-05-18 13:08:10 +02:00
e8d1da6a15 depend on proxmox 0.1.31 - use Value to store result metadata 2020-05-18 09:57:35 +02:00
2e686e0a63 update dependencies
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-05-18 09:08:09 +02:00
7a314d18f7 src/tools/systemd/parse_time.rs: check max values 2020-05-16 13:13:50 +02:00
2d08c97ae2 CalendarEvent - compute_next_value: use change tracking to avoid repeated testing 2020-05-16 10:32:27 +02:00
50ce1f987d CalendarEvent - compute_next_value: support seconds 2020-05-16 10:21:24 +02:00
d1a5ffdf78 src/tools/systemd/tm_editor.rs: new helper class 2020-05-16 10:09:41 +02:00
99baf7afcc CalendarEvent: test and fix repeated values 2020-05-16 07:43:51 +02:00
fed270bf3f CalendarEvent: speedup/simplify repetition tests 2020-05-16 07:09:53 +02:00
e05b637c73 src/tools/systemd/parse_time.rs: move parser into separate file 2020-05-16 06:53:15 +02:00
2ee6b3fdb9 src/tools/systemd/time.rs: implement compute_next_event 2020-05-16 06:33:03 +02:00
f3a96b2cdb renamed: src/tools/systemd/parser.rs -> src/tools/systemd/config.rs 2020-05-16 06:32:28 +02:00
a260c74a12 src/tools/systemd/time.rs: add helpers to compute CalendarEvents 2020-05-15 17:55:54 +02:00
52c70f3f5e depend on proxmox 0.1.30 2020-05-15 17:51:52 +02:00
30f577248b src/api2/node/time.rs: avoid custom unsafe readlink implementations 2020-05-15 06:50:07 +02:00
00491c0230 src/tools/systemd/parser.rs: use different setups for service and timer files, code cleanup 2020-05-14 13:55:13 +02:00
2ebdbac1c4 depend on nom, add parser for systemd calendar events and time spans 2020-05-14 12:18:30 +02:00
b4a85a3fa8 update pin-utils dep to stable version
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-05-14 11:46:05 +02:00
f486e9e50e add systemd configuration file parser/writer, start job configuration 2020-05-12 13:07:49 +02:00
7f5a27d302 depend on proxmox 0.1.29 2020-05-12 13:03:09 +02:00
40a36bcc57 depend on proxmox 0.1.28 2020-05-12 09:35:57 +02:00
b61d344f01 TODO.rst: update 2020-05-12 09:35:37 +02:00
65dab0266c proxmox-backup-manager: add completion helper for port list 2020-05-08 17:28:04 +02:00
525008f7ad proxmox-backup-manager - network list: render ports/slaves
And render the interface name as the first column.
2020-05-08 16:07:23 +02:00
5bef0f43da src/config/network.rs - check_bridge_ports: correctly check vlan ports 2020-05-08 15:51:47 +02:00
0f6bdbb01f src/config/network.rs - write_config: add more consistency checks 2020-05-08 14:31:38 +02:00
a4ccb46176 src/config/network.rs: avoid duplicate port usage 2020-05-08 11:15:00 +02:00
80bf084876 src/config/network.rs: do not combine entries
It is unclear when and how to write combined entries ...
2020-05-08 10:20:57 +02:00
db5672e83e src/config/network.rs: always write bridge_ports and bond_slaves
So that we can reliably detect the interface type.
2020-05-08 09:58:03 +02:00
86a5d56c4e proxmox-backup-manager: add network create command 2020-05-08 09:55:56 +02:00
3dd27a3bf8 src/api2/node/network.rs: add protected flag to revert 2020-05-08 09:30:25 +02:00
3aedb73816 src/api2/node/network.rs: pass bridge_ports and slaves as property strings
To make it compatible with pve.
2020-05-08 08:49:17 +02:00
bab5d18c3d src/config/network.rs: implement bond_mode
and rename bond_slaves to slaves to make it compatible with pve.
2020-05-07 14:07:45 +02:00
c2ffc68554 src/api2/node/network.rs: cleanup - factor out check_duplicate_gateway 2020-05-07 11:26:30 +02:00
9651833130 src/api2/node/network.rs: allow to create bridge and bond 2020-05-07 11:09:12 +02:00
7b22acd0c2 src/config/network.rs: make it compatible with pve
and depend on proxmox 0.1.26
2020-05-07 09:28:25 +02:00
5751e49566 src/server/worker_task.rs: implement and use status command 2020-05-07 09:27:33 +02:00
197de83ffa src/server/command_socket.rs: do not abort loop on client errors, allow backup gid 2020-05-07 09:27:33 +02:00
10effc9849 add tools/disks.rs (work in progress...)
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-05-05 10:14:42 +02:00
139f891087 TODO.rst: update 2020-05-05 09:22:01 +02:00
99641a6bbb garbage_collect: call fail_on_abort to abort GC when requested. 2020-05-05 09:06:34 +02:00
74f7240b8d src/bin/proxmox-backup-client.rs: add human readable date to prune list 2020-05-05 07:33:58 +02:00
a66d5898a1 docs/administration-guide.rst: fix prune command output 2020-05-05 07:24:27 +02:00
db1e061dcb src/bin/proxmox-backup-client.rs: correctly format prune result list. 2020-05-05 06:45:37 +02:00
96feecd621 administration-guide.rst: update snapshot list output 2020-05-04 13:14:03 +02:00
f9dcfa4149 administration-guide.rst: add section "Proxmox VE integration" 2020-05-04 12:41:38 +02:00
25cf09065f docs: use OpenSans as main font
Most people also read PDFs online ...
2020-05-04 10:48:09 +02:00
fc598cdbe1 docs: use better fonts for PDFs
Font XCharter and Lato have better quality.
2020-05-04 10:15:27 +02:00
bca294a17c docs/conf.py: avoid font scale option 2020-05-04 09:02:10 +02:00
a02e8b1e95 docs/conf.py: fix baselineskip in code-blocks with scaled font 2020-05-03 09:22:01 +02:00
26d29e0ec7 pdf docs: scale down monospace font 2020-05-03 08:23:35 +02:00
15d74eaaf4 use xetex to generate pdf
To correctly handle unicode art in code blocks...
2020-05-03 07:48:55 +02:00
8df51d4852 administration-guide.rst: add role definitions 2020-05-02 16:40:20 +02:00
8f3b3cc1f9 administration-guide.rst: add example to disable/remove a user 2020-05-02 11:21:05 +02:00
17ec699d79 administration-guide.rst: start user management docs 2020-05-02 11:11:36 +02:00
b080583ba8 src/bin/proxmox-backup-manager.rs: improve user list output 2020-05-01 16:22:50 +02:00
32d83bb34c TODO: update 2020-05-01 09:02:36 +02:00
e325dbd4a3 www/Dashboard.js: fix status url 2020-04-30 12:58:41 +02:00
ecb53af6d9 add ServerStatus.js GUI with Reboot and Shutdown buttons 2020-04-30 12:12:20 +02:00
ed751dc2ab src/api2/node/status.rs: rework api, implement reboot and shutdown 2020-04-30 11:52:40 +02:00
ca9dfe5fa4 src/api2/node/tasks.rs: use api macro features for default values 2020-04-30 11:51:56 +02:00
720af9f69b src/api2/node/tasks.rs: allow users to list/access their own tasks 2020-04-30 10:05:50 +02:00
f1490da82a use reasonable acl paths (fixup) 2020-04-30 09:32:13 +02:00
74c08a5782 use reasonable acl paths 2020-04-30 09:30:00 +02:00
7f402dafb7 TODO.rst: update 2020-04-30 07:42:57 +02:00
bd88dc4116 cached_config: avoid parsing non-existent files multiple times 2020-04-30 07:04:23 +02:00
ebe556d0e7 www/DataStoreStatus.js: define Model for datastore list
We want to use the admin/datastore api (instead of config/datastore)
to get the restricted list of datastores.
2020-04-30 06:50:45 +02:00
f9e3b1104e change index to templates using handlebars
using a handlebars instance in ApiConfig to cache the templates
as long as possible; this is currently ok, as the index template
can only change when the whole package changes

if we split this up in the future, we have to trigger a reload of
the daemon on a gui package upgrade (so that the template gets reloaded)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-04-29 17:05:53 +02:00
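A sketch of the caching idea with the handlebars crate (3.0, as added in the Cargo.toml diff below; the template string and data are made up): the template is registered once and the parsed form is reused for every render:

    use handlebars::Handlebars;
    use serde_json::json;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let mut hb = Handlebars::new();
        // registered once at startup (kept in ApiConfig), so the
        // parsed template is cached for the daemon's lifetime
        hb.register_template_string("index", "<title>{{NodeName}}</title>")?;
        let page = hb.render("index", &json!({ "NodeName": "pbs" }))?;
        assert!(page.contains("pbs"));
        Ok(())
    }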
bc0d03885c use proxmox 0.1.25, use new EnumEntry feature 2020-04-29 13:01:24 +02:00
acb428cdec add DataStoreStatus.js dummy 2020-04-29 11:22:05 +02:00
de1f8f1d36 Revert "gui: display DataStoreConfig above DataStoreContent"
This reverts commit 555dfe7b8e.
2020-04-29 11:09:35 +02:00
b9f2f761bb avoid problems with missing acl.cfg and user.cfg 2020-04-29 10:40:42 +02:00
30fb602578 src/api2/admin/datastore.rs - get_datastore_list: only return name and comment
We don't want to leak the full configuration to users with limited access permissions.
Please use the api2::config::datastore api to get the full configuration.
2020-04-29 09:21:34 +02:00
0a00f6e01c src/api2/config/datastore.rs: add delete property to update method 2020-04-29 09:09:59 +02:00
30003baaa4 src/api2/config/remote.rs: fix white space 2020-04-29 09:09:39 +02:00
5211705ff1 src/api2/config/remote.rs: add delete parameter to update method 2020-04-29 09:04:17 +02:00
ec67af9af3 src/api2/pull.rs: require Datastore.Prune if delete flag is set. 2020-04-29 07:19:32 +02:00
8247db5b39 src/config/acl.rs: introduce privileges and roles for remotes 2020-04-29 07:03:44 +02:00
409f44247b fix api2::types::ACL_ROLE_SCHEMA
make sure we list all roles ...
2020-04-28 13:25:02 +02:00
dd335b77f5 src/config/acl.rs - fix regression tests 2020-04-28 11:16:15 +02:00
6f6aa95abb add Datastore.Backup, Datastore.PowerUser and Datastore.Reader role 2020-04-28 11:07:25 +02:00
54552dda59 implement backup ownership, improve datastore access permissions 2020-04-28 10:22:25 +02:00
21690bfaef depend on proxmox 0.1.24 2020-04-28 08:23:41 +02:00
1347b1152d src/config/cached_user_info.rs - lookup_privs: correctly handle superuser 2020-04-27 13:22:03 +02:00
d00e1a216f src/config/acl.rs: introduce more/better datastore privileges 2020-04-27 07:13:50 +02:00
9c7fe29dfc src/config/acl.rs: rename PRIV_DATASTORE_ALLOCATE to PRIV_DATASTORE_MODIFY 2020-04-27 06:50:35 +02:00
14627d671a src/bin/proxmox-backup-manager.rs: add dns sub command
Also improved the DNS api, added a --delete option.
2020-04-26 08:23:23 +02:00
76227a6acd src/bin/proxmox-backup-manager.rs: fix node parameter handling 2020-04-25 17:20:22 +02:00
6830608855 depend on proxmox 0.1.23 2020-04-25 17:12:15 +02:00
26d9aebc28 move src/api2/config/network.rs to src/api2/node/network.rs
So that we have the same api path for network config as pve.
2020-04-25 17:00:38 +02:00
1ca540a63b src/config/network.rs: auto-add lo, and implement a few regression tests 2020-04-24 12:57:11 +02:00
9094186a57 xattr: cleanup: don't use pxar types in the API
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-04-24 11:23:48 +02:00
27a3decbfe xattr: api cleanup
Make `flistxattr()` return a `ListXAttr` helper which
provides an iterator over `&CStr`.

This exposes the property that xattr names are a
zero-terminated string without simply being an opaque
"byte vector". Using &[u8] as a type here is too lax.

Also let `fgetxattr` take a `CStr`. While this may be a
burden on the caller, we usually already have
zero-terminated strings on the call site. Currently we only
use this method coming from `flistxattr` after all.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-04-24 10:56:52 +02:00
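The shape of the result, sketched: the kernel hands back one buffer of zero-terminated names, back to back, so an iterator over &CStr is the natural view (this is an illustration, not the crate's actual ListXAttr):

    use std::ffi::CStr;

    /// iterate a raw xattr name list: zero-terminated names,
    /// concatenated in one buffer
    fn xattr_names(buf: &[u8]) -> impl Iterator<Item = &CStr> {
        buf.split_inclusive(|&b| b == 0)
            .filter_map(|chunk| CStr::from_bytes_with_nul(chunk).ok())
    }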
9af76ef075 xattr: use checked_mul to increase size
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-04-24 10:56:52 +02:00
00ec8d1685 tools: pub use Fd from proxmox crate
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-04-24 10:56:52 +02:00
fd7c0979b4 src/bin/proxmox-backup-manager.rs: implement network revert 2020-04-24 10:45:49 +02:00
c67bc9c35c src/bin/proxmox-backup-manager.rs: new command to show pending network changes 2020-04-24 10:27:43 +02:00
3181f9b625 src/bin/proxmox-backup-manager.rs: only show pending changes with "text" format 2020-04-24 10:16:57 +02:00
2eefd9aee1 src/config/network.rs: implement network reload, set "changes" attribute 2020-04-24 09:55:46 +02:00
8a6b86b8a7 src/config/network.rs: use a simple String for comments 2020-04-24 07:46:08 +02:00
96d9478668 src/config/network/parser.rs: correctly detect vanished interfaces 2020-04-24 07:26:54 +02:00
10a9be45bd src/api2/config/network.rs: implement update/delete comments 2020-04-23 16:08:35 +02:00
5f60a58fd5 src/config/network.rs: support interface comments, cleanups 2020-04-23 15:54:30 +02:00
659c3be3d5 src/config/network.rs: avoid newline after family options 2020-04-23 11:30:41 +02:00
5e4e88e83f src/api2/config/network.rs: implement update/delete for bridge_ports and bond_slaves 2020-04-23 11:21:27 +02:00
339965d720 src/api2/config/network.rs: only allow one default gateway 2020-04-23 10:37:40 +02:00
c38b4bb8b2 src/config/network.rs: do not allow to change interface type 2020-04-23 09:43:38 +02:00
42fbe91a34 src/config/network.rs: parse bond-slaves 2020-04-23 09:31:10 +02:00
1d9a68c2fc src/config/network.rs: parse bridge-ports 2020-04-23 09:24:17 +02:00
02269f3dba src/config/network.rs: introduce NetworkInterfaceType 2020-04-23 08:45:03 +02:00
d5ca9bd5df src/config/network.rs: cleanup (new helper combine_entry) 2020-04-23 07:54:12 +02:00
02e36d96ad src/config/network.rs: write changes to interfaces.new 2020-04-23 07:19:29 +02:00
2c18efd902 src/config/network.rs: use a single mtu setting (instead of mtu_v4 and mtu_v6) 2020-04-23 07:07:14 +02:00
4cb6bd894c src/bin/proxmox-backup-manager.rs: improve network list output format 2020-04-23 06:44:55 +02:00
b1564af25a src/bin/proxmox-backup-manager.rs: format datastore list output 2020-04-22 17:37:20 +02:00
bf004ecd87 src/bin/proxmox-backup-manager.rs: format network list output 2020-04-22 17:14:52 +02:00
f1026a5aa9 src/api2/config/network.rs: allow to update 'auto' flag 2020-04-22 16:46:46 +02:00
3fce3bc36e src/config/network/parser.rs: parse MTU settings 2020-04-22 13:44:51 +02:00
f8e7ac686a src/config/network.rs: only save attributes used by the configuration method 2020-04-22 12:42:09 +02:00
c016482c7a src/api2/config/network.rs: implement delete property 2020-04-22 12:19:31 +02:00
27f2c23049 src/api2/config/network.rs: allow to update configuration method 2020-04-22 11:32:36 +02:00
df6bb03d0e src/api2/config/network.rs: improve network api 2020-04-22 10:54:07 +02:00
e2d940b949 src/config/network/parser.rs: remove debug println 2020-04-22 10:53:26 +02:00
0c226bc173 src/config/network/helper.rs: fix CIDR regex 2020-04-22 10:52:31 +02:00
76cf5208cf src/api2/types.rs: add schemas for IP/CIDR 2020-04-22 10:28:53 +02:00
2ea7bf1b3d src/api2/config/datastore.rs: fix method docs 2020-04-22 08:53:16 +02:00
8b57cd4441 src/config/network.rs: remove netmask support
rely on cidr instead.
2020-04-22 08:45:13 +02:00
68da20bf62 src/api2/types.rs: define NETWORK_INTERFACE_NAME_SCHEMA 2020-04-21 17:54:52 +02:00
c357260d09 src/config/network.rs: move type definitions to src/api2/types.rs 2020-04-21 17:25:05 +02:00
7e02d08cd0 rename ConfigMethod to NetworkConfigMethod 2020-04-21 17:17:57 +02:00
ca0e534796 src/api2/config/network.rs: start network configuration api 2020-04-21 14:28:26 +02:00
904e988667 src/config/network.rs: implement load/save 2020-04-21 12:55:33 +02:00
3f129233be src/config/network.rs: add Interface flags 'exists' and 'active' 2020-04-21 11:46:56 +02:00
a9bb491e35 src/config/network.rs: cleanup autostart flag handling 2020-04-21 11:06:22 +02:00
1ec7f8a0dd src/config/network/helper.rs: new helper get_network_interfaces() 2020-04-21 10:32:54 +02:00
92310d585c src/config/network.rs: simplify code 2020-04-20 18:10:15 +02:00
f34d4401f7 src/config/network.rs: read/write /etc/network/interfaces
Start implementing a recursive descent parser.
2020-04-20 14:15:57 +02:00
6e695960ca src/config/cached_user_info.rs: cache it up to 5 seconds 2020-04-18 08:49:20 +02:00
365f0f720c fix permission tests using non-uri parameters
We need to do those tests inside the function body instead...
2020-04-18 08:23:04 +02:00
a737179eb4 src/config/cached_user_info.rs: new check_privs helper 2020-04-18 08:09:34 +02:00
bb072ba49c src/api2/access.rs: cleanup 2020-04-18 07:28:25 +02:00
ff329f970b src/api2/types.rs: use anyhow::Error in test cases 2020-04-18 07:05:31 +02:00
f7d4e4b506 switch from failure to anyhow
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-04-17 18:43:30 +02:00
404d78c41e src/api2/pull.rs: add access permission 2020-04-17 15:27:04 +02:00
1bfc1efa50 src/api2/subscription.rs: add access permissions 2020-04-17 15:14:28 +02:00
73ce1d1146 src/api2/reader.rs: add access permissions 2020-04-17 15:01:56 +02:00
70e5f2461d src/api2/config/remote.rs: add access permissions 2020-04-17 14:57:26 +02:00
c0ef209aeb src/api2/config/datastore.rs: impl digest check for delete, add access permissions 2020-04-17 14:51:29 +02:00
9f9f7eefa3 src/api2/backup.rs: add access permissions 2020-04-17 14:40:20 +02:00
bb34b58910 src/api2/admin/datastore.rs: add access permissions - first try
We need to refine this later (introduce backup owner concept?)
2020-04-17 14:36:27 +02:00
5972def5ec acl: change path "storage" to "datastore" 2020-04-17 14:15:44 +02:00
aa90ced3bf src/api2/access/role.rs: use schema ACL_ROLE_SCHEMA 2020-04-17 14:14:06 +02:00
ca257c8097 move type defs from src/api2/access/acl.rs to src/api2/types.rs 2020-04-17 14:13:15 +02:00
3fff55b293 src/api2/access/role.rs: new api to list roles 2020-04-17 14:03:24 +02:00
4f66423fcc src/api2/access/user.rs: add access permissions 2020-04-17 11:04:36 +02:00
d4f020f4c5 src/api2/access/user.rs: add access permissions 2020-04-17 10:08:45 +02:00
d28ddb8e04 src/api2/access/acl.rs: add access permissions 2020-04-17 10:03:09 +02:00
83b6a7cf71 src/api2/node/tasks.rs: use api macro, implement access permissions 2020-04-16 17:47:21 +02:00
e4681f9f71 src/api2/node/syslog.rs: add access permissions 2020-04-16 17:08:19 +02:00
b5037fa8ed src/api2/node/status.rs: add access permissions 2020-04-16 17:05:09 +02:00
9989d2c4e9 src/server/rest.rs: reduce delay for permission error to 500ms 2020-04-16 12:56:34 +02:00
1cf7bbf412 src/api2/node/services.rs: add access permissions 2020-04-16 12:47:16 +02:00
68ed0c629d src/api2/node/journal.rs: add access permissions 2020-04-16 12:47:16 +02:00
4b40148caa start impl. access permissions 2020-04-16 12:47:16 +02:00
423e656163 src/config/cached_user_info.rs: new helper class 2020-04-16 10:05:16 +02:00
1ce8a5d0b7 depend on proxmox 0.1.21 2020-04-16 10:04:00 +02:00
109d7817cd src/config/user.rs - cached_config: do not store/return digest 2020-04-15 11:35:57 +02:00
5354511fd0 src/config/acl.rs: implement cached_config 2020-04-15 11:30:47 +02:00
bd098a7f77 src/api2/node/dns.rs: use api macro (cleanup) 2020-04-15 10:09:18 +02:00
8d048af2bf acl: improve NoAccess handling 2020-04-15 08:11:43 +02:00
4f3db187cf Docu: first proof reading
This is a first proof reading of the currently existing documentation.

fixes (hopefully all):
* spelling
* grammar

Tries to increase readability and ease of understanding by simplifying
and restructuring some sentences and paragraphs. Filler words which add
to the cognitive load but don't add anything are removed
(most notably `also`).
2020-04-15 06:52:59 +02:00
9a328319dd pxar extract: remove pattern from arg_param, add target instead 2020-04-15 06:41:37 +02:00
7e3d2e5b41 pxar create: remove exclude from arg_param 2020-04-15 06:31:46 +02:00
9c06f6c292 fix previous commit - use result. 2020-04-14 17:48:10 +02:00
9f4e47dd93 acl update: check path 2020-04-14 17:23:48 +02:00
d83175dd69 acl update: check if user exist. 2020-04-14 13:46:27 +02:00
68ccdf09a4 src/config/user.rs: implement user config cache 2020-04-14 13:45:45 +02:00
9765092ede acl api: implement update 2020-04-14 10:16:49 +02:00
ed3e60ae69 start ACL api 2020-04-13 11:09:44 +02:00
a83eab3c4d acl: use BTreeMap and BTreeSet to avoid sort() 2020-04-12 17:13:53 +02:00
0815ec7e65 acl: implement roles(), add regression tests. 2020-04-12 13:06:50 +02:00
5c6cdf9815 add acl config 2020-04-11 12:24:26 +02:00
9abcae1b0e gui: improve login view (use realms) 2020-04-09 13:37:14 +02:00
b88f9c5b1e PASSWORD_SCHEMA: set max_length to 1024 (for tickets) 2020-04-09 13:35:58 +02:00
879546aff6 api: add default property to domain list 2020-04-09 13:35:08 +02:00
73b40e9b46 api: correctly sort access subdirmap 2020-04-09 13:34:07 +02:00
708db4b3ae api: add list_domains 2020-04-09 11:36:45 +02:00
685e13347e api: move config/user to access/users, implement change_password
To make it similar to the pve api
2020-04-09 10:21:24 +02:00
7d817b0358 implement auth framework 2020-04-08 14:06:15 +02:00
579728c641 add user configuration 2020-04-08 14:06:15 +02:00
cf459b1982 gc: log pending removals 2020-04-06 09:50:40 +02:00
d16122cd87 gui: preview prune selection 2020-04-01 14:14:44 +02:00
dda7015497 prune api: return a usable result (we run synchronous anyways) 2020-04-01 12:24:28 +02:00
5b5ca60a07 fix 'keep-monthly' field name
else the backend complains about a non-existent parameter

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-03-31 08:46:52 +02:00
aeee4329b0 gui - DataStoreContent: avoid useless icons, display file path 2020-03-26 18:01:04 +01:00
5f44899207 gui - DataStoreContent: move control code into controller (cleanup) 2020-03-26 17:23:51 +01:00
b1127fd0d0 gui: add prune dialog 2020-03-26 13:23:28 +01:00
4299ca727c src/server/rest.rs: use correct formatter 2020-03-26 12:54:20 +01:00
3383973532 gui: cleanup DataStoreContent.js 2020-03-26 11:17:15 +01:00
555dfe7b8e gui: display DataStoreConfig above DataStoreContent 2020-03-26 08:38:35 +01:00
e8f0ad19af gui: use a tree panel for DataStoreContent 2020-03-25 15:17:28 +01:00
a83ee10c49 depend on proxmox 0.1.20 2020-03-25 15:17:16 +01:00
9abc1166b0 bump proxmox dependency to 0.1.19
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-03-19 10:12:33 +01:00
99c287861e add 'rsync' to build_depends
a 'make deb' fails without rsync installed (a pxar test needs it)

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-03-18 16:24:33 +01:00
6650a242fb rewrite future select in upgrade_to_backup_protocol using select macro
and handle all ok/err cases with at least logging

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-03-18 11:33:59 +01:00
66b4593b04 fix typo
s/Nuber/Number/
s/backups/Backups/

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-03-17 16:13:54 +01:00
0e7ab0567c buildsys: add missing dependency
required for the docs built when building the deb packages

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-03-16 14:54:24 +01:00
10426c1750 Makefile - upload: upload to correct product repos 2020-03-03 10:56:07 +01:00
199 changed files with 14286 additions and 2154 deletions

Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "0.1.3"
version = "0.2.2"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"
@ -14,44 +14,48 @@ name = "proxmox_backup"
path = "src/lib.rs"
[dependencies]
base64 = "0.10"
base64 = "0.12"
bitflags = "1.2.1"
bytes = "0.5"
chrono = "0.4" # Date and time library for Rust
crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] }
failure = "0.1"
anyhow = "1.0"
futures = "0.3"
h2 = { version = "0.2", features = ["stream"] }
handlebars = "3.0"
http = "0.2"
hyper = "0.13"
lazy_static = "1.4"
libc = "0.2"
log = "0.4"
native-tls = "0.2"
nix = "0.16"
once_cell = "1.3.1"
openssl = "0.10"
pam = "0.7"
pam-sys = "0.5"
percent-encoding = "2.1"
pin-utils = "0.1.0-alpha"
proxmox = { version = "0.1.18", features = [ "sortable-macro", "api-macro" ] }
pin-utils = "0.1.0"
proxmox = { version = "0.1.38", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
regex = "1.2"
rustyline = "5.0.5"
rustyline = "6"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
siphasher = "0.3"
syslog = "4.0"
tokio = { version = "0.2.9", features = [ "blocking", "fs", "io-util", "macros", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
tokio-openssl = "0.4.0"
tokio-util = { version = "0.2.0", features = [ "codec" ] }
tokio-util = { version = "0.3", features = [ "codec" ] }
tower-service = "0.3.0"
udev = "0.3"
url = "2.1"
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
walkdir = "2"
xdg = "2.2"
zstd = { version = "0.4", features = [ "bindgen" ] }
nom = "5.1"
[features]
default = []

Makefile

@ -37,10 +37,12 @@ CARGO ?= cargo
COMPILED_BINS := \
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
DEBS= ${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb ${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
DEBS=${SERVER_DEB} ${CLIENT_DEB}
DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
DESTDIR=
@ -135,7 +137,8 @@ install: $(COMPILED_BINS)
$(MAKE) -C docs install
.PHONY: upload
upload: ${DEBS}
upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - ${DEBS} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster

TODO.rst

@ -1,22 +1,39 @@
TODO list for Proxmox Backup
============================
* user management api
* disk management api
* start writing server GUI
* improve catalog shell commands
* improve user documentation
GUI
===
* fix network/dns GUI (network/dns api changed)
* user/acl/permission management GUI
* implement GUI to configure remotes
* implement fancy DatastoreStatus.js dashboard
* implement PVE GUI to add PBS storage (with convenient copy/paste
functionality, like we have for cluster join)
Chores:
=======
* move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/
* recompute PXAR_ header types from strings: avoid using numbers from casync
* remove pbs-* systemd timers and services on package purge
Suggestions
===========

debian/changelog

@ -1,3 +1,41 @@
rust-proxmox-backup (0.2.2-1) unstable; urgency=medium
* proxmox-backup-client.rs: implement quiet flag
* client restore: don't add server file ending if already specified
* src/client/pull.rs: also download client.log.blob
* src/client/pull.rs: more verbose logging
* gui improvements
-- Proxmox Support Team <support@proxmox.com> Wed, 03 Jun 2020 10:37:12 +0200
rust-proxmox-backup (0.2.1-1) unstable; urgency=medium
* ui: move server RRD statistics to 'Server Status' panel
* ui/api: add more server statistics
* ui/api: add per-datastore usage and performance statistics over time
* ui: add initial remote config management panel
* remotes: save passwords as base64
* gather zpool io stats
* various fixes/improvements
-- Proxmox Support Team <support@proxmox.com> Thu, 28 May 2020 17:39:33 +0200
rust-proxmox-backup (0.2.0-1) unstable; urgency=medium
* see git changelog (too many changes)
-- Proxmox Support Team <support@proxmox.com> Mon, 25 May 2020 19:17:03 +0200
rust-proxmox-backup (0.1.3-1) unstable; urgency=medium
* use SectionConfig from proxmox 0.1.18-1

debian/control.in

@ -3,8 +3,9 @@ Architecture: any
Depends: fonts-font-awesome,
libjs-extjs (>= 6.0.1),
libzstd1 (>= 1.3.8),
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit,
proxmox-widget-toolkit (>= 2.2-4),
${misc:Depends},
${shlibs:Depends},
Description: Proxmox Backup Server daemon with tools and GUI

debian/debcargo.toml

@ -11,8 +11,31 @@ vcs_git = ""
vcs_browser = ""
maintainer = "Proxmox Support Team <support@proxmox.com>"
section = "admin"
build_depends = [ "debhelper (>= 12~)", "bash-completion" ]
build_depends_excludes = [ "debhelper (>=11)" ]
build_depends = [
"debhelper (>= 12~)",
"bash-completion",
"python3-docutils",
"python3-pygments",
"rsync",
"fonts-dejavu-core <!nodoc>",
"fonts-lato <!nodoc>",
"fonts-open-sans <!nodoc>",
"graphviz <!nodoc>",
"latexmk <!nodoc>",
"python3-sphinx <!nodoc>",
"texlive-fonts-extra <!nodoc>",
"texlive-fonts-recommended <!nodoc>",
"texlive-xetex <!nodoc>",
"xindy <!nodoc>",
]
build_depends_excludes = [
"debhelper (>=11)",
]
[packages.lib]
depends = [ "libacl1-dev", "libsystemd-dev", "libfuse3-dev", "uuid-dev" ]
depends = [
"libacl1-dev",
"libfuse3-dev",
"libsystemd-dev",
"uuid-dev",
]

debian/proxmox-backup-server.install

@ -5,6 +5,7 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
usr/sbin/proxmox-backup-manager
usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
usr/share/javascript/proxmox-backup/images/logo-128.png
usr/share/javascript/proxmox-backup/images/proxmox_logo.png

docs/Makefile

@ -73,10 +73,11 @@ html: ${GENERATED_SYNOPSIS}
.PHONY: latexpdf
latexpdf: ${GENERATED_SYNOPSIS}
@echo "Requires python3-sphinx, texlive-xetex, xindy and texlive-fonts-extra"
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
@echo "Running LaTeX files through xelatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
@echo "xelatex finished; the PDF files are in $(BUILDDIR)/latex."
.PHONY: epub3
epub3: ${GENERATED_SYNOPSIS}

docs/administration-guide.rst

@ -3,6 +3,7 @@ Administration Guide
The administration guide.
.. todo:: either add a bit more explanation or remove the previous sentence
Terminology
-----------
@ -12,7 +13,7 @@ Backup Content
When doing deduplication, there are different strategies to get
optimal results in terms of performance and/or deduplication rates.
Depending on the type of data, one can split data into fixed or variable
Depending on the type of data, one can split data into *fixed* or *variable*
sized chunks.
Fixed sized chunking needs almost no CPU performance, and is used to
@ -21,7 +22,7 @@ backup virtual machine images.
Variable sized chunking needs more CPU power, but is essential to get
good deduplication rates for file archives.
Therefore, the backup server supports both strategies.
The backup server supports both strategies.
File Archives: ``<name>.pxar``
@ -29,9 +30,9 @@ File Archives: ``<name>.pxar``
.. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
A file archive stores a whole directory tree. Content is stored using
A file archive stores a full directory tree. Content is stored using
the :ref:`pxar-format`, split into variable sized chunks. The format
is specially optimized to achieve good deduplication rates.
is optimized to achieve good deduplication rates.
Image Archives: ``<name>.img``
@ -44,8 +45,8 @@ data. Content is split into fixed sized chunks.
Binary Data (BLOBs)
^^^^^^^^^^^^^^^^^^^
This type is used to store smaller (< 16MB) binaries like
configuration data. Larger files should be stored as image archive.
This type is used to store smaller (< 16MB) binary data such as
configuration files. Larger files should be stored as image archive.
.. caution:: Please do not store all files as BLOBs. Instead, use the
file archive to store whole directory trees.
@ -54,15 +55,15 @@ configuration data. Larger files should be stored as image archive.
Catalog File: ``catalog.pcat1``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The catalog file is basically an index for file archive. It contains
the list of files, and is used to speedup search operations.
The catalog file is an index for file archives. It contains
the list of files and is used to speed-up search operations.
The Manifest: ``index.json``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The manifest contains the list of all backup files, including
file sizes and checksums. It is used to verify the consistency of a
The manifest contains the list of all backup files, their
sizes and checksums. It is used to verify the consistency of a
backup.
@ -73,17 +74,17 @@ The backup server groups backups by *type*, where *type* is one of:
``vm``
This type is used for :term:`virtual machine`\ s. Typically
contains the virtual machine configuration and an image archive
contains the virtual machine's configuration and an image archive
for each disk.
``ct``
This type is used for :term:`container`\ s. Contains the container
This type is used for :term:`container`\ s. Contains the container's
configuration and a single file archive for the container content.
``host``
This type is used for physical host, or if you want to run backups
manually from inside virtual machines or containers. Such backups
may contain file and image archives (no restrictions here).
This type is used for backups created from within the backed up machine.
Typically this would be a physical host but could also be a virtual machine
or container. Such backups may contain file and image archives; there are no restrictions in this regard.
Backup ID
@ -102,14 +103,14 @@ The time when the backup was made.
Backup Group
~~~~~~~~~~~~
We call the tuple ``<type>/<ID>`` a backup group. Such group
may contains one or more backup snapshots.
The tuple ``<type>/<ID>`` is called a backup group. Such a group
may contain one or more backup snapshots.
Backup Snapshot
~~~~~~~~~~~~~~~
We call the triplet ``<type>/<ID>/<time>`` a backup snapshot. It
The triplet ``<type>/<ID>/<time>`` is called a backup snapshot. It
uniquely identifies a specific backup within a datastore.
.. code-block:: console
@ -118,25 +119,25 @@ uniquely identifies a specific backup within a datastore.
vm/104/2019-10-09T08:01:06Z
host/elsa/2019-11-08T09:48:14Z
As you can see, the time is formatted as RFC3399_ using Coordinated
As you can see, the time format is RFC3339_ with Coordinated
Universal Time (UTC_, identified by the trailing *Z*).
:term:`DataStore`
~~~~~~~~~~~~~~~~~
A datastore is a place to store backups. The current implementation
A datastore is a place where backups are stored. The current implementation
uses a directory inside a standard unix file system (``ext4``, ``xfs``
or ``zfs``) to store backup data.
or ``zfs``) to store the backup data.
Datastores are identified by a simple *ID*. You can configure that
Datastores are identified by a simple *ID*. You can configure it
when setting up the backup server.
Backup Server Management
------------------------
The command line tool to configure and manage the server is called
The command line tool to configure and manage the backup server is called
:command:`proxmox-backup-manager`.
@ -144,7 +145,9 @@ Datastore Configuration
~~~~~~~~~~~~~~~~~~~~~~~
A :term:`datastore` is a place to store backups. You can configure
several datastores, but you need at least one of them. The datastore is identified by a simple `name` and point to a directory.
multiple datastores. At least one datastore needs to be
configured. The datastore is identified by a simple `name` and points
to a directory.
The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
@ -152,20 +155,24 @@ The following command creates a new datastore called ``store1`` on :file:`/backu
# proxmox-backup-manager datastore create store1 /backup/disk1/store1
To list existing datastores use:
To list existing datastores run:
.. code-block:: console
# proxmox-backup-manager datastore list
store1 /backup/disk1/store1
┌────────┬──────────────────────┬─────────────────────────────┐
│ name │ path │ comment │
╞════════╪══════════════════════╪═════════════════════════════╡
│ store1 │ /backup/disk1/store1 │ This is my default storage. │
└────────┴──────────────────────┴─────────────────────────────┘
Finally, it is also possible to remove the datastore configuration:
Finally, it is possible to remove the datastore configuration:
.. code-block:: console
# proxmox-backup-manager datastore remove store1
.. note:: Above command removes the datastore configuration. It does
.. note:: The above command removes only the datastore configuration. It does
not delete any data from the underlying directory.
@ -175,6 +182,126 @@ File Layout
.. todo:: Add datastore file layout example
User Management
~~~~~~~~~~~~~~~
Proxmox Backup supports several authentication realms, and you need to
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to
authenticate as a Linux system user (the user needs to exist on
the system).
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
``/etc/proxmox-backup/shadow.json``.
After installation, there is a single user ``root@pam``, which
corresponds to the Unix superuser. You can use the
``proxmox-backup-manager`` command line tool to list or manipulate
users:
.. code-block:: console
# proxmox-backup-manager user list
┌─────────────┬────────┬────────┬───────────┬──────────┬────────────────┬────────────────────┐
│ userid │ enable │ expire │ firstname │ lastname │ email │ comment │
╞═════════════╪════════╪════════╪═══════════╪══════════╪════════════════╪════════════════════╡
│ root@pam │ 1 │ │ │ │ │ Superuser │
└─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘
The superuser has full administration rights on everything, so you
normally want to add other users with fewer privileges:
.. code-block:: console
# proxmox-backup-manager user create john@pbs --email john@example.com
The create command lets you specify many options like ``--email`` or
``--password``. You can update or change any of them later with the
update command:
.. code-block:: console
# proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
# proxmox-backup-manager user update john@pbs --comment "An example user."
.. todo:: Mention how to set password without passing plaintext password as cli argument.
The resulting user list looks like this:
.. code-block:: console
# proxmox-backup-manager user list
┌──────────┬────────┬────────┬───────────┬──────────┬──────────────────┬──────────────────┐
│ userid │ enable │ expire │ firstname │ lastname │ email │ comment │
╞══════════╪════════╪════════╪═══════════╪══════════╪══════════════════╪══════════════════╡
│ john@pbs │ 1 │ │ John │ Smith │ john@example.com │ An example user. │
├──────────┼────────┼────────┼───────────┼──────────┼──────────────────┼──────────────────┤
│ root@pam │ 1 │ │ │ │ │ Superuser │
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
Newly created users do not have any permissions. Please read the next
section to learn how to set access permissions.
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``:
.. code-block:: console
# proxmox-backup-manager user update john@pbs --enable 0
Or completely remove the user with:
.. code-block:: console
# proxmox-backup-manager user remove john@pbs
Access Control
~~~~~~~~~~~~~~
Users do not have any permissions by default. Instead, you need to
specify what is allowed and what is not. You can do this by assigning
roles to users on specific objects, like datastores or remotes, as
shown in the example after this list. The following roles exist:
**Admin**
The Administrator can do anything.
**Audit**
An Auditor can view things, but is not allowed to change settings.
**NoAccess**
Disable Access - nothing is allowed.
**DatastoreAdmin**
Can do anything on datastores.
**DatastoreAudit**
Can view datastore settings and list content, but
is not allowed to read the actual data.
**DataStoreReader**
Can inspect datastore content and do restores.
**DataStoreBackup**
Can backup and restore owned backups.
**DatastorePowerUser**
Can backup, restore, and prune owned backups.
**RemoteAdmin**
Can do anything on remotes.
**RemoteAudit**
Can view remote settings.
**RemoteSyncOperator**
Is allowed to read data from a remote.
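For example, to grant the user ``john@pbs`` (created above) full control over a single datastore, you could assign the ``DatastoreAdmin`` role on that datastore's ACL path. The following is a sketch based on the ``path``, ``role``, and ``--userid`` parameters of the new ACL API in this changeset; verify the exact ``proxmox-backup-manager`` subcommand against your version:
.. code-block:: console

# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs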
Backup Client usage
-------------------
@ -184,16 +311,16 @@ The command line client is called :command:`proxmox-backup-client`.
Repository Locations
~~~~~~~~~~~~~~~~~~~~~
The client uses a special repository notation to specify a datastore
The client uses the following notation to specify a datastore repository
on the backup server.
[[username@]server:]datastore
If you do not specify a ``username`` the default is ``root@pam``. The
default for server is to use the local host (``localhost``).
The default value for ``username`` is ``root``. If no server is specified, the
default is the local host (``localhost``).
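For example, the following are all valid repository specifications, using ``backup-server`` and ``user1@pbs`` as placeholder names for illustration:
.. code-block:: console

store1
backup-server:store1
user1@pbs@backup-server:store1

The first form addresses the datastore ``store1`` on the local host, the second the same datastore on a remote server, and the third additionally selects the user (including the realm) used to connect.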
You can pass the repository by setting the ``--repository`` command
line options, or by setting the ``PBS_REPOSITORY`` environment
You can pass the repository with the ``--repository`` command
line option, or by setting the ``PBS_REPOSITORY`` environment
variable.
@ -219,8 +346,8 @@ Environment Variables
Output Format
~~~~~~~~~~~~~
Most commands support the ``--output-format`` parameter, which can be
set to the following values:
Most commands support the ``--output-format`` parameter. It accepts
the following values:
:``text``: Text format (default). Structured data is rendered as a table.
@ -240,9 +367,9 @@ Please use the following environment variables to modify output behavior:
``PROXMOX_OUTPUT_NO_HEADER``
If set (to any value), do not render table headers.
.. note:: The ``text`` format is designed to be human readable, but
.. note:: The ``text`` format is designed to be human readable, and
not meant to be parsed by automation tools. Please use the ``json``
format for such purpose because it is machine readable.
format if you need to process the output.
.. _creating-backups:
@ -250,15 +377,15 @@ Please use the following environment variables to modify output behavior:
Creating Backups
~~~~~~~~~~~~~~~~
This section explains how to create backup on physical host, or from
inside virtual machines or containers. Such backups may contain file
and image archives (no restrictions here).
This section explains how to create a backup from within the machine. This can
be a physical host, a virtual machine, or a container. Such backups may contain file
and image archives. There are no restrictions in this regard.
.. note:: If you want to backup virtual machines or containers see :ref:`pve-integration`.
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
The prerequisite is that you have already set up (or can access) a
backup server. It is assumed that you know the repository name and
credentials. In the following examples we simply use ``backup-server:store1``.
For the following example, you need to have a backup server set up, have
working credentials, and know the repository name.
In the following examples we use ``backup-server:store1``.
.. code-block:: console
@ -275,15 +402,15 @@ credentials. In the following examples we simply use ``backup-server:store1``.
This will prompt you for a password and then upload a file archive named
``root.pxar`` containing all the files in the ``/`` directory.
.. Caution:: Please note that proxmox-backup-client does not
.. Caution:: Please note that the proxmox-backup-client does not
automatically include mount points. Instead, you will see a short
``skip mount point`` notice for each of them. The idea is that you
create a separate file archive for each mounted disk. You can also
``skip mount point`` notice for each of them. The idea is to
create a separate file archive for each mounted disk. You can
explicitly include them using the ``--include-dev`` option
(e.g. ``--include-dev /boot/efi``). You can use this option
multiple times, once for each mount point you want to include.
multiple times, once for each mount point that should be included.
The ``--repository`` option is sometimes quite long and is used by all
The ``--repository`` option can get quite long and is used by all
commands. You can avoid having to enter this value by setting the
environment variable ``PBS_REPOSITORY``.
@ -291,26 +418,26 @@ environment variable ``PBS_REPOSITORY``.
# export PBS_REPOSITORY=backup-server:store1
You can then execute all commands without specifying the ``--repository``
After this you can execute all commands without specifying the ``--repository``
option.
One single backup is allowed to contain more than one archive. For example, assume you want to backup two disks mounted at ``/mmt/disk1`` and ``/mnt/disk2``:
A single backup is allowed to contain more than one archive. For example, if
you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
.. code-block:: console
# proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2
This create a backup of both disks.
This creates a backup of both disks.
The backup command takes a list of backup specifications, which
include archive name on the server, the type of the archive, and the
archive source at the client. The format is quite simple to understand:
include the archive name on the server, the type of the archive, and the
archive source at the client. The format is:
<archive-name>.<type>:<source-path>
Common types are ``.pxar`` for file archives, and ``.img`` for block
device images. Thus it is quite easy to create a backup for a block
device:
device images. To create a backup of a block device, run the following command:
.. code-block:: console
@ -320,42 +447,43 @@ Excluding files/folders from a backup
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes it is desired to exclude certain files or folders from a backup archive.
Using the proxmox backup client this is possible via simple text based
``.pxarexclude`` files placed in the filesystem hierarchy.
To tell the Proxmox backup client when and how to ignore files and directories,
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
Whenever the backup client encounters such a file in a directory, it interprets
each line as glob match patterns for files and directories that are to be excluded
from the backup.
Whenever such a file is encountered in a directory, the backup client reads
its content and interprets each line as a glob match pattern for
files/directories to exclude from the archive.
The file must contain a single glob pattern on each line. Empty lines are ignored.
The same is true for lines starting with ``#``, indicating a line containing comments.
Lines starting with ``!`` correspond to glob match patterns for explicit inclusion
of files previously excluded by a match. This allows for example to exclude
all entries in a directory except for a few single files.
Lines ending in ``/`` match directory entries only.
The folder containing the ``.pxarexclude`` file is considered to be the root of
the given patterns. It is only possible to match files in this or below this folder.
The file must contain a single glob pattern per line. Empty lines are ignored.
The same is true for lines starting with ``#``, which indicates a comment.
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
to an explicit inclusion. This makes it possible to exclude all entries in a
directory except for a few single files/subdirectories.
Lines ending in ``/`` match only on directories.
The directory containing the ``.pxarexclude`` file is considered to be the root of
the given patterns. It is only possible to match files in this directory and its subdirectories.
``\`` is used to escape glob characters. ``?`` matches any single character,
``*`` matches any character including the empty string.
``**`` is used to match also subdirectories and can be used to exclude for example
all files ending in ``.tmp`` within the directory or a subdirectory by the
``\`` is used to escape special glob characters.
``?`` matches any single character.
``*`` matches any string, including the empty string.
``**`` is used to match subdirectories. For example, it can be used to exclude
all files ending in ``.tmp`` within the directory or its subdirectories with
the pattern ``**/*.tmp``.
``[...]`` matches a single character from any of the provided characters within
the brackets. ``[!...]`` does the complement and matches any single character
not contained within the brackets. It is also possible to specify ranges by two
characters separated by ``-``. For example ``[a-z]`` matches any lowercase
alphabetic character, ``[0-9]`` matches any one single digit.
not contained within the brackets. It is also possible to specify ranges with two
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
alphabetic character and ``[0-9]`` matches any one single digit.
The order of the glob match patterns defines if the file is finally included or
The order of the glob match patterns defines if a file is included or
excluded; later entries win over previous ones.
This is also true for match patterns encountered deeper down the directory tree,
which may then override a previous exclusion.
Note however that folders marked for exclusion are not read by the client,
so ``.pxarexclude`` files contained within have no effect.
``.pxarexclude`` files are treated as regular files and are also included in the
which can override a previous exclusion.
Be aware that excluded directories will **not** be read by the backup client.
A ``.pxarexclude`` file inside an excluded directory will have no effect.
``.pxarexclude`` files are treated as regular files and will be included in the
backup archive.
For example, consider the following folder structure:
For example, consider the following directory structure:
.. code-block:: console
@ -369,7 +497,7 @@ For example, consider the following folder structure:
folder/subfolder1:
. .. file0 file1 file2 file3
The ``.pxarexclude`` files containing the following:
The different ``.pxarexclude`` files contain the following:
.. code-block:: console
@ -386,7 +514,7 @@ The ``.pxarexclude`` files containing the following:
This would exclude ``file1`` and ``file3`` in ``subfolder0`` and all of
``subfolder1`` except ``file2``.
Restoring this archive form backup results in:
Restoring this backup will result in:
.. code-block:: console
@ -403,8 +531,8 @@ Restoring this archive form backup results in:
Encryption
^^^^^^^^^^
Proxmox backup support client side encryption using AES-256 in GCM_
mode. You first need to create an encryption key in order to use that:
Proxmox Backup supports client-side encryption with AES-256 in GCM_
mode. First, you need to create an encryption key:
.. code-block:: console
@ -427,7 +555,7 @@ extra protection, you can also create it without a password:
...
You can avoid having to enter the passwords by setting the environment
You can avoid entering the passwords by setting the environment
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
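For example, in a shell session (placeholder values shown):
.. code-block:: console

# export PBS_PASSWORD=<login-password>
# export PBS_ENCRYPTION_PASSWORD=<key-password>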
.. todo:: Explain master-key
@ -437,22 +565,26 @@ Restoring Data
~~~~~~~~~~~~~~
The regular creation of backups is a necessary step to avoid data
loss. More important, however, is the restoration. Be sure to perform
periodic recovery tests to ensure that you can access your data in
loss. More important, however, is the restoration. It is good practice to perform
periodic recovery tests to ensure that you can access the data in
case of problems.
First, you need to find the snapshot you want to restore. The snapshot
command gives you a list of all snapshots on the server:
First, you need to find the snapshot which you want to restore. The snapshot
command gives a list of all snapshots on the server:
.. code-block:: console
# proxmox-backup-client snapshots
...
host/elsa/2019-12-03T09:30:15Z | 51788646825 | root.pxar catalog.pcat1 index.json
host/elsa/2019-12-03T09:35:01Z | 51790622048 | root.pxar catalog.pcat1 index.json
┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
│ snapshot │ size │ files │
╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
│ host/elsa/2019-12-03T09:30:15Z │ 51788646825 │ root.pxar catalog.pcat1 index.json │
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
│ host/elsa/2019-12-03T09:35:01Z │ 51790622048 │ root.pxar catalog.pcat1 index.json │
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
...
You can also inspect the catalog to find specific files.
You can inspect the catalog to find specific files.
.. code-block:: console
@ -470,9 +602,8 @@ backup.
# proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/
You can instead simply download the contents of any archive using '-'
instead of ``/target/path``. This dumps the content to standard
output:
To get the contents of any archive, you can restore the ``index.json`` file
in the repository to the target '-'. This will dump the contents to the
standard output.
.. code-block:: console
@ -494,20 +625,18 @@ to use the interactive recovery shell.
...
The interactive recovery shell is a minimalistic command line interface that
utilizes the metadata stored in the catalog for you to quickly list, navigate and
search files contained within a file archive.
You can select individual files as well as select files matched by a glob pattern
for restore.
utilizes the metadata stored in the catalog to quickly list, navigate and
search files in a file archive.
To restore files, you can select them individually or match them with a glob
pattern.
The use of the catalog for navigation reduces the overhead otherwise caused by
network traffic and decryption, as instead of downloading and decrypting
individual encrypted chunks from the chunk-store to access the metadata, we only
need to download and decrypt the catalog.
Using the catalog for navigation reduces the overhead considerably because only
the catalog needs to be downloaded and, optionally, decrypted.
The actual chunks are only accessed if the metadata in the catalog is not enough
or for the actual restore.
Similar to common UNIX shells, ``cd`` and ``ls`` are the commands used to change
working directory and list directory contents of the archive.
working directory and list directory contents in the archive.
``pwd`` shows the full path of the current working directory with respect to the
archive root.
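An illustrative session (the prompt style and listed entries are examples and depend on the archive) could look like this:
.. code-block:: console

pxar:/ > ls
bin dev etc home lib usr ...
pxar:/ > cd etc
pxar:/etc > pwd
/etc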
@ -567,7 +696,7 @@ This allows you to access the full content of the archive in a seamless manner.
load on your host, depending on the operations you perform on the mounted
filesystem.
To unmount the filesystem simply use the ``umount`` command on the mountpoint:
To unmount the filesystem use the ``umount`` command on the mountpoint:
.. code-block:: console
@ -579,7 +708,7 @@ Login and Logout
The client tool prompts you to enter the logon password as soon as you
want to access the backup server. The server checks your credentials
and responds with a ticket that is valid for two hours. The client
tool automatically stores that ticket and use it for further requests
tool automatically stores that ticket and uses it for further requests
to this server.
You can also manually trigger this login/logout using the login and
@ -590,7 +719,7 @@ logout commands:
# proxmox-backup-client login
Password: **********
To remove the ticket, simply issue a logout:
To remove the ticket, issue a logout:
.. code-block:: console
@ -608,76 +737,78 @@ command:
# proxmox-backup-client forget <snapshot>
.. caution:: This command removes all the archives in this backup
snapshot so that they are inaccessible and unrecoverable.
.. caution:: This command removes all archives in this backup
snapshot. They will be inaccessible and unrecoverable.
Such manual removal is sometimes required, but normally the prune
Manual removal is sometimes required, but normally the prune
command is used to systematically delete older backups. Prune lets
you specify which backup snapshots you want to keep. There are the
following retention options:
you specify which backup snapshots you want to keep. The
following retention options are available:
``--keep-last <N>``
Keep the last ``<N>`` backup snapshots.
``--keep-hourly <N>``
Keep backups for the last ``<N>`` different hours. If there is more than one
backup for a single hour, only the latest one is kept.
Keep backups for the last ``<N>`` hours. If there is more than one
backup for a single hour, only the latest is kept.
``--keep-daily <N>``
Keep backups for the last ``<N>`` different days. If there is more than one
backup for a single day, only the latest one is kept.
Keep backups for the last ``<N>`` days. If there is more than one
backup for a single day, only the latest is kept.
``--keep-weekly <N>``
Keep backups for the last ``<N>`` different weeks. If there is more than one
backup for a single week, only the latest one is kept.
Keep backups for the last ``<N>`` weeks. If there is more than one
backup for a single week, only the latest is kept.
.. note:: The weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and correctly handles weeks at
the end of the year.
.. note:: Weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and handles weeks at
the end of the year correctly.
``--keep-monthly <N>``
Keep backups for the last ``<N>`` different months. If there is more than one
backup for a single month, only the latest one is kept.
Keep backups for the last ``<N>`` months. If there is more than one
backup for a single month, only the latest is kept.
``--keep-yearly <N>``
Keep backups for the last ``<N>`` different years. If there is more than one
backup for a single year, only the latest one is kept.
Keep backups for the last ``<N>`` years. If there is more than one
backup for a single year, only the latest is kept.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
of already covered backups. It will only consider older backups.
Those retention options are processed in the order given above. Each
option covers a specific period of time. We say that backups within
this period are covered by this option. The next option does not take
care of already covered backups and only considers older backups.
The prune command also looks for unfinished and incomplete backups and
removes them unless they are newer than the last successful backup. In
this case, the last failed backup is retained.
Unfinished and incomplete backups will be removed by the prune command unless
they are newer than the last successful backup. In this case, the last failed
backup is retained.
.. code-block:: console
# proxmox-backup-client prune <group> --keep-daily 7 --keep-weekly 4 --keep-monthly 3
You can use the ``--dry-run`` option to test your settings. This just
shows the list of existing snapshots and what action prune would take
on that.
You can use the ``--dry-run`` option to test your settings. This only
shows the list of existing snapshots and which action prune would take.
.. code-block:: console
# proxmox-backup-client prune host/elsa --dry-run --keep-daily 1 --keep-weekly 3
retention options: --keep-daily 1 --keep-weekly 3
Testing prune on store "store2" group "host/elsa"
host/elsa/2019-12-04T13:20:37Z keep
host/elsa/2019-12-03T09:35:01Z remove
host/elsa/2019-11-22T11:54:47Z keep
host/elsa/2019-11-21T12:36:25Z remove
host/elsa/2019-11-10T10:42:20Z keep
┌────────────────────────────────┬──────┐
│ snapshot │ keep │
╞════════════════════════════════╪══════╡
│ host/elsa/2019-12-04T13:20:37Z │ 1 │
├────────────────────────────────┼──────┤
│ host/elsa/2019-12-03T09:35:01Z │ 0 │
├────────────────────────────────┼──────┤
│ host/elsa/2019-11-22T11:54:47Z │ 1 │
├────────────────────────────────┼──────┤
│ host/elsa/2019-11-21T12:36:25Z │ 0 │
├────────────────────────────────┼──────┤
│ host/elsa/2019-11-10T10:42:20Z │ 1 │
└────────────────────────────────┴──────┘
.. note:: Neither the ``prune`` command nor the ``forget`` command frees space
in the chunk-store. The chunk-store still contains the data blocks
unless you are performing :ref:`garbage-collection`.
in the chunk-store. The chunk-store still contains the data blocks. To free
space you need to perform :ref:`garbage-collection`.
.. _garbage-collection:
@ -687,8 +818,7 @@ Garbage Collection
The ``prune`` command removes only the backup index files, not the data
from the data store. This task is left to the garbage collection
command. It is therefore recommended to carry out garbage collection
regularly.
command. It is recommended to carry out garbage collection on a regular basis.
The garbage collection works in two phases. In the first phase, all
data blocks that are still in use are marked. In the second phase,
@ -727,6 +857,41 @@ unused data blocks are removed.
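To start garbage collection manually on a datastore, the backup manager can be used; a sketch, assuming a ``garbage-collection`` subcommand in your version:
.. code-block:: console

# proxmox-backup-manager garbage-collection start store1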
`Proxmox VE`_ integration
-------------------------
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
node. The following example uses ``store2`` as the storage name, and
assumes that the server address is ``localhost`` and that you want to
connect as ``user1@pbs``.
.. code-block:: console
# pvesm add pbs store2 --server localhost --datastore store2
# pvesm set store2 --username user1@pbs --password <secret>
If your backup server uses a self-signed certificate, you need to add
the certificate fingerprint to the configuration. You can get the
fingerprint by running the following command on the backup server:
.. code-block:: console
# proxmox-backup-manager cert info |grep Fingerprint
Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
Please add that fingerprint to your configuration to establish a trust
relationship:
.. code-block:: console
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
After that, you should be able to view the storage status with:
.. code-block:: console
# pvesm status --storage store2
Name Type Status Total Used Available %
store2 pbs active 3905109820 1336687816 2568422004 34.23%
.. include:: command-line-tools.rst
@ -21,6 +21,21 @@
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Implement custom formatter for code-blocks ---------------------------
#
# * use smaller font
# * avoid space between lines to nicely format utf8 tables
from sphinx.highlighting import PygmentsBridge
from pygments.formatters.latex import LatexFormatter
class CustomLatexFormatter(LatexFormatter):
def __init__(self, **options):
super(CustomLatexFormatter, self).__init__(**options)
self.verboptions = r"formatcom=\footnotesize\relax\let\strut\empty"
PygmentsBridge.latex_formatter = CustomLatexFormatter
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
@ -53,7 +68,7 @@ rst_epilog = epilog_file.read()
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019, Proxmox Support Team'
copyright = '2019-2020, Proxmox Support Team'
author = 'Proxmox Support Team'
# The version info for the project you're documenting, acts as replacement for
@ -61,9 +76,9 @@ author = 'Proxmox Support Team'
# built documents.
#
# The short X.Y version.
version = '1.0'
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '1.0-1'
release = '0.2-1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@ -251,14 +266,24 @@ htmlhelp_basename = 'ProxmoxBackupdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_engine = 'xelatex'
latex_elements = {
'fontenc': '\\usepackage{fontspec}',
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '12pt',
'pointsize': '10pt',
'fontpkg': r'''
\setmainfont{Open Sans}
\setsansfont{Lato}
\setmonofont{DejaVu Sans Mono}
''',
# Additional stuff for the LaTeX preamble.
#
@ -5,24 +5,23 @@ Glossary
`Virtual machine`_
A Virtual machine is a program that can execute an entire
operatin system inside an emulated hardware environment.
A virtual machine is a program that can execute an entire
operating system inside an emulated hardware environment.
`Container`_
A Container is an isolated user space. Programs runs directly on
the hosts kernel, but with limited access to the host resources.
A container is an isolated user space. Programs run directly on
the host's kernel, but with limited access to the host resources.
Datastore
A place to store backups. The current implemenation is
file-system based, so this refers to a directory containing the
backup data.
A place to store backups. A directory which contains the backup data.
The current implementation is file-system based.
`Rust`_
Rust is a new, fast and memory-efficient system programming
language, with no runtime or garbage collector. Rusts rich type
language. It has no runtime or garbage collector. Rust's rich type
system and ownership model guarantee memory-safety and
thread-safety. It can eliminate many classes of bugs
at compile-time.
@ -31,11 +30,9 @@ Glossary
Is a tool that makes it easy to create intelligent and
beautiful documentation. It was originally created for the
Python documentation, and it has excellent facilities for the
documentation of the Python programming language. It has excellent facilities for the
documentation of software projects in a range of languages.
`reStructuredText`_
Is an easy-to-read, what-you-see-is-what-you-get plaintext
@ -44,8 +41,8 @@ Glossary
`FUSE`
Filesystem in Userspace (`FUSE <https://en.wikipedia.org/wiki/Filesystem_in_Userspace>`_)
defines an interface which allows to implement a filesystem in
defines an interface which makes it possible to implement a filesystem in
userspace as opposed to implementing it in the kernel. The fuse
kernel driver handles filesystem requests and sends them to an
userspace application for reply.
kernel driver handles filesystem requests and sends them to a
userspace application.
@ -1,55 +1,50 @@
Installation
============
`Proxmox Backup`_ is split into a server part and a client part. The
server part comes with it's own graphical installer, but we also
ship Debian_ package repositories, so you can easily install those
packages on any Debian_ based system.
`Proxmox Backup`_ is split into a server and client part. The server part
can either be installed with a graphical installer or on top of
Debian_ from the provided package repository.
.. include:: package-repositories.rst
Server installation
-------------------
The backup server stores the actual backup data, but also provides a
web based GUI for various management tasks, for example disk
management.
The backup server stores the actual backed up data and provides a web based GUI
for various management tasks such as disk management.
.. note:: You always need a backup server. It is not possible to use
`Proxmox Backup`_ without the server part.
The server is based on Debian, therefore the disk image (ISO file) provided
by us includes a complete Debian system ("buster" for version 1.x) as
well as all necessary backup packages.
The disk image (ISO file) provided by Proxmox includes a complete Debian system
("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.
Using the installer will guide you through the setup, allowing
The installer will guide you through the setup process. It allows
you to partition the local disk(s), apply basic system configurations
(e.g. timezone, language, network) and install all required packages.
Using the provided ISO will get you started in just a few minutes,
that's why we recommend this method for new and existing users.
(e.g. timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
Alternatively, `Proxmox Backup`_ server can be installed on top of an
existing Debian system.
Using the `Proxmox Backup`_ Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Install `Proxmox Backup`_ with the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can download the ISO from |DOWNLOADS|.
Download the ISO from |DOWNLOADS|.
It includes the following:
* The `Proxmox Backup`_ server installer, which partitions the local
disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
system.
* Complete operating system (Debian Linux, 64-bit)
* The `Proxmox Backup`_ server installer, which partitions the local
disk(s) with ext4, ext3, xfs or ZFS and installs the operating
system.
* Our Linux kernel with ZFS support.
* Complete toolset for administering backups and all necessary
resources
* Complete tool-set to administer backups and all necessary resources
* Web based management interface for using the toolset
* Web based management interface (GUI)
.. note:: During the installation process, the complete server
is used by default and all existing data is removed.
@ -58,8 +53,8 @@ It includes the following:
Install `Proxmox Backup`_ server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages, so you can install it on
top of a standard Debian installation. After configuring the
Proxmox ships as a set of Debian packages which can be installed on top of a
standard Debian installation. After configuring the
:ref:`sysadmin_package_repositories`, you need to run:
.. code-block:: console
@ -67,7 +62,7 @@ top of a standard Debian installation. After configuring the
# apt-get update
# apt-get install proxmox-backup-server
Above code keeps the current (Debian) kernel and installs a minimal
The commands above keep the current (Debian) kernel and install a minimal
set of required packages.
If you want to install the same set of packages as the installer
@ -78,16 +73,15 @@ does, please use the following:
# apt-get update
# apt-get install proxmox-backup
This installs all required packages, the Proxmox kernel with ZFS_
support, and a set of commonly useful packages.
This will install all required packages, the Proxmox kernel with ZFS_
support, and a set of common and useful packages.
Installing on top of an existing Debian_ installation looks easy, but
it presumes that you have correctly installed the base system, and you
know how you want to configure and use the local storage. Network
configuration is also completely up to you.
Installing `Proxmox Backup`_ on top of an existing Debian_ installation looks easy, but
it presumes that the base system and local storage have been set up correctly.
In general, this is not trivial, especially when you use LVM_ or
ZFS_.
In general, this is not trivial, especially when LVM_ or ZFS_ is used.
The network configuration is completely up to you as well.
Install Proxmox Backup server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -101,9 +95,9 @@ After configuring the
# apt-get install proxmox-backup-server
.. caution:: Installing the backup server directly on the hypervisor
is not recommended. It is more secure to use a separate physical
server to store backups. If the hypervisor server fails, you can
still access your backups.
is not recommended. It is safer to use a separate physical
server to store backups. Should the hypervisor server fail, you can
still access the backups.
Client installation
-------------------
@ -111,7 +105,7 @@ Client installation
Install `Proxmox Backup`_ client on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages, so you can install it on
Proxmox ships as a set of Debian packages to be installed on
top of a standard Debian installation. After configuring the
:ref:`sysadmin_package_repositories`, you need to run:
@ -1,14 +1,14 @@
Introduction
============
This documentationm is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.
This documentation is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.
What is Proxmox Backup
----------------------
Proxmox Backup is enterprise-class client-server backup software,
specially optimized for `Proxmox Virtual Environment`_ to backup
specially optimized for the `Proxmox Virtual Environment`_ to backup
:term:`virtual machine`\ s and :term:`container`\ s. It is also
possible to backup physical hosts.
@ -24,23 +24,23 @@ Architecture
------------
Proxmox Backup uses a `Client-server model`_. The server is
responsible to store the backup data, and provides an API to create
backups and restore data. It is also possible to manage disks and
responsible for storing the backup data and provides an API to create
backups and restore data. It is possible to manage disks and
other server side resources using this API.
A backup client uses this API to access the backed up data,
i.e. ``proxmox-backup-client`` is a command line tool to create
backups and restore data. We also deliver an integrated client for
backups and restore data. We deliver an integrated client for
QEMU_ with `Proxmox Virtual Environment`_.
A single backup is allowed to contain several archives. For example,
when you backup a :term:`virtual machine`, each disk is stored as a
separate archive inside that backup. The VM configuration also gets an
extra file. This way, it is easy to access and restore important parts
of the backup, without having to scan the whole backup.
of the backup without having to scan the whole backup.
Main features
Main Features
-------------
:Proxmox VE: The `Proxmox Virtual Environment`_ is fully
@ -49,52 +49,52 @@ Main features
:GUI: We provide a graphical, web based user interface.
:Deduplication: Incremental backup produces large amounts of duplicate
:Deduplication: Incremental backups produce large amounts of duplicate
data. The deduplication layer removes that redundancy and makes
inkremental backup small and space efficient.
incremental backups small and space efficient.
:Data Integrity: The built-in `SHA-256`_ checksum algorithm ensures the
accuracy and consistency of your backups.
:Remote Sync: It is possible to efficently synchronize data to remote
sites. Only deltas containing new data are transfered.
:Remote Sync: It is possible to efficiently synchronize data to remote
sites. Only deltas containing new data are transferred.
:Performance: The whole software stack is written in :term:`Rust`,
which provides high speed and memory efficiency.
to provide high speed and memory efficiency.
:Compression: Ultra fast Zstandard_ compression is able to compress
several gigabytes of data per second.
:Encryption: Backups can be encrypted at client side using AES-256 in
:Encryption: Backups can be encrypted client-side using AES-256 in
GCM_ mode. This authenticated encryption mode (AE_) provides very
high performance on modern hardware.
:Open Source: No secrets. You have access to the whole source tree.
:Open Source: No secrets. You have access to all the source code.
:Support: Commercial support options available from `Proxmox`_.
:Support: Commercial support options are available from `Proxmox`_.
Why Backup?
-----------
The primary purpose of backup is to protect against data loss. Data
loss can happen because of faulty hardware, but also by human errors.
The primary purpose of a backup is to protect against data loss. Data
loss can be caused by faulty hardware, but also by human error.
A common mistake is to delete a file or folder which is still
required. Virtualization can amplify this problem, because it is now
easy to delete a whole virtual machine by a single button press.
required. Virtualization can amplify this problem. It is now
easy to delete a whole virtual machine by pressing a single button.
Backups can also serve as a toolkit for administrators to temporarily
Backups can serve as a toolkit for administrators to temporarily
store data. For example, it is common practice to perform full backups
before installing major software updates. If something goes wrong, you
can just restore the previous state.
can restore the previous state.
Another reason for backups is legal requirements. Some data must be
kept in a safe place for several years so that you can access it if
required by law.
kept in a safe place for several years by law, so that it can be accessed if
required.
Data loss can be very costly as it can severely restrict your
business. Therefore, make sure that you regularly perform a backup
business. Therefore, make sure that you perform a backup regularly
and run restore tests.
@ -5,12 +5,12 @@ Debian Package Repositories
All Debian based systems use APT_ as package
management tool. The list of repositories is defined in
``/etc/apt/sources.list`` and ``.list`` files found inside
``/etc/apt/sources.d/``. Updates can be installed directly using
``/etc/apt/sources.list`` and ``.list`` files found in the
``/etc/apt/sources.list.d/`` directory. Updates can be installed directly with
the ``apt`` command line tool, or via the GUI.
APT_ ``sources.list`` files list one package repository per line, with
the most preferred source listed first. Empty lines are ignored, and a
the most preferred source listed first. Empty lines are ignored and a
``#`` character anywhere on a line marks the remainder of that line as a
comment. The information available from the configured sources is
acquired by ``apt update``.
@ -33,7 +33,7 @@ the backup server binaries.
`Proxmox Backup`_ Enterprise Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is the default, stable and recommended repository, available for
This is the default, stable, and recommended repository. It is available for
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:
@ -44,15 +44,13 @@ enabled by default:
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
As soon as updates are available, the superuser (``root@pam`` user) is
notified via email about the available new packages. On the GUI, the
change-log of each package can be viewed (if available), showing all
details of the update. So you will never miss important security
fixes.
To never miss important security fixes, the superuser (``root@pam`` user) is
notified via email about new packages as soon as they are available. The
change-log and details of each package can be viewed in the GUI (if available).
Please note that you need a valid subscription key to access this
repository. We offer different support levels, and you can find further
details at https://www.proxmox.com/en/proxmox-backup/pricing.
repository. More information regarding subscription levels and pricing can be
found at https://www.proxmox.com/en/proxmox-backup/pricing.
.. note:: You can disable this repository by commenting out the above
line using a `#` (at the start of the line). This prevents error
@ -65,7 +63,7 @@ details at https://www.proxmox.com/en/proxmox-backup/pricing.
As the name suggests, you do not need a subscription key to access
this repository. It can be used for testing and non-production
use. Its not recommended to run on production servers, as these
use. It is not recommended to use it on production servers, because these
packages are not always heavily tested and validated.
We recommend configuring this repository in ``/etc/apt/sources.list``.
@ -92,9 +90,9 @@ latest packages and is heavily used by developers to test new
features.
.. warning:: the ``pbstest`` repository should (as the name implies)
only be used for testing new features or bug fixes.
only be used to test new features or bug fixes.
As usual, you can configure this using ``/etc/apt/sources.list`` by
You can configure this using ``/etc/apt/sources.list`` by
adding the following line:
.. code-block:: sources.list
@ -1,15 +1,15 @@
Description
^^^^^^^^^^^
``pxar`` is a command line utility used to create and manipulate archives in the
``pxar`` is a command line utility to create and manipulate archives in the
:ref:`pxar-format`.
It is inspired by `casync file archive format
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
which has a similar use-case.
The ``.pxar`` format is adapted to fulfill the specific needs of the proxmox
backup server, for example efficient storage of hardlinks.
which caters to a similar use-case.
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
Backup Server, for example, efficient storage of hardlinks.
The format is designed to reduce storage space needed on the server by achieving
high de-duplication.
a high level of de-duplication.
Creating an Archive
^^^^^^^^^^^^^^^^^^^
@ -20,23 +20,23 @@ Run the following command to create an archive of a folder named ``source``:
# pxar create archive.pxar source
This will create a new archive called ``archive.pxar`` from the contents of the
This will create a new archive called ``archive.pxar`` with the contents of the
``source`` folder.
.. NOTE:: ``pxar`` will not overwrite any existing archives. If an archive with
the same name is already present in the target folder, the creation will
fail.
By default, ``pxar`` will skip certain mountpoints and not follow device
By default, ``pxar`` will skip certain mountpoints and will not follow device
boundaries. This design decision is based on the primary use case of creating
archives for backups, where it makes no sense to store the content of certain
archives for backups. It is sensible to not back up the contents of certain
temporary or system specific files.
In order to alter this behavior and follow device boundaries, use the
To alter this behavior and follow device boundaries, use the
``--all-file-systems`` flag.
It is possible to exclude certain files and/or folders from the archive by
passing glob match patterns as additional parameters. Whenever a file is matched
by one of the patterns, you will get a warning saying that this file is skipped
by one of the patterns, you will get a warning stating that this file is skipped
and therefore not included in the archive.
For example, you can exclude all files ending in ``.txt`` from the archive
@ -50,7 +50,7 @@ Be aware that the shell itself will try to expand all of the glob patterns befor
invoking ``pxar``.
In order to avoid this, all globs have to be quoted correctly.
It is also possible to pass a list of match pattern to fulfill more complex
It is possible to pass a list of match patterns to fulfill more complex
file exclusion/inclusion behavior, although it is recommended to use the
``.pxarexclude`` files instead for such cases.
@ -67,7 +67,7 @@ All the glob pattern are relative to the ``source`` directory.
previous ones. Permutations of the same patterns lead to different results.
``pxar`` will store the list of glob match patterns passed as parameters via the
command line in a file called ``.pxarexclude-cli`` and store it at the root of
command line in a file called ``.pxarexclude-cli``, placed at the root of
the archive.
If a file with this name is already present in the source folder during archive
creation, this file is not included in the archive and the file containing the
@ -79,9 +79,9 @@ It is possible to create and place these files in any directory of the filesyste
tree.
These files must contain one pattern per line; again, later patterns win over
previous ones.
The patterns control file exclusion of files present within the given directory
The patterns control the exclusion of files present within the given directory
or further below it in the tree.
The behaviour is the same as described in :ref:`creating-backups`.
The behavior is the same as described in :ref:`creating-backups`.
Extracting an Archive
^^^^^^^^^^^^^^^^^^^^^
@ -96,7 +96,7 @@ with the following command:
If no target is provided, the content of the archive is extracted to the current
working directory.
In order to restore only part of an archive or single files and/or folders,
In order to restore only parts of an archive, such as single files and/or folders,
it is possible to pass the corresponding glob match patterns as additional
parameters or use the patterns stored in a file:
@ -109,8 +109,8 @@ sub-folders in the archive ``etc.pxar`` to the target ``/restore/target/etc``.
A path to the file containing match patterns can be specified using the
``--files-from`` parameter.
List the Content of an Archive
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
List the Contents of an Archive
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To display the files and directories contained in an archive ``archive.pxar``,
run the following command:
@ -126,7 +126,7 @@ Mounting an Archive
^^^^^^^^^^^^^^^^^^^
``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
In order to mount an archive named ``archive.pxar`` to the mountpoint ``mnt``,
In order to mount an archive named ``archive.pxar`` to the mountpoint ``/mnt``,
run the command:
.. code-block:: console
@ -1,4 +1,4 @@
mod access;
pub mod access;
pub mod admin;
pub mod backup;
pub mod config;
@ -1,18 +1,33 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use proxmox::api::api;
use proxmox::api::{api, RpcEnvironment, Permission, UserInformation};
use proxmox::api::router::{Router, SubdirMap};
use proxmox::sortable;
use proxmox::{sortable, identity};
use proxmox::{http_err, list_subdirs_api_method};
use crate::tools;
use crate::tools::ticket::*;
use crate::auth_helpers::*;
use crate::api2::types::*;
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::acl::PRIV_PERMISSIONS_MODIFY;
pub mod user;
pub mod domain;
pub mod acl;
pub mod role;
fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
let user_info = CachedUserInfo::new()?;
if !user_info.is_active_user(&username) {
bail!("user account disabled or expired.");
}
let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
if password.starts_with("PBS:") {
@ -25,27 +40,17 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
}
}
if username == "root@pam" {
let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
auth.get_handler().set_credentials("root", password);
auth.authenticate()?;
return Ok(());
}
bail!("inavlid credentials");
crate::auth::authenticate_user(username, password)
}
#[api(
input: {
properties: {
username: {
type: String,
description: "User name.",
max_length: 64,
schema: PROXMOX_USER_ID_SCHEMA,
},
password: {
type: String,
description: "The secret password. This can also be a valid ticket.",
schema: PASSWORD_SCHEMA,
},
},
},
@ -66,6 +71,9 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
},
},
protected: true,
access: {
permission: &Permission::World,
},
)]
/// Create or verify authentication ticket.
///
@ -94,13 +102,72 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
}
}
#[api(
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
},
password: {
schema: PASSWORD_SCHEMA,
},
},
},
access: {
description: "Anybody is allowed to change there own password. In addition, users with 'Permissions:Modify' privilege may change any password.",
permission: &Permission::Anybody,
},
)]
/// Change user password
///
/// Each user is allowed to change their own password. The superuser
/// can change all passwords.
fn change_password(
userid: String,
password: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let current_user = rpcenv.get_user()
.ok_or_else(|| format_err!("unknown user"))?;
let mut allowed = userid == current_user;
if userid == "root@pam" { allowed = true; }
if !allowed {
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&current_user, &[]);
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
}
if !allowed {
bail!("you are not authorized to change the password.");
}
let (username, realm) = crate::auth::parse_userid(&userid)?;
let authenticator = crate::auth::lookup_authenticator(&realm)?;
authenticator.store_password(&username, &password)?;
Ok(Value::Null)
}
#[sortable]
const SUBDIRS: SubdirMap = &[
const SUBDIRS: SubdirMap = &sorted!([
("acl", &acl::ROUTER),
(
"password", &Router::new()
.put(&API_METHOD_CHANGE_PASSWORD)
),
(
"ticket", &Router::new()
.post(&API_METHOD_CREATE_TICKET)
)
];
),
("domains", &domain::ROUTER),
("roles", &role::ROUTER),
("users", &user::ROUTER),
]);
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
src/api2/access/acl.rs (new file, 228 lines)
@ -0,0 +1,228 @@
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use crate::api2::types::*;
use crate::config::acl;
use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
#[api(
properties: {
propagate: {
schema: ACL_PROPAGATE_SCHEMA,
},
path: {
schema: ACL_PATH_SCHEMA,
},
ugid_type: {
schema: ACL_UGID_TYPE_SCHEMA,
},
ugid: {
type: String,
description: "User or Group ID.",
},
roleid: {
type: Role,
}
}
)]
#[derive(Serialize, Deserialize)]
/// ACL list entry.
pub struct AclListItem {
path: String,
ugid: String,
ugid_type: String,
propagate: bool,
roleid: String,
}
fn extract_acl_node_data(
node: &acl::AclTreeNode,
path: &str,
list: &mut Vec<AclListItem>,
exact: bool,
) {
for (user, roles) in &node.users {
for (role, propagate) in roles {
list.push(AclListItem {
path: if path.is_empty() { String::from("/") } else { path.to_string() },
propagate: *propagate,
ugid_type: String::from("user"),
ugid: user.to_string(),
roleid: role.to_string(),
});
}
}
for (group, roles) in &node.groups {
for (role, propagate) in roles {
list.push(AclListItem {
path: if path.is_empty() { String::from("/") } else { path.to_string() },
propagate: *propagate,
ugid_type: String::from("group"),
ugid: group.to_string(),
roleid: role.to_string(),
});
}
}
if exact {
return;
}
for (comp, child) in &node.children {
let new_path = format!("{}/{}", path, comp);
extract_acl_node_data(child, &new_path, list, exact);
}
}
#[api(
input: {
properties: {
path: {
schema: ACL_PATH_SCHEMA,
optional: true,
},
exact: {
description: "If set, returns only ACL for the exact path.",
type: bool,
optional: true,
default: false,
},
},
},
returns: {
description: "ACL entry list.",
type: Array,
items: {
type: AclListItem,
}
},
access: {
permission: &Permission::Privilege(&["access", "acl"], PRIV_SYS_AUDIT, false),
},
)]
/// Read Access Control List (ACLs).
pub fn read_acl(
path: Option<String>,
exact: bool,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<AclListItem>, Error> {
//let auth_user = rpcenv.get_user().unwrap();
let (mut tree, digest) = acl::config()?;
let mut list: Vec<AclListItem> = Vec::new();
if let Some(path) = &path {
if let Some(node) = &tree.find_node(path) {
extract_acl_node_data(&node, path, &mut list, exact);
}
} else {
extract_acl_node_data(&tree.root, "", &mut list, exact);
}
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
#[api(
protected: true,
input: {
properties: {
path: {
schema: ACL_PATH_SCHEMA,
},
role: {
type: Role,
},
propagate: {
optional: true,
schema: ACL_PROPAGATE_SCHEMA,
},
userid: {
optional: true,
schema: PROXMOX_USER_ID_SCHEMA,
},
group: {
optional: true,
schema: PROXMOX_GROUP_ID_SCHEMA,
},
delete: {
optional: true,
description: "Remove permissions (instead of adding it).",
type: bool,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["access", "acl"], PRIV_PERMISSIONS_MODIFY, false),
},
)]
/// Update Access Control List (ACLs).
pub fn update_acl(
path: String,
role: String,
propagate: Option<bool>,
userid: Option<String>,
group: Option<String>,
delete: Option<bool>,
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut tree, expected_digest) = acl::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let propagate = propagate.unwrap_or(true);
let delete = delete.unwrap_or(false);
if let Some(ref _group) = group {
bail!("parameter 'group' - groups are currently not supported.");
} else if let Some(ref userid) = userid {
if !delete { // Note: we allow deleting non-existent users
let user_cfg = crate::config::user::cached_config()?;
if user_cfg.sections.get(userid).is_none() {
bail!("no such user.");
}
}
} else {
bail!("missing 'userid' or 'group' parameter.");
}
if !delete { // Note: we allow deleting entries with an invalid path
acl::check_acl_path(&path)?;
}
if let Some(userid) = userid {
if delete {
tree.delete_user_role(&path, &userid, &role);
} else {
tree.insert_user_role(&path, &userid, &role, propagate);
}
} else if let Some(group) = group {
if delete {
tree.delete_group_role(&path, &group, &role);
} else {
tree.insert_group_role(&path, &group, &role, propagate);
}
}
acl::save_config(&tree)?;
Ok(())
}
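The digest parameter implements optimistic locking: the read call returns the current config digest, the client echoes it back with the update, and the update is refused if the file changed in between. A self-contained sketch of the comparison that detect_modified_configuration_file is assumed to perform (the error text is illustrative):

use anyhow::{bail, Error};

/// The client echoes the digest it read; a mismatch means another
/// writer modified the configuration file in the meantime.
fn check_config_digest(client_digest: &[u8; 32], on_disk_digest: &[u8; 32]) -> Result<(), Error> {
    if client_digest != on_disk_digest {
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}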
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_READ_ACL)
.put(&API_METHOD_UPDATE_ACL);

src/api2/access/domain.rs Normal file

@ -0,0 +1,47 @@
use anyhow::{Error};
use serde_json::{json, Value};
use proxmox::api::{api, Permission};
use proxmox::api::router::Router;
use crate::api2::types::*;
#[api(
returns: {
description: "List of realms.",
type: Array,
items: {
type: Object,
description: "User configuration (without password).",
properties: {
realm: {
description: "Realm ID.",
type: String,
},
comment: {
schema: SINGLE_LINE_COMMENT_SCHEMA,
optional: true,
},
default: {
description: "Default realm.",
type: bool,
}
},
}
},
access: {
description: "Anyone can access this, because we need that list for the login box (before the user is authenticated).",
permission: &Permission::World,
}
)]
/// Authentication domain/realm index.
fn list_domains() -> Result<Value, Error> {
let mut list = Vec::new();
list.push(json!({ "realm": "pam", "comment": "Linux PAM standard authentication", "default": true }));
list.push(json!({ "realm": "pbs", "comment": "Proxmox Backup authentication server" }));
Ok(list.into())
}
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_DOMAINS);

src/api2/access/role.rs Normal file

@ -0,0 +1,58 @@
use anyhow::Error;
use serde_json::{json, Value};
use proxmox::api::{api, Permission};
use proxmox::api::router::Router;
use crate::api2::types::*;
use crate::config::acl::{Role, ROLE_NAMES, PRIVILEGES};
#[api(
returns: {
description: "List of roles.",
type: Array,
items: {
type: Object,
description: "User name with description.",
properties: {
roleid: {
type: Role,
},
privs: {
type: Array,
description: "List of Privileges",
items: {
type: String,
description: "A Privilege",
},
},
comment: {
schema: SINGLE_LINE_COMMENT_SCHEMA,
optional: true,
},
},
}
},
access: {
permission: &Permission::Anybody,
}
)]
/// Role list
fn list_roles() -> Result<Value, Error> {
let mut list = Vec::new();
for (role, (privs, comment)) in ROLE_NAMES.iter() {
let mut priv_list = Vec::new();
for (name, privilege) in PRIVILEGES.iter() {
if privs & privilege > 0 {
priv_list.push(name.clone());
}
}
list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
}
Ok(list.into())
}
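The inner loop is a plain bitmask decomposition; isolated, it looks like this (priv_names is a hypothetical helper; the real PRIVILEGES table has the same (name, bit) shape):

fn priv_names(privs: u64, table: &[(&str, u64)]) -> Vec<String> {
    // collect the name of every privilege bit contained in `privs`
    table.iter()
        .filter(|(_name, bit)| privs & bit != 0)
        .map(|(name, _bit)| name.to_string())
        .collect()
}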
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_ROLES);

src/api2/access/user.rs Normal file

@ -0,0 +1,295 @@
use anyhow::{bail, Error};
use serde_json::Value;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::{Schema, StringSchema};
use crate::api2::types::*;
use crate::config::user;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.format(&PASSWORD_FORMAT)
.min_length(5)
.max_length(64)
.schema();
#[api(
input: {
properties: {},
},
returns: {
description: "List users (with config digest).",
type: Array,
items: { type: user::User },
},
access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
},
)]
/// List all users
pub fn list_users(
_param: Value,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<user::User>, Error> {
let (config, digest) = user::config()?;
let list = config.convert_to_typed_array("user")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
#[api(
protected: true,
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
},
comment: {
schema: SINGLE_LINE_COMMENT_SCHEMA,
optional: true,
},
password: {
schema: PBS_PASSWORD_SCHEMA,
optional: true,
},
enable: {
schema: user::ENABLE_USER_SCHEMA,
optional: true,
},
expire: {
schema: user::EXPIRE_USER_SCHEMA,
optional: true,
},
firstname: {
schema: user::FIRST_NAME_SCHEMA,
optional: true,
},
lastname: {
schema: user::LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: user::EMAIL_SCHEMA,
optional: true,
},
},
},
access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
},
)]
/// Create new user.
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let user: user::User = serde_json::from_value(param)?;
let (mut config, _digest) = user::config()?;
if let Some(_) = config.sections.get(&user.userid) {
bail!("user '{}' already exists.", user.userid);
}
let (username, realm) = crate::auth::parse_userid(&user.userid)?;
let authenticator = crate::auth::lookup_authenticator(&realm)?;
config.set_data(&user.userid, "user", &user)?;
user::save_config(&config)?;
if let Some(password) = password {
authenticator.store_password(&username, &password)?;
}
Ok(())
}
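create_user splits the userid into name and realm before touching the authenticator. A sketch of what crate::auth::parse_userid is assumed to do with a 'name@realm' string (the exact split rule is an assumption, not confirmed by this patch):

use anyhow::{bail, Error};

fn parse_userid(userid: &str) -> Result<(String, String), Error> {
    // split "name@realm" into its two parts; refuse ids without a realm
    match userid.rfind('@') {
        Some(pos) => Ok((userid[..pos].to_string(), userid[pos + 1..].to_string())),
        None => bail!("invalid user id '{}' - missing realm", userid),
    }
}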
#[api(
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
},
},
},
returns: {
description: "The user configuration (with config digest).",
type: user::User,
},
access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
},
)]
/// Read user configuration data.
pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
let (config, digest) = user::config()?;
let user = config.lookup("user", &userid)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(user)
}
#[api(
protected: true,
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
password: {
schema: PBS_PASSWORD_SCHEMA,
optional: true,
},
enable: {
schema: user::ENABLE_USER_SCHEMA,
optional: true,
},
expire: {
schema: user::EXPIRE_USER_SCHEMA,
optional: true,
},
firstname: {
schema: user::FIRST_NAME_SCHEMA,
optional: true,
},
lastname: {
schema: user::LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: user::EMAIL_SCHEMA,
optional: true,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
},
)]
/// Update user configuration.
pub fn update_user(
userid: String,
comment: Option<String>,
enable: Option<bool>,
expire: Option<i64>,
password: Option<String>,
firstname: Option<String>,
lastname: Option<String>,
email: Option<String>,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: user::User = config.lookup("user", &userid)?;
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
data.comment = None;
} else {
data.comment = Some(comment);
}
}
if let Some(enable) = enable {
data.enable = if enable { None } else { Some(false) };
}
if let Some(expire) = expire {
data.expire = if expire > 0 { Some(expire) } else { None };
}
if let Some(password) = password {
let (username, realm) = crate::auth::parse_userid(&userid)?;
let authenticator = crate::auth::lookup_authenticator(&realm)?;
authenticator.store_password(&username, &password)?;
}
if let Some(firstname) = firstname {
data.firstname = if firstname.is_empty() { None } else { Some(firstname) };
}
if let Some(lastname) = lastname {
data.lastname = if lastname.is_empty() { None } else { Some(lastname) };
}
if let Some(email) = email {
data.email = if email.is_empty() { None } else { Some(email) };
}
config.set_data(&userid, "user", &data)?;
user::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
},
)]
/// Remove a user from the configuration file.
pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = user::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
match config.sections.get(&userid) {
Some(_) => { config.sections.remove(&userid); },
None => bail!("user '{}' does not exist.", userid),
}
user::save_config(&config)?;
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_USER)
.put(&API_METHOD_UPDATE_USER)
.delete(&API_METHOD_DELETE_USER);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_USERS)
.post(&API_METHOD_CREATE_USER)
.match_all("userid", &ITEM_ROUTER);


@ -2,9 +2,11 @@ use proxmox::api::router::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;
pub mod datastore;
pub mod sync;
const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER)
("datastore", &datastore::ROUTER),
("sync", &sync::ROUTER)
];
pub const ROUTER: Router = Router::new()


@ -2,14 +2,15 @@ use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;
use chrono::{TimeZone, Local};
use failure::*;
use anyhow::{bail, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use proxmox::api::api;
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox::api::{
api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
@ -19,14 +20,31 @@ use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
use crate::api2::types::*;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;
use crate::server::WorkerTask;
use crate::tools;
use crate::config::acl::{
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_PRUNE,
PRIV_DATASTORE_BACKUP,
};
fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
let owner = store.get_owner(group)?;
if &owner != userid {
bail!("backup owner check failed ({} != {})", userid, owner);
}
Ok(())
}
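This helper anchors a pattern repeated through the handlers below: a sufficiently privileged user passes unconditionally, everyone else must own the backup group. Condensed into one (hypothetical) guard:

fn check_priv_or_owner(
    user_privs: u64,
    required: u64,
    store: &DataStore,
    group: &BackupGroup,
    userid: &str,
) -> Result<(), Error> {
    // e.g. required = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ when listing files
    if (user_privs & required) != 0 {
        return Ok(());
    }
    check_backup_owner(store, group, userid)
}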
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
let mut path = store.base_path();
path.push(backup_dir.relative_path());
path.push("index.json.blob");
path.push(MANIFEST_BLOB_NAME);
let raw_data = file_get_contents(&path)?;
let index_size = raw_data.len() as u64;
@ -43,7 +61,7 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba
}
result.push(BackupContent {
filename: "index.json.blob".to_string(),
filename: MANIFEST_BLOB_NAME.to_string(),
size: Some(index_size),
});
@ -78,12 +96,23 @@ fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo
type: GroupListItem,
}
},
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"],
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
true),
},
)]
/// List backup groups.
fn list_groups(
store: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
@ -97,14 +126,22 @@ fn list_groups(
BackupInfo::sort_list(&mut list, false);
let info = &list[0];
let group = info.backup_dir.group();
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
if !list_all {
if owner != username { continue; }
}
let result_item = GroupListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
last_backup: info.backup_dir.backup_time().timestamp(),
backup_count: list.len() as u64,
files: info.files.clone(),
owner: Some(owner),
};
groups.push(result_item);
}
@ -136,6 +173,12 @@ fn list_groups(
type: BackupContent,
}
},
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"],
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
true),
},
)]
/// List snapshot files.
pub fn list_snapshot_files(
@ -144,12 +187,20 @@ pub fn list_snapshot_files(
backup_id: String,
backup_time: i64,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
let mut files = read_backup_index(&datastore, &snapshot)?;
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
@ -184,6 +235,12 @@ pub fn list_snapshot_files(
},
},
},
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"],
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
true),
},
)]
/// Delete backup snapshot.
fn delete_snapshot(
@ -192,13 +249,20 @@ fn delete_snapshot(
backup_id: String,
backup_time: i64,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
datastore.remove_backup_dir(&snapshot)?;
Ok(Value::Null)
@ -227,19 +291,28 @@ fn delete_snapshot(
type: SnapshotListItem,
}
},
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"],
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
true),
},
)]
/// List backup snapshots.
pub fn list_snapshots (
param: Value,
store: String,
backup_type: Option<String>,
backup_id: Option<String>,
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
let store = tools::required_string_param(&param, "store")?;
let backup_type = param["backup-type"].as_str();
let backup_id = param["backup-id"].as_str();
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(store)?;
let datastore = DataStore::lookup_datastore(&store)?;
let base_path = datastore.base_path();
@ -249,19 +322,27 @@ pub fn list_snapshots (
for info in backup_list {
let group = info.backup_dir.group();
if let Some(backup_type) = backup_type {
if let Some(ref backup_type) = backup_type {
if backup_type != group.backup_type() { continue; }
}
if let Some(backup_id) = backup_id {
if let Some(ref backup_id) = backup_id {
if backup_id != group.backup_id() { continue; }
}
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
if !list_all {
if owner != username { continue; }
}
let mut result_item = SnapshotListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
backup_time: info.backup_dir.backup_time().timestamp(),
files: info.files,
size: None,
owner: Some(owner),
};
if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
@ -291,6 +372,9 @@ pub fn list_snapshots (
returns: {
type: StorageStatus,
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
},
)]
/// Get datastore status.
pub fn status(
@ -330,50 +414,43 @@ macro_rules! add_common_prune_prameters {
(
"keep-daily",
true,
&IntegerSchema::new("Number of daily backups to keep.")
.minimum(1)
.schema()
&PRUNE_SCHEMA_KEEP_DAILY,
),
(
"keep-hourly",
true,
&IntegerSchema::new("Number of hourly backups to keep.")
.minimum(1)
.schema()
&PRUNE_SCHEMA_KEEP_HOURLY,
),
(
"keep-last",
true,
&IntegerSchema::new("Number of backups to keep.")
.minimum(1)
.schema()
&PRUNE_SCHEMA_KEEP_LAST,
),
(
"keep-monthly",
true,
&IntegerSchema::new("Number of monthly backups to keep.")
.minimum(1)
.schema()
&PRUNE_SCHEMA_KEEP_MONTHLY,
),
(
"keep-weekly",
true,
&IntegerSchema::new("Number of weekly backups to keep.")
.minimum(1)
.schema()
&PRUNE_SCHEMA_KEEP_WEEKLY,
),
(
"keep-yearly",
true,
&IntegerSchema::new("Number of yearly backups to keep.")
.minimum(1)
.schema()
&PRUNE_SCHEMA_KEEP_YEARLY,
),
$( $list2 )*
]
}
}
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
"Returns the list of snapshots and a flag indicating if there are kept or removed.",
PruneListItem::API_SCHEMA
).schema();
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&prune),
&ObjectSchema::new(
@ -388,25 +465,36 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
],[
("store", false, &DATASTORE_SCHEMA),
])
)
))
.returns(&API_RETURN_SCHEMA_PRUNE)
.access(None, &Permission::Privilege(
&["datastore", "{store}"],
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
true)
);
fn prune(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let store = param["store"].as_str().unwrap();
let store = tools::required_string_param(&param, "store")?;
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
let group = BackupGroup::new(backup_type, backup_id);
let datastore = DataStore::lookup_datastore(store)?;
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, &group, &username)?; }
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
@ -419,22 +507,7 @@ fn prune(
let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
let result = try_block! {
if !prune_options.keeps_something() {
worker.log("No prune selection - keeping all files.");
return Ok(());
} else {
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
if dry_run {
worker.log(format!("Testing prune on store \"{}\" group \"{}/{}\"",
store, backup_type, backup_id));
} else {
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, backup_type, backup_id));
}
}
let mut prune_result = Vec::new();
let list = group.list_backups(&datastore.base_path())?;
@ -442,11 +515,46 @@ fn prune(
prune_info.reverse(); // delete older snapshots first
for (info, keep) in prune_info {
let keep_all = !prune_options.keeps_something();
if dry_run {
for (info, mut keep) in prune_info {
if keep_all { keep = true; }
let backup_time = info.backup_dir.backup_time();
let group = info.backup_dir.group();
prune_result.push(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-time": backup_time.timestamp(),
"keep": keep,
}));
}
return Ok(json!(prune_result));
}
// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
let result = try_block! {
if keep_all {
worker.log("No prune selection - keeping all files.");
} else {
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, backup_type, backup_id));
}
for (info, mut keep) in prune_info {
if keep_all { keep = true; }
let backup_time = info.backup_dir.backup_time();
let timestamp = BackupDir::backup_time_to_string(backup_time);
let group = info.backup_dir.group();
let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
@ -457,6 +565,13 @@ fn prune(
worker.log(msg);
prune_result.push(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-time": backup_time.timestamp(),
"keep": keep,
}));
if !(dry_run || keep) {
datastore.remove_backup_dir(&info.backup_dir)?;
}
@ -469,9 +584,9 @@ fn prune(
if let Err(err) = result {
bail!("prune failed - {}", err);
}
};
Ok(json!(worker.to_string())) // return the UPID
Ok(json!(prune_result))
}
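The per-snapshot decision buried in the loop above reduces to three booleans; a minimal sketch:

// If no retention option is set at all, every snapshot is kept; otherwise
// the precomputed keep flag decides. Actual removal additionally requires
// that this is not a dry run.
fn should_remove(keep_all: bool, keep: bool, dry_run: bool) -> bool {
    let keep = keep || keep_all;
    !(dry_run || keep)
}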
#[api(
@ -485,6 +600,9 @@ fn prune(
returns: {
schema: UPID_SCHEMA,
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
},
)]
/// Start garbage collection.
fn start_garbage_collection(
@ -503,7 +621,7 @@ fn start_garbage_collection(
"garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
{
worker.log(format!("starting garbage collection on store {}", store));
datastore.garbage_collection(worker)
datastore.garbage_collection(&worker)
})?;
Ok(json!(upid_str))
@ -519,7 +637,10 @@ fn start_garbage_collection(
},
returns: {
type: GarbageCollectionStatus,
}
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
},
)]
/// Garbage collection status.
pub fn garbage_collection_status(
@ -535,16 +656,54 @@ pub fn garbage_collection_status(
Ok(status)
}
#[api(
returns: {
description: "List the accessible datastores.",
type: Array,
items: {
description: "Datastore name and description.",
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
},
},
access: {
permission: &Permission::Anybody,
},
)]
/// Datastore list
fn get_datastore_list(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let (config, _digest) = datastore::config()?;
Ok(config.convert_to_array("store", None, &[]))
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
let mut entry = json!({ "store": store });
if let Some(comment) = data["comment"].as_str() {
entry["comment"] = comment.into();
}
list.push(entry);
}
}
Ok(list.into())
}
#[sortable]
@ -560,6 +719,10 @@ pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
]),
)
).access(None, &Permission::Privilege(
&["datastore", "{store}"],
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
true)
);
fn download_file(
@ -567,25 +730,31 @@ fn download_file(
_req_body: Body,
param: Value,
_info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
path.push(&file_name);
@ -611,7 +780,7 @@ fn download_file(
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&upload_backup_log),
&ObjectSchema::new(
"Download single raw file from backup snapshot.",
"Upload the client backup log file into a backup snapshot ('client.log.blob').",
&sorted!([
("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
@ -619,6 +788,9 @@ pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
("backup-time", false, &BACKUP_TIME_SCHEMA),
]),
)
).access(
Some("Only the backup creator/owner is allowed to do this."),
&Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
fn upload_backup_log(
@ -626,15 +798,14 @@ fn upload_backup_log(
req_body: Body,
param: Value,
_info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let file_name = "client.log.blob";
let file_name = CLIENT_LOG_BLOB_NAME;
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
@ -642,6 +813,9 @@ fn upload_backup_log(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let username = rpcenv.get_user().unwrap();
check_backup_owner(&datastore, backup_dir.group(), &username)?;
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
path.push(&file_name);
@ -672,6 +846,47 @@ fn upload_backup_log(
}.boxed()
}
#[api(
input: {
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
timeframe: {
type: RRDTimeFrameResolution,
},
cf: {
type: RRDMode,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
},
)]
/// Read datastore stats
fn get_rrd_stats(
store: String,
timeframe: RRDTimeFrameResolution,
cf: RRDMode,
_param: Value,
) -> Result<Value, Error> {
let rrd_dir = format!("datastore/{}", store);
crate::rrd::extract_data(
&rrd_dir,
&[
"total", "used",
"read_ios", "read_bytes",
"write_ios", "write_bytes",
"io_ticks",
],
timeframe,
cf,
)
}
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
(
@ -700,6 +915,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
&Router::new()
.post(&API_METHOD_PRUNE)
),
(
"rrd",
&Router::new()
.get(&API_METHOD_GET_RRD_STATS)
),
(
"snapshots",
&Router::new()
@ -724,10 +944,5 @@ const DATASTORE_INFO_ROUTER: Router = Router::new()
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_datastore_list),
&ObjectSchema::new("Directory index.", &[])
)
)
.get(&API_METHOD_GET_DATASTORE_LIST)
.match_all("store", &DATASTORE_INFO_ROUTER);

src/api2/admin/sync.rs Normal file

@ -0,0 +1,134 @@
use anyhow::{Error};
use serde_json::Value;
use std::time::{SystemTime, UNIX_EPOCH};
use std::collections::HashMap;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::{list_subdirs_api_method, sortable};
use crate::api2::types::*;
use crate::api2::pull::{get_pull_parameters};
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::{self, TaskListInfo, WorkerTask};
use crate::tools::systemd::time::{
parse_calendar_event, compute_next_event};
#[api(
input: {
properties: {},
},
returns: {
description: "List configured jobs and their status.",
type: Array,
items: { type: sync::SyncJobStatus },
},
)]
/// List all sync jobs
pub fn list_sync_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {
let (config, digest) = sync::config()?;
let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;
let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
let tasks = server::read_task_list()?;
for info in tasks.iter() {
let worker_id = match &info.upid.worker_id {
Some(id) => id,
_ => { continue; },
};
if let Some(last) = last_tasks.get(worker_id) {
if last.upid.starttime < info.upid.starttime {
last_tasks.insert(worker_id.to_string(), &info);
}
} else {
last_tasks.insert(worker_id.to_string(), &info);
}
}
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
_ => 0i64,
};
for job in &mut list {
job.next_run = (|| -> Option<i64> {
let schedule = job.schedule.as_ref()?;
let event = parse_calendar_event(&schedule).ok()?;
compute_next_event(&event, now, false).ok()
})();
if let Some(task) = last_tasks.get(&job.id) {
job.last_run_upid = Some(task.upid_str.clone());
if let Some((endtime, status)) = &task.state {
job.last_run_state = Some(String::from(status));
job.last_run_endtime = Some(*endtime);
}
}
}
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
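The closure computing next_run above can be read as a small helper: a missing schedule, or one that fails to parse, simply means no predicted next run. A sketch using the same two calls already imported in this file:

// Option-returning variant of the inline closure above: errors and a
// missing schedule both collapse to None.
fn next_run(schedule: Option<&str>, now: i64) -> Option<i64> {
    let event = parse_calendar_event(schedule?).ok()?;
    compute_next_event(&event, now, false).ok()
}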
#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
}
}
}
)]
/// Runs the sync job manually.
async fn run_sync_job(
id: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
let username = rpcenv.get_user().unwrap();
let delete = sync_job.remove_vanished.unwrap_or(true);
let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {
worker.log(format!("sync job '{}' start", &id));
crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;
worker.log(format!("sync job '{}' end", &id));
Ok(())
})?;
Ok(upid_str)
}
#[sortable]
const SYNC_INFO_SUBDIRS: SubdirMap = &[
(
"run",
&Router::new()
.post(&API_METHOD_RUN_SYNC_JOB)
),
];
const SYNC_INFO_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
.subdirs(SYNC_INFO_SUBDIRS);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_SYNC_JOBS)
.match_all("id", &SYNC_INFO_ROUTER);


@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
@ -6,7 +6,7 @@ use hyper::{Body, Response, StatusCode};
use serde_json::{json, Value};
use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
@ -14,6 +14,8 @@ use crate::tools::{self, WrappedReaderStream};
use crate::server::{WorkerTask, H2Service};
use crate::backup::*;
use crate::api2::types::*;
use crate::config::acl::PRIV_DATASTORE_BACKUP;
use crate::config::cached_user_info::CachedUserInfo;
mod environment;
use environment::*;
@ -37,6 +39,10 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
]),
)
).access(
// Note: parameter 'store' is not a uri parameter, so we need to test inside the function body
Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
&Permission::Anybody
);
fn upgrade_to_backup_protocol(
@ -47,10 +53,16 @@ fn upgrade_to_backup_protocol(
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let username = rpcenv.get_user().unwrap();
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
let backup_type = tools::required_string_param(&param, "backup-type")?;
@ -73,10 +85,15 @@ fn upgrade_to_backup_protocol(
let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
let username = rpcenv.get_user().unwrap();
let env_type = rpcenv.env_type();
let backup_group = BackupGroup::new(backup_type, backup_id);
let owner = datastore.create_backup_group(&backup_group, &username)?;
// permission check
if owner != username { // only the owner is allowed to create additional snapshots
bail!("backup owner check failed ({} != {})", username, owner);
}
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
let backup_dir = BackupDir::new_with_group(backup_group, backup_time);
@ -90,7 +107,7 @@ fn upgrade_to_backup_protocol(
}
let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
if !is_new { bail!("backup directorty already exists."); }
if !is_new { bail!("backup directory already exists."); }
WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
@ -106,13 +123,12 @@ fn upgrade_to_backup_protocol(
let abort_future = worker.abort_future();
let env2 = env.clone();
let env3 = env.clone();
let req_fut = req_body
let mut req_fut = req_body
.on_upgrade()
.map_err(Error::from)
.and_then(move |conn| {
env3.debug("protocol upgrade done");
env2.debug("protocol upgrade done");
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
@ -124,36 +140,39 @@ fn upgrade_to_backup_protocol(
http.serve_connection(conn, service)
.map_err(Error::from)
});
let abort_future = abort_future
let mut abort_future = abort_future
.map(|_| Err(format_err!("task aborted")));
use futures::future::Either;
future::select(req_fut, abort_future)
.map(|res| match res {
Either::Left((Ok(res), _)) => Ok(res),
Either::Left((Err(err), _)) => Err(err),
Either::Right((Ok(res), _)) => Ok(res),
Either::Right((Err(err), _)) => Err(err),
})
.and_then(move |_result| async move {
env.ensure_finished()?;
env.log("backup finished sucessfully");
async move {
let res = select!{
req = req_fut => req,
abrt = abort_future => abrt,
};
match (res, env.ensure_finished()) {
(Ok(_), Ok(())) => {
env.log("backup finished successfully");
Ok(())
})
.then(move |result| async move {
if let Err(err) = result {
match env2.ensure_finished() {
Ok(()) => {}, // ignore error after finish
_ => {
env2.log(format!("backup failed: {}", err));
env2.log("removing failed backup");
env2.remove_backup()?;
return Err(err);
}
}
}
},
(Err(err), Ok(())) => {
// ignore errors after finish
env.log(format!("backup had errors but finished: {}", err));
Ok(())
})
},
(Ok(_), Err(err)) => {
env.log(format!("backup ended and finish failed: {}", err));
env.log("removing unfinished backup");
env.remove_backup()?;
Err(err)
},
(Err(err), Err(_)) => {
env.log(format!("backup failed: {}", err));
env.log("removing failed backup");
env.remove_backup()?;
Err(err)
},
}
}
})?;
let response = Response::builder()
@ -359,7 +378,7 @@ fn dynamic_append (
env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;
env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
}
Ok(Value::Null)
@ -424,7 +443,7 @@ fn fixed_append (
env.fixed_writer_append_chunk(wid, offset, size, &digest)?;
env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
}
Ok(Value::Null)
@ -479,7 +498,7 @@ fn close_dynamic_index (
env.dynamic_writer_close(wid, chunk_count, size, csum)?;
env.log(format!("sucessfully closed dynamic index {}", wid));
env.log(format!("successfully closed dynamic index {}", wid));
Ok(Value::Null)
}
@ -533,7 +552,7 @@ fn close_fixed_index (
env.fixed_writer_close(wid, chunk_count, size, csum)?;
env.log(format!("sucessfully closed fixed index {}", wid));
env.log(format!("successfully closed fixed index {}", wid));
Ok(Value::Null)
}
@ -547,7 +566,7 @@ fn finish_backup (
let env: &BackupEnvironment = rpcenv.as_ref();
env.finish_backup()?;
env.log("sucessfully finished backup");
env.log("successfully finished backup");
Ok(Value::Null)
}


@ -1,8 +1,8 @@
use failure::*;
use anyhow::{bail, Error};
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use serde_json::Value;
use serde_json::{json, Value};
use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
@ -52,7 +52,7 @@ struct FixedWriterState {
struct SharedBackupState {
finished: bool,
uid_counter: usize,
file_counter: usize, // sucessfully uploaded files
file_counter: usize, // successfully uploaded files
dynamic_writers: HashMap<usize, DynamicWriterState>,
fixed_writers: HashMap<usize, FixedWriterState>,
known_chunks: HashMap<[u8;32], u32>,
@ -80,7 +80,7 @@ impl SharedBackupState {
#[derive(Clone)]
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: HashMap<String, Value>,
result_attributes: Value,
user: String,
pub debug: bool,
pub formatter: &'static OutputFormatter,
@ -110,7 +110,7 @@ impl BackupEnvironment {
};
Self {
result_attributes: HashMap::new(),
result_attributes: json!({}),
env_type,
user,
worker,
@ -480,12 +480,12 @@ impl BackupEnvironment {
impl RpcEnvironment for BackupEnvironment {
fn set_result_attrib(&mut self, name: &str, value: Value) {
self.result_attributes.insert(name.into(), value);
fn result_attrib_mut(&mut self) -> &mut Value {
&mut self.result_attributes
}
fn get_result_attrib(&self, name: &str) -> Option<&Value> {
self.result_attributes.get(name)
fn result_attrib(&self) -> &Value {
&self.result_attributes
}
fn env_type(&self) -> RpcEnvironmentType {


@ -2,7 +2,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use failure::*;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::Body;
use hyper::http::request::Parts;


@ -3,10 +3,12 @@ use proxmox::list_subdirs_api_method;
pub mod datastore;
pub mod remote;
pub mod sync;
const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER),
("remote", &remote::ROUTER),
("sync", &sync::ROUTER),
];
pub const ROUTER: Router = Router::new()


@ -1,13 +1,15 @@
use std::path::PathBuf;
use failure::*;
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use crate::api2::types::*;
use crate::backup::*;
use crate::config::datastore;
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
#[api(
input: {
@ -16,23 +18,32 @@ use crate::config::datastore;
returns: {
description: "List the configured datastores (with config digest).",
type: Array,
items: {
type: datastore::DataStoreConfig,
items: { type: datastore::DataStoreConfig },
},
access: {
permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
},
)]
/// List all datastores
pub fn list_datastores(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreConfig>, Error> {
let (config, digest) = datastore::config()?;
Ok(config.convert_to_array("name", Some(&digest), &[]))
let list = config.convert_to_typed_array("datastore")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
// fixme: impl. const fn get_object_schema(datastore::DataStoreConfig::API_SCHEMA),
// but this need support for match inside const fn
// see: https://github.com/rust-lang/rust/issues/49146
#[api(
protected: true,
input: {
@ -40,18 +51,53 @@ pub fn list_datastores(
name: {
schema: DATASTORE_SCHEMA,
},
path: {
schema: DIR_NAME_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
path: {
schema: datastore::DIR_NAME_SCHEMA,
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
},
"prune-schedule": {
optional: true,
schema: PRUNE_SCHEDULE_SCHEMA,
},
"keep-last": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST,
},
"keep-hourly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_HOURLY,
},
"keep-daily": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_DAILY,
},
"keep-weekly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_WEEKLY,
},
"keep-monthly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_MONTHLY,
},
"keep-yearly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_YEARLY,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_MODIFY, false),
},
)]
/// Create new datastore config.
pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {
pub fn create_datastore(param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
@ -59,16 +105,16 @@ pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {
let (mut config, _digest) = datastore::config()?;
if let Some(_) = config.sections.get(&name) {
bail!("datastore '{}' already exists.", name);
if let Some(_) = config.sections.get(&datastore.name) {
bail!("datastore '{}' already exists.", datastore.name);
}
let path: PathBuf = datastore.path.clone().into();
let backup_user = crate::backup::backup_user()?;
let _store = ChunkStore::create(&name, path, backup_user.uid, backup_user.gid)?;
let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid)?;
config.set_data(&name, "datastore", &datastore)?;
config.set_data(&datastore.name, "datastore", &datastore)?;
datastore::save_config(&config)?;
@ -87,14 +133,47 @@ pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {
description: "The datastore configuration (with config digest).",
type: datastore::DataStoreConfig,
},
access: {
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_AUDIT, false),
},
)]
/// Read a datastore configuration.
pub fn read_datastore(name: String) -> Result<Value, Error> {
pub fn read_datastore(
name: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreConfig, Error> {
let (config, digest) = datastore::config()?;
let mut data = config.lookup_json("datastore", &name)?;
data.as_object_mut().unwrap()
.insert("digest".into(), proxmox::tools::digest_to_hex(&digest).into());
Ok(data)
let store_config = config.lookup("datastore", &name)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(store_config)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the comment property.
comment,
/// Delete the garbage collection schedule.
gc_schedule,
/// Delete the prune job schedule.
prune_schedule,
/// Delete the keep-last property
keep_last,
/// Delete the keep-hourly property
keep_hourly,
/// Delete the keep-daily property
keep_daily,
/// Delete the keep-weekly property
keep_weekly,
/// Delete the keep-monthly property
keep_monthly,
/// Delete the keep-yearly property
keep_yearly,
}
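The enum feeds the delete-before-update pattern used by update_datastore below: deletions are applied first, then explicit new values, so a single request can clear one property and set another. A stripped-down sketch with a hypothetical two-field config:

struct Cfg { comment: Option<String>, gc_schedule: Option<String> }
enum Deletable { Comment, GcSchedule }

fn update(cfg: &mut Cfg, delete: &[Deletable], comment: Option<String>) {
    // 1. reset everything the client asked to delete
    for d in delete {
        match d {
            Deletable::Comment => cfg.comment = None,
            Deletable::GcSchedule => cfg.gc_schedule = None,
        }
    }
    // 2. then apply explicit new values, if any
    if comment.is_some() { cfg.comment = comment; }
}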
#[api(
@ -108,17 +187,69 @@ pub fn read_datastore(name: String) -> Result<Value, Error> {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
},
"prune-schedule": {
optional: true,
schema: PRUNE_SCHEDULE_SCHEMA,
},
"keep-last": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_LAST,
},
"keep-hourly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_HOURLY,
},
"keep-daily": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_DAILY,
},
"keep-weekly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_WEEKLY,
},
"keep-monthly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_MONTHLY,
},
"keep-yearly": {
optional: true,
schema: PRUNE_SCHEMA_KEEP_YEARLY,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
},
)]
/// Create new datastore config.
/// Update datastore config.
pub fn update_datastore(
name: String,
comment: Option<String>,
gc_schedule: Option<String>,
prune_schedule: Option<String>,
keep_last: Option<u64>,
keep_hourly: Option<u64>,
keep_daily: Option<u64>,
keep_weekly: Option<u64>,
keep_monthly: Option<u64>,
keep_yearly: Option<u64>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
@ -134,6 +265,22 @@ pub fn update_datastore(
let mut data: datastore::DataStoreConfig = config.lookup("datastore", &name)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::gc_schedule => { data.gc_schedule = None; },
DeletableProperty::prune_schedule => { data.prune_schedule = None; },
DeletableProperty::keep_last => { data.keep_last = None; },
DeletableProperty::keep_hourly => { data.keep_hourly = None; },
DeletableProperty::keep_daily => { data.keep_daily = None; },
DeletableProperty::keep_weekly => { data.keep_weekly = None; },
DeletableProperty::keep_monthly => { data.keep_monthly = None; },
DeletableProperty::keep_yearly => { data.keep_yearly = None; },
}
}
}
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
@ -143,6 +290,16 @@ pub fn update_datastore(
}
}
if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }
if keep_last.is_some() { data.keep_last = keep_last; }
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
if keep_daily.is_some() { data.keep_daily = keep_daily; }
if keep_weekly.is_some() { data.keep_weekly = keep_weekly; }
if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }
config.set_data(&name, "datastore", &data)?;
datastore::save_config(&config)?;
@ -157,16 +314,27 @@ pub fn update_datastore(
name: {
schema: DATASTORE_SCHEMA,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
},
)]
/// Remove a datastore configuration.
pub fn delete_datastore(name: String) -> Result<(), Error> {
pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
// fixme: locking ?
// fixme: check digest ?
let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, _digest) = datastore::config()?;
let (mut config, expected_digest) = datastore::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
match config.sections.get(&name) {
Some(_) => { config.sections.remove(&name); },


@ -1,10 +1,13 @@
use failure::*;
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use base64;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use crate::api2::types::*;
use crate::config::remote;
use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
#[api(
input: {
@ -14,42 +17,32 @@ use crate::config::remote;
description: "The list of configured remotes (with config digest).",
type: Array,
items: {
type: Object,
type: remote::Remote,
description: "Remote configuration (without password).",
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
host: {
schema: DNS_NAME_OR_IP_SCHEMA,
},
userid: {
schema: PROXMOX_USER_ID_SCHEMA,
},
fingerprint: {
optional: true,
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_AUDIT, false),
},
)]
/// List all remotes
pub fn list_remotes(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<remote::Remote>, Error> {
let (config, digest) = remote::config()?;
let value = config.convert_to_array("name", Some(&digest), &["password"]);
let mut list: Vec<remote::Remote> = config.convert_to_typed_array("remote")?;
Ok(value.into())
// don't return password in api
for remote in &mut list {
remote.password = "".to_string();
}
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
#[api(
@ -78,21 +71,26 @@ pub fn list_remotes(
},
},
},
access: {
permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_MODIFY, false),
},
)]
/// Create new remote.
pub fn create_remote(name: String, param: Value) -> Result<(), Error> {
pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let remote: remote::Remote = serde_json::from_value(param.clone())?;
let mut data = param.clone();
data["password"] = Value::from(base64::encode(password.as_bytes()));
let remote: remote::Remote = serde_json::from_value(data)?;
let (mut config, _digest) = remote::config()?;
if let Some(_) = config.sections.get(&name) {
bail!("remote '{}' already exists.", name);
if let Some(_) = config.sections.get(&remote.name) {
bail!("remote '{}' already exists.", remote.name);
}
config.set_data(&name, "remote", &remote)?;
config.set_data(&remote.name, "remote", &remote)?;
remote::save_config(&config)?;
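Note the remote password is stored base64-encoded in the section config rather than in plain text; a quick round-trip with the base64 crate used above (free functions as in base64 0.x):

fn password_roundtrip() -> Result<(), base64::DecodeError> {
    let encoded = base64::encode(b"secret");          // what create_remote stores
    assert_eq!(base64::decode(&encoded)?, b"secret"); // what a consumer decodes
    Ok(())
}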
@ -111,16 +109,34 @@ pub fn create_remote(name: String, param: Value) -> Result<(), Error> {
description: "The remote configuration (with config digest).",
type: remote::Remote,
},
access: {
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
}
)]
/// Read remote configuration data.
pub fn read_remote(name: String) -> Result<Value, Error> {
pub fn read_remote(
name: String,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<remote::Remote, Error> {
let (config, digest) = remote::config()?;
let mut data = config.lookup_json("remote", &name)?;
data.as_object_mut().unwrap()
.insert("digest".into(), proxmox::tools::digest_to_hex(&digest).into());
let mut data: remote::Remote = config.lookup("remote", &name)?;
data.password = "".to_string(); // do not return password in api
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(data)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the comment property.
comment,
/// Delete the fingerprint property.
fingerprint,
}
#[api(
protected: true,
input: {
@ -148,12 +164,23 @@ pub fn read_remote(name: String) -> Result<Value, Error> {
optional: true,
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_MODIFY, false),
},
)]
/// Update remote configuration.
pub fn update_remote(
@ -163,6 +190,7 @@ pub fn update_remote(
userid: Option<String>,
password: Option<String>,
fingerprint: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
@ -177,6 +205,15 @@ pub fn update_remote(
let mut data: remote::Remote = config.lookup("remote", &name)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::fingerprint => { data.fingerprint = None; },
}
}
}
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
@ -189,7 +226,6 @@ pub fn update_remote(
if let Some(userid) = userid { data.userid = userid; }
if let Some(password) = password { data.password = password; }
// fixme: how to delete a fingerprint?
if let Some(fingerprint) = fingerprint { data.fingerprint = Some(fingerprint); }
config.set_data(&name, "remote", &data)?;
@ -206,22 +242,35 @@ pub fn update_remote(
name: {
schema: REMOTE_ID_SCHEMA,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_MODIFY, false),
},
)]
/// Remove a remote from the configuration file.
pub fn delete_remote(name: String) -> Result<(), Error> {
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
// fixme: locking ?
// fixme: check digest ?
let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, _digest) = remote::config()?;
let (mut config, expected_digest) = remote::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
match config.sections.get(&name) {
Some(_) => { config.sections.remove(&name); },
None => bail!("remote '{}' does not exist.", name),
}
remote::save_config(&config)?;
Ok(())
}

src/api2/config/sync.rs Normal file

@ -0,0 +1,277 @@
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, Router, RpcEnvironment};
use crate::api2::types::*;
use crate::config::sync::{self, SyncJobConfig};
// fixme: add access permissions
#[api(
input: {
properties: {},
},
returns: {
description: "List configured jobs.",
type: Array,
items: { type: sync::SyncJobConfig },
},
)]
/// List all sync jobs
pub fn list_sync_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobConfig>, Error> {
let (config, digest) = sync::config()?;
let list = config.convert_to_typed_array("sync")?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(list)
}
#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
remote: {
schema: REMOTE_ID_SCHEMA,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
},
"remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
},
},
)]
/// Create a new sync job.
pub fn create_sync_job(param: Value) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
let (mut config, _digest) = sync::config()?;
if let Some(_) = config.sections.get(&sync_job.id) {
bail!("job '{}' already exists.", sync_job.id);
}
config.set_data(&sync_job.id, "sync", &sync_job)?;
sync::save_config(&config)?;
Ok(())
}
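// For illustration only (the exact on-disk layout is an assumption, not part
// of this patch): a job created by the call above ends up as a section in
// sync.cfg, roughly like:
//
//   sync: job1
//       store local-store
//       remote my-remote
//       remote-store their-store
//       schedule daily
//       comment pull from my-remote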
#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
},
},
returns: {
description: "The sync job configuration.",
type: sync::SyncJobConfig,
},
)]
/// Read a sync job configuration.
pub fn read_sync_job(
id: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<SyncJobConfig, Error> {
let (config, digest) = sync::config()?;
let sync_job = config.lookup("sync", &id)?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(sync_job)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the comment property.
comment,
/// Delete the job schedule.
schedule,
/// Delete the remove-vanished flag.
remove_vanished,
}
#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
optional: true,
},
remote: {
schema: REMOTE_ID_SCHEMA,
optional: true,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
optional: true,
},
"remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
)]
/// Update sync job config.
pub fn update_sync_job(
id: String,
store: Option<String>,
remote: Option<String>,
remote_store: Option<String>,
remove_vanished: Option<bool>,
comment: Option<String>,
schedule: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
// pass/compare digest
let (mut config, expected_digest) = sync::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: sync::SyncJobConfig = config.lookup("sync", &id)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::schedule => { data.schedule = None; },
DeletableProperty::remove_vanished => { data.remove_vanished = None; },
}
}
}
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
data.comment = None;
} else {
data.comment = Some(comment);
}
}
if let Some(store) = store { data.store = store; }
if let Some(remote) = remote { data.remote = remote; }
if let Some(remote_store) = remote_store { data.remote_store = remote_store; }
if schedule.is_some() { data.schedule = schedule; }
if remove_vanished.is_some() { data.remove_vanished = remove_vanished; }
config.set_data(&id, "sync", &data)?;
sync::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
)]
/// Remove a sync job configuration
pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = sync::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
match config.sections.get(&id) {
Some(_) => { config.sections.remove(&id); },
None => bail!("job '{}' does not exist.", id),
}
sync::save_config(&config)?;
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_SYNC_JOB)
.put(&API_METHOD_UPDATE_SYNC_JOB)
.delete(&API_METHOD_DELETE_SYNC_JOB);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_SYNC_JOBS)
.post(&API_METHOD_CREATE_SYNC_JOB)
.match_all("id", &ITEM_ROUTER);

View File

@ -3,17 +3,19 @@ use proxmox::list_subdirs_api_method;
pub mod tasks;
mod time;
mod network;
pub mod network;
pub mod dns;
mod syslog;
mod journal;
mod services;
mod status;
mod rrd;
pub const SUBDIRS: SubdirMap = &[
("dns", &dns::ROUTER),
("journal", &journal::ROUTER),
("network", &network::ROUTER),
("rrd", &rrd::ROUTER),
("services", &services::ROUTER),
("status", &status::ROUTER),
("syslog", &syslog::ROUTER),

View File

@ -1,21 +1,34 @@
use std::sync::{Arc, Mutex};
use failure::*;
use anyhow::{Error};
use lazy_static::lazy_static;
use openssl::sha;
use regex::Regex;
use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};
use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
use crate::api2::types::*;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
static RESOLV_CONF_FN: &str = "/etc/resolv.conf";
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete first nameserver entry
dns1,
/// Delete second nameserver entry
dns2,
/// Delete third nameserver entry
dns3,
}
pub fn read_etc_resolv_conf() -> Result<Value, Error> {
let mut result = json!({});
@ -34,6 +47,8 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
}
let mut options = String::new();
for line in data.lines() {
if let Some(caps) = DOMAIN_REGEX.captures(&line) {
@ -44,16 +59,69 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
let nameserver = &caps[1];
let id = format!("dns{}", nscount);
result[id] = Value::from(nameserver);
} else {
if !options.is_empty() { options.push('\n'); }
options.push_str(line);
}
}
if !options.is_empty() {
result["options"] = options.into();
}
Ok(result)
}
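// Worked example for the parser above (hypothetical input). Given an
// /etc/resolv.conf containing
//
//   search example.com
//   nameserver 192.0.2.1
//   nameserver 192.0.2.2
//   options edns0
//
// the result is roughly
//
//   { "search": "example.com", "dns1": "192.0.2.1", "dns2": "192.0.2.2",
//     "options": "options edns0" }
//
// i.e. every line that is neither a search domain nor a nameserver is
// preserved verbatim in the "options" field.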
fn update_dns(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
#[api(
protected: true,
input: {
description: "Update DNS settings.",
properties: {
node: {
schema: NODE_SCHEMA,
},
search: {
schema: SEARCH_DOMAIN_SCHEMA,
optional: true,
},
dns1: {
optional: true,
schema: FIRST_DNS_SERVER_SCHEMA,
},
dns2: {
optional: true,
schema: SECOND_DNS_SERVER_SCHEMA,
},
dns3: {
optional: true,
schema: THIRD_DNS_SERVER_SCHEMA,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "dns"], PRIV_SYS_MODIFY, false),
}
)]
/// Update DNS settings
pub fn update_dns(
search: Option<String>,
dns1: Option<String>,
dns2: Option<String>,
dns3: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<Value, Error> {
lazy_static! {
@ -62,33 +130,41 @@ fn update_dns(
let _guard = MUTEX.lock();
let search = crate::tools::required_string_param(&param, "search")?;
let mut config = read_etc_resolv_conf()?;
let old_digest = config["digest"].as_str().unwrap();
let raw = file_get_contents(RESOLV_CONF_FN)?;
let old_digest = proxmox::tools::digest_to_hex(&sha::sha256(&raw));
if let Some(digest) = param["digest"].as_str() {
crate::tools::assert_if_modified(&old_digest, &digest)?;
if let Some(digest) = digest {
crate::tools::assert_if_modified(old_digest, &digest)?;
}
let old_data = String::from_utf8(raw)?;
if let Some(delete) = delete {
for delete_prop in delete {
let config = config.as_object_mut().unwrap();
match delete_prop {
DeletableProperty::dns1 => { config.remove("dns1"); },
DeletableProperty::dns2 => { config.remove("dns2"); },
DeletableProperty::dns3 => { config.remove("dns3"); },
}
}
}
let mut data = format!("search {}\n", search);
if let Some(search) = search { config["search"] = search.into(); }
if let Some(dns1) = dns1 { config["dns1"] = dns1.into(); }
if let Some(dns2) = dns2 { config["dns2"] = dns2.into(); }
if let Some(dns3) = dns3 { config["dns3"] = dns3.into(); }
let mut data = String::new();
if let Some(search) = config["search"].as_str() {
data.push_str(&format!("search {}\n", search));
}
for opt in &["dns1", "dns2", "dns3"] {
if let Some(server) = param[opt].as_str() {
if let Some(server) = config[opt].as_str() {
data.push_str(&format!("nameserver {}\n", server));
}
}
// append other data
lazy_static! {
static ref SKIP_REGEX: Regex = Regex::new(r"^(search|domain|nameserver)\s+").unwrap();
}
for line in old_data.lines() {
if SKIP_REGEX.is_match(line) { continue; }
data.push_str(line);
data.push('\n');
if let Some(options) = config["options"].as_str() {
data.push_str(options);
}
replace_file(RESOLV_CONF_FN, data.as_bytes(), CreateOptions::new())?;
@ -96,7 +172,45 @@ fn update_dns(
Ok(Value::Null)
}
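// Conversely, the writer above regenerates the file from the merged state;
// continuing the example, with search = "example.com" and dns1 = "192.0.2.1"
// the rendered /etc/resolv.conf becomes:
//
//   search example.com
//   nameserver 192.0.2.1
//   options edns0
//
// so unknown lines survive a round trip through the API.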
fn get_dns(
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
returns: {
description: "Returns DNS server IPs and sreach domain.",
type: Object,
properties: {
digest: {
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
search: {
optional: true,
schema: SEARCH_DOMAIN_SCHEMA,
},
dns1: {
optional: true,
schema: FIRST_DNS_SERVER_SCHEMA,
},
dns2: {
optional: true,
schema: SECOND_DNS_SERVER_SCHEMA,
},
dns3: {
optional: true,
schema: THIRD_DNS_SERVER_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "dns"], PRIV_SYS_AUDIT, false),
}
)]
/// Read DNS settings.
pub fn get_dns(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
@ -105,41 +219,6 @@ fn get_dns(
read_etc_resolv_conf()
}
#[sortable]
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_dns),
&ObjectSchema::new(
"Read DNS settings.",
&sorted!([ ("node", false, &NODE_SCHEMA) ]),
)
).returns(
&ObjectSchema::new(
"Returns DNS server IPs and sreach domain.",
&sorted!([
("digest", false, &PROXMOX_CONFIG_DIGEST_SCHEMA),
("search", true, &SEARCH_DOMAIN_SCHEMA),
("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
]),
).schema()
)
)
.put(
&ApiMethod::new(
&ApiHandler::Sync(&update_dns),
&ObjectSchema::new(
"Returns DNS server IPs and sreach domain.",
&sorted!([
("node", false, &NODE_SCHEMA),
("search", false, &SEARCH_DOMAIN_SCHEMA),
("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
("digest", true, &PROXMOX_CONFIG_DIGEST_SCHEMA),
]),
)
).protected(true)
);
.get(&API_METHOD_GET_DNS)
.put(&API_METHOD_UPDATE_DNS);

View File

@ -1,12 +1,13 @@
use std::process::{Command, Stdio};
use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};
use std::io::{BufRead,BufReader};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use crate::api2::types::*;
use crate::config::acl::PRIV_SYS_AUDIT;
#[api(
protected: true,
@ -53,6 +54,9 @@ use crate::api2::types::*;
description: "Line text.",
},
},
access: {
permission: &Permission::Privilege(&["system", "log"], PRIV_SYS_AUDIT, false),
},
)]
/// Read journal entries.
fn get_journal(

View File

@ -1,28 +1,671 @@
use failure::*;
use serde_json::{json, Value};
use anyhow::{Error, bail};
use serde_json::{Value, to_value};
use ::serde::{Deserialize, Serialize};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::schema::ObjectSchema;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::parse_property_string;
use crate::config::network::{self, NetworkConfig};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::*;
use crate::server::{WorkerTask};
fn get_network_config(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
Ok(json!({}))
fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
let value = parse_property_string(&list, &NETWORK_INTERFACE_ARRAY_SCHEMA)?;
Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
}
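// Rough sketch of what split_interface_list produces; the accepted
// separators are defined by NETWORK_INTERFACE_ARRAY_SCHEMA (not shown in
// this hunk), so the separator set here is an assumption:
fn split_interface_list_sketch(list: &str) -> Vec<String> {
    list.split(|c: char| c == ',' || c == ';' || c.is_whitespace())
        .filter(|s| !s.is_empty())
        .map(str::to_string)
        .collect()
}
// split_interface_list_sketch("ens18,ens19") yields ["ens18", "ens19"].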
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_network_config),
&ObjectSchema::new(
"Read network configuration.",
&[ ("node", false, &NODE_SCHEMA) ],
)
)
);
fn check_duplicate_gateway_v4(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
let current_gateway_v4 = config.interfaces.iter()
.find(|(_, interface)| interface.gateway.is_some())
.map(|(name, _)| name.to_string());
if let Some(current_gateway_v4) = current_gateway_v4 {
if current_gateway_v4 != iface {
bail!("Default IPv4 gateway already exists on interface '{}'", current_gateway_v4);
}
}
Ok(())
}
fn check_duplicate_gateway_v6(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
let current_gateway_v6 = config.interfaces.iter()
.find(|(_, interface)| interface.gateway6.is_some())
.map(|(name, _)| name.to_string());
if let Some(current_gateway_v6) = current_gateway_v6 {
if current_gateway_v6 != iface {
bail!("Default IPv6 gateway already exists on interface '{}'", current_gateway_v6);
}
}
Ok(())
}
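// Example of the invariant enforced by the two helpers above (interface
// names are illustrative): if eth0 already carries the default IPv4 gateway,
// setting a gateway on eth1 fails with "Default IPv4 gateway already exists
// on interface 'eth0'", while re-setting it on eth0 itself is allowed.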
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
returns: {
description: "List network devices (with config digest).",
type: Array,
items: {
type: Interface,
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_AUDIT, false),
},
)]
/// List all network devices
pub fn list_network_devices(
_param: Value,
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let (config, digest) = network::config()?;
let digest = proxmox::tools::digest_to_hex(&digest);
let mut list = Vec::new();
for (iface, interface) in config.interfaces.iter() {
if iface == "lo" { continue; } // do not list lo
let mut item: Value = to_value(interface)?;
item["digest"] = digest.clone().into();
item["iface"] = iface.to_string().into();
list.push(item);
}
let diff = network::changes()?;
if !diff.is_empty() {
rpcenv["changes"] = diff.into();
}
Ok(list.into())
}
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
iface: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
},
},
returns: {
description: "The network interface configuration (with config digest).",
type: Interface,
},
access: {
permission: &Permission::Privilege(&["system", "network", "interfaces", "{name}"], PRIV_SYS_AUDIT, false),
},
)]
/// Read a network interface configuration.
pub fn read_interface(iface: String) -> Result<Value, Error> {
let (config, digest) = network::config()?;
let interface = config.lookup(&iface)?;
let mut data: Value = to_value(interface)?;
data["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(data)
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
iface: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
"type": {
description: "Interface type.",
type: NetworkInterfaceType,
optional: true,
},
autostart: {
description: "Autostart interface.",
type: bool,
optional: true,
},
method: {
type: NetworkConfigMethod,
optional: true,
},
method6: {
type: NetworkConfigMethod,
optional: true,
},
comments: {
description: "Comments (inet, may span multiple lines)",
type: String,
optional: true,
},
comments6: {
description: "Comments (inet5, may span multiple lines)",
type: String,
optional: true,
},
cidr: {
schema: CIDR_V4_SCHEMA,
optional: true,
},
cidr6: {
schema: CIDR_V6_SCHEMA,
optional: true,
},
gateway: {
schema: IP_V4_SCHEMA,
optional: true,
},
gateway6: {
schema: IP_V6_SCHEMA,
optional: true,
},
mtu: {
description: "Maximum Transmission Unit.",
optional: true,
minimum: 46,
maximum: 65535,
default: 1500,
},
bridge_ports: {
schema: NETWORK_INTERFACE_LIST_SCHEMA,
optional: true,
},
bridge_vlan_aware: {
description: "Enable bridge vlan support.",
type: bool,
optional: true,
},
bond_mode: {
type: LinuxBondMode,
optional: true,
},
slaves: {
schema: NETWORK_INTERFACE_LIST_SCHEMA,
optional: true,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
},
)]
/// Create network interface configuration.
pub fn create_interface(
iface: String,
autostart: Option<bool>,
method: Option<NetworkConfigMethod>,
method6: Option<NetworkConfigMethod>,
comments: Option<String>,
comments6: Option<String>,
cidr: Option<String>,
gateway: Option<String>,
cidr6: Option<String>,
gateway6: Option<String>,
mtu: Option<u64>,
bridge_ports: Option<String>,
bridge_vlan_aware: Option<bool>,
bond_mode: Option<LinuxBondMode>,
slaves: Option<String>,
param: Value,
) -> Result<(), Error> {
let interface_type = crate::tools::required_string_param(&param, "type")?;
let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;
let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, _digest) = network::config()?;
if config.interfaces.contains_key(&iface) {
bail!("interface '{}' already exists", iface);
}
let mut interface = Interface::new(iface.clone());
interface.interface_type = interface_type;
if let Some(autostart) = autostart { interface.autostart = autostart; }
if method.is_some() { interface.method = method; }
if method6.is_some() { interface.method6 = method6; }
if mtu.is_some() { interface.mtu = mtu; }
if comments.is_some() { interface.comments = comments; }
if comments6.is_some() { interface.comments6 = comments6; }
if let Some(cidr) = cidr {
let (_, _, is_v6) = network::parse_cidr(&cidr)?;
if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
interface.cidr = Some(cidr);
}
if let Some(cidr6) = cidr6 {
let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
interface.cidr6 = Some(cidr6);
}
if let Some(gateway) = gateway {
let is_v6 = gateway.contains(':');
if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
check_duplicate_gateway_v4(&config, &iface)?;
interface.gateway = Some(gateway);
}
if let Some(gateway6) = gateway6 {
let is_v6 = gateway6.contains(':');
if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
check_duplicate_gateway_v6(&config, &iface)?;
interface.gateway6 = Some(gateway6);
}
match interface_type {
NetworkInterfaceType::Bridge => {
if let Some(ports) = bridge_ports {
let ports = split_interface_list(&ports)?;
interface.set_bridge_ports(ports)?;
}
if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
}
NetworkInterfaceType::Bond => {
if bond_mode.is_some() { interface.bond_mode = bond_mode; }
if let Some(slaves) = slaves {
let slaves = split_interface_list(&slaves)?;
interface.set_bond_slaves(slaves)?;
}
}
_ => bail!("creating network interface type '{:?}' is not supported", interface_type),
}
if interface.cidr.is_some() || interface.gateway.is_some() {
interface.method = Some(NetworkConfigMethod::Static);
} else if interface.method.is_none() {
interface.method = Some(NetworkConfigMethod::Manual);
}
if interface.cidr6.is_some() || interface.gateway6.is_some() {
interface.method6 = Some(NetworkConfigMethod::Static);
} else if interface.method6.is_none() {
interface.method6 = Some(NetworkConfigMethod::Manual);
}
config.interfaces.insert(iface, interface);
network::save_config(&config)?;
Ok(())
}
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the IPv4 address property.
cidr,
/// Delete the IPv6 address property.
cidr6,
/// Delete the IPv4 gateway property.
gateway,
/// Delete the IPv6 gateway property.
gateway6,
/// Delete the whole IPv4 configuration entry.
method,
/// Delete the whole IPv6 configuration entry.
method6,
/// Delete IPv4 comments
comments,
/// Delete IPv6 comments
comments6,
/// Delete mtu.
mtu,
/// Delete autostart flag
autostart,
/// Delete bridge ports (set to 'none')
bridge_ports,
/// Delete bridge-vlan-aware flag
bridge_vlan_aware,
/// Delete bond-slaves (set to 'none')
slaves,
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
iface: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
"type": {
description: "Interface type. If specified, need to match the current type.",
type: NetworkInterfaceType,
optional: true,
},
autostart: {
description: "Autostart interface.",
type: bool,
optional: true,
},
method: {
type: NetworkConfigMethod,
optional: true,
},
method6: {
type: NetworkConfigMethod,
optional: true,
},
comments: {
description: "Comments (inet, may span multiple lines)",
type: String,
optional: true,
},
comments6: {
description: "Comments (inet5, may span multiple lines)",
type: String,
optional: true,
},
cidr: {
schema: CIDR_V4_SCHEMA,
optional: true,
},
cidr6: {
schema: CIDR_V6_SCHEMA,
optional: true,
},
gateway: {
schema: IP_V4_SCHEMA,
optional: true,
},
gateway6: {
schema: IP_V6_SCHEMA,
optional: true,
},
mtu: {
description: "Maximum Transmission Unit.",
optional: true,
minimum: 46,
maximum: 65535,
default: 1500,
},
bridge_ports: {
schema: NETWORK_INTERFACE_LIST_SCHEMA,
optional: true,
},
bridge_vlan_aware: {
description: "Enable bridge vlan support.",
type: bool,
optional: true,
},
bond_mode: {
type: LinuxBondMode,
optional: true,
},
slaves: {
schema: NETWORK_INTERFACE_LIST_SCHEMA,
optional: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
},
)]
/// Update network interface config.
pub fn update_interface(
iface: String,
autostart: Option<bool>,
method: Option<NetworkConfigMethod>,
method6: Option<NetworkConfigMethod>,
comments: Option<String>,
comments6: Option<String>,
cidr: Option<String>,
gateway: Option<String>,
cidr6: Option<String>,
gateway6: Option<String>,
mtu: Option<u64>,
bridge_ports: Option<String>,
bridge_vlan_aware: Option<bool>,
bond_mode: Option<LinuxBondMode>,
slaves: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
param: Value,
) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = network::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
if gateway.is_some() { check_duplicate_gateway_v4(&config, &iface)?; }
if gateway6.is_some() { check_duplicate_gateway_v6(&config, &iface)?; }
let interface = config.lookup_mut(&iface)?;
if let Some(interface_type) = param.get("type") {
let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.clone())?;
if interface_type != interface.interface_type {
bail!("got unexpected interface type ({:?} != {:?})", interface_type, interface.interface_type);
}
}
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::cidr => { interface.cidr = None; },
DeletableProperty::cidr6 => { interface.cidr6 = None; },
DeletableProperty::gateway => { interface.gateway = None; },
DeletableProperty::gateway6 => { interface.gateway6 = None; },
DeletableProperty::method => { interface.method = None; },
DeletableProperty::method6 => { interface.method6 = None; },
DeletableProperty::comments => { interface.comments = None; },
DeletableProperty::comments6 => { interface.comments6 = None; },
DeletableProperty::mtu => { interface.mtu = None; },
DeletableProperty::autostart => { interface.autostart = false; },
DeletableProperty::bridge_ports => { interface.set_bridge_ports(Vec::new())?; }
DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
DeletableProperty::slaves => { interface.set_bond_slaves(Vec::new())?; }
}
}
}
if let Some(autostart) = autostart { interface.autostart = autostart; }
if method.is_some() { interface.method = method; }
if method6.is_some() { interface.method6 = method6; }
if mtu.is_some() { interface.mtu = mtu; }
if let Some(ports) = bridge_ports {
let ports = split_interface_list(&ports)?;
interface.set_bridge_ports(ports)?;
}
if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
if let Some(slaves) = slaves {
let slaves = split_interface_list(&slaves)?;
interface.set_bond_slaves(slaves)?;
}
if bond_mode.is_some() { interface.bond_mode = bond_mode; }
if let Some(cidr) = cidr {
let (_, _, is_v6) = network::parse_cidr(&cidr)?;
if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
interface.cidr = Some(cidr);
}
if let Some(cidr6) = cidr6 {
let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
interface.cidr6 = Some(cidr6);
}
if let Some(gateway) = gateway {
let is_v6 = gateway.contains(':');
if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
interface.gateway = Some(gateway);
}
if let Some(gateway6) = gateway6 {
let is_v6 = gateway6.contains(':');
if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
interface.gateway6 = Some(gateway6);
}
if comments.is_some() { interface.comments = comments; }
if comments6.is_some() { interface.comments6 = comments6; }
if interface.cidr.is_some() || interface.gateway.is_some() {
interface.method = Some(NetworkConfigMethod::Static);
} else {
interface.method = Some(NetworkConfigMethod::Manual);
}
if interface.cidr6.is_some() || interface.gateway6.is_some() {
interface.method6 = Some(NetworkConfigMethod::Static);
} else {
interface.method6 = Some(NetworkConfigMethod::Manual);
}
network::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
iface: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
},
)]
/// Remove network interface configuration.
pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {
let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
let (mut config, expected_digest) = network::config()?;
if let Some(ref digest) = digest {
let digest = proxmox::tools::hex_to_digest(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let _interface = config.lookup(&iface)?; // check if interface exists
config.interfaces.remove(&iface);
network::save_config(&config)?;
Ok(())
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_MODIFY, false),
},
)]
/// Reload network configuration (requires ifupdown2).
pub async fn reload_network_config(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
network::assert_ifupdown2_installed()?;
let username = rpcenv.get_user().unwrap();
let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), &username.clone(), true, |_worker| async {
let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
network::network_reload()?;
Ok(())
})?;
Ok(upid_str)
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_MODIFY, false),
},
)]
/// Revert network configuration (rm /etc/network/interfaces.new).
pub fn revert_network_config() -> Result<(), Error> {
let _ = std::fs::remove_file(network::NETWORK_INTERFACES_NEW_FILENAME);
Ok(())
}
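// Taken together, reload and revert implement a staged workflow: pending
// changes live in NETWORK_INTERFACES_NEW_FILENAME (/etc/network/interfaces.new,
// per the doc comment on revert above), reload renames that file over the
// live config before running ifupdown2, and revert simply deletes it,
// discarding all queued changes.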
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_INTERFACE)
.put(&API_METHOD_UPDATE_INTERFACE)
.delete(&API_METHOD_DELETE_INTERFACE);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_NETWORK_DEVICES)
.put(&API_METHOD_RELOAD_NETWORK_CONFIG)
.post(&API_METHOD_CREATE_INTERFACE)
.delete(&API_METHOD_REVERT_NETWORK_CONFIG)
.match_all("iface", &ITEM_ROUTER);

src/api2/node/rrd.rs Normal file
View File

@ -0,0 +1,49 @@
use anyhow::Error;
use serde_json::Value;
use proxmox::api::{api, Router};
use crate::api2::types::*;
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
timeframe: {
type: RRDTimeFrameResolution,
},
cf: {
type: RRDMode,
},
},
},
)]
/// Read node stats
fn get_node_stats(
timeframe: RRDTimeFrameResolution,
cf: RRDMode,
_param: Value,
) -> Result<Value, Error> {
crate::rrd::extract_data(
"host",
&[
"cpu", "iowait",
"memtotal", "memused",
"swaptotal", "swapused",
"netin", "netout",
"loadavg",
"total", "used",
"read_ios", "read_bytes",
"write_ios", "write_bytes",
"io_ticks",
],
timeframe,
cf,
)
}
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_GET_NODE_STATS);

View File

@ -1,15 +1,15 @@
use std::process::{Command, Stdio};
use failure::*;
use anyhow::{bail, Error};
use serde_json::{json, Value};
use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, Router, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use crate::api2::types::*;
use crate::tools;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
static SERVICE_NAME_LIST: [&str; 7] = [
"proxmox-backup",
@ -91,11 +91,45 @@ fn json_service_state(service: &str, status: Value) -> Value {
Value::Null
}
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
returns: {
description: "Returns a list of systemd services.",
type: Array,
items: {
description: "Service details.",
properties: {
service: {
schema: SERVICE_ID_SCHEMA,
},
name: {
type: String,
description: "systemd service name.",
},
desc: {
type: String,
description: "systemd service description.",
},
state: {
type: String,
description: "systemd service 'SubState'.",
},
},
},
},
access: {
permission: &Permission::Privilege(&["system", "services"], PRIV_SYS_AUDIT, false),
},
)]
/// Service list.
fn list_services(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let mut list = vec![];
@ -115,21 +149,36 @@ fn list_services(
Ok(Value::from(list))
}
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
service: {
schema: SERVICE_ID_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_AUDIT, false),
},
)]
/// Read service properties.
fn get_service_state(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
service: String,
_param: Value,
) -> Result<Value, Error> {
let service = tools::required_string_param(&param, "service")?;
let service = service.as_str();
if !SERVICE_NAME_LIST.contains(&service) {
bail!("unknown service name '{}'", service);
}
let status = get_full_service_state(service)?;
let status = get_full_service_state(&service)?;
Ok(json_service_state(service, status))
Ok(json_service_state(&service, status))
}
fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
@ -158,61 +207,117 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
Ok(Value::Null)
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
service: {
schema: SERVICE_ID_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
},
)]
/// Start service.
fn start_service(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
service: String,
_param: Value,
) -> Result<Value, Error> {
let service = tools::required_string_param(&param, "service")?;
log::info!("starting service {}", service);
run_service_command(service, "start")
run_service_command(&service, "start")
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
service: {
schema: SERVICE_ID_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
},
)]
/// Stop service.
fn stop_service(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
service: String,
_param: Value,
) -> Result<Value, Error> {
let service = tools::required_string_param(&param, "service")?;
log::info!("stopping service {}", service);
log::info!("stoping service {}", service);
run_service_command(service, "stop")
run_service_command(&service, "stop")
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
service: {
schema: SERVICE_ID_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
},
)]
/// Restart service.
fn restart_service(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
service: String,
_param: Value,
) -> Result<Value, Error> {
let service = tools::required_string_param(&param, "service")?;
log::info!("re-starting service {}", service);
if service == "proxmox-backup-proxy" {
if &service == "proxmox-backup-proxy" {
// special case, avoid aborting running tasks
run_service_command(service, "reload")
run_service_command(&service, "reload")
} else {
run_service_command(service, "restart")
run_service_command(&service, "restart")
}
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
service: {
schema: SERVICE_ID_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
},
)]
/// Reload service.
fn reload_service(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
service: String,
_param: Value,
) -> Result<Value, Error> {
let service = tools::required_string_param(&param, "service")?;
log::info!("reloading service {}", service);
run_service_command(service, "reload")
run_service_command(&service, "reload")
}
@ -221,111 +326,33 @@ const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
.schema();
#[sortable]
const SERVICE_SUBDIRS: SubdirMap = &[
const SERVICE_SUBDIRS: SubdirMap = &sorted!([
(
"reload", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&reload_service),
&ObjectSchema::new(
"Reload service.",
&sorted!([
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
]),
)
).protected(true)
)
.post(&API_METHOD_RELOAD_SERVICE)
),
(
"restart", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&restart_service),
&ObjectSchema::new(
"Restart service.",
&sorted!([
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
]),
)
).protected(true)
)
.post(&API_METHOD_RESTART_SERVICE)
),
(
"start", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&start_service),
&ObjectSchema::new(
"Start service.",
&sorted!([
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
]),
)
).protected(true)
)
.post(&API_METHOD_START_SERVICE)
),
(
"state", &Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_service_state),
&ObjectSchema::new(
"Read service properties.",
&sorted!([
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
]),
)
)
)
.get(&API_METHOD_GET_SERVICE_STATE)
),
(
"stop", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&stop_service),
&ObjectSchema::new(
"Stop service.",
&sorted!([
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
]),
)
).protected(true)
)
.post(&API_METHOD_STOP_SERVICE)
),
];
]);
const SERVICE_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SERVICE_SUBDIRS))
.subdirs(SERVICE_SUBDIRS);
#[sortable]
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&list_services),
&ObjectSchema::new(
"Service list.",
&sorted!([ ("node", false, &NODE_SCHEMA) ]),
)
).returns(
&ArraySchema::new(
"Returns a list of systemd services.",
&ObjectSchema::new(
"Service details.",
&sorted!([
("service", false, &SERVICE_ID_SCHEMA),
("name", false, &StringSchema::new("systemd service name.").schema()),
("desc", false, &StringSchema::new("systemd service description.").schema()),
("state", false, &StringSchema::new("systemd service 'SubState'.").schema()),
]),
).schema()
).schema()
)
)
.get(&API_METHOD_LIST_SERVICES)
.match_all("service", &SERVICE_ROUTER);

View File

@ -1,12 +1,14 @@
use failure::*;
use std::process::Command;
use anyhow::{Error, format_err, bail};
use serde_json::{json, Value};
use proxmox::sys::linux::procfs;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, SubdirMap};
use proxmox::list_subdirs_api_method;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use crate::api2::types::*;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
#[api(
input: {
@ -44,7 +46,10 @@ use crate::api2::types::*;
optional: true,
},
}
}
},
access: {
permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
},
)]
/// Read node memory, CPU and (root) disk usage
fn get_usage(
@ -66,12 +71,49 @@ fn get_usage(
}))
}
pub const USAGE_ROUTER: Router = Router::new()
.get(&API_METHOD_GET_USAGE);
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
command: {
type: NodePowerCommand,
},
}
},
access: {
permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_POWER_MANAGEMENT, false),
},
)]
/// Reboot or shutdown the node.
fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
let systemctl_command = match command {
NodePowerCommand::Reboot => "reboot",
NodePowerCommand::Shutdown => "poweroff",
};
let output = Command::new("/bin/systemctl")
.arg(systemctl_command)
.output()
.map_err(|err| format_err!("failed to execute systemctl - {}", err))?;
if !output.status.success() {
match output.status.code() {
Some(code) => {
let msg = String::from_utf8(output.stderr)
.map(|m| if m.is_empty() { String::from("no error message") } else { m })
.unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
bail!("diff failed with status code: {} - {}", code, msg);
}
None => bail!("systemctl terminated by signal"),
}
}
Ok(())
}
pub const SUBDIRS: SubdirMap = &[
("usage", &USAGE_ROUTER),
];
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);
.get(&API_METHOD_GET_USAGE)
.post(&API_METHOD_REBOOT_OR_SHUTDOWN);

View File

@ -1,11 +1,12 @@
use std::process::{Command, Stdio};
use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use crate::api2::types::*;
use crate::config::acl::PRIV_SYS_AUDIT;
fn dump_journal(
start: Option<u64>,
@ -122,12 +123,15 @@ fn dump_journal(
}
},
},
access: {
permission: &Permission::Privilege(&["system", "log"], PRIV_SYS_AUDIT, false),
},
)]
/// Read syslog entries.
fn get_syslog(
param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let (count, lines) = dump_journal(
@ -137,7 +141,7 @@ fn get_syslog(
param["until"].as_str(),
param["service"].as_str())?;
rpcenv.set_result_attrib("total", Value::from(count));
rpcenv["total"] = Value::from(count);
Ok(json!(lines))
}

View File

@ -1,26 +1,96 @@
use std::fs::File;
use std::io::{BufRead, BufReader};
use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};
use proxmox::api::{api, ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, Router, RpcEnvironment, Permission, UserInformation};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::{identity, list_subdirs_api_method, sortable};
use crate::tools;
use crate::api2::types::*;
use crate::server::{self, UPID};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
fn get_task_status(
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
upid: {
schema: UPID_SCHEMA,
},
},
},
returns: {
description: "Task status nformation.",
properties: {
node: {
schema: NODE_SCHEMA,
},
upid: {
schema: UPID_SCHEMA,
},
pid: {
type: i64,
description: "The Unix PID.",
},
pstart: {
type: u64,
description: "The Unix process start time from `/proc/pid/stat`",
},
starttime: {
type: i64,
description: "The task start time (Epoch)",
},
"type": {
type: String,
description: "Worker type (arbitrary ASCII string)",
},
id: {
type: String,
optional: true,
description: "Worker ID (arbitrary ASCII string)",
},
user: {
type: String,
description: "The user who started the task.",
},
status: {
type: String,
description: "'running' or 'stopped'",
},
exitstatus: {
type: String,
optional: true,
description: "'OK', 'Error: <msg>', or 'unkwown'.",
},
},
},
access: {
description: "Users can access there own tasks, or need Sys.Audit on /system/tasks.",
permission: &Permission::Anybody,
},
)]
/// Get task status.
async fn get_task_status(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let upid = extract_upid(&param)?;
let username = rpcenv.get_user().unwrap();
if username != upid.username {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let mut result = json!({
"upid": param["upid"],
"node": upid.node,
@ -32,7 +102,7 @@ fn get_task_status(
"user": upid.username,
});
if crate::server::worker_is_active(&upid) {
if crate::server::worker_is_active(&upid).await? {
result["status"] = Value::from("running");
} else {
let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(String::from("unknown"));
@ -50,14 +120,54 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> {
upid_str.parse::<UPID>()
}
fn read_task_log(
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
upid: {
schema: UPID_SCHEMA,
},
"test-status": {
type: bool,
optional: true,
description: "Test task status, and set result attribute \"active\" accordingly.",
},
start: {
type: u64,
optional: true,
description: "Start at this line.",
default: 0,
},
limit: {
type: u64,
optional: true,
description: "Only list this amount of lines.",
default: 50,
},
},
},
access: {
description: "Users can access there own tasks, or need Sys.Audit on /system/tasks.",
permission: &Permission::Anybody,
},
)]
/// Read task log.
async fn read_task_log(
param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let upid = extract_upid(&param)?;
let username = rpcenv.get_user().unwrap();
if username != upid.username {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let test_status = param["test-status"].as_bool().unwrap_or(false);
let start = param["start"].as_u64().unwrap_or(0);
@ -89,28 +199,50 @@ fn read_task_log(
}
}
rpcenv.set_result_attrib("total", Value::from(count));
rpcenv["total"] = Value::from(count);
if test_status {
let active = crate::server::worker_is_active(&upid);
rpcenv.set_result_attrib("active", Value::from(active));
let active = crate::server::worker_is_active(&upid).await?;
rpcenv["active"] = Value::from(active);
}
Ok(json!(lines))
}
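// The ownership-or-privilege rule shared by the task endpoints, reduced to
// a sketch (the real checks go through CachedUserInfo::check_privs as
// shown above):
fn may_access_task(requesting_user: &str, task_owner: &str, has_sys_tasks_priv: bool) -> bool {
    // owners always pass; everyone else needs the /system/tasks privilege
    requesting_user == task_owner || has_sys_tasks_priv
}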
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
upid: {
schema: UPID_SCHEMA,
},
},
},
access: {
description: "Users can stop there own tasks, or need Sys.Modify on /system/tasks.",
permission: &Permission::Anybody,
},
)]
/// Try to stop a task.
fn stop_task(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let upid = extract_upid(&param)?;
if crate::server::worker_is_active(&upid) {
server::abort_worker_async(upid);
let username = rpcenv.get_user().unwrap();
if username != upid.username {
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
}
server::abort_worker_async(upid);
Ok(Value::Null)
}
@ -140,11 +272,13 @@ fn stop_task(
type: bool,
description: "Only list running tasks.",
optional: true,
default: false,
},
errors: {
type: bool,
description: "Only list erroneous tasks.",
optional:true,
default: false,
},
userfilter: {
optional:true,
@ -158,18 +292,26 @@ fn stop_task(
type: Array,
items: { type: TaskListItem },
},
access: {
description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
permission: &Permission::Anybody,
},
)]
/// List tasks.
pub fn list_tasks(
start: u64,
limit: u64,
errors: bool,
running: bool,
param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
let start = param["start"].as_u64().unwrap_or(0);
let limit = param["limit"].as_u64().unwrap_or(50);
let errors = param["errors"].as_bool().unwrap_or(false);
let running = param["running"].as_bool().unwrap_or(false);
let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
let store = param["store"].as_str();
@ -182,6 +324,8 @@ pub fn list_tasks(
let mut count = 0;
for info in list.iter() {
if !list_all && info.upid.username != username { continue; }
let mut entry = TaskListItem {
upid: info.upid_str.clone(),
node: "localhost".to_string(),
@ -238,79 +382,28 @@ pub fn list_tasks(
if (result.len() as u64) < limit { result.push(entry); };
}
rpcenv.set_result_attrib("total", Value::from(count));
rpcenv["total"] = Value::from(count);
Ok(result)
}
#[sortable]
const UPID_API_SUBDIRS: SubdirMap = &[
const UPID_API_SUBDIRS: SubdirMap = &sorted!([
(
"log", &Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&read_task_log),
&ObjectSchema::new(
"Read task log.",
&sorted!([
("node", false, &NODE_SCHEMA),
( "test-status",
true,
&BooleanSchema::new(
"Test task status, and set result attribute \"active\" accordingly."
).schema()
),
("upid", false, &UPID_SCHEMA),
("start", true, &IntegerSchema::new("Start at this line.")
.minimum(0)
.default(0)
.schema()
),
("limit", true, &IntegerSchema::new("Only list this amount of lines.")
.minimum(0)
.default(50)
.schema()
),
]),
)
)
)
.get(&API_METHOD_READ_TASK_LOG)
),
(
"status", &Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_task_status),
&ObjectSchema::new(
"Get task status.",
&sorted!([
("node", false, &NODE_SCHEMA),
("upid", false, &UPID_SCHEMA),
]),
.get(&API_METHOD_GET_TASK_STATUS)
)
)
)
)
];
]);
#[sortable]
pub const UPID_API_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
.delete(
&ApiMethod::new(
&ApiHandler::Sync(&stop_task),
&ObjectSchema::new(
"Try to stop a task.",
&sorted!([
("node", false, &NODE_SCHEMA),
("upid", false, &UPID_SCHEMA),
]),
)
).protected(true)
)
.delete(&API_METHOD_STOP_TASK)
.subdirs(&UPID_API_SUBDIRS);
#[sortable]
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_TASKS)
.match_all("upid", &UPID_API_ROUTER);

View File

@ -1,14 +1,11 @@
use std::mem::{self, MaybeUninit};
use chrono::prelude::*;
use failure::*;
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::{api, Router, Permission};
use proxmox::tools::fs::{file_read_firstline, replace_file, CreateOptions};
use crate::config::acl::PRIV_SYS_MODIFY;
use crate::api2::types::*;
fn read_etc_localtime() -> Result<String, Error> {
@ -18,34 +15,48 @@ fn read_etc_localtime() -> Result<String, Error> {
}
// otherwise guess from the /etc/localtime symlink
let mut buf = MaybeUninit::<[u8; 64]>::uninit();
let len = unsafe {
libc::readlink(
"/etc/localtime".as_ptr() as *const _,
buf.as_mut_ptr() as *mut _,
mem::size_of_val(&buf),
)
};
if len <= 0 {
bail!("failed to guess timezone");
}
let len = len as usize;
let buf = unsafe {
(*buf.as_mut_ptr())[len] = 0;
buf.assume_init()
};
let link = std::str::from_utf8(&buf[..len])?;
let link = std::fs::read_link("/etc/localtime").
map_err(|err| format_err!("failed to guess timezone - {}", err))?;
let link = link.to_string_lossy();
match link.rfind("/zoneinfo/") {
Some(pos) => Ok(link[(pos + 10)..].to_string()),
None => Ok(link.to_string()),
}
}
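// Example of the fallback above (path is hypothetical): with
// /etc/localtime -> /usr/share/zoneinfo/Europe/Vienna, rfind("/zoneinfo/")
// strips everything up to and including that segment and the function
// returns "Europe/Vienna"; a link outside any zoneinfo directory is
// returned verbatim.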
fn get_time(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
#[api(
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
},
},
returns: {
description: "Returns server time and timezone.",
properties: {
timezone: {
schema: TIME_ZONE_SCHEMA,
},
time: {
type: i64,
description: "Seconds since 1970-01-01 00:00:00 UTC.",
minimum: 1_297_163_644,
},
localtime: {
type: i64,
description: "Seconds since 1970-01-01 00:00:00 UTC. (local time)",
minimum: 1_297_163_644,
},
}
},
access: {
permission: &Permission::Anybody,
},
)]
/// Read server time and time zone settings.
fn get_time(_param: Value) -> Result<Value, Error> {
let datetime = Local::now();
let offset = datetime.offset();
let time = datetime.timestamp();
@ -58,13 +69,28 @@ fn get_time(
}))
}
#[api(
protected: true,
reload_timezone: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
timezone: {
schema: TIME_ZONE_SCHEMA,
},
},
},
access: {
permission: &Permission::Privilege(&["system", "time"], PRIV_SYS_MODIFY, false),
},
)]
/// Set time zone
fn set_timezone(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
timezone: String,
_param: Value,
) -> Result<Value, Error> {
let timezone = crate::tools::required_string_param(&param, "timezone")?;
let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone));
if !path.exists() {
@ -81,45 +107,6 @@ fn set_timezone(
Ok(Value::Null)
}
#[sortable]
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_time),
&ObjectSchema::new(
"Read server time and time zone settings.",
&sorted!([ ("node", false, &NODE_SCHEMA) ]),
)
).returns(
&ObjectSchema::new(
"Returns server time and timezone.",
&sorted!([
("timezone", false, &StringSchema::new("Time zone").schema()),
("time", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC.")
.minimum(1_297_163_644)
.schema()
),
("localtime", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC. (local time)")
.minimum(1_297_163_644)
.schema()
),
]),
).schema()
)
)
.put(
&ApiMethod::new(
&ApiHandler::Sync(&set_timezone),
&ObjectSchema::new(
"Set time zone.",
&sorted!([
("node", false, &NODE_SCHEMA),
("timezone", false, &StringSchema::new(
"Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
.schema()
),
]),
)
).protected(true).reload_timezone(true)
);
.get(&API_METHOD_GET_TIME)
.put(&API_METHOD_SET_TIMEZONE);

View File

@ -1,370 +1,65 @@
//! Sync datastore from remote server
use std::sync::{Arc};
use failure::*;
use serde_json::json;
use std::convert::TryFrom;
use std::sync::Arc;
use std::collections::HashMap;
use std::io::{Seek, SeekFrom};
use chrono::{Utc, TimeZone};
use anyhow::{format_err, Error};
use proxmox::api::api;
use proxmox::api::{ApiMethod, Router, RpcEnvironment};
use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
use crate::server::{WorkerTask};
use crate::backup::*;
use crate::client::*;
use crate::config::remote;
use crate::backup::DataStore;
use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store};
use crate::api2::types::*;
// fixme: implement filters
// fixme: delete vanished groups
// Todo: correctly lock backup groups
async fn pull_index_chunks<I: IndexFile>(
_worker: &WorkerTask,
chunk_reader: &mut RemoteChunkReader,
target: Arc<DataStore>,
index: I,
) -> Result<(), Error> {
use crate::config::{
remote,
acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
cached_user_info::CachedUserInfo,
};
for pos in 0..index.index_count() {
let digest = index.index_digest(pos).unwrap();
let chunk_exists = target.cond_touch_chunk(digest, false)?;
if chunk_exists {
//worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
continue;
}
//worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
let chunk = chunk_reader.read_raw_chunk(&digest)?;
target.insert_chunk(&chunk, &digest)?;
}
Ok(())
}
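// Note on the loop above: chunks are content-addressed by digest, so
// cond_touch_chunk(digest, false) serves as an existence test (and, judging
// by its name, refreshes the chunk for garbage-collection accounting);
// only chunks missing from the target store are downloaded and inserted,
// which is what makes repeated pulls incremental.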
async fn download_manifest(
reader: &BackupReader,
filename: &std::path::Path,
) -> Result<std::fs::File, Error> {
let tmp_manifest_file = std::fs::OpenOptions::new()
.write(true)
.create(true)
.read(true)
.open(&filename)?;
let mut tmp_manifest_file = reader.download(MANIFEST_BLOB_NAME, tmp_manifest_file).await?;
tmp_manifest_file.seek(SeekFrom::Start(0))?;
Ok(tmp_manifest_file)
}
async fn pull_single_archive(
worker: &WorkerTask,
reader: &BackupReader,
chunk_reader: &mut RemoteChunkReader,
tgt_store: Arc<DataStore>,
snapshot: &BackupDir,
archive_name: &str,
) -> Result<(), Error> {
let mut path = tgt_store.base_path();
path.push(snapshot.relative_path());
path.push(archive_name);
let mut tmp_path = path.clone();
tmp_path.set_extension("tmp");
worker.log(format!("sync archive {}", archive_name));
let tmpfile = std::fs::OpenOptions::new()
.write(true)
.create(true)
.read(true)
.open(&tmp_path)?;
let tmpfile = reader.download(archive_name, tmpfile).await?;
match archive_type(archive_name)? {
ArchiveType::DynamicIndex => {
let index = DynamicIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read dynamic index {:?} - {}", tmp_path, err))?;
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
}
ArchiveType::FixedIndex => {
let index = FixedIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err))?;
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
}
ArchiveType::Blob => { /* nothing to do */ }
}
if let Err(err) = std::fs::rename(&tmp_path, &path) {
bail!("Atomic rename file {:?} failed - {}", path, err);
}
Ok(())
}
async fn pull_snapshot(
worker: &WorkerTask,
reader: Arc<BackupReader>,
tgt_store: Arc<DataStore>,
snapshot: &BackupDir,
) -> Result<(), Error> {
let mut manifest_name = tgt_store.base_path();
manifest_name.push(snapshot.relative_path());
manifest_name.push(MANIFEST_BLOB_NAME);
let mut tmp_manifest_name = manifest_name.clone();
tmp_manifest_name.set_extension("tmp");
let mut tmp_manifest_file = download_manifest(&reader, &tmp_manifest_name).await?;
let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?;
tmp_manifest_blob.verify_crc()?;
if manifest_name.exists() {
let manifest_blob = proxmox::try_block!({
let mut manifest_file = std::fs::File::open(&manifest_name)
.map_err(|err| format_err!("unable to open local manifest {:?} - {}", manifest_name, err))?;
let manifest_blob = DataBlob::load(&mut manifest_file)?;
manifest_blob.verify_crc()?;
Ok(manifest_blob)
}).map_err(|err: Error| {
format_err!("unable to read local manifest {:?} - {}", manifest_name, err)
})?;
if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
return Ok(()); // nothing changed
}
}
let manifest = BackupManifest::try_from(tmp_manifest_blob)?;
let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, HashMap::new());
for item in manifest.files() {
let mut path = tgt_store.base_path();
path.push(snapshot.relative_path());
path.push(&item.filename);
if path.exists() {
match archive_type(&item.filename)? {
ArchiveType::DynamicIndex => {
let index = DynamicIndexReader::open(&path)?;
let (csum, size) = index.compute_csum();
match manifest.verify_file(&item.filename, &csum, size) {
Ok(_) => continue,
Err(err) => {
worker.log(format!("detected changed file {:?} - {}", path, err));
}
}
}
ArchiveType::FixedIndex => {
let index = FixedIndexReader::open(&path)?;
let (csum, size) = index.compute_csum();
match manifest.verify_file(&item.filename, &csum, size) {
Ok(_) => continue,
Err(err) => {
worker.log(format!("detected changed file {:?} - {}", path, err));
}
}
}
ArchiveType::Blob => {
let mut tmpfile = std::fs::File::open(&path)?;
let (csum, size) = compute_file_csum(&mut tmpfile)?;
match manifest.verify_file(&item.filename, &csum, size) {
Ok(_) => continue,
Err(err) => {
worker.log(format!("detected changed file {:?} - {}", path, err));
}
}
}
}
}
pull_single_archive(
worker,
&reader,
&mut chunk_reader,
tgt_store.clone(),
snapshot,
&item.filename,
).await?;
}
if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) {
bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
}
// cleanup - remove stale files
tgt_store.cleanup_backup_dir(snapshot, &manifest)?;
Ok(())
}
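// Sketch of a whole-file checksum in the spirit of the compute_file_csum()
// call above (assuming the openssl crate; returns the sha256 digest plus the
// byte count, matching what manifest.verify_file() expects):
use std::io::Read;

fn file_csum(file: &mut std::fs::File) -> std::io::Result<([u8; 32], u64)> {
    let mut hasher = openssl::sha::Sha256::new();
    let mut buf = [0u8; 64 * 1024];
    let mut size = 0u64;
    loop {
        let n = file.read(&mut buf)?;
        if n == 0 { break; }
        size += n as u64;
        hasher.update(&buf[..n]);
    }
    Ok((hasher.finish(), size))
}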
pub async fn pull_snapshot_from(
worker: &WorkerTask,
reader: Arc<BackupReader>,
tgt_store: Arc<DataStore>,
snapshot: &BackupDir,
) -> Result<(), Error> {
let (_path, is_new) = tgt_store.create_backup_dir(&snapshot)?;
if is_new {
worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));
if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot) {
worker.log(format!("cleanup error - {}", cleanup_err));
}
return Err(err);
}
} else {
worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?
}
Ok(())
}
pub async fn pull_group(
worker: &WorkerTask,
client: &HttpClient,
src_repo: &BackupRepository,
tgt_store: Arc<DataStore>,
group: &BackupGroup,
pub fn check_pull_privs(
username: &str,
store: &str,
remote: &str,
remote_store: &str,
delete: bool,
) -> Result<(), Error> {
let path = format!("api2/json/admin/datastore/{}/snapshots", src_repo.store());
let user_info = CachedUserInfo::new()?;
let args = json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
});
user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
let mut result = client.get(&path, Some(args)).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;
list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
let auth_info = client.login().await?;
let fingerprint = client.fingerprint();
let last_sync = tgt_store.last_successful_backup(group)?;
let mut remote_snapshots = std::collections::HashSet::new();
for item in list {
let backup_time = Utc.timestamp(item.backup_time, 0);
remote_snapshots.insert(backup_time);
if let Some(last_sync_time) = last_sync {
if last_sync_time > backup_time { continue; }
if delete {
user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
}
Ok(())
}
pub async fn get_pull_parameters(
store: &str,
remote: &str,
remote_store: &str,
) -> Result<(HttpClient, BackupRepository, Arc<DataStore>), Error> {
let tgt_store = DataStore::lookup_datastore(store)?;
let (remote_config, _digest) = remote::config()?;
let remote: remote::Remote = remote_config.lookup("remote", remote)?;
let options = HttpClientOptions::new()
.password(Some(auth_info.ticket.clone()))
.fingerprint(fingerprint.clone());
.password(Some(remote.password.clone()))
.fingerprint(remote.fingerprint.clone());
let new_client = HttpClient::new(src_repo.host(), src_repo.user(), options)?;
let client = HttpClient::new(&remote.host, &remote.userid, options)?;
let _auth_info = client.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
let reader = BackupReader::start(
new_client,
None,
src_repo.store(),
&item.backup_type,
&item.backup_id,
backup_time,
true,
).await?;
let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store.to_string());
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
}
if delete {
let local_list = group.list_backups(&tgt_store.base_path())?;
for info in local_list {
let backup_time = info.backup_dir.backup_time();
if remote_snapshots.contains(&backup_time) { continue; }
worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path()));
tgt_store.remove_backup_dir(&info.backup_dir)?;
}
}
Ok(())
}
pub async fn pull_store(
worker: &WorkerTask,
client: &HttpClient,
src_repo: &BackupRepository,
tgt_store: Arc<DataStore>,
delete: bool,
) -> Result<(), Error> {
let path = format!("api2/json/admin/datastore/{}/groups", src_repo.store());
let mut result = client.get(&path, None).await?;
let mut list: Vec<GroupListItem> = serde_json::from_value(result["data"].take())?;
list.sort_unstable_by(|a, b| {
let type_order = a.backup_type.cmp(&b.backup_type);
if type_order == std::cmp::Ordering::Equal {
a.backup_id.cmp(&b.backup_id)
} else {
type_order
}
});
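// Equivalent ordering sketch with std's Ordering::then_with, in case the
// nested comparison above is hard to read:
// list.sort_unstable_by(|a, b| a.backup_type.cmp(&b.backup_type)
//     .then_with(|| a.backup_id.cmp(&b.backup_id)));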
let mut errors = false;
let mut new_groups = std::collections::HashSet::new();
for item in list {
let group = BackupGroup::new(&item.backup_type, &item.backup_id);
if let Err(err) = pull_group(worker, client, src_repo, tgt_store.clone(), &group, delete).await {
worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
errors = true;
// do not stop here, instead continue
}
new_groups.insert(group);
}
if delete {
let result: Result<(), Error> = proxmox::try_block!({
let local_groups = BackupGroup::list_groups(&tgt_store.base_path())?;
for local_group in local_groups {
if new_groups.contains(&local_group) { continue; }
worker.log(format!("delete vanished group '{}/{}'", local_group.backup_type(), local_group.backup_id()));
if let Err(err) = tgt_store.remove_backup_group(&local_group) {
worker.log(err.to_string());
errors = true;
}
}
Ok(())
});
if let Err(err) = result {
worker.log(format!("error during cleanup: {}", err));
errors = true;
};
}
if errors {
bail!("sync failed with some errors.");
}
Ok(())
Ok((client, src_repo, tgt_store))
}
#[api(
@ -379,54 +74,44 @@ pub async fn pull_store(
"remote-store": {
schema: DATASTORE_SCHEMA,
},
delete: {
description: "Delete vanished backups. This remove the local copy if the remote backup was deleted.",
type: Boolean,
"remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true,
default: true,
},
},
},
access: {
// Note: the parameters used here are not URI parameters, so we need to test inside the function body
description: r###"The user needs Datastore.Backup privilege on '/datastore/{store}',
and needs to own the backup group. Remote.Read is required on '/remote/{remote}/{remote-store}'.
The delete flag additionally requires the Datastore.Prune privilege on '/datastore/{store}'.
"###,
permission: &Permission::Anybody,
},
)]
/// Sync store from other repository
async fn pull (
store: String,
remote: String,
remote_store: String,
delete: Option<bool>,
remove_vanished: Option<bool>,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let username = rpcenv.get_user().unwrap();
let delete = remove_vanished.unwrap_or(true);
let delete = delete.unwrap_or(true);
check_pull_privs(&username, &store, &remote, &remote_store, delete)?;
let tgt_store = DataStore::lookup_datastore(&store)?;
let (remote_config, _digest) = remote::config()?;
let remote: remote::Remote = remote_config.lookup("remote", &remote)?;
let options = HttpClientOptions::new()
.password(Some(remote.password.clone()))
.fingerprint(remote.fingerprint.clone());
let client = HttpClient::new(&remote.host, &remote.userid, options)?;
let _auth_info = client.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store);
let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;
// fixme: set to_stdout to false?
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {
worker.log(format!("sync datastore '{}' start", store));
// explicit create shared lock to prevent GC on newly created chunks
let _shared_store_lock = tgt_store.try_shared_chunk_store_lock()?;
pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete).await?;
pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, username).await?;
worker.log(format!("sync datastore '{}' end", store));

View File

@ -1,5 +1,5 @@
//use chrono::{Local, TimeZone};
use failure::*;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{self, HeaderValue, UPGRADE};
use hyper::http::request::Parts;
@ -7,7 +7,7 @@ use hyper::{Body, Response, StatusCode};
use serde_json::Value;
use proxmox::{sortable, identity};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::*;
use proxmox::http_err;
@ -15,6 +15,8 @@ use crate::api2::types::*;
use crate::backup::*;
use crate::server::{WorkerTask, H2Service};
use crate::tools;
use crate::config::acl::PRIV_DATASTORE_READ;
use crate::config::cached_user_info::CachedUserInfo;
mod environment;
use environment::*;
@ -29,18 +31,16 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."),
&sorted!([
("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &StringSchema::new("Backup type.")
.format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
.schema()
),
("backup-id", false, &StringSchema::new("Backup ID.").schema()),
("backup-time", false, &IntegerSchema::new("Backup time (Unix epoch.)")
.minimum(1_547_797_308)
.schema()
),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
]),
)
).access(
// Note: parameter 'store' is not a URI parameter, so we need to test inside the function body
Some("The user needs Datastore.Read privilege on /datastore/{store}."),
&Permission::Anybody
);
fn upgrade_to_backup_reader_protocol(
@ -54,7 +54,12 @@ fn upgrade_to_backup_reader_protocol(
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let username = rpcenv.get_user().unwrap();
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
let backup_type = tools::required_string_param(&param, "backup-type")?;
@ -75,7 +80,6 @@ fn upgrade_to_backup_reader_protocol(
bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
}
let username = rpcenv.get_user().unwrap();
let env_type = rpcenv.env_type();
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
@ -127,7 +131,7 @@ fn upgrade_to_backup_reader_protocol(
Either::Right((Ok(res), _)) => Ok(res),
Either::Right((Err(err), _)) => Err(err),
})
.map_ok(move |_| env.log("reader finished sucessfully"))
.map_ok(move |_| env.log("reader finished successfully"))
})?;
let response = Response::builder()

View File

@ -1,8 +1,7 @@
//use failure::*;
//use anyhow::{bail, format_err, Error};
use std::sync::Arc;
use std::collections::HashMap;
use serde_json::Value;
use serde_json::{json, Value};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
@ -16,7 +15,7 @@ use crate::server::formatter::*;
#[derive(Clone)]
pub struct ReaderEnvironment {
env_type: RpcEnvironmentType,
result_attributes: HashMap<String, Value>,
result_attributes: Value,
user: String,
pub debug: bool,
pub formatter: &'static OutputFormatter,
@ -37,7 +36,7 @@ impl ReaderEnvironment {
Self {
result_attributes: HashMap::new(),
result_attributes: json!({}),
env_type,
user,
worker,
@ -61,12 +60,12 @@ impl ReaderEnvironment {
impl RpcEnvironment for ReaderEnvironment {
fn set_result_attrib(&mut self, name: &str, value: Value) {
self.result_attributes.insert(name.into(), value);
fn result_attrib_mut(&mut self) -> &mut Value {
&mut self.result_attributes
}
fn get_result_attrib(&self, name: &str) -> Option<&Value> {
self.result_attributes.get(name)
fn result_attrib(&self) -> &Value {
&self.result_attributes
}
fn env_type(&self) -> RpcEnvironmentType {

View File

@ -1,16 +1,39 @@
use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::schema::ObjectSchema;
use proxmox::api::{api, Router, Permission};
use crate::tools;
use crate::config::acl::PRIV_SYS_AUDIT;
fn get_subscription(
_param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
#[api(
returns: {
description: "Subscription status.",
properties: {
status: {
type: String,
description: "'NotFound', 'active' or 'inactive'."
},
message: {
type: String,
description: "Human readable problem description.",
},
serverid: {
type: String,
description: "The unique server ID.",
},
url: {
type: String,
description: "URL to Web Shop.",
},
},
},
access: {
permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
},
)]
/// Read subscription info.
fn get_subscription(_param: Value) -> Result<Value, Error> {
let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
Ok(json!({
@ -22,9 +45,4 @@ fn get_subscription(
}
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_subscription),
&ObjectSchema::new("Read subscription info.", &[])
)
);
.get(&API_METHOD_GET_SUBSCRIPTION);

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail};
use ::serde::{Deserialize, Serialize};
use proxmox::api::{api, schema::*};
@ -25,11 +25,24 @@ macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL
// slash is not allowed because it is used as pve API delimiter
// also see "man useradd"
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }
macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
const_regex!{
pub IP_FORMAT_REGEX = IPRE!();
pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$");
pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?
@ -52,16 +65,28 @@ const_regex!{
pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");
pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
}
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
pub const IP_V4_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&IP_V4_REGEX);
pub const IP_V6_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&IP_V6_REGEX);
pub const IP_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&IP_FORMAT_REGEX);
ApiStringFormat::Pattern(&IP_REGEX);
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
@ -87,10 +112,39 @@ pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
pub const PASSWORD_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PASSWORD_REGEX);
pub const ACL_PATH_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&ACL_PATH_REGEX);
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const CIDR_V4_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&CIDR_V4_REGEX);
pub const CIDR_V6_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&CIDR_V6_REGEX);
pub const CIDR_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&CIDR_REGEX);
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.format(&PASSWORD_FORMAT)
.min_length(5)
.max_length(64)
.schema();
pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new(
"X509 certificate fingerprint (sha256)."
@ -142,6 +196,68 @@ pub const THIRD_DNS_SERVER_SCHEMA: Schema =
.format(&IP_FORMAT)
.schema();
pub const IP_V4_SCHEMA: Schema =
StringSchema::new("IPv4 address.")
.format(&IP_V4_FORMAT)
.max_length(15)
.schema();
pub const IP_V6_SCHEMA: Schema =
StringSchema::new("IPv6 address.")
.format(&IP_V6_FORMAT)
.max_length(39)
.schema();
pub const IP_SCHEMA: Schema =
StringSchema::new("IP (IPv4 or IPv6) address.")
.format(&IP_FORMAT)
.max_length(39)
.schema();
pub const CIDR_V4_SCHEMA: Schema =
StringSchema::new("IPv4 address with netmask (CIDR notation).")
.format(&CIDR_V4_FORMAT)
.max_length(18)
.schema();
pub const CIDR_V6_SCHEMA: Schema =
StringSchema::new("IPv6 address with netmask (CIDR notation).")
.format(&CIDR_V6_FORMAT)
.max_length(43)
.schema();
pub const CIDR_SCHEMA: Schema =
StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
.format(&CIDR_FORMAT)
.max_length(43)
.schema();
pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
"Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
"Access control path.")
.format(&ACL_PATH_FORMAT)
.min_length(1)
.max_length(128)
.schema();
pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
"Allow to propagate (inherit) permissions.")
.default(true)
.schema();
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
"Type of 'ugid' property.")
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("user", "User"),
EnumEntry::new("group", "Group")]))
.schema();
pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
StringSchema::new("Backup archive name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
@ -149,7 +265,10 @@ pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
pub const BACKUP_TYPE_SCHEMA: Schema =
StringSchema::new("Backup type.")
.format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("vm", "Virtual Machine Backup"),
EnumEntry::new("ct", "Container Backup"),
EnumEntry::new("host", "Host Backup")]))
.schema();
pub const BACKUP_ID_SCHEMA: Schema =
@ -172,12 +291,38 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
.max_length(32)
.schema();
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run sync job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
.schema();
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run garbage collection job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
.schema();
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
"Run prune job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
.schema();
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Delete vanished backups. This remove the local copy if the remote backup was deleted.")
.default(true)
.schema();
pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.schema();
@ -202,6 +347,12 @@ pub const PROXMOX_USER_ID_SCHEMA: Schema = StringSchema::new("User ID")
.max_length(64)
.schema();
pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
.format(&PROXMOX_GROUP_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
// Complex type definitions
@ -237,6 +388,9 @@ pub struct GroupListItem {
pub backup_count: u64,
/// List of contained archive files.
pub files: Vec<String>,
/// The owner of group
#[serde(skip_serializing_if="Option::is_none")]
pub owner: Option<String>,
}
#[api(
@ -269,8 +423,65 @@ pub struct SnapshotListItem {
/// Overall snapshot size (sum of all archive sizes).
#[serde(skip_serializing_if="Option::is_none")]
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if="Option::is_none")]
pub owner: Option<String>,
}
#[api(
properties: {
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
},
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Prune result.
pub struct PruneListItem {
pub backup_type: String, // enum
pub backup_id: String,
pub backup_time: i64,
/// Keep snapshot
pub keep: bool,
}
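// Hypothetical round-trip check for the kebab-case renaming above:
#[test]
fn prune_list_item_uses_kebab_case() {
    let item = PruneListItem {
        backup_type: "vm".to_string(),
        backup_id: "100".to_string(),
        backup_time: 1_590_000_000,
        keep: true,
    };
    let v = serde_json::to_value(&item).unwrap();
    assert_eq!(v["backup-type"], "vm"); // field names are serialized kebab-cased
    assert_eq!(v["backup-time"], 1_590_000_000i64);
}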
pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new(
"Number of daily backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = IntegerSchema::new(
"Number of hourly backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new(
"Number of backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = IntegerSchema::new(
"Number of monthly backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = IntegerSchema::new(
"Number of weekly backups to keep.")
.minimum(1)
.schema();
pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
"Number of yearly backups to keep.")
.minimum(1)
.schema();
#[api(
properties: {
"filename": {
@ -313,6 +524,10 @@ pub struct GarbageCollectionStatus {
pub removed_bytes: u64,
/// Number of removed chunks.
pub removed_chunks: usize,
/// Sum of pending bytes (pending removal - kept for safety).
pub pending_bytes: u64,
/// Number of pending chunks (pending removal - kept for safety).
pub pending_chunks: usize,
}
impl Default for GarbageCollectionStatus {
@ -325,6 +540,8 @@ impl Default for GarbageCollectionStatus {
disk_chunks: 0,
removed_bytes: 0,
removed_chunks: 0,
pending_bytes: 0,
pending_chunks: 0,
}
}
}
@ -373,10 +590,224 @@ pub struct TaskListItem {
pub status: Option<String>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
/// Restart the server
Reboot,
/// Shutdown the server
Shutdown,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
/// Configuration is done manually using other tools
Manual,
/// Define interfaces with statically allocated addresses.
Static,
/// Obtain an address via DHCP
DHCP,
/// Define the loopback interface.
Loopback,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
/// Round-robin policy
balance_rr = 0,
/// Active-backup policy
active_backup = 1,
/// XOR policy
balance_xor = 2,
/// Broadcast policy
broadcast = 3,
/// IEEE 802.3ad Dynamic link aggregation
//#[serde(rename = "802.3ad")]
ieee802_3ad = 4,
/// Adaptive transmit load balancing
balance_tlb = 5,
/// Adaptive load balancing
balance_alb = 6,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
/// Loopback
Loopback,
/// Physical Ethernet device
Eth,
/// Linux Bridge
Bridge,
/// Linux Bond
Bond,
/// Linux VLAN (eth.10)
Vlan,
/// Interface Alias (eth:1)
Alias,
/// Unknown interface type
Unknown,
}
pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
.format(&NETWORK_INTERFACE_FORMAT)
.min_length(1)
.max_length(libc::IFNAMSIZ-1)
.schema();
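// Note: libc::IFNAMSIZ is 16 on Linux and includes the trailing NUL byte,
// so the effective maximum interface name length accepted here is 15.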
pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
.schema();
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
"A list of network devices, comma separated.")
.format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
.schema();
#[api(
properties: {
name: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
"type": {
type: NetworkInterfaceType,
},
method: {
type: NetworkConfigMethod,
optional: true,
},
method6: {
type: NetworkConfigMethod,
optional: true,
},
cidr: {
schema: CIDR_V4_SCHEMA,
optional: true,
},
cidr6: {
schema: CIDR_V6_SCHEMA,
optional: true,
},
gateway: {
schema: IP_V4_SCHEMA,
optional: true,
},
gateway6: {
schema: IP_V6_SCHEMA,
optional: true,
},
options: {
description: "Option list (inet)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
options6: {
description: "Option list (inet6)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
comments: {
description: "Comments (inet, may span multiple lines)",
type: String,
optional: true,
},
comments6: {
description: "Comments (inet6, may span multiple lines)",
type: String,
optional: true,
},
bridge_ports: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
slaves: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
bond_mode: {
type: LinuxBondMode,
optional: true,
}
}
)]
#[derive(Debug, Serialize, Deserialize)]
/// Network Interface configuration
pub struct Interface {
/// Autostart interface
#[serde(rename = "autostart")]
pub autostart: bool,
/// Interface is active (UP)
pub active: bool,
/// Interface name
pub name: String,
/// Interface type
#[serde(rename = "type")]
pub interface_type: NetworkInterfaceType,
#[serde(skip_serializing_if="Option::is_none")]
pub method: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if="Option::is_none")]
pub method6: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv4 address with netmask
pub cidr: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv4 gateway
pub gateway: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv6 address with netmask
pub cidr6: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// IPv6 gateway
pub gateway6: Option<String>,
#[serde(skip_serializing_if="Vec::is_empty")]
pub options: Vec<String>,
#[serde(skip_serializing_if="Vec::is_empty")]
pub options6: Vec<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub comments: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub comments6: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
/// Maximum Transmission Unit
pub mtu: Option<u64>,
#[serde(skip_serializing_if="Option::is_none")]
pub bridge_ports: Option<Vec<String>>,
/// Enable bridge vlan support.
#[serde(skip_serializing_if="Option::is_none")]
pub bridge_vlan_aware: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub slaves: Option<Vec<String>>,
#[serde(skip_serializing_if="Option::is_none")]
pub bond_mode: Option<LinuxBondMode>,
}
// Regression tests
#[test]
fn test_cert_fingerprint_schema() -> Result<(), Error> {
fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
let schema = CERT_FINGERPRINT_SHA256_SCHEMA;
@ -391,7 +822,7 @@ fn test_cert_fingerprint_schema() -> Result<(), Error> {
for fingerprint in invalid_fingerprints.iter() {
if let Ok(_) = parse_simple_value(fingerprint, &schema) {
bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint);
bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
}
}
@ -417,7 +848,7 @@ fn test_cert_fingerprint_schema() -> Result<(), Error> {
}
#[test]
fn test_proxmox_user_id_schema() -> Result<(), Error> {
fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
let schema = PROXMOX_USER_ID_SCHEMA;
@ -435,7 +866,7 @@ fn test_proxmox_user_id_schema() -> Result<(), Error> {
for name in invalid_user_ids.iter() {
if let Ok(_) = parse_simple_value(name, &schema) {
bail!("test userid '{}' failed - got Ok() while expection an error.", name);
bail!("test userid '{}' failed - got Ok() while exception an error.", name);
}
}
@ -462,3 +893,31 @@ fn test_proxmox_user_id_schema() -> Result<(), Error> {
Ok(())
}
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum RRDMode {
/// Maximum
Max,
/// Average
Average,
}
#[api()]
#[repr(u64)]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RRDTimeFrameResolution {
/// 1 min => last 70 minutes
Hour = 60,
/// 30 min => last 35 hours
Day = 60*30,
/// 3 hours => about 8 days
Week = 60*180,
/// 12 hours => last 35 days
Month = 60*720,
/// 1 week => last 490 days
Year = 60*10080,
}
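// Rough coverage check for the resolutions above, assuming the RRD keeps
// about 70 data points per series (as the doc comments suggest):
fn covered_secs(res: RRDTimeFrameResolution) -> u64 {
    (res as u64) * 70
}
// covered_secs(RRDTimeFrameResolution::Hour)  == 4_200     (70 minutes)
// covered_secs(RRDTimeFrameResolution::Month) == 3_024_000 (35 days)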

View File

@ -1,7 +1,7 @@
use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::ObjectSchema;
pub const PROXMOX_PKG_VERSION: &str =
@ -31,6 +31,6 @@ pub const ROUTER: Router = Router::new()
&ApiMethod::new(
&ApiHandler::Sync(&get_version),
&ObjectSchema::new("Proxmox Backup Server API version.", &[])
)
).access(None, &Permission::Anybody)
);

148
src/auth.rs Normal file
View File

@ -0,0 +1,148 @@
//! Proxmox Backup Server Authentication
//!
//! This library contains helpers to authenticate users.
use std::process::{Command, Stdio};
use std::io::Write;
use std::ffi::{CString, CStr};
use base64;
use anyhow::{bail, format_err, Error};
use serde_json::json;
pub trait ProxmoxAuthenticator {
fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error>;
fn store_password(&self, username: &str, password: &str) -> Result<(), Error>;
}
pub struct PAM();
impl ProxmoxAuthenticator for PAM {
fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
auth.get_handler().set_credentials(username, password);
auth.authenticate()?;
return Ok(());
}
fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
let mut child = Command::new("passwd")
.arg(username)
.stdin(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.or_else(|err| Err(format_err!("unable to set password for '{}' - execute passwd failed: {}", username, err)))?;
// Note: passwd reads the password twice from stdin (for verification)
writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;
let output = child.wait_with_output()
.or_else(|err| Err(format_err!("unable to set password for '{}' - wait failed: {}", username, err)))?;
if !output.status.success() {
bail!("unable to set password for '{}' - {}", username, String::from_utf8_lossy(&output.stderr));
}
Ok(())
}
}
pub struct PBS();
pub fn crypt(password: &[u8], salt: &str) -> Result<String, Error> {
#[link(name="crypt")]
extern "C" {
#[link_name = "crypt"]
fn __crypt(key: *const libc::c_char, salt: *const libc::c_char) -> * mut libc::c_char;
}
let salt = CString::new(salt)?;
let password = CString::new(password)?;
let res = unsafe {
CStr::from_ptr(
__crypt(
password.as_c_str().as_ptr(),
salt.as_c_str().as_ptr()
)
)
};
Ok(String::from(res.to_str()?))
}
pub fn encrypt_pw(password: &str) -> Result<String, Error> {
let salt = proxmox::sys::linux::random_data(8)?;
let salt = format!("$5${}$", base64::encode_config(&salt, base64::CRYPT));
crypt(password.as_bytes(), &salt)
}
pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
let verify = crypt(password.as_bytes(), enc_password)?;
if &verify != enc_password {
bail!("invalid credentials");
}
Ok(())
}
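// Minimal round-trip sketch for the crypt(3) helpers above (the password
// literal is hypothetical):
fn password_roundtrip() -> Result<(), Error> {
    let enc = encrypt_pw("hunter2")?; // "$5$<salt>$<hash>", i.e. SHA-256 crypt
    verify_crypt_pw("hunter2", &enc)?;
    assert!(verify_crypt_pw("wrong", &enc).is_err());
    Ok(())
}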
const SHADOW_CONFIG_FILENAME: &str = "/etc/proxmox-backup/shadow.json";
impl ProxmoxAuthenticator for PBS {
fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
match data[username].as_str() {
None => bail!("no password set"),
Some(enc_password) => verify_crypt_pw(password, enc_password)?,
}
Ok(())
}
fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
let enc_password = encrypt_pw(password)?;
let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
data[username] = enc_password.into();
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
let options = proxmox::tools::fs::CreateOptions::new()
.perm(mode)
.owner(nix::unistd::ROOT)
.group(nix::unistd::Gid::from_raw(0));
let data = serde_json::to_vec_pretty(&data)?;
proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options)?;
Ok(())
}
}
pub fn parse_userid(userid: &str) -> Result<(String, String), Error> {
let data: Vec<&str> = userid.rsplitn(2, '@').collect();
if data.len() != 2 {
bail!("userid '{}' has no realm", userid);
}
Ok((data[1].to_owned(), data[0].to_owned()))
}
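// Hypothetical test for the split order above - the *last* '@' separates
// the local part from the realm:
#[test]
fn parse_userid_splits_on_last_at() -> Result<(), Error> {
    assert_eq!(parse_userid("backup@pbs")?, ("backup".to_string(), "pbs".to_string()));
    assert_eq!(parse_userid("odd@name@pam")?, ("odd@name".to_string(), "pam".to_string()));
    assert!(parse_userid("no-realm").is_err());
    Ok(())
}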
/// Lookup the authenticator for the specified realm
pub fn lookup_authenticator(realm: &str) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
match realm {
"pam" => Ok(Box::new(PAM())),
"pbs" => Ok(Box::new(PBS())),
_ => bail!("unknown realm '{}'", realm),
}
}
/// Authenticate users
pub fn authenticate_user(userid: &str, password: &str) -> Result<(), Error> {
let (username, realm) = parse_userid(userid)?;
lookup_authenticator(&realm)?
.authenticate_user(&username, password)
}

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use openssl::rsa::{Rsa};
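// Migration sketch for the recurring failure -> anyhow hunks in this change
// set (assuming the anyhow crate): the Error type swaps over while bail! and
// format_err! keep their shape.
use anyhow::{bail, format_err, Error};

fn ensure(flag: bool) -> Result<(), Error> {
    if !flag {
        bail!("flag not set"); // early error, same macro shape as with failure
    }
    Ok(())
}

fn wrap(err: std::io::Error) -> Error {
    format_err!("io problem - {}", err) // ad-hoc error construction
}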

View File

@ -103,7 +103,7 @@
//!
//! Not sure if this is better. TODO
use failure::*;
use anyhow::{bail, Error};
// Note: .pcat1 => Proxmox Catalog Format version 1
pub const CATALOG_NAME: &str = "catalog.pcat1.didx";

View File

@ -1,6 +1,6 @@
use crate::tools;
use failure::*;
use anyhow::{bail, format_err, Error};
use regex::Regex;
use std::os::unix::io::RawFd;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use std::fmt;
use std::ffi::{CStr, CString, OsStr};
use std::os::unix::ffi::OsStrExt;

View File

@ -7,7 +7,7 @@ use std::os::unix::ffi::OsStrExt;
use std::path::{Component, Path, PathBuf};
use chrono::{Utc, offset::TimeZone};
use failure::*;
use anyhow::{bail, format_err, Error};
use nix::sys::stat::{Mode, SFlag};
use proxmox::api::{cli::*, *};
@ -140,7 +140,9 @@ impl Shell {
continue;
}
};
let _ = handle_command(helper.cmd_def(), "", args, None);
let rpcenv = CliEnvironment::new();
let _ = handle_command(helper.cmd_def(), "", args, rpcenv, None);
self.rl.add_history_entry(line);
self.update_prompt()?;
}

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use std::sync::Arc;
use std::io::Read;

View File

@ -1,7 +1,7 @@
use std::sync::Arc;
use std::io::Write;
use failure::*;
use anyhow::{Error};
use super::CryptConfig;
use crate::tools::borrow::Tied;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use std::path::{Path, PathBuf};
use std::io::Write;
@ -157,8 +157,8 @@ impl ChunkStore {
let (chunk_path, _digest_str) = self.chunk_path(digest);
const UTIME_NOW: i64 = ((1 << 30) - 1);
const UTIME_OMIT: i64 = ((1 << 30) - 2);
const UTIME_NOW: i64 = (1 << 30) - 1;
const UTIME_OMIT: i64 = (1 << 30) - 2;
let times: [libc::timespec; 2] = [
libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
@ -289,9 +289,9 @@ impl ChunkStore {
pub fn sweep_unused_chunks(
&self,
oldest_writer: Option<i64>,
oldest_writer: i64,
status: &mut GarbageCollectionStatus,
worker: Arc<WorkerTask>,
worker: &WorkerTask,
) -> Result<(), Error> {
use nix::sys::stat::fstatat;
@ -299,10 +299,8 @@ impl ChunkStore {
let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)
if let Some(stamp) = oldest_writer {
if stamp < min_atime {
min_atime = stamp;
}
if oldest_writer < min_atime {
min_atime = oldest_writer;
}
min_atime -= 300; // add 5 mins gap for safety
@ -316,6 +314,7 @@ impl ChunkStore {
worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
}
worker.fail_on_abort()?;
tools::fail_on_shutdown()?;
let (dirfd, entry) = match entry {
@ -338,10 +337,9 @@ impl ChunkStore {
let lock = self.mutex.lock();
if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
let age = now - stat.st_atime;
//println!("FOUND {} {:?}", age/(3600*24), filename);
if stat.st_atime < min_atime {
println!("UNLINK {} {:?}", age/(3600*24), filename);
//let age = now - stat.st_atime;
//println!("UNLINK {} {:?}", age/(3600*24), filename);
let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
if res != 0 {
let err = nix::Error::last();
@ -354,11 +352,16 @@ impl ChunkStore {
}
status.removed_chunks += 1;
status.removed_bytes += stat.st_size as u64;
} else {
if stat.st_atime < oldest_writer {
status.pending_chunks += 1;
status.pending_bytes += stat.st_size as u64;
} else {
status.disk_chunks += 1;
status.disk_bytes += stat.st_size as u64;
}
}
}
drop(lock);
}
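// Accounting sketch for the branches above: a chunk with atime older than
// min_atime is unlinked and counted as removed_*; one newer than min_atime
// but still older than the oldest running writer becomes pending_* (kept for
// safety); everything newer stays plain disk_* usage.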

View File

@ -2,7 +2,7 @@ use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::BytesMut;
use failure::*;
use anyhow::{Error};
use futures::ready;
use futures::stream::{Stream, TryStream};

View File

@ -6,7 +6,7 @@
//! See the Wikipedia article for [Authenticated
//! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
//! for a short introduction.
use failure::*;
use anyhow::{bail, Error};
use openssl::pkcs5::pbkdf2_hmac;
use openssl::hash::MessageDigest;
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, Error};
use std::sync::Arc;
use std::io::{Read, BufRead};

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use std::sync::Arc;
use std::io::Write;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, Error};
use std::convert::TryInto;
use proxmox::tools::io::{ReadExt, WriteExt};
@ -311,7 +311,7 @@ impl DataBlob {
/// Verify digest and data length for unencrypted chunks.
///
/// To do that, we need to decompress data first. Please note that
/// this is noth possible for encrypted chunks.
/// this is not possible for encrypted chunks.
pub fn verify_unencrypted(
&self,
expected_chunk_size: usize,

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, Error};
use std::sync::Arc;
use std::io::{Read, BufReader};
use proxmox::tools::io::ReadExt;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use std::sync::Arc;
use std::io::{Write, Seek, SeekFrom};
use proxmox::tools::io::WriteExt;

View File

@ -1,9 +1,9 @@
use std::collections::{HashSet, HashMap};
use std::io;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use failure::*;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use chrono::{DateTime, Utc};
@ -11,7 +11,7 @@ use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::manifest::{MANIFEST_BLOB_NAME, BackupManifest};
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
use crate::config::datastore;
@ -149,6 +149,7 @@ impl DataStore {
let mut wanted_files = HashSet::new();
wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });
for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
@ -236,18 +237,80 @@ impl DataStore {
}
}
pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
/// Returns the backup owner.
///
/// The backup owner is the user who first created the backup group.
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> {
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");
let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
Ok(owner.trim_end().to_string()) // remove trailing newline
}
/// Set the backup owner.
pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> {
let mut path = self.base_path();
path.push(backup_group.group_path());
path.push("owner");
let mut open_options = std::fs::OpenOptions::new();
open_options.write(true);
open_options.truncate(true);
if force {
open_options.create(true);
} else {
open_options.create_new(true);
}
let mut file = open_options.open(&path)
.map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
write!(file, "{}\n", userid)
.map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
Ok(())
}
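// Std-only sketch of the owner-file convention used above (the directory
// argument is hypothetical): the file holds a single line, e.g. "admin@pbs\n".
fn read_owner(group_dir: &std::path::Path) -> std::io::Result<String> {
    let raw = std::fs::read_to_string(group_dir.join("owner"))?;
    Ok(raw.lines().next().unwrap_or("").trim_end().to_string())
}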
/// Create a backup group if it does not already exist.
///
/// Also sets the owner to 'userid'. If the group already exists, it returns the
/// current owner (instead of setting the owner).
pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> {
// create intermediate path first:
let mut full_path = self.base_path();
full_path.push(backup_dir.group().group_path());
let base_path = self.base_path();
let mut full_path = base_path.clone();
full_path.push(backup_group.backup_type());
std::fs::create_dir_all(&full_path)?;
full_path.push(backup_group.backup_id());
// create the last component now
match std::fs::create_dir(&full_path) {
Ok(_) => {
self.set_owner(backup_group, userid, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure
Ok(owner)
}
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
let owner = self.get_owner(backup_group)?; // just to be sure
Ok(owner)
}
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
}
}
/// Creates a new backup snapshot inside a BackupGroup
///
/// The BackupGroup directory needs to exist.
pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
let relative_path = backup_dir.relative_path();
let mut full_path = self.base_path();
full_path.push(&relative_path);
// create the last component now
match std::fs::create_dir(&full_path) {
Ok(_) => Ok((relative_path, true)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
@ -290,12 +353,14 @@ impl DataStore {
index: I,
file_name: &Path, // only used for error reporting
status: &mut GarbageCollectionStatus,
worker: &WorkerTask,
) -> Result<(), Error> {
status.index_file_count += 1;
status.index_data_bytes += index.index_bytes();
for pos in 0..index.index_count() {
worker.fail_on_abort()?;
tools::fail_on_shutdown()?;
let digest = index.index_digest(pos).unwrap();
if let Err(err) = self.chunk_store.touch_chunk(digest) {
@ -306,21 +371,22 @@ impl DataStore {
Ok(())
}
fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus, worker: &WorkerTask) -> Result<(), Error> {
let image_list = self.list_images()?;
for path in image_list {
worker.fail_on_abort()?;
tools::fail_on_shutdown()?;
if let Ok(archive_type) = archive_type(&path) {
if archive_type == ArchiveType::FixedIndex {
let index = self.open_fixed_reader(&path)?;
self.index_mark_used_chunks(index, &path, status)?;
self.index_mark_used_chunks(index, &path, status, worker)?;
} else if archive_type == ArchiveType::DynamicIndex {
let index = self.open_dynamic_reader(&path)?;
self.index_mark_used_chunks(index, &path, status)?;
self.index_mark_used_chunks(index, &path, status, worker)?;
}
}
}
@ -332,26 +398,36 @@ impl DataStore {
self.last_gc_status.lock().unwrap().clone()
}
pub fn garbage_collection(&self, worker: Arc<WorkerTask>) -> Result<(), Error> {
pub fn garbage_collection_running(&self) -> bool {
if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
}
pub fn garbage_collection(&self, worker: &WorkerTask) -> Result<(), Error> {
if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
let oldest_writer = self.chunk_store.oldest_writer();
let now = unsafe { libc::time(std::ptr::null_mut()) };
let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
let mut gc_status = GarbageCollectionStatus::default();
gc_status.upid = Some(worker.to_string());
worker.log("Start GC phase1 (mark used chunks)");
self.mark_used_chunks(&mut gc_status)?;
self.mark_used_chunks(&mut gc_status, &worker)?;
worker.log("Start GC phase2 (sweep unused chunks)");
self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, worker.clone())?;
self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?;
worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
if gc_status.pending_bytes > 0 {
worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
}
worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));
if gc_status.index_data_bytes > 0 {

View File

@ -5,7 +5,7 @@ use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use failure::*;
use anyhow::{bail, format_err, Error};
use proxmox::tools::io::ReadExt;
use proxmox::tools::uuid::Uuid;
@ -275,7 +275,7 @@ struct ChunkCacher<'a, S> {
}
impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, failure::Error> {
fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, anyhow::Error> {
let (start, end, digest) = self.index.chunk_info(index)?;
self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
}

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use std::convert::TryInto;
use std::io::{Seek, SeekFrom};

View File

@ -3,7 +3,7 @@ use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::{Bytes, BytesMut};
use failure::*;
use anyhow::{format_err, Error};
use futures::*;
/// Trait to get digest list from index files

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
use chrono::{Local, TimeZone, DateTime};

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use std::convert::TryFrom;
use std::path::Path;
@ -7,6 +7,7 @@ use serde_json::{json, Value};
use crate::backup::BackupDir;
pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
pub struct FileInfo {
pub filename: String,

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use std::sync::Arc;
use super::datastore::*;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
// chacha20-poly1305

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use proxmox::api::{*, cli::*};
@ -49,7 +49,7 @@ fn hello_command(
}
#[api(input: { properties: {} })]
/// Quit command. Exit the programm.
/// Quit command. Exit the program.
///
/// Returns: nothing
fn quit_command() -> Result<(), Error> {
@ -83,7 +83,8 @@ fn main() -> Result<(), Error> {
let args = shellword_split(&line)?;
let _ = handle_command(helper.cmd_def(), "", args, None);
let rpcenv = CliEnvironment::new();
let _ = handle_command(helper.cmd_def(), "", args, rpcenv, None);
rl.add_history_entry(line);
}

View File

@ -1,6 +1,6 @@
use std::io::Write;
use failure::*;
use anyhow::{Error};
use chrono::{DateTime, Utc};

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use proxmox::api::format::*;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use proxmox::api::format::*;
use proxmox::api::cli::*;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use proxmox::api::format::dump_api;

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, Error};
use std::thread;
use std::path::PathBuf;
@ -16,7 +16,7 @@ use std::io::Write;
// tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros
// # pxar create test.pxar ./dyntest1/
// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
fn create_large_file(path: PathBuf) {

View File

@ -2,7 +2,7 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use failure::*;
use anyhow::{Error};
use futures::future::TryFutureExt;
use futures::stream::Stream;
use tokio::net::TcpStream;

View File

@ -2,7 +2,7 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use failure::*;
use anyhow::{format_err, Error};
use futures::future::TryFutureExt;
use futures::stream::Stream;

View File

@ -1,6 +1,6 @@
use std::sync::Arc;
use failure::*;
use anyhow::{format_err, Error};
use futures::*;
use hyper::{Request, Response, Body};
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use futures::*;
// Simple H2 server to test H2 speed with h2client.rs

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, Error};
use futures::*;
use proxmox::try_block;
@ -34,6 +34,8 @@ async fn run() -> Result<(), Error> {
config::update_self_signed_cert(false)?;
proxmox_backup::rrd::create_rrdb_dir()?;
if let Err(err) = generate_auth_key() {
bail!("unable to generate auth key - {}", err);
}
@ -45,7 +47,7 @@ async fn run() -> Result<(), Error> {
let _ = csrf_secret(); // load with lazy_static
let config = server::ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED);
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;
let rest_server = RestServer::new(config);

View File

@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
use chrono::{Local, DateTime, Utc, TimeZone};
@ -22,11 +22,6 @@ use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar::{ self, catalog::* };
//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;
use serde_json::{json, Value};
//use hyper::Body;
use std::sync::{Arc, Mutex};
@ -39,20 +34,12 @@ use tokio::sync::mpsc;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
proxmox::const_regex! {
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
}
const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
.format(&BACKUP_REPO_URL)
.max_length(256)
.schema();
const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
"Backup source specification ([<label>:<path>]).")
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();
const KEYFILE_SCHEMA: Schema = StringSchema::new(
"Path to encryption key. All data will be encrypted using this key.")
.schema();
@ -688,14 +675,6 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
Ok(Value::Null)
}
fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
}
bail!("unable to parse directory specification '{}'", value);
}
fn spawn_catalog_upload(
client: Arc<BackupWriter>,
crypt_config: Option<Arc<CryptConfig>>,
@ -865,12 +844,12 @@ async fn create_backup(
let mut upload_list = vec![];
enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };
let mut upload_catalog = false;
for backupspec in backupspec_list {
let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
let filename = &spec.config_string;
let target = &spec.archive_name;
use std::os::unix::fs::FileTypeExt;
@ -878,19 +857,15 @@ async fn create_backup(
.map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
let file_type = metadata.file_type();
let extension = target.rsplit('.').next()
.ok_or_else(|| format_err!("missing target file extension '{}'", target))?;
match extension {
"pxar" => {
match spec.spec_type {
BackupSpecificationType::PXAR => {
if !file_type.is_dir() {
bail!("got unexpected file type (expected directory)");
}
upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
upload_catalog = true;
}
"img" => {
BackupSpecificationType::IMAGE => {
if !(file_type.is_file() || file_type.is_block_device()) {
bail!("got unexpected file type (expected file or block device)");
}
@ -899,22 +874,19 @@ async fn create_backup(
if size == 0 { bail!("got zero-sized file '{}'", filename); }
upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
}
"conf" => {
BackupSpecificationType::CONFIG => {
if !file_type.is_file() {
bail!("got unexpected file type (expected regular file)");
}
upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
}
"log" => {
BackupSpecificationType::LOGFILE => {
if !file_type.is_file() {
bail!("got unexpected file type (expected regular file)");
}
upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
}
_ => {
bail!("got unknown archive extension '{}'", extension);
upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
}
}
}
@ -967,21 +939,21 @@ async fn create_backup(
for (backup_type, filename, target, size) in upload_list {
match backup_type {
BackupType::CONFIG => {
BackupSpecificationType::CONFIG => {
println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
let stats = client
.upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
.await?;
manifest.add_file(target, stats.size, stats.csum)?;
}
BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
let stats = client
.upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
.await?;
manifest.add_file(target, stats.size, stats.csum)?;
}
BackupType::PXAR => {
BackupSpecificationType::PXAR => {
println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
let stats = backup_directory(
@@ -1000,7 +972,7 @@ async fn create_backup(
manifest.add_file(target, stats.size, stats.csum)?;
catalog.lock().unwrap().end_directory()?;
}
BackupType::IMAGE => {
BackupSpecificationType::IMAGE => {
println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
let stats = backup_image(
&client,
@@ -1135,6 +1107,18 @@ fn dump_image<W: Write>(
Ok(())
}
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
(name.into(), archive_type(name).unwrap())
} else if name.ends_with(".pxar") {
(format!("{}.didx", name), ArchiveType::DynamicIndex)
} else if name.ends_with(".img") {
(format!("{}.fidx", name), ArchiveType::FixedIndex)
} else {
(format!("{}.blob", name), ArchiveType::Blob)
}
}
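The new parse_archive_type helper normalizes a user-supplied archive name into the server-side file name plus its archive type, replacing the ad-hoc suffix handling in restore below. A few illustrative mappings (a sketch, assuming archive_type() recognizes the three server extensions):

    // server-side names pass through with their detected type:
    //   "root.pxar.didx" -> ("root.pxar.didx", ArchiveType::DynamicIndex)
    // client-side names get the matching server extension appended:
    //   "root.pxar"      -> ("root.pxar.didx", ArchiveType::DynamicIndex)
    //   "disk.img"       -> ("disk.img.fidx", ArchiveType::FixedIndex)
    // anything else is treated as a blob:
    //   "index.json"     -> ("index.json.blob", ArchiveType::Blob)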
#[api(
input: {
properties: {
@@ -1207,14 +1191,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
}
};
let server_archive_name = if archive_name.ends_with(".pxar") {
format!("{}.didx", archive_name)
} else if archive_name.ends_with(".img") {
format!("{}.fidx", archive_name)
} else {
format!("{}.blob", archive_name)
};
let client = BackupReader::start(
client,
crypt_config.clone(),
@@ -1227,7 +1203,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
let manifest = client.download_manifest().await?;
if server_archive_name == MANIFEST_BLOB_NAME {
let (archive_name, archive_type) = parse_archive_type(archive_name);
if archive_name == MANIFEST_BLOB_NAME {
let backup_index_data = manifest.into_json().to_string();
if let Some(target) = target {
replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
@@ -1238,9 +1216,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
} else if server_archive_name.ends_with(".blob") {
} else if archive_type == ArchiveType::Blob {
let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
let mut reader = client.download_blob(&manifest, &archive_name).await?;
if let Some(target) = target {
let mut writer = std::fs::OpenOptions::new()
@@ -1257,9 +1235,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
} else if server_archive_name.ends_with(".didx") {
} else if archive_type == ArchiveType::DynamicIndex {
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let index = client.download_dynamic_index(&manifest, &archive_name).await?;
let most_used = index.find_most_used_chunks(8);
@@ -1289,9 +1267,9 @@ async fn restore(param: Value) -> Result<Value, Error> {
std::io::copy(&mut reader, &mut writer)
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
} else if server_archive_name.ends_with(".fidx") {
} else if archive_type == ArchiveType::FixedIndex {
let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
let index = client.download_fixed_index(&manifest, &archive_name).await?;
let mut writer = if let Some(target) = target {
std::fs::OpenOptions::new()
@@ -1308,9 +1286,6 @@ async fn restore(param: Value) -> Result<Value, Error> {
};
dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
} else {
bail!("unknown archive file extension (expected .pxar of .img)");
}
Ok(Value::Null)
@@ -1390,6 +1365,12 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
("group", false, &StringSchema::new("Backup group.").schema()),
], [
("output-format", true, &OUTPUT_FORMAT),
(
"quiet",
true,
&BooleanSchema::new("Minimal output - only show removals.")
.schema()
),
("repository", true, &REPO_URL_SCHEMA),
])
)
@@ -1417,18 +1398,55 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let quiet = param["quiet"].as_bool().unwrap_or(false);
param.as_object_mut().unwrap().remove("repository");
param.as_object_mut().unwrap().remove("group");
param.as_object_mut().unwrap().remove("output-format");
param.as_object_mut().unwrap().remove("quiet");
param["backup-type"] = group.backup_type().into();
param["backup-id"] = group.backup_id().into();
let result = client.post(&path, Some(param)).await?;
let mut result = client.post(&path, Some(param)).await?;
record_repository(&repo);
view_task_result(client, result, &output_format).await?;
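// renders a snapshot record as its relative path, e.g. "vm/100/2020-05-29T15:14:12Z" (illustrative value)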
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
Ok(match v.as_bool() {
Some(true) => "keep",
Some(false) => "remove",
None => "unknown",
}.to_string())
};
let options = default_table_format_options()
.sortby("backup-type", false)
.sortby("backup-id", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
.column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
.column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
;
let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;
let mut data = result["data"].take();
if quiet {
let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
item["keep"].as_bool() == Some(false)
}).map(|v| v.clone()).collect();
data = list.into();
}
format_and_print_result_full(&mut data, info, &output_format, &options);
Ok(Value::Null)
}
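Since the prune result is now rendered client-side instead of going through view_task_result, the --quiet path simply drops every record that was kept. A self-contained sketch of that filter on hypothetical data:

    use serde_json::json;

    let data = json!([
        { "backup-id": "100", "keep": true  },
        { "backup-id": "101", "keep": false },
    ]);
    // mirror the --quiet branch: keep only entries marked for removal
    let removals: Vec<serde_json::Value> = data.as_array().unwrap().iter()
        .filter(|item| item["keep"].as_bool() == Some(false))
        .cloned()
        .collect();
    assert_eq!(removals.len(), 1);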
@@ -1852,7 +1870,9 @@ fn key_mgmt_cli() -> CliCommandMap {
const KDF_SCHEMA: Schema =
StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
.format(&ApiStringFormat::Enum(&["scrypt", "none"]))
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("scrypt", "SCrypt"),
EnumEntry::new("none", "Do not encrypt the key")]))
.default("scrypt")
.schema();
@@ -2007,7 +2027,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
if let Some(pipe) = pipe {
nix::unistd::chdir(Path::new("/")).unwrap();
// Finish creation of deamon by redirecting filedescriptors.
// Finish creation of daemon by redirecting filedescriptors.
let nullfd = nix::fcntl::open(
"/dev/null",
nix::fcntl::OFlag::O_RDWR,
@@ -2400,7 +2420,8 @@ fn main() {
.insert("catalog", catalog_mgmt_cli())
.insert("task", task_mgmt_cli());
run_cli_command(cmd_def, Some(|future| {
let rpcenv = CliEnvironment::new();
run_cli_command(cmd_def, rpcenv, Some(|future| {
proxmox_backup::tools::runtime::main(future)
}));
}

View File

@@ -1,19 +1,20 @@
use std::path::PathBuf;
use std::collections::HashMap;
use failure::*;
use anyhow::{format_err, Error};
use serde_json::{json, Value};
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox::api::{api, cli::*, RpcEnvironment};
use proxmox_backup::configdir;
use proxmox_backup::tools;
use proxmox_backup::config::{self, remote::{self, Remote}};
use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
use proxmox_backup::client::*;
use proxmox_backup::tools::ticket::*;
use proxmox_backup::auth_helpers::*;
mod proxmox_backup_manager;
use proxmox_backup_manager::*;
async fn view_task_result(
client: HttpClient,
result: Value,
@@ -51,88 +52,6 @@ fn connect() -> Result<HttpClient, Error> {
Ok(client)
}
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List configured remotes.
fn list_remotes(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::remote::API_METHOD_LIST_REMOTES;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("name"))
.column(ColumnConfig::new("host"))
.column(ColumnConfig::new("userid"))
.column(ColumnConfig::new("fingerprint"))
.column(ColumnConfig::new("comment"));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
fn remote_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
.insert(
"create",
// fixme: howto handle password parameter?
CliCommand::new(&api2::config::remote::API_METHOD_CREATE_REMOTE)
.arg_param(&["name"])
)
.insert(
"update",
CliCommand::new(&api2::config::remote::API_METHOD_UPDATE_REMOTE)
.arg_param(&["name"])
.completion_cb("name", config::remote::complete_remote_name)
)
.insert(
"remove",
CliCommand::new(&api2::config::remote::API_METHOD_DELETE_REMOTE)
.arg_param(&["name"])
.completion_cb("name", config::remote::complete_remote_name)
);
cmd_def.into()
}
fn datastore_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&api2::config::datastore::API_METHOD_LIST_DATASTORES))
.insert("create",
CliCommand::new(&api2::config::datastore::API_METHOD_CREATE_DATASTORE)
.arg_param(&["name", "path"])
)
.insert("update",
CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE)
.arg_param(&["name"])
.completion_cb("name", config::datastore::complete_datastore_name)
)
.insert("remove",
CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
.arg_param(&["name"])
.completion_cb("name", config::datastore::complete_datastore_name)
);
cmd_def.into()
}
#[api(
input: {
properties: {
@@ -328,97 +247,6 @@ fn task_mgmt_cli() -> CommandLineInterface {
cmd_def.into()
}
fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
let mut parts = Vec::new();
for entry in name.entries() {
parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
}
Ok(parts.join(", "))
}
#[api]
/// Display node certificate information.
fn cert_info() -> Result<(), Error> {
let cert_path = PathBuf::from(configdir!("/proxy.pem"));
let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
let cert = openssl::x509::X509::from_pem(&cert_pem)?;
println!("Subject: {}", x509name_to_string(cert.subject_name())?);
if let Some(san) = cert.subject_alt_names() {
for name in san.iter() {
if let Some(v) = name.dnsname() {
println!(" DNS:{}", v);
} else if let Some(v) = name.ipaddress() {
println!(" IP:{:?}", v);
} else if let Some(v) = name.email() {
println!(" EMAIL:{}", v);
} else if let Some(v) = name.uri() {
println!(" URI:{}", v);
}
}
}
println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
println!("Validity:");
println!(" Not Before: {}", cert.not_before());
println!(" Not After : {}", cert.not_after());
let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
let fp_string = proxmox::tools::digest_to_hex(&fp);
let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
.collect::<Vec<&str>>().join(":");
println!("Fingerprint (sha256): {}", fp_string);
let pubkey = cert.public_key()?;
println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);
println!("Public key bits: {}", pubkey.bits());
Ok(())
}
#[api(
input: {
properties: {
force: {
description: "Force generation of new SSL certifate.",
type: Boolean,
optional:true,
},
}
},
)]
/// Update node certificates and generate all needed files/directories.
fn update_certs(force: Option<bool>) -> Result<(), Error> {
config::create_configdir()?;
if let Err(err) = generate_auth_key() {
bail!("unable to generate auth key - {}", err);
}
if let Err(err) = generate_csrf_key() {
bail!("unable to generate csrf key - {}", err);
}
config::update_self_signed_cert(force.unwrap_or(false))?;
Ok(())
}
fn cert_mgmt_cli() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("info", CliCommand::new(&API_METHOD_CERT_INFO))
.insert("update", CliCommand::new(&API_METHOD_UPDATE_CERTS));
cmd_def.into()
}
// fixme: avoid API redefinition
#[api(
input: {
@@ -432,11 +260,9 @@ fn cert_mgmt_cli() -> CommandLineInterface {
"remote-store": {
schema: DATASTORE_SCHEMA,
},
delete: {
description: "Delete vanished backups. This remove the local copy if the remote backup was deleted.",
type: Boolean,
"remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true,
default: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
@@ -450,7 +276,7 @@ async fn pull_datastore(
remote: String,
remote_store: String,
local_store: String,
delete: Option<bool>,
remove_vanished: Option<bool>,
param: Value,
) -> Result<Value, Error> {
@@ -464,8 +290,8 @@ async fn pull_datastore(
"remote-store": remote_store,
});
if let Some(delete) = delete {
args["delete"] = delete.into();
if let Some(remove_vanished) = remove_vanished {
args["remove-vanished"] = Value::from(remove_vanished);
}
let result = client.post("api2/json/pull", Some(args)).await?;
@@ -478,10 +304,15 @@
fn main() {
let cmd_def = CliCommandMap::new()
.insert("acl", acl_commands())
.insert("datastore", datastore_commands())
.insert("dns", dns_commands())
.insert("network", network_commands())
.insert("user", user_commands())
.insert("remote", remote_commands())
.insert("garbage-collection", garbage_collection_commands())
.insert("cert", cert_mgmt_cli())
.insert("sync-job", sync_job_commands())
.insert("task", task_mgmt_cli())
.insert(
"pull",
@@ -492,7 +323,10 @@ fn main() {
.completion_cb("remote-store", complete_remote_datastore_name)
);
proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def));
let mut rpcenv = CliEnvironment::new();
rpcenv.set_user(Some(String::from("root@pam")));
proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
}
// shell completion helper
@@ -502,9 +336,9 @@ pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String
let _ = proxmox::try_block!({
let remote = param.get("remote").ok_or_else(|| format_err!("no remote"))?;
let (remote_config, _digest) = remote::config()?;
let (remote_config, _digest) = config::remote::config()?;
let remote: Remote = remote_config.lookup("remote", &remote)?;
let remote: config::remote::Remote = remote_config.lookup("remote", &remote)?;
let options = HttpClientOptions::new()
.password(Some(remote.password.clone()))

View File

@@ -1,6 +1,7 @@
use std::sync::Arc;
use std::path::Path;
use failure::*;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
@@ -14,6 +15,7 @@ use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{ DiskManage, zfs::zfs_pool_stats };
fn main() {
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
@@ -34,7 +36,7 @@ async fn run() -> Result<(), Error> {
let _ = csrf_secret(); // load with lazy_static
let mut config = ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC);
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
// add default dirs which includes jquery and bootstrap
// my $base = '/usr/share/libpve-http-server-perl';
@@ -107,6 +109,9 @@ async fn run() -> Result<(), Error> {
bail!("unable to start daemon - {}", err);
}
start_task_scheduler();
start_stat_generator();
server.await?;
log::info!("server shutting down, waiting for active workers to complete");
proxmox_backup::server::last_worker_future().await?;
@@ -114,3 +119,648 @@ async fn run() -> Result<(), Error> {
Ok(())
}
fn start_stat_generator() {
let abort_future = server::shutdown_future();
let future = Box::pin(run_stat_generator());
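// race the stat generator against the shutdown future so the task ends on server shutdown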
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
}
fn start_task_scheduler() {
let abort_future = server::shutdown_future();
let future = Box::pin(run_task_scheduler());
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
}
use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};
fn next_minute() -> Result<Instant, Error> {
let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
Ok(Instant::now() + epoch_next - epoch_now)
}
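next_minute() rounds the current epoch time up to the next full minute and converts that back into an Instant, so every scheduler pass wakes at a minute boundary. A worked example with hypothetical numbers:

    // 42 seconds into a minute, the next boundary is 18 seconds away
    let epoch_now = 1_590_000_042u64;
    let epoch_next = (epoch_now / 60 + 1) * 60;
    assert_eq!(epoch_next - epoch_now, 18);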
async fn run_task_scheduler() {
let mut count: usize = 0;
loop {
count += 1;
let delay_target = match next_minute() { // try to run every minute
Ok(d) => d,
Err(err) => {
eprintln!("task scheduler: compute next minute failed - {}", err);
tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
continue;
}
};
if count > 2 { // wait 1..2 minutes before starting
match schedule_tasks().catch_unwind().await {
Err(panic) => {
match panic.downcast::<&str>() {
Ok(msg) => {
eprintln!("task scheduler panic: {}", msg);
}
Err(_) => {
eprintln!("task scheduler panic - unknown type");
}
}
}
Ok(Err(err)) => {
eprintln!("task scheduler failed - {:?}", err);
}
Ok(Ok(_)) => {}
}
}
tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
}
}
async fn schedule_tasks() -> Result<(), Error> {
schedule_datastore_garbage_collection().await;
schedule_datastore_prune().await;
schedule_datastore_sync_jobs().await;
Ok(())
}
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
let list = proxmox_backup::server::read_task_list()?;
let mut last: Option<&server::UPID> = None;
for entry in list.iter() {
if entry.upid.worker_type == worker_type {
if let Some(ref id) = entry.upid.worker_id {
if id == worker_id {
match last {
Some(ref upid) => {
if upid.starttime < entry.upid.starttime {
last = Some(&entry.upid)
}
}
None => {
last = Some(&entry.upid)
}
}
}
}
}
}
Ok(last.cloned())
}
async fn schedule_datastore_garbage_collection() {
use proxmox_backup::backup::DataStore;
use proxmox_backup::server::{UPID, WorkerTask};
use proxmox_backup::config::datastore::{self, DataStoreConfig};
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
let config = match datastore::config() {
Err(err) => {
eprintln!("unable to read datastore config - {}", err);
return;
}
Ok((config, _digest)) => config,
};
for (store, (_, store_config)) in config.sections {
let datastore = match DataStore::lookup_datastore(&store) {
Ok(datastore) => datastore,
Err(err) => {
eprintln!("lookup_datastore failed - {}", err);
continue;
}
};
let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
Ok(c) => c,
Err(err) => {
eprintln!("datastore config from_value failed - {}", err);
continue;
}
};
let event_str = match store_config.gc_schedule {
Some(event_str) => event_str,
None => continue,
};
let event = match parse_calendar_event(&event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
continue;
}
};
if datastore.garbage_collection_running() { continue; }
let worker_type = "garbage_collection";
let stat = datastore.last_gc_status();
let last = if let Some(upid_str) = stat.upid {
match upid_str.parse::<UPID>() {
Ok(upid) => upid.starttime,
Err(err) => {
eprintln!("unable to parse upid '{}' - {}", upid_str, err);
continue;
}
}
} else {
match lookup_last_worker(worker_type, &store) {
Ok(Some(upid)) => upid.starttime,
Ok(None) => 0,
Err(err) => {
eprintln!("lookup_last_job_start failed: {}", err);
continue;
}
}
};
let next = match compute_next_event(&event, last, false) {
Ok(next) => next,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
Err(err) => {
eprintln!("query system time failed - {}", err);
continue;
}
};
if next > now { continue; }
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
"backup@pam",
false,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
datastore.garbage_collection(&worker)
}
) {
eprintln!("unable to start garbage collection on store {} - {}", store2, err);
}
}
}
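All three schedulers share the same decision: parse the configured calendar event, take the start time of the last matching worker, compute the next due time, and fire only if that time has already passed. Condensed into one helper (a sketch using the imports shown above; is_due itself is not part of this patch):

    use anyhow::Error;
    use proxmox_backup::tools::systemd::time::{parse_calendar_event, compute_next_event};

    fn is_due(event_str: &str, last: i64, now: i64) -> Result<bool, Error> {
        let event = parse_calendar_event(event_str)?; // e.g. "daily"
        let next = compute_next_event(&event, last, false)?;
        Ok(next <= now)
    }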
async fn schedule_datastore_prune() {
use proxmox_backup::backup::{
PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
use proxmox_backup::server::{WorkerTask};
use proxmox_backup::config::datastore::{self, DataStoreConfig};
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
let config = match datastore::config() {
Err(err) => {
eprintln!("unable to read datastore config - {}", err);
return;
}
Ok((config, _digest)) => config,
};
for (store, (_, store_config)) in config.sections {
let datastore = match DataStore::lookup_datastore(&store) {
Ok(datastore) => datastore,
Err(err) => {
eprintln!("lookup_datastore '{}' failed - {}", store, err);
continue;
}
};
let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
Ok(c) => c,
Err(err) => {
eprintln!("datastore '{}' config from_value failed - {}", store, err);
continue;
}
};
let event_str = match store_config.prune_schedule {
Some(event_str) => event_str,
None => continue,
};
let prune_options = PruneOptions {
keep_last: store_config.keep_last,
keep_hourly: store_config.keep_hourly,
keep_daily: store_config.keep_daily,
keep_weekly: store_config.keep_weekly,
keep_monthly: store_config.keep_monthly,
keep_yearly: store_config.keep_yearly,
};
if !prune_options.keeps_something() { // no prune settings - keep all
continue;
}
let event = match parse_calendar_event(&event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
continue;
}
};
let worker_type = "prune";
let last = match lookup_last_worker(worker_type, &store) {
Ok(Some(upid)) => {
if proxmox_backup::server::worker_is_active_local(&upid) {
continue;
}
upid.starttime
}
Ok(None) => 0,
Err(err) => {
eprintln!("lookup_last_job_start failed: {}", err);
continue;
}
};
let next = match compute_next_event(&event, last, false) {
Ok(next) => next,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
Err(err) => {
eprintln!("query system time failed - {}", err);
continue;
}
};
if next > now { continue; }
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
"backup@pam",
false,
move |worker| {
worker.log(format!("Starting datastore prune on store \"{}\"", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
let base_path = datastore.base_path();
let groups = BackupGroup::list_groups(&base_path)?;
for group in groups {
let list = group.list_backups(&base_path)?;
let mut prune_info = compute_prune_info(list, &prune_options)?;
prune_info.reverse(); // delete older snapshots first
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, group.backup_type(), group.backup_id()));
for (info, keep) in prune_info {
worker.log(format!(
"{} {}/{}/{}",
if keep { "keep" } else { "remove" },
group.backup_type(), group.backup_id(),
BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
if !keep {
datastore.remove_backup_dir(&info.backup_dir)?;
}
}
}
Ok(())
}
) {
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
}
}
}
async fn schedule_datastore_sync_jobs() {
use proxmox_backup::{
backup::DataStore,
client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
server::{ WorkerTask },
config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
tools::systemd::time::{ parse_calendar_event, compute_next_event },
};
let config = match sync::config() {
Err(err) => {
eprintln!("unable to read sync job config - {}", err);
return;
}
Ok((config, _digest)) => config,
};
let remote_config = match remote::config() {
Err(err) => {
eprintln!("unable to read remote config - {}", err);
return;
}
Ok((config, _digest)) => config,
};
for (job_id, (_, job_config)) in config.sections {
let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c,
Err(err) => {
eprintln!("sync job config from_value failed - {}", err);
continue;
}
};
let event_str = match job_config.schedule {
Some(ref event_str) => event_str.clone(),
None => continue,
};
let event = match parse_calendar_event(&event_str) {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
continue;
}
};
let worker_type = "syncjob";
let last = match lookup_last_worker(worker_type, &job_id) {
Ok(Some(upid)) => {
if proxmox_backup::server::worker_is_active_local(&upid) {
continue;
}
upid.starttime
},
Ok(None) => 0,
Err(err) => {
eprintln!("lookup_last_job_start failed: {}", err);
continue;
}
};
let next = match compute_next_event(&event, last, false) {
Ok(next) => next,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
Err(err) => {
eprintln!("query system time failed - {}", err);
continue;
}
};
if next > now { continue; }
let job_id2 = job_id.clone();
let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
Ok(datastore) => datastore,
Err(err) => {
eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
continue;
}
};
let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
Ok(remote) => remote,
Err(err) => {
eprintln!("remote_config lookup failed: {}", err);
continue;
}
};
let username = String::from("backup@pam");
let delete = job_config.remove_vanished.unwrap_or(true);
if let Err(err) = WorkerTask::spawn(
worker_type,
Some(job_id.clone()),
&username.clone(),
false,
move |worker| async move {
worker.log(format!("Starting datastore sync job '{}'", job_id));
worker.log(format!("task triggered by schedule '{}'", event_str));
worker.log(format!("Sync datastore '{}' from '{}/{}'",
job_config.store, job_config.remote, job_config.remote_store));
let options = HttpClientOptions::new()
.password(Some(remote.password.clone()))
.fingerprint(remote.fingerprint.clone());
let client = HttpClient::new(&remote.host, &remote.userid, options)?;
let _auth_info = client.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);
pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;
Ok(())
}
) {
eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
}
}
}
async fn run_stat_generator() {
let mut count = 0;
loop {
count += 1;
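// persist RRD data to disk only on every 7th pass (~70 seconds); update in memory otherwise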
let save = if count > 6 { count = 0; true } else { false };
let delay_target = Instant::now() + Duration::from_secs(10);
generate_host_stats(save).await;
tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
}
}
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
use proxmox_backup::rrd;
if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
eprintln!("rrd::update_value '{}' failed - {}", name, err);
}
}
fn rrd_update_derive(name: &str, value: f64, save: bool) {
use proxmox_backup::rrd;
if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
eprintln!("rrd::update_value '{}' failed - {}", name, err);
}
}
async fn generate_host_stats(save: bool) {
use proxmox::sys::linux::procfs::{
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
use proxmox_backup::config::datastore;
proxmox_backup::tools::runtime::block_in_place(move || {
match read_proc_stat() {
Ok(stat) => {
rrd_update_gauge("host/cpu", stat.cpu, save);
rrd_update_gauge("host/iowait", stat.iowait_percent, save);
}
Err(err) => {
eprintln!("read_proc_stat failed - {}", err);
}
}
match read_meminfo() {
Ok(meminfo) => {
rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
rrd_update_gauge("host/memused", meminfo.memused as f64, save);
rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
}
Err(err) => {
eprintln!("read_meminfo failed - {}", err);
}
}
match read_proc_net_dev() {
Ok(netdev) => {
use proxmox_backup::config::network::is_physical_nic;
let mut netin = 0;
let mut netout = 0;
for item in netdev {
if !is_physical_nic(&item.device) { continue; }
netin += item.receive;
netout += item.send;
}
rrd_update_derive("host/netin", netin as f64, save);
rrd_update_derive("host/netout", netout as f64, save);
}
Err(err) => {
eprintln!("read_prox_net_dev failed - {}", err);
}
}
match read_loadavg() {
Ok(loadavg) => {
rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
}
Err(err) => {
eprintln!("read_loadavg failed - {}", err);
}
}
let disk_manager = DiskManage::new();
gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);
match datastore::config() {
Ok((config, _)) => {
let datastore_list: Vec<datastore::DataStoreConfig> =
config.convert_to_typed_array("datastore").unwrap_or(Vec::new());
for config in datastore_list {
let rrd_prefix = format!("datastore/{}", config.name);
let path = std::path::Path::new(&config.path);
gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
}
}
Err(err) => {
eprintln!("read datastore config failed - {}", err);
}
}
});
}
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {
match proxmox_backup::tools::disks::disk_usage(path) {
Ok((total, used, _avail)) => {
let rrd_key = format!("{}/total", rrd_prefix);
rrd_update_gauge(&rrd_key, total as f64, save);
let rrd_key = format!("{}/used", rrd_prefix);
rrd_update_gauge(&rrd_key, used as f64, save);
}
Err(err) => {
eprintln!("read disk_usage on {:?} failed - {}", path, err);
}
}
match disk_manager.find_mounted_device(path) {
Ok(None) => {},
Ok(Some((fs_type, device, source))) => {
let mut device_stat = None;
match fs_type.as_str() {
"zfs" => {
if let Some(pool) = source {
match zfs_pool_stats(&pool) {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
}
}
}
_ => {
if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
match disk.read_stat() {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
}
}
}
}
if let Some(stat) = device_stat {
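// read/write sectors are 512-byte units; io_ticks are milliseconds, hence the /1000.0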
let rrd_key = format!("{}/read_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
let rrd_key = format!("{}/read_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);
let rrd_key = format!("{}/write_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
let rrd_key = format!("{}/write_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);
let rrd_key = format!("{}/io_ticks", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
}
}
Err(err) => {
eprintln!("find_mounted_device failed - {}", err);
}
}
}

View File

@@ -0,0 +1,69 @@
use anyhow::{bail, Error};
use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::config;
use proxmox_backup::api2;
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Access Control list.
fn list_acls(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::access::acl::API_METHOD_READ_ACL;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
fn render_ugid(value: &Value, record: &Value) -> Result<String, Error> {
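// e.g. a user ugid "alice@pbs" renders unchanged, a group ugid "admin" renders as "@admin"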
if value.is_null() { return Ok(String::new()); }
let ugid = value.as_str().unwrap();
let ugid_type = record["ugid_type"].as_str().unwrap();
if ugid_type == "user" {
Ok(ugid.to_string())
} else if ugid_type == "group" {
Ok(format!("@{}", ugid))
} else {
bail!("render_ugid: got unknown ugid_type");
}
}
let options = default_table_format_options()
.column(ColumnConfig::new("ugid").renderer(render_ugid))
.column(ColumnConfig::new("path"))
.column(ColumnConfig::new("propagate"))
.column(ColumnConfig::new("roleid"));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn acl_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&&API_METHOD_LIST_ACLS))
.insert(
"update",
CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
.arg_param(&["path", "role"])
.completion_cb("userid", config::user::complete_user_name)
.completion_cb("path", config::datastore::complete_acl_path)
);
cmd_def.into()
}

View File

@@ -0,0 +1,100 @@
use std::path::PathBuf;
use anyhow::{bail, Error};
use proxmox::api::{api, cli::*};
use proxmox_backup::config;
use proxmox_backup::configdir;
use proxmox_backup::auth_helpers::*;
fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
let mut parts = Vec::new();
for entry in name.entries() {
parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
}
Ok(parts.join(", "))
}
#[api]
/// Display node certificate information.
fn cert_info() -> Result<(), Error> {
let cert_path = PathBuf::from(configdir!("/proxy.pem"));
let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
let cert = openssl::x509::X509::from_pem(&cert_pem)?;
println!("Subject: {}", x509name_to_string(cert.subject_name())?);
if let Some(san) = cert.subject_alt_names() {
for name in san.iter() {
if let Some(v) = name.dnsname() {
println!(" DNS:{}", v);
} else if let Some(v) = name.ipaddress() {
println!(" IP:{:?}", v);
} else if let Some(v) = name.email() {
println!(" EMAIL:{}", v);
} else if let Some(v) = name.uri() {
println!(" URI:{}", v);
}
}
}
println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
println!("Validity:");
println!(" Not Before: {}", cert.not_before());
println!(" Not After : {}", cert.not_after());
let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
let fp_string = proxmox::tools::digest_to_hex(&fp);
let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
.collect::<Vec<&str>>().join(":");
println!("Fingerprint (sha256): {}", fp_string);
let pubkey = cert.public_key()?;
println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);
println!("Public key bits: {}", pubkey.bits());
Ok(())
}
#[api(
input: {
properties: {
force: {
description: "Force generation of new SSL certifate.",
type: Boolean,
optional:true,
},
}
},
)]
/// Update node certificates and generate all needed files/directories.
fn update_certs(force: Option<bool>) -> Result<(), Error> {
config::create_configdir()?;
if let Err(err) = generate_auth_key() {
bail!("unable to generate auth key - {}", err);
}
if let Err(err) = generate_csrf_key() {
bail!("unable to generate csrf key - {}", err);
}
config::update_self_signed_cert(force.unwrap_or(false))?;
Ok(())
}
pub fn cert_mgmt_cli() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("info", CliCommand::new(&API_METHOD_CERT_INFO))
.insert("update", CliCommand::new(&API_METHOD_UPDATE_CERTS));
cmd_def.into()
}

View File

@@ -0,0 +1,97 @@
use anyhow::Error;
use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Datastore list.
fn list_datastores(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::datastore::API_METHOD_LIST_DATASTORES;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("name"))
.column(ColumnConfig::new("path"))
.column(ColumnConfig::new("comment"));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
#[api(
input: {
properties: {
name: {
schema: DATASTORE_SCHEMA,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Show datastore configuration
fn show_datastore(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::datastore::API_METHOD_READ_DATASTORE;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options();
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn datastore_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES))
.insert("show",
CliCommand::new(&API_METHOD_SHOW_DATASTORE)
.arg_param(&["name"])
.completion_cb("name", config::datastore::complete_datastore_name)
)
.insert("create",
CliCommand::new(&api2::config::datastore::API_METHOD_CREATE_DATASTORE)
.arg_param(&["name", "path"])
)
.insert("update",
CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE)
.arg_param(&["name"])
.completion_cb("name", config::datastore::complete_datastore_name)
.completion_cb("gc-schedule", config::datastore::complete_calendar_event)
.completion_cb("prune-schedule", config::datastore::complete_calendar_event)
)
.insert("remove",
CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
.arg_param(&["name"])
.completion_cb("name", config::datastore::complete_datastore_name)
);
cmd_def.into()
}

View File

@@ -0,0 +1,57 @@
use anyhow::Error;
use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::api2;
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Read DNS settings
fn get_dns(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
param["node"] = "localhost".into();
let info = &api2::node::dns::API_METHOD_GET_DNS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("search"))
.column(ColumnConfig::new("dns1"))
.column(ColumnConfig::new("dns2"))
.column(ColumnConfig::new("dns3"));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn dns_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert(
"get",
CliCommand::new(&API_METHOD_GET_DNS)
)
.insert(
"set",
CliCommand::new(&api2::node::dns::API_METHOD_UPDATE_DNS)
.fixed_param("node", String::from("localhost"))
);
cmd_def.into()
}

View File

@@ -0,0 +1,16 @@
mod acl;
pub use acl::*;
mod cert;
pub use cert::*;
mod datastore;
pub use datastore::*;
mod dns;
pub use dns::*;
mod network;
pub use network::*;
mod remote;
pub use remote::*;
mod sync;
pub use sync::*;
mod user;
pub use user::*;

View File

@@ -0,0 +1,162 @@
use anyhow::Error;
use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::config;
use proxmox_backup::api2;
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Network device list.
fn list_network_devices(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
param["node"] = "localhost".into();
let info = &api2::node::network::API_METHOD_LIST_NETWORK_DEVICES;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
if let Value::String(ref diff) = rpcenv["changes"] {
if output_format == "text" {
eprintln!("pending changes:\n{}\n", diff);
}
}
fn render_address(_value: &Value, record: &Value) -> Result<String, Error> {
let mut text = String::new();
if let Some(cidr) = record["cidr"].as_str() {
text.push_str(cidr);
}
if let Some(cidr) = record["cidr6"].as_str() {
if !text.is_empty() { text.push('\n'); }
text.push_str(cidr);
}
Ok(text)
}
fn render_ports(_value: &Value, record: &Value) -> Result<String, Error> {
let mut text = String::new();
if let Some(ports) = record["bridge_ports"].as_array() {
let list: Vec<&str> = ports.iter().filter_map(|v| v.as_str()).collect();
text.push_str(&list.join(" "));
}
if let Some(slaves) = record["slaves"].as_array() {
let list: Vec<&str> = slaves.iter().filter_map(|v| v.as_str()).collect();
text.push_str(&list.join(" "));
}
Ok(text)
}
fn render_gateway(_value: &Value, record: &Value) -> Result<String, Error> {
let mut text = String::new();
if let Some(gateway) = record["gateway"].as_str() {
text.push_str(gateway);
}
if let Some(gateway) = record["gateway6"].as_str() {
if !text.is_empty() { text.push('\n'); }
text.push_str(gateway);
}
Ok(text)
}
let options = default_table_format_options()
.column(ColumnConfig::new("name"))
.column(ColumnConfig::new("type").header("type"))
.column(ColumnConfig::new("autostart"))
.column(ColumnConfig::new("method"))
.column(ColumnConfig::new("method6"))
.column(ColumnConfig::new("cidr").header("address").renderer(render_address))
.column(ColumnConfig::new("gateway").header("gateway").renderer(render_gateway))
.column(ColumnConfig::new("bridge_ports").header("ports/slaves").renderer(render_ports));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
#[api()]
/// Show pending configuration changes (diff)
fn pending_network_changes(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
param["node"] = "localhost".into();
let info = &api2::node::network::API_METHOD_LIST_NETWORK_DEVICES;
let _data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
if let Value::String(ref diff) = rpcenv["changes"] {
println!("{}", diff);
}
Ok(Value::Null)
}
pub fn network_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert(
"list",
CliCommand::new(&API_METHOD_LIST_NETWORK_DEVICES)
)
.insert(
"changes",
CliCommand::new(&API_METHOD_PENDING_NETWORK_CHANGES)
)
.insert(
"create",
CliCommand::new(&api2::node::network::API_METHOD_CREATE_INTERFACE)
.fixed_param("node", String::from("localhost"))
.arg_param(&["iface"])
.completion_cb("iface", config::network::complete_interface_name)
.completion_cb("bridge_ports", config::network::complete_port_list)
.completion_cb("slaves", config::network::complete_port_list)
)
.insert(
"update",
CliCommand::new(&api2::node::network::API_METHOD_UPDATE_INTERFACE)
.fixed_param("node", String::from("localhost"))
.arg_param(&["iface"])
.completion_cb("iface", config::network::complete_interface_name)
.completion_cb("bridge_ports", config::network::complete_port_list)
.completion_cb("slaves", config::network::complete_port_list)
)
.insert(
"remove",
CliCommand::new(&api2::node::network::API_METHOD_DELETE_INTERFACE)
.fixed_param("node", String::from("localhost"))
.arg_param(&["iface"])
.completion_cb("iface", config::network::complete_interface_name)
)
.insert(
"revert",
CliCommand::new(&api2::node::network::API_METHOD_REVERT_NETWORK_CONFIG)
.fixed_param("node", String::from("localhost"))
)
.insert(
"reload",
CliCommand::new(&api2::node::network::API_METHOD_RELOAD_NETWORK_CONFIG)
.fixed_param("node", String::from("localhost"))
);
cmd_def.into()
}

View File

@@ -0,0 +1,102 @@
use anyhow::Error;
use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List configured remotes.
fn list_remotes(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::remote::API_METHOD_LIST_REMOTES;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("name"))
.column(ColumnConfig::new("host"))
.column(ColumnConfig::new("userid"))
.column(ColumnConfig::new("fingerprint"))
.column(ColumnConfig::new("comment"));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
#[api(
input: {
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Show remote configuration
fn show_remote(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::remote::API_METHOD_READ_REMOTE;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options();
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn remote_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
.insert(
"show",
CliCommand::new(&API_METHOD_SHOW_REMOTE)
.arg_param(&["name"])
.completion_cb("name", config::remote::complete_remote_name)
)
.insert(
"create",
// fixme: howto handle password parameter?
CliCommand::new(&api2::config::remote::API_METHOD_CREATE_REMOTE)
.arg_param(&["name"])
)
.insert(
"update",
CliCommand::new(&api2::config::remote::API_METHOD_UPDATE_REMOTE)
.arg_param(&["name"])
.completion_cb("name", config::remote::complete_remote_name)
)
.insert(
"remove",
CliCommand::new(&api2::config::remote::API_METHOD_DELETE_REMOTE)
.arg_param(&["name"])
.completion_cb("name", config::remote::complete_remote_name)
);
cmd_def.into()
}

View File

@@ -0,0 +1,106 @@
use anyhow::Error;
use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Sync job list.
fn list_sync_jobs(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::sync::API_METHOD_LIST_SYNC_JOBS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("id"))
.column(ColumnConfig::new("store"))
.column(ColumnConfig::new("remote"))
.column(ColumnConfig::new("remote-store"))
.column(ColumnConfig::new("schedule"))
.column(ColumnConfig::new("comment"));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
#[api(
input: {
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Show sync job configuration
fn show_sync_job(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::config::sync::API_METHOD_READ_SYNC_JOB;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options();
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn sync_job_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_SYNC_JOBS))
.insert("show",
CliCommand::new(&API_METHOD_SHOW_SYNC_JOB)
.arg_param(&["id"])
.completion_cb("id", config::sync::complete_sync_job_id)
)
.insert("create",
CliCommand::new(&api2::config::sync::API_METHOD_CREATE_SYNC_JOB)
.arg_param(&["id"])
.completion_cb("id", config::sync::complete_sync_job_id)
.completion_cb("schedule", config::datastore::complete_calendar_event)
.completion_cb("store", config::datastore::complete_datastore_name)
.completion_cb("remote", config::remote::complete_remote_name)
.completion_cb("remote-store", crate::complete_remote_datastore_name)
)
.insert("update",
CliCommand::new(&api2::config::sync::API_METHOD_UPDATE_SYNC_JOB)
.arg_param(&["id"])
.completion_cb("id", config::sync::complete_sync_job_id)
.completion_cb("schedule", config::datastore::complete_calendar_event)
.completion_cb("store", config::datastore::complete_datastore_name)
.completion_cb("remote-store", crate::complete_remote_datastore_name)
)
.insert("remove",
CliCommand::new(&api2::config::sync::API_METHOD_DELETE_SYNC_JOB)
.arg_param(&["id"])
.completion_cb("id", config::sync::complete_sync_job_id)
);
cmd_def.into()
}

View File

@@ -0,0 +1,75 @@
use anyhow::Error;
use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use proxmox_backup::config;
use proxmox_backup::tools;
use proxmox_backup::api2;
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List configured users.
fn list_users(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::access::user::API_METHOD_LIST_USERS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("userid"))
.column(
ColumnConfig::new("enable")
.renderer(tools::format::render_bool_with_default_true)
)
.column(
ColumnConfig::new("expire")
.renderer(tools::format::render_epoch)
)
.column(ColumnConfig::new("firstname"))
.column(ColumnConfig::new("lastname"))
.column(ColumnConfig::new("email"))
.column(ColumnConfig::new("comment"));
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn user_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&&API_METHOD_LIST_USERS))
.insert(
"create",
// fixme: howto handle password parameter?
CliCommand::new(&api2::access::user::API_METHOD_CREATE_USER)
.arg_param(&["userid"])
)
.insert(
"update",
CliCommand::new(&api2::access::user::API_METHOD_UPDATE_USER)
.arg_param(&["userid"])
.completion_cb("userid", config::user::complete_user_name)
)
.insert(
"remove",
CliCommand::new(&api2::access::user::API_METHOD_DELETE_USER)
.arg_param(&["userid"])
.completion_cb("userid", config::user::complete_user_name)
);
cmd_def.into()
}

View File

@@ -1,6 +1,6 @@
extern crate proxmox_backup;
use failure::*;
use anyhow::{format_err, Error};
use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
@@ -499,12 +499,12 @@ fn main() {
let cmd_def = CliCommandMap::new()
.insert("create", CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
.arg_param(&["archive", "source", "exclude"])
.arg_param(&["archive", "source"])
.completion_cb("archive", tools::complete_file_name)
.completion_cb("source", tools::complete_file_name)
)
.insert("extract", CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
.arg_param(&["archive", "pattern"])
.arg_param(&["archive", "target"])
.completion_cb("archive", tools::complete_file_name)
.completion_cb("target", tools::complete_file_name)
.completion_cb("files-from", tools::complete_file_name)
@@ -519,5 +519,6 @@ fn main() {
.completion_cb("archive", tools::complete_file_name)
);
run_cli_command(cmd_def, None);
let rpcenv = CliEnvironment::new();
run_cli_command(cmd_def, rpcenv, None);
}

View File

@@ -2,7 +2,7 @@ extern crate proxmox_backup;
// also see https://www.johndcook.com/blog/standard_deviation/
use failure::*;
use anyhow::{Error};
use std::io::{Read, Write};
use proxmox_backup::backup::*;

View File

@@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use futures::*;
extern crate proxmox_backup;

View File

@@ -1,4 +1,4 @@
use failure::*;
use anyhow::{Error};
use proxmox_backup::client::*;

View File

@@ -29,3 +29,8 @@ pub use pxar_decode_writer::*;
mod backup_repo;
pub use backup_repo::*;
mod backup_specification;
pub use backup_specification::*;
pub mod pull;

Some files were not shown because too many files have changed in this diff.