Compare commits
73 Commits
7d07b73def
3d3670d786
14291179ce
e744de0eb0
98b1733760
fdac28fcec
653e2031d2
01ca99da2d
1c2f842a98
a4d1675513
2ab5acac5a
27fde64794
fa3f0584bb
d12720c796
a4e86972a4
3a3af6e2b6
482409641f
9688f6de0f
5b32820e93
f40b4fb05a
6e1deb158a
50ec1a8712
a74b026baa
7e42ccdaf2
e713ee5c56
ec5f9d3525
d0463b67ca
2ff4c2cd5f
c3b090ac8a
c47e294ea7
25455bd06d
c1c4a18f48
91f5594c08
86f6f74114
13d9fe3a6c
41e4388005
06a94edcf6
ef496e2c20
113c9b5981
956295cefe
a26c27c8e6
0c1c492d48
255ed62166
b96b11cdb7
faa8e6948a
8314ca9c10
538c2b6dcf
e9b44bec01
65418a0763
aef4976801
295d4f4116
c47a900ceb
1b1110581a
eb13d9151a
449e4a66fe
217c22c754
ba5b8a3e76
ac5e9e770b
b25deec0be
cdf1da2872
3cfc56f5c2
37e53b4c07
77d634710e
5c5181a252
67042466e8
757d0ccc76
4a55fa87d5
032cd1b862
ec2434fe3c
34389132d9
78ee20d72d
601e42ac35
e1897b363b
.gitignore (vendored): 1 change

@@ -3,3 +3,4 @@ local.mak
 **/*.rs.bk
 /etc/proxmox-backup.service
 /etc/proxmox-backup-proxy.service
+build/
Cargo.toml: 13 changes

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.6"
+version = "0.8.8"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -14,6 +14,7 @@ name = "proxmox_backup"
 path = "src/lib.rs"

 [dependencies]
+apt-pkg-native = "0.3.1" # custom patched version
 base64 = "0.12"
 bitflags = "1.2.1"
 bytes = "0.5"
@@ -38,11 +39,11 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.2.0", features = [ "sortable-macro", "api-macro" ] }
+proxmox = { version = "0.2.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
-#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
-pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+pxar = { version = "0.2.1", features = [ "tokio-io", "futures-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
@@ -50,11 +51,11 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "0.2.9", features = [ "blocking", "fs", "io-util", "macros", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
+tokio = { version = "0.2.9", features = [ "blocking", "fs", "dns", "io-util", "macros", "process", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
 tokio-openssl = "0.4.0"
 tokio-util = { version = "0.3", features = [ "codec" ] }
 tower-service = "0.3.0"
-udev = "0.3"
+udev = ">= 0.3, <0.5"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
Makefile: 13 changes

@@ -60,7 +60,7 @@ $(SUBDIRS):
 test:
 	#cargo test test_broadcast_future
 	#cargo test $(CARGO_BUILD_ARGS)
-	#$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
+	$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)

 doc:
 	$(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)
@@ -80,18 +80,21 @@ build:


 .PHONY: proxmox-backup-docs
-proxmox-backup-docs: $(DOC_DEB)
-$(DOC_DEB): build
+$(DOC_DEB) $(DEBS): proxmox-backup-docs
+proxmox-backup-docs: build
 	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
 	lintian $(DOC_DEB)

 # copy the local target/ dir as a build-cache
 .PHONY: deb
-deb: $(DEBS)
-$(DEBS): build
+$(DEBS): deb
+deb: build
 	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
 	lintian $(DEBS)

+.PHONY: deb-all
+deb-all: $(DOC_DEB) $(DEBS)
+
 .PHONY: dsc
 dsc: $(DSC)
 $(DSC): build
debian/changelog (vendored): 60 changes

@@ -1,3 +1,63 @@
+rust-proxmox-backup (0.8.8-1) unstable; urgency=medium
+
+  * pxar: .pxarexclude: match behavior from absolute paths to the one described
+    in the documentation and use byte based paths
+
+  * catalog shell: add exit command
+
+  * manifest: revert signature canonicalization to old behaviour. Fallout from
+    encrypted older backups is expected and was ignored due to the beta status
+    of Proxmox Backup.
+
+  * documentation: various improvements and additions
+
+  * cached user info: print privilege path in error message
+
+  * docs: fix #2851 Add note about GC grace period
+
+  * api2/status: fix datastore full estimation bug if there where (almost) no
+    change for several days
+
+  * schedules, calendar event: support the 'weekly' special expression
+
+  * ui: sync job: group remote fields and use "Source" in labels
+
+  * ui: add calendar event selector
+
+  * ui: sync job: change default to false for "remove-vanished" for new jobs
+
+  * fix #2860: skip in-progress snapshots when syncing
+
+  * fix #2865: detect and skip vanished snapshots
+
+  * fix #2871: close FDs when scanning backup group, avoid leaking
+
+  * backup: list images: handle walkdir error, catch "lost+found" special
+    directory
+
+  * implement AsyncSeek for AsyncIndexReader
+
+  * client: rework logging upload info like size or bandwidth
+
+  * client writer: do not output chunklist for now on verbose=true
+
+  * add initial API for listing available updates and updating the APT
+    database
+
+  * ui: add xterm.js console implementation
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 23 Jul 2020 12:16:05 +0200
+
+rust-proxmox-backup (0.8.7-2) unstable; urgency=medium
+
+  * support restoring file attributes from pxar archives
+
+  * docs: additions and fixes
+
+  * ui: running tasks: update limit to 100
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 14 Jul 2020 12:05:25 +0200
+
 rust-proxmox-backup (0.8.6-1) unstable; urgency=medium

   * ui: add button for easily showing the server fingerprint dashboard
debian/control.in (vendored): 1 change

@@ -7,6 +7,7 @@ Depends: fonts-font-awesome,
         proxmox-backup-docs,
         proxmox-mini-journalreader,
         proxmox-widget-toolkit (>= 2.2-4),
+        pve-xtermjs (>= 4.7.0-1),
         smartmontools,
         ${misc:Depends},
         ${shlibs:Depends},
debian/lintian-overrides (vendored): 1 change

@@ -1 +1,2 @@
 proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
+proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target
@@ -139,6 +139,12 @@ or ``zfs``) to store the backup data.

 Datastores are identified by a simple *ID*. You can configure it
 when setting up the backup server.

+.. note:: The `File Layout`_ requires the file system to support at least *65538*
+   subdirectories per directory. That number comes from the 2\ :sup:`16`
+   pre-created chunk namespace directories, and the ``.`` and ``..`` default
+   directory entries. This requirement excludes certain filesystems and
+   filesystem configuration from being supported for a datastore. For example,
+   ``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.


 Datastore Configuration
@@ -148,7 +154,7 @@ You can configure multiple datastores. Minimum one datastore needs to be
 configured. The datastore is identified by a simple `name` and points to a
 directory on the filesystem. Each datastore also has associated retention
 settings of how many backup snapshots for each interval of ``hourly``,
-``daily``, ``weekly``, ``monthly``, ``yearly`` as well as an time independent
+``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
 number of backups to keep in that store. :ref:`Pruning <pruning>` and
 :ref:`garbage collection <garbage-collection>` can also be configured to run
 periodically based on a configured :term:`schedule` per datastore.
@@ -372,20 +378,20 @@ following roles exist:
 :term:`Remote`
 ~~~~~~~~~~~~~~

-A remote is a different Proxmox Backup Server installation and a user on that
+A remote refers to a separate Proxmox Backup Server installation and a user on that
 installation, from which you can `sync` datastores to a local datastore with a
 `Sync Job`.

-For adding a remote you need its hostname or ip, a userid and password on the
-remote and its certificate fingerprint to add it. To get the fingerprint use
-the ``proxmox-backup-manager cert info`` command on the remote.
+To add a remote, you need its hostname or ip, a userid and password on the
+remote, and its certificate fingerprint. To get the fingerprint, use the
+``proxmox-backup-manager cert info`` command on the remote.

 .. code-block:: console

   # proxmox-backup-manager cert info |grep Fingerprint
   Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe

-With the needed information add the remote with:
+Using the information specified above, add the remote with:

 .. code-block:: console

@@ -433,8 +439,8 @@ Backup Client usage
 The command line client is called :command:`proxmox-backup-client`.


-Respository Locations
-~~~~~~~~~~~~~~~~~~~~~
+Repository Locations
+~~~~~~~~~~~~~~~~~~~~

 The client uses the following notation to specify a datastore repository
 on the backup server.
@@ -541,7 +547,7 @@ environment variable ``PBS_REPOSITORY``.

 .. code-block:: console

-  # export PBS_REPOSTORY=backup-server:store1
+  # export PBS_REPOSITORY=backup-server:store1

 After this you can execute all commands without specifying the ``--repository``
 option.
@@ -594,17 +600,17 @@ the given patterns. It is only possible to match files in this directory and its
 all files ending in ``.tmp`` within the directory or subdirectories with the
 following pattern ``**/*.tmp``.
 ``[...]`` matches a single character from any of the provided characters within
-the brackets. ``[!...]`` does the complementary and matches any singe character
+the brackets. ``[!...]`` does the complementary and matches any single character
 not contained within the brackets. It is also possible to specify ranges with two
 characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
 alphabetic character and ``[0-9]`` matches any one single digit.

-The order of the glob match patterns defines if a file is included or
-excluded, later entries win over previous ones.
+The order of the glob match patterns defines whether a file is included or
+excluded, that is to say later entries override previous ones.
 This is also true for match patterns encountered deeper down the directory tree,
 which can override a previous exclusion.
 Be aware that excluded directories will **not** be read by the backup client.
-A ``.pxarexclude`` file in a subdirectory will have no effect.
+Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
 ``.pxarexclude`` files are treated as regular files and will be included in the
 backup archive.
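The "later entries override previous ones" rule in the hunk above is easy to get wrong, so here is a small standalone sketch of the semantics. This is a hypothetical toy matcher, not the pxar implementation; it only checks path suffixes instead of full glob syntax, but the ordering logic is the same:

// Toy matcher illustrating .pxarexclude ordering; not the pxar code.
#[derive(Clone, Copy)]
enum Rule<'a> {
    Exclude(&'a str),
    Include(&'a str), // a leading '!' in .pxarexclude re-includes a path
}

fn is_excluded(path: &str, rules: &[Rule]) -> bool {
    let mut excluded = false;
    for rule in rules {
        match rule {
            Rule::Exclude(suffix) if path.ends_with(suffix) => excluded = true,
            Rule::Include(suffix) if path.ends_with(suffix) => excluded = false,
            _ => {}
        }
    }
    excluded // the last matching entry decided the outcome
}

fn main() {
    let rules = [Rule::Exclude(".tmp"), Rule::Include("keep.tmp")];
    assert!(is_excluded("scratch/a.tmp", &rules));
    assert!(!is_excluded("scratch/keep.tmp", &rules)); // later entry wins
    println!("ordering semantics verified");
}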
@@ -656,8 +662,8 @@ Restoring this backup will result in:
 Encryption
 ^^^^^^^^^^

-Proxmox backup supports client side encryption with AES-256 in GCM_
-mode. First you need to create an encryption key:
+Proxmox Backup supports client-side encryption with AES-256 in GCM_
+mode. To set this up, you first need to create an encryption key:

 .. code-block:: console

@@ -689,13 +695,13 @@ variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
 Restoring Data
 ~~~~~~~~~~~~~~

-The regular creation of backups is a necessary step to avoid data
-loss. More important, however, is the restoration. It is good practice to perform
+The regular creation of backups is a necessary step to avoiding data
+loss. More importantly, however, is the restoration. It is good practice to perform
 periodic recovery tests to ensure that you can access the data in
 case of problems.

 First, you need to find the snapshot which you want to restore. The snapshot
-command gives a list of all snapshots on the server:
+command provides a list of all the snapshots on the server:

 .. code-block:: console

@@ -727,8 +733,8 @@ backup.

   # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/

-To get the contents of any archive you can restore the ``ìndex.json`` file in the
-repository and restore it to '-'. This will dump the content to the standard output.
+To get the contents of any archive, you can restore the ``ìndex.json`` file in the
+repository to the target path '-'. This will dump the contents to the standard output.

 .. code-block:: console

@@ -765,7 +771,7 @@ working directory and list directory contents in the archive.
 ``pwd`` shows the full path of the current working directory with respect to the
 archive root.

-Being able to quickly search the contents of the archive is a often needed feature.
+Being able to quickly search the contents of the archive is a commmonly needed feature.
 That's where the catalog is most valuable.
 For example:

@@ -814,10 +820,10 @@ file archive as a read-only filesystem to a mountpoint on your host.
   bin dev home lib32 libx32 media opt root sbin sys usr
   boot etc lib lib64 lost+found mnt proc run srv tmp var

-This allows you to access the full content of the archive in a seamless manner.
+This allows you to access the full contents of the archive in a seamless manner.

 .. note:: As the FUSE connection needs to fetch and decrypt chunks from the
-   backup servers datastore, this can cause some additional network and CPU
+   backup server's datastore, this can cause some additional network and CPU
    load on your host, depending on the operations you perform on the mounted
    filesystem.

@@ -914,7 +920,7 @@ backup is retained.


 You can use the ``--dry-run`` option to test your settings. This only
-shows the list of existing snapshots and which action prune would take.
+shows the list of existing snapshots and what actions prune would take.

 .. code-block:: console

@@ -956,6 +962,17 @@ unused data blocks are removed.
    depending on the number of chunks and the speed of the underlying
    disks.

+.. note:: The garbage collection will only remove chunks that haven't been used
+   for at least one day (exactly 24h 5m). This grace period is necessary because
+   chunks in use are marked by touching the chunk which updates the ``atime``
+   (access time) property. Filesystems are mounted with the ``relatime`` option
+   by default. This results in a better performance by only updating the
+   ``atime`` property if the last access has been at least 24 hours ago. The
+   downside is, that touching a chunk within these 24 hours will not always
+   update its ``atime`` property.
+
+   Chunks in the grace period will be logged at the end of the garbage
+   collection task as *Pending removals*.

 .. code-block:: console
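As a minimal sketch of the grace-period rule from the note above (not PBS code): a chunk may only be swept once its atime is older than 24h 5m, and std::fs::Metadata::accessed() is enough to express the check.

use std::time::{Duration, SystemTime};

// Hypothetical helper, not taken from the PBS source: returns true if a
// chunk file's atime is older than the 24h 5m grace period, i.e. the
// chunk would be eligible for removal by garbage collection.
fn past_grace_period(chunk_path: &std::path::Path) -> std::io::Result<bool> {
    let grace = Duration::from_secs(24 * 3600 + 5 * 60); // 24h 5m
    let atime = std::fs::metadata(chunk_path)?.accessed()?;
    let age = SystemTime::now()
        .duration_since(atime)
        .unwrap_or(Duration::from_secs(0)); // a future atime counts as in use
    Ok(age > grace)
}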
@@ -13,7 +13,7 @@
 .. _Proxmox: https://www.proxmox.com
 .. _Proxmox Community Forum: https://forum.proxmox.com
 .. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
-.. _Proxmox Backup: https://www.proxmox.com/proxmox-backup
+.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page // FIXME
 .. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
 .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
 .. _Rust: https://www.rust-lang.org/
@@ -16,7 +16,7 @@ Glossary
 Datastore

   A place to store backups. A directory which contains the backup data.
-  The current implemenation is file-system based.
+  The current implementation is file-system based.

 `Rust`_
@@ -83,6 +83,10 @@ In general this is not trivial, especially when LVM_ or ZFS_ is used.

 The network configuration is completely up to you as well.

+.. note:: You can access the webinterface of the Proxmox Backup Server with
+   your web browser, using HTTPS on port 8007. For example at
+   ``https://<ip-or-dns-name>:8007``
+
 Install Proxmox Backup server on `Proxmox VE`_
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -99,6 +103,10 @@ After configuring the
 server to store backups. Should the hypervisor server fail, you can
 still access the backups.

+.. note:: You can access the webinterface of the Proxmox Backup Server with
+   your web browser, using HTTPS on port 8007. For example at
+   ``https://<ip-or-dns-name>:8007``
+
 Client installation
 -------------------
@@ -4,17 +4,17 @@ Introduction
 What is Proxmox Backup Server
 -----------------------------

-Proxmox Backup Server is an enterprise-class client-server backup software that
-backups :term:`virtual machine`\ s, :term:`container`\ s, and physical hosts.
-It is specially optimized for the `Proxmox Virtual Environment`_ platform and
-allows you to backup your data securely, even between remote sites, providing
-easy management with a web-based user interface.
+Proxmox Backup Server is an enterprise-class, client-server backup software
+package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
+physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
+platform and allows you to back up your data securely, even between remote
+sites, providing easy management with a web-based user interface.

 Proxmox Backup Server supports deduplication, compression, and authenticated
-encryption (AE_). Using :term:`Rust` as implementation language guarantees high
-performance, low resource usage, and a safe, high quality code base.
+encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
+performance, low resource usage, and a safe, high-quality codebase.

-It features strong encryption done on the client side. Thus, it's possible to
+It features strong client-side encryption. Thus, it's possible to
 backup data to not fully trusted targets.


@@ -63,7 +63,7 @@ Main Features
    several gigabytes of data per second.

 :Encryption: Backups can be encrypted on the client-side using AES-256 in
-   Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mde
+   Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
    provides very high performance on modern hardware.

 :Web interface: Manage the Proxmox Backup Server with the integrated web-based
@@ -113,7 +113,7 @@ Proxmox Backup Server consists of multiple components:
 * client CLI tool (`proxmox-backup-client`) to access the server easily from
   any `Linux amd64` environment.

-Everything besides the web interface are written in the Rust programming
+Everything outside of the web interface is written in the Rust programming
 language.

 "The Rust programming language helps you write faster, more reliable software.
@@ -24,7 +24,6 @@ General ZFS advantages
 * Self healing
 * Continuous integrity checking
 * Designed for high storage capacities
-* Protection against data corruption
 * Asynchronous replication over network
 * Open Source
 * Encryption
@@ -33,6 +33,46 @@ During the Proxmox Backup beta phase only one repository (pbstest) will be
 available. Once released, a Enterprise repository for production use and a
 no-subscription repository will be provided.

+SecureApt
+~~~~~~~~~
+
+The `Release` files in the repositories are signed with GnuPG. APT is using
+these signatures to verify that all packages are from a trusted source.
+
+If you install Proxmox Backup Server from an official ISO image, the key for
+verification is already installed.
+
+If you install Proxmox Backup Server on top of Debian, download and install the
+key with the following commands:
+
+.. code-block:: console
+
+ # wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Verify the SHA512 checksum afterwards with:
+
+.. code-block:: console
+
+ # sha512sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+The output should be:
+
+.. code-block:: console
+
+ acca6f416917e8e11490a08a1e2842d500b3a5d9f322c6319db0927b2901c3eae23cfb5cd5df6facf2b57399d3cfa52ad7769ebdd75d9b204549ca147da52626 /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+and the md5sum:
+
+.. code-block:: console
+
+ # md5sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Here, the output should be:
+
+.. code-block:: console
+
+ f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
 .. comment
     `Proxmox Backup`_ Enterprise Repository
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -24,7 +24,7 @@ This daemon is normally started and managed as ``systemd`` service::

   systemctl status proxmox-backup-proxy

-For debugging, you can start the daemon in forground using::
+For debugging, you can start the daemon in foreground using::

   proxmox-backup-proxy
@@ -2,7 +2,7 @@ use anyhow::{Error};

 use proxmox_backup::client::*;

-async fn upload_speed() -> Result<usize, Error> {
+async fn upload_speed() -> Result<f64, Error> {

     let host = "localhost";
     let datastore = "store2";
@@ -20,7 +20,7 @@ async fn upload_speed() -> Result<usize, Error> {
     let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;

     println!("start upload speed test");
-    let res = client.upload_speedtest().await?;
+    let res = client.upload_speedtest(true).await?;

     Ok(res)
 }
@@ -4,7 +4,6 @@ pub mod backup;
 pub mod config;
 pub mod node;
 pub mod reader;
-mod subscription;
 pub mod status;
 pub mod types;
 pub mod version;
@@ -26,7 +25,6 @@ pub const SUBDIRS: SubdirMap = &[
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
     ("status", &status::ROUTER),
-    ("subscription", &subscription::ROUTER),
     ("version", &version::ROUTER),
 ];
@@ -13,15 +13,22 @@ use crate::auth_helpers::*;
 use crate::api2::types::*;

 use crate::config::cached_user_info::CachedUserInfo;
-use crate::config::acl::PRIV_PERMISSIONS_MODIFY;
+use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};

 pub mod user;
 pub mod domain;
 pub mod acl;
 pub mod role;

-fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
+/// returns Ok(true) if a ticket has to be created
+/// and Ok(false) if not
+fn authenticate_user(
+    username: &str,
+    password: &str,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<bool, Error> {
     let user_info = CachedUserInfo::new()?;

     if !user_info.is_active_user(&username) {
@@ -33,14 +40,43 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
     if password.starts_with("PBS:") {
         if let Ok((_age, Some(ticket_username))) = tools::ticket::verify_rsa_ticket(public_auth_key(), "PBS", password, None, -300, ticket_lifetime) {
             if ticket_username == username {
-                return Ok(());
+                return Ok(true);
             } else {
                 bail!("ticket login failed - wrong username");
             }
         }
+    } else if password.starts_with("PBSTERM:") {
+        if path.is_none() || privs.is_none() || port.is_none() {
+            bail!("cannot check termnal ticket without path, priv and port");
+        }

-    crate::auth::authenticate_user(username, password)
+        let path = path.unwrap();
+        let privilege_name = privs.unwrap();
+        let port = port.unwrap();
+
+        if let Ok((_age, _data)) =
+            tools::ticket::verify_term_ticket(public_auth_key(), &username, &path, port, password)
+        {
+            for (name, privilege) in PRIVILEGES {
+                if *name == privilege_name {
+                    let mut path_vec = Vec::new();
+                    for part in path.split('/') {
+                        if part != "" {
+                            path_vec.push(part);
+                        }
+                    }
+
+                    user_info.check_privs(username, &path_vec, *privilege, false)?;
+                    return Ok(false);
+                }
+            }
+
+            bail!("No such privilege");
+        }
+    }
+
+    let _ = crate::auth::authenticate_user(username, password)?;
+    Ok(true)
 }

 #[api(
@@ -52,6 +88,21 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
             password: {
                 schema: PASSWORD_SCHEMA,
             },
+            path: {
+                type: String,
+                description: "Path for verifying terminal tickets.",
+                optional: true,
+            },
+            privs: {
+                type: String,
+                description: "Privilege for verifying terminal tickets.",
+                optional: true,
+            },
+            port: {
+                type: Integer,
+                description: "Port for verifying terminal tickets.",
+                optional: true,
+            },
         },
     },
     returns: {
@@ -78,11 +129,16 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
 /// Create or verify authentication ticket.
 ///
 /// Returns: An authentication ticket with additional infos.
-fn create_ticket(username: String, password: String) -> Result<Value, Error> {
-    match authenticate_user(&username, &password) {
-        Ok(_) => {
-
-            let ticket = assemble_rsa_ticket( private_auth_key(), "PBS", Some(&username), None)?;
+fn create_ticket(
+    username: String,
+    password: String,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<Value, Error> {
+    match authenticate_user(&username, &password, path, privs, port) {
+        Ok(true) => {
+            let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some(&username), None)?;

             let token = assemble_csrf_prevention_token(csrf_secret(), &username);

@@ -94,6 +150,9 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
                 "CSRFPreventionToken": token,
             }))
         }
+        Ok(false) => Ok(json!({
+            "username": username,
+        })),
         Err(err) => {
             let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
             log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
@@ -6,7 +6,12 @@ use proxmox::http_err;

 pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
     let file = tokio::fs::File::open(path.clone())
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
+        .map_err(move |err| {
+            match err.kind() {
+                std::io::ErrorKind::NotFound => http_err!(NOT_FOUND, format!("open file {:?} failed - not found", path.clone())),
+                _ => http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)),
+            }
+        })
         .await?;

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
src/api2/node.rs: 286 changes

@@ -1,18 +1,282 @@
-use proxmox::api::router::{Router, SubdirMap};
-use proxmox::list_subdirs_api_method;
+use std::net::TcpListener;
+use std::os::unix::io::AsRawFd;

-pub mod tasks;
-mod time;
-pub mod network;
+use anyhow::{bail, format_err, Error};
+use futures::{
+    future::{FutureExt, TryFutureExt},
+    try_join,
+};
+use hyper::body::Body;
+use hyper::http::request::Parts;
+use hyper::upgrade::Upgraded;
+use nix::fcntl::{fcntl, FcntlArg, FdFlag};
+use serde_json::{json, Value};
+use tokio::io::{AsyncBufReadExt, BufReader};
+
+use proxmox::api::router::{Router, SubdirMap};
+use proxmox::api::{
+    api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment,
+};
+use proxmox::list_subdirs_api_method;
+use proxmox::tools::websocket::WebSocket;
+use proxmox::{identity, sortable};
+
+use crate::api2::types::*;
+use crate::config::acl::PRIV_SYS_CONSOLE;
+use crate::server::WorkerTask;
+use crate::tools;
+
+pub mod disks;
 pub mod dns;
-mod syslog;
 mod journal;
+pub mod network;
+pub(crate) mod rrd;
 mod services;
 mod status;
-pub(crate) mod rrd;
-pub mod disks;
 mod subscription;
+mod apt;
+mod syslog;
+pub mod tasks;
+mod time;
+
+pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.")
+    .format(&ApiStringFormat::Enum(&[
+        EnumEntry::new("login", "Login"),
+        EnumEntry::new("upgrade", "Upgrade"),
+    ]))
+    .schema();
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            cmd: {
+                schema: SHELL_CMD_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        type: Object,
+        description: "Object with the user, ticket, port and upid",
+        properties: {
+            user: {
+                description: "",
+                type: String,
+            },
+            ticket: {
+                description: "",
+                type: String,
+            },
+            port: {
+                description: "",
+                type: String,
+            },
+            upid: {
+                description: "",
+                type: String,
+            },
+        }
+    },
+    access: {
+        description: "Restricted to users on realm 'pam'",
+        permission: &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
+    }
+)]
+/// Call termproxy and return shell ticket
+async fn termproxy(
+    cmd: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let userid = rpcenv
+        .get_user()
+        .ok_or_else(|| format_err!("unknown user"))?;
+    let (username, realm) = crate::auth::parse_userid(&userid)?;
+
+    if realm != "pam" {
+        bail!("only pam users can use the console");
+    }
+
+    let path = "/system";
+
+    // use port 0 and let the kernel decide which port is free
+    let listener = TcpListener::bind("localhost:0")?;
+    let port = listener.local_addr()?.port();
+
+    let ticket = tools::ticket::assemble_term_ticket(
+        crate::auth_helpers::private_auth_key(),
+        &userid,
+        &path,
+        port,
+    )?;
+
+    let mut command = Vec::new();
+    match cmd.as_ref().map(|x| x.as_str()) {
+        Some("login") | None => {
+            command.push("login");
+            if userid == "root@pam" {
+                command.push("-f");
+                command.push("root");
+            }
+        }
+        Some("upgrade") => {
+            if userid != "root@pam" {
+                bail!("only root@pam can upgrade");
+            }
+            // TODO: add nicer/safer wrapper like in PVE instead
+            command.push("sh");
+            command.push("-c");
+            command.push("apt full-upgrade; bash -l");
+        }
+        _ => bail!("invalid command"),
+    };
+
+    let upid = WorkerTask::spawn(
+        "termproxy",
+        None,
+        &username,
+        false,
+        move |worker| async move {
+            // move inside the worker so that it survives and does not close the port
+            // remove CLOEXEC from listenere so that we can reuse it in termproxy
+            let fd = listener.as_raw_fd();
+            let mut flags = match fcntl(fd, FcntlArg::F_GETFD) {
+                Ok(bits) => FdFlag::from_bits_truncate(bits),
+                Err(err) => bail!("could not get fd: {}", err),
+            };
+            flags.remove(FdFlag::FD_CLOEXEC);
+            if let Err(err) = fcntl(fd, FcntlArg::F_SETFD(flags)) {
+                bail!("could not set fd: {}", err);
+            }
+
+            let mut arguments: Vec<&str> = Vec::new();
+            let fd_string = fd.to_string();
+            arguments.push(&fd_string);
+            arguments.extend_from_slice(&[
+                "--path",
+                &path,
+                "--perm",
+                "Sys.Console",
+                "--authport",
+                "82",
+                "--port-as-fd",
+                "--",
+            ]);
+            arguments.extend_from_slice(&command);
+
+            let mut cmd = tokio::process::Command::new("/usr/bin/termproxy");
+
+            cmd.args(&arguments);
+            cmd.stdout(std::process::Stdio::piped());
+            cmd.stderr(std::process::Stdio::piped());
+
+            let mut child = cmd.spawn().expect("error executing termproxy");
+
+            let stdout = child.stdout.take().expect("no child stdout handle");
+            let stderr = child.stderr.take().expect("no child stderr handle");
+
+            let worker_stdout = worker.clone();
+            let stdout_fut = async move {
+                let mut reader = BufReader::new(stdout).lines();
+                while let Some(line) = reader.next_line().await? {
+                    worker_stdout.log(line);
+                }
+                Ok(())
+            };
+
+            let worker_stderr = worker.clone();
+            let stderr_fut = async move {
+                let mut reader = BufReader::new(stderr).lines();
+                while let Some(line) = reader.next_line().await? {
+                    worker_stderr.warn(line);
+                }
+                Ok(())
+            };
+
+            let (exit_code, _, _) = try_join!(child, stdout_fut, stderr_fut)?;
+            if !exit_code.success() {
+                match exit_code.code() {
+                    Some(code) => bail!("termproxy exited with {}", code),
+                    None => bail!("termproxy exited by signal"),
+                }
+            }
+
+            Ok(())
+        },
+    )?;
+
+    Ok(json!({
+        "user": username,
+        "ticket": ticket,
+        "port": port,
+        "upid": upid,
+    }))
+}
+
+#[sortable]
+pub const API_METHOD_WEBSOCKET: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&upgrade_to_websocket),
+    &ObjectSchema::new(
+        "Upgraded to websocket",
+        &sorted!([
+            ("node", false, &NODE_SCHEMA),
+            (
+                "vncticket",
+                false,
+                &StringSchema::new("Terminal ticket").schema()
+            ),
+            ("port", false, &IntegerSchema::new("Terminal port").schema()),
+        ]),
+    ),
+)
+.access(
+    Some("The user needs Sys.Console on /system."),
+    &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
+);
+
+fn upgrade_to_websocket(
+    parts: Parts,
+    req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+    async move {
+        let username = rpcenv.get_user().unwrap();
+        let ticket = tools::required_string_param(&param, "vncticket")?.to_owned();
+        let port: u16 = tools::required_integer_param(&param, "port")? as u16;
+
+        // will be checked again by termproxy
+        tools::ticket::verify_term_ticket(
+            crate::auth_helpers::public_auth_key(),
+            &username,
+            &"/system",
+            port,
+            &ticket,
+        )?;
+
+        let (ws, response) = WebSocket::new(parts.headers)?;
+
+        tokio::spawn(async move {
+            let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await {
+                Ok(upgraded) => upgraded,
+                _ => bail!("error"),
+            };
+
+            let local = tokio::net::TcpStream::connect(format!("localhost:{}", port)).await?;
+            ws.serve_connection(conn, local).await
+        });
+
+        Ok(response)
+    }
+    .boxed()
+}
+
 pub const SUBDIRS: SubdirMap = &[
+    ("apt", &apt::ROUTER),
     ("disks", &disks::ROUTER),
     ("dns", &dns::ROUTER),
     ("journal", &journal::ROUTER),
@@ -20,9 +284,15 @@ pub const SUBDIRS: SubdirMap = &[
     ("rrd", &rrd::ROUTER),
     ("services", &services::ROUTER),
     ("status", &status::ROUTER),
+    ("subscription", &subscription::ROUTER),
     ("syslog", &syslog::ROUTER),
     ("tasks", &tasks::ROUTER),
+    ("termproxy", &Router::new().post(&API_METHOD_TERMPROXY)),
     ("time", &time::ROUTER),
+    (
+        "vncwebsocket",
+        &Router::new().upgrade(&API_METHOD_WEBSOCKET),
+    ),
 ];

 pub const ROUTER: Router = Router::new()
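The CLOEXEC handling inside the termproxy worker above is subtle: Rust opens sockets with FD_CLOEXEC set, so the flag has to be cleared before the listener fd can survive the exec of the termproxy child. A standalone sketch of just that step, using the same nix calls as the diff (illustrative, not the committed code):

use std::net::TcpListener;
use std::os::unix::io::AsRawFd;

use nix::fcntl::{fcntl, FcntlArg, FdFlag};

// Clear FD_CLOEXEC on a listener so a spawned child process can inherit
// and reuse the already-bound socket (here: termproxy via --port-as-fd).
fn make_inheritable(listener: &TcpListener) -> nix::Result<()> {
    let fd = listener.as_raw_fd();
    let mut flags = FdFlag::from_bits_truncate(fcntl(fd, FcntlArg::F_GETFD)?);
    flags.remove(FdFlag::FD_CLOEXEC);
    fcntl(fd, FcntlArg::F_SETFD(flags))?;
    Ok(())
}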
src/api2/node/apt.rs: 268 additions (new file)

@@ -0,0 +1,268 @@
+use apt_pkg_native::Cache;
+use anyhow::{Error, bail};
+use serde_json::{json, Value};
+
+use proxmox::{list_subdirs_api_method, const_regex};
+use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox::api::router::{Router, SubdirMap};
+
+use crate::server::WorkerTask;
+
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
+
+const_regex! {
+    VERSION_EPOCH_REGEX = r"^\d+:";
+    FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
+}
+
+// FIXME: Replace with call to 'apt changelog <pkg> --print-uris'. Currently
+// not possible as our packages do not have a URI set in their Release file
+fn get_changelog_url(
+    package: &str,
+    filename: &str,
+    source_pkg: &str,
+    version: &str,
+    source_version: &str,
+    origin: &str,
+    component: &str,
+) -> Result<String, Error> {
+    if origin == "" {
+        bail!("no origin available for package {}", package);
+    }
+
+    if origin == "Debian" {
+        let source_version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(source_version, "");
+
+        let prefix = if source_pkg.starts_with("lib") {
+            source_pkg.get(0..4)
+        } else {
+            source_pkg.get(0..1)
+        };
+
+        let prefix = match prefix {
+            Some(p) => p,
+            None => bail!("cannot get starting characters of package name '{}'", package)
+        };
+
+        // note: security updates seem to not always upload a changelog for
+        // their package version, so this only works *most* of the time
+        return Ok(format!("https://metadata.ftp-master.debian.org/changelogs/main/{}/{}/{}_{}_changelog",
+                          prefix, source_pkg, source_pkg, source_version));
+
+    } else if origin == "Proxmox" {
+        let version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(version, "");
+
+        let base = match (FILENAME_EXTRACT_REGEX.regex_obj)().captures(filename) {
+            Some(captures) => {
+                let base_capture = captures.get(1);
+                match base_capture {
+                    Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
+                    None => bail!("incompatible filename, cannot find regex group")
+                }
+            },
+            None => bail!("incompatible filename, doesn't match regex")
+        };
+
+        return Ok(format!("http://download.proxmox.com/{}/{}_{}.changelog",
+                          base, package, version));
+    }
+
+    bail!("unknown origin ({}) or component ({})", origin, component)
+}
+
+fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
+    -> Vec<APTUpdateInfo> {
+
+    let mut ret = Vec::new();
+
+    // note: this is not an 'apt update', it just re-reads the cache from disk
+    let mut cache = Cache::get_singleton();
+    cache.reload();
+
+    let mut cache_iter = cache.iter();
+
+    loop {
+        let view = match cache_iter.next() {
+            Some(view) => view,
+            None => break
+        };
+
+        let current_version = match view.current_version() {
+            Some(vers) => vers,
+            None => continue
+        };
+        let candidate_version = match view.candidate_version() {
+            Some(vers) => vers,
+            // if there's no candidate (i.e. no update) get info of currently
+            // installed version instead
+            None => current_version.clone()
+        };
+
+        let package = view.name();
+        if filter(&package, &current_version, &candidate_version) {
+            let mut origin_res = "unknown".to_owned();
+            let mut section_res = "unknown".to_owned();
+            let mut priority_res = "unknown".to_owned();
+            let mut change_log_url = "".to_owned();
+            let mut short_desc = package.clone();
+            let mut long_desc = "".to_owned();
+
+            // get additional information via nested APT 'iterators'
+            let mut view_iter = view.versions();
+            while let Some(ver) = view_iter.next() {
+                if ver.version() == candidate_version {
+                    if let Some(section) = ver.section() {
+                        section_res = section;
+                    }
+
+                    if let Some(prio) = ver.priority_type() {
+                        priority_res = prio;
+                    }
+
+                    // assume every package has only one origin file (not
+                    // origin, but origin *file*, for some reason those seem to
+                    // be different concepts in APT)
+                    let mut origin_iter = ver.origin_iter();
+                    let origin = origin_iter.next();
+                    if let Some(origin) = origin {
+
+                        if let Some(sd) = origin.short_desc() {
+                            short_desc = sd;
+                        }
+
+                        if let Some(ld) = origin.long_desc() {
+                            long_desc = ld;
+                        }
+
+                        // the package files appear in priority order, meaning
+                        // the one for the candidate version is first
+                        let mut pkg_iter = origin.file();
+                        let pkg_file = pkg_iter.next();
+                        if let Some(pkg_file) = pkg_file {
+                            if let Some(origin_name) = pkg_file.origin() {
+                                origin_res = origin_name;
+                            }
+
+                            let filename = pkg_file.file_name();
+                            let source_pkg = ver.source_package();
+                            let source_ver = ver.source_version();
+                            let component = pkg_file.component();
+
+                            // build changelog URL from gathered information
+                            // ignore errors, use empty changelog instead
+                            let url = get_changelog_url(&package, &filename, &source_pkg,
+                                &candidate_version, &source_ver, &origin_res, &component);
+                            if let Ok(url) = url {
+                                change_log_url = url;
+                            }
+                        }
+                    }
+
+                    break;
+                }
+            }
+
+            let info = APTUpdateInfo {
+                package,
+                title: short_desc,
+                arch: view.arch(),
+                description: long_desc,
+                change_log_url,
+                origin: origin_res,
+                version: candidate_version,
+                old_version: current_version,
+                priority: priority_res,
+                section: section_res,
+            };
+            ret.push(info);
+        }
+    }
+
+    return ret;
+}
+
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "A list of packages with available updates.",
+        type: Array,
+        items: { type: APTUpdateInfo },
+    },
+    access: {
+        permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// List available APT updates
+fn apt_update_available(_param: Value) -> Result<Value, Error> {
+    let ret = list_installed_apt_packages(|_pkg, cur_ver, can_ver| cur_ver != can_ver);
+    Ok(json!(ret))
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            quiet: {
+                description: "Only produces output suitable for logging, omitting progress indicators.",
+                type: bool,
+                default: false,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Update the APT database
+pub fn apt_update_database(
+    quiet: Option<bool>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+
+    let username = rpcenv.get_user().unwrap();
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
+
+    let upid_str = WorkerTask::new_thread("aptupdate", None, &username.clone(), to_stdout, move |worker| {
+        if !quiet { worker.log("starting apt-get update") }
+
+        // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
+
+        let mut command = std::process::Command::new("apt-get");
+        command.arg("update");
+
+        let output = crate::tools::run_command(command, None)?;
+        if !quiet { worker.log(output) }
+
+        // TODO: add mail notify for new updates like PVE
+
+        Ok(())
+    })?;
+
+    Ok(upid_str)
+}
+
+const SUBDIRS: SubdirMap = &[
+    ("update", &Router::new()
+        .get(&API_METHOD_APT_UPDATE_AVAILABLE)
+        .post(&API_METHOD_APT_UPDATE_DATABASE)
+    ),
+];
+
+pub const ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(SUBDIRS))
+    .subdirs(SUBDIRS);
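To see what get_changelog_url produces, here is a hypothetical invocation with made-up package metadata; the file paths, packages, and versions are illustrative, not real APT cache contents:

// Hypothetical driver for get_changelog_url above (sketch only).
fn main() -> Result<(), anyhow::Error> {
    // Debian origin: the prefix directory is the first letter of the
    // source package ("libX" for libraries).
    let url = get_changelog_url(
        "bash", "", "bash", "5.0-4", "5.0-4", "Debian", "main",
    )?;
    assert_eq!(
        url,
        "https://metadata.ftp-master.debian.org/changelogs/main/b/bash/bash_5.0-4_changelog"
    );

    // Proxmox origin: the base path is recovered from the Packages file
    // name by the FILENAME_EXTRACT_REGEX capture group.
    let url = get_changelog_url(
        "proxmox-backup-server",
        "/var/lib/apt/lists/x_debian_pbstest_binary-amd64_Packages",
        "proxmox-backup",
        "0.8.8-1", "0.8.8-1", "Proxmox", "pbstest",
    )?;
    println!("{}", url);
    Ok(())
}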
@@ -5,8 +5,16 @@ use proxmox::api::{api, Router, Permission};

 use crate::tools;
 use crate::config::acl::PRIV_SYS_AUDIT;
+use crate::api2::types::NODE_SCHEMA;

 #[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
     returns: {
         description: "Subscription status.",
         properties: {
@@ -161,6 +161,8 @@ fn datastore_status(
             if b != 0.0 {
                 let estimate = (1.0 - a) / b;
                 entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
+            } else {
+                entry["estimated-full-date"] = Value::from(0);
             }
         }
     }
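The guard added above avoids a division by zero in the datastore-full estimate. Reading the hunk, the linear fit gives the used fraction as a + b*t, so the store is full when a + b*t = 1, i.e. t = (1 - a)/b; with a zero slope b there is nothing to extrapolate. A tiny illustrative helper (my reading of the hunk, not the PBS function):

// Given a linear fit usage(t) = a + b*t of the used-space fraction,
// estimate when the datastore reaches 100%. A zero slope means usage is
// flat, so there is no meaningful estimate.
fn estimated_full_time(a: f64, b: f64) -> Option<f64> {
    if b != 0.0 { Some((1.0 - a) / b) } else { None }
}

fn main() {
    // e.g. 40% full now, growing 0.1% per day: full in about 600 days
    assert_eq!(estimated_full_time(0.4, 0.001), Some(600.0f64));
    assert_eq!(estimated_full_time(0.4, 0.0), None);
}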
@@ -962,3 +962,30 @@ pub enum RRDTimeFrameResolution {
     /// 1 week => last 490 days
     Year = 60*10080,
 }
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+/// Describes a package for which an update is available.
+pub struct APTUpdateInfo {
+    /// Package name
+    pub package: String,
+    /// Package title
+    pub title: String,
+    /// Package architecture
+    pub arch: String,
+    /// Human readable package description
+    pub description: String,
+    /// New version to be updated to
+    pub version: String,
+    /// Old version currently installed
+    pub old_version: String,
+    /// Package origin
+    pub origin: String,
+    /// Package priority in human-readable form
+    pub priority: String,
+    /// Package section
+    pub section: String,
+    /// URL under which the package's changelog can be retrieved
+    pub change_log_url: String,
+}
@@ -40,21 +40,21 @@
 //!
 //! Acquire shared lock for ChunkStore (process wide).
 //!
-//! Note: When creating .idx files, we create temporary (.tmp) file,
+//! Note: When creating .idx files, we create a temporary (.tmp) file,
 //! then do an atomic rename ...
 //!
 //!
 //! * Garbage Collect:
 //!
 //! Acquire exclusive lock for ChunkStore (process wide). If we have
-//! already an shared lock for ChunkStore, try to updraged that
+//! already a shared lock for the ChunkStore, try to upgrade that
 //! lock.
 //!
 //!
 //! * Server Restart
 //!
-//! Try to abort running garbage collection to release exclusive
-//! ChunkStore lock asap. Start new service with existing listening
+//! Try to abort the running garbage collection to release exclusive
+//! ChunkStore locks ASAP. Start the new service with the existing listening
 //! socket.
 //!
 //!
@@ -62,10 +62,10 @@
 //!
 //! Deleting backups is as easy as deleting the corresponding .idx
 //! files. Unfortunately, this does not free up any storage, because
-//! those files just contains references to chunks.
+//! those files just contain references to chunks.
 //!
 //! To free up some storage, we run a garbage collection process at
-//! regular intervals. The collector uses an mark and sweep
+//! regular intervals. The collector uses a mark and sweep
 //! approach. In the first phase, it scans all .idx files to mark used
 //! chunks. The second phase then removes all unmarked chunks from the
 //! store.
@@ -90,12 +90,12 @@
 //! amount of time ago (by default 24h). So we may only delete chunks
 //! with `atime` older than 24 hours.
 //!
-//! Another problem arise from running backups. The mark phase does
+//! Another problem arises from running backups. The mark phase does
 //! not find any chunks from those backups, because there is no .idx
 //! file for them (created after the backup). Chunks created or
 //! touched by those backups may have an `atime` as old as the start
-//! time of those backup. Please not that the backup start time may
-//! predate the GC start time. Se we may only delete chunk older than
+//! time of those backups. Please note that the backup start time may
+//! predate the GC start time. So we may only delete chunks older than
 //! the start time of those running backup jobs.
 //!
 //!
@ -1,30 +1,35 @@
|
||||
use std::future::Future;
|
||||
use std::task::{Poll, Context};
|
||||
use std::pin::Pin;
|
||||
use std::io::SeekFrom;
|
||||
|
||||
use anyhow::Error;
|
||||
use futures::future::FutureExt;
|
||||
use futures::ready;
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio::io::{AsyncRead, AsyncSeek};
|
||||
|
||||
use proxmox::sys::error::io_err_other;
|
||||
use proxmox::io_format_err;
|
||||
|
||||
use super::IndexFile;
|
||||
use super::read_chunk::AsyncReadChunk;
|
||||
use super::index::ChunkReadInfo;
|
||||
|
||||
enum AsyncIndexReaderState<S> {
|
||||
NoData,
|
||||
WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
|
||||
HaveData(usize),
|
||||
HaveData,
|
||||
}
|
||||
|
||||
pub struct AsyncIndexReader<S, I: IndexFile> {
|
||||
store: Option<S>,
|
||||
index: I,
|
||||
read_buffer: Vec<u8>,
|
||||
current_chunk_offset: u64,
|
||||
current_chunk_idx: usize,
|
||||
current_chunk_digest: [u8; 32],
|
||||
current_chunk_info: Option<ChunkReadInfo>,
|
||||
position: u64,
|
||||
seek_to_pos: i64,
|
||||
state: AsyncIndexReaderState<S>,
|
||||
}
|
||||
|
||||
@ -37,8 +42,11 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
|
||||
store: Some(store),
|
||||
index,
|
||||
read_buffer: Vec::with_capacity(1024 * 1024),
|
||||
current_chunk_offset: 0,
|
||||
current_chunk_idx: 0,
|
||||
current_chunk_digest: [0u8; 32],
|
||||
current_chunk_info: None,
            position: 0,
            seek_to_pos: 0,
            state: AsyncIndexReaderState::NoData,
        }
    }
@ -58,23 +66,41 @@ where
        loop {
            match &mut this.state {
                AsyncIndexReaderState::NoData => {
                    if this.current_chunk_idx >= this.index.index_count() {
                    let (idx, offset) = if this.current_chunk_info.is_some() &&
                        this.position == this.current_chunk_info.as_ref().unwrap().range.end
                    {
                        // optimization for sequential chunk read
                        let next_idx = this.current_chunk_idx + 1;
                        (next_idx, 0)
                    } else {
                        match this.index.chunk_from_offset(this.position) {
                            Some(res) => res,
                            None => return Poll::Ready(Ok(0))
                        }
                    };

                    if idx >= this.index.index_count() {
                        return Poll::Ready(Ok(0));
                    }

                    let digest = this
                    let info = this
                        .index
                        .index_digest(this.current_chunk_idx)
                        .ok_or(io_format_err!("could not get digest"))?
                        .clone();
                        .chunk_info(idx)
                        .ok_or(io_format_err!("could not get digest"))?;

                    if digest == this.current_chunk_digest {
                        this.state = AsyncIndexReaderState::HaveData(0);
                    this.current_chunk_offset = offset;
                    this.current_chunk_idx = idx;
                    let old_info = this.current_chunk_info.replace(info.clone());

                    if let Some(old_info) = old_info {
                        if old_info.digest == info.digest {
                            // hit, chunk is currently in cache
                            this.state = AsyncIndexReaderState::HaveData;
                            continue;
                        }
                    }

                    this.current_chunk_digest = digest;

                    // miss, need to download new chunk
                    let store = match this.store.take() {
                        Some(store) => store,
                        None => {
@ -83,7 +109,7 @@ where
                    };

                    let future = async move {
                        store.read_chunk(&digest)
                        store.read_chunk(&info.digest)
                            .await
                            .map(move |x| (store, x))
                    };
@ -95,7 +121,7 @@ where
                        Ok((store, mut chunk_data)) => {
                            this.read_buffer.clear();
                            this.read_buffer.append(&mut chunk_data);
                            this.state = AsyncIndexReaderState::HaveData(0);
                            this.state = AsyncIndexReaderState::HaveData;
                            this.store = Some(store);
                        }
                        Err(err) => {
@ -103,8 +129,8 @@ where
                    }
                };
                }
                AsyncIndexReaderState::HaveData(offset) => {
                    let offset = *offset;
                AsyncIndexReaderState::HaveData => {
                    let offset = this.current_chunk_offset as usize;
                    let len = this.read_buffer.len();
                    let n = if len - offset < buf.len() {
                        len - offset
@ -113,11 +139,13 @@ where
                    };

                    buf[0..n].copy_from_slice(&this.read_buffer[offset..(offset + n)]);
                    this.position += n as u64;

                    if offset + n == len {
                        this.state = AsyncIndexReaderState::NoData;
                        this.current_chunk_idx += 1;
                    } else {
                        this.state = AsyncIndexReaderState::HaveData(offset + n);
                        this.current_chunk_offset += n as u64;
                        this.state = AsyncIndexReaderState::HaveData;
                    }

                    return Poll::Ready(Ok(n));
@ -126,3 +154,51 @@ where
            }
        }
    }
}

impl<S, I> AsyncSeek for AsyncIndexReader<S, I>
where
    S: AsyncReadChunk + Unpin + Sync + 'static,
    I: IndexFile + Unpin,
{
    fn start_seek(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        pos: SeekFrom,
    ) -> Poll<tokio::io::Result<()>> {
        let this = Pin::get_mut(self);
        this.seek_to_pos = match pos {
            SeekFrom::Start(offset) => {
                offset as i64
            },
            SeekFrom::End(offset) => {
                this.index.index_bytes() as i64 + offset
            },
            SeekFrom::Current(offset) => {
                this.position as i64 + offset
            }
        };
        Poll::Ready(Ok(()))
    }

    fn poll_complete(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<tokio::io::Result<u64>> {
        let this = Pin::get_mut(self);

        let index_bytes = this.index.index_bytes();
        if this.seek_to_pos < 0 {
            return Poll::Ready(Err(io_format_err!("cannot seek to negative values")));
        } else if this.seek_to_pos > index_bytes as i64 {
            this.position = index_bytes;
        } else {
            this.position = this.seek_to_pos as u64;
        }

        // even if seeking within one chunk, we need to go to NoData to
        // recalculate the current_chunk_offset (data is cached anyway)
        this.state = AsyncIndexReaderState::NoData;

        Poll::Ready(Ok(this.position))
    }
}
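For context: the two-phase seek implemented above is normally driven through tokio's AsyncSeekExt, which calls start_seek() and then polls poll_complete(). A minimal usage sketch, assuming only that the reader implements AsyncRead + AsyncSeek + Unpin (the helper below is illustrative, not part of the crate):

use std::io::SeekFrom;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt};

/// Seek to `offset`, then read up to 4 KiB from there.
async fn read_at<R>(reader: &mut R, offset: u64) -> tokio::io::Result<Vec<u8>>
where
    R: AsyncRead + AsyncSeek + Unpin,
{
    // seek() drives start_seek() and then poll_complete() internally
    reader.seek(SeekFrom::Start(offset)).await?;
    let mut buf = vec![0u8; 4096];
    let n = reader.read(&mut buf).await?;
    buf.truncate(n);
    Ok(buf)
}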
@ -106,7 +106,11 @@ impl BackupGroup {

    use nix::fcntl::{openat, OFlag};
    match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
        Ok(_) => { /* manifest exists --> assume backup was successful */ },
        Ok(rawfd) => {
            /* manifest exists --> assume backup was successful */
            /* close else this leaks! */
            nix::unistd::close(rawfd)?;
        },
        Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
        Err(err) => {
            bail!("last_successful_backup: unexpected error - {}", err);
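A possible alternative to the explicit close() added here is to adopt the raw fd into an owned std::fs::File, which closes it on drop. A sketch under that assumption (not what the patch itself does):

use std::fs::File;
use std::os::unix::io::{FromRawFd, RawFd};

/// Take ownership of a raw fd so it is closed on drop (RAII),
/// instead of calling nix::unistd::close() by hand.
fn adopt_fd(rawfd: RawFd) -> File {
    // Safety: the caller must guarantee `rawfd` is open and exclusively ours.
    unsafe { File::from_raw_fd(rawfd) }
}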
@ -89,6 +89,10 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
                "find",
                CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
            )
            .insert(
                "exit",
                CliCommand::new(&API_METHOD_EXIT),
            )
            .insert_help(),
    )
}
@ -104,6 +108,14 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
    }
}

// just an empty wrapper so that it is displayed in help/docs; we check
// for 'exit' in the read loop and break there
#[api(input: { properties: {} })]
/// Exit the shell
async fn exit() -> Result<(), Error> {
    Ok(())
}

#[api(input: { properties: {} })]
/// List the current working directory.
async fn pwd_command() -> Result<(), Error> {
@ -439,6 +451,9 @@ impl Shell {
            SHELL = Some(this as *mut Shell as usize);
        }
        while let Ok(line) = this.rl.readline(&this.prompt) {
            if line == "exit" {
                break;
            }
            let helper = this.rl.helper().unwrap();
            let args = match cli::shellword_split(&line) {
                Ok(args) => args,
@ -178,7 +178,7 @@ impl ChunkStore {
            return Ok(false);
        }

        bail!("updata atime failed for chunk {:?} - {}", chunk_path, err);
        bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
    }

    Ok(true)
@ -5,15 +5,15 @@
/// use hash value 0 to detect a boundary.
const CA_CHUNKER_WINDOW_SIZE: usize = 64;

/// Slinding window chunker (Buzhash)
/// Sliding window chunker (Buzhash)
///
/// This is a rewrite of *casync* chunker (cachunker.h) in rust.
///
/// Hashing by cyclic polynomial (also called Buzhash) has the benefit
/// of avoiding multiplications, using barrel shifts instead. For more
/// information please take a look at the [Rolling
/// Hash](https://en.wikipedia.org/wiki/Rolling_hash) artikel from
/// wikipedia.
/// Hash](https://en.wikipedia.org/wiki/Rolling_hash) article from
/// Wikipedia.

pub struct Chunker {
    h: u32,
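The cyclic-polynomial hashing the doc comment describes boils down to one rotate and two XORs per byte; a minimal sketch of the rolling update (the table and constants here are illustrative, not the crate's actual implementation):

/// Minimal Buzhash-style rolling update: rotate the hash by one,
/// remove the byte leaving the window, mix in the byte entering it.
/// `table` would be 256 fixed pseudo-random u32 values in a real chunker.
fn roll(h: u32, out_byte: u8, in_byte: u8, table: &[u32; 256]) -> u32 {
    const WINDOW: u32 = 64 % 32; // CA_CHUNKER_WINDOW_SIZE modulo the hash width
    h.rotate_left(1)
        ^ table[out_byte as usize].rotate_left(WINDOW)
        ^ table[in_byte as usize]
}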
@ -144,7 +144,7 @@ impl DataStore {
        self.chunk_store.base_path()
    }

    /// Clenaup a backup directory
    /// Cleanup a backup directory
    ///
    /// Removes all files not mentioned in the manifest.
    pub fn cleanup_backup_dir(&self, backup_dir: &BackupDir, manifest: &BackupManifest
@ -340,9 +340,30 @@ impl DataStore {
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }

        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                let path = err.path().unwrap_or(Path::new(""));
                match inner.kind() {
                    io::ErrorKind::PermissionDenied => {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
                        }
                    },
                    _ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = entry?.into_path();
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue
                },
            };
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                    list.push(path);
@ -216,6 +216,24 @@ impl IndexFile for DynamicIndexReader {
            digest: self.index[pos].digest.clone(),
        })
    }

    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
        let end_idx = self.index.len() - 1;
        let end = self.chunk_end(end_idx);
        let found_idx = self.binary_search(0, 0, end_idx, end, offset);
        let found_idx = match found_idx {
            Ok(i) => i,
            Err(_) => return None
        };

        let found_start = if found_idx == 0 {
            0
        } else {
            self.chunk_end(found_idx - 1)
        };

        Some((found_idx, offset - found_start))
    }
}

struct CachedChunk {
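The same offset-to-chunk mapping can be expressed over a plain slice of cumulative chunk end offsets; a hypothetical standalone sketch of the idea behind the binary search above:

/// Given sorted chunk end offsets, find the chunk containing `offset`
/// and the relative offset within that chunk.
fn chunk_from_offset(ends: &[u64], offset: u64) -> Option<(usize, u64)> {
    if offset >= *ends.last()? {
        return None; // past the end of the archive
    }
    // first index whose end offset is strictly greater than `offset`
    let idx = ends.partition_point(|&end| end <= offset);
    let start = if idx == 0 { 0 } else { ends[idx - 1] };
    Some((idx, offset - start))
}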
@ -13,7 +13,6 @@ use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use super::read_chunk::*;
use super::ChunkInfo;

use proxmox::tools::io::ReadExt;
@ -146,20 +145,6 @@ impl FixedIndexReader {
        Ok(())
    }

    #[inline]
    fn chunk_end(&self, pos: usize) -> u64 {
        if pos >= self.index_length {
            panic!("chunk index out of range");
        }

        let end = ((pos + 1) * self.chunk_size) as u64;
        if end > self.size {
            self.size
        } else {
            end
        }
    }

    pub fn print_info(&self) {
        println!("Size: {}", self.size);
        println!("ChunkSize: {}", self.chunk_size);
@ -219,6 +204,17 @@ impl IndexFile for FixedIndexReader {

        (csum, chunk_end)
    }

    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
        if offset >= self.size {
            return None;
        }

        Some((
            (offset / self.chunk_size as u64) as usize,
            offset & (self.chunk_size - 1) as u64 // fast modulo, valid for 2^x chunk_size
        ))
    }
}
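A quick check of the fast-modulo identity used in chunk_from_offset(): masking with size - 1 equals the remainder exactly when the chunk size is a power of two.

fn main() {
    let chunk_size: u64 = 4 * 1024 * 1024; // 4 MiB, a power of two
    for &offset in &[0u64, 1, 4_194_303, 4_194_304, 10_000_000] {
        // offset % n == offset & (n - 1) holds only for power-of-two n
        assert_eq!(offset % chunk_size, offset & (chunk_size - 1));
    }
}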
pub struct FixedIndexWriter {
@ -465,142 +461,3 @@ impl FixedIndexWriter {
        Ok(())
    }
}

pub struct BufferedFixedReader<S> {
    store: S,
    index: FixedIndexReader,
    archive_size: u64,
    read_buffer: Vec<u8>,
    buffered_chunk_idx: usize,
    buffered_chunk_start: u64,
    read_offset: u64,
}

impl<S: ReadChunk> BufferedFixedReader<S> {
    pub fn new(index: FixedIndexReader, store: S) -> Self {
        let archive_size = index.size;
        Self {
            store,
            index,
            archive_size,
            read_buffer: Vec::with_capacity(1024 * 1024),
            buffered_chunk_idx: 0,
            buffered_chunk_start: 0,
            read_offset: 0,
        }
    }

    pub fn archive_size(&self) -> u64 {
        self.archive_size
    }

    fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
        let index = &self.index;
        let info = match index.chunk_info(idx) {
            Some(info) => info,
            None => bail!("chunk index out of range"),
        };

        // fixme: avoid copy

        let data = self.store.read_chunk(&info.digest)?;
        let size = info.range.end - info.range.start;
        if size != data.len() as u64 {
            bail!("read chunk with wrong size ({} != {}", size, data.len());
        }

        self.read_buffer.clear();
        self.read_buffer.extend_from_slice(&data);

        self.buffered_chunk_idx = idx;

        self.buffered_chunk_start = info.range.start as u64;
        Ok(())
    }
}

impl<S: ReadChunk> crate::tools::BufferedRead for BufferedFixedReader<S> {
    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
        if offset == self.archive_size {
            return Ok(&self.read_buffer[0..0]);
        }

        let buffer_len = self.read_buffer.len();
        let index = &self.index;

        // optimization for sequential read
        if buffer_len > 0
            && ((self.buffered_chunk_idx + 1) < index.index_length)
            && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
        {
            let next_idx = self.buffered_chunk_idx + 1;
            let next_end = index.chunk_end(next_idx);
            if offset < next_end {
                self.buffer_chunk(next_idx)?;
                let buffer_offset = (offset - self.buffered_chunk_start) as usize;
                return Ok(&self.read_buffer[buffer_offset..]);
            }
        }

        if (buffer_len == 0)
            || (offset < self.buffered_chunk_start)
            || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
        {
            let idx = (offset / index.chunk_size as u64) as usize;
            self.buffer_chunk(idx)?;
        }

        let buffer_offset = (offset - self.buffered_chunk_start) as usize;
        Ok(&self.read_buffer[buffer_offset..])
    }
}

impl<S: ReadChunk> std::io::Read for BufferedFixedReader<S> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
        use crate::tools::BufferedRead;
        use std::io::{Error, ErrorKind};

        let data = match self.buffered_read(self.read_offset) {
            Ok(v) => v,
            Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
        };

        let n = if data.len() > buf.len() {
            buf.len()
        } else {
            data.len()
        };

        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
        }

        self.read_offset += n as u64;

        Ok(n)
    }
}

impl<S: ReadChunk> Seek for BufferedFixedReader<S> {
    fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
        let new_offset = match pos {
            SeekFrom::Start(start_offset) => start_offset as i64,
            SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
            SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
        };

        use std::io::{Error, ErrorKind};
        if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
            return Err(Error::new(
                ErrorKind::Other,
                format!(
                    "seek is out of range {} ([0..{}])",
                    new_offset, self.archive_size
                ),
            ));
        }
        self.read_offset = new_offset as u64;

        Ok(self.read_offset)
    }
}
@ -1,6 +1,7 @@
use std::collections::HashMap;
use std::ops::Range;

#[derive(Clone)]
pub struct ChunkReadInfo {
    pub range: Range<u64>,
    pub digest: [u8; 32],
@ -22,6 +23,9 @@ pub trait IndexFile {
    fn index_bytes(&self) -> u64;
    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;

    /// Get the chunk index and the relative offset within it for a byte offset
    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)>;

    /// Compute index checksum and size
    fn compute_csum(&self) -> ([u8; 32], u64);
@ -104,7 +104,7 @@ impl BackupManifest {
        &self.files[..]
    }

    fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {

        let info = self.files.iter().find(|item| item.filename == name);

@ -160,12 +160,12 @@ impl BackupManifest {
        keys.sort();
        let mut iter = keys.into_iter();
        if let Some(key) = iter.next() {
            output.extend(key.as_bytes());
            Self::write_canonical_json(&key.into(), output)?;
            output.push(b':');
            Self::write_canonical_json(&map[key], output)?;
            for key in iter {
                output.push(b',');
                output.extend(key.as_bytes());
                Self::write_canonical_json(&key.into(), output)?;
                output.push(b':');
                Self::write_canonical_json(&map[key], output)?;
            }
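The change above switches map keys from raw bytes to properly quoted, escaped JSON strings. A hypothetical serde_json sketch of the same canonicalization idea (top-level keys only, unlike the crate's recursive writer):

use serde_json::Value;

fn canonical_map(map: &serde_json::Map<String, Value>) -> String {
    let mut keys: Vec<&String> = map.keys().collect();
    keys.sort(); // canonical form: keys in a deterministic order
    let parts: Vec<String> = keys
        .iter()
        .map(|k| {
            // Value::String renders as a quoted, escaped JSON string
            format!("{}:{}", Value::String((*k).clone()), map[k.as_str()])
        })
        .collect();
    format!("{{{}}}", parts.join(","))
}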
@ -1037,14 +1037,14 @@ async fn create_backup(
    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                    .await?;
@ -1059,7 +1059,7 @@ async fn create_backup(
            }
            let catalog = catalog.as_ref().unwrap();

            println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
            println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
            catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
            let stats = backup_directory(
                &client,
@ -1137,7 +1137,7 @@ async fn create_backup(
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;


    println!("Upload index.json to '{:?}'", repo);
    if verbose { println!("Upload index.json to '{}'", repo) };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
        .await?;
@ -1,5 +1,5 @@
use std::sync::Arc;
use std::path::Path;
use std::path::{Path, PathBuf};

use anyhow::{bail, format_err, Error};
use futures::*;
@ -53,6 +53,11 @@ async fn run() -> Result<(), Error> {
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
    indexpath.push("index.hbs");
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
@ -16,6 +16,7 @@ use proxmox::tools::digest_to_hex;

use super::merge_known_chunks::{MergedChunkInfo, MergeKnownChunks};
use crate::backup::*;
use crate::tools::format::HumanByte;

use super::{HttpClient, H2Client};

@ -242,7 +243,7 @@ impl BackupWriter {

        let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();

        let (chunk_count, size, duration, speed, csum) =
        let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
            Self::upload_chunk_info_stream(
                self.h2.clone(),
                wid,
@ -255,10 +256,30 @@ impl BackupWriter {
            )
            .await?;

        println!("{}: Uploaded {} bytes as {} chunks in {} seconds ({} MB/s).", archive_name, size, chunk_count, duration.as_secs(), speed);
        if chunk_count > 0 {
            println!("{}: Average chunk size was {} bytes.", archive_name, size/chunk_count);
            println!("{}: Time per request: {} microseconds.", archive_name, (duration.as_micros())/(chunk_count as u128));
        let uploaded = size - size_reused;
        let vsize_h: HumanByte = size.into();
        let archive = if self.verbose {
            archive_name.to_string()
        } else {
            crate::tools::format::strip_server_file_expenstion(archive_name.clone())
        };
        if archive_name != CATALOG_NAME {
            let speed: HumanByte = (uploaded / (duration.as_secs() as usize)).into();
            let uploaded: HumanByte = uploaded.into();
            println!("{}: had to upload {} from {} in {}s, average speed {}/s.", archive, uploaded, vsize_h, duration.as_secs(), speed);
        } else {
            println!("Uploaded backup catalog ({})", vsize_h);
        }

        if size_reused > 0 && size > 1024*1024 {
            let reused_percent = size_reused as f64 * 100. / size as f64;
            let reused: HumanByte = size_reused.into();
            println!("{}: backup was done incrementally, reused {} ({:.1}%)", archive, reused, reused_percent);
        }
        if self.verbose && chunk_count > 0 {
            println!("{}: Reused {} from {} chunks.", archive, chunk_reused, chunk_count);
            println!("{}: Average chunk size was {}.", archive, HumanByte::from(size/chunk_count));
            println!("{}: Average time per request: {} microseconds.", archive, (duration.as_micros())/(chunk_count as u128));
        }

        let param = json!({
@ -476,13 +497,17 @@ impl BackupWriter {
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
        verbose: bool,
    ) -> impl Future<Output = Result<(usize, usize, std::time::Duration, usize, [u8; 32]), Error>> {
    ) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>> {

        let repeat = Arc::new(AtomicUsize::new(0));
        let repeat2 = repeat.clone();
        let total_chunks = Arc::new(AtomicUsize::new(0));
        let total_chunks2 = total_chunks.clone();
        let known_chunk_count = Arc::new(AtomicUsize::new(0));
        let known_chunk_count2 = known_chunk_count.clone();

        let stream_len = Arc::new(AtomicUsize::new(0));
        let stream_len2 = stream_len.clone();
        let reused_len = Arc::new(AtomicUsize::new(0));
        let reused_len2 = reused_len.clone();

        let append_chunk_path = format!("{}_index", prefix);
        let upload_chunk_path = format!("{}_chunk", prefix);
@ -501,7 +526,7 @@ impl BackupWriter {

                let chunk_len = data.len();

                repeat.fetch_add(1, Ordering::SeqCst);
                total_chunks.fetch_add(1, Ordering::SeqCst);
                let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

                let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
@ -524,6 +549,8 @@ impl BackupWriter {

                let chunk_is_known = known_chunks.contains(digest);
                if chunk_is_known {
                    known_chunk_count.fetch_add(1, Ordering::SeqCst);
                    reused_len.fetch_add(chunk_len, Ordering::SeqCst);
                    future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
                } else {
                    known_chunks.insert(*digest);
@ -546,7 +573,7 @@ impl BackupWriter {
                    let digest = chunk_info.digest;
                    let digest_str = digest_to_hex(&digest);

                    if verbose {
                    if false && verbose { // too verbose, needs finer verbosity setting granularity
                        println!("upload new chunk {} ({} bytes, offset {})", digest_str,
                            chunk_info.chunk_len, offset);
                    }
@ -589,14 +616,16 @@ impl BackupWriter {
            upload_result.await?.and(result)
        }.boxed())
        .and_then(move |_| {
            let repeat = repeat2.load(Ordering::SeqCst);
            let duration = start_time.elapsed();
            let total_chunks = total_chunks2.load(Ordering::SeqCst);
            let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
            let stream_len = stream_len2.load(Ordering::SeqCst);
            let speed = ((stream_len*1_000_000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
            let reused_len = reused_len2.load(Ordering::SeqCst);

            let mut guard = index_csum_2.lock().unwrap();
            let csum = guard.take().unwrap().finish();

            futures::future::ok((repeat, stream_len, start_time.elapsed(), speed, csum))
            futures::future::ok((total_chunks, known_chunk_count, stream_len, reused_len, duration, csum))
        })
    }
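The bookkeeping above relies on a common pattern: clone an Arc<AtomicUsize> into each stage of the pipeline, then read the totals once the stream completes. A minimal self-contained sketch of that pattern (plain threads stand in for the upload futures):

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let total = Arc::new(AtomicUsize::new(0));

    // each worker gets its own clone of the Arc; the counter is shared
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let counter = total.clone();
            std::thread::spawn(move || {
                counter.fetch_add(1, Ordering::SeqCst);
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }

    // read the aggregate once all work is done
    assert_eq!(total.load(Ordering::SeqCst), 4);
}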
@ -16,6 +16,7 @@ use percent_encoding::percent_encode;
use xdg::BaseDirectories;

use proxmox::{
    api::error::HttpError,
    sys::linux::tty,
    tools::{
        fs::{file_get_json, replace_file, CreateOptions},
@ -606,7 +607,7 @@ impl HttpClient {
            Ok(value)
        }
    } else {
        bail!("HTTP Error {}: {}", status, text);
        Err(Error::from(HttpError::new(status, text)))
    }
}

@ -819,7 +820,7 @@ impl H2Client {
            bail!("got result without data property");
        }
    } else {
        bail!("HTTP Error {}: {}", status, text);
        Err(Error::from(HttpError::new(status, text)))
    }
}
@ -6,8 +6,8 @@ use std::convert::TryFrom;
use std::sync::Arc;
use std::collections::HashMap;
use std::io::{Seek, SeekFrom};
use chrono::{Utc, TimeZone};

use proxmox::api::error::{StatusCode, HttpError};
use crate::server::{WorkerTask};
use crate::backup::*;
use crate::api2::types::*;
@ -152,7 +152,28 @@ async fn pull_snapshot(
    let mut tmp_manifest_name = manifest_name.clone();
    tmp_manifest_name.set_extension("tmp");

    let mut tmp_manifest_file = download_manifest(&reader, &tmp_manifest_name).await?;
    let download_res = download_manifest(&reader, &tmp_manifest_name).await;
    let mut tmp_manifest_file = match download_res {
        Ok(manifest_file) => manifest_file,
        Err(err) => {
            match err.downcast_ref::<HttpError>() {
                Some(HttpError { code, message }) => {
                    match code {
                        &StatusCode::NOT_FOUND => {
                            worker.log(format!("skipping snapshot {} - vanished since start of sync", snapshot));
                            return Ok(());
                        },
                        _ => {
                            bail!("HTTP error {} - {}", code, message);
                        },
                    }
                },
                None => {
                    return Err(err);
                },
            };
        },
    };
    let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?;
    tmp_manifest_blob.verify_crc()?;

@ -302,7 +323,16 @@ pub async fn pull_group(
    let mut remote_snapshots = std::collections::HashSet::new();

    for item in list {
        let backup_time = Utc.timestamp(item.backup_time, 0);
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);

        // in-progress backups can't be synced
        if let None = item.size {
            worker.log(format!("skipping snapshot {} - in-progress backup", snapshot));
            continue;
        }

        let backup_time = snapshot.backup_time();

        remote_snapshots.insert(backup_time);

        if let Some(last_sync_time) = last_sync {
@ -319,14 +349,12 @@ pub async fn pull_group(
            new_client,
            None,
            src_repo.store(),
            &item.backup_type,
            &item.backup_id,
            snapshot.group().backup_type(),
            snapshot.group().backup_id(),
            backup_time,
            true,
        ).await?;

        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);

        pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
    }
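The 404 handling above depends on recovering a concrete error type from an anyhow::Error chain via downcast_ref. A reduced sketch of the pattern, with a hypothetical error type standing in for proxmox's HttpError:

use anyhow::Error;

#[derive(Debug)]
struct FakeHttpError { code: u16, message: String }

impl std::fmt::Display for FakeHttpError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "HTTP {}: {}", self.code, self.message)
    }
}
impl std::error::Error for FakeHttpError {}

/// Treat "not found" as a skippable condition; re-raise everything else.
fn handle(err: Error) -> Result<(), Error> {
    match err.downcast_ref::<FakeHttpError>() {
        Some(http) if http.code == 404 => Ok(()),
        _ => Err(err),
    }
}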
@ -39,6 +39,8 @@ constnamemap! {
    PRIV_REMOTE_MODIFY("Remote.Modify") = 1 << 10;
    PRIV_REMOTE_READ("Remote.Read") = 1 << 11;
    PRIV_REMOTE_PRUNE("Remote.Prune") = 1 << 12;

    PRIV_SYS_CONSOLE("Sys.Console") = 1 << 13;
}
}
@ -89,7 +89,9 @@ impl CachedUserInfo {
        (user_privs & required_privs) == required_privs
    };
    if !allowed {
        bail!("no permissions");
        // printing the path doesn't leak any information as long as we
        // always check privilege before resource existence
        bail!("no permissions on '/{}'", path.join("/"));
    }
    Ok(())
}
@ -23,6 +23,7 @@ use proxmox::tools::fd::RawFdNum;
use proxmox::tools::vec;

use crate::pxar::catalog::BackupCatalogWriter;
use crate::pxar::metadata::errno_is_unsupported;
use crate::pxar::Flags;
use crate::pxar::tools::assert_single_path_component;
use crate::tools::{acl, fs, xattr, Fd};
@ -289,11 +290,13 @@ impl<'a, 'b> Archiver<'a, 'b> {

        let old_pattern_count = self.patterns.len();

        let path_bytes = self.path.as_os_str().as_bytes();

        if let Some(fd) = fd {
            let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };

            use io::BufRead;
            for line in io::BufReader::new(file).lines() {
            for line in io::BufReader::new(file).split(b'\n') {
                let line = match line {
                    Ok(line) => line,
                    Err(err) => {
@ -308,13 +311,29 @@ impl<'a, 'b> Archiver<'a, 'b> {
                    }
                };

                let line = line.trim();
                let line = crate::tools::strip_ascii_whitespace(&line);

                if line.is_empty() || line.starts_with('#') {
                if line.is_empty() || line[0] == b'#' {
                    continue;
                }

                match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Exclude) {
                let mut buf;
                let (line, mode) = if line[0] == b'/' {
                    buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
                    buf.extend(path_bytes);
                    buf.extend(line);
                    (&buf[..], MatchType::Exclude)
                } else if line.starts_with(b"!/") {
                    // inverted case with absolute path
                    buf = Vec::with_capacity(path_bytes.len() + line.len());
                    buf.extend(path_bytes);
                    buf.extend(&line[1..]); // without the '!'
                    (&buf[..], MatchType::Include)
                } else {
                    (line, MatchType::Exclude)
                };

                match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
                    Ok(pattern) => self.patterns.push(pattern),
                    Err(err) => {
                        let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
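To make the new exclude-file semantics concrete: a leading '/' anchors the pattern to the directory that contains the exclude file, '!/' turns it into an anchored include, and anything else stays a relative exclude. A hypothetical classifier, assuming the directory currently being archived is /data/dir:

fn classify(line: &[u8]) -> (Vec<u8>, &'static str) {
    let dir = b"/data/dir".to_vec();
    if line.starts_with(b"!/") {
        // "!/keep.log" -> include /data/dir/keep.log
        let mut p = dir;
        p.extend_from_slice(&line[1..]); // drop the '!'
        (p, "Include")
    } else if line.starts_with(b"/") {
        // "/tmp.log" -> exclude /data/dir/tmp.log
        let mut p = dir;
        p.extend_from_slice(line);
        (p, "Exclude")
    } else {
        // "*.bak" -> exclude anywhere below the current directory
        (line.to_vec(), "Exclude")
    }
}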
@ -698,13 +717,6 @@ fn get_metadata(fd: RawFd, stat: &FileStat, flags: Flags, fs_magic: i64) -> Resu
    Ok(meta)
}

fn errno_is_unsupported(errno: Errno) -> bool {
    match errno {
        Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
        _ => false,
    }
}

fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags) -> Result<(), Error> {
    if flags.contains(Flags::WITH_FCAPS) {
        return Ok(());
@ -769,7 +781,7 @@ fn get_xattr_fcaps_acl(
}

fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
    let mut attr: usize = 0;
    let mut attr: libc::c_long = 0;

    match unsafe { fs::read_attr_fd(fd, &mut attr) } {
        Ok(_) => (),
@ -779,7 +791,7 @@ fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
        Err(err) => bail!("failed to read file attributes: {}", err),
    }

    metadata.stat.flags |= Flags::from_chattr(attr as u32).bits();
    metadata.stat.flags |= Flags::from_chattr(attr).bits();

    Ok(())
}
@ -230,7 +230,8 @@ impl Extractor {
            dir.metadata(),
            fd,
            &CString::new(dir.file_name().as_bytes())?,
        )?;
        )
        .map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
    }

    Ok(())
@ -241,7 +242,9 @@ impl Extractor {
    }

    fn parent_fd(&mut self) -> Result<RawFd, Error> {
        self.dir_stack.last_dir_fd(self.allow_existing_dirs)
        self.dir_stack
            .last_dir_fd(self.allow_existing_dirs)
            .map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
    }

    pub fn extract_symlink(
@ -320,10 +323,14 @@ impl Extractor {
            file_name,
            OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
            Mode::from_bits(0o600).unwrap(),
        )?)
        )
        .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?)
    };

    let extracted = io::copy(&mut *contents, &mut file)?;
    metadata::apply_initial_flags(self.feature_flags, metadata, file.as_raw_fd())?;

    let extracted = io::copy(&mut *contents, &mut file)
        .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
    if size != extracted {
        bail!("extracted {} bytes of a file of {} bytes", extracted, size);
    }
@ -345,10 +352,15 @@ impl Extractor {
            file_name,
            OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
            Mode::from_bits(0o600).unwrap(),
        )?)
        )
        .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?)
    });

    let extracted = tokio::io::copy(&mut *contents, &mut file).await?;
    metadata::apply_initial_flags(self.feature_flags, metadata, file.as_raw_fd())?;

    let extracted = tokio::io::copy(&mut *contents, &mut file)
        .await
        .map_err(|err| format_err!("failed to copy file contents: {}", err))?;
    if size != extracted {
        bail!("extracted {} bytes of a file of {} bytes", extracted, size);
    }
@ -3,6 +3,8 @@
//! Flags for known supported features for a given filesystem can be derived
//! from the superblocks magic number.

use libc::c_long;

use bitflags::bitflags;

bitflags! {
@ -149,22 +151,27 @@ impl Default for Flags {
    }
}

impl Flags {
    /// Get a set of feature flags from file attributes.
    pub fn from_chattr(attr: u32) -> Flags {
        // from /usr/include/linux/fs.h
        const FS_APPEND_FL: u32 = 0x0000_0020;
        const FS_NOATIME_FL: u32 = 0x0000_0080;
        const FS_COMPR_FL: u32 = 0x0000_0004;
        const FS_NOCOW_FL: u32 = 0x0080_0000;
        const FS_NODUMP_FL: u32 = 0x0000_0040;
        const FS_DIRSYNC_FL: u32 = 0x0001_0000;
        const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
        const FS_SYNC_FL: u32 = 0x0000_0008;
        const FS_NOCOMP_FL: u32 = 0x0000_0400;
        const FS_PROJINHERIT_FL: u32 = 0x2000_0000;
// from /usr/include/linux/fs.h
const FS_APPEND_FL: c_long = 0x0000_0020;
const FS_NOATIME_FL: c_long = 0x0000_0080;
const FS_COMPR_FL: c_long = 0x0000_0004;
const FS_NOCOW_FL: c_long = 0x0080_0000;
const FS_NODUMP_FL: c_long = 0x0000_0040;
const FS_DIRSYNC_FL: c_long = 0x0001_0000;
const FS_IMMUTABLE_FL: c_long = 0x0000_0010;
const FS_SYNC_FL: c_long = 0x0000_0008;
const FS_NOCOMP_FL: c_long = 0x0000_0400;
const FS_PROJINHERIT_FL: c_long = 0x2000_0000;

        const CHATTR_MAP: [(Flags, u32); 10] = [
pub(crate) const INITIAL_FS_FLAGS: c_long =
    FS_NOATIME_FL
    | FS_COMPR_FL
    | FS_NOCOW_FL
    | FS_NOCOMP_FL
    | FS_PROJINHERIT_FL;

#[rustfmt::skip]
const CHATTR_MAP: [(Flags, c_long); 10] = [
    ( Flags::WITH_FLAG_APPEND, FS_APPEND_FL ),
    ( Flags::WITH_FLAG_NOATIME, FS_NOATIME_FL ),
    ( Flags::WITH_FLAG_COMPR, FS_COMPR_FL ),
@ -175,8 +182,23 @@ impl Flags {
    ( Flags::WITH_FLAG_SYNC, FS_SYNC_FL ),
    ( Flags::WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
    ( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
];
];

// from /usr/include/linux/msdos_fs.h
const ATTR_HIDDEN: u32 = 2;
const ATTR_SYS: u32 = 4;
const ATTR_ARCH: u32 = 32;

#[rustfmt::skip]
const FAT_ATTR_MAP: [(Flags, u32); 3] = [
    ( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
    ( Flags::WITH_FLAG_SYSTEM, ATTR_SYS ),
    ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH ),
];

impl Flags {
    /// Get a set of feature flags from file attributes.
    pub fn from_chattr(attr: c_long) -> Flags {
        let mut flags = Flags::empty();

        for (fe_flag, fs_flag) in &CHATTR_MAP {
@ -188,19 +210,25 @@ impl Flags {
        flags
    }

    /// Get the chattr bit representation of these feature flags.
    pub fn to_chattr(self) -> c_long {
        let mut flags: c_long = 0;

        for (fe_flag, fs_flag) in &CHATTR_MAP {
            if self.contains(*fe_flag) {
                flags |= *fs_flag;
            }
        }

        flags
    }

    pub fn to_initial_chattr(self) -> c_long {
        self.to_chattr() & INITIAL_FS_FLAGS
    }

    /// Get a set of feature flags from FAT attributes.
    pub fn from_fat_attr(attr: u32) -> Flags {
        // from /usr/include/linux/msdos_fs.h
        const ATTR_HIDDEN: u32 = 2;
        const ATTR_SYS: u32 = 4;
        const ATTR_ARCH: u32 = 32;

        const FAT_ATTR_MAP: [(Flags, u32); 3] = [
            ( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
            ( Flags::WITH_FLAG_SYSTEM, ATTR_SYS ),
            ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH ),
        ];

        let mut flags = Flags::empty();

        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
@ -212,6 +240,19 @@ impl Flags {
        flags
    }

    /// Get the fat attribute bit representation of these feature flags.
    pub fn to_fat_attr(self) -> u32 {
        let mut flags = 0u32;

        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
            if self.contains(*fe_flag) {
                flags |= *fs_flag;
            }
        }

        flags
    }

    /// Return the supported *pxar* feature flags based on the magic number of the filesystem.
    pub fn from_magic(magic: i64) -> Flags {
        use proxmox::sys::linux::magic::*;
@ -79,13 +79,19 @@ pub fn apply_at(
    apply(flags, metadata, fd.as_raw_fd(), file_name)
}

pub fn apply_initial_flags(
    flags: Flags,
    metadata: &Metadata,
    fd: RawFd,
) -> Result<(), Error> {
    let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
    apply_chattr(fd, entry_flags.to_initial_chattr(), flags.to_initial_chattr())?;
    Ok(())
}

pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) -> Result<(), Error> {
    let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();

    if metadata.stat.flags != 0 {
        todo!("apply flags!");
    }

    unsafe {
        // UID and GID first, as this fails if we lose access anyway.
        c_result!(libc::chown(
@ -94,13 +100,15 @@ pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) ->
            metadata.stat.gid
        ))
        .map(drop)
        .or_else(allow_notsupp)?;
        .or_else(allow_notsupp)
        .map_err(|err| format_err!("failed to set ownership: {}", err))?;
    }

    let mut skip_xattrs = false;
    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    apply_acls(flags, &c_proc_path, metadata)?;
    apply_acls(flags, &c_proc_path, metadata)
        .map_err(|err| format_err!("failed to apply acls: {}", err))?;
    apply_quota_project_id(flags, fd, metadata)?;

    // Finally mode and time. We may lose access with mode, but changing the mode also
@ -110,7 +118,12 @@ pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) ->
        libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
    })
    .map(drop)
    .or_else(allow_notsupp)?;
    .or_else(allow_notsupp)
    .map_err(|err| format_err!("failed to change file mode: {}", err))?;
    }

    if metadata.stat.flags != 0 {
        apply_flags(flags, fd, metadata.stat.flags)?;
    }

    let res = c_result!(unsafe {
@ -160,7 +173,8 @@ fn add_fcaps(
        )
    })
    .map(drop)
    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))?;
    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))
    .map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;

    Ok(())
}
@ -195,7 +209,8 @@ fn apply_xattrs(
        )
    })
    .map(drop)
    .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))?;
    .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
    .map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
    }

    Ok(())
@ -317,3 +332,49 @@ fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Resul

    Ok(())
}

pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
    match errno {
        Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
        _ => false,
    }
}

fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
    if chattr == 0 {
        return Ok(());
    }

    let mut fattr: libc::c_long = 0;
    match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
        Ok(_) => (),
        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
            return Ok(());
        }
        Err(err) => bail!("failed to read file attributes: {}", err),
    }

    let attr = (chattr & mask) | (fattr & !mask);
    match unsafe { fs::write_attr_fd(fd, &attr) } {
        Ok(_) => Ok(()),
        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
        Err(err) => bail!("failed to set file attributes: {}", err),
    }
}

fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
    let entry_flags = Flags::from_bits_truncate(entry_flags);

    apply_chattr(fd, entry_flags.to_chattr(), flags.to_chattr())?;

    let fatattr = (flags & entry_flags).to_fat_attr();
    if fatattr != 0 {
        match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
            Ok(_) => (),
            Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
            Err(err) => bail!("failed to set file attributes: {}", err),
        }
    }

    Ok(())
}
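The read-modify-write in apply_chattr() only touches the bits selected by `mask` and leaves every other on-disk attribute bit alone; a quick numeric check of that identity:

fn merge_attrs(chattr: i64, mask: i64, fattr: i64) -> i64 {
    (chattr & mask) | (fattr & !mask)
}

#[test]
fn masking_preserves_unselected_bits() {
    let fattr = 0b1010;  // current attributes on disk
    let chattr = 0b0101; // attributes requested by the archive entry
    let mask = 0b0011;   // only the low two bits are ours to change
    // low bits come from chattr (01), high bits stay from fattr (10)
    assert_eq!(merge_attrs(chattr, mask, fattr), 0b1001);
}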
@ -1,9 +1,13 @@
use std::collections::HashMap;
use std::path::{PathBuf};
use anyhow::Error;
use std::path::PathBuf;
use std::time::SystemTime;
use std::fs::metadata;
use std::sync::RwLock;

use anyhow::{bail, Error, format_err};
use hyper::Method;
use handlebars::Handlebars;
use serde::Serialize;

use proxmox::api::{ApiMethod, Router, RpcEnvironmentType};

@ -12,21 +16,20 @@ pub struct ApiConfig {
    router: &'static Router,
    aliases: HashMap<String, PathBuf>,
    env_type: RpcEnvironmentType,
    pub templates: Handlebars<'static>,
    templates: RwLock<Handlebars<'static>>,
    template_files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
}

impl ApiConfig {

    pub fn new<B: Into<PathBuf>>(basedir: B, router: &'static Router, env_type: RpcEnvironmentType) -> Result<Self, Error> {
        let mut templates = Handlebars::new();
        let basedir = basedir.into();
        templates.register_template_file("index", basedir.join("index.hbs"))?;
        Ok(Self {
            basedir,
            basedir: basedir.into(),
            router,
            aliases: HashMap::new(),
            env_type,
            templates
            templates: RwLock::new(Handlebars::new()),
            template_files: RwLock::new(HashMap::new()),
        })
    }

@ -67,4 +70,52 @@ impl ApiConfig {
    pub fn env_type(&self) -> RpcEnvironmentType {
        self.env_type
    }

    pub fn register_template<P>(&self, name: &str, path: P) -> Result<(), Error>
    where
        P: Into<PathBuf>
    {
        if self.template_files.read().unwrap().contains_key(name) {
            bail!("template already registered");
        }

        let path: PathBuf = path.into();
        let metadata = metadata(&path)?;
        let mtime = metadata.modified()?;

        self.templates.write().unwrap().register_template_file(name, &path)?;
        self.template_files.write().unwrap().insert(name.to_string(), (mtime, path));

        Ok(())
    }

    /// Checks if the template was modified since the last rendering;
    /// if so, it loads the new version of the template
    pub fn render_template<T>(&self, name: &str, data: &T) -> Result<String, Error>
    where
        T: Serialize,
    {
        let path;
        let mtime;
        {
            let template_files = self.template_files.read().unwrap();
            let (old_mtime, old_path) = template_files.get(name).ok_or_else(|| format_err!("template not found"))?;

            mtime = metadata(old_path)?.modified()?;
            if mtime <= *old_mtime {
                return self.templates.read().unwrap().render(name, data).map_err(|err| format_err!("{}", err));
            }
            path = old_path.to_path_buf();
        }

        {
            let mut template_files = self.template_files.write().unwrap();
            let mut templates = self.templates.write().unwrap();

            templates.register_template_file(name, &path)?;
            template_files.insert(name.to_string(), (mtime, path));

            templates.render(name, data).map_err(|err| format_err!("{}", err))
        }
    }
}
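A hypothetical usage sketch of the hot-reload API above: register each template once at startup, then render per request; an on-disk edit of the .hbs file is picked up on the next render without restarting the daemon. The ROUTER constant, paths and template data below are assumptions for illustration, not values from this repository:

let config = ApiConfig::new("/usr/share/javascript/proxmox-backup", &ROUTER, RpcEnvironmentType::PUBLIC)?;
config.register_template("index", "/usr/share/javascript/proxmox-backup/index.hbs")?;

// later, once per request:
let html = config.render_template("index", &serde_json::json!({ "debug": false }))?;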
@ -55,7 +55,7 @@ impl <E: RpcEnvironment + Clone> H2Service<E> {

        match self.router.find_method(&components, method, &mut uri_param) {
            None => {
                let err = http_err!(NOT_FOUND, "Path not found.".to_string());
                let err = http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string());
                future::ok((formatter.format_error)(err)).boxed()
            }
            Some(api_method) => {
@ -16,7 +16,6 @@ use serde_json::{json, Value};
use tokio::fs::File;
use tokio::time::Instant;
use url::form_urlencoded;
use handlebars::Handlebars;

use proxmox::http_err;
use proxmox::api::{ApiHandler, ApiMethod, HttpError};
@ -312,7 +311,7 @@ pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher +
    Ok(resp)
}

fn get_index(username: Option<String>, token: Option<String>, template: &Handlebars, parts: Parts) -> Response<Body> {
fn get_index(username: Option<String>, token: Option<String>, api: &Arc<ApiConfig>, parts: Parts) -> Response<Body> {

    let nodename = proxmox::tools::nodename();
    let username = username.unwrap_or_else(|| String::from(""));
@ -320,11 +319,14 @@ fn get_index(username: Option<String>, token: Option<String>, template: &Handleb
    let token = token.unwrap_or_else(|| String::from(""));

    let mut debug = false;
    let mut template_file = "index";

    if let Some(query_str) = parts.uri.query() {
        for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
            if k == "debug" && v != "0" && v != "false" {
                debug = true;
            } else if k == "console" {
                template_file = "console";
            }
        }
    }
@ -338,12 +340,12 @@ fn get_index(username: Option<String>, token: Option<String>, template: &Handleb

    let mut ct = "text/html";

    let index = match template.render("index", &data) {
    let index = match api.render_template(template_file, &data) {
        Ok(index) => index,
        Err(err) => {
            ct = "text/plain";
            format!("Error rendering template: {}", err.desc)
            format!("Error rendering template: {}", err)
        }
    };

    Response::builder()
@ -493,7 +495,7 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
    let (parts, body) = req.into_parts();

    let method = parts.method.clone();
    let (_path, components) = tools::normalize_uri_path(parts.uri.path())?;
    let (path, components) = tools::normalize_uri_path(parts.uri.path())?;

    let comp_len = components.len();

@ -542,7 +544,7 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R

        match api.find_method(&components[2..], method, &mut uri_param) {
            None => {
                let err = http_err!(NOT_FOUND, "Path not found.".to_string());
                let err = http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string());
                return Ok((formatter.format_error)(err));
            }
            Some(api_method) => {
@ -580,15 +582,15 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
        match check_auth(&method, &ticket, &token, &user_info) {
            Ok(username) => {
                let new_token = assemble_csrf_prevention_token(csrf_secret(), &username);
                return Ok(get_index(Some(username), Some(new_token), &api.templates, parts));
                return Ok(get_index(Some(username), Some(new_token), &api, parts));
            }
            _ => {
                tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
                return Ok(get_index(None, None, &api.templates, parts));
                return Ok(get_index(None, None, &api, parts));
            }
        }
    } else {
        return Ok(get_index(None, None, &api.templates, parts));
        return Ok(get_index(None, None, &api, parts));
    }
} else {
    let filename = api.find_alias(&components);
@ -596,5 +598,5 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
    }
}

Err(http_err!(NOT_FOUND, "Path not found.".to_string()))
Err(http_err!(NOT_FOUND, format!("Path '{}' not found.", path).to_string()))
}
11
src/tools.rs
@ -647,3 +647,14 @@ pub fn setup_safe_path_env() {
        std::env::remove_var(name);
    }
}

pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
    let line = match line.iter().position(|&b| !b.is_ascii_whitespace()) {
        Some(n) => &line[n..],
        None => return &[],
    };
    match line.iter().rev().position(|&b| !b.is_ascii_whitespace()) {
        Some(n) => &line[..(line.len() - n)],
        None => &[],
    }
}
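Usage of the new helper is straightforward; it trims leading and trailing ASCII whitespace from a byte slice without allocating:

#[test]
fn strips_both_ends() {
    assert_eq!(strip_ascii_whitespace(b"  foo bar \t\r\n"), &b"foo bar"[..]);
    assert_eq!(strip_ascii_whitespace(b" \t\n "), &b""[..]);
    assert_eq!(strip_ascii_whitespace(b"unchanged"), &b"unchanged"[..]);
}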
@ -46,3 +46,49 @@ pub fn render_bool_with_default_true(value: &Value, _record: &Value) -> Result<S
    let value = value.as_bool().unwrap_or(true);
    Ok((if value { "1" } else { "0" }).to_string())
}

pub struct HumanByte {
    b: usize,
}
impl std::fmt::Display for HumanByte {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.b < 1024 {
            return write!(f, "{} B", self.b);
        }
        let kb: f64 = self.b as f64 / 1024.0;
        if kb < 1024.0 {
            return write!(f, "{:.2} KiB", kb);
        }
        let mb: f64 = kb / 1024.0;
        if mb < 1024.0 {
            return write!(f, "{:.2} MiB", mb);
        }
        let gb: f64 = mb / 1024.0;
        if gb < 1024.0 {
            return write!(f, "{:.2} GiB", gb);
        }
        let tb: f64 = gb / 1024.0;
        if tb < 1024.0 {
            return write!(f, "{:.2} TiB", tb);
        }
        let pb: f64 = tb / 1024.0;
        return write!(f, "{:.2} PiB", pb);
    }
}
impl From<usize> for HumanByte {
    fn from(v: usize) -> Self {
        HumanByte { b: v }
    }
}

#[test]
fn correct_byte_convert() {
    fn convert(b: usize) -> String {
        HumanByte::from(b).to_string()
    }
    assert_eq!(convert(1023), "1023 B");
    assert_eq!(convert(1<<10), "1.00 KiB");
    assert_eq!(convert(1<<20), "1.00 MiB");
    assert_eq!(convert((1<<30) + (103 * 1<<20)), "1.10 GiB");
    assert_eq!(convert((2<<50) + (500 * 1<<40)), "2.49 PiB");
}
@ -222,11 +222,13 @@ where

// /usr/include/linux/fs.h: #define FS_IOC_GETFLAGS _IOR('f', 1, long)
// read Linux file system attributes (see man chattr)
nix::ioctl_read!(read_attr_fd, b'f', 1, usize);
nix::ioctl_read!(read_attr_fd, b'f', 1, libc::c_long);
nix::ioctl_write_ptr!(write_attr_fd, b'f', 2, libc::c_long);

// /usr/include/linux/msdos_fs.h: #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32)
// read FAT file system attributes
nix::ioctl_read!(read_fat_attr_fd, b'r', 0x10, u32);
nix::ioctl_write_ptr!(write_fat_attr_fd, b'r', 0x11, u32);

// From /usr/include/linux/fs.h
// #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
@ -219,7 +219,16 @@ fn parse_calendar_event_incomplete(mut i: &str) -> IResult<&str, CalendarEvent>
            ..Default::default()
        }));
    }
    "monthly" | "weekly" | "yearly" | "quarterly" | "semiannually" => {
    "weekly" => {
        return Ok(("", CalendarEvent {
            hour: vec![DateTimeValue::Single(0)],
            minute: vec![DateTimeValue::Single(0)],
            second: vec![DateTimeValue::Single(0)],
            days: WeekDays::MONDAY,
            ..Default::default()
        }));
    }
    "monthly" | "yearly" | "quarterly" | "semiannually" => {
        return Err(parse_error(i, "unimplemented date or time specification"));
    }
    _ => { /* continue */ }
@ -88,12 +88,27 @@ impl DateTimeValue {
    }
}

/// Calendar events may be used to refer to one or more points in time in a
/// single expression. They are designed after the systemd.time Calendar Events
/// specification, but are not guaranteed to be 100% compatible.
#[derive(Default, Debug)]
pub struct CalendarEvent {
    /// the days in a week this event should trigger
    pub days: WeekDays,
    /// the second(s) this event should trigger
    pub second: Vec<DateTimeValue>, // todo: support float values
    /// the minute(s) this event should trigger
    pub minute: Vec<DateTimeValue>,
    /// the hour(s) this event should trigger
    pub hour: Vec<DateTimeValue>,
    /* FIXME: TODO
    /// the day(s) in a month this event should trigger
    pub day: Vec<DateTimeValue>,
    /// the month(s) in a year this event should trigger
    pub month: Vec<DateTimeValue>,
    /// the years(s) this event should trigger
    pub year: Vec<DateTimeValue>,
    */
}

#[derive(Default)]
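To illustrate the field layout, here is roughly what the parser would produce for the expression "sat 18:15" (a sketch; WeekDays::SATURDAY is assumed by analogy with the MONDAY constant used for 'weekly' above):

let event = CalendarEvent {
    days: WeekDays::SATURDAY,
    hour: vec![DateTimeValue::Single(18)],
    minute: vec![DateTimeValue::Single(15)],
    second: vec![DateTimeValue::Single(0)],
    ..Default::default()
};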
@ -11,6 +11,38 @@ use crate::tools::epoch_now_u64;

pub const TICKET_LIFETIME: i64 = 3600*2; // 2 hours

const TERM_PREFIX: &str = "PBSTERM";

pub fn assemble_term_ticket(
    keypair: &PKey<Private>,
    username: &str,
    path: &str,
    port: u16,
) -> Result<String, Error> {
    assemble_rsa_ticket(
        keypair,
        TERM_PREFIX,
        None,
        Some(&format!("{}{}{}", username, path, port)),
    )
}

pub fn verify_term_ticket(
    keypair: &PKey<Public>,
    username: &str,
    path: &str,
    port: u16,
    ticket: &str,
) -> Result<(i64, Option<String>), Error> {
    verify_rsa_ticket(
        keypair,
        TERM_PREFIX,
        ticket,
        Some(&format!("{}{}{}", username, path, port)),
        -300,
        TICKET_LIFETIME,
    )
}

pub fn assemble_rsa_ticket(
    keypair: &PKey<Private>,
@ -82,7 +82,7 @@ pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
    let mut size = 256;
    let mut buffer = vec::undefined(size);
    let mut bytes = unsafe {
        libc::flistxattr(fd, buffer.as_mut_ptr() as *mut i8, buffer.len())
        libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
    };
    while bytes < 0 {
        let err = Errno::last();
@ -96,7 +96,7 @@ pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
        // Retry to read the list with new buffer
        buffer.resize(size, 0);
        bytes = unsafe {
            libc::flistxattr(fd, buffer.as_mut_ptr() as *mut i8, buffer.len())
            libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
        };
    }
    buffer.truncate(bytes as usize);
@ -125,7 +125,7 @@ pub fn fgetxattr(fd: RawFd, name: &CStr) -> Result<Vec<u8>, nix::errno::Errno> {
    }
    buffer.resize(size, 0);
    bytes = unsafe {
        libc::fgetxattr(fd, name.as_ptr() as *const i8, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
        libc::fgetxattr(fd, name.as_ptr() as *const libc::c_char, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
    };
}
buffer.resize(bytes as usize, 0);
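The switch from *mut i8 to *mut libc::c_char matters for portability: c_char is i8 on x86_64 Linux but u8 on arm/aarch64, so a hard-coded i8 pointer only matches libc signatures on targets where the two coincide. A minimal illustration:

fn main() {
    let mut buf = [0u8; 16];
    // compiles and matches libc signatures on every target,
    // whatever c_char aliases to there
    let _portable = buf.as_mut_ptr() as *mut libc::c_char;
    // this cast itself compiles everywhere, but passing it where
    // *mut c_char is expected fails on targets where c_char is u8
    let _fragile = buf.as_mut_ptr() as *mut i8;
}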
@ -209,7 +209,7 @@ Ext.define('PBS.Dashboard', {
        autoDestroy: true,
        proxy: {
            type: 'proxmox',
            url: '/api2/json/subscription'
            url: '/api2/json/nodes/localhost/subscription'
        },
        listeners: {
            load: 'updateSubscription'
@ -8,6 +8,7 @@ JSSRC= \
	form/UserSelector.js \
	form/RemoteSelector.js \
	form/DataStoreSelector.js \
	form/CalendarEvent.js \
	data/RunningTasksStore.js \
	button/TaskButton.js \
	config/UserView.js \
@ -55,6 +56,10 @@ js/proxmox-backup-gui.js: js OnlineHelpInfo.js ${JSSRC}
	cat OnlineHelpInfo.js ${JSSRC} >$@.tmp
	mv $@.tmp $@

.PHONY: lint
lint: ${JSSRC}
	eslint ${JSSRC}

.PHONY: clean
clean:
	find . -name '*~' -exec rm {} ';'
@ -86,7 +86,15 @@ Ext.define('PBS.ServerStatus', {
        iconCls: 'fa fa-power-off'
    });

    me.tbar = [ restartBtn, shutdownBtn, '->', { xtype: 'proxmoxRRDTypeSelector' } ];
    var consoleBtn = Ext.create('Proxmox.button.Button', {
        text: gettext('Console'),
        iconCls: 'fa fa-terminal',
        handler: function() {
            Proxmox.Utils.openXtermJsViewer('shell', 0, Proxmox.NodeName);
        }
    });

    me.tbar = [ consoleBtn, restartBtn, shutdownBtn, '->', { xtype: 'proxmoxRRDTypeSelector' } ];

    var rrdstore = Ext.create('Proxmox.data.RRDStore', {
        rrdurl: "/api2/json/nodes/localhost/rrd",
@ -37,7 +37,7 @@ Ext.define('PBS.Subscription', {
        me.rstore.load();
    };

    var baseurl = '/subscription';
    var baseurl = '/nodes/localhost/subscription';

    var render_status = function(value) {
@ -13,7 +13,7 @@ Ext.define('PBS.data.RunningTasksStore', {
            proxy: {
                type: 'proxmox',
                // maybe separate api call?
                url: '/api2/json/nodes/localhost/tasks?running=1',
                url: '/api2/json/nodes/localhost/tasks?running=1&limit=100',
            },
        });
        me.callParent([config]);
64
www/form/CalendarEvent.js
Normal file
@ -0,0 +1,64 @@
Ext.define('PBS.data.CalendarEventExamples', {
    extend: 'Ext.data.Store',
    alias: 'store.calendarEventExamples',

    fields: ['value', 'text'],
    data: [
        //FIXME { value: '*/30', text: Ext.String.format(gettext("Every {0} minutes"), 30) },
        { value: 'hourly', text: gettext("Every hour") },
        //FIXME { value: '*/2:00', text: gettext("Every two hours") },
        { value: '2,22:30', text: gettext("Every day") + " 02:30, 22:30" },
        { value: 'daily', text: gettext("Every day") + " 00:00" },
        { value: 'mon..fri', text: gettext("Monday to Friday") + " 00:00" },
        //FIXME{ value: 'mon..fri */1:00', text: gettext("Monday to Friday") + ': ' + gettext("hourly") },
        { value: 'sat 18:15', text: gettext("Every Saturday") + " 18:15" },
        //FIXME{ value: 'monthly', text: gettext("Every 1st of Month") + " 00:00" }, // not yet possible..
    ],
});

Ext.define('PBS.form.CalendarEvent', {
    extend: 'Ext.form.field.ComboBox',
    xtype: 'pbsCalendarEvent',

    editable: true,

    valueField: 'value',
    displayField: 'text',
    queryMode: 'local',

    config: {
        deleteEmpty: true,
    },
    // override framework function to implement deleteEmpty behaviour
    getSubmitData: function() {
        let me = this, data = null;
        if (!me.disabled && me.submitValue) {
            let val = me.getSubmitValue();
            if (val !== null && val !== '' && val !== '__default__') {
                data = {};
                data[me.getName()] = val;
            } else if (me.getDeleteEmpty()) {
                data = {};
                data.delete = me.getName();
            }
        }
        return data;
    },

    store: {
        type: 'calendarEventExamples',
    },

    tpl: [
        '<ul class="x-list-plain"><tpl for=".">',
        '<li role="option" class="x-boundlist-item">{text}</li>',
        '</tpl></ul>',
    ],

    displayTpl: [
        '<tpl for=".">',
        '{value}',
        '</tpl>',
    ],
});
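The deleteEmpty override above is what lets the edit windows below clear a schedule cleanly: an empty value submits a deletion of the property instead of an empty string. A brief illustration with hypothetical values (not part of the patch):

    // Hypothetical illustration of the submit behaviour of PBS.form.CalendarEvent.
    let field = Ext.create('PBS.form.CalendarEvent', { name: 'gc-schedule' });

    field.setValue('sat 18:15');   // one of the example calendar events
    field.getSubmitData();         // => { 'gc-schedule': 'sat 18:15' }

    field.setValue('');            // cleared by the user
    field.getSubmitData();         // => { delete: 'gc-schedule' } (deleteEmpty defaults to true)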
@ -15,14 +15,16 @@ Ext.define('PBS.DataStoreEdit', {
        let baseurl = '/api2/extjs/config/datastore';

        me.isCreate = !name;
        if (!me.isCreate) {
            me.defaultFocus = 'textfield[name=comment]';
        }
        me.url = name ? baseurl + '/' + name : baseurl;
        me.method = name ? 'PUT' : 'POST';
        me.autoLoad = !!name;
        return {};
    },

    items: [
        {
            items: {
                xtype: 'tabpanel',
                bodyPadding: 10,
                items: [
@ -50,26 +52,26 @@ Ext.define('PBS.DataStoreEdit', {
                            emptyText: gettext('An absolute path'),
                        },
                    ],
                    column2: [
                        {
                            xtype: 'proxmoxtextfield',
                            xtype: 'pbsCalendarEvent',
                            name: 'gc-schedule',
                            fieldLabel: gettext("GC Schedule"),
                            emptyText: gettext('none'),
                            cbind: {
                                deleteEmpty: '{!isCreate}',
                            },
                        },
                        {
                            xtype: 'proxmoxtextfield',
                            xtype: 'pbsCalendarEvent',
                            name: 'prune-schedule',
                            fieldLabel: gettext("Prune Schedule"),
                            emptyText: gettext('none'),
                            cbind: {
                                deleteEmpty: '{!isCreate}',
                            },
                        },
                    ],

                    columnB: [
                        {
                            xtype: 'textfield',
@ -113,7 +115,6 @@ Ext.define('PBS.DataStoreEdit', {
                            allowBlank: true,
                        },
                    ],

                    column2: [
                        {
                            xtype: 'proxmoxintegerfield',
@ -146,9 +147,7 @@ Ext.define('PBS.DataStoreEdit', {
                            allowBlank: true,
                        },
                    ],
                }
            ]
        }
    },
    ],
},
});
@ -28,7 +28,7 @@ Ext.define('PBS.window.SyncJobEdit', {
        xtype: 'inputpanel',
        column1: [
            {
                fieldLabel: gettext('Sync Job'),
                fieldLabel: gettext('Sync Job ID'),
                xtype: 'pmxDisplayEditField',
                name: 'id',
                renderer: Ext.htmlEncode,
@ -39,23 +39,23 @@ Ext.define('PBS.window.SyncJobEdit', {
                },
            },
            {
                fieldLabel: gettext('Remote'),
                fieldLabel: gettext('Source Remote'),
                xtype: 'pbsRemoteSelector',
                allowBlank: false,
                name: 'remote',
            },
            {
                fieldLabel: gettext('Source Datastore'),
                xtype: 'proxmoxtextfield',
                allowBlank: false,
                name: 'remote-store',
            },
            {
                fieldLabel: gettext('Local Datastore'),
                xtype: 'pbsDataStoreSelector',
                allowBlank: false,
                name: 'store',
            },
            {
                fieldLabel: gettext('Remote Datastore'),
                xtype: 'proxmoxtextfield',
                allowBlank: false,
                name: 'remote-store',
            },
        ],

        column2: [
@ -64,12 +64,13 @@ Ext.define('PBS.window.SyncJobEdit', {
                xtype: 'proxmoxcheckbox',
                name: 'remove-vanished',
                uncheckedValue: false,
                value: true,
                value: false,
            },
            {
                fieldLabel: gettext('Schedule'),
                xtype: 'proxmoxtextfield',
                xtype: 'pbsCalendarEvent',
                name: 'schedule',
                emptyText: gettext('none'),
                cbind: {
                    deleteEmpty: '{!isCreate}',
                },
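Together with the CalendarEvent field, sync jobs can now be scheduled with the same calendar-event strings. A hedged sketch of creating one over the API; the '/config/sync' path and all example values are assumptions inferred from the form fields above, not confirmed by this diff:

    // Hypothetical sketch: create a sync job whose schedule is a calendar event.
    // Path and values are assumptions inferred from the SyncJobEdit form fields.
    Proxmox.Utils.API2Request({
        url: '/config/sync',
        method: 'POST',
        params: {
            id: 'pull-store2-daily',      // hypothetical job id
            remote: 'remote1',            // source remote (hypothetical)
            'remote-store': 'store2',     // source datastore on that remote
            store: 'local-store',         // local target datastore
            schedule: 'daily',            // calendar event, see examples above
            'remove-vanished': false,     // matches the new default
        },
        success: function() { /* job created */ },
    });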