Compare commits

..

20 Commits

Author SHA1 Message Date
3e4a67f350 bump version to 0.8.16-1 2020-09-11 15:55:37 +02:00
e0e5b4426a BackupDir: make constructor fallible
since converting from an i64 epoch timestamp to DateTime is not always
possible. Previously, passing an invalid backup-time from client to server
(or vice versa) panicked the corresponding tokio task. Now we get proper
error messages that include the invalid timestamp.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:49:35 +02:00
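
A minimal sketch of the pattern this commit introduces (the actual change is in the src/backup/backup_info.rs hunk further down), assuming chrono 0.4 and anyhow; the function name here is illustrative, not part of the real API:

use anyhow::{bail, Error};
use chrono::{DateTime, LocalResult, TimeZone, Utc};

// Fallible epoch -> DateTime conversion, mirroring BackupDir::new_with_group():
// out-of-range timestamps now produce an error instead of panicking the task.
fn backup_time_from_epoch(timestamp: i64) -> Result<DateTime<Utc>, Error> {
    match Utc.timestamp_opt(timestamp, 0) {
        LocalResult::Single(time) => Ok(time),
        _ => bail!("can't create backup time from invalid timestamp {}", timestamp),
    }
}

fn main() {
    assert!(backup_time_from_epoch(1_599_833_375).is_ok());
    // i64::MAX seconds is far outside chrono's representable range
    assert!(backup_time_from_epoch(i64::MAX).is_err());
}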
7158b304f5 handle invalid mtime when formatting entries
otherwise, operations like the catalog shell panic when viewing pxar archives
containing such entries, e.g. ones with an mtime very far in the future.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:48:43 +02:00
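
The display-formatting fallback (visible in the catalog and pxar hunks below) can be sketched like this, assuming chrono 0.4; the helper is a simplified stand-in for the real format_mtime():

use chrono::{Local, LocalResult, TimeZone};

// Format an mtime for display; print the raw epoch value instead of panicking
// (or silently substituting epoch 0) when chrono can't represent it.
fn format_mtime(secs: i64, nanos: u32) -> String {
    match Local.timestamp_opt(secs, nanos) {
        LocalResult::Single(t) => t.format("%Y-%m-%d %H:%M:%S").to_string(),
        _ => format!("{}.{}", secs, nanos),
    }
}

fn main() {
    println!("{}", format_mtime(1_599_833_323, 0)); // normal timestamp
    println!("{}", format_mtime(i64::MAX, 0));      // printed as-is, no panic
}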
833eca6d2f use non-panicky timestamp_opt where appropriate
by either printing the original, out-of-range timestamp as-is, or
bailing with a proper error message instead of panicking.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:48:24 +02:00
151acf5d96 don't truncate DateTime nanoseconds
where we don't care about them anyway..

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:48:10 +02:00
4a363fb4a7 catalog dump: preserve original mtime
even if it can't be handled by chrono. Silently replacing it with epoch
0 is confusing.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-11 15:43:54 +02:00
229adeb746 ui/docs: add onlineHelp button for syncjobs
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:54 +02:00
1eff9a1e89 docs: add section for calendar events
and move the info defined in 'Schedules' there.
The explanation of calendar events is inspired by the systemd.time
manpage and the PVE docs (the examples in particular are mostly
copied/adapted from there).

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:42 +02:00
ed4f0a0edc ui: fix calendarevent examples
*/x is valid syntax for us, but not for systemd, so to avoid confusing
users, write it in a form that systemd would also accept.

Also, a timespec must have at least hours and minutes.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:32 +02:00
13bed6226e tools/systemd/parse_time: enable */x syntax for calendar events
we support this in PVE, so also support it here for a more consistent
syntax.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-11 12:17:22 +02:00
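
How `*/x` maps onto the parser's repeated-value representation can be sketched as follows; the enum mirrors the `DateTimeValue::Repeated(start, repeat)` variant from the src/tools/systemd/parse_time.rs hunk below, but is a simplified model, not the actual implementation:

// Simplified model of one parsed calendar-event component.
#[derive(Debug)]
enum DateTimeValue {
    Single(u32),
    Range(u32, u32),
    // `*/x` (and `start/x`) parse into a repetition: start, then every `repeat` steps
    Repeated(u32, u32),
}

// Expand a component into the concrete values it matches, up to `max` (exclusive).
fn matching_values(comp: &DateTimeValue, max: u32) -> Vec<u32> {
    match *comp {
        DateTimeValue::Single(v) => vec![v],
        DateTimeValue::Range(a, b) => (a..=b).collect(),
        DateTimeValue::Repeated(start, repeat) => (start..max).step_by(repeat as usize).collect(),
    }
}

fn main() {
    // "*/5" in the minute field becomes Repeated(0, 5): minutes 0, 5, 10, ... 55
    println!("{:?}", matching_values(&DateTimeValue::Repeated(0, 5), 60));
}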
d937daedb3 docs: set html img width limitation through css
avoid hardcoding the width in the docs themselves, so that other render
outputs can choose another size.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:10:08 +02:00
8cce51135c docs: do not render TODOs in release builds
they are not useful for end users.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:09:00 +02:00
0cfe1b3f13 docs: set GmbH as copyright holder
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:08:36 +02:00
05c16a6e59 docs: use alabaster theme
It's not all perfect (yet), but it's way cleaner and simpler to use than
the previous sphinxdoc theme.

Custom CSS handles the scrolling for the fixed sidebar and makes some
other slight adjustments.

The main issue for now is that the "Developer Appendix" is always shown
in the navigation tree, but we only include that toctree for dev
builds.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-11 11:08:13 +02:00
3294b516d3 faq: fix typo
In note block:
    Proxmox Packup Server -> Proxmox Backup Server

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-10 15:18:13 +02:00
139bcedc53 benchmark: update TLS reference speed
We are now faster with recent patches.
2020-09-10 12:55:43 +02:00
cf9ea3c4c7 server: set http2 max frame size
else we get the default of 16 KiB, which is quite low for our use case.
This improves the TLS upload benchmark speed by about 30-40% for me.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
2020-09-10 12:43:51 +02:00
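
These settings amount to tuning hyper's per-connection HTTP/2 parameters (see the backup/reader API hunks below; the Cargo.toml bump of hyper to 0.13.6 is presumably what makes http2_max_frame_size available). A minimal sketch, assuming hyper 0.13 with a compatible tokio 0.2 runtime and plain TCP in place of the real TLS setup:

use std::convert::Infallible;

use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut listener = TcpListener::bind("127.0.0.1:8008").await?;

    loop {
        let (stream, _peer) = listener.accept().await?;

        let mut http = Http::new();
        http.http2_only(true);
        // large flow-control windows for big chunk uploads
        let window_size: u32 = 32 * 1024 * 1024; // max = (1 << 31) - 2
        http.http2_initial_stream_window_size(window_size);
        http.http2_initial_connection_window_size(window_size);
        // raise the frame size from the 16 KiB default; this is the change
        // credited with the ~30-40% TLS upload benchmark improvement
        http.http2_max_frame_size(4 * 1024 * 1024);

        let service = service_fn(|_req: Request<Body>| async {
            Ok::<_, Infallible>(Response::new(Body::from("ok")))
        });

        tokio::spawn(async move {
            if let Err(err) = http.serve_connection(stream, service).await {
                eprintln!("connection error: {}", err);
            }
        });
    }
}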
e84fde3e14 docs: faq: spell out PBS
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-10 12:35:04 +02:00
1de47507ff Add section "FAQ"
Adds an FAQ to the docs, based on questions that have been appearing
on the forum.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2020-09-10 11:33:00 +02:00
1a9948a488 examples/upload-speed.rs: pass new benchmark parameter 2020-09-10 09:34:51 +02:00
30 changed files with 365 additions and 127 deletions

View File

@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "0.8.15"
version = "0.8.16"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"
@ -26,7 +26,7 @@ futures = "0.3"
h2 = { version = "0.2", features = ["stream"] }
handlebars = "3.0"
http = "0.2"
hyper = "0.13"
hyper = "0.13.6"
lazy_static = "1.4"
libc = "0.2"
log = "0.4"

22
debian/changelog vendored
View File

@ -1,3 +1,25 @@
rust-proxmox-backup (0.8.16-1) unstable; urgency=medium
* BackupDir: make constructor fallible
* handle invalid mtime when formatting entries
* ui/docs: add onlineHelp button for syncjobs
* docs: add section for calendar events
* tools/systemd/parse_time: enable */x syntax for calendar events
* docs: set html img width limitation through css
* docs: use alabaster theme
* server: set http2 max frame size
* doc: Add section "FAQ"
-- Proxmox Support Team <support@proxmox.com> Fri, 11 Sep 2020 15:54:57 +0200
rust-proxmox-backup (0.8.15-1) unstable; urgency=medium
* verify: skip benchmark directory

View File

@ -76,6 +76,8 @@ onlinehelpinfo:
.PHONY: html
html: ${GENERATED_SYNOPSIS}
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
cp custom.css $(BUILDDIR)/html/_static/
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

View File

@ -150,7 +150,6 @@ Disk Management
~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-disks.png
:width: 230
:align: right
:alt: List of disks
@ -182,7 +181,6 @@ To initialize a disk with a new GPT, use the ``initialize`` subcommand:
# proxmox-backup-manager disk initialize sdX
.. image:: images/screenshots/pbs-gui-disks-dir-create.png
:width: 230
:align: right
:alt: Create a directory
@ -193,19 +191,11 @@ web interface and creating one from there. The following command creates an
automatically create a datastore on the disk (in this case ``sdd``). This will
create a datastore at the location ``/mnt/datastore/store1``:
|
.. code-block:: console
# proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
create datastore 'store1' on disk sdd
Percentage done: 1
...
Percentage done: 99
TASK OK
.. image:: images/screenshots/pbs-gui-disks-zfs-create.png
:width: 230
:align: right
:alt: Create a directory
@ -214,15 +204,9 @@ You can also create a ``zpool`` with various raid levels from **Administration
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
mounts it on the root directory (default):
|
.. code-block:: console
# proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
create Mirror zpool 'zpool1' on devices 'sdb,sdc'
# "zpool" "create" "-o" "ashift=12" "zpool1" "mirror" "sdb" "sdc"
TASK OK
.. note::
You can also pass the ``--add-datastore`` parameter here, to automatically
@ -243,7 +227,6 @@ Datastore Configuration
~~~~~~~~~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-datastore.png
:width: 230
:align: right
:alt: Datastore Overview
@ -254,12 +237,11 @@ settings of how many backup snapshots for each interval of ``hourly``,
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
number of backups to keep in that store. :ref:`Pruning <pruning>` and
:ref:`garbage collection <garbage-collection>` can also be configured to run
periodically based on a configured :term:`schedule` per datastore.
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
Creating a Datastore
^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
:width: 230
:align: right
:alt: Create a datastore
@ -382,7 +364,6 @@ User Management
~~~~~~~~~~~~~~~
.. image:: images/screenshots/pbs-gui-user-management.png
:width: 230
:align: right
:alt: User management
@ -411,7 +392,6 @@ users:
└─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
:width: 230
:align: right
:alt: Add a new user
@ -503,7 +483,6 @@ following roles exist:
**RemoteSyncOperator**
Is allowed to read data from a remote.
:width: 230
:align: right
:alt: Add permissions for user
@ -571,7 +550,6 @@ To get a list of available interfaces, use the following command:
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
.. image:: images/screenshots/pbs-gui-network-create-bond.png
:width: 230
:align: right
:alt: Add a network interface
@ -632,7 +610,6 @@ installation, from which you can `sync` datastores to a local datastore with a
.. image:: images/screenshots/pbs-gui-remote-add.png
.. image:: images/screenshots/pbs-gui-permissions-add.png
:width: 230
:align: right
:alt: Add a remote
@ -667,18 +644,18 @@ Use the ``list``, ``show``, ``update``, ``remove`` subcommands of
└──────┴──────────────┴──────────┴───────────────────────────────────────────┴─────────┘
# proxmox-backup-manager remote remove pbs2
.. _syncjobs:
Sync Jobs
~~~~~~~~~
.. image:: images/screenshots/pbs-gui-syncjob-add.png
:width: 230
:align: right
:alt: Add a remote
Sync jobs are configured to pull the contents of a datastore on a **Remote** to a
local datastore. You can either start a sync job manually on the GUI or
provide it with a :term:`schedule` to run regularly. You can manage sync jobs
provide it with a schedule (see :ref:`calendar-events`) to run regularly. You can manage sync jobs
under **Configuration -> Sync Jobs** in the web interface, or using the
``proxmox-backup-manager sync-job`` command:

100
docs/calendarevents.rst Normal file
View File

@ -0,0 +1,100 @@
.. _calendar-events:
Calendar Events
===============
Introduction and Format
-----------------------
Certain tasks, for example pruning and garbage collection, need to be
performed on a regular basis. Proxmox Backup Server uses a format inspired
by the systemd Time and Date Specification (see `systemd.time manpage`_)
called `calendar events` for its schedules.
`Calendar events` are expressions to specify one or more points in time.
They are mostly compatible with systemd's calendar events.
The general format is as follows:
.. code-block:: console
:caption: Calendar event
[WEEKDAY] [[YEARS-]MONTHS-DAYS] [HOURS:MINUTES[:SECONDS]]
Note that at least one of the weekday, date, or time parts has to be present.
If the weekday or date part is omitted, all (week)days are included.
If the time part is omitted, the time 00:00:00 is implied
(e.g. '2020-01-01' refers to '2020-01-01 00:00:00').
Weekdays are specified with their abbreviated English names:
`mon, tue, wed, thu, fri, sat, sun`.
Each field can contain multiple values in the following formats:
* comma-separated: e.g., 01,02,03
* as a range: e.g., 01..10
* as a repetition: e.g., 05/10 (meaning start at 5, then repeat every 10)
* and a combination of the above: e.g., 01,05..10,12/02
* or a `*` for every possible value: e.g., \*:00
There are some special values that have specific meaning:
================================= ==============================
Value Syntax
================================= ==============================
`minutely` `*-*-* *:*:00`
`hourly` `*-*-* *:00:00`
`daily` `*-*-* 00:00:00`
`weekly` `mon *-*-* 00:00:00`
`monthly` `*-*-01 00:00:00`
`yearly` or `annually` `*-01-01 00:00:00`
`quarterly` `*-01,04,07,10-01 00:00:00`
`semiannually` or `semi-annually` `*-01,07-01 00:00:00`
================================= ==============================
Here is a table with some useful examples:
======================== ============================= ===================================
Example Alternative Explanation
======================== ============================= ===================================
`mon,tue,wed,thu,fri` `mon..fri` Every working day at 00:00
`sat,sun` `sat..sun` Only on weekends at 00:00
`mon,wed,fri` -- Monday, Wednesday, Friday at 00:00
`12:05` -- Every day at 12:05 PM
`*:00/5` `0/1:0/5` Every five minutes
`mon..wed *:30/10` `mon,tue,wed *:30/10` Monday, Tuesday, Wednesday 30, 40 and 50 minutes after every full hour
`mon..fri 8..17,22:0/15` -- Every working day every 15 minutes between 8 AM and 6 PM and between 10 PM and 11 PM
`fri 12..13:5/20` `fri 12,13:5/20` Friday at 12:05, 12:25, 12:45, 13:05, 13:25 and 13:45
`12,14,16,18,20,22:5` `12/2:5` Every day starting at 12:05 until 22:05, every 2 hours
`*:*` `0/1:0/1` Every minute (minimum interval)
`*-05` -- On the 5th day of every month
`Sat *-1..7 15:00` -- First Saturday of each month at 15:00
`2015-10-21` -- 21st October 2015 at 00:00
======================== ============================= ===================================
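
For illustration, the `*:00/5` entry from the table above (every five minutes) can be approximated with plain chrono arithmetic; this is a sketch of the semantics only, assuming chrono 0.4, and not how the proxmox-backup-proxy scheduler is implemented:

use chrono::{DateTime, Duration, Local, Timelike};

// Next trigger after `now` for the event `*:00/5`, i.e. minutes 0, 5, 10, ...
fn next_five_minute_mark(now: DateTime<Local>) -> DateTime<Local> {
    // drop seconds/nanoseconds, then jump to the next multiple of five minutes
    let now = now.with_second(0).unwrap().with_nanosecond(0).unwrap();
    let next_minute = (now.minute() / 5 + 1) * 5;
    now + Duration::minutes(i64::from(next_minute - now.minute()))
}

fn main() {
    println!("next run: {}", next_five_minute_mark(Local::now()));
}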
Differences to systemd
----------------------
Not all features of systemd calendar events are implemented:
* no unix timestamps (e.g. `@12345`): instead use date and time to specify
a specific point in time
* no timezone: all schedules use the set timezone on the server
* no sub-second resolution
* no reverse day syntax (e.g. 2020-03~01)
* no repetition of ranges (e.g. 1..10/2)
Notes on scheduling
-------------------
In `Proxmox Backup`_, scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks every minute whether any
job schedules are due. This means that even though `calendar events`
can contain seconds, schedules are effectively only evaluated once
per minute.

Also, all schedules are evaluated against the timezone set on the
`Proxmox Backup`_ server.

View File

@ -74,7 +74,7 @@ rst_epilog = epilog_file.read()
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2020, Proxmox Support Team'
copyright = '2019-2020, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'
# The version info for the project you're documenting, acts as replacement for
@ -148,7 +148,7 @@ pygments_style = 'sphinx'
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
todo_include_todos = not tags.has('release')
# -- Options for HTML output ----------------------------------------------
@ -156,13 +156,32 @@ todo_include_todos = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'fixed_sidebar': True,
#'sidebar_includehidden': False,
'sidebar_collapse': False, # FIXME: documented, but does not work?!
'show_relbar_bottom': True, # FIXME: documented, but does not work?!
'show_powered_by': False,
'logo': 'proxmox-logo.svg',
'logo_name': True, # show project name below logo
#'logo_text_align': 'center',
#'description': 'Fast, Secure & Efficient.',
'sidebar_width': '300px',
'page_width': '1280px',
# font styles
'head_font_family': 'Lato, sans-serif',
'caption_font_family': 'Lato, sans-serif',
'caption_font_size': '20px',
'font_family': 'Open Sans, sans-serif',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
@ -179,7 +198,7 @@ html_theme = 'sphinxdoc'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'images/proxmox-logo.svg'
#html_logo = 'images/proxmox-logo.svg' # replaced by html_theme_options.logo
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
@ -232,7 +251,7 @@ html_static_path = ['_static']
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#

15
docs/custom.css Normal file
View File

@ -0,0 +1,15 @@
div.sphinxsidebar {
height: calc(100% - 20px);
overflow: auto;
}
h1.logo-name {
font-size: 24px;
}
div.body img {
width: 250px;
}
pre {
padding: 5px 10px;
}

View File

@ -38,3 +38,6 @@
.. _RFC3399: https://tools.ietf.org/html/rfc3339
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html

71
docs/faq.rst Normal file
View File

@ -0,0 +1,71 @@
FAQ
===
What distribution is Proxmox Backup Server (PBS) based on?
----------------------------------------------------------
Proxmox Backup Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.
Which platforms are supported as a backup source (client)?
----------------------------------------------------------
The client tool works on most modern Linux systems, meaning you are not limited
to backing up Debian-based systems.
Will Proxmox Backup Server run on a 32-bit processor?
-----------------------------------------------------
Proxmox Backup Server only supports 64-bit CPUs (AMD or Intel). There are no
future plans to support 32-bit processors.
How long will my Proxmox Backup Server version be supported?
------------------------------------------------------------
+-----------------------+--------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+====================+===============+============+====================+
|Proxmox Backup 1.x | Debian 10 (Buster) | tba | tba | tba |
+-----------------------+--------------------+---------------+------------+--------------------+
Can I copy or synchronize my datastore to another location?
-----------------------------------------------------------
Proxmox Backup Server allows you to copy or synchronize datastores to other
locations, through the use of *Remotes* and *Sync Jobs*. *Remote* is the term
given to a separate server, which has a datastore that can be synced to a local store.
A *Sync Job* is the process which is used to pull the contents of a datastore from
a *Remote* to a local datastore.
Can Proxmox Backup Server verify data integrity of a backup archive?
--------------------------------------------------------------------
Proxmox Backup Server uses a built-in SHA-256 checksum algorithm to ensure
data integrity. Within each backup, a manifest file (index.json) is created,
which contains a list of all the backup files, along with their sizes and
checksums. This manifest file is used to verify the integrity of each backup.
When backing up to remote servers, do I have to trust the remote server?
------------------------------------------------------------------------
Proxmox Backup Server supports client-side encryption, meaning your data is
encrypted before it reaches the server. Thus, in the event that an attacker
gains access to the server, they will not be able to read the data.
.. note:: Encryption is not enabled by default. To set up encryption, see the
`Encryption
<https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
of the Proxmox Backup Server Administration Guide.
Is the backup incremental/deduplicated?
---------------------------------------
With Proxmox Backup Server, backups are sent incrementally and data is
deduplicated on the server.
This minimizes both the storage consumed and the network impact.

View File

@ -51,14 +51,3 @@ Glossary
A remote Proxmox Backup Server installation and credentials for a user on it.
You can pull datastores from a remote to a local datastore in order to
have redundant backups.
Schedule
Certain tasks, for example pruning and garbage collection, need to be
performed on a regular basis. Proxmox Backup Server uses a subset of the
`systemd Time and Date Specification
<https://www.freedesktop.org/software/systemd/man/systemd.time.html#>`_.
The subset currently supports time of day specifications and weekdays, in
addition to the shorthand expressions 'minutely', 'hourly', 'daily'.
There is no support for specifying timezones, the tasks are run in the
timezone configured on the server.

View File

@ -24,6 +24,7 @@ in the section entitled "GNU Free Documentation License".
installation.rst
administration-guide.rst
sysadmin.rst
faq.rst
.. raw:: latex
@ -36,6 +37,7 @@ in the section entitled "GNU Free Documentation License".
command-syntax.rst
file-formats.rst
backup-protocol.rst
calendarevents.rst
glossary.rst
GFDL.rst
@ -49,4 +51,3 @@ in the section entitled "GNU Free Documentation License".
* :ref:`genindex`

View File

@ -18,7 +18,7 @@ async fn upload_speed() -> Result<f64, Error> {
let backup_time = chrono::Utc::now();
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
println!("start upload speed test");
let res = client.upload_speedtest(true).await?;

View File

@ -230,7 +230,7 @@ pub fn list_snapshot_files(
let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
@ -280,7 +280,7 @@ fn delete_snapshot(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store)?;
@ -490,7 +490,7 @@ pub fn verify(
match (backup_type, backup_id, backup_time) {
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
let dir = BackupDir::new(backup_type, backup_id, backup_time);
let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
backup_dir = Some(dir);
}
(Some(backup_type), Some(backup_id), None) => {
@ -897,7 +897,7 @@ fn download_file(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@ -970,7 +970,7 @@ fn download_file_decoded(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@ -1083,7 +1083,7 @@ fn upload_backup_log(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
@ -1159,7 +1159,7 @@ fn catalog(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@ -1276,7 +1276,7 @@ fn pxar_file_download(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@ -1417,7 +1417,7 @@ fn get_notes(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@ -1470,7 +1470,7 @@ fn set_notes(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

View File

@ -114,7 +114,7 @@ async move {
}
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time)?;
let _last_guard = if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
@ -159,6 +159,7 @@ async move {
let window_size = 32*1024*1024; // max = (1 << 31) - 2
http.http2_initial_stream_window_size(window_size);
http.http2_initial_connection_window_size(window_size);
http.http2_max_frame_size(4*1024*1024);
http.serve_connection(conn, service)
.map_err(Error::from)

View File

@ -83,7 +83,7 @@ fn upgrade_to_backup_reader_protocol(
let env_type = rpcenv.env_type();
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let path = datastore.base_path();
//let files = BackupInfo::list_files(&path, &backup_dir)?;
@ -121,6 +121,7 @@ fn upgrade_to_backup_reader_protocol(
let window_size = 32*1024*1024; // max = (1 << 31) - 2
http.http2_initial_stream_window_size(window_size);
http.http2_initial_connection_window_size(window_size);
http.http2_max_frame_size(4*1024*1024);
http.serve_connection(conn, service)
.map_err(Error::from)

View File

@ -2,9 +2,10 @@ use crate::tools;
use anyhow::{bail, format_err, Error};
use regex::Regex;
use std::convert::TryFrom;
use std::os::unix::io::RawFd;
use chrono::{DateTime, TimeZone, SecondsFormat, Utc};
use chrono::{DateTime, LocalResult, TimeZone, SecondsFormat, Utc};
use std::path::{PathBuf, Path};
use lazy_static::lazy_static;
@ -106,7 +107,7 @@ impl BackupGroup {
if file_type != nix::dir::Type::Directory { return Ok(()); }
let dt = backup_time.parse::<DateTime<Utc>>()?;
let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp());
let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp())?;
let files = list_backup_files(l2_fd, backup_time)?;
list.push(BackupInfo { backup_dir, files });
@ -208,19 +209,22 @@ pub struct BackupDir {
impl BackupDir {
pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Self
pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Result<Self, Error>
where
T: Into<String>,
U: Into<String>,
{
// Note: makes sure that nanoseconds is 0
Self {
group: BackupGroup::new(backup_type.into(), backup_id.into()),
backup_time: Utc.timestamp(timestamp, 0),
}
let group = BackupGroup::new(backup_type.into(), backup_id.into());
BackupDir::new_with_group(group, timestamp)
}
pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Self {
Self { group, backup_time: Utc.timestamp(timestamp, 0) }
pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Result<Self, Error> {
let backup_time = match Utc.timestamp_opt(timestamp, 0) {
LocalResult::Single(time) => time,
_ => bail!("can't create BackupDir with invalid backup time {}", timestamp),
};
Ok(Self { group, backup_time })
}
pub fn group(&self) -> &BackupGroup {
@ -257,7 +261,7 @@ impl std::str::FromStr for BackupDir {
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
Ok(BackupDir::from((group, backup_time.timestamp())))
BackupDir::try_from((group, backup_time.timestamp()))
}
}
@ -270,9 +274,11 @@ impl std::fmt::Display for BackupDir {
}
}
impl From<(BackupGroup, i64)> for BackupDir {
fn from((group, timestamp): (BackupGroup, i64)) -> Self {
Self { group, backup_time: Utc.timestamp(timestamp, 0) }
impl TryFrom<(BackupGroup, i64)> for BackupDir {
type Error = Error;
fn try_from((group, timestamp): (BackupGroup, i64)) -> Result<Self, Error> {
BackupDir::new_with_group(group, timestamp)
}
}
@ -334,7 +340,7 @@ impl BackupInfo {
if file_type != nix::dir::Type::Directory { return Ok(()); }
let dt = backup_time.parse::<DateTime<Utc>>()?;
let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp());
let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp())?;
let files = list_backup_files(l2_fd, backup_time)?;

View File

@ -5,7 +5,7 @@ use std::io::{Read, Write, Seek, SeekFrom};
use std::os::unix::ffi::OsStrExt;
use anyhow::{bail, format_err, Error};
use chrono::offset::{TimeZone, Local};
use chrono::offset::{TimeZone, Local, LocalResult};
use pathpatterns::{MatchList, MatchType};
use proxmox::tools::io::ReadExt;
@ -533,17 +533,17 @@ impl <R: Read + Seek> CatalogReader<R> {
self.dump_dir(&path, pos)?;
}
CatalogEntryType::File => {
let dt = Local
.timestamp_opt(mtime as i64, 0)
.single() // chrono docs say timestamp_opt can only be None or Single!
.unwrap_or_else(|| Local.timestamp(0, 0));
let mtime_string = match Local.timestamp_opt(mtime as i64, 0) {
LocalResult::Single(time) => time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false),
_ => (mtime as i64).to_string(),
};
println!(
"{} {:?} {} {}",
etype,
path,
size,
dt.to_rfc3339_opts(chrono::SecondsFormat::Secs, false),
mtime_string,
);
}
_ => {

View File

@ -10,7 +10,7 @@
use std::io::Write;
use anyhow::{bail, Error};
use chrono::{Local, TimeZone, DateTime};
use chrono::{Local, DateTime};
use openssl::hash::MessageDigest;
use openssl::pkcs5::pbkdf2_hmac;
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
@ -219,7 +219,7 @@ impl CryptConfig {
created: DateTime<Local>,
) -> Result<Vec<u8>, Error> {
let modified = Local.timestamp(Local::now().timestamp(), 0);
let modified = Local::now();
let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();

View File

@ -6,7 +6,7 @@ use super::chunk_store::*;
use super::{IndexFile, ChunkReadInfo};
use crate::tools::{self, epoch_now_u64};
use chrono::{Local, TimeZone};
use chrono::{Local, LocalResult, TimeZone};
use std::fs::File;
use std::io::Write;
use std::os::unix::io::AsRawFd;
@ -150,7 +150,10 @@ impl FixedIndexReader {
println!("ChunkSize: {}", self.chunk_size);
println!(
"CTime: {}",
Local.timestamp(self.ctime as i64, 0).format("%c")
match Local.timestamp_opt(self.ctime as i64, 0) {
LocalResult::Single(ctime) => ctime.format("%c").to_string(),
_ => (self.ctime as i64).to_string(),
}
);
println!("UUID: {:?}", self.uuid);
}

View File

@ -1,7 +1,7 @@
use anyhow::{bail, format_err, Context, Error};
use serde::{Deserialize, Serialize};
use chrono::{Local, TimeZone, DateTime};
use chrono::{Local, DateTime};
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block;
@ -136,7 +136,7 @@ pub fn encrypt_key_with_passphrase(
enc_data.extend_from_slice(&tag);
enc_data.extend_from_slice(&encrypted_key);
let created = Local.timestamp(Local::now().timestamp(), 0);
let created = Local::now();
Ok(KeyConfig {
kdf: Some(kdf),

View File

@ -8,7 +8,7 @@ use std::sync::{Arc, Mutex};
use std::task::Context;
use anyhow::{bail, format_err, Error};
use chrono::{Local, DateTime, Utc, TimeZone};
use chrono::{Local, LocalResult, DateTime, Utc, TimeZone};
use futures::future::FutureExt;
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
@ -257,7 +257,11 @@ pub async fn api_datastore_latest_snapshot(
list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
let backup_time = Utc.timestamp(list[0].backup_time, 0);
let backup_time = match Utc.timestamp_opt(list[0].backup_time, 0) {
LocalResult::Single(time) => time,
_ => bail!("last snapshot of backup group {:?} has invalid timestamp {}.",
group.group_path(), list[0].backup_time),
};
Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
@ -373,7 +377,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
@ -444,7 +448,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
@ -986,7 +990,15 @@ async fn create_backup(
}
}
let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
let backup_time = match backup_time_opt {
Some(timestamp) => {
match Utc.timestamp_opt(timestamp, 0) {
LocalResult::Single(time) => time,
_ => bail!("Invalid backup-time parameter: {}", timestamp),
}
},
_ => Utc::now(),
};
let client = connect(repo.host(), repo.user())?;
record_repository(&repo);
@ -1035,7 +1047,7 @@ async fn create_backup(
None
};
let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp())?;
let mut manifest = BackupManifest::new(snapshot);
let mut catalog = None;
@ -1560,7 +1572,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
@ -1752,8 +1764,9 @@ async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<Str
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
{
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
}
}
}
}

View File

@ -3,7 +3,7 @@ use std::sync::Arc;
use anyhow::{Error};
use serde_json::Value;
use chrono::{TimeZone, Utc};
use chrono::Utc;
use serde::Serialize;
use proxmox::api::{ApiMethod, RpcEnvironment};
@ -82,7 +82,7 @@ struct BenchmarkResult {
static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
tls: Speed {
speed: None,
top: 1_000_000.0 * 590.0, // TLS to localhost, AMD Ryzen 7 2700X
top: 1_000_000.0 * 690.0, // TLS to localhost, AMD Ryzen 7 2700X
},
sha256: Speed {
speed: None,
@ -212,7 +212,7 @@ async fn test_upload_speed(
verbose: bool,
) -> Result<(), Error> {
let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
let backup_time = Utc::now();
let client = connect(repo.host(), repo.user())?;
record_repository(&repo);

View File

@ -1,7 +1,7 @@
use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
use chrono::{Local, TimeZone};
use chrono::Local;
use serde::{Deserialize, Serialize};
use proxmox::api::api;
@ -112,7 +112,7 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
match kdf {
Kdf::None => {
let created = Local.timestamp(Local::now().timestamp(), 0);
let created = Local::now();
store_key_config(
&path,
@ -180,7 +180,7 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
match kdf {
Kdf::None => {
let modified = Local.timestamp(Local::now().timestamp(), 0);
let modified = Local::now();
store_key_config(
&path,

View File

@ -347,7 +347,7 @@ pub async fn pull_group(
let mut remote_snapshots = std::collections::HashSet::new();
for item in list {
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
// in-progress backups can't be synced
if let None = item.size {

View File

@ -8,7 +8,7 @@ use std::path::Path;
use anyhow::{bail, format_err, Error};
use nix::sys::stat::Mode;
use pxar::{mode, Entry, EntryKind, Metadata};
use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
/// Get the file permissions as `nix::Mode`
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
@ -114,13 +114,19 @@ fn mode_string(entry: &Entry) -> String {
)
}
pub fn format_single_line_entry(entry: &Entry) -> String {
fn format_mtime(mtime: &StatxTimestamp) -> String {
use chrono::offset::TimeZone;
match chrono::Local.timestamp_opt(mtime.secs, mtime.nanos) {
chrono::LocalResult::Single(mtime) => mtime.format("%Y-%m-%d %H:%M:%S").to_string(),
_ => format!("{}.{}", mtime.secs, mtime.nanos),
}
}
pub fn format_single_line_entry(entry: &Entry) -> String {
let mode_string = mode_string(entry);
let meta = entry.metadata();
let mtime = chrono::Local.timestamp(meta.stat.mtime.secs, meta.stat.mtime.nanos);
let (size, link) = match entry.kind() {
EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
@ -134,7 +140,7 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
"{} {:<13} {} {:>8} {:?}{}",
mode_string,
format!("{}/{}", meta.stat.uid, meta.stat.gid),
mtime.format("%Y-%m-%d %H:%M:%S"),
format_mtime(&meta.stat.mtime),
size,
entry.path(),
link,
@ -142,12 +148,9 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
}
pub fn format_multi_line_entry(entry: &Entry) -> String {
use chrono::offset::TimeZone;
let mode_string = mode_string(entry);
let meta = entry.metadata();
let mtime = chrono::Local.timestamp(meta.stat.mtime.secs, meta.stat.mtime.nanos);
let (size, link, type_name) = match entry.kind() {
EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
@ -196,6 +199,6 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
mode_string,
meta.stat.uid,
meta.stat.gid,
mtime.format("%Y-%m-%d %H:%M:%S"),
format_mtime(&meta.stat.mtime),
)
}

View File

@ -1,5 +1,5 @@
use anyhow::{Error};
use chrono::{TimeZone, Local};
use chrono::Local;
use std::io::Write;
/// Log messages with timestamps into files
@ -56,7 +56,7 @@ impl FileLogger {
stdout.write_all(b"\n").unwrap();
}
let line = format!("{}: {}\n", Local.timestamp(Local::now().timestamp(), 0).to_rfc3339(), msg);
let line = format!("{}: {}\n", Local::now().to_rfc3339(), msg);
self.file.write_all(line.as_bytes()).unwrap();
}
}

View File

@ -1,6 +1,6 @@
use anyhow::{Error};
use serde_json::Value;
use chrono::{Local, TimeZone};
use chrono::{Local, TimeZone, LocalResult};
pub fn strip_server_file_expenstion(name: &str) -> String {
@ -25,8 +25,11 @@ pub fn render_epoch(value: &Value, _record: &Value) -> Result<String, Error> {
if value.is_null() { return Ok(String::new()); }
let text = match value.as_i64() {
Some(epoch) => {
Local.timestamp(epoch, 0).format("%c").to_string()
}
match Local.timestamp_opt(epoch, 0) {
LocalResult::Single(epoch) => epoch.format("%c").to_string(),
_ => epoch.to_string(),
}
},
None => {
value.to_string()
}

View File

@ -161,10 +161,17 @@ fn parse_date_time_comp(max: usize) -> impl Fn(&str) -> IResult<&str, DateTimeVa
}
}
fn parse_date_time_comp_list(max: usize) -> impl Fn(&str) -> IResult<&str, Vec<DateTimeValue>> {
fn parse_date_time_comp_list(start: u32, max: usize) -> impl Fn(&str) -> IResult<&str, Vec<DateTimeValue>> {
move |i: &str| {
if i.starts_with("*") {
return Ok((&i[1..], Vec::new()));
let i = &i[1..];
if i.starts_with("/") {
let (n, repeat) = parse_time_comp(max)(&i[1..])?;
if repeat > 0 {
return Ok((n, vec![DateTimeValue::Repeated(start, repeat)]));
}
}
return Ok((i, Vec::new()));
}
separated_nonempty_list(tag(","), parse_date_time_comp(max))(i)
@ -174,9 +181,9 @@ fn parse_date_time_comp_list(max: usize) -> impl Fn(&str) -> IResult<&str, Vec<D
fn parse_time_spec(i: &str) -> IResult<&str, (Vec<DateTimeValue>, Vec<DateTimeValue>, Vec<DateTimeValue>)> {
let (i, (hour, minute, opt_second)) = tuple((
parse_date_time_comp_list(24),
preceded(tag(":"), parse_date_time_comp_list(60)),
opt(preceded(tag(":"), parse_date_time_comp_list(60))),
parse_date_time_comp_list(0, 24),
preceded(tag(":"), parse_date_time_comp_list(0, 60)),
opt(preceded(tag(":"), parse_date_time_comp_list(0, 60))),
))(i)?;
if let Some(second) = opt_second {
@ -190,14 +197,14 @@ fn parse_date_spec(i: &str) -> IResult<&str, (Vec<DateTimeValue>, Vec<DateTimeVa
// TODO: implement ~ for days (man systemd.time)
if let Ok((i, (year, month, day))) = tuple((
parse_date_time_comp_list(2200), // the upper limit for systemd, stay compatible
preceded(tag("-"), parse_date_time_comp_list(13)),
preceded(tag("-"), parse_date_time_comp_list(32)),
parse_date_time_comp_list(0, 2200), // the upper limit for systemd, stay compatible
preceded(tag("-"), parse_date_time_comp_list(1, 13)),
preceded(tag("-"), parse_date_time_comp_list(1, 32)),
))(i) {
Ok((i, (year, month, day)))
} else if let Ok((i, (month, day))) = tuple((
parse_date_time_comp_list(13),
preceded(tag("-"), parse_date_time_comp_list(32)),
parse_date_time_comp_list(1, 13),
preceded(tag("-"), parse_date_time_comp_list(1, 32)),
))(i) {
Ok((i, (Vec::new(), month, day)))
} else {

View File

@ -4,9 +4,9 @@ Ext.define('PBS.data.CalendarEventExamples', {
field: ['value', 'text'],
data: [
{ value: '*/30', text: Ext.String.format(gettext("Every {0} minutes"), 30) },
{ value: '*:0/30', text: Ext.String.format(gettext("Every {0} minutes"), 30) },
{ value: 'hourly', text: gettext("Every hour") },
{ value: '*/2:00', text: gettext("Every two hours") },
{ value: '0/2:00', text: gettext("Every two hours") },
{ value: '2,22:30', text: gettext("Every day") + " 02:30, 22:30" },
{ value: 'daily', text: gettext("Every day") + " 00:00" },
{ value: 'mon..fri', text: gettext("Monday to Friday") + " 00:00" },

View File

@ -5,6 +5,8 @@ Ext.define('PBS.window.SyncJobEdit', {
userid: undefined,
onlineHelp: 'syncjobs',
isAdd: true,
subject: gettext('SyncJob'),