Mirror of https://github.com/dani-garcia/vaultwarden.git
Synced 2024-11-22 05:10:29 +01:00

Merge remote-tracking branch 'origin/master' into fmt

This commit is contained in commit 994669fb69.

25 changed files with 326 additions and 147 deletions
.env.template

@@ -56,6 +56,23 @@
 # WEBSOCKET_ADDRESS=0.0.0.0
 # WEBSOCKET_PORT=3012
 
+## Job scheduler settings
+##
+## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
+## and are always in terms of UTC time (regardless of your local time zone settings).
+##
+## How often (in ms) the job scheduler thread checks for jobs that need running.
+## Set to 0 to globally disable scheduled jobs.
+# JOB_POLL_INTERVAL_MS=30000
+##
+## Cron schedule of the job that checks for Sends past their deletion date.
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
+# SEND_PURGE_SCHEDULE="0 5 * * * *"
+##
+## Cron schedule of the job that checks for trashed items to delete permanently.
+## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
+# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
+
 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
 
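The schedule strings above use the cron crate's six-field syntax (seconds come first, unlike classic five-field crontab). A minimal standalone sketch of how such a string parses, assuming the `cron` and `chrono` crates as dependencies; it simply prints the next few fire times:

    use std::str::FromStr;

    use chrono::Utc;
    use cron::Schedule;

    fn main() {
        // "0 5 * * * *" is the default SEND_PURGE_SCHEDULE: sec min hour
        // day-of-month month day-of-week, i.e. minute 5 of every hour.
        let schedule = Schedule::from_str("0 5 * * * *").unwrap();
        // Fire times are computed in UTC, matching the note in the template.
        for next in schedule.upcoming(Utc).take(3) {
            println!("next Send purge: {}", next);
        }
    }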
.gitattributes (2 changes, vendored)

@@ -1,3 +1,3 @@
 # Ignore vendored scripts in GitHub stats
-src/static/* linguist-vendored
+src/static/scripts/* linguist-vendored
Cargo.lock (33 changes, generated)

@@ -161,6 +161,7 @@ dependencies = [
  "handlebars",
  "html5ever",
  "idna 0.2.2",
+ "job_scheduler",
  "jsonwebtoken",
  "lettre",
  "libsqlite3-sys",

@@ -401,6 +402,17 @@ dependencies = [
  "cfg-if 1.0.0",
 ]
 
+[[package]]
+name = "cron"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e009ed0b762cf7a967a34dfdc67d5967d3f828f12901d37081432c3dd1668f8f"
+dependencies = [
+ "chrono",
+ "nom 4.1.1",
+ "once_cell",
+]
+
 [[package]]
 name = "crypto-mac"
 version = "0.3.0"

@@ -1097,6 +1109,16 @@ version = "0.4.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
 
+[[package]]
+name = "job_scheduler"
+version = "1.2.1"
+source = "git+https://github.com/jjlin/job_scheduler?rev=ee023418dbba2bfe1e30a5fd7d937f9e33739806#ee023418dbba2bfe1e30a5fd7d937f9e33739806"
+dependencies = [
+ "chrono",
+ "cron",
+ "uuid",
+]
+
 [[package]]
 name = "js-sys"
 version = "0.3.49"

@@ -1160,7 +1182,7 @@ dependencies = [
  "idna 0.2.2",
  "mime 0.3.16",
  "native-tls",
- "nom",
+ "nom 6.1.2",
  "once_cell",
  "quoted_printable",
  "rand 0.8.3",

@@ -1475,6 +1497,15 @@ version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
 
+[[package]]
+name = "nom"
+version = "4.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c349f68f25f596b9f44cf0e7c69752a5c633b0550c3ff849518bfba0233774a"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "nom"
 version = "6.1.2"
Cargo.toml (10 changes)

@@ -73,6 +73,9 @@ chrono = { version = "0.4.19", features = ["serde"] }
 chrono-tz = "0.5.3"
 time = "0.2.26"
 
+# Job scheduler
+job_scheduler = "1.2.1"
+
 # TOTP library
 oath = "0.10.2"
 

@@ -136,3 +139,10 @@ rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e3
 
 # For favicon extraction from main website
 data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
+
+# The maintainer of the `job_scheduler` crate doesn't seem to have responded
+# to any issues or PRs for almost a year (as of April 2021). This hopefully
+# temporary fork updates Cargo.toml to use more up-to-date dependencies.
+# In particular, `cron` has since implemented parsing of some common syntax
+# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
+job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
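A git dependency pinned by `rev` like the line above keeps builds reproducible even if the fork's branch moves. Since a plain `job_scheduler = "1.2.1"` requirement also remains in the dependency list, this pinned entry presumably acts as a source override for it; in Cargo that is conventionally declared under a `[patch.crates-io]` table, though the table header itself is not visible in the captured hunk.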
docker/Dockerfile.j2

@@ -44,8 +44,8 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "2.19.0" %}
-{% set vault_image_digest = "sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4" %}
+{% set vault_version = "2.19.0b" %}
+{% set vault_image_digest = "sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e" %}
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later

@@ -215,9 +215,6 @@ RUN apk add --no-cache \
 openssl \
 curl \
 dumb-init \
-{% if "sqlite" in features %}
-sqlite \
-{% endif %}
 {% if "mysql" in features %}
 mariadb-connector-c \
 {% endif %}

@@ -232,7 +229,6 @@ RUN apt-get update && apt-get install -y \
 ca-certificates \
 curl \
 dumb-init \
-sqlite3 \
 libmariadb-dev-compat \
 libpq5 \
 && rm -rf /var/lib/apt/lists/*
@@ -14,15 +14,15 @@
 # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull bitwardenrs/web-vault:v2.19.0
-# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+# $ docker pull bitwardenrs/web-vault:v2.19.0b
+# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-# [bitwardenrs/web-vault:v2.19.0]
+# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+# [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault
 
 ########################## BUILD IMAGE ##########################
 FROM rust:1.50 as build

@@ -86,7 +86,6 @@ RUN apt-get update && apt-get install -y \
 ca-certificates \
 curl \
 dumb-init \
-sqlite3 \
 libmariadb-dev-compat \
 libpq5 \
 && rm -rf /var/lib/apt/lists/*
@@ -14,15 +14,15 @@
 # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull bitwardenrs/web-vault:v2.19.0
-# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+# $ docker pull bitwardenrs/web-vault:v2.19.0b
+# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-# [bitwardenrs/web-vault:v2.19.0]
+# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+# [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault
 
 ########################## BUILD IMAGE ##########################
 FROM clux/muslrust:nightly-2021-02-22 as build

@@ -82,7 +82,6 @@ RUN apk add --no-cache \
 openssl \
 curl \
 dumb-init \
-sqlite \
 postgresql-libs \
 ca-certificates
@@ -14,15 +14,15 @@
 # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull bitwardenrs/web-vault:v2.19.0
-# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+# $ docker pull bitwardenrs/web-vault:v2.19.0b
+# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-# [bitwardenrs/web-vault:v2.19.0]
+# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+# [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault
 
 ########################## BUILD IMAGE ##########################
 FROM rust:1.50 as build

@@ -129,7 +129,6 @@ RUN apt-get update && apt-get install -y \
 ca-certificates \
 curl \
 dumb-init \
-sqlite3 \
 libmariadb-dev-compat \
 libpq5 \
 && rm -rf /var/lib/apt/lists/*
@@ -14,15 +14,15 @@
 # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull bitwardenrs/web-vault:v2.19.0
-# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+# $ docker pull bitwardenrs/web-vault:v2.19.0b
+# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-# [bitwardenrs/web-vault:v2.19.0]
+# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+# [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault
 
 ########################## BUILD IMAGE ##########################
 FROM rust:1.50 as build

@@ -129,7 +129,6 @@ RUN apt-get update && apt-get install -y \
 ca-certificates \
 curl \
 dumb-init \
-sqlite3 \
 libmariadb-dev-compat \
 libpq5 \
 && rm -rf /var/lib/apt/lists/*
@@ -14,15 +14,15 @@
 # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull bitwardenrs/web-vault:v2.19.0
-# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+# $ docker pull bitwardenrs/web-vault:v2.19.0b
+# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-# [bitwardenrs/web-vault:v2.19.0]
+# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+# [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault
 
 ########################## BUILD IMAGE ##########################
 FROM rust:1.50 as build

@@ -129,7 +129,6 @@ RUN apt-get update && apt-get install -y \
 ca-certificates \
 curl \
 dumb-init \
-sqlite3 \
 libmariadb-dev-compat \
 libpq5 \
 && rm -rf /var/lib/apt/lists/*
@@ -14,15 +14,15 @@
 # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull bitwardenrs/web-vault:v2.19.0
-# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+# $ docker pull bitwardenrs/web-vault:v2.19.0b
+# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-# [bitwardenrs/web-vault:v2.19.0]
+# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+# [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault
 
 ########################## BUILD IMAGE ##########################
 FROM messense/rust-musl-cross:armv7-musleabihf as build

@@ -86,7 +86,6 @@ RUN apk add --no-cache \
 openssl \
 curl \
 dumb-init \
-sqlite \
 ca-certificates
 
 RUN mkdir /data
src/api/admin.rs

@@ -1,7 +1,7 @@
 use once_cell::sync::Lazy;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
-use std::{env, process::Command, time::Duration};
+use std::{env, time::Duration};
 
 use reqwest::{blocking::Client, header::USER_AGENT};
 use rocket::{

@@ -64,10 +64,8 @@ static DB_TYPE: Lazy<&str> = Lazy::new(|| {
         .unwrap_or("Unknown")
 });
 
-static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
-    DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false)
-        && Command::new("sqlite3").arg("-version").status().is_ok()
-});
+static CAN_BACKUP: Lazy<bool> =
+    Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false));
 
 #[get("/")]
 fn admin_disabled() -> &'static str {

@@ -503,9 +501,16 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult
     use std::net::ToSocketAddrs;
 
     // Get current running versions
-    let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
-    let vault_version_str = read_file_string(&vault_version_path)?;
-    let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
+    let web_vault_version: WebVaultVersion =
+        match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
+            Ok(s) => serde_json::from_str(&s)?,
+            _ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
+                Ok(s) => serde_json::from_str(&s)?,
+                _ => WebVaultVersion {
+                    version: String::from("Version file missing"),
+                },
+            },
+        };
 
     // Execute some environment checks
     let running_within_docker = is_running_in_docker();

@@ -561,9 +566,10 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult
     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
-        "web_vault_version": web_vault_version.version,
         "latest_release": latest_release,
         "latest_commit": latest_commit,
+        "web_vault_enabled": &CONFIG.web_vault_enabled(),
+        "web_vault_version": web_vault_version.version,
         "latest_web_build": latest_web_build,
         "running_within_docker": running_within_docker,
         "has_http_access": has_http_access,

@@ -575,6 +581,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult
         "db_type": *DB_TYPE,
         "db_version": get_sql_server_version(&conn),
         "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
+        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
         "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
     });

@@ -600,11 +607,11 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
 }
 
 #[post("/config/backup_db")]
-fn backup_db(_token: AdminToken) -> EmptyResult {
+fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
     if *CAN_BACKUP {
-        backup_database()
+        backup_database(&conn)
     } else {
-        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
+        err!("Can't back up current DB (Only SQLite supports this feature)");
     }
 }
src/api/core/ciphers.rs

@@ -13,7 +13,7 @@ use crate::{
     api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
     auth::Headers,
     crypto,
-    db::{models::*, DbConn},
+    db::{models::*, DbConn, DbPool},
     CONFIG,
 };

@@ -77,6 +77,15 @@ pub fn routes() -> Vec<Route> {
     ]
 }
 
+pub fn purge_trashed_ciphers(pool: DbPool) {
+    debug!("Purging trashed ciphers");
+    if let Ok(conn) = pool.get() {
+        Cipher::purge_trash(&conn);
+    } else {
+        error!("Failed to get DB connection while purging trashed ciphers")
+    }
+}
+
 #[derive(FromForm, Default)]
 struct SyncData {
     #[form(field = "excludeDomains")]

@@ -118,6 +127,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
         "Ciphers": ciphers_json,
         "Domains": domains_json,
         "Sends": sends_json,
+        "unofficialServer": true,
         "Object": "sync"
     }))
 }
src/api/core/mod.rs

@@ -5,7 +5,8 @@ mod organizations;
 mod sends;
 pub mod two_factor;
 
-pub use sends::start_send_deletion_scheduler;
+pub use ciphers::purge_trashed_ciphers;
+pub use sends::purge_sends;
 
 pub fn routes() -> Vec<Route> {
     let mut mod_routes =
src/api/core/sends.rs

@@ -9,7 +9,7 @@ use serde_json::Value;
 use crate::{
     api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
     auth::{Headers, Host},
-    db::{models::*, DbConn},
+    db::{models::*, DbConn, DbPool},
     CONFIG,
 };

@@ -19,22 +19,14 @@ pub fn routes() -> Vec<rocket::Route> {
     routes![post_send, post_send_file, post_access, post_access_file, put_send, delete_send, put_remove_password]
 }
 
-pub fn start_send_deletion_scheduler(pool: crate::db::DbPool) {
-    std::thread::spawn(move || {
-        loop {
-            if let Ok(conn) = pool.get() {
-                info!("Initiating send deletion");
-                for send in Send::find_all(&conn) {
-                    if chrono::Utc::now().naive_utc() >= send.deletion_date {
-                        send.delete(&conn).ok();
-                    }
-                }
-            }
-
-            std::thread::sleep(std::time::Duration::from_secs(3600));
-        }
-    });
-}
+pub fn purge_sends(pool: DbPool) {
+    debug!("Purging sends");
+    if let Ok(conn) = pool.get() {
+        Send::purge(&conn);
+    } else {
+        error!("Failed to get DB connection while purging sends")
+    }
+}
 
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
src/api/icons.rs

@@ -42,6 +42,7 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
 
 // Build Regex only once since this takes a lot of time.
 static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
+static ICON_REL_BLACKLIST: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap());
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
 
 // Special HashMap which holds the user defined Regex to speedup matching the regex.

@@ -60,7 +61,9 @@ fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
     }
 
     match get_icon(&domain) {
-        Some(i) => Cached::ttl(Content(ContentType::new("image", "x-icon"), i), CONFIG.icon_cache_ttl()),
+        Some((icon, icon_type)) => {
+            Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl())
+        }
         _ => Cached::ttl(Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl()),
     }
 }

@@ -254,7 +257,7 @@ fn is_domain_blacklisted(domain: &str) -> bool {
     is_blacklisted
 }
 
-fn get_icon(domain: &str) -> Option<Vec<u8>> {
+fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
 
     // Check for expiration of negatively cached copy

@@ -263,7 +266,11 @@ fn get_icon(domain: &str) -> Option<Vec<u8>> {
     }
 
     if let Some(icon) = get_cached_icon(&path) {
-        return Some(icon);
+        let icon_type = match get_icon_type(&icon) {
+            Some(x) => x,
+            _ => "x-icon",
+        };
+        return Some((icon, icon_type.to_string()));
     }
 
     if CONFIG.disable_icon_download() {

@@ -272,9 +279,9 @@ fn get_icon(domain: &str) -> Option<Vec<u8>> {
 
     // Get the icon, or None in case of error
     match download_icon(&domain) {
-        Ok(icon) => {
+        Ok((icon, icon_type)) => {
             save_icon(&path, &icon);
-            Some(icon)
+            Some((icon, icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
             error!("Error downloading icon: {:?}", e);

@@ -335,7 +342,6 @@ fn icon_is_expired(path: &str) -> bool {
     expired.unwrap_or(true)
 }
 
-#[derive(Debug)]
 struct Icon {
     priority: u8,
     href: String,

@@ -367,7 +373,8 @@ fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>
         let attr_name = attr.name.local.as_ref();
         let attr_value = attr.value.as_ref();
 
-        if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) {
+        if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value)
+        {
             has_rel = true;
         } else if attr_name == "href" {
             href = Some(attr_value);

@@ -616,7 +623,7 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
     (width, height)
 }
 
-fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
+fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
     if is_domain_blacklisted(domain) {
         err!("Domain is blacklisted", domain)
     }

@@ -624,6 +631,7 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
     let icon_result = get_icon_url(&domain)?;
 
     let mut buffer = Vec::new();
+    let mut icon_type: Option<&str> = None;
 
     use data_url::DataUrl;
 

@@ -635,17 +643,31 @@
             Ok((body, _fragment)) => {
                 // Also check if the size is atleast 67 bytes, which seems to be the smallest png i could create
                 if body.len() >= 67 {
+                    // Check if the icon type is allowed, else try an icon from the list.
+                    icon_type = get_icon_type(&body);
+                    if icon_type.is_none() {
+                        debug!("Icon from {} data:image uri, is not a valid image type", domain);
+                        continue;
+                    }
+                    info!("Extracted icon from data:image uri for {}", domain);
                     buffer = body;
                     break;
                 }
             }
-            _ => warn!("data uri is invalid"),
+            _ => warn!("Extracted icon from data:image uri is invalid"),
         };
     } else {
         match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
             Ok(mut res) => {
-                info!("Downloaded icon from {}", icon.href);
                 res.copy_to(&mut buffer)?;
+                // Check if the icon type is allowed, else try an icon from the list.
+                icon_type = get_icon_type(&buffer);
+                if icon_type.is_none() {
+                    buffer.clear();
+                    debug!("Icon from {}, is not a valid image type", icon.href);
+                    continue;
+                }
+                info!("Downloaded icon from {}", icon.href);
                 break;
             }
             _ => warn!("Download failed for {}", icon.href),

@@ -654,10 +676,10 @@
     }
 
     if buffer.is_empty() {
-        err!("Empty response")
+        err!("Empty response downloading icon")
     }
 
-    Ok(buffer)
+    Ok((buffer, icon_type))
 }
 
 fn save_icon(path: &str, icon: &[u8]) {

@@ -669,7 +691,18 @@ fn save_icon(path: &str, icon: &[u8]) {
             create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
         }
         Err(e) => {
-            info!("Icon save error: {:?}", e);
+            warn!("Icon save error: {:?}", e);
         }
     }
 }
 
+fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
+    match bytes {
+        [137, 80, 78, 71, ..] => Some("png"),
+        [0, 0, 1, 0, ..] => Some("x-icon"),
+        [82, 73, 70, 70, ..] => Some("webp"),
+        [255, 216, 255, ..] => Some("jpeg"),
+        [66, 77, ..] => Some("bmp"),
+        _ => None,
+    }
+}
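The new get_icon_type dispatches on well-known file signatures: 137 80 78 71 is the PNG magic (0x89 "PNG"), 255 216 255 is JPEG's FF D8 FF, 66 77 is BMP's "BM", 82 73 70 70 is "RIFF" (the container WebP uses), and 0 0 1 0 is the ICO header. A small test sketch of that dispatch (the function is private to icons.rs, so this would live in the same module):

    #[test]
    fn icon_type_from_magic_bytes() {
        // PNG signature: 0x89 'P' 'N' 'G' ...
        assert_eq!(get_icon_type(&[137, 80, 78, 71, 13, 10, 26, 10]), Some("png"));
        // JPEG signature: 0xFF 0xD8 0xFF
        assert_eq!(get_icon_type(&[255, 216, 255, 224]), Some("jpeg"));
        // GIF ("GIF89a") is not in the allow-list and is rejected.
        assert_eq!(get_icon_type(b"GIF89a"), None);
    }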
src/api/identity.rs

@@ -72,7 +72,8 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
         "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
-        "scope": "api offline_access"
+        "scope": "api offline_access",
+        "unofficialServer": true,
     })))
 }

@@ -163,7 +164,8 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
         "ResetMasterPassword": false,// TODO: Same as above
-        "scope": "api offline_access"
+        "scope": "api offline_access",
+        "unofficialServer": true,
     });
 
     if let Some(token) = twofactor_token {
src/api/mod.rs

@@ -10,8 +10,9 @@ use serde_json::Value;
 
 pub use crate::api::{
     admin::routes as admin_routes,
+    core::purge_sends,
+    core::purge_trashed_ciphers,
     core::routes as core_routes,
-    core::start_send_deletion_scheduler,
     icons::routes as icons_routes,
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
src/config.rs

@@ -316,6 +316,17 @@ make_config! {
         /// Websocket port
         websocket_port: u16, false, def, 3012;
     },
+    jobs {
+        /// Job scheduler poll interval |> How often the job scheduler thread checks for jobs to run.
+        /// Set to 0 to globally disable scheduled jobs.
+        job_poll_interval_ms: u64, false, def, 30_000;
+        /// Send purge schedule |> Cron schedule of the job that checks for Sends past their deletion date.
+        /// Defaults to hourly. Set blank to disable this job.
+        send_purge_schedule: String, false, def, "0 5 * * * *".to_string();
+        /// Trash purge schedule |> Cron schedule of the job that checks for trashed items to delete permanently.
+        /// Defaults to daily. Set blank to disable this job.
+        trash_purge_schedule: String, false, def, "0 5 0 * * *".to_string();
+    },
 
     /// General settings
     settings {

@@ -339,6 +350,11 @@ make_config! {
         /// Per-organization attachment limit (KB) |> Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
         org_attachment_limit: i64, true, option;
 
+        /// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
+        /// If unset, trashed items are not auto-deleted. This setting applies globally, so make
+        /// sure to inform all users of any changes to this setting.
+        trash_auto_delete_days: i64, true, option;
+
         /// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
         /// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
         /// otherwise it will delete them and they won't be downloaded again.
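Note the correspondence with the .env template at the top of this diff: each make_config! entry maps to an environment variable of the same name in upper case, so job_poll_interval_ms, send_purge_schedule, and trash_purge_schedule are the JOB_POLL_INTERVAL_MS, SEND_PURGE_SCHEDULE, and TRASH_PURGE_SCHEDULE knobs documented there, with identical defaults (30000 ms, "0 5 * * * *", "0 5 0 * * *").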
src/db/mod.rs

@@ -1,5 +1,3 @@
-use std::process::Command;
-
 use chrono::prelude::*;
 use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
 use rocket::{

@@ -142,6 +140,7 @@ macro_rules! db_run {
     // Different code for each db
     ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
         #[allow(unused)] use diesel::prelude::*;
+        #[allow(unused_variables)]
         match $conn {
             $($(
                 #[cfg($db)]

@@ -218,50 +217,35 @@ macro_rules! db_object {
 // Reexport the models, needs to be after the macros are defined so it can access them
 pub mod models;
 
-/// Creates a back-up of the database using sqlite3
-pub fn backup_database() -> Result<(), Error> {
+/// Creates a back-up of the sqlite database
+/// MySQL/MariaDB and PostgreSQL are not supported.
+pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
+    db_run! {@raw conn:
+        postgresql, mysql {
+            err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
+        }
+        sqlite {
     use std::path::Path;
     let db_url = CONFIG.database_url();
-    let db_path = Path::new(&db_url).parent().unwrap();
-
-    let now: DateTime<Utc> = Utc::now();
-    let file_date = now.format("%Y%m%d").to_string();
-    let backup_command: String = format!("{}{}{}", ".backup 'db_", file_date, ".sqlite3'");
-
-    Command::new("sqlite3")
-        .current_dir(db_path)
-        .args(&["db.sqlite3", &backup_command])
-        .output()
-        .expect("Can't open database, sqlite3 is not available, make sure it's installed and available on the PATH");
+            let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
+            let file_date = Utc::now().format("%Y%m%d_%H%M%S").to_string();
+            diesel::sql_query(format!("VACUUM INTO '{}/db_{}.sqlite3'", db_path, file_date)).execute(conn)?;
+        }
+    }
 
     Ok(())
 }
 
 /// Get the SQL Server version
 pub fn get_sql_server_version(conn: &DbConn) -> String {
-    use diesel::sql_types::Text;
-    #[derive(QueryableByName)]
-    struct SqlVersion {
-        #[sql_type = "Text"]
-        version: String,
-    }
-
     db_run! {@raw conn:
         postgresql, mysql {
-            match diesel::sql_query("SELECT version() AS version;").get_result::<SqlVersion>(conn).ok() {
-                Some(v) => {
-                    v.version
-                },
-                _ => "Unknown".to_string()
-            }
+            no_arg_sql_function!(version, diesel::sql_types::Text);
+            diesel::select(version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
         sqlite {
-            match diesel::sql_query("SELECT sqlite_version() AS version;").get_result::<SqlVersion>(conn).ok() {
-                Some(v) => {
-                    v.version
-                },
-                _ => "Unknown".to_string()
-            }
+            no_arg_sql_function!(sqlite_version, diesel::sql_types::Text);
+            diesel::select(sqlite_version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
     }
 }
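The rewritten backup path replaces shelling out to the sqlite3 CLI with SQLite's VACUUM INTO statement, issued through the existing Diesel connection; this is why the sqlite/sqlite3 packages could be dropped from the Dockerfiles above and why the CAN_BACKUP check in admin.rs no longer probes for the binary. A minimal sketch of how the statement is assembled, mirroring the diff (the db_url value here is hypothetical):

    use chrono::Utc;

    fn backup_sql(db_url: &str) -> String {
        // Place the backup next to the database, stamped to the second.
        let db_path = std::path::Path::new(db_url).parent().unwrap().to_string_lossy();
        let file_date = Utc::now().format("%Y%m%d_%H%M%S").to_string();
        format!("VACUUM INTO '{}/db_{}.sqlite3'", db_path, file_date)
    }

    fn main() {
        // e.g. VACUUM INTO 'data/db_20210321_153000.sqlite3'
        println!("{}", backup_sql("data/db.sqlite3"));
    }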
src/db/models/cipher.rs

@@ -1,6 +1,8 @@
-use chrono::{NaiveDateTime, Utc};
+use chrono::{Duration, NaiveDateTime, Utc};
 use serde_json::Value;
 
+use crate::CONFIG;
+
 use super::{
     Attachment, CollectionCipher, Favorite, FolderCipher, Organization, User, UserOrgStatus, UserOrgType,
     UserOrganization,

@@ -262,6 +264,17 @@ impl Cipher {
         Ok(())
     }
 
+    /// Purge all ciphers that are old enough to be auto-deleted.
+    pub fn purge_trash(conn: &DbConn) {
+        if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
+            let now = Utc::now().naive_utc();
+            let dt = now - Duration::days(auto_delete_days);
+            for cipher in Self::find_deleted_before(&dt, conn) {
+                cipher.delete(&conn).ok();
+            }
+        }
+    }
+
     pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(user_uuid, conn);

@@ -502,6 +515,15 @@ impl Cipher {
         }}
     }
 
+    /// Find all ciphers that were deleted before the specified datetime.
+    pub fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
+        db_run! {conn: {
+            ciphers::table
+                .filter(ciphers::deleted_at.lt(dt))
+                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+        }}
+    }
+
     pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
         db_run! {conn: {
             ciphers_collections::table
src/db/models/send.rs

@@ -205,6 +205,13 @@ impl Send {
         }}
     }
 
+    /// Purge all sends that are past their deletion date.
+    pub fn purge(conn: &DbConn) {
+        for send in Self::find_by_past_deletion_date(&conn) {
+            send.delete(&conn).ok();
+        }
+    }
+
     pub fn update_users_revision(&self, conn: &DbConn) {
         match &self.user_uuid {
             Some(user_uuid) => {

@@ -223,12 +230,6 @@ impl Send {
         Ok(())
     }
 
-    pub fn find_all(conn: &DbConn) -> Vec<Self> {
-        db_run! {conn: {
-            sends::table.load::<SendDb>(conn).expect("Error loading sends").from_db()
-        }}
-    }
-
     pub fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option<Self> {
         use data_encoding::BASE64URL_NOPAD;
         use uuid::Uuid;

@@ -271,4 +272,13 @@ impl Send {
             .load::<SendDb>(conn).expect("Error loading sends").from_db()
         }}
     }
+
+    pub fn find_by_past_deletion_date(conn: &DbConn) -> Vec<Self> {
+        let now = Utc::now().naive_utc();
+        db_run! {conn: {
+            sends::table
+                .filter(sends::deletion_date.lt(now))
+                .load::<SendDb>(conn).expect("Error loading sends").from_db()
+        }}
+    }
 }
src/main.rs (53 changes)

@@ -16,6 +16,7 @@ extern crate diesel;
 #[macro_use]
 extern crate diesel_migrations;
 
+use job_scheduler::{Job, JobScheduler};
 use std::{
     fs::create_dir_all,
     panic,

@@ -23,6 +24,7 @@ use std::{
     process::{exit, Command},
     str::FromStr,
     thread,
+    time::Duration,
 };
 
 #[macro_use]

@@ -56,7 +58,9 @@ fn main() {
 
     create_icon_cache_folder();
 
-    launch_rocket(extra_debug);
+    let pool = create_db_pool();
+    schedule_jobs(pool.clone());
+    launch_rocket(pool, extra_debug); // Blocks until program termination.
 }
 
 const HELP: &str = "\

@@ -306,17 +310,17 @@ fn check_web_vault() {
     }
 }
 
-fn launch_rocket(extra_debug: bool) {
-    let pool = match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
+fn create_db_pool() -> db::DbPool {
+    match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
         Ok(p) => p,
         Err(e) => {
            error!("Error creating database pool: {:?}", e);
            exit(1);
        }
-    };
-
-    api::start_send_deletion_scheduler(pool.clone());
+    }
+}
 
+fn launch_rocket(pool: db::DbPool, extra_debug: bool) {
     let basepath = &CONFIG.domain_path();
 
     // If adding more paths here, consider also adding them to

@@ -339,3 +343,40 @@ fn launch_rocket(extra_debug: bool) {
     // The launch will restore the original logging level
     error!("Launch error {:#?}", result);
 }
+
+fn schedule_jobs(pool: db::DbPool) {
+    if CONFIG.job_poll_interval_ms() == 0 {
+        info!("Job scheduler disabled.");
+        return;
+    }
+    thread::Builder::new()
+        .name("job-scheduler".to_string())
+        .spawn(move || {
+            let mut sched = JobScheduler::new();
+
+            // Purge sends that are past their deletion date.
+            if !CONFIG.send_purge_schedule().is_empty() {
+                sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || {
+                    api::purge_sends(pool.clone());
+                }));
+            }
+
+            // Purge trashed items that are old enough to be auto-deleted.
+            if !CONFIG.trash_purge_schedule().is_empty() {
+                sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || {
+                    api::purge_trashed_ciphers(pool.clone());
+                }));
+            }
+
+            // Periodically check for jobs to run. We probably won't need any
+            // jobs that run more often than once a minute, so a default poll
+            // interval of 30 seconds should be sufficient. Users who want to
+            // schedule jobs to run more frequently for some reason can reduce
+            // the poll interval accordingly.
+            loop {
+                sched.tick();
+                thread::sleep(Duration::from_millis(CONFIG.job_poll_interval_ms()));
+            }
+        })
+        .expect("Error spawning job scheduler thread");
+}
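For reference, the job_scheduler crate's core loop that schedule_jobs drives, reduced to a standalone sketch (the crate API is exactly as exercised in the diff; the schedule string here is illustrative):

    use job_scheduler::{Job, JobScheduler};
    use std::time::Duration;

    fn main() {
        let mut sched = JobScheduler::new();
        // Six-field cron expression: fire at second 0 of every minute.
        sched.add(Job::new("0 * * * * *".parse().unwrap(), || {
            println!("job fired");
        }));
        // tick() runs whatever is due; the sleep sets the polling resolution
        // (30 s here, matching the JOB_POLL_INTERVAL_MS default).
        loop {
            sched.tick();
            std::thread::sleep(Duration::from_millis(30_000));
        }
    }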
src/static/global_domains.json

@@ -772,7 +772,8 @@
       "stackoverflow.com",
       "serverfault.com",
       "mathoverflow.net",
-      "askubuntu.com"
+      "askubuntu.com",
+      "stackapps.com"
     ],
     "Excluded": false
   },
|
@ -20,6 +20,7 @@
|
||||||
<dd class="col-sm-7">
|
<dd class="col-sm-7">
|
||||||
<span id="server-latest">{{diagnostics.latest_release}}<span id="server-latest-commit" class="d-none">-{{diagnostics.latest_commit}}</span></span>
|
<span id="server-latest">{{diagnostics.latest_release}}<span id="server-latest-commit" class="d-none">-{{diagnostics.latest_commit}}</span></span>
|
||||||
</dd>
|
</dd>
|
||||||
|
{{#if diagnostics.web_vault_enabled}}
|
||||||
<dt class="col-sm-5">Web Installed
|
<dt class="col-sm-5">Web Installed
|
||||||
<span class="badge badge-success d-none" id="web-success" title="Latest version is installed.">Ok</span>
|
<span class="badge badge-success d-none" id="web-success" title="Latest version is installed.">Ok</span>
|
||||||
<span class="badge badge-warning d-none" id="web-warning" title="There seems to be an update available.">Update</span>
|
<span class="badge badge-warning d-none" id="web-warning" title="There seems to be an update available.">Update</span>
|
||||||
|
@ -35,6 +36,13 @@
|
||||||
<span id="web-latest">{{diagnostics.latest_web_build}}</span>
|
<span id="web-latest">{{diagnostics.latest_web_build}}</span>
|
||||||
</dd>
|
</dd>
|
||||||
{{/unless}}
|
{{/unless}}
|
||||||
|
{{/if}}
|
||||||
|
{{#unless diagnostics.web_vault_enabled}}
|
||||||
|
<dt class="col-sm-5">Web Installed</dt>
|
||||||
|
<dd class="col-sm-7">
|
||||||
|
<span id="web-installed">Web Vault is disabled</span>
|
||||||
|
</dd>
|
||||||
|
{{/unless}}
|
||||||
<dt class="col-sm-5">Database</dt>
|
<dt class="col-sm-5">Database</dt>
|
||||||
<dd class="col-sm-7">
|
<dd class="col-sm-7">
|
||||||
<span><b>{{diagnostics.db_type}}:</b> {{diagnostics.db_version}}</span>
|
<span><b>{{diagnostics.db_type}}:</b> {{diagnostics.db_version}}</span>
|
||||||
|
@ -118,7 +126,10 @@
|
||||||
<dd class="col-sm-7">
|
<dd class="col-sm-7">
|
||||||
<span id="dns-resolved">{{diagnostics.dns_resolved}}</span>
|
<span id="dns-resolved">{{diagnostics.dns_resolved}}</span>
|
||||||
</dd>
|
</dd>
|
||||||
|
<dt class="col-sm-5">Date & Time (Local)</dt>
|
||||||
|
<dd class="col-sm-7">
|
||||||
|
<span><b>Server:</b> {{diagnostics.server_time_local}}</span>
|
||||||
|
</dd>
|
||||||
<dt class="col-sm-5">Date & Time (UTC)
|
<dt class="col-sm-5">Date & Time (UTC)
|
||||||
<span class="badge badge-success d-none" id="time-success" title="Time offsets seem to be correct.">Ok</span>
|
<span class="badge badge-success d-none" id="time-success" title="Time offsets seem to be correct.">Ok</span>
|
||||||
<span class="badge badge-danger d-none" id="time-warning" title="Time offsets are too mouch at drift.">Error</span>
|
<span class="badge badge-danger d-none" id="time-warning" title="Time offsets are too mouch at drift.">Error</span>
|
||||||