Mirror of https://github.com/dani-garcia/vaultwarden.git
Synchronized 2024-11-13 03:42:54 +01:00

Compare commits
171 commits
Commits in this comparison (abbreviated SHA1s; the author and date columns were empty in the mirrored table):

adb21d5c1a, e927b8aa5e, ba48ca68fc, 294b429436, 37c14c3c69, d0581da638, 38aad4f7be, 20d9e885bf,
2f20ad86f9, 33bae5fbe9, f60502a17e, 13f4b66e62, c967d0ddc1, ae6ed0ece8, b7c254eb30, a47b484172,
65629a99f0, 49c5dec9b6, cd195ff243, e3541763fd, f0efec7c96, 040e2a7bb0, d184c8f08c, 7d6dec6413,
de01111082, 0bd8f607cb, 21efc0800d, 1031c2e286, 1bf85201e7, 6ceed9284d, 25d99e3506, dca14285fd,
66baa5e7d8, 248e561b3f, 55623ad9c6, e9acd8bd3c, 544b7229e8, 978f009293, 92f1530e96, 2b824e8096,
059661be48, 0f3f97cc76, aa0fe7785a, 65d11a9720, c722006385, aaab7f9640, cbdb5657f1, 669b9db758,
3466a8040e, 7d47155d83, 9e26014b4d, 339612c917, 9eebbf3b9f, b557c11724, a1204cc935, 1ea511cbfc,
2e6a6fa39f, e7d5c17ff7, a7be8fab9b, 39d4d31080, c28246cf34, d7df0ad79e, 7c8ba0c232, d335187172,
f858523d92, 529c39c6c5, b428481ac0, b4b2701905, de66e56b6c, ecfebaf3c7, 0e53f58288, bc7ceb2ee3,
b27e6e30c9, 505b30eec2, 54bfcb8bc3, 035f694d2f, a4ab014ade, 6fedfceaa9, 8e8483481f, d04b94b77d,
247d0706ff, 0e8b410798, fda77afc2a, d9835f530c, bd91964170, d42b264a93, a4c7fadbf4, 8e2a87fd79,
4233dbf3db, a2bf8def2a, 8f05a90b96, 9082e7cebb, 55fdee3bf8, 377969ea67, f05398a6b3, 9555ac7bb8,
f01ef40a8e, 8e7b27cc36, d230ee087c, f8f14727b9, 753a9e0bae, f5fb69b64f, 3261534438, 46762d9fde,
6cadb2627a, 0fe93edea6, e9aa5a545e, 9dcc738f85, 84a7c7da5d, ca9234ed86, 27dc67fadd, 2ad33ec97f,
e1a8df96db, e42a37c6c1, 129b835ac7, 2d98aa3045, 93636eb3c3, 1e42755187, ce8efcc48f, 79ce5b49bc,
7c3cad197c, 000c606029, 29144b2ce0, ea04b6f151, 3427217686, a1fbd6d729, 2cbfe6fa5b, d86c4f2c23,
6d73f30b4f, d0c22b9fc9, d6b97090fa, 94b077cb2d, bb2412d033, b9bdc9b8e2, 897bdf8343, 569add453d,
77cd5b5954, 4438da39f9, 0b2383ab56, ad1d65bdf8, 3b283c289e, 4b9384cb2b, 0f39d96518, edf7484a70,
8b66e34415, 1d00e34bbb, 1b801406d6, 5e46a43306, 5c77431c2d, 2775c6ce8a, 890e668071, 596c167312,
ae3a153bdb, 2c36993792, d672ad3f76, a641b48884, 98b2178c7d, 76a3f0f531, c5665e7b77, cbdcf8ef9f,
3337594d60, 2daa8be1f1, eccb3ab947, 3246251f29, 8ab200224e, 34e00e1478, 0fdda3bc2f, 48836501bf,
f863ffb89a, 03c6ed2e07, efc6eb0073
133 changed files with 16,057 additions and 14,622 deletions
.dockerignore — 40 → 15 lines

```diff
@@ -1,40 +1,15 @@
-# Local build artifacts
-target
+// Ignore everything
+*
 
-# Data folder
-data
-
-# Misc
-.env
-.env.template
-.gitattributes
-.gitignore
-rustfmt.toml
-
-# IDE files
-.vscode
-.idea
-.editorconfig
-*.iml
-
-# Documentation
-.github
-*.md
-*.txt
-*.yml
-*.yaml
-
-# Docker
-hooks
-tools
-Dockerfile
-.dockerignore
-docker/**
+// Allow what is needed
+!.git
+!docker/healthcheck.sh
+!docker/start.sh
+!migrations
+!src
 
-# Web vault
-web-vault
-
-# Vaultwarden Resources
-resources
+!build.rs
+!Cargo.lock
+!Cargo.toml
+!rustfmt.toml
+!rust-toolchain.toml
```
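The new file flips the strategy from a blocklist to an allowlist: `*` excludes everything, and only the paths the image build actually needs are re-added with `!` rules. A hedged way to see what survives into the build context — this sketch is not part of the repository and assumes BuildKit (the default on recent Docker) — is to copy the context into a scratch image and list it:

```bash
# Sketch only: feed a throwaway Dockerfile via stdin so the repo's own
# Dockerfiles stay untouched, then print what COPY picked up.
printf 'FROM alpine\nCOPY . /ctx\nRUN find /ctx -maxdepth 2 | sort\n' \
  | docker build --no-cache --progress=plain -f - .
```

Only the `!`-allowlisted paths (plus `.git`) should show up under `/ctx`.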
.env.template — 588 lines changed

```diff
@@ -10,39 +10,13 @@
 ## variable ENV_FILE can be set to the location of this file prior to starting
 ## Vaultwarden.
 
+####################
+### Data folders ###
+####################
+
 ## Main data folder
 # DATA_FOLDER=data
 
-## Database URL
-## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
-# DATABASE_URL=data/db.sqlite3
-## When using MySQL, specify an appropriate connection URI.
-## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
-# DATABASE_URL=mysql://user:password@host[:port]/database_name
-## When using PostgreSQL, specify an appropriate connection URI (recommended)
-## or keyword/value connection string.
-## Details:
-## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
-## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
-# DATABASE_URL=postgresql://user:password@host[:port]/database_name
-
-## Database max connections
-## Define the size of the connection pool used for connecting to the database.
-# DATABASE_MAX_CONNS=10
-
-## Database timeout
-## Timeout when acquiring database connection
-# DATABASE_TIMEOUT=30
-
-## Database connection initialization
-## Allows SQL statements to be run whenever a new database connection is created.
-## This is mainly useful for connection-scoped pragmas.
-## If empty, a database-specific default is used:
-## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
-## - MySQL: ""
-## - PostgreSQL: ""
-# DATABASE_CONN_INIT=""
-
 ## Individual folders, these override %DATA_FOLDER%
 # RSA_KEY_FILENAME=data/rsa_key
 # ICON_CACHE_FOLDER=data/icon_cache
```
```diff
@@ -52,65 +26,90 @@
 
 ## Templates data folder, by default uses embedded templates
 ## Check source code to see the format
-# TEMPLATES_FOLDER=/path/to/templates
+# TEMPLATES_FOLDER=data/templates
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false
 
-## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
-## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Real-IP
-
-## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
-# ICON_CACHE_TTL=2592000
-## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
-# ICON_CACHE_NEGTTL=259200
-
 ## Web vault settings
 # WEB_VAULT_FOLDER=web-vault/
 # WEB_VAULT_ENABLED=true
 
-## Enables websocket notifications
-# WEBSOCKET_ENABLED=false
+#########################
+### Database settings ###
+#########################
 
-## Controls the WebSocket server address and port
-# WEBSOCKET_ADDRESS=0.0.0.0
-# WEBSOCKET_PORT=3012
+## Database URL
+## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
+# DATABASE_URL=data/db.sqlite3
+## When using MySQL, specify an appropriate connection URI.
+## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
+# DATABASE_URL=mysql://user:password@host[:port]/database_name
+## When using PostgreSQL, specify an appropriate connection URI (recommended)
+## or keyword/value connection string.
+## Details:
+## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
+## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+# DATABASE_URL=postgresql://user:password@host[:port]/database_name
+
+## Enable WAL for the DB
+## Set to false to avoid enabling WAL during startup.
+## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
+## this setting only prevents Vaultwarden from automatically enabling it on start.
+## Please read project wiki page about this setting first before changing the value as it can
+## cause performance degradation or might render the service unable to start.
+# ENABLE_DB_WAL=true
+
+## Database connection retries
+## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
+# DB_CONNECTION_RETRIES=15
+
+## Database timeout
+## Timeout when acquiring database connection
+# DATABASE_TIMEOUT=30
+
+## Database max connections
+## Define the size of the connection pool used for connecting to the database.
+# DATABASE_MAX_CONNS=10
+
+## Database connection initialization
+## Allows SQL statements to be run whenever a new database connection is created.
+## This is mainly useful for connection-scoped pragmas.
+## If empty, a database-specific default is used:
+## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
+## - MySQL: ""
+## - PostgreSQL: ""
+# DATABASE_CONN_INIT=""
+
+#################
+### WebSocket ###
+#################
+
+## Enable websocket notifications
+# ENABLE_WEBSOCKET=true
+
+##########################
+### Push notifications ###
+##########################
 
 ## Enables push notifications (requires key and id from https://bitwarden.com/host)
-# PUSH_ENABLED=true
+## Details about mobile client push notification:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
+# PUSH_ENABLED=false
 # PUSH_INSTALLATION_ID=CHANGEME
 # PUSH_INSTALLATION_KEY=CHANGEME
-## Don't change this unless you know what you're doing.
+
+# WARNING: Do not modify the following settings unless you fully understand their implications!
+# Default Push Relay and Identity URIs
 # PUSH_RELAY_URI=https://push.bitwarden.com
 # PUSH_IDENTITY_URI=https://identity.bitwarden.com
+# European Union Data Region Settings
+# If you have selected "European Union" as your data region, use the following URIs instead.
+# PUSH_RELAY_URI=https://api.bitwarden.eu
+# PUSH_IDENTITY_URI=https://identity.bitwarden.eu
 
-## Controls whether users are allowed to create Bitwarden Sends.
-## This setting applies globally to all users.
-## To control this on a per-org basis instead, use the "Disable Send" org policy.
-# SENDS_ALLOWED=true
-
-## Controls whether users can enable emergency access to their accounts.
-## This setting applies globally to all users.
-# EMERGENCY_ACCESS_ALLOWED=true
-
-## Controls whether event logging is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
-# ORG_EVENTS_ENABLED=false
-
-## Controls whether users can change their email.
-## This setting applies globally to all users
-# EMAIL_CHANGE_ALLOWED=true
-
-## Number of days to retain events stored in the database.
-## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
-# EVENTS_DAYS_RETAIN=
-
-## BETA FEATURE: Groups
-## Controls whether group support is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default because this is a beta feature, it contains known issues!
-## KNOW WHAT YOU ARE DOING!
-# ORG_GROUPS_ENABLED=false
+#####################
+### Schedule jobs ###
+#####################
 
 ## Job scheduler settings
 ##
```
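The `ENABLE_DB_WAL` note above warns that turning the option off is not enough once a SQLite database already runs in WAL mode. A hedged sketch for checking and reverting the journal mode with the stock `sqlite3` CLI, assuming the SQLite backend, the default `data/db.sqlite3` path, and a stopped Vaultwarden:

```bash
# Inspect the current journal mode; prints "wal" or "delete".
sqlite3 data/db.sqlite3 'PRAGMA journal_mode;'

# Revert an existing database to the rollback journal (run while
# Vaultwarden is stopped, and keep a backup of the data folder first).
sqlite3 data/db.sqlite3 'PRAGMA journal_mode=DELETE;'
```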
```diff
@@ -151,60 +150,73 @@
 ## Cron schedule of the job that cleans old events from the event table.
 ## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
 # EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
 
-## Enable extended logging, which shows timestamps and targets in the logs
-# EXTENDED_LOGGING=true
-
-## Timestamp format used in extended logging.
-## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
-# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
-
-## Logging to file
-# LOG_FILE=/path/to/log
-
-## Logging to Syslog
-## This requires extended logging
-# USE_SYSLOG=false
-
-## Log level
-## Change the verbosity of the log output
-## Valid values are "trace", "debug", "info", "warn", "error" and "off"
-## Setting it to "trace" or "debug" would also show logs for mounted
-## routes and static file, websocket and alive requests
-# LOG_LEVEL=Info
-
-## Enable WAL for the DB
-## Set to false to avoid enabling WAL during startup.
-## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents Vaultwarden from automatically enabling it on start.
-## Please read project wiki page about this setting first before changing the value as it can
-## cause performance degradation or might render the service unable to start.
-# ENABLE_DB_WAL=true
-
-## Database connection retries
-## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
-# DB_CONNECTION_RETRIES=15
-
-## Icon service
-## The predefined icon services are: internal, bitwarden, duckduckgo, google.
-## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
-## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
+# EVENTS_DAYS_RETAIN=
 ##
-## `internal` refers to Vaultwarden's built-in icon fetching implementation.
-## If an external service is set, an icon request to Vaultwarden will return an HTTP
-## redirect to the corresponding icon at the external service. An external service may
-## be useful if your Vaultwarden instance has no external network connectivity, or if
-## you are concerned that someone may probe your instance to try to detect whether icons
-## for certain sites have been cached.
-# ICON_SERVICE=internal
+## Cron schedule of the job that cleans old auth requests from the auth request table.
+## Defaults to every minute. Set blank to disable this job.
+# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
+##
+## Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
+## Defaults to every minute. Set blank to disable this job.
+# DUO_CONTEXT_PURGE_SCHEDULE="30 * * * * *"
 
-## Icon redirect code
-## The HTTP status code to use for redirects to an external icon service.
-## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
-## Temporary redirects are useful while testing different icon services, but once a service
-## has been decided on, consider using permanent redirects for cacheability. The legacy codes
-## are currently better supported by the Bitwarden clients.
-# ICON_REDIRECT_CODE=302
+########################
+### General settings ###
+########################
 
+## Domain settings
+## The domain must match the address from where you access the server
+## It's recommended to configure this value, otherwise certain functionality might not work,
+## like attachment downloads, email links and U2F.
+## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
+## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
+## Details:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
+## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
+## For development
+# DOMAIN=http://localhost
+## For public server
+# DOMAIN=https://vw.domain.tld
+## For public server (URL with port number)
+# DOMAIN=https://vw.domain.tld:8443
+## For public server (URL with path)
+# DOMAIN=https://domain.tld/vw
+
+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## HIBP Api Key
+## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
+# HIBP_API_KEY=
+
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
+# ORG_ATTACHMENT_LIMIT=
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
+# USER_ATTACHMENT_LIMIT=
+## Per-user send storage limit (KB)
+## Max kilobytes of send storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further sends.
+# USER_SEND_LIMIT=
+
+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
+## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
+## resulting in an email notification. An incomplete 2FA login is one where the correct
+## master password was provided but the required 2FA step was not completed, which
+## potentially indicates a master password compromise. Set to 0 to disable this check.
+## This setting applies globally to all users.
+# INCOMPLETE_2FA_TIME_LIMIT=3
+
 ## Disable icon downloading
 ## Set to true to disable icon downloading in the internal icon service.
```
|
|||
## will be deleted eventually, but won't be downloaded again.
|
||||
# DISABLE_ICON_DOWNLOAD=false
|
||||
|
||||
## Icon download timeout
|
||||
## Configure the timeout value when downloading the favicons.
|
||||
## The default is 10 seconds, but this could be to low on slower network connections
|
||||
# ICON_DOWNLOAD_TIMEOUT=10
|
||||
|
||||
## Icon blacklist Regex
|
||||
## Any domains or IPs that match this regex won't be fetched by the icon service.
|
||||
## Useful to hide other servers in the local network. Check the WIKI for more details
|
||||
## NOTE: Always enclose this regex withing single quotes!
|
||||
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
|
||||
|
||||
## Any IP which is not defined as a global IP will be blacklisted.
|
||||
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
|
||||
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
|
||||
|
||||
## Disable 2FA remember
|
||||
## Enabling this would force the users to use a second factor to login every time.
|
||||
## Note that the checkbox would still be present, but ignored.
|
||||
# DISABLE_2FA_REMEMBER=false
|
||||
|
||||
## Maximum attempts before an email token is reset and a new email will need to be sent.
|
||||
# EMAIL_ATTEMPTS_LIMIT=3
|
||||
|
||||
## Token expiration time
|
||||
## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
|
||||
# EMAIL_EXPIRATION_TIME=600
|
||||
|
||||
## Email token size
|
||||
## Number of digits in an email 2FA token (min: 6, max: 255).
|
||||
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
|
||||
# EMAIL_TOKEN_SIZE=6
|
||||
|
||||
## Controls if new users can register
|
||||
# SIGNUPS_ALLOWED=true
|
||||
|
||||
|
```diff
@@ -266,6 +246,11 @@
 ## even if SIGNUPS_ALLOWED is set to false
 # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
 
+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
 ## Controls which users can create new orgs.
 ## Blank or 'all' means all users can create orgs (this is the default):
 # ORG_CREATION_USERS=
```
```diff
@@ -274,6 +259,125 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com
 
+## Allows org admins to invite users, even when signups are disabled
+# INVITATIONS_ALLOWED=true
+## Name shown in the invitation emails that don't come from a specific organization
+# INVITATION_ORG_NAME=Vaultwarden
+
+## The number of hours after which an organization invite token, emergency access invite token,
+## email verification token and deletion request token will expire (must be at least 1)
+# INVITATION_EXPIRATION_HOURS=120
+
+## Controls whether users can enable emergency access to their accounts.
+## This setting applies globally to all users.
+# EMERGENCY_ACCESS_ALLOWED=true
+
+## Controls whether users can change their email.
+## This setting applies globally to all users
+# EMAIL_CHANGE_ALLOWED=true
+
+## Number of server-side password hashing iterations for the password hash.
+## The default for new users. If changed, it will be updated during login for existing users.
+# PASSWORD_ITERATIONS=600000
+
+## Controls whether users can set or show password hints. This setting applies globally to all users.
+# PASSWORD_HINTS_ALLOWED=true
+
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured and password hints are allowed.
+## Not recommended for publicly-accessible instances because this provides
+## unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false
+
+#########################
+### Advanced settings ###
+#########################
+
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
+## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
+# IP_HEADER=X-Real-IP
+
+## Icon service
+## The predefined icon services are: internal, bitwarden, duckduckgo, google.
+## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
+## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+##
+## `internal` refers to Vaultwarden's built-in icon fetching implementation.
+## If an external service is set, an icon request to Vaultwarden will return an HTTP
+## redirect to the corresponding icon at the external service. An external service may
+## be useful if your Vaultwarden instance has no external network connectivity, or if
+## you are concerned that someone may probe your instance to try to detect whether icons
+## for certain sites have been cached.
+# ICON_SERVICE=internal
+
+## Icon redirect code
+## The HTTP status code to use for redirects to an external icon service.
+## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
+## Temporary redirects are useful while testing different icon services, but once a service
+## has been decided on, consider using permanent redirects for cacheability. The legacy codes
+## are currently better supported by the Bitwarden clients.
+# ICON_REDIRECT_CODE=302
+
+## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
+## Default: 2592000 (30 days)
+# ICON_CACHE_TTL=2592000
+## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
+## Default: 259200 (3 days)
+# ICON_CACHE_NEGTTL=259200
+
+## Icon download timeout
+## Configure the timeout value when downloading the favicons.
+## The default is 10 seconds, but this could be too low on slower network connections
+# ICON_DOWNLOAD_TIMEOUT=10
+
+## Block HTTP domains/IPs by Regex
+## Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
+## Useful to hide other servers in the local network. Check the WIKI for more details
+## NOTE: Always enclose this regex within single quotes!
+# HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
+
+## Enabling this will cause the internal HTTP client to refuse to connect to any non-global IP address.
+## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+# HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true
+
+## Client Settings
+## Enable experimental feature flags for clients.
+## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
+##
+## The following flags are available:
+## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
+## - "autofill-v2": Use the new autofill implementation.
+## - "browser-fileless-import": Directly import credentials from other providers without a file.
+## - "extension-refresh": Temporarily enable the new extension design until general availability (should be used with the beta Chrome extension)
+## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
+# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
+
+## Require new device emails. When a user logs in an email is required to be sent.
+## If sending the email fails the login attempt will fail!!
+# REQUIRE_DEVICE_EMAIL=false
+
+## Enable extended logging, which shows timestamps and targets in the logs
+# EXTENDED_LOGGING=true
+
+## Timestamp format used in extended logging.
+## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
+# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
+
+## Logging to Syslog
+## This requires extended logging
+# USE_SYSLOG=false
+
+## Logging to file
+# LOG_FILE=/path/to/log
+
+## Log level
+## Change the verbosity of the log output
+## Valid values are "trace", "debug", "info", "warn", "error" and "off"
+## Setting it to "trace" or "debug" would also show logs for mounted routes and static file, websocket and alive requests
+## For a specific module append a comma separated `path::to::module=log_level`
+## For example, to only see debug logs for icons use: LOG_LEVEL="info,vaultwarden::api::icons=debug"
+# LOG_LEVEL=info
+
 ## Token for the admin interface, preferably an Argon2 PHC string
 ## Vaultwarden has a built-in generator by calling `vaultwarden hash`
 ## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
```
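The admin token block above points at the built-in Argon2 generator. A couple of hedged invocations; the binary path inside the container is an assumption about the official image:

```bash
# Interactively derive an Argon2 PHC string to use as ADMIN_TOKEN.
vaultwarden hash

# The same generator from the official container image (path assumed).
docker run --rm -it vaultwarden/server /vaultwarden hash
```

Quote the resulting `$argon2id$...` string when placing it in the environment file, since it contains `$` characters.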
```diff
@@ -289,54 +393,13 @@
 ## meant to be used with the use of a separate auth layer in front
 # DISABLE_ADMIN_TOKEN=false
 
-## Allows org admins to invite users, even when signups are disabled
-# INVITATIONS_ALLOWED=true
-## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Vaultwarden
+## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
+# ADMIN_RATELIMIT_SECONDS=300
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
+# ADMIN_RATELIMIT_MAX_BURST=3
 
-## The number of hours after which an organization invite token, emergency access invite token,
-## email verification token and deletion request token will expire (must be at least 1)
-# INVITATION_EXPIRATION_HOURS=120
-
-## Per-organization attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per organization.
-## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
-# ORG_ATTACHMENT_LIMIT=
-## Per-user attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per user.
-## When this limit is reached, the user will not be allowed to upload further attachments.
-# USER_ATTACHMENT_LIMIT=
-
-## Number of days to wait before auto-deleting a trashed item.
-## If unset (the default), trashed items are not auto-deleted.
-## This setting applies globally, so make sure to inform all users of any changes to this setting.
-# TRASH_AUTO_DELETE_DAYS=
-
-## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
-## resulting in an email notification. An incomplete 2FA login is one where the correct
-## master password was provided but the required 2FA step was not completed, which
-## potentially indicates a master password compromise. Set to 0 to disable this check.
-## This setting applies globally to all users.
-# INCOMPLETE_2FA_TIME_LIMIT=3
-
-## Number of server-side password hashing iterations for the password hash.
-## The default for new users. If changed, it will be updated during login for existing users.
-# PASSWORD_ITERATIONS=350000
-
-## Controls whether users can set password hints. This setting applies globally to all users.
-# PASSWORD_HINTS_ALLOWED=true
-
-## Controls whether a password hint should be shown directly in the web page if
-## SMTP service is not configured. Not recommended for publicly-accessible instances
-## as this provides unauthenticated access to potentially sensitive data.
-# SHOW_PASSWORD_HINT=false
-
-## Domain settings
-## The domain must match the address from where you access the server
-## It's recommended to configure this value, otherwise certain functionality might not work,
-## like attachment downloads, email links and U2F.
-## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://vw.domain.tld:8443
+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20
 
 ## Allowed iframe ancestors (Know the risks!)
 ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
```
```diff
@@ -351,13 +414,28 @@
 ## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
 # LOGIN_RATELIMIT_MAX_BURST=10
 
-## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
-# ADMIN_RATELIMIT_SECONDS=300
-## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
-# ADMIN_RATELIMIT_MAX_BURST=3
+## BETA FEATURE: Groups
+## Controls whether group support is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default because this is a beta feature, it contains known issues!
+## KNOW WHAT YOU ARE DOING!
+# ORG_GROUPS_ENABLED=false
 
-## Set the lifetime of admin sessions to this value (in minutes).
-# ADMIN_SESSION_LIFETIME=20
+## Increase secure note size limit (Know the risks!)
+## Sets the secure note size limit to 100_000 instead of the default 10_000.
+## WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
+## KNOW WHAT YOU ARE DOING!
+# INCREASE_NOTE_SIZE_LIMIT=false
+
+## Enforce Single Org with Reset Password Policy
+## Enforce that the Single Org policy is enabled before setting the Reset Password policy
+## Bitwarden enforces this by default. In Vaultwarden we encouraged the use of multiple organizations because groups were not available.
+## Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
+# ENFORCE_SINGLE_ORG_WITH_RESET_PW_POLICY=false
+
+########################
+### MFA/2FA settings ###
+########################
 
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
```
```diff
@@ -368,16 +446,46 @@
 # YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
 
 ## Duo Settings
-## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
+## You need to configure the DUO_IKEY, DUO_SKEY, and DUO_HOST options to enable global Duo support.
+## Otherwise users will need to configure it themselves.
 ## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
 ## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
 ## Then set the following options, based on the values obtained from the last step:
-# DUO_IKEY=<Integration Key>
-# DUO_SKEY=<Secret Key>
+# DUO_IKEY=<Client ID>
+# DUO_SKEY=<Client Secret>
 # DUO_HOST=<API Hostname>
 ## After that, you should be able to follow the rest of the guide linked above,
 ## ignoring the fields that ask for the values that you already configured beforehand.
+##
+## If you want to attempt to use Duo's 'Traditional Prompt' (deprecated, iframe based) set DUO_USE_IFRAME to 'true'.
+## Duo no longer supports this, but it still works for some integrations.
+## If you aren't sure, leave this alone.
+# DUO_USE_IFRAME=false
+
+## Email 2FA settings
+## Email token size
+## Number of digits in an email 2FA token (min: 6, max: 255).
+## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
+# EMAIL_TOKEN_SIZE=6
+##
+## Token expiration time
+## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
+# EMAIL_EXPIRATION_TIME=600
+##
+## Maximum attempts before an email token is reset and a new email will need to be sent.
+# EMAIL_ATTEMPTS_LIMIT=3
+##
+## Setup email 2FA regardless of any organization policy
+# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
+## Automatically setup email 2FA as fallback provider when needed
+# EMAIL_2FA_AUTO_FALLBACK=false
+
+## Other MFA/2FA settings
+## Disable 2FA remember
+## Enabling this would force the users to use a second factor to login every time.
+## Note that the checkbox would still be present, but ignored.
+# DISABLE_2FA_REMEMBER=false
+##
 ## Authenticator Settings
 ## Disable authenticator time drifted codes to be valid.
 ## TOTP codes of the previous and next 30 seconds will be invalid
```
```diff
@@ -390,12 +498,9 @@
 ## In any case, if a code has been used it can not be used again; also codes which predate it will be invalid.
 # AUTHENTICATOR_DISABLE_TIME_DRIFT=false
 
-## Rocket specific settings
-## See https://rocket.rs/v0.4/guide/configuration/ for more details.
-# ROCKET_ADDRESS=0.0.0.0
-# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
-# ROCKET_WORKERS=10
-# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
+###########################
+### SMTP Email settings ###
+###########################
 
 ## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
```
```diff
@@ -403,12 +508,19 @@
 # SMTP_HOST=smtp.domain.tld
 # SMTP_FROM=vaultwarden@domain.tld
 # SMTP_FROM_NAME=Vaultwarden
-# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
-# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15
 
+## Choose the type of secure connection for SMTP. The default is "starttls".
+## The available options are:
+## - "starttls": The default port is 587.
+## - "force_tls": The default port is 465.
+## - "off": The default port is 25.
+## Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
+# SMTP_SECURITY=starttls
+# SMTP_PORT=587
+
 # Whether to send mail via the `sendmail` command
 # USE_SENDMAIL=false
 # Which sendmail command to use. The one found in the $PATH is used if not specified.
```
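A hedged way to verify which of the secure-connection modes a relay actually speaks, using plain `openssl`; replace `smtp.domain.tld` with your own relay — these commands are illustrations, not part of the template:

```bash
# Explicit TLS / "starttls" on the submission port:
openssl s_client -starttls smtp -connect smtp.domain.tld:587 -crlf

# Implicit TLS / "force_tls" on the submissions port:
openssl s_client -connect smtp.domain.tld:465 -crlf
```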
```diff
@@ -417,7 +529,7 @@
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
-# SMTP_AUTH_MECHANISM="Plain"
+# SMTP_AUTH_MECHANISM=
 
 ## Server name sent during the SMTP HELO
 ## By default this value should be based on the machine's hostname,
```
```diff
@@ -425,30 +537,34 @@
 # HELO_NAME=
 
 ## Embed images as email attachments
-# SMTP_EMBED_IMAGES=false
+# SMTP_EMBED_IMAGES=true
 
 ## SMTP debugging
 ## When set to true this will output very detailed SMTP messages.
 ## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
 # SMTP_DEBUG=false
 
-## Accept Invalid Hostnames
-## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
-## Only use this as a last resort if you are not able to use a valid certificate.
-# SMTP_ACCEPT_INVALID_HOSTNAMES=false
-
 ## Accept Invalid Certificates
 ## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
 ## Only use this as a last resort if you are not able to use a valid certificate.
 ## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
 # SMTP_ACCEPT_INVALID_CERTS=false
 
-## Require new device emails. When a user logs in an email is required to be sent.
-## If sending the email fails the login attempt will fail!!
-# REQUIRE_DEVICE_EMAIL=false
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
 
+#######################
+### Rocket settings ###
+#######################
+
+## Rocket specific settings
+## See https://rocket.rs/v0.5/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+## The default port is 8000, unless running in a Docker container, in which case it is 80.
+# ROCKET_PORT=8000
+# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
 
-## HIBP Api Key
-## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
-# HIBP_API_KEY=
-
 # vim: syntax=ini
```
.github/CODEOWNERS — 3 lines (new file)

```diff
@@ -0,0 +1,3 @@
+/.github @dani-garcia @BlackDex
+/.github/CODEOWNERS @dani-garcia @BlackDex
+/.github/workflows/** @dani-garcia @BlackDex
```
.github/ISSUE_TEMPLATE/bug_report.md — 66 lines (deleted)

```diff
@@ -1,66 +0,0 @@
----
-name: Bug report
-about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
-title: ''
-labels: ''
-assignees: ''
-
----
-<!--
-# ###
-NOTE: Please update to the latest version of vaultwarden before reporting an issue!
-This saves you and us a lot of time and troubleshooting.
-See:
-* https://github.com/dani-garcia/vaultwarden/issues/1180
-* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
-# ###
--->
-
-<!--
-Please fill out the following template to make solving your problem easier and faster for us.
-This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
-
-Remember to hide/redact personal or confidential information,
-such as passwords, IP addresses, and DNS names as appropriate.
--->
-
-### Subject of the issue
-<!-- Describe your issue here. -->
-
-### Deployment environment
-
-<!--
-=========================================================================================
-Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
-That will auto-generate most of the info requested in this section.
-=========================================================================================
--->
-
-<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
-<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
-<!-- Remember to check if your issue exists on the latest version first! -->
-* vaultwarden version:
-
-<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
-* Install method:
-
-* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
-
-* Reverse proxy and version: <!-- if applicable -->
-
-* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
-
-* Other relevant details:
-
-### Steps to reproduce
-<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
-and how did you start vaultwarden? -->
-
-### Expected behaviour
-<!-- Tell us what you expected to happen -->
-
-### Actual behaviour
-<!-- Tell us what actually happened -->
-
-### Troubleshooting data
-<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
```
.github/ISSUE_TEMPLATE/bug_report.yml — 167 lines (new file)

```diff
@@ -0,0 +1,167 @@
+name: Bug Report
+description: File a bug report
+labels: ["bug"]
+body:
+  #
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to fill out this bug report!
+
+        Please *do not* submit feature requests or ask for help on how to configure Vaultwarden here.
+
+        The [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions/) has sections for Questions and Ideas.
+
+        Also, make sure you are running [![GitHub Release](https://img.shields.io/github/release/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/releases/latest) of Vaultwarden!
+        And search for existing open or closed issues or discussions regarding your topic before posting.
+
+        Be sure to check and validate the Vaultwarden Admin Diagnostics (`/admin/diagnostics`) page for any errors!
+        See here [how to enable the admin page](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page).
+  #
+  - id: support-string
+    type: textarea
+    attributes:
+      label: Vaultwarden Support String
+      description: Output of the **Generate Support String** from the `/admin/diagnostics` page.
+      placeholder: |
+        1. Go to the Vaultwarden Admin of your instance https://example.domain.tld/admin/diagnostics
+        2. Click on `Generate Support String`
+        3. Click on `Copy To Clipboard`
+        4. Replace this text by pasting it into this textarea without any modifications
+    validations:
+      required: true
+  #
+  - id: version
+    type: input
+    attributes:
+      label: Vaultwarden Build Version
+      description: What version of Vaultwarden are you running?
+      placeholder: ex. v1.31.0 or v1.32.0-3466a804
+    validations:
+      required: true
+  #
+  - id: deployment
+    type: dropdown
+    attributes:
+      label: Deployment method
+      description: How did you deploy Vaultwarden?
+      multiple: false
+      options:
+        - Official Container Image
+        - Build from source
+        - OS Package (apt, yum/dnf, pacman, apk, nix, ...)
+        - Manually Extracted from Container Image
+        - Downloaded from GitHub Actions Release Workflow
+        - Other method
+    validations:
+      required: true
+  #
+  - id: deployment-other
+    type: textarea
+    attributes:
+      label: Custom deployment method
+      description: If you deployed Vaultwarden via any other method, please describe how.
+  #
+  - id: reverse-proxy
+    type: input
+    attributes:
+      label: Reverse Proxy
+      description: Are you using a reverse proxy, if so which and what version?
+      placeholder: ex. nginx 1.26.2, caddy 2.8.4, traefik 3.1.2, haproxy 3.0
+    validations:
+      required: true
+  #
+  - id: os
+    type: dropdown
+    attributes:
+      label: Host/Server Operating System
+      description: On what operating system are you running the Vaultwarden server?
+      multiple: false
+      options:
+        - Linux
+        - NAS/SAN
+        - Cloud
+        - Windows
+        - macOS
+        - Other
+    validations:
+      required: true
+  #
+  - id: os-version
+    type: input
+    attributes:
+      label: Operating System Version
+      description: What version of the operating system(s) are you seeing the problem on?
+      placeholder: ex. Arch Linux, Ubuntu 24.04, Kubernetes, Synology DSM 7.x, Windows 11
+  #
+  - id: clients
+    type: dropdown
+    attributes:
+      label: Clients
+      description: What client(s) are you seeing the problem on?
+      multiple: true
+      options:
+        - Web Vault
+        - Browser Extension
+        - CLI
+        - Desktop
+        - Android
+        - iOS
+    validations:
+      required: true
+  #
+  - id: client-version
+    type: input
+    attributes:
+      label: Client Version
+      description: What version(s) of the client(s) are you seeing the problem on?
+      placeholder: ex. CLI v2024.7.2, Firefox 130 - v2024.7.0
+  #
+  - id: reproduce
+    type: textarea
+    attributes:
+      label: Steps To Reproduce
+      description: How can we reproduce the behavior.
+      value: |
+        1. Go to '...'
+        2. Click on '....'
+        3. Scroll down to '....'
+        4. Click on '...'
+        5. Etc '...'
+    validations:
+      required: true
+  #
+  - id: expected
+    type: textarea
+    attributes:
+      label: Expected Result
+      description: A clear and concise description of what you expected to happen.
+    validations:
+      required: true
+  #
+  - id: actual
+    type: textarea
+    attributes:
+      label: Actual Result
+      description: A clear and concise description of what is happening.
+    validations:
+      required: true
+  #
+  - id: logs
+    type: textarea
+    attributes:
+      label: Logs
+      description: Provide the logs generated by Vaultwarden during the time this issue occurs.
+      render: text
+  #
+  - id: screenshots
+    type: textarea
+    attributes:
+      label: Screenshots or Videos
+      description: If applicable, add screenshots and/or a short video to help explain your problem.
+  #
+  - id: additional-context
+    type: textarea
+    attributes:
+      label: Additional Context
+      description: Add any other context about the problem here.
```
.github/ISSUE_TEMPLATE/config.yml — 10 lines changed

```diff
@@ -1,8 +1,8 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Discourse forum for vaultwarden
-    url: https://vaultwarden.discourse.group/
-    about: Use this forum to request features or get help with usage/configuration.
-  - name: GitHub Discussions for vaultwarden
+  - name: GitHub Discussions for Vaultwarden
     url: https://github.com/dani-garcia/vaultwarden/discussions
-    about: An alternative to the Discourse forum, if this is easier for you.
+    about: Use the discussions to request features or get help with usage/configuration.
+  - name: Discourse forum for Vaultwarden
+    url: https://vaultwarden.discourse.group/
+    about: An alternative to the GitHub Discussions, if this is easier for you.
```
.github/workflows/build.yml — 9 lines changed

```diff
@@ -28,6 +28,7 @@ on:
 
 jobs:
   build:
+    # We use Ubuntu 22.04 here because this matches the library versions used within the Debian docker containers
     runs-on: ubuntu-22.04
     timeout-minutes: 120
     # Make warnings errors, this is to prevent warnings slipping through.
@@ -46,7 +47,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
+        uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1
      # End Checkout the repo
 
 
@@ -74,7 +75,7 @@ jobs:
 
       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
+        uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -84,7 +85,7 @@ jobs:
 
       # Install any other channel to be used, for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
+        uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -106,7 +107,7 @@ jobs:
       # End Show environment
 
       # Enable Rust Caching
-      - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
+      - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
         with:
           # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
           # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
```
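All of these bumps replace one pinned commit SHA with another while keeping the human-readable tag in a trailing comment. A hedged helper for producing such pins yourself, using only stock git:

```bash
# Resolve a release tag to the full commit SHA used for pinning,
# e.g. for actions/checkout v4.2.1 as in the hunks above.
git ls-remote https://github.com/actions/checkout refs/tags/v4.2.1
```

Annotated tags print the tag object's SHA plus a `^{}` line carrying the commit itself; the `^{}` value is the one to pin.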
.github/workflows/hadolint.yml — 30 lines changed

```diff
@@ -8,14 +8,26 @@ on: [
 jobs:
   hadolint:
     name: Validate Dockerfile syntax
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
     timeout-minutes: 30
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
+        uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1
       # End Checkout the repo
 
+      # Start Docker Buildx
+      - name: Setup Docker Buildx
+        uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
+        # https://github.com/moby/buildkit/issues/3969
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
+        with:
+          buildkitd-config-inline: |
+            [worker.oci]
+              max-parallelism = 2
+          driver-opts: |
+            network=host
+
       # Download hadolint - https://github.com/hadolint/hadolint/releases
       - name: Download hadolint
         shell: bash
@@ -26,8 +38,18 @@ jobs:
           HADOLINT_VERSION: 2.12.0
       # End Download hadolint
 
-      # Test Dockerfiles
+      # Test Dockerfiles with hadolint
       - name: Run hadolint
         shell: bash
         run: hadolint docker/Dockerfile.{debian,alpine}
-      # End Test Dockerfiles
+      # End Test Dockerfiles with hadolint
+
+      # Test Dockerfiles with docker build checks
+      - name: Run docker build check
+        shell: bash
+        run: |
+          echo "Checking docker/Dockerfile.debian"
+          docker build --check . -f docker/Dockerfile.debian
+          echo "Checking docker/Dockerfile.alpine"
+          docker build --check . -f docker/Dockerfile.alpine
+      # End Test Dockerfiles with docker build checks
```
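Both checks can be reproduced locally before pushing. A sketch using the official hadolint container image instead of the downloaded binary (image name assumed; `docker build --check` needs a recent BuildKit):

```bash
# Lint both Dockerfiles with hadolint, as the workflow does.
docker run --rm -i hadolint/hadolint < docker/Dockerfile.debian
docker run --rm -i hadolint/hadolint < docker/Dockerfile.alpine

# Run Docker's own build checks without producing an image.
docker build --check . -f docker/Dockerfile.debian
docker build --check . -f docker/Dockerfile.alpine
```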
.github/workflows/release.yml — 144 lines changed

```diff
@@ -2,21 +2,10 @@ name: Release
 
 on:
   push:
-    paths:
-      - ".github/workflows/release.yml"
-      - "src/**"
-      - "migrations/**"
-      - "docker/**"
-      - "Cargo.*"
-      - "build.rs"
-      - "diesel.toml"
-      - "rust-toolchain.toml"
-
-    branches: # Only on paths above
+    branches:
       - main
+      - release-build-revision
 
-    tags: # Always, regardless of paths above
+    tags:
       - '*'
 
 jobs:
```
```diff
@@ -24,30 +13,30 @@
   # Some checks to determine if we need to continue with building a new docker.
   # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
   skip_check:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
     if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
       - name: Skip Duplicates Actions
         id: skip_check
-        uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
+        uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
         with:
           cancel_others: 'true'
         # Only run this when not creating a tag
         if: ${{ github.ref_type == 'branch' }}
 
   docker-build:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
     timeout-minutes: 120
     needs: skip_check
     if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
-    # TODO: Start a local docker registry to be used to extract the final Alpine static build images
-    # services:
-    #   registry:
-    #     image: registry:2
-    #     ports:
-    #       - 5000:5000
+    # Start a local docker registry to extract the final Alpine static build binaries
+    services:
+      registry:
+        image: registry:2
+        ports:
+          - 5000:5000
     env:
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
```
@@ -69,22 +58,22 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
+        uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1
         with:
           fetch-depth: 0

       - name: Initialize QEMU binfmt support
-        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
+        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
         with:
           platforms: "arm64,arm"

       # Start Docker Buildx
       - name: Setup Docker Buildx
-        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
         # https://github.com/moby/buildkit/issues/3969
-        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions
+        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
         with:
-          config-inline: |
+          buildkitd-config-inline: |
             [worker.oci]
               max-parallelism = 2
           driver-opts: |
@@ -113,7 +102,7 @@ jobs:
       # Login to Docker Hub
       - name: Login to Docker Hub
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -127,7 +116,7 @@ jobs:
       # Login to GitHub Container Registry
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -142,7 +131,7 @@ jobs:
       # Login to Quay.io
       - name: Login to Quay.io
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
         with:
           registry: quay.io
           username: ${{ secrets.QUAY_USERNAME }}
@@ -155,8 +144,28 @@ jobs:
         run: |
           echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"

+      - name: Configure build cache from/to
+        shell: bash
+        run: |
+          #
+          # Check if there is a GitHub Container Registry Login and use it for caching
+          if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
+            echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
+            echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
+          else
+            echo "BAKE_CACHE_FROM="
+            echo "BAKE_CACHE_TO="
+          fi
+          #
+
+      - name: Add localhost registry
+        if: ${{ matrix.base_image == 'alpine' }}
+        shell: bash
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
+
       - name: Bake ${{ matrix.base_image }} containers
-        uses: docker/bake-action@511fde2517761e303af548ec9e0ea74a8a100112 # v4.0.0
+        uses: docker/bake-action@2e3d19baedb14545e5d41222653874f25d5b4dfb # v5.10.0
         env:
           BASE_TAGS: "${{ env.BASE_TAGS }}"
           SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -168,3 +177,76 @@ jobs:
           push: true
           files: docker/docker-bake.hcl
           targets: "${{ matrix.base_image }}-multi"
+          set: |
+            *.cache-from=${{ env.BAKE_CACHE_FROM }}
+            *.cache-to=${{ env.BAKE_CACHE_TO }}
+
+      # Extract the Alpine binaries from the containers
+      - name: Extract binaries
+        if: ${{ matrix.base_image == 'alpine' }}
+        shell: bash
+        run: |
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            EXTRACT_TAG="latest"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            EXTRACT_TAG="testing"
+          fi
+
+          # After each extraction the image is removed.
+          # This is needed because using different platforms doesn't trigger a new pull/download
+
+          # Extract amd64 binary
+          docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp amd64:/vaultwarden vaultwarden-amd64
+          docker rm --force amd64
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract arm64 binary
+          docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp arm64:/vaultwarden vaultwarden-arm64
+          docker rm --force arm64
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv7 binary
+          docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv7:/vaultwarden vaultwarden-armv7
+          docker rm --force armv7
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv6 binary
+          docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv6:/vaultwarden vaultwarden-armv6
+          docker rm --force armv6
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+      # Upload artifacts to Github Actions
+      - name: "Upload amd64 artifact"
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
+          path: vaultwarden-amd64
+
+      - name: "Upload arm64 artifact"
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
+          path: vaultwarden-arm64
+
+      - name: "Upload armv7 artifact"
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
+          path: vaultwarden-armv7
+
+      - name: "Upload armv6 artifact"
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
+          path: vaultwarden-armv6
+      # End Upload artifacts to Github Actions
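The same create/copy/remove pattern works outside CI as well. A sketch against the published image instead of the workflow's `localhost:5000` registry (image tag and platform are illustrative):

```sh
# Create a stopped container, copy the static binary out of it, then clean up
docker create --name vw-extract --platform=linux/amd64 vaultwarden/server:latest-alpine
docker cp vw-extract:/vaultwarden ./vaultwarden-amd64
docker rm --force vw-extract
```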
26  .github/workflows/releasecache-cleanup.yml  (vendored, new file)
@@ -0,0 +1,26 @@
+on:
+  workflow_dispatch:
+    inputs:
+      manual_trigger:
+        description: "Manual trigger buildcache cleanup"
+        required: false
+        default: ""
+
+  schedule:
+    - cron: '0 1 * * FRI'
+
+name: Cleanup
+jobs:
+  releasecache-cleanup:
+    name: Releasecache Cleanup
+    runs-on: ubuntu-24.04
+    continue-on-error: true
+    timeout-minutes: 30
+    steps:
+      - name: Delete vaultwarden-buildcache containers
+        uses: actions/delete-package-versions@e5bc658cc4c965c472efe991f8beea3981499c55 # v5.0.0
+        with:
+          package-name: 'vaultwarden-buildcache'
+          package-type: 'container'
+          min-versions-to-keep: 0
+          delete-only-untagged-versions: 'false'
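Besides the Friday cron schedule, the `workflow_dispatch` trigger allows starting this cleanup by hand. A sketch using the GitHub CLI, assuming an authenticated `gh`:

```sh
# Dispatch the buildcache cleanup workflow manually
gh workflow run releasecache-cleanup.yml --repo dani-garcia/vaultwarden
```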
14  .github/workflows/trivy.yml  (vendored)
@@ -4,21 +4,23 @@ on:
   push:
     branches:
       - main
+      - release-build-revision
     tags:
       - '*'
   pull_request:
     branches: [ "main" ]
   schedule:
-    - cron: '00 12 * * *'
+    - cron: '08 11 * * *'

 permissions:
   contents: read

 jobs:
   trivy-scan:
+    # Only run this in the master repo and not on forks
+    # When all forks run this at the same time, it is causing `Too Many Requests` issues
+    if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
     name: Check
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
     timeout-minutes: 30
     permissions:
       contents: read

@@ -26,10 +28,10 @@ jobs:
       actions: read
     steps:
       - name: Checkout code
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
+        uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1

       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@f78e9ecf42a1271402d4f484518b9313235990e1 # v0.13.1
+        uses: aquasecurity/trivy-action@5681af892cd0f4997658e2bacc62bd0a894cf564 # v0.27.0
         with:
           scan-type: repo
           ignore-unfixed: true

@@ -38,6 +40,6 @@ jobs:
           severity: CRITICAL,HIGH

       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@bad341350a2f5616f9e048e51360cedc49181ce8 # v2.22.4
+        uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.26.6
         with:
           sarif_file: 'trivy-results.sarif'
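A roughly equivalent scan can be run locally, assuming the `trivy` CLI is installed (the action's `scan-type: repo` on a checkout corresponds to a filesystem scan):

```sh
# Scan the working tree, reporting only fixable CRITICAL/HIGH findings
trivy fs --ignore-unfixed --severity CRITICAL,HIGH .
```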
.pre-commit-config.yaml

@@ -1,7 +1,7 @@
 ---
 repos:
 -   repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v4.6.0
     hooks:
     -   id: check-yaml
     -   id: check-json
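To pick up the bumped hook revision locally, assuming `pre-commit` is installed:

```sh
# Install the hooks and run them once across the whole tree
pre-commit install
pre-commit run --all-files
```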
2590  Cargo.lock  (generated): file diff suppressed because it is too large
208  Cargo.toml
@@ -3,7 +3,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.71.1"
+rust-version = "1.80.0"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -18,93 +18,92 @@ build = "build.rs"
 enable_syslog = []
 mysql = ["diesel/mysql", "diesel_migrations/mysql"]
 postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
-sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
+sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
 # Enable to use a vendored and statically linked openssl
 vendored_openssl = ["openssl/vendored"]
 # Enable MiMalloc memory allocator to replace the default malloc
 # This can improve performance for Alpine builds
-enable_mimalloc = ["mimalloc"]
+enable_mimalloc = ["dep:mimalloc"]
 # This is a development dependency, and should only be used during development!
 # It enables the usage of the diesel_logger crate, which is able to output the generated queries.
 # You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
 # if you want to turn off the logging for a specific run.
-query_logger = ["diesel_logger"]
+query_logger = ["dep:diesel_logger"]

 # Enable unstable features, requires nightly
 # Currently only used to enable rusts official ip support
 unstable = []

-[target."cfg(not(windows))".dependencies]
+[target."cfg(unix)".dependencies]
 # Logging
-syslog = "6.1.0"
+syslog = "6.1.1"

 [dependencies]
 # Logging
-log = "0.4.20"
-fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
+log = "0.4.22"
+fern = { version = "0.7.0", features = ["syslog-6", "reopen-1"] }
 tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

 # A `dotenv` implementation for Rust
 dotenvy = { version = "0.15.7", default-features = false }

 # Lazy initialization
-once_cell = "1.18.0"
+once_cell = "1.20.2"

 # Numerical libraries
-num-traits = "0.2.17"
-num-derive = "0.4.1"
+num-traits = "0.2.19"
+num-derive = "0.4.2"
+bigdecimal = "0.4.6"

 # Web framework
-rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
-# rocket_ws = { version ="0.1.0-rc.3" }
-rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch
+rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
+rocket_ws = { version ="0.1.1" }

 # WebSockets libraries
-tokio-tungstenite = "0.19.0"
-rmpv = "1.0.1" # MessagePack library
+rmpv = "1.3.0" # MessagePack library

 # Concurrent HashMap used for WebSocket messaging and favicons
-dashmap = "5.5.3"
+dashmap = "6.1.0"

 # Async futures
-futures = "0.3.28"
-tokio = { version = "1.33.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+futures = "0.3.31"
+tokio = { version = "1.41.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.189", features = ["derive"] }
-serde_json = "1.0.107"
+serde = { version = "1.0.214", features = ["derive"] }
+serde_json = "1.0.132"

 # A safe, extensible ORM and Query builder
-diesel = { version = "2.1.3", features = ["chrono", "r2d2"] }
-diesel_migrations = "2.1.0"
+diesel = { version = "2.2.4", features = ["chrono", "r2d2", "numeric"] }
+diesel_migrations = "2.2.0"
 diesel_logger = { version = "0.3.0", optional = true }

 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.30.1", features = ["bundled"], optional = true }

 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
-ring = "0.17.5"
+ring = "0.17.8"

 # UUID generation
-uuid = { version = "1.5.0", features = ["v4"] }
+uuid = { version = "1.11.0", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.31", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.3"
-time = "0.3.30"
+chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.10.0"
+time = "0.3.36"

 # Job scheduler
-job_scheduler_ng = "2.0.4"
+job_scheduler_ng = "2.0.5"

 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.4.0"
+data-encoding = "2.6.0"

 # JWT library
-jsonwebtoken = "9.0.0"
+jsonwebtoken = "9.3.0"

 # TOTP library
-totp-lite = "2.0.0"
+totp-lite = "2.0.1"

 # Yubico Library
 yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
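Since no database backend is enabled by default, a feature list is always required when building; the new `dep:` prefix only changes how these features map to optional dependencies, not how they are selected. For example:

```sh
# Build with the previous default (SQLite only), or with all three backends
cargo build --release --features sqlite
cargo build --release --features sqlite,mysql,postgresql
```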
@@ -113,71 +112,67 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
 webauthn-rs = "0.3.2"

 # Handling of URL's for WebAuthn and favicons
-url = "2.4.1"
+url = "2.5.3"

 # Email libraries
-lettre = { version = "0.11.0", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
-percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
-email_address = "0.2.4"
+lettre = { version = "0.11.10", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
+email_address = "0.2.9"

 # HTML Template library
-handlebars = { version = "4.4.0", features = ["dir_source"] }
+handlebars = { version = "6.2.0", features = ["dir_source"] }

 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.22", features = ["stream", "json", "deflate", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
+reqwest = { version = "0.12.9", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+hickory-resolver = "0.24.1"

 # Favicon extraction libraries
-html5gum = "0.5.7"
-regex = { version = "1.10.2", features = ["std", "perf", "unicode-perl"], default-features = false }
-data-url = "0.3.0"
-bytes = "1.5.0"
+html5gum = "0.6.1"
+regex = { version = "1.11.1", features = ["std", "perf", "unicode-perl"], default-features = false }
+data-url = "0.3.1"
+bytes = "1.8.0"

 # Cache function results (Used for version check and favicon fetching)
-cached = { version = "0.46.0", features = ["async"] }
+cached = { version = "0.54.0", features = ["async"] }

 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.16.2"
-cookie_store = "0.19.1"
+cookie = "0.18.1"
+cookie_store = "0.21.1"

 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.57"
-# Set openssl-sys fixed to v0.9.92 to prevent building issues with musl, arm and 32bit pointer width
-# It will force add a dynamically linked library which prevents the build from being static
-openssl-sys = "=0.9.92"
+openssl = "0.10.68"

 # CLI argument parsing
 pico-args = "0.5.0"

 # Macro ident concatenation
-paste = "1.0.14"
-governor = "0.6.0"
+paste = "1.0.15"
+governor = "0.7.0"

 # Check client versions for specific features.
-semver = "1.0.20"
+semver = "1.0.23"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
-which = "5.0.0"
+mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
+which = "7.0.0"

 # Argon2 library with support for the PHC format
-argon2 = "0.5.2"
+argon2 = "0.5.3"

 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
-rpassword = "7.2.0"
-
-[patch.crates-io]
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
-# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
+rpassword = "7.3.1"
+
+# Loading a dynamic CSS Stylesheet
+grass_compiler = { version = "0.13.4", default-features = false }

 # Strip debuginfo from the release builds
-# Also enable thin LTO for some optimizations
+# The symbols are to provide better panic traces
+# Also enable fat LTO and use 1 codegen unit for optimizations
 [profile.release]
 strip = "debuginfo"
-lto = "thin"
+lto = "fat"
+codegen-units = 1

 # A little bit of a speedup
 [profile.dev]
@@ -187,3 +182,86 @@ split-debuginfo = "unpacked"
 # This is a huge speed improvement during testing
 [profile.dev.package.argon2]
 opt-level = 3
+
+# Optimize for size
+[profile.release-micro]
+inherits = "release"
+opt-level = "z"
+strip = "symbols"
+lto = "fat"
+codegen-units = 1
+panic = "abort"
+
+# Profile for systems with low resources
+# It will use less resources during build
+[profile.release-low]
+inherits = "release"
+strip = "symbols"
+lto = "thin"
+codegen-units = 16
+
+# Linting config
+# https://doc.rust-lang.org/rustc/lints/groups.html
+[lints.rust]
+# Forbid
+unsafe_code = "forbid"
+non_ascii_idents = "forbid"
+
+# Deny
+deprecated_in_future = "deny"
+future_incompatible = { level = "deny", priority = -1 }
+keyword_idents = { level = "deny", priority = -1 }
+let_underscore = { level = "deny", priority = -1 }
+noop_method_call = "deny"
+refining_impl_trait = { level = "deny", priority = -1 }
+rust_2018_idioms = { level = "deny", priority = -1 }
+rust_2021_compatibility = { level = "deny", priority = -1 }
+# rust_2024_compatibility = { level = "deny", priority = -1 } # Enable once we are at MSRV 1.81.0
+single_use_lifetimes = "deny"
+trivial_casts = "deny"
+trivial_numeric_casts = "deny"
+unused = { level = "deny", priority = -1 }
+unused_import_braces = "deny"
+unused_lifetimes = "deny"
+unused_qualifications = "deny"
+variant_size_differences = "deny"
+# The lints below are part of the rust_2024_compatibility group
+static-mut-refs = "deny"
+unsafe-op-in-unsafe-fn = "deny"
+
+# https://rust-lang.github.io/rust-clippy/stable/index.html
+[lints.clippy]
+# Warn
+dbg_macro = "warn"
+todo = "warn"
+
+# Deny
+case_sensitive_file_extension_comparisons = "deny"
+cast_lossless = "deny"
+clone_on_ref_ptr = "deny"
+equatable_if_let = "deny"
+filter_map_next = "deny"
+float_cmp_const = "deny"
+inefficient_to_string = "deny"
+iter_on_empty_collections = "deny"
+iter_on_single_items = "deny"
+linkedlist = "deny"
+macro_use_imports = "deny"
+manual_assert = "deny"
+manual_instant_elapsed = "deny"
+manual_string_new = "deny"
+match_on_vec_items = "deny"
+match_wildcard_for_single_variants = "deny"
+mem_forget = "deny"
+needless_continue = "deny"
+needless_lifetimes = "deny"
+option_option = "deny"
+string_add_assign = "deny"
+string_to_string = "deny"
+unnecessary_join = "deny"
+unnecessary_self_imports = "deny"
+unnested_or_patterns = "deny"
+unused_async = "deny"
+unused_self = "deny"
+verbose_file_reads = "deny"
+zero_sized_map_values = "deny"
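The two new profiles are selected with Cargo's `--profile` flag; a sketch (a DB feature is still required, as above):

```sh
# Size-optimized build, and a build tuned for low-resource hosts
cargo build --profile release-micro --features sqlite
cargo build --profile release-low --features sqlite
```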
195  README.md
@@ -1,95 +1,144 @@
-### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
+![Vaultwarden Logo](./resources/vaultwarden-logo-auto.svg)

-📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
+An alternative server implementation of the Bitwarden Client API, written in Rust and compatible with [official Bitwarden clients](https://bitwarden.com/download/) [[disclaimer](#disclaimer)], perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.

 ---
-[![Build](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml/badge.svg)](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml)
-[![ghcr.io](https://img.shields.io/badge/ghcr.io-download-blue)](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
-[![Docker Pulls](https://img.shields.io/docker/pulls/vaultwarden/server.svg)](https://hub.docker.com/r/vaultwarden/server)
-[![Quay.io](https://img.shields.io/badge/Quay.io-download-blue)](https://quay.io/repository/vaultwarden/server)
-[![Dependency Status](https://deps.rs/repo/github/dani-garcia/vaultwarden/status.svg)](https://deps.rs/repo/github/dani-garcia/vaultwarden)
-[![GitHub Release](https://img.shields.io/github/release/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/releases/latest)
-[![AGPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
-[![Matrix Chat](https://img.shields.io/matrix/vaultwarden:matrix.org.svg?logo=matrix)](https://matrix.to/#/#vaultwarden:matrix.org)
-
-Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
+[![GitHub Release](https://img.shields.io/github/release/dani-garcia/vaultwarden.svg?style=for-the-badge&logo=vaultwarden&color=005AA4)](https://github.com/dani-garcia/vaultwarden/releases/latest)
+[![ghcr.io Pulls](https://img.shields.io/badge/dynamic/json?style=for-the-badge&logo=github&logoColor=fff&color=005AA4&url=https%3A%2F%2Fipitio.github.io%2Fbackage%2Fdani-garcia%2Fvaultwarden%2Fvaultwarden.json&query=%24.downloads&label=ghcr.io%20pulls&cacheSeconds=14400)](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
+[![Docker Pulls](https://img.shields.io/docker/pulls/vaultwarden/server.svg?style=for-the-badge&logo=docker&logoColor=fff&color=005AA4&label=docker.io%20pulls)](https://hub.docker.com/r/vaultwarden/server)
+[![Quay.io](https://img.shields.io/badge/quay.io-download-005AA4?style=for-the-badge&logo=redhat&cacheSeconds=14400)](https://quay.io/repository/vaultwarden/server) <br>
+[![Contributors](https://img.shields.io/github/contributors-anon/dani-garcia/vaultwarden.svg?style=flat-square&logo=vaultwarden&color=005AA4)](https://github.com/dani-garcia/vaultwarden/graphs/contributors)
+[![Forks](https://img.shields.io/github/forks/dani-garcia/vaultwarden.svg?style=flat-square&logo=github&logoColor=fff&color=005AA4)](https://github.com/dani-garcia/vaultwarden/network/members)
+[![Stars](https://img.shields.io/github/stars/dani-garcia/vaultwarden.svg?style=flat-square&logo=github&logoColor=fff&color=005AA4)](https://github.com/dani-garcia/vaultwarden/stargazers)
+[![Issues Open](https://img.shields.io/github/issues/dani-garcia/vaultwarden.svg?style=flat-square&logo=github&logoColor=fff&color=005AA4&cacheSeconds=300)](https://github.com/dani-garcia/vaultwarden/issues)
+[![Issues Closed](https://img.shields.io/github/issues-closed/dani-garcia/vaultwarden.svg?style=flat-square&logo=github&logoColor=fff&color=005AA4&cacheSeconds=300)](https://github.com/dani-garcia/vaultwarden/issues?q=is%3Aissue+is%3Aclosed)
+[![AGPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/vaultwarden.svg?style=flat-square&logo=vaultwarden&color=944000&cacheSeconds=14400)](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt) <br>
+[![Dependency Status](https://img.shields.io/badge/dynamic/xml?url=https%3A%2F%2Fdeps.rs%2Frepo%2Fgithub%2Fdani-garcia%2Fvaultwarden%2Fstatus.svg&query=%2F*%5Blocal-name()%3D'svg'%5D%2F*%5Blocal-name()%3D'g'%5D%5B2%5D%2F*%5Blocal-name()%3D'text'%5D%5B4%5D&style=flat-square&logo=rust&label=dependencies&color=005AA4)](https://deps.rs/repo/github/dani-garcia/vaultwarden)
+[![GHA Release](https://img.shields.io/github/actions/workflow/status/dani-garcia/vaultwarden/release.yml?style=flat-square&logo=github&logoColor=fff&label=Release%20Workflow)](https://github.com/dani-garcia/vaultwarden/actions/workflows/release.yml)
+[![GHA Build](https://img.shields.io/github/actions/workflow/status/dani-garcia/vaultwarden/build.yml?style=flat-square&logo=github&logoColor=fff&label=Build%20Workflow)](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml) <br>
+[![Matrix Chat](https://img.shields.io/matrix/vaultwarden:matrix.org.svg?style=flat-square&logo=matrix&logoColor=fff&color=953B00&cacheSeconds=14400)](https://matrix.to/#/#vaultwarden:matrix.org)
+[![GitHub Discussions](https://img.shields.io/github/discussions/dani-garcia/vaultwarden?style=flat-square&logo=github&logoColor=fff&color=953B00&cacheSeconds=300)](https://github.com/dani-garcia/vaultwarden/discussions)
+[![Discourse Discussions](https://img.shields.io/discourse/topics?server=https%3A%2F%2Fvaultwarden.discourse.group%2F&style=flat-square&logo=discourse&color=953B00)](https://vaultwarden.discourse.group/)

-**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**
+> [!IMPORTANT]
+> **When using this server, please report any bugs or suggestions directly to us (see [Get in touch](#get-in-touch)), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official Bitwarden support channels.**

-#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
-
----
+<br>
 ## Features

-Basically full implementation of Bitwarden API is provided including:
+A nearly complete implementation of the Bitwarden Client API is provided, including:

-* Organizations support
-* Attachments and Send
-* Vault API support
-* Serving the static files for Vault interface
-* Website icons API
-* Authenticator and U2F support
-* YubiKey and Duo support
-* Emergency Access
+* [Personal Vault](https://bitwarden.com/help/managing-items/)
+* [Send](https://bitwarden.com/help/about-send/)
+* [Attachments](https://bitwarden.com/help/attachments/)
+* [Website icons](https://bitwarden.com/help/website-icons/)
+* [Personal API Key](https://bitwarden.com/help/personal-api-key/)
+* [Organizations](https://bitwarden.com/help/getting-started-organizations/)
+  - [Collections](https://bitwarden.com/help/about-collections/),
+    [Password Sharing](https://bitwarden.com/help/sharing/),
+    [Member Roles](https://bitwarden.com/help/user-types-access-control/),
+    [Groups](https://bitwarden.com/help/about-groups/),
+    [Event Logs](https://bitwarden.com/help/event-logs/),
+    [Admin Password Reset](https://bitwarden.com/help/admin-reset/),
+    [Directory Connector](https://bitwarden.com/help/directory-sync/),
+    [Policies](https://bitwarden.com/help/policies/)
+* [Multi/Two Factor Authentication](https://bitwarden.com/help/bitwarden-field-guide-two-step-login/)
+  - [Authenticator](https://bitwarden.com/help/setup-two-step-login-authenticator/),
+    [Email](https://bitwarden.com/help/setup-two-step-login-email/),
+    [FIDO2 WebAuthn](https://bitwarden.com/help/setup-two-step-login-fido/),
+    [YubiKey](https://bitwarden.com/help/setup-two-step-login-yubikey/),
+    [Duo](https://bitwarden.com/help/setup-two-step-login-duo/)
+* [Emergency Access](https://bitwarden.com/help/emergency-access/)
+* [Vaultwarden Admin Backend](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page)
+* [Modified Web Vault client](https://github.com/dani-garcia/bw_web_builds) (Bundled within our containers)
-## Installation
-Pull the docker image and mount a volume from the host for persistent storage:
-
-```sh
-docker pull vaultwarden/server:latest
-docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest
-```
-This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.
-
-**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
-
-This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
-
-If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
+<br>

 ## Usage
-See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.

+> [!IMPORTANT]
+> Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
+>
+>This can be configured in [Vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
+>
+>If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy or Traefik (see examples linked above).

+> [!TIP]
+>**For more detailed examples on how to install, use and configure Vaultwarden you can check our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).**

+The main way to use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).

+There are also [community driven packages](https://github.com/dani-garcia/vaultwarden/wiki/Third-party-packages) which can be used, but those might be lagging behind the latest version or might deviate in the way Vaultwarden is configured, as described in our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).

+### Docker/Podman CLI

+Pull the container image and mount a volume from the host for persistent storage.<br>
+You can replace `docker` with `podman` if you prefer to use podman.

+```shell
+docker pull vaultwarden/server:latest
+docker run --detach --name vaultwarden \
+  --env DOMAIN="https://vw.domain.tld" \
+  --volume /vw-data/:/data/ \
+  --restart unless-stopped \
+  --publish 80:80 \
+  vaultwarden/server:latest
+```

+This will preserve any persistent data under `/vw-data/`, you can adapt the path to whatever suits you.

+### Docker Compose

+To use Docker compose you need to create a `compose.yaml` which will hold the configuration to run the Vaultwarden container.

+```yaml
+services:
+  vaultwarden:
+    image: vaultwarden/server:latest
+    container_name: vaultwarden
+    restart: unless-stopped
+    environment:
+      DOMAIN: "https://vw.domain.tld"
+    volumes:
+      - ./vw-data/:/data/
+    ports:
+      - 80:80
+```

+<br>
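Bringing the stack defined by that `compose.yaml` up and following its logs uses the standard Compose commands:

```sh
# Start the container in the background and tail its logs
docker compose up -d
docker compose logs -f vaultwarden
```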
 ## Get in touch

-To ask a question, offer suggestions or new features or to get help configuring or installing the software, please use [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [the forum](https://vaultwarden.discourse.group/).
+Have a question, suggestion or need help? Join our community on [Matrix](https://matrix.to/#/#vaultwarden:matrix.org), [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [Discourse Forums](https://vaultwarden.discourse.group/).

-If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure you are on the latest version and there aren't any similar issues open, though!
-
-If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!
+Encountered a bug or crash? Please search our issue tracker and discussions to see if it's already been reported. If not, please [start a new discussion](https://github.com/dani-garcia/vaultwarden/discussions) or [create a new issue](https://github.com/dani-garcia/vaultwarden/issues/). Ensure you're using the latest version of Vaultwarden and there aren't any similar issues open or closed!

+<br>
+
+## Contributors

 ### Sponsors
 Thanks for your contribution to the project!

 <!--
-<table>
-    <tr>
-        <td align="center">
-            <a href="https://github.com/username">
-                <img src="https://avatars.githubusercontent.com/u/725423?s=75&v=4" width="75px;" alt="username"/>
-                <br />
-                <sub><b>username</b></sub>
-            </a>
-        </td>
-    </tr>
-</table>
+[![Contributors Count](https://img.shields.io/github/contributors-anon/dani-garcia/vaultwarden?style=for-the-badge&logo=vaultwarden&color=005AA4)](https://github.com/dani-garcia/vaultwarden/graphs/contributors)<br>
+[![Contributors Avatars](https://contributors-img.web.app/image?repo=dani-garcia/vaultwarden)](https://github.com/dani-garcia/vaultwarden/graphs/contributors)

 <br/>
 -->
+<br>

 <table>
     <tr>
         <td align="center">
             <a href="https://github.com/themightychris" style="width: 75px">
                 <sub><b>Chris Alfano</b></sub>
             </a>
         </td>
     </tr>
     <tr>
         <td align="center">
             <a href="https://github.com/numberly" style="width: 75px">
                 <sub><b>Numberly</b></sub>
             </a>
         </td>
     </tr>
 </table>
+
+## Disclaimer
+
+**This project is not associated with [Bitwarden](https://bitwarden.com/) or Bitwarden, Inc.**
+
+However, one of the active maintainers for Vaultwarden is employed by Bitwarden and is allowed to contribute to the project on their own time. These contributions are independent of Bitwarden and are reviewed by other maintainers.
+
+The maintainers work together to set the direction for the project, focusing on serving the self-hosting community, including individuals, families, and small organizations, while ensuring the project's sustainability.
+
+**Please note:** We cannot be held liable for any data loss that may occur while using Vaultwarden. This includes passwords, attachments, and other information handled by the application. We highly recommend performing regular backups of your files and database. However, should you experience data loss, we encourage you to contact us immediately.
+
+<br>
+
+## Bitwarden_RS
+
+This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues.<br>
+Please see [#1642 - v1.21.0 release and project rename to Vaultwarden](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
12  SECURITY.md
@@ -39,7 +39,11 @@ Thank you for helping keep Vaultwarden and our users safe!
 # How to contact us

-- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
-- You can send an ![security-contact](/.github/security-contact.gif) to report a security issue.
-- If you want to send an encrypted email you can use the following GPG key:<br>
-  https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index
+- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (users: `@danig:matrix.org` and/or `@blackdex:matrix.org`)
+- You can send an ![security-contact](/.github/security-contact.gif) to report a security issue.<br>
+  If you want to send an encrypted email you can use the following GPG key: 13BB3A34C9E380258CE43D595CB150B31F6426BC<br>
+  It can be found on several public GPG key servers.<br>
+  * https://keys.openpgp.org/search?q=security%40vaultwarden.org
+  * https://keys.mailvelope.com/pks/lookup?op=get&search=security%40vaultwarden.org
+  * https://pgpkeys.eu/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
+  * https://keyserver.ubuntu.com/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
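The new key can be imported directly from any of the listed key servers; a sketch assuming GnuPG is installed:

```sh
# Fetch the Vaultwarden security contact key by its full fingerprint
gpg --keyserver hkps://keys.openpgp.org --recv-keys 13BB3A34C9E380258CE43D595CB150B31F6426BC
```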
24  build.rs
@@ -17,6 +17,20 @@ fn main() {
         "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
     );

+    // Use check-cfg to let cargo know which cfg's we define,
+    // and avoid warnings when they are used in the code.
+    println!("cargo::rustc-check-cfg=cfg(sqlite)");
+    println!("cargo::rustc-check-cfg=cfg(mysql)");
+    println!("cargo::rustc-check-cfg=cfg(postgresql)");
+    println!("cargo::rustc-check-cfg=cfg(query_logger)");
+
+    // Rerun when these paths are changed.
+    // Someone could have checked-out a tag or specific commit, but no other files changed.
+    println!("cargo:rerun-if-changed=.git");
+    println!("cargo:rerun-if-changed=.git/HEAD");
+    println!("cargo:rerun-if-changed=.git/index");
+    println!("cargo:rerun-if-changed=.git/refs/tags");
+
     #[cfg(all(not(debug_assertions), feature = "query_logger"))]
     compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");

@@ -42,11 +56,11 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
 /// This method reads info from Git, namely tags, branch, and revision
 /// To access these values, use:
-///   - env!("GIT_EXACT_TAG")
-///   - env!("GIT_LAST_TAG")
-///   - env!("GIT_BRANCH")
-///   - env!("GIT_REV")
-///   - env!("VW_VERSION")
+///   - `env!("GIT_EXACT_TAG")`
+///   - `env!("GIT_LAST_TAG")`
+///   - `env!("GIT_BRANCH")`
+///   - `env!("GIT_REV")`
+///   - `env!("VW_VERSION")`
 fn version_from_git_info() -> Result<String, std::io::Error> {
     // The exact tag for the current commit, can be empty when
     // the current commit doesn't have an associated tag
docker/DockerSettings.yaml

@@ -1,12 +1,13 @@
 ---
-vault_version: "v2023.10.0"
-vault_image_digest: "sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935"
-# Cross Compile Docker Helper Scripts v1.3.0
+vault_version: "v2024.6.2c"
+vault_image_digest: "sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b"
+# Cross Compile Docker Helper Scripts v1.5.0
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
-xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
-rust_version: 1.73.0 # Rust version to be used
+# https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
+xx_image_digest: "sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa"
+rust_version: 1.82.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
-alpine_version: 3.18 # Alpine version to be used
+alpine_version: "3.20" # Alpine version to be used
 # For which platforms/architectures will we try to build images
 platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
 # Determine the build images per OS/Arch
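These values feed a Jinja2 template rather than being edited in the generated Dockerfiles directly (see the generated-file notice below). A sketch of regenerating them, assuming the repository's Makefile sits next to the template in `docker/`:

```sh
# Re-render docker/Dockerfile.{debian,alpine} from DockerSettings.yaml and Dockerfile.j2
make -C docker
```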
docker/Dockerfile.alpine

@@ -1,4 +1,5 @@
 # syntax=docker/dockerfile:1
+# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform

 # This file was generated using a Jinja2 template.
 # Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`

@@ -18,27 +19,27 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
-#     [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
+#     [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
-#     [docker.io/vaultwarden/web-vault:v2023.10.0]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
+#     [docker.io/vaultwarden/web-vault:v2024.6.2c]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault

 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.73.0 as build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.73.0 as build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.73.0 as build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.73.0 as build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.82.0 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.82.0 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.82.0 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.82.0 AS build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
+FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} AS build
 ARG TARGETARCH
 ARG TARGETVARIANT
 ARG TARGETPLATFORM

@@ -58,33 +59,29 @@ ENV DEBIAN_FRONTEND=noninteractive \
 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
     # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
     if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-
 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION
-
 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc

 # Builds your dependencies and removes the
 # dummy project, except the target folder

@@ -97,10 +94,13 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
-    touch src/main.rs && \
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
     # Create a symlink to the binary target folder to easy copy the binary in the final stage
     cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
     if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \

@@ -126,7 +126,7 @@ RUN source /env-cargo && \
 # To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
 #
 # We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
-FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.18
+FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \

@@ -143,14 +143,12 @@ RUN mkdir /data && \
 VOLUME /data
 EXPOSE 80
-EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .
@ -1,4 +1,5 @@
|
|||
# syntax=docker/dockerfile:1
|
||||
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
|
||||
|
||||
# This file was generated using a Jinja2 template.
|
||||
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
|
||||
|
@ -18,24 +19,24 @@
|
|||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
|
||||
# click the tag name to view the digest of the image it currently points to.
|
||||
# - From the command line:
|
||||
# $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
|
||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
|
||||
# [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
|
||||
# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
|
||||
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
|
||||
# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
|
||||
#
|
||||
# - Conversely, to get the tag name from the digest:
|
||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
|
||||
# [docker.io/vaultwarden/web-vault:v2023.10.0]
|
||||
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
|
||||
# [docker.io/vaultwarden/web-vault:v2024.6.2c]
|
||||
#
|
||||
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
|
||||
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
|
||||
|
||||
########################## Cross Compile Docker Helper Scripts ##########################
|
||||
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
|
||||
## And these bash scripts do not have any significant difference if at all
|
||||
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
|
||||
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa AS xx
|
||||
|
||||
########################## BUILD IMAGE ##########################
|
||||
# hadolint ignore=DL3006
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.73.0-slim-bookworm as build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.82.0-slim-bookworm AS build
|
||||
COPY --from=xx / /
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
|
@ -64,32 +65,40 @@ RUN apt-get update && \
|
|||
"libc6-$(xx-info debian-arch)-cross" \
|
||||
"libc6-dev-$(xx-info debian-arch)-cross" \
|
||||
"linux-libc-dev-$(xx-info debian-arch)-cross" && \
|
||||
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
|
||||
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
|
||||
|
||||
RUN xx-apt-get install -y \
|
||||
xx-apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc \
|
||||
libmariadb3 \
|
||||
libpq-dev \
|
||||
libpq5 \
|
||||
libssl-dev && \
|
||||
libssl-dev \
|
||||
zlib1g-dev && \
|
||||
# Force install arch dependend mariadb dev packages
|
||||
# Installing them the normal way breaks several other packages (again)
|
||||
apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
|
||||
dpkg --force-all -i ./libmariadb-dev*.deb
|
||||
dpkg --force-all -i ./libmariadb-dev*.deb && \
|
||||
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
|
||||
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
|
||||
|
||||
# Create CARGO_HOME folder and don't download rust docs
|
||||
RUN mkdir -pv "${CARGO_HOME}" \
|
||||
&& rustup set profile minimal
|
||||
RUN mkdir -pv "${CARGO_HOME}" && \
|
||||
rustup set profile minimal
|
||||
|
||||
# Creates a dummy project used to grab dependencies
|
||||
RUN USER=root cargo new --bin /app
|
||||
WORKDIR /app
|
||||
|
||||
# Environment variables for cargo across Debian and Alpine
|
||||
# Environment variables for Cargo on Debian based builds
|
||||
ARG ARCH_OPENSSL_LIB_DIR \
|
||||
ARCH_OPENSSL_INCLUDE_DIR
|
||||
|
||||
RUN source /env-cargo && \
|
||||
if xx-info is-cross ; then \
|
||||
# Some special variables if needed to override some build paths
|
||||
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
|
||||
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
|
||||
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
|
||||
fi && \
|
||||
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
|
||||
# Because of this we generate the needed environment variables here which we can load in the needed steps.
|
||||
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
|
||||
|
@ -102,19 +111,16 @@ RUN source /env-cargo && \
|
|||
# Output the current contents of the file
|
||||
cat /env-cargo
|
||||
|
||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
||||
ARG DB=sqlite,mysql,postgresql
|
||||
|
||||
RUN source /env-cargo && \
|
||||
rustup target add "${CARGO_TARGET}"
|
||||
|
||||
ARG CARGO_PROFILE=release
|
||||
ARG VW_VERSION
|
||||
|
||||
# Copies over *only* your manifests and build files
|
||||
COPY ./Cargo.* ./
|
||||
COPY ./rust-toolchain.toml ./rust-toolchain.toml
|
||||
COPY ./build.rs ./build.rs
|
||||
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
|
||||
|
||||
ARG CARGO_PROFILE=release
|
||||
|
||||
# Configure the DB ARG as late as possible to not invalidate the cached layers above
|
||||
ARG DB=sqlite,mysql,postgresql
|
||||
|
||||
# Builds your dependencies and removes the
|
||||
# dummy project, except the target folder
|
||||
|
@ -127,10 +133,13 @@ RUN source /env-cargo && \
|
|||
# To avoid copying unneeded files, use .dockerignore
|
||||
COPY . .
|
||||
|
||||
ARG VW_VERSION
|
||||
|
||||
# Builds again, this time it will be the actual source files being build
|
||||
RUN source /env-cargo && \
|
||||
# Make sure that we actually build the project by updating the src/main.rs timestamp
|
||||
touch src/main.rs && \
|
||||
# Also do this for build.rs to ensure the version is rechecked
|
||||
touch build.rs src/main.rs && \
|
||||
# Create a symlink to the binary target folder to easy copy the binary in the final stage
|
||||
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
|
||||
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
|
||||
|
@ -177,14 +186,12 @@ RUN mkdir /data && \
|
|||
|
||||
VOLUME /data
|
||||
EXPOSE 80
|
||||
EXPOSE 3012
|
||||
|
||||
# Copies the files from the context (Rocket.toml file and web-vault)
|
||||
# and the binary from the "build" stage to the current stage
|
||||
WORKDIR /
|
||||
|
||||
COPY docker/healthcheck.sh /healthcheck.sh
|
||||
COPY docker/start.sh /start.sh
|
||||
COPY docker/healthcheck.sh docker/start.sh /
|
||||
|
||||
COPY --from=vault /web-vault ./web-vault
|
||||
COPY --from=build /app/target/final/vaultwarden .
|
||||
|
|
|
@@ -1,4 +1,5 @@
 # syntax=docker/dockerfile:1
+# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform

 # This file was generated using a Jinja2 template.
 # Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`

@@ -26,7 +27,7 @@
 # $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
 # [docker.io/vaultwarden/web-vault:{{ vault_version }}]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} AS vault

 {% if base == "debian" %}
 ########################## Cross Compile Docker Helper Scripts ##########################

@@ -38,13 +39,13 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
 {% for arch in build_stage_image[base].arch_image %}
-FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }}
 {% endfor %}
 {% endif %}

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
+FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} AS build
 {% if base == "debian" %}
 COPY --from=xx / /
 {% endif %}

@@ -82,34 +83,42 @@ RUN apt-get update && \
         "libc6-$(xx-info debian-arch)-cross" \
         "libc6-dev-$(xx-info debian-arch)-cross" \
         "linux-libc-dev-$(xx-info debian-arch)-cross" && \
-    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
-    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
-
-RUN xx-apt-get install -y \
+    xx-apt-get install -y \
         --no-install-recommends \
         gcc \
         libmariadb3 \
         libpq-dev \
         libpq5 \
-        libssl-dev && \
+        libssl-dev \
+        zlib1g-dev && \
     # Force install arch dependend mariadb dev packages
     # Installing them the normal way breaks several other packages (again)
    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
-    dpkg --force-all -i ./libmariadb-dev*.deb
+    dpkg --force-all -i ./libmariadb-dev*.deb && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
 {% endif %}

 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

 {% if base == "debian" %}
-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR

 RUN source /env-cargo && \
     if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
         # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
         # Because of this we generate the needed environment variables here which we can load in the needed steps.
         echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

@@ -122,30 +131,28 @@ RUN source /env-cargo && \
     # Output the current contents of the file
     cat /env-cargo

-# Configure the DB ARG as late as possible to not invalidate the cached layers above
-ARG DB=sqlite,mysql,postgresql
 {% elif base == "alpine" %}
-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
     # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
     if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
 {% endif %}

 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION

 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+{% if base == "debian" %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+{% endif %}

 # Builds your dependencies and removes the
 # dummy project, except the target folder

@@ -158,10 +165,13 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
-    touch src/main.rs && \
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
     # Create a symlink to the binary target folder to easy copy the binary in the final stage
     cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
     if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \

@@ -220,14 +230,12 @@ RUN mkdir /data && \

 VOLUME /data
 EXPOSE 80
 EXPOSE 3012

 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .

@@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
 - armv7 (linux/arm/v7)
 - armv6 (linux/arm/v6)

+Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
+- 386 (linux/386)
+- ppc64le (linux/ppc64le)
+- s390x (linux/s390x)
+
 To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
 This ensures the container build process can run binaries from other architectures.<br>

@@ -88,7 +88,7 @@ target "debian" {
   inherits = ["_default_attributes"]
   dockerfile = "docker/Dockerfile.debian"
   tags = generate_tags("", platform_tag())
-  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
+  output = ["type=docker"]
 }

 // Multi Platform target, will build one tagged manifest with all supported architectures

@@ -125,6 +125,40 @@ target "debian-armv6" {
   tags = generate_tags("", "-armv6")
 }

+// ==== Start of unsupported Debian architecture targets ===
+// These are provided just to help users build for these rare platforms
+// They will not be built by default
+target "debian-386" {
+  inherits = ["debian"]
+  platforms = ["linux/386"]
+  tags = generate_tags("", "-386")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
+  }
+}
+
+target "debian-ppc64le" {
+  inherits = ["debian"]
+  platforms = ["linux/ppc64le"]
+  tags = generate_tags("", "-ppc64le")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
+  }
+}
+
+target "debian-s390x" {
+  inherits = ["debian"]
+  platforms = ["linux/s390x"]
+  tags = generate_tags("", "-s390x")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
+  }
+}
+// ==== End of unsupported Debian architecture targets ===
+
 // A Group to build all platforms individually for local testing
 group "debian-all" {
   targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

@@ -138,7 +172,7 @@ target "alpine" {
   inherits = ["_default_attributes"]
   dockerfile = "docker/Dockerfile.alpine"
   tags = generate_tags("-alpine", platform_tag())
-  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
+  output = ["type=docker"]
 }

 // Multi Platform target, will build one tagged manifest with all supported architectures

@@ -216,7 +250,13 @@ function "generate_tags" {
   result = flatten([
     for registry in get_container_registries() :
       [for base_tag in get_base_tags() :
-        concat(["${registry}:${base_tag}${suffix}${platform}"])]
+        concat(
+          # If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too
+          base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [],
+          # The default tagging strategy
+          ["${registry}:${base_tag}${suffix}${platform}"]
+        )
+      ]
   ])
 }
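The reworked `generate_tags` adds an extra bare `:alpine` alias whenever the base tag is `latest` and the suffix is `-alpine`. A rough Rust re-statement of that HCL conditional, for illustration only (the registry and tag inputs are placeholders):

    /// Mirror of docker-bake.hcl's generate_tags() conditional:
    /// "latest" combined with "-alpine" additionally yields a bare ":alpine" tag.
    fn generate_tags(registries: &[&str], base_tags: &[&str], suffix: &str, platform: &str) -> Vec<String> {
        let mut tags = Vec::new();
        for registry in registries {
            for base_tag in base_tags {
                if *base_tag == "latest" && suffix == "-alpine" {
                    tags.push(format!("{registry}:alpine{platform}"));
                }
                // The default tagging strategy
                tags.push(format!("{registry}:{base_tag}{suffix}{platform}"));
            }
        }
        tags
    }

For example, `generate_tags(&["docker.io/vaultwarden/server"], &["latest"], "-alpine", "")` would produce both `docker.io/vaultwarden/server:alpine` and `docker.io/vaultwarden/server:latest-alpine`.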
@@ -1,12 +1,20 @@
-#!/bin/sh
+#!/usr/bin/env sh

 # Use the value of the corresponding env var (if present),
 # or a default value otherwise.
-: "${DATA_FOLDER:="data"}"
+: "${DATA_FOLDER:="/data"}"
 : "${ROCKET_PORT:="80"}"
+: "${ENV_FILE:="/.env"}"

 CONFIG_FILE="${DATA_FOLDER}"/config.json

+# Check if the $ENV_FILE file exist and is readable
+# If that is the case, load it into the environment before running any check
+if [ -r "${ENV_FILE}" ]; then
+    # shellcheck disable=SC1090
+    . "${ENV_FILE}"
+fi
+
 # Given a config key, return the corresponding config value from the
 # config file. If the key doesn't exist, return an empty string.
 get_config_val() {

@@ -1,5 +1,9 @@
 #!/bin/sh

+if [ -n "${UMASK}" ]; then
+    umask "${UMASK}"
+fi
+
 if [ -r /etc/vaultwarden.sh ]; then
     . /etc/vaultwarden.sh
 elif [ -r /etc/bitwarden_rs.sh ]; then

migrations/mysql/2024-01-12-210182_change_attachment_size/up.sql | 1 (new file)
@@ -0,0 +1 @@
+ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;

@@ -0,0 +1 @@
+ALTER TABLE twofactor MODIFY last_used BIGINT NOT NULL;

migrations/mysql/2024-06-05-131359_add_2fa_duo_store/down.sql | 1 (new file)
@@ -0,0 +1 @@
+DROP TABLE twofactor_duo_ctx;

migrations/mysql/2024-06-05-131359_add_2fa_duo_store/up.sql | 8 (new file)
@@ -0,0 +1,8 @@
+CREATE TABLE twofactor_duo_ctx (
+    state VARCHAR(64) NOT NULL,
+    user_email VARCHAR(255) NOT NULL,
+    nonce VARCHAR(64) NOT NULL,
+    exp BIGINT NOT NULL,
+
+    PRIMARY KEY (state)
+);
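The new `twofactor_duo_ctx` table holds one row per pending Duo login: the OAuth-style `state` (the primary key), the user's email, a `nonce`, and an absolute expiry timestamp `exp`. A hedged Rust sketch of how such rows might be represented and expired; the struct and purge helper are illustrative, not vaultwarden's actual code, which would issue a DELETE against the table instead:

    use chrono::Utc;

    /// Illustrative in-memory mirror of a twofactor_duo_ctx row.
    struct DuoContext {
        state: String,      // primary key, ties the callback to the login attempt
        user_email: String,
        nonce: String,
        exp: i64,           // unix timestamp, matching the BIGINT/INTEGER column
    }

    /// Drop contexts whose expiry has passed. A real implementation would run
    /// `DELETE FROM twofactor_duo_ctx WHERE exp < ?` with the current timestamp.
    fn purge_expired(contexts: &mut Vec<DuoContext>) {
        let now = Utc::now().timestamp();
        contexts.retain(|ctx| ctx.exp >= now);
    }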
@@ -0,0 +1 @@
+ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;

@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

@@ -0,0 +1,3 @@
+ALTER TABLE attachments
+ALTER COLUMN file_size TYPE BIGINT,
+ALTER COLUMN file_size SET NOT NULL;

@@ -0,0 +1,3 @@
+ALTER TABLE twofactor
+ALTER COLUMN last_used TYPE BIGINT,
+ALTER COLUMN last_used SET NOT NULL;

@@ -0,0 +1 @@
+DROP TABLE twofactor_duo_ctx;

migrations/postgresql/2024-06-05-131359_add_2fa_duo_store/up.sql | 8 (new file)
@@ -0,0 +1,8 @@
+CREATE TABLE twofactor_duo_ctx (
+    state VARCHAR(64) NOT NULL,
+    user_email VARCHAR(255) NOT NULL,
+    nonce VARCHAR(64) NOT NULL,
+    exp BIGINT NOT NULL,
+
+    PRIMARY KEY (state)
+);

@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete DROP COLUMN device_type;

@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

@@ -0,0 +1 @@
+-- Integer size in SQLite is already i64, so we don't need to do anything

@@ -0,0 +1 @@
+-- Integer size in SQLite is already i64, so we don't need to do anything

migrations/sqlite/2024-06-05-131359_add_2fa_duo_store/down.sql | 1 (new file)
@@ -0,0 +1 @@
+DROP TABLE twofactor_duo_ctx;

migrations/sqlite/2024-06-05-131359_add_2fa_duo_store/up.sql | 8 (new file)
@@ -0,0 +1,8 @@
+CREATE TABLE twofactor_duo_ctx (
+    state TEXT NOT NULL,
+    user_email TEXT NOT NULL,
+    nonce TEXT NOT NULL,
+    exp INTEGER NOT NULL,
+
+    PRIMARY KEY (state)
+);

@@ -0,0 +1 @@
+ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;

@@ -0,0 +1 @@
+ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

resources/vaultwarden-logo-auto.svg | 78 (new file)
@@ -0,0 +1,78 @@
+<svg width="1365.8256" height="280.48944" version="1.1" viewBox="0 0 1365.8255 280.48944" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+<style>
+@media (prefers-color-scheme: dark) {
+svg { -webkit-filter:invert(0.90); filter:invert(0.90); }
+}</style>
+<title>Vaultwarden Logo</title>
+<defs>
+<mask id="d">
+<rect x="-60" y="-60" width="120" height="120" fill="#fff"/>
+<circle id="b" cy="-40" r="3"/>
+<use transform="rotate(72)" xlink:href="#b"/>
+<use transform="rotate(144)" xlink:href="#b"/>
+<use transform="rotate(216)" xlink:href="#b"/>
+<use transform="rotate(-72)" xlink:href="#b"/>
+</mask>
+</defs>
+<g transform="translate(-10.708266,-9.2965379)" aria-label="aultwarden">
+<path d="m371.55338 223.43649-5.76172-14.84375h-0.78125q-7.51953 9.47266-15.52735 13.1836-7.91015 3.61328-20.70312 3.61328-15.72266 0-24.80469-8.98438-8.98437-8.98437-8.98437-25.58593 0-17.38282 12.10937-25.58594 12.20703-8.30078 36.71875-9.17969l18.94531-0.58594v-4.78515q0-16.60157-16.99218-16.60157-13.08594 0-30.76172 7.91016l-9.86328-20.11719q18.84765-9.86328 41.79687-9.86328 21.97266 0 33.69141 9.57031 11.71875 9.57032 11.71875 29.10157v72.7539zm-8.78907-50.58593-11.52343 0.39062q-12.98829 0.39063-19.33594 4.6875-6.34766 4.29688-6.34766 13.08594 0 12.59765 14.45313 12.59765 10.35156 0 16.5039-5.95703 6.25-5.95703 6.25-15.82031zm137.59766 50.58593-4.00391-13.96484h-1.5625q-4.78515 7.61719-13.57422 11.81641-8.78906 4.10156-20.01953 4.10156-19.23828 0-29.0039-10.25391-9.76563-10.35156-9.76563-29.6875v-71.1914h29.78516v63.76953q0 11.8164 4.19922 17.77343 4.19922 5.85938 13.3789 5.85938 12.5 0 18.06641-8.30078 5.56641-8.39844 5.56641-27.73438v-51.36718h29.78515v109.17968zm83.88672 0h-29.78516v-151.953122h29.78516zm77.24609-21.77734q7.8125 0 18.75-3.41797v22.16797q-11.13281 4.98047-27.34375 4.98047-17.87109 0-26.07422-8.98438-8.10547-9.08203-8.10547-27.14843v-52.63672h-14.25781v-12.59766l16.40625-9.96094 8.59375-23.046872h19.04297v23.242192h30.56641v22.36328h-30.56641v52.63672q0 6.34765 3.51563 9.375 3.61328 3.02734 9.47265 3.02734z"/>
+<path d="m791.27994 223.43649-19.62891-62.79297q-1.85547-5.76171-6.93359-26.17187h-0.78125q-3.90625 17.08984-6.83594 26.36719l-20.21484 62.59765h-18.75l-29.19922-107.03125h16.99219q10.35156 40.33203 15.72265 61.42578 5.46875 21.09375 6.25 28.41797h0.78125q1.07422-5.5664 3.41797-14.35547 2.44141-8.88671 4.19922-14.0625l19.62891-61.42578h17.57812l19.14063 61.42578q5.46875 16.79688 7.42187 28.22266h0.78125q0.39063-3.51562 2.05078-10.83984 1.75781-7.32422 20.41016-78.8086h16.79687l-29.58984 107.03125zm133.98437 0-3.22265-15.23437h-0.78125q-8.00782 10.05859-16.01563 13.67187-7.91015 3.51563-19.82422 3.51563-15.91797 0-25-8.20313-8.98437-8.20312-8.98437-23.33984 0-32.42188 51.85547-33.98438l18.16406-0.58593v-6.64063q0-12.59765-5.46875-18.55469-5.37109-6.05468-17.28516-6.05468-13.3789 0-30.27343 8.20312l-4.98047-12.40234q7.91015-4.29688 17.28515-6.73828 9.47266-2.44141 18.94532-2.44141 19.14062 0 28.32031 8.49609 9.27734 8.4961 9.27734 27.2461v73.04687zm-36.62109-11.42578q15.13672 0 23.73047-8.30078 8.6914-8.30078 8.6914-23.24219v-9.66797l-16.21093 0.6836q-19.33594 0.68359-27.92969 6.05469-8.49609 5.27343-8.49609 16.5039 0 8.78906 5.27343 13.37891 5.3711 4.58984 14.94141 4.58984zm130.85938-97.55859q7.1289 0 12.793 1.17187l-2.2461 15.03907q-6.6407-1.46485-11.7188-1.46485-12.9883 0-22.26561 10.54688-9.17968 10.54687-9.17968 26.26953v57.42187h-16.21094v-107.03125h13.37891l1.85546 19.82422h0.78125q5.95704-10.44922 14.35551-16.11328 8.3984-5.66406 18.457-5.66406zm101.6602 94.6289h-0.879q-11.2304 16.3086-33.5937 16.3086-20.9961 0-32.7148-14.35547-11.6211-14.35547-11.6211-40.82031 0-26.46485 11.7187-41.11328 11.7188-14.64844 32.6172-14.64844 21.7773 0 33.3984 15.82031h1.2696l-0.6836-7.71484-0.3907-7.51953v-43.554692h16.211v151.953122h-13.1836zm-32.4219 2.73438q16.6015 0 24.0234-8.98438 7.5195-9.08203 7.5195-29.19921v-3.41797q0-22.75391-7.6171-32.42188-7.5196-9.76562-24.1211-9.76562-14.2578 0-21.875 11.13281-7.5196 11.03516-7.5196 31.25 0 20.50781 7.5196 30.95703 7.5195 10.44922 22.0703 10.44922zm127.3437 13.57422q-23.7304 0-37.5-14.45313-13.6718-14.45312-13.6718-40.13672 0-25.8789 12.6953-41.11328 12.7929-15.23437 34.2773-15.23437 20.1172 0 31.8359 13.28125 11.7188 13.18359 11.7188 34.86328v10.25391h-73.7305q0.4883 18.84765 9.4727 28.61328 9.082 9.76562 25.4883 9.76562 17.2851 0 34.1797-7.22656v14.45312q-8.5938 3.71094-16.3086 5.27344-7.6172 1.66016-18.4571 1.66016zm-4.3945-97.36328q-12.8906 0-20.6055 8.39843-7.6172 8.39844-8.9843 23.24219h55.957q0-15.33203-6.836-23.4375-6.8359-8.20312-19.5312-8.20312zm144.6289 95.41015v-69.23828q0-13.08594-5.957-19.53125-5.9571-6.44531-18.6524-6.44531-16.7968 0-24.6093 9.08203t-7.8125 29.98047v56.15234h-16.211v-107.03125h13.1836l2.6367 14.64844h0.7813q4.9804-7.91016 13.9648-12.20703 8.9844-4.39453 20.0196-4.39453 19.3359 0 29.1015 9.375 9.7656 9.27734 9.7656 29.78515v69.82422z"/>
+</g>
+<g transform="translate(-10.708266,-9.2965379)">
+<g id="e" transform="matrix(2.6712834,0,0,2.6712834,150.95027,149.53854)">
+<g id="f" mask="url(#d)">
+<path d="m-31.1718-33.813208 26.496029 74.188883h9.3515399l26.49603-74.188883h-9.767164l-16.728866 47.588948q-1.662496 4.571864-2.805462 8.624198-1.142966 3.948427-1.870308 7.585137-.72734199-3.63671-1.8703079-7.689043-1.142966-4.052334-2.805462-8.728104l-16.624959-47.381136z" stroke="#000" stroke-width="4.51171"/>
+<circle transform="scale(-1,1)" r="43" fill="none" stroke="#000" stroke-width="9"/>
+<g id="g" transform="scale(-1,1)">
+<polygon id="a" points="46 -3 46 3 51 0" stroke="#000" stroke-linejoin="round" stroke-width="3"/>
+<use transform="rotate(11.25)" xlink:href="#a"/>
+<use transform="rotate(22.5)" xlink:href="#a"/>
+<use transform="rotate(33.75)" xlink:href="#a"/>
+<use transform="rotate(45)" xlink:href="#a"/>
+<use transform="rotate(56.25)" xlink:href="#a"/>
+<use transform="rotate(67.5)" xlink:href="#a"/>
+<use transform="rotate(78.75)" xlink:href="#a"/>
+<use transform="rotate(90)" xlink:href="#a"/>
+<use transform="rotate(101.25)" xlink:href="#a"/>
+<use transform="rotate(112.5)" xlink:href="#a"/>
+<use transform="rotate(123.75)" xlink:href="#a"/>
+<use transform="rotate(135)" xlink:href="#a"/>
+<use transform="rotate(146.25)" xlink:href="#a"/>
+<use transform="rotate(157.5)" xlink:href="#a"/>
+<use transform="rotate(168.75)" xlink:href="#a"/>
+<use transform="scale(-1)" xlink:href="#a"/>
+<use transform="rotate(191.25)" xlink:href="#a"/>
+<use transform="rotate(202.5)" xlink:href="#a"/>
+<use transform="rotate(213.75)" xlink:href="#a"/>
+<use transform="rotate(225)" xlink:href="#a"/>
+<use transform="rotate(236.25)" xlink:href="#a"/>
+<use transform="rotate(247.5)" xlink:href="#a"/>
+<use transform="rotate(258.75)" xlink:href="#a"/>
+<use transform="rotate(-90)" xlink:href="#a"/>
+<use transform="rotate(-78.75)" xlink:href="#a"/>
+<use transform="rotate(-67.5)" xlink:href="#a"/>
+<use transform="rotate(-56.25)" xlink:href="#a"/>
+<use transform="rotate(-45)" xlink:href="#a"/>
+<use transform="rotate(-33.75)" xlink:href="#a"/>
+<use transform="rotate(-22.5)" xlink:href="#a"/>
+<use transform="rotate(-11.25)" xlink:href="#a"/>
+</g>
+<g id="h" transform="scale(-1,1)">
+<polygon id="c" points="7 -42 -7 -42 0 -35" stroke="#000" stroke-linejoin="round" stroke-width="6"/>
+<use transform="rotate(72)" xlink:href="#c"/>
+<use transform="rotate(144)" xlink:href="#c"/>
+<use transform="rotate(216)" xlink:href="#c"/>
+<use transform="rotate(-72)" xlink:href="#c"/>
+</g>
+</g>
+<mask>
+<rect x="-60" y="-60" width="120" height="120" fill="#fff"/>
+<circle cy="-40" r="3"/>
+<use transform="rotate(72)" xlink:href="#b"/>
+<use transform="rotate(144)" xlink:href="#b"/>
+<use transform="rotate(216)" xlink:href="#b"/>
+<use transform="rotate(-72)" xlink:href="#b"/>
+</mask>
+</g>
+</g>
+</svg>
After: width | height | size: 7.6 KiB

@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.73.0"
+channel = "1.82.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"

src/api/admin.rs | 154
@@ -1,4 +1,5 @@
 use once_cell::sync::Lazy;
+use reqwest::Method;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
 use std::env;

@@ -13,14 +14,19 @@ use rocket::{
 };

 use crate::{
-    api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
-    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
+    api::{
+        core::{log_event, two_factor},
+        unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
+    },
+    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp, Secure},
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
     error::{Error, MapResult},
+    http_client::make_http_request,
     mail,
     util::{
-        docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
+        container_base_image, format_naive_datetime_local, get_display_size, get_web_vault_version,
+        is_running_in_container, NumberOrString,
     },
     CONFIG, VERSION,
 };

@@ -164,7 +170,12 @@ struct LoginForm {
 }

 #[post("/", data = "<data>")]
-fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
+fn post_admin_login(
+    data: Form<LoginForm>,
+    cookies: &CookieJar<'_>,
+    ip: ClientIp,
+    secure: Secure,
+) -> Result<Redirect, AdminResponse> {
     let data = data.into_inner();
     let redirect = data.redirect;

@@ -184,12 +195,12 @@ fn post_admin_login(
     let claims = generate_admin_claims();
     let jwt = encode_jwt(&claims);

-    let cookie = Cookie::build(COOKIE_NAME, jwt)
+    let cookie = Cookie::build((COOKIE_NAME, jwt))
         .path(admin_path())
-        .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
+        .max_age(time::Duration::minutes(CONFIG.admin_session_lifetime()))
         .same_site(SameSite::Strict)
         .http_only(true)
-        .finish();
+        .secure(secure.https);

     cookies.add(cookie);
     if let Some(redirect) = redirect {
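The cookie changes track Rocket 0.5's released cookie API: `Cookie::build` now takes a `(name, value)` tuple, the builder converts into a `Cookie` directly instead of via the removed `.finish()`, and the new `Secure` request guard lets the handler set the `Secure` flag only when the request actually arrived over HTTPS. A minimal standalone sketch of the same pattern, using the `cookie` 0.18 crate as bundled with Rocket 0.5 (names and values here are illustrative):

    use cookie::{Cookie, SameSite};

    // Build a hardened session cookie; `https` would come from a request guard
    // like vaultwarden's `Secure`, which inspects the connection or the
    // forwarded-proto headers.
    fn session_cookie(jwt: String, https: bool) -> Cookie<'static> {
        Cookie::build(("VW_ADMIN", jwt))
            .path("/admin")
            .max_age(cookie::time::Duration::minutes(20))
            .same_site(SameSite::Strict)
            .http_only(true)
            .secure(https)
            .build() // cookie 0.18: the builder yields a Cookie; `.finish()` is gone
    }

Rocket's `CookieJar::add` also accepts the builder itself, which is why the diff can pass `cookie` without an explicit conversion.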
@@ -262,8 +273,8 @@ fn admin_page_login() -> ApiResult<Html<String>> {
     render_admin_login(None, None)
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct InviteData {
     email: String,
 }
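This is part of a sweep away from upper-cased Rust field names toward letting serde derive the wire casing: `#[serde(rename_all = "camelCase")]` maps snake_case Rust fields to the camelCase JSON keys the newer Bitwarden clients send, and the manual `usr["UserEnabled"]`-style keys below change to camelCase for the same reason. A self-contained sketch of the mapping (field set is illustrative):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct InviteData {
        email: String,
        wait_time_days: i32, // accepts "waitTimeDays" on the wire
    }

    fn main() {
        let parsed: InviteData =
            serde_json::from_str(r#"{"email":"a@example.com","waitTimeDays":7}"#).unwrap();
        println!("{parsed:?}");
    }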
@@ -287,7 +298,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon

 async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
     if CONFIG.mail_enabled() {
-        mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
+        mail::send_invite(user, None, None, &CONFIG.invitation_org_name(), None).await
     } else {
         let invitation = Invitation::new(&user.email);
         invitation.save(conn).await

@@ -313,7 +324,7 @@ async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {

 #[get("/logout")]
 fn logout(cookies: &CookieJar<'_>) -> Redirect {
-    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+    cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
     Redirect::to(admin_path())
 }

@@ -323,9 +334,9 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
     let mut users_json = Vec::with_capacity(users.len());
     for u in users {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
-        usr["LastActive"] = match u.last_active(&mut conn).await {
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["lastActive"] = match u.last_active(&mut conn).await {
             Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
             None => json!(None::<String>),
         };

@@ -343,7 +354,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
         let mut usr = u.to_json(&mut conn).await;
         usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
         usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
-        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
+        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
         usr["user_enabled"] = json!(u.enabled);
         usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         usr["last_active"] = match u.last_active(&mut conn).await {

@@ -361,8 +372,8 @@
 async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     if let Some(u) = User::find_by_mail(mail, &mut conn).await {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         Ok(Json(usr))
     } else {
         err_code!("User doesn't exist", Status::NotFound.code);

@@ -373,8 +384,8 @@
 async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let u = get_user_or_404(uuid, &mut conn).await?;
     let mut usr = u.to_json(&mut conn).await;
-    usr["UserEnabled"] = json!(u.enabled);
-    usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+    usr["userEnabled"] = json!(u.enabled);
+    usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
     Ok(Json(usr))
 }

@@ -391,7 +402,7 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
             EventType::OrganizationUserRemoved as i32,
             &user_org.uuid,
             &user_org.org_uuid,
-            String::from(ACTING_ADMIN_USER),
+            ACTING_ADMIN_USER,
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,

@@ -410,7 +421,7 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif

     if CONFIG.push_enabled() {
         for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
-            match unregister_push_device(device.uuid).await {
+            match unregister_push_device(device.push_uuid).await {
                 Ok(r) => r,
                 Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
             };

@@ -446,9 +457,10 @@ async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyR
 }

 #[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
     let mut user = get_user_or_404(uuid, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
     user.totp_recover = None;
     user.save(&mut conn).await
 }

@@ -462,7 +474,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
     }

     if CONFIG.mail_enabled() {
-        mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
+        mail::send_invite(&user, None, None, &CONFIG.invitation_org_name(), None).await
     } else {
         Ok(())
     }

@@ -471,7 +483,7 @@
     }
 }

-#[derive(Deserialize, Debug)]
+#[derive(Debug, Deserialize)]
 struct UserOrgTypeData {
     user_type: NumberOrString,
     user_uuid: String,

@@ -506,7 +518,11 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
     match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
         Ok(_) => {}
         Err(OrgPolicyErr::TwoFactorMissing) => {
-            err!("You cannot modify this user to this type because it has no two-step login method activated");
+            if CONFIG.email_2fa_auto_fallback() {
+                two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
+            } else {
+                err!("You cannot modify this user to this type because they have not setup 2FA");
+            }
         }
         Err(OrgPolicyErr::SingleOrgEnforced) => {
             err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
@@ -518,7 +534,7 @@
             EventType::OrganizationUserUpdated as i32,
             &user_to_edit.uuid,
             &data.org_uuid,
-            String::from(ACTING_ADMIN_USER),
+            ACTING_ADMIN_USER,
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,

@@ -546,7 +562,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
         org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
         org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
         org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
-        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
+        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
         organizations_json.push(org);
     }
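Dropping the `as i32` casts here pairs with the BIGINT migrations earlier in this changeset: attachment totals are now carried as 64-bit values end to end, so sums above i32::MAX bytes (about 2 GiB) no longer truncate before display. A hedged sketch of a display helper operating on `i64`; the real `get_display_size` lives in vaultwarden's util module and this stand-in only illustrates the signature change:

    /// Illustrative stand-in for get_display_size(), taking i64 so attachment
    /// totals above ~2 GiB render correctly instead of wrapping.
    fn display_size(size: i64) -> String {
        const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
        let mut size = size as f64;
        let mut unit = 0;
        while size >= 1024.0 && unit < UNITS.len() - 1 {
            size /= 1024.0;
            unit += 1;
        }
        format!("{:.2} {}", size, UNITS[unit])
    }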
@@ -560,11 +576,6 @@ async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -
     org.delete(&mut conn).await
 }

-#[derive(Deserialize)]
-struct WebVaultVersion {
-    version: String,
-}
-
 #[derive(Deserialize)]
 struct GitRelease {
     tag_name: String,

@@ -586,15 +597,15 @@ struct TimeApi {
 }

 async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    let json_api = get_reqwest_client();
-
-    Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
+    Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.json::<T>().await?)
 }

 async fn has_http_access() -> bool {
-    let http_access = get_reqwest_client();
-
-    match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
+    let req = match make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") {
+        Ok(r) => r,
+        Err(_) => return false,
+    };
+    match req.send().await {
         Ok(r) => r.status().is_success(),
         _ => false,
     }
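Both helpers now go through `make_http_request`, a vaultwarden wrapper whose request construction is itself fallible (for example when URL validation or the outbound-connection policy rejects the target), so failures are handled before anything is sent. A sketch of the resulting shape using plain reqwest; `make_http_request` itself is vaultwarden-specific and not reproduced here:

    use reqwest::{Client, Method};

    /// Mirror of the new has_http_access() shape: obtain a request builder
    /// first (fallible in vaultwarden's wrapper), then send and map any
    /// failure, at either stage, to false.
    async fn has_http_access(client: &Client) -> bool {
        let req = client.request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden");
        match req.send().await {
            Ok(resp) => resp.status().is_success(),
            Err(_) => false,
        }
    }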
@@ -604,7 +615,7 @@ use cached::proc_macro::cached;
 /// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
 /// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
 #[cached(time = 300, sync_writes = true)]
-async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
+async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
     // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
     if has_http_access {
         (
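The `cached` proc-macro wrapping `get_release_info` is what keeps the diagnostics page within GitHub's 60-requests-per-hour unauthenticated limit: results are memoized per argument tuple for 300 seconds, and `sync_writes` stops concurrent callers from all hitting the upstream API on a cold cache. A minimal sketch of the same attribute on an async function (the body is a placeholder):

    use cached::proc_macro::cached;

    // Memoized for 300 s per distinct argument value; concurrent callers
    // block on one writer instead of each refetching.
    #[cached(time = 300, sync_writes = true)]
    async fn release_info(has_http_access: bool) -> String {
        if !has_http_access {
            return "-".to_string();
        }
        // ...fetch and format the latest release tag here...
        "v1.32.0 (example)".to_string()
    }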
@@ -621,9 +632,9 @@
                 }
                 _ => "-".to_string(),
             },
-            // Do not fetch the web-vault version when running within Docker.
+            // Do not fetch the web-vault version when running within a container.
             // The web-vault version is embedded within the container it self, and should not be updated manually
-            if running_within_docker {
+            if running_within_container {
                 "-".to_string()
             } else {
                 match get_json_api::<GitRelease>(

@@ -664,20 +675,8 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     use chrono::prelude::*;
     use std::net::ToSocketAddrs;

-    // Get current running versions
-    let web_vault_version: WebVaultVersion =
-        match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
-            Ok(s) => serde_json::from_str(&s)?,
-            _ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
-                Ok(s) => serde_json::from_str(&s)?,
-                _ => WebVaultVersion {
-                    version: String::from("Version file missing"),
-                },
-            },
-        };
-
     // Execute some environment checks
-    let running_within_docker = is_running_in_docker();
+    let running_within_container = is_running_in_container();
     let has_http_access = has_http_access().await;
     let uses_proxy = env::var_os("HTTP_PROXY").is_some()
         || env::var_os("http_proxy").is_some()

@@ -691,12 +690,12 @@
     };

     let (latest_release, latest_commit, latest_web_build) =
-        get_release_info(has_http_access, running_within_docker).await;
+        get_release_info(has_http_access, running_within_container).await;

-    let ip_header_name = match &ip_header.0 {
-        Some(h) => h,
-        _ => "",
-    };
+    let ip_header_name = &ip_header.0.unwrap_or_default();
+
+    // Get current running versions
+    let web_vault_version = get_web_vault_version();

     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,

@@ -704,13 +703,13 @@
         "latest_release": latest_release,
         "latest_commit": latest_commit,
         "web_vault_enabled": &CONFIG.web_vault_enabled(),
-        "web_vault_version": web_vault_version.version.trim_start_matches('v'),
+        "web_vault_version": web_vault_version,
         "latest_web_build": latest_web_build,
-        "running_within_docker": running_within_docker,
-        "docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
+        "running_within_container": running_within_container,
+        "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
         "has_http_access": has_http_access,
-        "ip_header_exists": &ip_header.0.is_some(),
-        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_exists": !ip_header_name.is_empty(),
+        "ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
         "ip_header_name": ip_header_name,
         "ip_header_config": &CONFIG.ip_header(),
         "uses_proxy": uses_proxy,

@@ -718,8 +717,8 @@
         "db_version": get_sql_server_version(&mut conn).await,
         "admin_url": format!("{}/diagnostics", admin_url()),
         "overrides": &CONFIG.get_overrides().join(", "),
-        "host_arch": std::env::consts::ARCH,
-        "host_os": std::env::consts::OS,
+        "host_arch": env::consts::ARCH,
+        "host_os": env::consts::OS,
         "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
         "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
         "ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference

@@ -738,18 +737,27 @@ fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
 #[post("/config", data = "<data>")]
 fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
-    CONFIG.update_config(data)
+    if let Err(e) = CONFIG.update_config(data) {
+        err!(format!("Unable to save config: {e:?}"))
+    }
+    Ok(())
 }

 #[post("/config/delete")]
 fn delete_config(_token: AdminToken) -> EmptyResult {
-    CONFIG.delete_user_config()
+    if let Err(e) = CONFIG.delete_user_config() {
+        err!(format!("Unable to delete config: {e:?}"))
+    }
+    Ok(())
 }

 #[post("/config/backup_db")]
-async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
+async fn backup_db(_token: AdminToken, mut conn: DbConn) -> ApiResult<String> {
     if *CAN_BACKUP {
-        backup_database(&mut conn).await
+        match backup_database(&mut conn).await {
+            Ok(f) => Ok(format!("Backup to '{f}' was successful")),
+            Err(e) => err!(format!("Backup was unsuccessful {e}")),
+        }
     } else {
         err!("Can't back up current DB (Only SQLite supports this feature)");
     }

@@ -786,16 +794,16 @@ impl<'r> FromRequest<'r> for AdminToken {
                 if requested_page.is_empty() {
                     return Outcome::Forward(Status::Unauthorized);
                 } else {
-                    return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
+                    return Outcome::Error((Status::Unauthorized, "Unauthorized"));
                 }
             }
         };

         if decode_admin(access_token).is_err() {
             // Remove admin cookie
-            cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+            cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
             error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
-            return Outcome::Failure((Status::Unauthorized, "Session expired"));
+            return Outcome::Error((Status::Unauthorized, "Session expired"));
         }

         Outcome::Success(Self {
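The guard changes are purely the Rocket 0.5 rename: the pre-release `Outcome::Failure` variant became `Outcome::Error` in the final API. A compact request-guard skeleton using the renamed variant; the guard logic is reduced to a header check for illustration and is not vaultwarden's actual token validation:

    use rocket::http::Status;
    use rocket::request::{FromRequest, Outcome, Request};

    struct AdminToken(String);

    #[rocket::async_trait]
    impl<'r> FromRequest<'r> for AdminToken {
        type Error = &'static str;

        async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
            match req.headers().get_one("x-admin-token") {
                Some(t) => Outcome::Success(AdminToken(t.to_string())),
                // Rocket 0.5: Outcome::Error replaces the pre-release Outcome::Failure
                None => Outcome::Error((Status::Unauthorized, "Unauthorized")),
            }
        }
    }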
(File diff suppressed because it is too large. Load the diff to view it.)
(File diff suppressed because it is too large. Load the diff to view it.)

@@ -1,15 +1,17 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use rocket::{serde::json::Json, Route};
 use serde_json::Value;

 use crate::{
     api::{
         core::{CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString,
+        EmptyResult, JsonResult,
     },
     auth::{decode_emergency_access_invite, Headers},
     db::{models::*, DbConn, DbPool},
-    mail, CONFIG,
+    mail,
+    util::NumberOrString,
+    CONFIG,
 };

 pub fn routes() -> Vec<Route> {
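The import swap reflects chrono deprecating the `Duration` alias in favor of `TimeDelta`; the arithmetic itself is unchanged. For instance:

    use chrono::{TimeDelta, Utc};

    fn main() {
        // TimeDelta is chrono's new name for what was previously chrono::Duration.
        let expires = Utc::now() + TimeDelta::try_days(7).expect("in range");
        println!("invite expires at {expires}");
    }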
@@ -18,6 +20,7 @@ pub fn routes() -> Vec<Route> {
         get_grantees,
         get_emergency_access,
         put_emergency_access,
+        post_emergency_access,
         delete_emergency_access,
         post_delete_emergency_access,
         send_invite,

@@ -37,45 +40,66 @@ pub fn routes() -> Vec<Route> {
 // region get

 #[get("/emergency-access/trusted")]
-async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    if !CONFIG.emergency_access_allowed() {
+        return Json(json!({
+            "data": [{
+                "id": "",
+                "status": 2,
+                "type": 0,
+                "waitTimeDays": 0,
+                "granteeId": "",
+                "email": "",
+                "name": "NOTE: Emergency Access is disabled!",
+                "object": "emergencyAccessGranteeDetails",
+
+            }],
+            "object": "list",
+            "continuationToken": null
+        }));
+    }
     let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
-        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
+        if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
+            emergency_access_list_json.push(grantee)
+        }
     }

-    Ok(Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
-    })))
+    Json(json!({
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
+    }))
 }

 #[get("/emergency-access/granted")]
-async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
-
-    let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
+async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    let emergency_access_list = if CONFIG.emergency_access_allowed() {
+        EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await
+    } else {
+        Vec::new()
+    };
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
         emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
     }

-    Ok(Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
-    })))
+    Json(json!({
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
+    }))
 }

 #[get("/emergency-access/<emer_id>")]
-async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+    check_emergency_access_enabled()?;

-    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
+    match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+        Some(emergency_access) => Ok(Json(
+            emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
+        )),
         None => err!("Emergency access not valid."),
     }
 }

@@ -85,42 +109,49 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
 // region put/post

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessUpdateData {
-    Type: NumberOrString,
-    WaitTimeDays: i32,
-    KeyEncrypted: Option<String>,
+    r#type: NumberOrString,
+    wait_time_days: i32,
+    key_encrypted: Option<String>,
 }
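Two details are worth noting in `EmergencyAccessUpdateData`: the wire names are now derived via `rename_all = "camelCase"`, and the JSON key `type` has to be spelled with the raw identifier `r#type` because `type` is a Rust keyword. `NumberOrString` (relocated to `util`) absorbs clients that send either `2` or `"2"` for the same field. A hedged sketch of such a tolerant field; vaultwarden's real enum has additional conversion helpers and this is not its actual definition:

    use serde::Deserialize;

    /// Accepts either a JSON number or a JSON string, in the spirit of
    /// vaultwarden's util::NumberOrString (sketched, not the actual type).
    #[derive(Debug, Deserialize)]
    #[serde(untagged)]
    enum NumberOrString {
        Number(i64),
        String(String),
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct UpdateData {
        r#type: NumberOrString, // matches the JSON key "type"
        wait_time_days: i32,
    }

    fn main() {
        let a: UpdateData = serde_json::from_str(r#"{"type":2,"waitTimeDays":7}"#).unwrap();
        let b: UpdateData = serde_json::from_str(r#"{"type":"2","waitTimeDays":7}"#).unwrap();
        println!("{a:?} {b:?}");
    }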
 #[put("/emergency-access/<emer_id>", data = "<data>")]
-async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
-    post_emergency_access(emer_id, data, conn).await
+async fn put_emergency_access(
+    emer_id: &str,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
+    conn: DbConn,
+) -> JsonResult {
+    post_emergency_access(emer_id, data, headers, conn).await
 }

 #[post("/emergency-access/<emer_id>", data = "<data>")]
 async fn post_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<EmergencyAccessUpdateData>,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

-    let data: EmergencyAccessUpdateData = data.into_inner().data;
+    let data: EmergencyAccessUpdateData = data.into_inner();

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => emergency_access,
-        None => err!("Emergency access not valid."),
-    };
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emergency_access) => emergency_access,
+            None => err!("Emergency access not valid."),
+        };

-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };

     emergency_access.atype = new_type;
-    emergency_access.wait_time_days = data.WaitTimeDays;
-    if data.KeyEncrypted.is_some() {
-        emergency_access.key_encrypted = data.KeyEncrypted;
+    emergency_access.wait_time_days = data.wait_time_days;
+    if data.key_encrypted.is_some() {
+        emergency_access.key_encrypted = data.key_encrypted;
     }

     emergency_access.save(&mut conn).await?;

@@ -133,19 +164,23 @@ async fn post_emergency_access(

 #[delete("/emergency-access/<emer_id>")]
 async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

-    let grantor_user = headers.user;
-
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => {
-            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
-                err!("Emergency access not valid.")
-            }
-            emer
+    let emergency_access = match (
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+    ) {
+        (Some(grantor_emer), None) => {
+            info!("Grantor deleted emergency access {emer_id}");
+            grantor_emer
         }
-        None => err!("Emergency access not valid."),
+        (None, Some(grantee_emer)) => {
+            info!("Grantee deleted emergency access {emer_id}");
+            grantee_emer
+        }
+        _ => err!("Emergency access not valid."),
     };

     emergency_access.delete(&mut conn).await?;
     Ok(())
 }

@@ -160,24 +195,24 @@ async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbC
 // region invite

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessInviteData {
-    Email: String,
-    Type: NumberOrString,
-    WaitTimeDays: i32,
+    email: String,
+    r#type: NumberOrString,
+    wait_time_days: i32,
 }

 #[post("/emergency-access/invite", data = "<data>")]
-async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    check_emergency_access_enabled()?;

-    let data: EmergencyAccessInviteData = data.into_inner().data;
-    let email = data.Email.to_lowercase();
-    let wait_time_days = data.WaitTimeDays;
+    let data: EmergencyAccessInviteData = data.into_inner();
+    let email = data.email.to_lowercase();
+    let wait_time_days = data.wait_time_days;

     let emergency_access_status = EmergencyAccessStatus::Invited as i32;

-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };

@@ -189,7 +224,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
         err!("You can not set yourself as an emergency contact.")
     }

-    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
+    let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
         None => {
             if !CONFIG.invitations_allowed() {
                 err!(format!("Grantee user does not exist: {}", &email))

@@ -206,9 +241,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade

             let mut user = User::new(email.clone());
             user.save(&mut conn).await?;
-            user
+            (user, true)
         }
-        Some(user) => user,
+        Some(user) if user.password_hash.is_empty() => (user, true),
+        Some(user) => (user, false),
     };

     if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(

@@ -236,15 +272,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
             &grantor_user.email,
         )
         .await?;
-    } else {
-        // Automatically mark user as accepted if no email invites
-        match User::find_by_mail(&email, &mut conn).await {
-            Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
-                Ok(v) => v,
-                Err(e) => err!(e.to_string()),
-            },
-            None => err!("Grantee user not found."),
-        }
+    } else if !new_user {
+        // if mail is not enabled immediately accept the invitation for existing users
+        new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
     }

     Ok(())

@@ -252,16 +282,13 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade

 #[post("/emergency-access/<emer_id>/reinvite")]
 async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
-
-    if emergency_access.grantor_uuid != headers.user.uuid {
-        err!("Emergency access not valid.");
-    }
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };

     if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
         err!("The grantee user is already accepted or confirmed to the organization");

@@ -288,34 +315,29 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
             &grantor_user.email,
         )
         .await?;
-    } else {
-        if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
-            let invitation = Invitation::new(&email);
-            invitation.save(&mut conn).await?;
-        }
-
-        // Automatically mark user as accepted if no email invites
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
+    } else if !grantee_user.password_hash.is_empty() {
+        // accept the invitation for existing user
+        emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
+    } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
+        let invitation = Invitation::new(&email);
+        invitation.save(&mut conn).await?;
    }

     Ok(())
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct AcceptData {
-    Token: String,
+    token: String,
 }

 #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
-async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
check_emergency_access_allowed()?;
|
||||
async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let data: AcceptData = data.into_inner().data;
|
||||
let token = &data.Token;
|
||||
let data: AcceptData = data.into_inner();
|
||||
let token = &data.token;
|
||||
let claims = decode_emergency_access_invite(token)?;
|
||||
|
||||
// This can happen if the user who received the invite used a different email to signup.
|
||||
|
@ -332,10 +354,13 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
|||
None => err!("Invited user not found"),
|
||||
};
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
// We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
|
||||
// The uuid of the grantee gets stored once accepted.
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
// get grantor user to send Accepted email
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
|
@ -347,10 +372,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
|||
&& grantor_user.name == claims.grantor_name
|
||||
&& grantor_user.email == claims.grantor_email
|
||||
{
|
||||
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => err!(e.to_string()),
|
||||
}
|
||||
emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
|
||||
|
@ -362,49 +384,30 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
|
|||
}
|
||||
}
|
||||
|
||||
async fn accept_invite_process(
|
||||
grantee_uuid: &str,
|
||||
emergency_access: &mut EmergencyAccess,
|
||||
grantee_email: &str,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
|
||||
err!("User email does not match invite.");
|
||||
}
|
||||
|
||||
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
|
||||
err!("Emergency contact already accepted.");
|
||||
}
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
|
||||
emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
|
||||
emergency_access.email = None;
|
||||
emergency_access.save(conn).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[allow(non_snake_case)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ConfirmData {
|
||||
Key: String,
|
||||
key: String,
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
|
||||
async fn confirm_emergency_access(
|
||||
emer_id: &str,
|
||||
data: JsonUpcase<ConfirmData>,
|
||||
data: Json<ConfirmData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let confirming_user = headers.user;
|
||||
let data: ConfirmData = data.into_inner().data;
|
||||
let key = data.Key;
|
||||
let data: ConfirmData = data.into_inner();
|
||||
let key = data.key;
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Accepted as i32
|
||||
|| emergency_access.grantor_uuid != confirming_user.uuid
|
||||
|
@ -444,17 +447,16 @@ async fn confirm_emergency_access(
|
|||
|
||||
#[post("/emergency-access/<emer_id>/initiate")]
|
||||
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let initiating_user = headers.user;
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
|
||||
|| emergency_access.grantee_uuid != Some(initiating_user.uuid)
|
||||
{
|
||||
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
|
@ -484,16 +486,15 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
|||
|
||||
#[post("/emergency-access/<emer_id>/approve")]
|
||||
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
||||
|| emergency_access.grantor_uuid != headers.user.uuid
|
||||
{
|
||||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
|
@ -522,25 +523,20 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
|
|||
|
||||
#[post("/emergency-access/<emer_id>/reject")]
|
||||
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
||||
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
|
||||
|| emergency_access.grantor_uuid != headers.user.uuid
|
||||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
||||
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
|
||||
{
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
};
|
||||
|
||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
|
@ -551,7 +547,7 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
|||
emergency_access.save(&mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
|
||||
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &headers.user.name).await?;
|
||||
}
|
||||
Ok(Json(emergency_access.to_json()))
|
||||
} else {
|
||||
|
@ -565,12 +561,13 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
|||
|
||||
#[post("/emergency-access/<emer_id>/view")]
|
||||
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
|
||||
err!("Emergency access not valid.")
|
||||
|
@ -594,21 +591,22 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
|
|||
}
|
||||
|
||||
Ok(Json(json!({
|
||||
"Ciphers": ciphers_json,
|
||||
"KeyEncrypted": &emergency_access.key_encrypted,
|
||||
"Object": "emergencyAccessView",
|
||||
"ciphers": ciphers_json,
|
||||
"keyEncrypted": &emergency_access.key_encrypted,
|
||||
"object": "emergencyAccessView",
|
||||
})))
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/takeover")]
|
||||
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_allowed()?;
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
|
@ -620,42 +618,43 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
|||
};
|
||||
|
||||
let result = json!({
|
||||
"Kdf": grantor_user.client_kdf_type,
|
||||
"KdfIterations": grantor_user.client_kdf_iter,
|
||||
"KdfMemory": grantor_user.client_kdf_memory,
|
||||
"KdfParallelism": grantor_user.client_kdf_parallelism,
|
||||
"KeyEncrypted": &emergency_access.key_encrypted,
|
||||
"Object": "emergencyAccessTakeover",
|
||||
"kdf": grantor_user.client_kdf_type,
|
||||
"kdfIterations": grantor_user.client_kdf_iter,
|
||||
"kdfMemory": grantor_user.client_kdf_memory,
|
||||
"kdfParallelism": grantor_user.client_kdf_parallelism,
|
||||
"keyEncrypted": &emergency_access.key_encrypted,
|
||||
"object": "emergencyAccessTakeover",
|
||||
});
|
||||
|
||||
Ok(Json(result))
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[allow(non_snake_case)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct EmergencyAccessPasswordData {
|
||||
NewMasterPasswordHash: String,
|
||||
Key: String,
|
||||
new_master_password_hash: String,
|
||||
key: String,
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
|
||||
async fn password_emergency_access(
|
||||
emer_id: &str,
|
||||
data: JsonUpcase<EmergencyAccessPasswordData>,
|
||||
data: Json<EmergencyAccessPasswordData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
) -> EmptyResult {
|
||||
check_emergency_access_allowed()?;
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let data: EmergencyAccessPasswordData = data.into_inner().data;
|
||||
let new_master_password_hash = &data.NewMasterPasswordHash;
|
||||
let data: EmergencyAccessPasswordData = data.into_inner();
|
||||
let new_master_password_hash = &data.new_master_password_hash;
|
||||
//let key = &data.Key;
|
||||
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
|
@ -667,7 +666,7 @@ async fn password_emergency_access(
|
|||
};
|
||||
|
||||
// change grantor_user password
|
||||
grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
|
||||
grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
|
||||
grantor_user.save(&mut conn).await?;
|
||||
|
||||
// Disable TwoFactor providers since they will otherwise block logins
|
||||
|
@ -687,10 +686,11 @@ async fn password_emergency_access(
|
|||
#[get("/emergency-access/<emer_id>/policies")]
|
||||
async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
|
@ -705,9 +705,9 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
|||
let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
|
||||
|
||||
Ok(Json(json!({
|
||||
"Data": policies_json,
|
||||
"Object": "list",
|
||||
"ContinuationToken": null
|
||||
"data": policies_json,
|
||||
"object": "list",
|
||||
"continuationToken": null
|
||||
})))
|
||||
}
|
||||
|
||||
|
@ -722,9 +722,9 @@ fn is_valid_request(
|
|||
&& emergency_access.atype == requested_access_type as i32
|
||||
}
|
||||
|
||||
fn check_emergency_access_allowed() -> EmptyResult {
|
||||
fn check_emergency_access_enabled() -> EmptyResult {
|
||||
if !CONFIG.emergency_access_allowed() {
|
||||
err!("Emergency access is not allowed.")
|
||||
err!("Emergency access is not enabled.")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
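Note on the pattern running through this file: the custom `JsonUpcase` extractor (which accepted PascalCase payloads) is replaced everywhere by Rocket's plain `Json`, with `#[serde(rename_all = "camelCase")]` mapping the clients' camelCase field names onto snake_case Rust fields. Because `type` is a Rust keyword, it becomes the raw identifier `r#type`. A minimal self-contained sketch of the same technique, mirroring the `EmergencyAccessInviteData` change above (the struct name and payload here are illustrative, not copied from the crate):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct InviteData {
    email: String,
    r#type: i32,         // "type" is a keyword, so a raw identifier is required
    wait_time_days: i32, // deserialized from "waitTimeDays"
}

fn main() {
    let json = r#"{"email":"a@example.com","type":1,"waitTimeDays":7}"#;
    let data: InviteData = serde_json::from_str(json).unwrap();
    assert_eq!(data.wait_time_days, 7);
}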
@@ -746,7 +746,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
     for mut emer in emergency_access_list {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         let recovery_allowed_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
         if recovery_allowed_at.le(&now) {
             // Only update the access status
             // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active

@@ -802,10 +802,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         // Calculate the day before the recovery will become active
         let final_recovery_reminder_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
         // Calculate if a day has passed since the previous notification, else no notification has been sent before
         let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
-            last_notification_at + Duration::days(1)
+            last_notification_at + TimeDelta::try_days(1).unwrap()
         } else {
             now
         };
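Note: `chrono::Duration::days` panics on out-of-range values and is deprecated in newer chrono releases in favour of the fallible `TimeDelta::try_days`, which returns an `Option`. The `.unwrap()` calls above are safe because the day counts involved are small. The same computation in isolation (field values invented for the example):

use chrono::{TimeDelta, Utc};

fn main() {
    let recovery_initiated_at = Utc::now().naive_utc();
    let wait_time_days: i32 = 7;
    // try_days returns None only when the duration would overflow,
    // which cannot happen for small, validated day counts.
    let recovery_allowed_at =
        recovery_initiated_at + TimeDelta::try_days(i64::from(wait_time_days)).unwrap();
    assert!(recovery_allowed_at > recovery_initiated_at);
}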
@@ -5,7 +5,7 @@ use rocket::{form::FromForm, serde::json::Json, Route};
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcaseVec},
+    api::{EmptyResult, JsonResult},
     auth::{AdminHeaders, Headers},
     db::{
         models::{Cipher, Event, UserOrganization},

@@ -22,7 +22,6 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(FromForm)]
-#[allow(non_snake_case)]
 struct EventRange {
     start: String,
     end: String,

@@ -53,9 +52,9 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }

@@ -85,9 +84,9 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }

@@ -119,13 +118,13 @@ async fn get_user_events(
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }

-fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
+fn get_continuation_token(events_json: &[Value]) -> Option<&str> {
     // When the length of the vec equals the max page_size there probably is more data
     // When it is less, then all events are loaded.
     if events_json.len() as i64 == Event::PAGE_SIZE {

@@ -145,33 +144,33 @@ pub fn main_routes() -> Vec<Route> {
     routes![post_events_collect,]
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EventCollection {
     // Mandatory
-    Type: i32,
-    Date: String,
+    r#type: i32,
+    date: String,

     // Optional
-    CipherId: Option<String>,
-    OrganizationId: Option<String>,
+    cipher_id: Option<String>,
+    organization_id: Option<String>,
 }

 // Upstream:
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
 #[post("/collect", format = "application/json", data = "<data>")]
-async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if !CONFIG.org_events_enabled() {
         return Ok(());
     }

-    for event in data.iter().map(|d| &d.data) {
-        let event_date = parse_date(&event.Date);
-        match event.Type {
+    for event in data.iter() {
+        let event_date = parse_date(&event.date);
+        match event.r#type {
             1000..=1099 => {
                 _log_user_event(
-                    event.Type,
+                    event.r#type,
                     &headers.user.uuid,
                     headers.device.atype,
                     Some(event_date),

@@ -181,9 +180,9 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
                 .await;
             }
             1600..=1699 => {
-                if let Some(org_uuid) = &event.OrganizationId {
+                if let Some(org_uuid) = &event.organization_id {
                     _log_event(
-                        event.Type,
+                        event.r#type,
                         org_uuid,
                         org_uuid,
                         &headers.user.uuid,

@@ -196,11 +195,11 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
                 }
             }
             _ => {
-                if let Some(cipher_uuid) = &event.CipherId {
+                if let Some(cipher_uuid) = &event.cipher_id {
                     if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
                         if let Some(org_uuid) = cipher.organization_uuid {
                             _log_event(
-                                event.Type,
+                                event.r#type,
                                 cipher_uuid,
                                 &org_uuid,
                                 &headers.user.uuid,

@@ -263,7 +262,7 @@ pub async fn log_event(
     event_type: i32,
     source_uuid: &str,
     org_uuid: &str,
-    act_user_uuid: String,
+    act_user_uuid: &str,
     device_type: i32,
     ip: &IpAddr,
     conn: &mut DbConn,

@@ -271,7 +270,7 @@ pub async fn log_event(
     if !CONFIG.org_events_enabled() {
         return;
     }
-    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
+    _log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
 }

 #[allow(clippy::too_many_arguments)]

@@ -289,7 +288,7 @@ async fn _log_event(
     let mut event = Event::new(event_type, event_date);
     match event_type {
         // 1000..=1099 Are user events, they need to be logged via log_user_event()
-        // Collection Events
+        // Cipher Events
         1100..=1199 => {
             event.cipher_uuid = Some(String::from(source_uuid));
         }
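Note: `get_continuation_token` now takes `&[Value]` rather than `&Vec<Value>`; a slice parameter accepts vectors, arrays, and sub-slices alike and silences clippy's `ptr_arg` lint. The pagination heuristic it implements is: a page that comes back exactly full probably has more rows behind it. A standalone sketch of that idea (the `PAGE_SIZE` value and the `"id"` key are assumptions for illustration):

use serde_json::{json, Value};

const PAGE_SIZE: i64 = 30; // stand-in for Event::PAGE_SIZE

fn get_continuation_token(events_json: &[Value]) -> Option<&str> {
    // A full page suggests more data exists; hand back the last id as the token.
    if events_json.len() as i64 == PAGE_SIZE {
        events_json.last().and_then(|e| e.get("id")).and_then(Value::as_str)
    } else {
        None
    }
}

fn main() {
    let events: Vec<Value> = (0..PAGE_SIZE).map(|i| json!({ "id": i.to_string() })).collect();
    assert_eq!(get_continuation_token(&events), Some("29"));
}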
@@ -2,7 +2,7 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{EmptyResult, JsonResult, Notify, UpdateType},
     auth::Headers,
     db::{models::*, DbConn},
 };

@@ -17,9 +17,9 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     Json(json!({
-        "Data": folders_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": folders_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }

@@ -38,16 +38,17 @@ async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct FolderData {
-    pub Name: String,
+    pub name: String,
+    pub id: Option<String>,
 }

 #[post("/folders", data = "<data>")]
-async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    let data: FolderData = data.into_inner();

-    let mut folder = Folder::new(headers.user.uuid, data.Name);
+    let mut folder = Folder::new(headers.user.uuid, data.name);

     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;

@@ -56,25 +57,19 @@ async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn:
 }

 #[post("/folders/<uuid>", data = "<data>")]
-async fn post_folder(
-    uuid: &str,
-    data: JsonUpcase<FolderData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     put_folder(uuid, data, headers, conn, nt).await
 }

 #[put("/folders/<uuid>", data = "<data>")]
 async fn put_folder(
     uuid: &str,
-    data: JsonUpcase<FolderData>,
+    data: Json<FolderData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+    let data: FolderData = data.into_inner();

     let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,

@@ -85,7 +80,7 @@ async fn put_folder(
         err!("Folder belongs to another user")
     }

-    folder.name = data.Name;
+    folder.name = data.name;

     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;
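Note: `FolderData` gains an `id` field so the same struct can carry both a folder-creation request (no uuid yet) and a key-rotation entry (uuid included). A quick round-trip check of that shape (payload values invented):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct FolderData {
    pub name: String,
    pub id: Option<String>, // present during key rotation, absent on create
}

fn main() {
    let create: FolderData = serde_json::from_str(r#"{"name":"2.encName=="}"#).unwrap();
    assert!(create.id.is_none());

    let rotate: FolderData =
        serde_json::from_str(r#"{"name":"2.encName==","id":"3fa85f64-0000-0000-0000-000000000000"}"#).unwrap();
    assert!(rotate.id.is_some());
}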
@@ -12,8 +12,8 @@ pub use accounts::purge_auth_requests;
 pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
+use reqwest::Method;
 pub use sends::purge_sends;
 pub use two_factor::send_incomplete_2fa_notifications;

 pub fn routes() -> Vec<Route> {
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];

@@ -47,23 +47,23 @@ pub fn events_routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //
-use rocket::{serde::json::Json, Catcher, Route};
-use serde_json::Value;
+use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};

 use crate::{
-    api::{JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{JsonResult, Notify, UpdateType},
     auth::Headers,
     db::DbConn,
     error::Error,
-    util::get_reqwest_client,
+    http_client::make_http_request,
+    util::parse_experimental_client_feature_flags,
 };

-#[derive(Serialize, Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct GlobalDomain {
-    Type: i32,
-    Domains: Vec<String>,
-    Excluded: bool,
+    r#type: i32,
+    domains: Vec<String>,
+    excluded: bool,
 }

 const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");

@@ -83,38 +83,38 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
     let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();

     for global in &mut globals {
-        global.Excluded = excluded_globals.contains(&global.Type);
+        global.excluded = excluded_globals.contains(&global.r#type);
     }

     if no_excluded {
-        globals.retain(|g| !g.Excluded);
+        globals.retain(|g| !g.excluded);
     }

     Json(json!({
-        "EquivalentDomains": equivalent_domains,
-        "GlobalEquivalentDomains": globals,
-        "Object": "domains",
+        "equivalentDomains": equivalent_domains,
+        "globalEquivalentDomains": globals,
+        "object": "domains",
     }))
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EquivDomainData {
-    ExcludedGlobalEquivalentDomains: Option<Vec<i32>>,
-    EquivalentDomains: Option<Vec<Vec<String>>>,
+    excluded_global_equivalent_domains: Option<Vec<i32>>,
+    equivalent_domains: Option<Vec<Vec<String>>>,
 }

 #[post("/settings/domains", data = "<data>")]
 async fn post_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
+    data: Json<EquivDomainData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: EquivDomainData = data.into_inner().data;
+    let data: EquivDomainData = data.into_inner();

-    let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
-    let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
+    let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default();
+    let equivalent_domains = data.equivalent_domains.unwrap_or_default();

     let mut user = headers.user;
     use serde_json::to_string;

@@ -130,25 +130,19 @@ async fn post_eq_domains(
 }

 #[put("/settings/domains", data = "<data>")]
-async fn put_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     post_eq_domains(data, headers, conn, nt).await
 }

 #[get("/hibp/breach?<username>")]
-async fn hibp_breach(username: &str) -> JsonResult {
-    let url = format!(
-        "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
-    );
-
+async fn hibp_breach(username: &str, _headers: Headers) -> JsonResult {
+    let username: String = url::form_urlencoded::byte_serialize(username.as_bytes()).collect();
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = get_reqwest_client();
+        let url = format!(
+            "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
+        );

-        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;
+        let res = make_http_request(Method::GET, &url)?.header("hibp-api-key", api_key).send().await?;

         // If we get a 404, return a 404, it means no breached accounts
         if res.status() == 404 {

@@ -159,15 +153,15 @@ async fn hibp_breach(username: &str) -> JsonResult {
         Ok(Json(value))
     } else {
         Ok(Json(json!([{
-            "Name": "HaveIBeenPwned",
-            "Title": "Manual HIBP Check",
-            "Domain": "haveibeenpwned.com",
-            "BreachDate": "2019-08-18T00:00:00Z",
-            "AddedDate": "2019-08-18T00:00:00Z",
-            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
-            "LogoPath": "vw_static/hibp.png",
-            "PwnCount": 0,
-            "DataClasses": [
+            "name": "HaveIBeenPwned",
+            "title": "Manual HIBP Check",
+            "domain": "haveibeenpwned.com",
+            "breachDate": "2019-08-18T00:00:00Z",
+            "addedDate": "2019-08-18T00:00:00Z",
+            "description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
+            "logoPath": "vw_static/hibp.png",
+            "pwnCount": 0,
+            "dataClasses": [
                 "Error - No API key set!"
             ]
         }])))

@@ -193,18 +187,27 @@ fn version() -> Json<&'static str> {
 #[get("/config")]
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
+    let mut feature_states =
+        parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+    // Force the new key rotation feature
+    feature_states.insert("key-rotation-improvements".to_string(), true);
+    feature_states.insert("flexible-collections-v-1".to_string(), false);
+
     Json(json!({
         // Note: The clients use this version to handle backwards compatibility concerns
         // This means they expect a version that closely matches the Bitwarden server version
        // We should make sure that we keep this updated when we support the new server features
         // Version history:
         // - Individual cipher key encryption: 2023.9.1
-        "version": "2023.9.1",
+        "version": "2024.2.0",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",
             "url": "https://github.com/dani-garcia/vaultwarden"
         },
+        "settings": {
+            "disableUserRegistration": !crate::CONFIG.signups_allowed() && crate::CONFIG.signups_domains_whitelist().is_empty(),
+        },
         "environment": {
             "vault": domain,
             "api": format!("{domain}/api"),

@@ -212,13 +215,7 @@ fn config() -> Json<Value> {
             "notifications": format!("{domain}/notifications"),
             "sso": "",
         },
-        "featureStates": {
-            // Any feature flags that we want the clients to use
-            // Can check the enabled ones at:
-            // https://vault.bitwarden.com/api/config
-            "autofill-v2": true,
-            "fido2-vault-credentials": true
-        },
+        "featureStates": feature_states,
         "object": "config",
     }))
 }
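Note: `/api/config` now derives `featureStates` from the experimental-client-feature-flags setting instead of a hardcoded map, then force-overrides two flags. The real parser lives in `util::parse_experimental_client_feature_flags`; assuming it turns a comma-separated list into a flag-to-enabled map, an equivalent could look like this (a sketch of assumed behaviour, not the crate's actual implementation):

use std::collections::HashMap;

fn parse_experimental_client_feature_flags(raw: &str) -> HashMap<String, bool> {
    // Assumed behaviour: every listed flag is enabled; unlisted flags are absent.
    raw.split(',').map(str::trim).filter(|f| !f.is_empty()).map(|f| (f.to_string(), true)).collect()
}

fn main() {
    let mut feature_states = parse_experimental_client_feature_flags("autofill-v2,fido2-vault-credentials");
    // Mirrors the two overrides applied in config():
    feature_states.insert("key-rotation-improvements".to_string(), true);
    feature_states.insert("flexible-collections-v-1".to_string(), false);
    assert_eq!(feature_states.get("autofill-v2"), Some(&true));
}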
(One file diff is suppressed here because it is too large.)
@@ -1,13 +1,14 @@
 use chrono::Utc;
 use rocket::{
-    request::{self, FromRequest, Outcome},
+    request::{FromRequest, Outcome},
     serde::json::Json,
     Request, Route,
 };

 use std::collections::HashSet;

 use crate::{
-    api::{EmptyResult, JsonUpcase},
+    api::EmptyResult,
     auth,
     db::{models::*, DbConn},
     mail, CONFIG,

@@ -18,43 +19,43 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportGroupData {
-    Name: String,
-    ExternalId: String,
-    MemberExternalIds: Vec<String>,
+    name: String,
+    external_id: String,
+    member_external_ids: Vec<String>,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportUserData {
-    Email: String,
-    ExternalId: String,
-    Deleted: bool,
+    email: String,
+    external_id: String,
+    deleted: bool,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportData {
-    Groups: Vec<OrgImportGroupData>,
-    Members: Vec<OrgImportUserData>,
-    OverwriteExisting: bool,
-    // LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
+    groups: Vec<OrgImportGroupData>,
+    members: Vec<OrgImportUserData>,
+    overwrite_existing: bool,
+    // largeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
 }

 #[post("/public/organization/import", data = "<data>")]
-async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
+async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
     // Most of the logic for this function can be found here
     // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797

     let org_id = token.0;
-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    for user_data in &data.Members {
-        if user_data.Deleted {
+    for user_data in &data.members {
+        if user_data.deleted {
             // If user is marked for deletion and it exists, revoke it
             if let Some(mut user_org) =
-                UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+                UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
             {
                 // Only revoke a user if it is not the last confirmed owner
                 let revoked = if user_org.atype == UserOrgType::Owner

@@ -72,27 +73,27 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                     user_org.revoke()
                 };

-                let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
                 if revoked || ext_modified {
                     user_org.save(&mut conn).await?;
                 }
             }
         // If user is part of the organization, restore it
         } else if let Some(mut user_org) =
-            UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+            UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
         {
             let restored = user_org.restore();
-            let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+            let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
             if restored || ext_modified {
                 user_org.save(&mut conn).await?;
             }
         } else {
             // If user is not part of the organization
-            let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
+            let user = match User::find_by_mail(&user_data.email, &mut conn).await {
                 Some(user) => user, // exists in vaultwarden
                 None => {
                     // User does not exist yet
-                    let mut new_user = User::new(user_data.Email.clone());
+                    let mut new_user = User::new(user_data.email.clone());
                     new_user.save(&mut conn).await?;

                     if !CONFIG.mail_enabled() {

@@ -109,7 +110,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             };

             let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
-            new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
+            new_org_user.set_external_id(Some(user_data.external_id.clone()));
             new_org_user.access_all = false;
             new_org_user.atype = UserOrgType::User as i32;
             new_org_user.status = user_org_status;

@@ -122,26 +123,24 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                     None => err!("Error looking up organization"),
                 };

-                mail::send_invite(
-                    &user_data.Email,
-                    &user.uuid,
-                    Some(org_id.clone()),
-                    Some(new_org_user.uuid),
-                    &org_name,
-                    Some(org_email),
-                )
-                .await?;
+                mail::send_invite(&user, Some(org_id.clone()), Some(new_org_user.uuid), &org_name, Some(org_email))
+                    .await?;
             }
         }
     }

     if CONFIG.org_groups_enabled() {
-        for group_data in &data.Groups {
-            let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
+        for group_data in &data.groups {
+            let group_uuid = match Group::find_by_external_id_and_org(&group_data.external_id, &org_id, &mut conn).await
+            {
                 Some(group) => group.uuid,
                 None => {
-                    let mut group =
-                        Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
+                    let mut group = Group::new(
+                        org_id.clone(),
+                        group_data.name.clone(),
+                        false,
+                        Some(group_data.external_id.clone()),
+                    );
                     group.save(&mut conn).await?;
                     group.uuid
                 }

@@ -149,7 +148,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co

             GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;

-            for ext_id in &group_data.MemberExternalIds {
+            for ext_id in &group_data.member_external_ids {
                 if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
                 {
                     let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());

@@ -162,9 +161,9 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
         }
     }

     // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
-    if data.OverwriteExisting {
+    if data.overwrite_existing {
         // Generate a HashSet to quickly verify if a member is listed or not.
-        let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
+        let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
         for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
             if let Some(ref user_external_id) = user_org.external_id {
                 if !sync_members.contains(user_external_id) {

@@ -193,7 +192,7 @@ pub struct PublicToken(String);
 impl<'r> FromRequest<'r> for PublicToken {
     type Error = &'static str;

-    async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
+    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         let headers = request.headers();
         // Get access_token
         let access_token: &str = match headers.get_one("Authorization") {

@@ -209,19 +208,15 @@ impl<'r> FromRequest<'r> for PublicToken {
             Err(_) => err_handler!("Invalid claim"),
         };
         // Check if time is between claims.nbf and claims.exp
-        let time_now = Utc::now().naive_utc().timestamp();
+        let time_now = Utc::now().timestamp();
         if time_now < claims.nbf {
             err_handler!("Token issued in the future");
         }
         if time_now > claims.exp {
             err_handler!("Token expired");
         }
-        // Check if claims.iss is host|claims.scope[0]
-        let host = match auth::Host::from_request(request).await {
-            Outcome::Success(host) => host,
-            _ => err_handler!("Error getting Host"),
-        };
-        let complete_host = format!("{}|{}", host.host, claims.scope[0]);
+        // Check if claims.iss is domain|claims.scope[0]
+        let complete_host = format!("{}|{}", CONFIG.domain_origin(), claims.scope[0]);
         if complete_host != claims.iss {
             err_handler!("Token not issued by this server");
         }
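Note: two fixes land in the `PublicToken` request guard. `Utc::now().timestamp()` already yields Unix seconds, so the intermediate `naive_utc()` was redundant; and the issuer is now checked against the configured `domain_origin()` instead of the request's Host header, so a client-supplied Host can no longer influence token validation. The time-window check in isolation (claim values invented):

use chrono::Utc;

struct Claims {
    nbf: i64, // not-before, Unix seconds
    exp: i64, // expiry, Unix seconds
}

fn check_window(claims: &Claims) -> Result<(), &'static str> {
    let time_now = Utc::now().timestamp(); // no naive_utc() needed for an epoch timestamp
    if time_now < claims.nbf {
        return Err("Token issued in the future");
    }
    if time_now > claims.exp {
        return Err("Token expired");
    }
    Ok(())
}

fn main() {
    let now = Utc::now().timestamp();
    assert!(check_window(&Claims { nbf: now - 60, exp: now + 3600 }).is_ok());
}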
@@ -1,6 +1,7 @@
 use std::path::Path;

-use chrono::{DateTime, Duration, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
+use num_traits::ToPrimitive;
 use rocket::form::Form;
 use rocket::fs::NamedFile;
 use rocket::fs::TempFile;

@@ -8,17 +9,17 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
+    api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
     auth::{ClientIp, Headers, Host},
     db::{models::*, DbConn, DbPool},
-    util::SafeString,
+    util::{NumberOrString, SafeString},
     CONFIG,
 };

 const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

 // The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
-const SIZE_525_MB: u64 = 550_502_400;
+const SIZE_525_MB: i64 = 550_502_400;

 pub fn routes() -> Vec<rocket::Route> {
     routes![

@@ -47,23 +48,26 @@ pub async fn purge_sends(pool: DbPool) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
-struct SendData {
-    Type: i32,
-    Key: String,
-    Password: Option<String>,
-    MaxAccessCount: Option<NumberOrString>,
-    ExpirationDate: Option<DateTime<Utc>>,
-    DeletionDate: DateTime<Utc>,
-    Disabled: bool,
-    HideEmail: Option<bool>,
+#[serde(rename_all = "camelCase")]
+pub struct SendData {
+    r#type: i32,
+    key: String,
+    password: Option<String>,
+    max_access_count: Option<NumberOrString>,
+    expiration_date: Option<DateTime<Utc>>,
+    deletion_date: DateTime<Utc>,
+    disabled: bool,
+    hide_email: Option<bool>,

     // Data field
-    Name: String,
-    Notes: Option<String>,
-    Text: Option<Value>,
-    File: Option<Value>,
-    FileLength: Option<NumberOrString>,
+    name: String,
+    notes: Option<String>,
+    text: Option<Value>,
+    file: Option<Value>,
+    file_length: Option<NumberOrString>,
+
+    // Used for key rotations
+    pub id: Option<String>,
 }

 /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
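Note: several `SendData` fields stay typed as `NumberOrString` because Bitwarden clients send numeric values sometimes as JSON numbers and sometimes as strings. A minimal stand-in for the crate's `util::NumberOrString` showing the idea (the real type offers more conversions):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(untagged)]
enum NumberOrString {
    Number(i64),
    String(String),
}

impl NumberOrString {
    fn into_i64(self) -> Result<i64, std::num::ParseIntError> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s.parse(),
        }
    }
}

fn main() {
    let a: NumberOrString = serde_json::from_str("42").unwrap();
    let b: NumberOrString = serde_json::from_str("\"42\"").unwrap();
    assert_eq!(a.into_i64().unwrap(), b.into_i64().unwrap());
}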
@ -92,7 +96,7 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
|
|||
/// Ref: https://bitwarden.com/help/article/policies/#send-options
|
||||
async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
|
||||
let user_uuid = &headers.user.uuid;
|
||||
let hide_email = data.HideEmail.unwrap_or(false);
|
||||
let hide_email = data.hide_email.unwrap_or(false);
|
||||
if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
|
||||
err!(
|
||||
"Due to an Enterprise Policy, you are not allowed to hide your email address \
|
||||
|
@ -103,40 +107,40 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
|
|||
}
|
||||
|
||||
fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
|
||||
let data_val = if data.Type == SendType::Text as i32 {
|
||||
data.Text
|
||||
} else if data.Type == SendType::File as i32 {
|
||||
data.File
|
||||
let data_val = if data.r#type == SendType::Text as i32 {
|
||||
data.text
|
||||
} else if data.r#type == SendType::File as i32 {
|
||||
data.file
|
||||
} else {
|
||||
err!("Invalid Send type")
|
||||
};
|
||||
|
||||
let data_str = if let Some(mut d) = data_val {
|
||||
d.as_object_mut().and_then(|o| o.remove("Response"));
|
||||
d.as_object_mut().and_then(|o| o.remove("response"));
|
||||
serde_json::to_string(&d)?
|
||||
} else {
|
||||
err!("Send data not provided");
|
||||
};
|
||||
|
||||
if data.DeletionDate > Utc::now() + Duration::days(31) {
|
||||
if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
|
||||
err!(
|
||||
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
|
||||
);
|
||||
}
|
||||
|
||||
let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
|
||||
let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
|
||||
send.user_uuid = Some(user_uuid);
|
||||
send.notes = data.Notes;
|
||||
send.max_access_count = match data.MaxAccessCount {
|
||||
send.notes = data.notes;
|
||||
send.max_access_count = match data.max_access_count {
|
||||
Some(m) => Some(m.into_i32()?),
|
||||
_ => None,
|
||||
};
|
||||
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
|
||||
send.disabled = data.Disabled;
|
||||
send.hide_email = data.HideEmail;
|
||||
send.atype = data.Type;
|
||||
send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
|
||||
send.disabled = data.disabled;
|
||||
send.hide_email = data.hide_email;
|
||||
send.atype = data.r#type;
|
||||
|
||||
send.set_password(data.Password.as_deref());
|
||||
send.set_password(data.password.as_deref());
|
||||
|
||||
Ok(send)
|
||||
}
|
||||
|
@ -147,9 +151,9 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
|||
let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();
|
||||
|
||||
Json(json!({
|
||||
"Data": sends_json,
|
||||
"Object": "list",
|
||||
"ContinuationToken": null
|
||||
"data": sends_json,
|
||||
"object": "list",
|
||||
"continuationToken": null
|
||||
}))
|
||||
}
|
||||
|
||||
|
@ -168,13 +172,13 @@ async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult
|
|||
}
|
||||
|
||||
#[post("/sends", data = "<data>")]
|
||||
async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &mut conn).await?;
|
||||
|
||||
let data: SendData = data.into_inner().data;
|
||||
let data: SendData = data.into_inner();
|
||||
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
|
||||
|
||||
if data.Type == SendType::File as i32 {
|
||||
if data.r#type == SendType::File as i32 {
|
||||
err!("File sends should use /api/sends/file")
|
||||
}
|
||||
|
||||
|
@ -194,7 +198,7 @@ async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbCon
|
|||
|
||||
#[derive(FromForm)]
|
||||
struct UploadData<'f> {
|
||||
model: Json<crate::util::UpCase<SendData>>,
|
||||
model: Json<SendData>,
|
||||
data: TempFile<'f>,
|
||||
}
|
||||
|
||||
|
@ -214,32 +218,43 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
|
|||
model,
|
||||
mut data,
|
||||
} = data.into_inner();
|
||||
let model = model.into_inner().data;
|
||||
let model = model.into_inner();
|
||||
|
||||
let Some(size) = data.len().to_i64() else {
|
||||
err!("Invalid send size");
|
||||
};
|
||||
if size < 0 {
|
||||
err!("Send size can't be negative")
|
||||
}
|
||||
|
||||
enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;
|
||||
|
||||
let size_limit = match CONFIG.user_attachment_limit() {
|
||||
let size_limit = match CONFIG.user_send_limit() {
|
||||
Some(0) => err!("File uploads are disabled"),
|
||||
Some(limit_kb) => {
|
||||
let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
|
||||
let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
|
||||
err!("Existing sends overflow")
|
||||
};
|
||||
let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
|
||||
err!("Send size overflow");
|
||||
};
|
||||
if left <= 0 {
|
||||
err!("Attachment storage limit reached! Delete some attachments to free up space")
|
||||
err!("Send storage limit reached! Delete some sends to free up space")
|
||||
}
|
||||
std::cmp::Ord::max(left as u64, SIZE_525_MB)
|
||||
i64::clamp(left, 0, SIZE_525_MB)
|
||||
}
|
||||
None => SIZE_525_MB,
|
||||
};
|
||||
|
||||
if size > size_limit {
|
||||
err!("Send storage limit exceeded with this file");
|
||||
}
|
||||
|
||||
let mut send = create_send(model, headers.user.uuid)?;
|
||||
if send.atype != SendType::File as i32 {
|
||||
err!("Send content is not a file");
|
||||
}
|
||||
|
||||
let size = data.len();
|
||||
if size > size_limit {
|
||||
err!("Attachment storage limit exceeded with this file");
|
||||
}
|
||||
|
||||
let file_id = crate::crypto::generate_send_id();
|
||||
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
|
||||
let file_path = folder_path.join(&file_id);
|
||||
|
@@ -251,9 +266,9 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:

     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id));
-        o.insert(String::from("Size"), Value::Number(size.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
+        o.insert(String::from("id"), Value::String(file_id));
+        o.insert(String::from("size"), Value::Number(size.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(size)));
     }
     send.data = serde_json::to_string(&data_value)?;

@@ -273,36 +288,44 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:

 // Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
 #[post("/sends/file/v2", data = "<data>")]
-async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    if data.Type != SendType::File as i32 {
+    if data.r#type != SendType::File as i32 {
         err!("Send content is not a file");
     }

     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    let file_length = match &data.FileLength {
-        Some(m) => Some(m.into_i32()?),
-        _ => None,
+    let file_length = match &data.file_length {
+        Some(m) => m.into_i64()?,
+        _ => err!("Invalid send length"),
     };
+    if file_length < 0 {
+        err!("Send size can't be negative")
+    }

-    let size_limit = match CONFIG.user_attachment_limit() {
+    let size_limit = match CONFIG.user_send_limit() {
         Some(0) => err!("File uploads are disabled"),
         Some(limit_kb) => {
-            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
+            let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
+                err!("Existing sends overflow")
+            };
+            let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
+                err!("Send size overflow");
+            };
             if left <= 0 {
-                err!("Attachment storage limit reached! Delete some attachments to free up space")
+                err!("Send storage limit reached! Delete some sends to free up space")
             }
-            std::cmp::Ord::max(left as u64, SIZE_525_MB)
+            i64::clamp(left, 0, SIZE_525_MB)
         }
         None => SIZE_525_MB,
     };

-    if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
-        err!("Attachment storage limit exceeded with this file");
+    if file_length > size_limit {
+        err!("Send storage limit exceeded with this file");
     }

     let mut send = create_send(data, headers.user.uuid)?;

@@ -311,9 +334,9 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con

     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id.clone()));
-        o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
+        o.insert(String::from("id"), Value::String(file_id.clone()));
+        o.insert(String::from("size"), Value::Number(file_length.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(file_length)));
     }
     send.data = serde_json::to_string(&data_value)?;
     send.save(&mut conn).await?;

@@ -326,7 +349,15 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
     })))
 }

+// https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+pub struct SendFileData {
+    id: String,
+    size: u64,
+    fileName: String,
+}
+
 // https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250
 #[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
 async fn post_send_file_v2_data(
     send_uuid: &str,
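Note on the hunk above: the added `SendFileData` struct is what ties the two v2 requests together. The metadata POST to /sends/file/v2 persists `id`, `size`, and `fileName` into `send.data`, and the follow-up multipart upload is validated against that record. A hedged sketch of parsing that stored JSON (the payload values here are invented for illustration):

    use serde::Deserialize;

    #[derive(Deserialize)]
    #[allow(non_snake_case)] // fileName stays camelCase to match the stored JSON
    struct SendFileData {
        id: String,
        size: u64,
        fileName: String,
    }

    fn main() {
        // Hypothetical send.data contents after the metadata request.
        let stored = r#"{"id":"a1b2c3","size":1024,"fileName":"notes.txt"}"#;
        let meta: SendFileData = serde_json::from_str(stored).expect("valid send data");
        assert_eq!(meta.size, 1024);
        assert_eq!(meta.fileName, "notes.txt");
    }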
@@ -344,15 +375,55 @@ async fn post_send_file_v2_data(
         err!("Send not found. Unable to save the file.")
     };

+    if send.atype != SendType::File as i32 {
+        err!("Send is not a file type send.");
+    }
+
     let Some(send_user_id) = &send.user_uuid else {
-        err!("Sends are only supported for users at the moment")
+        err!("Sends are only supported for users at the moment.")
     };

     if send_user_id != &headers.user.uuid {
-        err!("Send doesn't belong to user");
+        err!("Send doesn't belong to user.");
     }

+    let Ok(send_data) = serde_json::from_str::<SendFileData>(&send.data) else {
+        err!("Unable to decode send data as json.")
+    };
+
+    match data.data.raw_name() {
+        Some(raw_file_name) if raw_file_name.dangerous_unsafe_unsanitized_raw() == send_data.fileName => (),
+        Some(raw_file_name) => err!(
+            "Send file name does not match.",
+            format!(
+                "Expected file name '{}' got '{}'",
+                send_data.fileName,
+                raw_file_name.dangerous_unsafe_unsanitized_raw()
+            )
+        ),
+        _ => err!("Send file name does not match or is not provided."),
+    }
+
+    if file_id != send_data.id {
+        err!("Send file does not match send data.", format!("Expected id {} got {file_id}", send_data.id));
+    }
+
+    let Some(size) = data.data.len().to_u64() else {
+        err!("Send file size overflow.");
+    };
+
+    if size != send_data.size {
+        err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
+    }
+
     let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid);
     let file_path = folder_path.join(file_id);

+    // Check if the file already exists, if that is the case do not overwrite it
+    if tokio::fs::metadata(&file_path).await.is_ok() {
+        err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
+    }
+
     tokio::fs::create_dir_all(&folder_path).await?;

     if let Err(_err) = data.data.persist_to(&file_path).await {

@@ -372,15 +443,15 @@ async fn post_send_file_v2_data(
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct SendAccessData {
-    pub Password: Option<String>,
+    pub password: Option<String>,
 }

 #[post("/sends/access/<access_id>", data = "<data>")]
 async fn post_access(
     access_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    data: Json<SendAccessData>,
     mut conn: DbConn,
     ip: ClientIp,
     nt: Notify<'_>,

@@ -411,7 +482,7 @@ async fn post_access(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
             None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),

@@ -441,7 +512,7 @@ async fn post_access(
 async fn post_access_file(
     send_id: &str,
     file_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    data: Json<SendAccessData>,
     host: Host,
     mut conn: DbConn,
     nt: Notify<'_>,

@@ -472,7 +543,7 @@ async fn post_access_file(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password."),
             None => err_code!("Password not provided", 401),

@@ -495,9 +566,9 @@ async fn post_access_file(
     let token_claims = crate::auth::generate_send_claims(send_id, file_id);
     let token = crate::auth::encode_jwt(&token_claims);
     Ok(Json(json!({
-        "Object": "send-fileDownload",
-        "Id": file_id,
-        "Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
+        "object": "send-fileDownload",
+        "id": file_id,
+        "url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
     })))
 }
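Note on the hunk above: `post_access_file` answers with a download URL whose `t` query parameter is a JWT produced by `generate_send_claims`. The actual claim layout lives in src/auth.rs and is not shown in this diff; the sketch below only illustrates the general shape of signing a short-lived claim with the jsonwebtoken crate, and the claim fields and lifetime are assumptions, not vaultwarden's definitions:

    use jsonwebtoken::{encode, EncodingKey, Header};
    use serde::Serialize;

    #[derive(Serialize)]
    struct SendFileClaims {
        exp: i64,    // expiry as a unix timestamp; keeps the link short-lived
        sub: String, // the send id
        file_id: String,
    }

    fn sign_download_token(secret: &[u8], send_id: &str, file_id: &str) -> String {
        let claims = SendFileClaims {
            exp: chrono::Utc::now().timestamp() + 300, // assumed 5-minute validity
            sub: send_id.to_string(),
            file_id: file_id.to_string(),
        };
        encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).expect("JWT signing failed")
    }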
@@ -512,16 +583,10 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
 }

 #[put("/sends/<id>", data = "<data>")]
-async fn put_send(
-    id: &str,
-    data: JsonUpcase<SendData>,
-    headers: Headers,
-    mut conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data: SendData = data.into_inner().data;
+    let data: SendData = data.into_inner();
     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

     let mut send = match Send::find_by_uuid(id, &mut conn).await {

@@ -529,19 +594,38 @@ async fn put_send(
         None => err!("Send not found"),
     };

+    update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
+
+    Ok(Json(send.to_json()))
+}
+
+pub async fn update_send_from_data(
+    send: &mut Send,
+    data: SendData,
+    headers: &Headers,
+    conn: &mut DbConn,
+    nt: &Notify<'_>,
+    ut: UpdateType,
+) -> EmptyResult {
     if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
         err!("Send is not owned by user")
     }

-    if send.atype != data.Type {
+    if send.atype != data.r#type {
         err!("Sends can't change type")
     }

+    if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
+        err!(
+            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
+        );
+    }
+
     // When updating a file Send, we receive nulls in the File field, as it's immutable,
     // so we only need to update the data field in the Text case
-    if data.Type == SendType::Text as i32 {
-        let data_str = if let Some(mut d) = data.Text {
-            d.as_object_mut().and_then(|d| d.remove("Response"));
+    if data.r#type == SendType::Text as i32 {
+        let data_str = if let Some(mut d) = data.text {
+            d.as_object_mut().and_then(|d| d.remove("response"));
             serde_json::to_string(&d)?
         } else {
             err!("Send data not provided");

@@ -549,39 +633,28 @@ async fn put_send(
         send.data = data_str;
     }

-    if data.DeletionDate > Utc::now() + Duration::days(31) {
-        err!(
-            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
-        );
-    }
-    send.name = data.Name;
-    send.akey = data.Key;
-    send.deletion_date = data.DeletionDate.naive_utc();
-    send.notes = data.Notes;
-    send.max_access_count = match data.MaxAccessCount {
+    send.name = data.name;
+    send.akey = data.key;
+    send.deletion_date = data.deletion_date.naive_utc();
+    send.notes = data.notes;
+    send.max_access_count = match data.max_access_count {
         Some(m) => Some(m.into_i32()?),
         _ => None,
     };
-    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
-    send.hide_email = data.HideEmail;
-    send.disabled = data.Disabled;
+    send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
+    send.hide_email = data.hide_email;
+    send.disabled = data.disabled;

     // Only change the value if it's present
-    if let Some(password) = data.Password {
+    if let Some(password) = data.password {
         send.set_password(Some(&password));
     }

-    send.save(&mut conn).await?;
-    nt.send_send_update(
-        UpdateType::SyncSendUpdate,
-        &send,
-        &send.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
-        &mut conn,
-    )
-    .await;
-
-    Ok(Json(send.to_json()))
+    send.save(conn).await?;
+    if ut != UpdateType::None {
+        nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
+    }
+    Ok(())
 }

 #[delete("/sends/<id>")]
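Note on the sends.rs changes as a whole: they show the pattern repeated across this entire changeset. The bespoke `JsonUpcase`/`UpCase` wrapper is dropped in favor of plain `Json<T>` with `#[serde(rename_all = "camelCase")]`, and the reserved word `type` is handled with a raw identifier. A trimmed sketch (only a few of `SendData`'s real fields are shown here):

    use serde::Deserialize;

    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct SendData {
        r#type: i32,                                  // deserializes the JSON key "type"
        name: String,                                 // "name"
        hide_email: Option<bool>,                     // "hideEmail"
        deletion_date: chrono::DateTime<chrono::Utc>, // "deletionDate"
    }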
@@ -3,16 +3,14 @@ use rocket::serde::json::Json;
 use rocket::Route;

 use crate::{
-    api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
-        NumberOrString, PasswordData,
-    },
+    api::{core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, PasswordOrOtpData},
     auth::{ClientIp, Headers},
     crypto,
     db::{
         models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
     },
+    util::NumberOrString,
 };

 pub use crate::config::CONFIG;

@@ -22,13 +20,11 @@ pub fn routes() -> Vec<Route> {
 }

 #[post("/two-factor/get-authenticator", data = "<data>")]
-async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let type_ = TwoFactorType::Authenticator as i32;
     let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;

@@ -39,36 +35,35 @@ async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers
     };

     Ok(Json(json!({
-        "Enabled": enabled,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": enabled,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableAuthenticatorData {
-    MasterPasswordHash: String,
-    Key: String,
-    Token: NumberOrString,
+    key: String,
+    token: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 #[post("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
-    let data: EnableAuthenticatorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
-    let key = data.Key;
-    let token = data.Token.into_string();
+async fn activate_authenticator(data: Json<EnableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableAuthenticatorData = data.into_inner();
+    let key = data.key;
+    let token = data.token.into_string();

     let mut user = headers.user;

-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Validate key as base32 and 20 bytes length
     let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {

@@ -88,18 +83,14 @@ async fn activate_authenticator(
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Enabled": true,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": true,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }

 #[put("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator_put(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    conn: DbConn,
-) -> JsonResult {
+async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_authenticator(data, headers, conn).await
 }

@@ -154,8 +145,8 @@ pub async fn validate_totp_code(
         let time = (current_timestamp + step * 30i64) as u64;
         let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);

-        // Check the the given code equals the generated and if the time_step is larger then the one last used.
-        if generated == totp_code && time_step > i64::from(twofactor.last_used) {
+        // Check the given code equals the generated and if the time_step is larger then the one last used.
+        if generated == totp_code && time_step > twofactor.last_used {
             // If the step does not equals 0 the time is drifted either server or client side.
             if step != 0 {
                 warn!("TOTP Time drift detected. The step offset is {}", step);

@@ -163,10 +154,10 @@ pub async fn validate_totp_code(

             // Save the last used time step so only totp time steps higher then this one are allowed.
             // This will also save a newly created twofactor if the code is correct.
-            twofactor.last_used = time_step as i32;
+            twofactor.last_used = time_step;
             twofactor.save(conn).await?;
             return Ok(());
-        } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
+        } else if generated == totp_code && time_step <= twofactor.last_used {
             warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
             err!(
                 format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
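Note on the hunk above: `validate_totp_code` now keeps `last_used` as an `i64` time step instead of down-casting to `i32`. A hedged sketch of the drift window it implements, using the same `totp_custom::<Sha1>` call from totp-lite that appears in the diff (the loop bounds and helper function are illustrative):

    use totp_lite::{totp_custom, Sha1};

    // Accept a code from `steps` 30-second steps on either side of `now`,
    // but only if its step is newer than the last step that already succeeded.
    fn check_totp(secret: &[u8], code: &str, now: i64, last_used: i64, steps: i64) -> Option<i64> {
        for offset in -steps..=steps {
            let t = now + offset * 30;
            let generated = totp_custom::<Sha1>(30, 6, secret, t as u64);
            let time_step = t / 30;
            if generated == code && time_step > last_used {
                return Some(time_step); // caller persists this as the new last_used
            }
        }
        None // no match, or only a replayed step matched
    }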
@@ -5,8 +5,8 @@ use rocket::Route;

 use crate::{
     api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
-        PasswordData,
+        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult,
+        PasswordOrOtpData,
     },
     auth::Headers,
     crypto,

@@ -15,7 +15,7 @@ use crate::{
         DbConn,
     },
     error::MapResult,
-    util::get_reqwest_client,
+    http_client::make_http_request,
     CONFIG,
 };

@@ -92,14 +92,13 @@ impl DuoStatus {
 const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

 #[post("/two-factor/get-duo", data = "<data>")]
-async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
+    let user = headers.user;

-    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

-    let data = get_user_duo_data(&headers.user.uuid, &mut conn).await;
+    let data = get_user_duo_data(&user.uuid, &mut conn).await;

     let (enabled, data) = match data {
         DuoStatus::Global(_) => (true, Some(DuoData::secret())),

@@ -110,16 +109,16 @@ async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbC

     let json = if let Some(data) = data {
         json!({
-            "Enabled": enabled,
-            "Host": data.host,
-            "SecretKey": data.sk,
-            "IntegrationKey": data.ik,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "host": data.host,
+            "secretKey": data.sk,
+            "integrationKey": data.ik,
+            "object": "twoFactorDuo"
         })
     } else {
         json!({
-            "Enabled": enabled,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "object": "twoFactorDuo"
         })
     };

@@ -127,20 +126,21 @@ async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbC
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case, dead_code)]
+#[serde(rename_all = "camelCase")]
 struct EnableDuoData {
-    MasterPasswordHash: String,
-    Host: String,
-    SecretKey: String,
-    IntegrationKey: String,
+    host: String,
+    secret_key: String,
+    integration_key: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 impl From<EnableDuoData> for DuoData {
     fn from(d: EnableDuoData) -> Self {
         Self {
-            host: d.Host,
-            ik: d.IntegrationKey,
-            sk: d.SecretKey,
+            host: d.host,
+            ik: d.integration_key,
+            sk: d.secret_key,
         }
     }
 }

@@ -151,17 +151,20 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
         st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
     }

-    !empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
+    !empty_or_default(&data.host) && !empty_or_default(&data.secret_key) && !empty_or_default(&data.integration_key)
 }

 #[post("/two-factor/duo", data = "<data>")]
-async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableDuoData = data.into_inner().data;
+async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableDuoData = data.into_inner();
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     let (data, data_str) = if check_duo_fields_custom(&data) {
         let data_req: DuoData = data.into();

@@ -181,16 +184,16 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Enabled": true,
-        "Host": data.host,
-        "SecretKey": data.sk,
-        "IntegrationKey": data.ik,
-        "Object": "twoFactorDuo"
+        "enabled": true,
+        "host": data.host,
+        "secretKey": data.sk,
+        "integrationKey": data.ik,
+        "object": "twoFactorDuo"
     })))
 }

 #[put("/two-factor/duo", data = "<data>")]
-async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_duo_put(data: Json<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_duo(data, headers, conn).await
 }

@@ -207,10 +210,7 @@ async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData)

     let m = Method::from_str(method).unwrap_or_default();

-    let client = get_reqwest_client();
-
-    client
-        .request(m, &url)
+    make_http_request(m, &url)?
         .basic_auth(username, Some(password))
         .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
         .header(header::DATE, date)

@@ -252,7 +252,7 @@ async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
 }

 // let (ik, sk, ak, host) = get_duo_keys();
-async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
+pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
     let data = match User::find_by_mail(email, conn).await {
         Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
         _ => DuoData::global(),

@@ -281,10 +281,6 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
 }

 pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
-    // email is as entered by the user, so it needs to be normalized before
-    // comparison with auth_user below.
-    let email = &email.to_lowercase();
-
     let split: Vec<&str> = response.split(':').collect();
     if split.len() != 2 {
         err!(
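Note on the 2FA endpoints above: every configuration route in this changeset now funnels through a shared `PasswordOrOtpData` check, where the client proves either the master password hash or a protected-action OTP. The boolean passed to `validate` (`true` in the activation paths) appears to control whether a matching OTP is consumed after use. A hedged, self-contained sketch of that dispatch; the real `validate` also consults the user model and database, and the parameter name here is an assumption:

    struct PasswordOrOtpData {
        master_password_hash: Option<String>,
        otp: Option<String>,
    }

    impl PasswordOrOtpData {
        // `delete_if_valid` mirrors the second argument to validate() above (assumed name):
        // a matching OTP is single-use and removed once the check passes.
        fn validate(&self, delete_if_valid: bool) -> Result<(), &'static str> {
            match (&self.master_password_hash, &self.otp) {
                (Some(_hash), _) => Ok(()), // compare against the stored password hash here
                (None, Some(_otp)) => {
                    if delete_if_valid {
                        // remove the consumed protected-action token here
                    }
                    Ok(())
                }
                (None, None) => Err("No validation provided"),
            }
        }
    }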
src/api/core/two_factor/duo_oidc.rs (new file, 498 lines; every line below is added)
@@ -0,0 +1,498 @@
use chrono::Utc;
use data_encoding::HEXLOWER;
use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header, Validation};
use reqwest::{header, StatusCode};
use ring::digest::{digest, Digest, SHA512_256};
use serde::Serialize;
use std::collections::HashMap;

use crate::{
    api::{core::two_factor::duo::get_duo_keys_email, EmptyResult},
    crypto,
    db::{
        models::{EventType, TwoFactorDuoContext},
        DbConn, DbPool,
    },
    error::Error,
    http_client::make_http_request,
    CONFIG,
};
use url::Url;

// The location on this service that Duo should redirect users to. For us, this is a bridge
// built in to the Bitwarden clients.
// See: https://github.com/bitwarden/clients/blob/main/apps/web/src/connectors/duo-redirect.ts
const DUO_REDIRECT_LOCATION: &str = "duo-redirect-connector.html";

// Number of seconds that a JWT we generate for Duo should be valid for.
const JWT_VALIDITY_SECS: i64 = 300;

// Number of seconds that a Duo context stored in the database should be valid for.
const CTX_VALIDITY_SECS: i64 = 300;

// Expected algorithm used by Duo to sign JWTs.
const DUO_RESP_SIGNATURE_ALG: Algorithm = Algorithm::HS512;

// Signature algorithm we're using to sign JWTs for Duo. Must be either HS512 or HS256.
const JWT_SIGNATURE_ALG: Algorithm = Algorithm::HS512;

// Size of random strings for state and nonce. Must be at least 16 characters and at most 1024 characters.
// If increasing this above 64, also increase the size of the twofactor_duo_ctx.state and
// twofactor_duo_ctx.nonce database columns for postgres and mariadb.
const STATE_LENGTH: usize = 64;

// client_assertion payload for health checks and obtaining MFA results.
#[derive(Debug, Serialize, Deserialize)]
struct ClientAssertion {
    pub iss: String,
    pub sub: String,
    pub aud: String,
    pub exp: i64,
    pub jti: String,
    pub iat: i64,
}

// authorization request payload sent with clients to Duo for MFA
#[derive(Debug, Serialize, Deserialize)]
struct AuthorizationRequest {
    pub response_type: String,
    pub scope: String,
    pub exp: i64,
    pub client_id: String,
    pub redirect_uri: String,
    pub state: String,
    pub duo_uname: String,
    pub iss: String,
    pub aud: String,
    pub nonce: String,
}

// Duo service health check responses
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum HealthCheckResponse {
    HealthOK {
        stat: String,
    },
    HealthFail {
        message: String,
        message_detail: String,
    },
}

// Outer structure of response when exchanging authz code for MFA results
#[derive(Debug, Serialize, Deserialize)]
struct IdTokenResponse {
    id_token: String, // IdTokenClaims
    access_token: String,
    expires_in: i64,
    token_type: String,
}

// Inner structure of IdTokenResponse.id_token
#[derive(Debug, Serialize, Deserialize)]
struct IdTokenClaims {
    preferred_username: String,
    nonce: String,
}

// Duo OIDC Authorization Client
// See https://duo.com/docs/oauthapi
struct DuoClient {
    client_id: String,     // Duo Client ID (DuoData.ik)
    client_secret: String, // Duo Client Secret (DuoData.sk)
    api_host: String,      // Duo API hostname (DuoData.host)
    redirect_uri: String,  // URL in this application clients should call for MFA verification
}

impl DuoClient {
    // Construct a new DuoClient
    fn new(client_id: String, client_secret: String, api_host: String, redirect_uri: String) -> DuoClient {
        DuoClient {
            client_id,
            client_secret,
            api_host,
            redirect_uri,
        }
    }

    // Generate a client assertion for health checks and authorization code exchange.
    fn new_client_assertion(&self, url: &str) -> ClientAssertion {
        let now = Utc::now().timestamp();
        let jwt_id = crypto::get_random_string_alphanum(STATE_LENGTH);

        ClientAssertion {
            iss: self.client_id.clone(),
            sub: self.client_id.clone(),
            aud: url.to_string(),
            exp: now + JWT_VALIDITY_SECS,
            jti: jwt_id,
            iat: now,
        }
    }

    // Given a serde-serializable struct, attempt to encode it as a JWT
    fn encode_duo_jwt<T: Serialize>(&self, jwt_payload: T) -> Result<String, Error> {
        match jsonwebtoken::encode(
            &Header::new(JWT_SIGNATURE_ALG),
            &jwt_payload,
            &EncodingKey::from_secret(self.client_secret.as_bytes()),
        ) {
            Ok(token) => Ok(token),
            Err(e) => err!(format!("Error encoding Duo JWT: {e:?}")),
        }
    }

    // "required" health check to verify the integration is configured and Duo's services
    // are up.
    // https://duo.com/docs/oauthapi#health-check
    async fn health_check(&self) -> Result<(), Error> {
        let health_check_url: String = format!("https://{}/oauth/v1/health_check", self.api_host);

        let jwt_payload = self.new_client_assertion(&health_check_url);

        let token = match self.encode_duo_jwt(jwt_payload) {
            Ok(token) => token,
            Err(e) => return Err(e),
        };

        let mut post_body = HashMap::new();
        post_body.insert("client_assertion", token);
        post_body.insert("client_id", self.client_id.clone());

        let res = match make_http_request(reqwest::Method::POST, &health_check_url)?
            .header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
            .form(&post_body)
            .send()
            .await
        {
            Ok(r) => r,
            Err(e) => err!(format!("Error requesting Duo health check: {e:?}")),
        };

        let response: HealthCheckResponse = match res.json::<HealthCheckResponse>().await {
            Ok(r) => r,
            Err(e) => err!(format!("Duo health check response decode error: {e:?}")),
        };

        let health_stat: String = match response {
            HealthCheckResponse::HealthOK {
                stat,
            } => stat,
            HealthCheckResponse::HealthFail {
                message,
                message_detail,
            } => err!(format!("Duo health check FAIL response, msg: {}, detail: {}", message, message_detail)),
        };

        if health_stat != "OK" {
            err!(format!("Duo health check failed, got OK-like body with stat {health_stat}"));
        }

        Ok(())
    }

    // Constructs the URL for the authorization request endpoint on Duo's service.
    // Clients are sent here to continue authentication.
    // https://duo.com/docs/oauthapi#authorization-request
    fn make_authz_req_url(&self, duo_username: &str, state: String, nonce: String) -> Result<String, Error> {
        let now = Utc::now().timestamp();

        let jwt_payload = AuthorizationRequest {
            response_type: String::from("code"),
            scope: String::from("openid"),
            exp: now + JWT_VALIDITY_SECS,
            client_id: self.client_id.clone(),
            redirect_uri: self.redirect_uri.clone(),
            state,
            duo_uname: String::from(duo_username),
            iss: self.client_id.clone(),
            aud: format!("https://{}", self.api_host),
            nonce,
        };

        let token = match self.encode_duo_jwt(jwt_payload) {
            Ok(token) => token,
            Err(e) => return Err(e),
        };

        let authz_endpoint = format!("https://{}/oauth/v1/authorize", self.api_host);
        let mut auth_url = match Url::parse(authz_endpoint.as_str()) {
            Ok(url) => url,
            Err(e) => err!(format!("Error parsing Duo authorization URL: {e:?}")),
        };

        {
            let mut query_params = auth_url.query_pairs_mut();
            query_params.append_pair("response_type", "code");
            query_params.append_pair("client_id", self.client_id.as_str());
            query_params.append_pair("request", token.as_str());
        }

        let final_auth_url = auth_url.to_string();
        Ok(final_auth_url)
    }

    // Exchange the authorization code obtained from an access token provided by the user
    // for the result of the MFA and validate.
    // See: https://duo.com/docs/oauthapi#access-token (under Response Format)
    async fn exchange_authz_code_for_result(
        &self,
        duo_code: &str,
        duo_username: &str,
        nonce: &str,
    ) -> Result<(), Error> {
        if duo_code.is_empty() {
            err!("Empty Duo authorization code")
        }

        let token_url = format!("https://{}/oauth/v1/token", self.api_host);

        let jwt_payload = self.new_client_assertion(&token_url);

        let token = match self.encode_duo_jwt(jwt_payload) {
            Ok(token) => token,
            Err(e) => return Err(e),
        };

        let mut post_body = HashMap::new();
        post_body.insert("grant_type", String::from("authorization_code"));
        post_body.insert("code", String::from(duo_code));

        // Must be the same URL that was supplied in the authorization request for the supplied duo_code
        post_body.insert("redirect_uri", self.redirect_uri.clone());

        post_body
            .insert("client_assertion_type", String::from("urn:ietf:params:oauth:client-assertion-type:jwt-bearer"));
        post_body.insert("client_assertion", token);

        let res = match make_http_request(reqwest::Method::POST, &token_url)?
            .header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
            .form(&post_body)
            .send()
            .await
        {
            Ok(r) => r,
            Err(e) => err!(format!("Error exchanging Duo code: {e:?}")),
        };

        let status_code = res.status();
        if status_code != StatusCode::OK {
            err!(format!("Failure response from Duo: {}", status_code))
        }

        let response: IdTokenResponse = match res.json::<IdTokenResponse>().await {
            Ok(r) => r,
            Err(e) => err!(format!("Error decoding ID token response: {e:?}")),
        };

        let mut validation = Validation::new(DUO_RESP_SIGNATURE_ALG);
        validation.set_required_spec_claims(&["exp", "aud", "iss"]);
        validation.set_audience(&[&self.client_id]);
        validation.set_issuer(&[token_url.as_str()]);

        let token_data = match jsonwebtoken::decode::<IdTokenClaims>(
            &response.id_token,
            &DecodingKey::from_secret(self.client_secret.as_bytes()),
            &validation,
        ) {
            Ok(c) => c,
            Err(e) => err!(format!("Failed to decode Duo token {e:?}")),
        };

        let matching_nonces = crypto::ct_eq(nonce, &token_data.claims.nonce);
        let matching_usernames = crypto::ct_eq(duo_username, &token_data.claims.preferred_username);

        if !(matching_nonces && matching_usernames) {
            err!("Error validating Duo authorization, nonce or username mismatch.")
        };

        Ok(())
    }
}

struct DuoAuthContext {
    pub state: String,
    pub user_email: String,
    pub nonce: String,
    pub exp: i64,
}

// Given a state string, retrieve the associated Duo auth context and
// delete the retrieved state from the database.
async fn extract_context(state: &str, conn: &mut DbConn) -> Option<DuoAuthContext> {
    let ctx: TwoFactorDuoContext = match TwoFactorDuoContext::find_by_state(state, conn).await {
        Some(c) => c,
        None => return None,
    };

    if ctx.exp < Utc::now().timestamp() {
        ctx.delete(conn).await.ok();
        return None;
    }

    // Copy the context data, so that we can delete the context from
    // the database before returning.
    let ret_ctx = DuoAuthContext {
        state: ctx.state.clone(),
        user_email: ctx.user_email.clone(),
        nonce: ctx.nonce.clone(),
        exp: ctx.exp,
    };

    ctx.delete(conn).await.ok();
    Some(ret_ctx)
}

// Task to clean up expired Duo authentication contexts that may have accumulated in the database.
pub async fn purge_duo_contexts(pool: DbPool) {
    debug!("Purging Duo authentication contexts");
    if let Ok(mut conn) = pool.get().await {
        TwoFactorDuoContext::purge_expired_duo_contexts(&mut conn).await;
    } else {
        error!("Failed to get DB connection while purging expired Duo authentications")
    }
}

// Construct the url that Duo should redirect users to.
fn make_callback_url(client_name: &str) -> Result<String, Error> {
    // Get the location of this application as defined in the config.
    let base = match Url::parse(&format!("{}/", CONFIG.domain())) {
        Ok(url) => url,
        Err(e) => err!(format!("Error parsing configured domain URL (check your domain configuration): {e:?}")),
    };

    // Add the client redirect bridge location
    let mut callback = match base.join(DUO_REDIRECT_LOCATION) {
        Ok(url) => url,
        Err(e) => err!(format!("Error constructing Duo redirect URL (check your domain configuration): {e:?}")),
    };

    // Add the 'client' string with the authenticating device type. The callback connector uses this
    // information to figure out how it should handle certain clients.
    {
        let mut query_params = callback.query_pairs_mut();
        query_params.append_pair("client", client_name);
    }
    Ok(callback.to_string())
}

// Pre-redirect first stage of the Duo OIDC authentication flow.
// Returns the "AuthUrl" that should be returned to clients for MFA.
pub async fn get_duo_auth_url(
    email: &str,
    client_id: &str,
    device_identifier: &String,
    conn: &mut DbConn,
) -> Result<String, Error> {
    let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;

    let callback_url = match make_callback_url(client_id) {
        Ok(url) => url,
        Err(e) => return Err(e),
    };

    let client = DuoClient::new(ik, sk, host, callback_url);

    match client.health_check().await {
        Ok(()) => {}
        Err(e) => return Err(e),
    };

    // Generate random OAuth2 state and OIDC Nonce
    let state: String = crypto::get_random_string_alphanum(STATE_LENGTH);
    let nonce: String = crypto::get_random_string_alphanum(STATE_LENGTH);

    // Bind the nonce to the device that's currently authing by hashing the nonce and device id
    // and sending the result as the OIDC nonce.
    let d: Digest = digest(&SHA512_256, format!("{nonce}{device_identifier}").as_bytes());
    let hash: String = HEXLOWER.encode(d.as_ref());

    match TwoFactorDuoContext::save(state.as_str(), email, nonce.as_str(), CTX_VALIDITY_SECS, conn).await {
        Ok(()) => client.make_authz_req_url(email, state, hash),
        Err(e) => err!(format!("Error saving Duo authentication context: {e:?}")),
    }
}

// Post-redirect second stage of the Duo OIDC authentication flow.
// Exchanges an authorization code for the MFA result with Duo's API and validates the result.
pub async fn validate_duo_login(
    email: &str,
    two_factor_token: &str,
    client_id: &str,
    device_identifier: &str,
    conn: &mut DbConn,
) -> EmptyResult {
    // Result supplied to us by clients in the form "<authz code>|<state>"
    let split: Vec<&str> = two_factor_token.split('|').collect();
    if split.len() != 2 {
        err!(
            "Invalid response length",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        );
    }

    let code = split[0];
    let state = split[1];

    let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;

    // Get the context by the state reported by the client. If we don't have one,
    // it means the context is either missing or expired.
    let ctx = match extract_context(state, conn).await {
        Some(c) => c,
        None => {
            err!(
                "Error validating duo authentication",
                ErrorEvent {
                    event: EventType::UserFailedLogIn2fa
                }
            )
        }
    };

    // Context validation steps
    let matching_usernames = crypto::ct_eq(email, &ctx.user_email);

    // Probably redundant, but we're double-checking them anyway.
    let matching_states = crypto::ct_eq(state, &ctx.state);
    let unexpired_context = ctx.exp > Utc::now().timestamp();

    if !(matching_usernames && matching_states && unexpired_context) {
        err!(
            "Error validating duo authentication",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        )
    }

    let callback_url = match make_callback_url(client_id) {
        Ok(url) => url,
        Err(e) => return Err(e),
    };

    let client = DuoClient::new(ik, sk, host, callback_url);

    match client.health_check().await {
        Ok(()) => {}
        Err(e) => return Err(e),
    };

    let d: Digest = digest(&SHA512_256, format!("{}{}", ctx.nonce, device_identifier).as_bytes());
    let hash: String = HEXLOWER.encode(d.as_ref());

    match client.exchange_authz_code_for_result(code, email, hash.as_str()).await {
        Ok(_) => Ok(()),
        Err(_) => {
            err!(
                "Error validating duo authentication",
                ErrorEvent {
                    event: EventType::UserFailedLogIn2fa
                }
            )
        }
    }
}
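Note on duo_oidc.rs above: a detail worth calling out is that the stored OIDC nonce is never sent to Duo directly. Both stages hash the random nonce together with the device identifier and transmit only the digest, so a stolen authentication context cannot finish MFA from a different device. The binding step in isolation, using the same ring and data-encoding calls as the file above:

    use data_encoding::HEXLOWER;
    use ring::digest::{digest, SHA512_256};

    // Same construction as get_duo_auth_url / validate_duo_login above.
    fn bind_nonce_to_device(nonce: &str, device_identifier: &str) -> String {
        let d = digest(&SHA512_256, format!("{nonce}{device_identifier}").as_bytes());
        HEXLOWER.encode(d.as_ref())
    }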
@@ -1,16 +1,16 @@
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
 use rocket::serde::json::Json;
 use rocket::Route;

 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType, User},
         DbConn,
     },
     error::{Error, MapResult},

@@ -22,28 +22,31 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct SendEmailLoginData {
-    Email: String,
-    MasterPasswordHash: String,
-    // DeviceIdentifier: String, // Currently not used
+    #[serde(alias = "Email")]
+    email: String,
+    #[serde(alias = "MasterPasswordHash")]
+    master_password_hash: String,
 }

 /// User is trying to login and wants to use email 2FA.
 /// Does not require Bearer token
 #[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
-async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
-    let data: SendEmailLoginData = data.into_inner().data;
+async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
+    let data: SendEmailLoginData = data.into_inner();

     use crate::db::models::User;

     // Get the user
-    let user = match User::find_by_mail(&data.Email, &mut conn).await {
+    let user = match User::find_by_mail(&data.email, &mut conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again."),
     };

     // Check password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
+    if !user.check_valid_password(&data.master_password_hash) {
         err!("Username or password is incorrect. Try again.")
     }

@@ -76,13 +79,11 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {

 /// When user clicks on Manage email 2FA show the user the related information
 #[post("/two-factor/get-email", data = "<data>")]
-async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_email(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let (enabled, mfa_email) =
         match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {

@@ -94,29 +95,33 @@ async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: D
     };

     Ok(Json(json!({
-        "Email": mfa_email,
-        "Enabled": enabled,
-        "Object": "twoFactorEmail"
+        "email": mfa_email,
+        "enabled": enabled,
+        "object": "twoFactorEmail"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct SendEmailData {
     /// Email where 2FA codes will be sent to, can be different than user email account.
-    Email: String,
-    MasterPasswordHash: String,
+    email: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Send a verification email to the specified email address to check whether it exists/belongs to user.
 #[post("/two-factor/send-email", data = "<data>")]
-async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    let data: SendEmailData = data.into_inner().data;
+async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data: SendEmailData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
+    }
+    .validate(&user, false, &mut conn)
+    .await?;

     if !CONFIG._enable_email_2fa() {
         err!("Email 2FA is disabled")

@@ -129,7 +134,7 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
     }

     let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
-    let twofactor_data = EmailTokenData::new(data.Email, generated_token);
+    let twofactor_data = EmailTokenData::new(data.email, generated_token);

     // Uses EmailVerificationChallenge as type to show that it's not verified yet.
     let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());

@@ -141,22 +146,27 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
 }

 #[derive(Deserialize, Serialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmailData {
-    Email: String,
-    MasterPasswordHash: String,
-    Token: String,
+    email: String,
+    token: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Verify email belongs to user and can be used for 2FA email codes.
 #[put("/two-factor/email", data = "<data>")]
-async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EmailData = data.into_inner().data;
+async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EmailData = data.into_inner();
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    // This is the last step in the verification process, delete the otp directly afterwards
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     let type_ = TwoFactorType::EmailVerificationChallenge as i32;
     let mut twofactor =

@@ -169,7 +179,7 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
         _ => err!("No token available"),
     };

-    if !crypto::ct_eq(issued_token, data.Token) {
+    if !crypto::ct_eq(issued_token, data.token) {
         err!("Token is invalid")
     }

@@ -183,9 +193,9 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Email": email_data.email,
-        "Enabled": "true",
-        "Object": "twoFactorEmail"
+        "email": email_data.email,
+        "enabled": "true",
+        "object": "twoFactorEmail"
     })))
 }

@@ -225,9 +235,9 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
     twofactor.data = email_data.to_json();
     twofactor.save(conn).await?;

-    let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
+    let date = DateTime::from_timestamp(email_data.token_sent, 0).expect("Email token timestamp invalid.").naive_utc();
     let max_time = CONFIG.email_expiration_time() as i64;
-    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+    if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
         err!(
             "Token has expired",
             ErrorEvent {

@@ -258,14 +268,14 @@ impl EmailTokenData {
         EmailTokenData {
             email,
             last_token: Some(token),
-            token_sent: Utc::now().naive_utc().timestamp(),
+            token_sent: Utc::now().timestamp(),
             attempts: 0,
         }
     }

     pub fn set_token(&mut self, token: String) {
         self.last_token = Some(token);
-        self.token_sent = Utc::now().naive_utc().timestamp();
+        self.token_sent = Utc::now().timestamp();
     }

     pub fn reset_token(&mut self) {

@@ -282,7 +292,7 @@ impl EmailTokenData {
     }

     pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
-        let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(string);
+        let res: Result<EmailTokenData, serde_json::Error> = serde_json::from_str(string);
         match res {
             Ok(x) => Ok(x),
             Err(_) => err!("Could not decode EmailTokenData from string"),

@@ -290,6 +300,15 @@ impl EmailTokenData {
     }
 }

+pub async fn activate_email_2fa(user: &User, conn: &mut DbConn) -> EmptyResult {
+    if user.verified_at.is_none() {
+        err!("Auto-enabling of email 2FA failed because the users email address has not been verified!");
+    }
+    let twofactor_data = EmailTokenData::new(user.email.clone(), String::new());
+    let twofactor = TwoFactor::new(user.uuid.clone(), TwoFactorType::Email, twofactor_data.to_json());
+    twofactor.save(conn).await
+}
+
 /// Takes an email address and obscures it by replacing it with asterisks except two characters.
 pub fn obscure_email(email: &str) -> String {
     let split: Vec<&str> = email.rsplitn(2, '@').collect();

@@ -311,6 +330,14 @@ pub fn obscure_email(email: &str) -> String {
     format!("{}@{}", new_name, &domain)
 }

+pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
+        activate_email_2fa(&user, conn).await
+    } else {
+        err!("User not found!");
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
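Note on the chrono migration above: the deprecated `NaiveDateTime::from_timestamp_opt` and the panicking `Duration::seconds` constructor are replaced with `DateTime::from_timestamp` and the fallible `TimeDelta::try_seconds`. The expiry check reduces to this standalone sketch, built from the same calls shown in the diff:

    use chrono::{DateTime, TimeDelta, Utc};

    // True once `max_secs` have passed since the token was issued.
    fn token_expired(token_sent: i64, max_secs: i64) -> bool {
        let sent = DateTime::from_timestamp(token_sent, 0)
            .expect("Email token timestamp invalid.")
            .naive_utc();
        sent + TimeDelta::try_seconds(max_secs).unwrap() < Utc::now().naive_utc()
    }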
@ -1,20 +1,27 @@
|
|||
use chrono::{Duration, Utc};
|
||||
use chrono::{TimeDelta, Utc};
|
||||
use data_encoding::BASE32;
|
||||
use rocket::serde::json::Json;
|
||||
use rocket::Route;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::{
|
||||
api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
|
||||
api::{
|
||||
core::{log_event, log_user_event},
|
||||
EmptyResult, JsonResult, PasswordOrOtpData,
|
||||
},
|
||||
auth::{ClientHeaders, Headers},
|
||||
crypto,
|
||||
db::{models::*, DbConn, DbPool},
|
||||
mail, CONFIG,
|
||||
mail,
|
||||
util::NumberOrString,
|
||||
CONFIG,
|
||||
};
|
||||
|
||||
pub mod authenticator;
|
||||
pub mod duo;
|
||||
pub mod duo_oidc;
|
||||
pub mod email;
|
||||
pub mod protected_actions;
|
||||
pub mod webauthn;
|
||||
pub mod yubikey;
|
||||
|
||||
|
@ -33,6 +40,7 @@ pub fn routes() -> Vec<Route> {
|
|||
routes.append(&mut email::routes());
|
||||
routes.append(&mut webauthn::routes());
|
||||
routes.append(&mut yubikey::routes());
|
||||
routes.append(&mut protected_actions::routes());
|
||||
|
||||
routes
|
||||
}
|
||||
|
@@ -43,59 +51,58 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

     Json(json!({
-        "Data": twofactors_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": twofactors_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }

 #[post("/two-factor/get-recover", data = "<data>")]
-fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, true, &mut conn).await?;

     Ok(Json(json!({
-        "Code": user.totp_recover,
-        "Object": "twoFactorRecover"
+        "code": user.totp_recover,
+        "object": "twoFactorRecover"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RecoverTwoFactor {
-    MasterPasswordHash: String,
-    Email: String,
-    RecoveryCode: String,
+    master_password_hash: String,
+    email: String,
+    recovery_code: String,
 }

 #[post("/two-factor/recover", data = "<data>")]
-async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
-    let data: RecoverTwoFactor = data.into_inner().data;
+async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
+    let data: RecoverTwoFactor = data.into_inner();

     use crate::db::models::User;

     // Get the user
-    let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
+    let mut user = match User::find_by_mail(&data.email, &mut conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again."),
     };

     // Check password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
+    if !user.check_valid_password(&data.master_password_hash) {
         err!("Username or password is incorrect. Try again.")
     }

     // Check if recovery code is correct
-    if !user.check_valid_recovery_code(&data.RecoveryCode) {
+    if !user.check_valid_recovery_code(&data.recovery_code) {
         err!("Recovery code is incorrect. Try again.")
     }

     // Remove all twofactors from the user
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &mut conn).await?;

     log_user_event(
         EventType::UserRecovered2fa as i32,
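The recurring change in these hunks is the removal of the JsonUpcase wrapper: request structs now derive #[serde(rename_all = "camelCase")] so snake_case Rust fields map onto camelCase JSON keys directly. A minimal standalone sketch of the mechanism follows; LoginData and the payloads are made-up illustrations, not vaultwarden types, and the alias attribute shows how older PascalCase payloads can still be accepted alongside the new casing.

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct LoginData {
    // `rename_all = "camelCase"` maps `masterPasswordHash` onto this field;
    // the extra `alias` keeps old PascalCase payloads deserializing too.
    #[serde(alias = "MasterPasswordHash")]
    master_password_hash: String,
    #[serde(alias = "Email")]
    email: String,
}

fn main() {
    // Newer clients send camelCase...
    let new_style: LoginData =
        serde_json::from_str(r#"{"masterPasswordHash":"h","email":"a@b.c"}"#).unwrap();
    // ...while older clients may still send PascalCase; both parse.
    let old_style: LoginData =
        serde_json::from_str(r#"{"MasterPasswordHash":"h","Email":"a@b.c"}"#).unwrap();
    assert_eq!(new_style.email, old_style.email);
    println!("{new_style:?}");
}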
@@ -121,23 +128,27 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct DisableTwoFactorData {
-    MasterPasswordHash: String,
-    Type: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
+    r#type: NumberOrString,
 }

 #[post("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: DisableTwoFactorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
+async fn disable_twofactor(data: Json<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: DisableTwoFactorData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
-    }
+    // Delete directly after a valid token has been provided
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

-    let type_ = data.Type.into_i32()?;
+    let type_ = data.r#type.into_i32()?;

     if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
         twofactor.delete(&mut conn).await?;
@@ -145,36 +156,94 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
         .await;
     }

-    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
-
-    if twofactor_disabled {
-        for user_org in
-            UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn)
-                .await
-                .into_iter()
-        {
-            if user_org.atype < UserOrgType::Admin {
-                if CONFIG.mail_enabled() {
-                    let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap();
-                    mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
-                }
-                user_org.delete(&mut conn).await?;
-            }
-        }
+    if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
+        enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
     }

     Ok(Json(json!({
-        "Enabled": false,
-        "Type": type_,
-        "Object": "twoFactorProvider"
+        "enabled": false,
+        "type": type_,
+        "object": "twoFactorProvider"
     })))
 }

 #[put("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
     disable_twofactor(data, headers, conn).await
 }

+pub async fn enforce_2fa_policy(
+    user: &User,
+    act_uuid: &str,
+    device_type: i32,
+    ip: &std::net::IpAddr,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn)
+        .await
+        .into_iter()
+    {
+        // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
+        if member.atype < UserOrgType::Admin {
+            if CONFIG.mail_enabled() {
+                let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
+                mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
+            }
+            let mut member = member;
+            member.revoke();
+            member.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRevoked as i32,
+                &member.uuid,
+                &member.org_uuid,
+                act_uuid,
+                device_type,
+                ip,
+                conn,
+            )
+            .await;
+        }
+    }
+
+    Ok(())
+}
+
+pub async fn enforce_2fa_policy_for_org(
+    org_uuid: &str,
+    act_uuid: &str,
+    device_type: i32,
+    ip: &std::net::IpAddr,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap();
+    for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() {
+        // Don't enforce the policy for Admins and Owners.
+        if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
+            if CONFIG.mail_enabled() {
+                let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
+                mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
+            }
+            let mut member = member;
+            member.revoke();
+            member.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRevoked as i32,
+                &member.uuid,
+                org_uuid,
+                act_uuid,
+                device_type,
+                ip,
+                conn,
+            )
+            .await;
+        }
+    }
+
+    Ok(())
+}
+
 pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
     debug!("Sending notifications for incomplete 2FA logins");

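Several handlers above swap the inline master-password check for a shared PasswordOrOtpData validator that accepts either the password hash or an emailed OTP. Below is a self-contained sketch of that pattern; the types and logic are illustrative stand-ins, and vaultwarden's real validate() additionally takes a DbConn and a flag controlling whether a used OTP is consumed, which this sketch omits.

struct User {
    password_hash: String, // stand-in for the stored credential hash
}

impl User {
    fn check_valid_password(&self, hash: &str) -> bool {
        // the real server compares a salted KDF output, not raw strings
        self.password_hash == hash
    }
}

struct PasswordOrOtpData {
    master_password_hash: Option<String>,
    otp: Option<String>,
}

impl PasswordOrOtpData {
    // Either credential may authorize the action; exactly one path must succeed.
    fn validate(&self, user: &User, otp_is_valid: impl Fn(&str) -> bool) -> Result<(), &'static str> {
        match (&self.master_password_hash, &self.otp) {
            (Some(hash), _) if user.check_valid_password(hash) => Ok(()),
            (Some(_), _) => Err("Invalid password"),
            (None, Some(otp)) if otp_is_valid(otp) => Ok(()),
            (None, Some(_)) => Err("Invalid OTP"),
            (None, None) => Err("No validation provided"),
        }
    }
}

fn main() {
    let user = User { password_hash: "kdf-output".into() };
    let data = PasswordOrOtpData { master_password_hash: None, otp: Some("123456".into()) };
    assert!(data.validate(&user, |otp| otp == "123456").is_ok());
}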
@@ -191,7 +260,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
     };

     let now = Utc::now().naive_utc();
-    let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
+    let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap();
     let time_before = now - time_limit;
     let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
     for login in incomplete_logins {
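The Duration::minutes to TimeDelta::try_minutes change tracks chrono 0.4.35+, where the panicking constructors are deprecated in favor of fallible try_* variants that return Option. A small illustration with made-up values:

use chrono::{TimeDelta, Utc};

fn main() {
    let time_limit = TimeDelta::try_minutes(5).expect("5 minutes is always in range");
    let cutoff = Utc::now().naive_utc() - time_limit;
    println!("logins started before {cutoff} are considered incomplete");

    // Out-of-range inputs yield None instead of panicking inside chrono:
    assert!(TimeDelta::try_minutes(i64::MAX).is_none());
}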
@@ -200,10 +269,24 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
             "User {} did not complete a 2FA login within the configured time limit. IP: {}",
             user.email, login.ip_address
         );
-        mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
-            .await
-            .expect("Error sending incomplete 2FA email");
-        login.delete(&mut conn).await.expect("Error deleting incomplete 2FA record");
+        match mail::send_incomplete_2fa_login(
+            &user.email,
+            &login.ip_address,
+            &login.login_time,
+            &login.device_name,
+            &DeviceType::from_i32(login.device_type).to_string(),
+        )
+        .await
+        {
+            Ok(_) => {
+                if let Err(e) = login.delete(&mut conn).await {
+                    error!("Error deleting incomplete 2FA record: {e:#?}");
+                }
+            }
+            Err(e) => {
+                error!("Error sending incomplete 2FA email: {e:#?}");
+            }
+        }
     }
 }
143	src/api/core/two_factor/protected_actions.rs (new file)

@@ -0,0 +1,143 @@
use chrono::{DateTime, TimeDelta, Utc};
use rocket::{serde::json::Json, Route};

use crate::{
    api::EmptyResult,
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::{Error, MapResult},
    mail, CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![request_otp, verify_otp]
}

/// Data stored in the TwoFactor table in the db
#[derive(Debug, Serialize, Deserialize)]
pub struct ProtectedActionData {
    /// Token issued to validate the protected action
    pub token: String,
    /// UNIX timestamp of token issue.
    pub token_sent: i64,
    // The total amount of attempts
    pub attempts: u8,
}

impl ProtectedActionData {
    pub fn new(token: String) -> Self {
        Self {
            token,
            token_sent: Utc::now().timestamp(),
            attempts: 0,
        }
    }

    pub fn to_json(&self) -> String {
        serde_json::to_string(&self).unwrap()
    }

    pub fn from_json(string: &str) -> Result<Self, Error> {
        let res: Result<Self, serde_json::Error> = serde_json::from_str(string);
        match res {
            Ok(x) => Ok(x),
            Err(_) => err!("Could not decode ProtectedActionData from string"),
        }
    }

    pub fn add_attempt(&mut self) {
        self.attempts += 1;
    }
}

#[post("/accounts/request-otp")]
async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.mail_enabled() {
        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
    }

    let user = headers.user;

    // Only one Protected Action per user is allowed to take place, delete the previous one
    if let Some(pa) =
        TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::ProtectedActions as i32, &mut conn).await
    {
        pa.delete(&mut conn).await?;
    }

    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
    let pa_data = ProtectedActionData::new(generated_token);

    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::ProtectedActions, pa_data.to_json());
    twofactor.save(&mut conn).await?;

    mail::send_protected_action_token(&user.email, &pa_data.token).await?;

    Ok(())
}

#[derive(Deserialize, Serialize, Debug)]
#[serde(rename_all = "camelCase")]
struct ProtectedActionVerify {
    #[serde(rename = "OTP", alias = "otp")]
    otp: String,
}

#[post("/accounts/verify-otp", data = "<data>")]
async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    if !CONFIG.mail_enabled() {
        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
    }

    let user = headers.user;
    let data: ProtectedActionVerify = data.into_inner();

    // Delete the token after one validation attempt
    // This endpoint only gets called for the vault export, and doesn't need a second attempt
    validate_protected_action_otp(&data.otp, &user.uuid, true, &mut conn).await
}

pub async fn validate_protected_action_otp(
    otp: &str,
    user_uuid: &str,
    delete_if_valid: bool,
    conn: &mut DbConn,
) -> EmptyResult {
    let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
        .await
        .map_res("Protected action token not found, try sending the code again or restart the process")?;
    let mut pa_data = ProtectedActionData::from_json(&pa.data)?;

    pa_data.add_attempt();
    // Delete the token after x attempts if it has been used too many times
    // We use the 6, which should be more then enough for invalid attempts and multiple valid checks
    if pa_data.attempts > 6 {
        pa.delete(conn).await?;
        err!("Token has expired")
    }

    // Check if the token has expired (Using the email 2fa expiration time)
    let date =
        DateTime::from_timestamp(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.").naive_utc();
    let max_time = CONFIG.email_expiration_time() as i64;
    if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
        pa.delete(conn).await?;
        err!("Token has expired")
    }

    if !crypto::ct_eq(&pa_data.token, otp) {
        pa.save(conn).await?;
        err!("Token is invalid")
    }

    if delete_if_valid {
        pa.delete(conn).await?;
    }

    Ok(())
}
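For orientation, the token lifecycle enforced by validate_protected_action_otp can be modeled with plain chrono types. This sketch mirrors the attempt cap, timestamp expiry, and token comparison; the types are stand-ins, and the plain equality check replaces the real constant-time crypto::ct_eq.

use chrono::{DateTime, TimeDelta, Utc};

struct TokenState {
    token: String,
    token_sent: i64, // UNIX timestamp of issue
    attempts: u8,
}

fn check_token(state: &mut TokenState, otp: &str, max_age_secs: i64) -> Result<(), &'static str> {
    state.attempts += 1;
    if state.attempts > 6 {
        return Err("Token has expired"); // too many attempts: treat as expired
    }
    let sent = DateTime::from_timestamp(state.token_sent, 0)
        .ok_or("invalid timestamp")?
        .naive_utc();
    if sent + TimeDelta::try_seconds(max_age_secs).unwrap() < Utc::now().naive_utc() {
        return Err("Token has expired");
    }
    // The real code uses a constant-time compare; == stands in here.
    if state.token != otp {
        return Err("Token is invalid");
    }
    Ok(())
}

fn main() {
    let mut state = TokenState { token: "483921".into(), token_sent: Utc::now().timestamp(), attempts: 0 };
    assert!(check_token(&mut state, "483921", 600).is_ok());
}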
src/api/core/two_factor/webauthn.rs:

@@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -15,6 +15,7 @@ use crate::{
         DbConn,
     },
     error::Error,
+    util::NumberOrString,
     CONFIG,
 };

@@ -95,40 +96,42 @@ pub struct WebauthnRegistration {
 impl WebauthnRegistration {
     fn to_json(&self) -> Value {
         json!({
-            "Id": self.id,
-            "Name": self.name,
+            "id": self.id,
+            "name": self.name,
             "migrated": self.migrated,
         })
     }
 }

 #[post("/two-factor/get-webauthn", data = "<data>")]
-async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !CONFIG.domain_set() {
         err!("`DOMAIN` environment variable is not set. Webauthn disabled")
     }

-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    let data: PasswordOrOtpData = data.into_inner();
+    let user = headers.user;

-    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &mut conn).await?;
+    data.validate(&user, false, &mut conn).await?;
+
+    let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &mut conn).await?;
     let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
-        "Enabled": enabled,
-        "Keys": registrations_json,
-        "Object": "twoFactorWebAuthn"
+        "enabled": enabled,
+        "keys": registrations_json,
+        "object": "twoFactorWebAuthn"
     })))
 }

 #[post("/two-factor/get-webauthn-challenge", data = "<data>")]
-async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
+    let user = headers.user;
+
+    data.validate(&user, false, &mut conn).await?;

-    let registrations = get_webauthn_registrations(&headers.user.uuid, &mut conn)
+    let registrations = get_webauthn_registrations(&user.uuid, &mut conn)
         .await?
         .1
         .into_iter()
@@ -136,16 +139,16 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
     .collect();

     let (challenge, state) = WebauthnConfig::load().generate_challenge_register_options(
-        headers.user.uuid.as_bytes().to_vec(),
-        headers.user.email,
-        headers.user.name,
+        user.uuid.as_bytes().to_vec(),
+        user.email,
+        user.name,
         Some(registrations),
         None,
         None,
     )?;

     let type_ = TwoFactorType::WebauthnRegisterChallenge;
-    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;
+    TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;

     let mut challenge_value = serde_json::to_value(challenge.public_key)?;
     challenge_value["status"] = "ok".into();
@@ -154,101 +157,97 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
 }

 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EnableWebauthnData {
-    Id: NumberOrString, // 1..5
-    Name: String,
-    MasterPasswordHash: String,
-    DeviceResponse: RegisterPublicKeyCredentialCopy,
+    id: NumberOrString, // 1..5
+    name: String,
+    device_response: RegisterPublicKeyCredentialCopy,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 // This is copied from RegisterPublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RegisterPublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAttestationResponseRawCopy,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAttestationResponseRawCopy,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAttestationResponseRawCopy {
-    pub AttestationObject: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
+    #[serde(rename = "AttestationObject", alias = "attestationObject")]
+    pub attestation_object: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
 }

 impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
     fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAttestationResponseRaw {
-                attestation_object: r.Response.AttestationObject,
-                client_data_json: r.Response.ClientDataJson,
+                attestation_object: r.response.attestation_object,
+                client_data_json: r.response.client_data_json,
             },
-            type_: r.Type,
+            type_: r.r#type,
         }
     }
 }

 // This is copied from PublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct PublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAssertionResponseRawCopy,
-    pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAssertionResponseRawCopy,
+    pub extensions: Option<AuthenticationExtensionsClientOutputs>,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAssertionResponseRawCopy {
-    pub AuthenticatorData: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
-    pub Signature: Base64UrlSafeData,
-    pub UserHandle: Option<Base64UrlSafeData>,
-}
-
-#[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
-pub struct AuthenticationExtensionsClientOutputsCopy {
-    #[serde(default)]
-    pub Appid: bool,
+    pub authenticator_data: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
+    pub signature: Base64UrlSafeData,
+    pub user_handle: Option<Base64UrlSafeData>,
 }

 impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
     fn from(r: PublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAssertionResponseRaw {
-                authenticator_data: r.Response.AuthenticatorData,
-                client_data_json: r.Response.ClientDataJson,
-                signature: r.Response.Signature,
-                user_handle: r.Response.UserHandle,
+                authenticator_data: r.response.authenticator_data,
+                client_data_json: r.response.client_data_json,
+                signature: r.response.signature,
+                user_handle: r.response.user_handle,
             },
-            extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
-                appid: e.Appid,
-            }),
-            type_: r.Type,
+            extensions: r.extensions,
+            type_: r.r#type,
         }
     }
 }

 #[post("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableWebauthnData = data.into_inner().data;
+async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableWebauthnData = data.into_inner();
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Retrieve and delete the saved challenge state
     let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
@@ -263,13 +262,13 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header

     // Verify the credentials with the saved state
     let (credential, _data) =
-        WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;
+        WebauthnConfig::load().register_credential(&data.device_response.into(), &state, |_| Ok(false))?;

     let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
     // TODO: Check for repeated ID's
     registrations.push(WebauthnRegistration {
-        id: data.Id.into_i32()?,
-        name: data.Name,
+        id: data.id.into_i32()?,
+        name: data.name,
         migrated: false,

         credential,
@@ -285,28 +284,28 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header

     let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
     Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
+        "enabled": true,
+        "keys": keys_json,
+        "object": "twoFactorU2f"
     })))
 }

 #[put("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_webauthn_put(data: Json<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_webauthn(data, headers, conn).await
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct DeleteU2FData {
-    Id: NumberOrString,
-    MasterPasswordHash: String,
+    id: NumberOrString,
+    master_password_hash: String,
 }

 #[delete("/two-factor/webauthn", data = "<data>")]
-async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let id = data.data.Id.into_i32()?;
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
+async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let id = data.id.into_i32()?;
+    if !headers.user.check_valid_password(&data.master_password_hash) {
         err!("Invalid password");
     }

@@ -347,9 +346,9 @@ async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut
     let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
+        "enabled": true,
+        "keys": keys_json,
+        "object": "twoFactorU2f"
     })))
 }

@@ -402,8 +401,8 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
         ),
     };

-    let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
-    let rsp: PublicKeyCredential = rsp.data.into();
+    let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
+    let rsp: PublicKeyCredential = rsp.into();

     let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;

src/api/core/two_factor/yubikey.rs:

@@ -1,12 +1,12 @@
 use rocket::serde::json::Json;
 use rocket::Route;
 use serde_json::Value;
-use yubico::{config::Config, verify};
+use yubico::{config::Config, verify_async};

 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -21,32 +21,35 @@ pub fn routes() -> Vec<Route> {
     routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableYubikeyData {
-    MasterPasswordHash: String,
-    Key1: Option<String>,
-    Key2: Option<String>,
-    Key3: Option<String>,
-    Key4: Option<String>,
-    Key5: Option<String>,
-    Nfc: bool,
+    key1: Option<String>,
+    key2: Option<String>,
+    key3: Option<String>,
+    key4: Option<String>,
+    key5: Option<String>,
+    nfc: bool,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 #[derive(Deserialize, Serialize, Debug)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct YubikeyMetadata {
-    Keys: Vec<String>,
-    pub Nfc: bool,
+    #[serde(rename = "keys", alias = "Keys")]
+    keys: Vec<String>,
+    #[serde(rename = "nfc", alias = "Nfc")]
+    pub nfc: bool,
 }

 fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
-    let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
+    let data_keys = [&data.key1, &data.key2, &data.key3, &data.key4, &data.key5];

     data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
 }

-fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
+fn jsonify_yubikeys(yubikeys: Vec<String>) -> Value {
     let mut result = Value::Object(serde_json::Map::new());

     for (i, key) in yubikeys.into_iter().enumerate() {
@@ -73,26 +76,21 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
     let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);

     match CONFIG.yubico_server() {
-        Some(server) => {
-            tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
-        }
-        None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
+        Some(server) => verify_async(otp, config.set_api_hosts(vec![server])).await,
+        None => verify_async(otp, config).await,
     }
     .map_res("Failed to verify OTP")
     .and(Ok(()))
 }

 #[post("/two-factor/get-yubikey", data = "<data>")]
-async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Make sure the credentials are set
     get_yubico_credentials()?;

-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let user_uuid = &user.uuid;
     let yubikey_type = TwoFactorType::YubiKey as i32;
@@ -102,29 +100,32 @@ async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut
     if let Some(r) = r {
         let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;

-        let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
+        let mut result = jsonify_yubikeys(yubikey_metadata.keys);

-        result["Enabled"] = Value::Bool(true);
-        result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
-        result["Object"] = Value::String("twoFactorU2f".to_owned());
+        result["enabled"] = Value::Bool(true);
+        result["nfc"] = Value::Bool(yubikey_metadata.nfc);
+        result["object"] = Value::String("twoFactorU2f".to_owned());

         Ok(Json(result))
     } else {
         Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })))
     }
 }

 #[post("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableYubikeyData = data.into_inner().data;
+async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableYubikeyData = data.into_inner();
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
+    }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Check if we already have some data
     let mut yubikey_data =
@@ -137,8 +138,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,

     if yubikeys.is_empty() {
         return Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })));
     }

@@ -155,8 +156,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
     let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();

     let yubikey_metadata = YubikeyMetadata {
-        Keys: yubikey_ids,
-        Nfc: data.Nfc,
+        keys: yubikey_ids,
+        nfc: data.nfc,
     };

     yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
@@ -166,17 +167,17 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,

     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

-    let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
+    let mut result = jsonify_yubikeys(yubikey_metadata.keys);

-    result["Enabled"] = Value::Bool(true);
-    result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
-    result["Object"] = Value::String("twoFactorU2f".to_owned());
+    result["enabled"] = Value::Bool(true);
+    result["nfc"] = Value::Bool(yubikey_metadata.nfc);
+    result["object"] = Value::String("twoFactorU2f".to_owned());

     Ok(Json(result))
 }

 #[put("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_yubikey_put(data: Json<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_yubikey(data, headers, conn).await
 }

@@ -188,14 +189,10 @@ pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> Emp
     let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
     let response_id = &response[..12];

-    if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
+    if !yubikey_metadata.keys.contains(&response_id.to_owned()) {
         err!("Given Yubikey is not registered");
     }

-    let result = verify_yubikey_otp(response.to_owned()).await;
-
-    match result {
-        Ok(_answer) => Ok(()),
-        Err(_e) => err!("Failed to verify Yubikey against OTP server"),
-    }
+    verify_yubikey_otp(response.to_owned()).await.map_res("Failed to verify Yubikey against OTP server")?;
+    Ok(())
 }
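Context for the response_id check above: the first 12 characters of a YubiKey OTP are the device's stable public ID, so membership in the registered-key list can be tested before any round trip to the verification server. A standalone sketch with made-up key IDs:

// Reject OTPs from devices that were never registered, without a network call.
fn device_is_registered(otp: &str, registered: &[String]) -> bool {
    if otp.len() < 12 {
        return false; // a full OTP is 44 modhex chars; anything shorter is malformed
    }
    let response_id = &otp[..12];
    registered.iter().any(|id| id == response_id)
}

fn main() {
    let registered = vec!["cccccbcgebif".to_string()];
    let otp = "cccccbcgebifcgjhgflhhbcubrrgkhjlgcgjkdlgkrle";
    assert!(device_is_registered(otp, &registered));
    assert!(!device_is_registered("cccccbxxxxxxcgjhgflhhbcubrrgkhjlgcgjkdlgkrle", &registered));
}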
340	src/api/icons.rs
@@ -1,4 +1,5 @@
 use std::{
+    collections::HashMap,
     net::IpAddr,
     sync::Arc,
     time::{Duration, SystemTime},
@@ -16,14 +17,14 @@ use rocket::{http::ContentType, response::Redirect, Route};
 use tokio::{
     fs::{create_dir_all, remove_file, symlink_metadata, File},
     io::{AsyncReadExt, AsyncWriteExt},
-    net::lookup_host,
 };

 use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};

 use crate::{
     error::Error,
-    util::{get_reqwest_client_builder, Cached},
+    http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
+    util::Cached,
     CONFIG,
 };

@@ -49,48 +50,32 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
     let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
     let pool_idle_timeout = Duration::from_secs(10);
     // Reuse the client between requests
-    let client = get_reqwest_client_builder()
+    get_reqwest_client_builder()
         .cookie_provider(Arc::clone(&cookie_store))
         .timeout(icon_download_timeout)
         .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
         .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-        .trust_dns(true)
-        .default_headers(default_headers.clone());
-
-    match client.build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder()
-                .cookie_provider(cookie_store)
-                .timeout(icon_download_timeout)
-                .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
-                .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-                .trust_dns(false)
-                .default_headers(default_headers)
-                .build()
-                .expect("Failed to build client")
-        }
-    }
+        .default_headers(default_headers.clone())
+        .build()
+        .expect("Failed to build client")
 });

 // Build Regex only once since this takes a lot of time.
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

-// Special HashMap which holds the user defined Regex to speedup matching the regex.
-static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
-
-async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
+#[get("/<domain>/icon.png")]
+fn icon_external(domain: &str) -> Option<Redirect> {
     if !is_valid_domain(domain) {
         warn!("Invalid domain: {}", domain);
         return None;
     }

-    if check_domain_blacklist_reason(domain).await.is_some() {
+    if should_block_address(domain) {
+        warn!("Blocked address: {}", domain);
         return None;
     }

-    let url = template.replace("{}", domain);
+    let url = CONFIG._icon_service_url().replace("{}", domain);
     match CONFIG.icon_redirect_code() {
         301 => Some(Redirect::moved(url)), // legacy permanent redirect
         302 => Some(Redirect::found(url)), // legacy temporary redirect
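The trust_dns(true) plus fallback-rebuild dance disappears here because address filtering and DNS handling move into the shared http_client module (should_block_address, CustomHttpClientError), so the builder chain can be linear. A sketch of the same reqwest builder shape; the timeout values and header set are illustrative, not the project's configuration:

use std::time::Duration;

use reqwest::{header, Client};

fn build_icon_client() -> Client {
    let mut default_headers = header::HeaderMap::new();
    default_headers.insert(header::USER_AGENT, header::HeaderValue::from_static("icon-fetcher"));

    Client::builder()
        .timeout(Duration::from_secs(10))           // whole-request timeout
        .pool_max_idle_per_host(5)                  // cap idle connections per host
        .pool_idle_timeout(Duration::from_secs(10)) // drop idle connections after 10s
        .default_headers(default_headers)
        .build()
        .expect("Failed to build client")
}

fn main() {
    let _client = build_icon_client();
}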
@@ -103,11 +88,6 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
     }
 }

-#[get("/<domain>/icon.png")]
-async fn icon_external(domain: &str) -> Option<Redirect> {
-    icon_redirect(domain, &CONFIG._icon_service_url()).await
-}
-
 #[get("/<domain>/icon.png")]
 async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
     const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@@ -121,6 +101,15 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
         );
     }

+    if should_block_address(domain) {
+        warn!("Blocked address: {}", domain);
+        return Cached::ttl(
+            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
+            CONFIG.icon_cache_negttl(),
+            true,
+        );
+    }
+
     match get_icon(domain).await {
         Some((icon, icon_type)) => {
             Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
|
@ -166,155 +155,6 @@ fn is_valid_domain(domain: &str) -> bool {
|
|||
true
|
||||
}
|
||||
|
||||
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
|
||||
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
|
||||
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
|
||||
#[allow(clippy::nonminimal_bool)]
|
||||
#[cfg(not(feature = "unstable"))]
|
||||
fn is_global(ip: IpAddr) -> bool {
|
||||
match ip {
|
||||
IpAddr::V4(ip) => {
|
||||
// check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
|
||||
// globally routable addresses in the 192.0.0.0/24 range.
|
||||
if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
|
||||
return true;
|
||||
}
|
||||
!ip.is_private()
|
||||
&& !ip.is_loopback()
|
||||
&& !ip.is_link_local()
|
||||
&& !ip.is_broadcast()
|
||||
&& !ip.is_documentation()
|
||||
&& !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
|
||||
&& !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
|
||||
&& !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
|
||||
&& !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
|
||||
// Make sure the address is not in 0.0.0.0/8
|
||||
&& ip.octets()[0] != 0
|
||||
}
|
||||
IpAddr::V6(ip) => {
|
||||
if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
|
||||
true
|
||||
} else {
|
||||
!ip.is_multicast()
|
||||
&& !ip.is_loopback()
|
||||
&& !((ip.segments()[0] & 0xffc0) == 0xfe80)
|
||||
&& !((ip.segments()[0] & 0xfe00) == 0xfc00)
|
||||
&& !ip.is_unspecified()
|
||||
&& !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "unstable")]
|
||||
fn is_global(ip: IpAddr) -> bool {
|
||||
ip.is_global()
|
||||
}
|
||||
|
||||
/// These are some tests to check that the implementations match
|
||||
/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
|
||||
/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
|
||||
/// Note that the is_global implementation is subject to change as new IP RFCs are created
|
||||
///
|
||||
/// To run while showing progress output:
|
||||
/// cargo test --features sqlite,unstable -- --nocapture --ignored
|
||||
#[cfg(test)]
|
||||
#[cfg(feature = "unstable")]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_ipv4_global() {
|
||||
for a in 0..u8::MAX {
|
||||
println!("Iter: {}/255", a);
|
||||
for b in 0..u8::MAX {
|
||||
for c in 0..u8::MAX {
|
||||
for d in 0..u8::MAX {
|
||||
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
|
||||
assert_eq!(ip.is_global(), is_global(ip))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_ipv6_global() {
|
||||
use ring::rand::{SecureRandom, SystemRandom};
|
||||
let mut v = [0u8; 16];
|
||||
let rand = SystemRandom::new();
|
||||
for i in 0..1_000 {
|
||||
println!("Iter: {}/1_000", i);
|
||||
for _ in 0..10_000_000 {
|
||||
rand.fill(&mut v).expect("Error generating random values");
|
||||
let ip = IpAddr::V6(std::net::Ipv6Addr::new(
|
||||
(v[14] as u16) << 8 | v[15] as u16,
|
||||
(v[12] as u16) << 8 | v[13] as u16,
|
||||
(v[10] as u16) << 8 | v[11] as u16,
|
||||
(v[8] as u16) << 8 | v[9] as u16,
|
||||
(v[6] as u16) << 8 | v[7] as u16,
|
||||
(v[4] as u16) << 8 | v[5] as u16,
|
||||
(v[2] as u16) << 8 | v[3] as u16,
|
||||
(v[0] as u16) << 8 | v[1] as u16,
|
||||
));
|
||||
assert_eq!(ip.is_global(), is_global(ip))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
enum DomainBlacklistReason {
|
||||
Regex,
|
||||
IP,
|
||||
}
|
||||
|
||||
use cached::proc_macro::cached;
|
||||
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
|
||||
async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
|
||||
// First check the blacklist regex if there is a match.
|
||||
// This prevents the blocked domain(s) from being leaked via a DNS lookup.
|
||||
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
|
||||
// Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
|
||||
let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
|
||||
regex.is_match(domain)
|
||||
} else {
|
||||
// Clear the current list if the previous key doesn't exists.
|
||||
// To prevent growing of the HashMap after someone has changed it via the admin interface.
|
||||
if ICON_BLACKLIST_REGEX.len() >= 1 {
|
||||
ICON_BLACKLIST_REGEX.clear();
|
||||
}
|
||||
|
||||
// Generate the regex to store in too the Lazy Static HashMap.
|
||||
let blacklist_regex = Regex::new(&blacklist).unwrap();
|
||||
let is_match = blacklist_regex.is_match(domain);
|
||||
ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
|
||||
|
||||
is_match
|
||||
};
|
||||
|
||||
if is_match {
|
||||
debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
|
||||
return Some(DomainBlacklistReason::Regex);
|
||||
}
|
||||
}
|
||||
|
||||
if CONFIG.icon_blacklist_non_global_ips() {
|
||||
if let Ok(s) = lookup_host((domain, 0)).await {
|
||||
for addr in s {
|
||||
if !is_global(addr.ip()) {
|
||||
debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
|
||||
return Some(DomainBlacklistReason::IP);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
|
||||
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
|
||||
|
||||
|
@@ -342,6 +182,13 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
             Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
+            // If this error comes from the custom resolver, this means this is a blocked domain
+            // or non global IP, don't save the miss file in this case to avoid leaking it
+            if let Some(error) = CustomHttpClientError::downcast_ref(&e) {
+                warn!("{error}");
+                return None;
+            }
+
             warn!("Unable to download icon: {:?}", e);
             let miss_indicator = path + ".miss";
             save_icon(&miss_indicator, &[]).await;
@@ -491,42 +338,48 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     let ssldomain = format!("https://{domain}");
     let httpdomain = format!("http://{domain}");

-    // First check the domain as given during the request for both HTTPS and HTTP.
-    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
-        Ok(c) => Ok(c),
-        Err(e) => {
-            let mut sub_resp = Err(e);
-
-            // When the domain is not an IP, and has more then one dot, remove all subdomains.
-            let is_ip = domain.parse::<IpAddr>();
-            if is_ip.is_err() && domain.matches('.').count() > 1 {
-                let mut domain_parts = domain.split('.');
-                let base_domain = format!(
-                    "{base}.{tld}",
-                    tld = domain_parts.next_back().unwrap(),
-                    base = domain_parts.next_back().unwrap()
-                );
-                if is_valid_domain(&base_domain) {
-                    let sslbase = format!("https://{base_domain}");
-                    let httpbase = format!("http://{base_domain}");
-                    debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
-
-                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
-                }
-
-            // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
-            } else if is_ip.is_err() && domain.matches('.').count() < 2 {
-                let www_domain = format!("www.{domain}");
-                if is_valid_domain(&www_domain) {
-                    let sslwww = format!("https://{www_domain}");
-                    let httpwww = format!("http://{www_domain}");
-                    debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
-
-                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
-                }
-            }
-            sub_resp
-        }
-    };
+    // First check the domain as given during the request for HTTPS.
+    let resp = match get_page(&ssldomain).await {
+        Err(e) if CustomHttpClientError::downcast_ref(&e).is_none() => {
+            // If we get an error that is not caused by the blacklist, we retry with HTTP
+            match get_page(&httpdomain).await {
+                mut sub_resp @ Err(_) => {
+                    // When the domain is not an IP, and has more then one dot, remove all subdomains.
+                    let is_ip = domain.parse::<IpAddr>();
+                    if is_ip.is_err() && domain.matches('.').count() > 1 {
+                        let mut domain_parts = domain.split('.');
+                        let base_domain = format!(
+                            "{base}.{tld}",
+                            tld = domain_parts.next_back().unwrap(),
+                            base = domain_parts.next_back().unwrap()
+                        );
+                        if is_valid_domain(&base_domain) {
+                            let sslbase = format!("https://{base_domain}");
+                            let httpbase = format!("http://{base_domain}");
+                            debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
+
+                            sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
+                        }
+
+                    // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
+                    } else if is_ip.is_err() && domain.matches('.').count() < 2 {
+                        let www_domain = format!("www.{domain}");
+                        if is_valid_domain(&www_domain) {
+                            let sslwww = format!("https://{www_domain}");
+                            let httpwww = format!("http://{www_domain}");
+                            debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
+
+                            sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
+                        }
+                    }
+                    sub_resp
+                }
+                res => res,
+            }
+        }
+        // If we get a result or a blacklist error, just continue
+        res => res,
+    };

     // Create the iconlist
@@ -573,21 +426,12 @@ async fn get_page(url: &str) -> Result<Response, Error> {
 }

 async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-    match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
-        Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
-        Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
-        None => (),
-    }
-
     let mut client = CLIENT.get(url);
     if !referer.is_empty() {
         client = client.header("Referer", referer)
     }

-    match client.send().await {
-        Ok(c) => c.error_for_status().map_err(Into::into),
-        Err(e) => err_silent!(format!("{e}")),
-    }
+    Ok(client.send().await?.error_for_status()?)
 }
@@ -603,6 +447,9 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Err
 /// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
 /// ```
 fn get_icon_priority(href: &str, sizes: &str) -> u8 {
+    static PRIORITY_MAP: Lazy<HashMap<&'static str, u8>> =
+        Lazy::new(|| [(".png", 10), (".jpg", 20), (".jpeg", 20)].into_iter().collect());
+
     // Check if there is a dimension set
     let (width, height) = parse_sizes(sizes);

@@ -627,13 +474,9 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
             200
         }
     } else {
-        // Change priority by file extension
-        if href.ends_with(".png") {
-            10
-        } else if href.ends_with(".jpg") || href.ends_with(".jpeg") {
-            20
-        } else {
-            30
+        match href.rsplit_once('.') {
+            Some((_, extension)) => PRIORITY_MAP.get(&*extension.to_ascii_lowercase()).copied().unwrap_or(30),
+            None => 30,
         }
     }
 }
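The rewritten branch replaces an if/else chain with a table-driven lookup. A standalone sketch of the idea follows; note that the keys here are stored without the leading dot so they match rsplit_once('.') output, which is an illustrative choice for this sketch rather than a claim about the map above, and OnceLock stands in for the once_cell Lazy used in the project.

use std::collections::HashMap;
use std::sync::OnceLock;

fn priority_map() -> &'static HashMap<&'static str, u8> {
    static MAP: OnceLock<HashMap<&'static str, u8>> = OnceLock::new();
    MAP.get_or_init(|| [("png", 10), ("jpg", 20), ("jpeg", 20)].into_iter().collect())
}

fn extension_priority(href: &str) -> u8 {
    match href.rsplit_once('.') {
        // lower number = preferred; unknown extensions fall back to 30
        Some((_, ext)) => priority_map().get(&*ext.to_ascii_lowercase()).copied().unwrap_or(30),
        None => 30,
    }
}

fn main() {
    assert_eq!(extension_priority("https://example.com/favicon.PNG"), 10);
    assert_eq!(extension_priority("https://example.com/logo.jpeg"), 20);
    assert_eq!(extension_priority("https://example.com/icon"), 30);
}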
@@ -670,12 +513,6 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
 }

 async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
-    match check_domain_blacklist_reason(domain).await {
-        Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
-        Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
-        None => (),
-    }
-
     let icon_result = get_icon_url(domain).await?;

     let mut buffer = Bytes::new();
@@ -711,22 +548,19 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
                 _ => debug!("Extracted icon from data:image uri is invalid"),
             };
         } else {
-            match get_page_with_referer(&icon.href, &icon_result.referer).await {
-                Ok(res) => {
-                    buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
-
-                    // Check if the icon type is allowed, else try an icon from the list.
-                    icon_type = get_icon_type(&buffer);
-                    if icon_type.is_none() {
-                        buffer.clear();
-                        debug!("Icon from {}, is not a valid image type", icon.href);
-                        continue;
-                    }
-                    info!("Downloaded icon from {}", icon.href);
-                    break;
-                }
-                Err(e) => debug!("{:?}", e),
-            };
+            let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
+
+            buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
+
+            // Check if the icon type is allowed, else try an icon from the list.
+            icon_type = get_icon_type(&buffer);
+            if icon_type.is_none() {
+                buffer.clear();
+                debug!("Icon from {}, is not a valid image type", icon.href);
+                continue;
+            }
+            info!("Downloaded icon from {}", icon.href);
+            break;
         }
     }

@@ -789,7 +623,7 @@ use cookie_store::CookieStore;
 pub struct Jar(std::sync::RwLock<CookieStore>);

 impl reqwest::cookie::CookieStore for Jar {
-    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
+    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &HeaderValue>, url: &url::Url) {
         use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError};
         use time::Duration;

@@ -808,7 +642,7 @@ impl reqwest::cookie::CookieStore for Jar {
         cookie_store.store_response_cookies(cookies, url);
     }

-    fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
+    fn cookies(&self, url: &url::Url) -> Option<HeaderValue> {
         let cookie_store = self.0.read().unwrap();
         let s = cookie_store
             .get_request_values(url)
@@ -820,7 +654,7 @@ impl reqwest::cookie::CookieStore for Jar {
             return None;
         }

-        header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
+        HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
     }
 }

src/api/identity.rs:

@@ -9,10 +9,13 @@ use serde_json::Value;

 use crate::{
     api::{
-        core::accounts::{PreloginData, RegisterData, _prelogin, _register},
-        core::log_user_event,
-        core::two_factor::{duo, email, email::EmailTokenData, yubikey},
-        ApiResult, EmptyResult, JsonResult, JsonUpcase,
+        core::{
+            accounts::{PreloginData, RegisterData, _prelogin, _register},
+            log_user_event,
+            two_factor::{authenticator, duo, duo_oidc, email, enforce_2fa_policy, webauthn, yubikey},
+        },
+        push::register_push_device,
+        ApiResult, EmptyResult, JsonResult,
     },
     auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
     db::{models::*, DbConn},
@@ -103,8 +106,13 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {

     // Common
     let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+    // ---
+    // Disabled this variable, it was used to generate the JWT
+    // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
     device.save(conn).await?;

     let result = json!({
@@ -112,21 +120,25 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
         "expires_in": expires_in,
         "token_type": "Bearer",
         "refresh_token": device.refresh_token,
-        "Key": user.akey,
-        "PrivateKey": user.private_key,
-
-        "Kdf": user.client_kdf_type,
-        "KdfIterations": user.client_kdf_iter,
-        "KdfMemory": user.client_kdf_memory,
-        "KdfParallelism": user.client_kdf_parallelism,
-        "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
         "scope": scope,
         "unofficialServer": true,
     });

     Ok(Json(result))
 }

+#[derive(Default, Deserialize, Serialize)]
+#[serde(rename_all = "camelCase")]
+struct MasterPasswordPolicy {
+    min_complexity: u8,
+    min_length: u32,
+    require_lower: bool,
+    require_upper: bool,
+    require_numbers: bool,
+    require_special: bool,
+    enforce_on_login: bool,
+}
+
 async fn _password_login(
     data: ConnectData,
     user_uuid: &mut Option<String>,
@@ -153,20 +165,22 @@ async fn _password_login(
     // Set the user_uuid here to be passed back used for event logging.
     *user_uuid = Some(user.uuid.clone());

-    // Check password
-    let password = data.password.as_ref().unwrap();
-    if let Some(auth_request_uuid) = data.auth_request.clone() {
-        if let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await {
-            if !auth_request.check_access_code(password) {
-                err!(
-                    "Username or access code is incorrect. Try again",
-                    format!("IP: {}. Username: {}.", ip.ip, username),
-                    ErrorEvent {
-                        event: EventType::UserFailedLogIn,
-                    }
-                )
-            }
-        } else {
+    // Check if the user is disabled
+    if !user.enabled {
+        err!(
+            "This user has been disabled",
+            format!("IP: {}. Username: {}.", ip.ip, username),
+            ErrorEvent {
+                event: EventType::UserFailedLogIn
+            }
+        )
+    }
+
+    let password = data.password.as_ref().unwrap();
+
+    // If we get an auth request, we don't check the user's password, but the access code of the auth request
+    if let Some(ref auth_request_uuid) = data.auth_request {
+        let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await else {
             err!(
                 "Auth request not found. Try again.",
                 format!("IP: {}. Username: {}.", ip.ip, username),
@ -174,6 +188,20 @@ async fn _password_login(
|
|||
event: EventType::UserFailedLogIn,
|
||||
}
|
||||
)
|
||||
};
|
||||
|
||||
if auth_request.user_uuid != user.uuid
|
||||
|| !auth_request.approved.unwrap_or(false)
|
||||
|| ip.ip.to_string() != auth_request.request_ip
|
||||
|| !auth_request.check_access_code(password)
|
||||
{
|
||||
err!(
|
||||
"Username or access code is incorrect. Try again",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn,
|
||||
}
|
||||
)
|
||||
}
|
||||
} else if !user.check_valid_password(password) {
|
||||
err!(
|
||||
|
@ -185,8 +213,8 @@ async fn _password_login(
|
|||
)
|
||||
}
|
||||
|
||||
// Change the KDF Iterations
|
||||
if user.password_iterations != CONFIG.password_iterations() {
|
||||
// Change the KDF Iterations (only when not logging in with an auth request)
|
||||
if data.auth_request.is_none() && user.password_iterations != CONFIG.password_iterations() {
|
||||
user.password_iterations = CONFIG.password_iterations();
|
||||
user.set_password(password, None, false, None);
|
||||
|
||||
|
@ -195,17 +223,6 @@ async fn _password_login(
|
|||
}
|
||||
}
|
||||
|
||||
// Check if the user is disabled
|
||||
if !user.enabled {
|
||||
err!(
|
||||
"This user has been disabled",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
||||
|
@ -242,10 +259,10 @@ async fn _password_login(
|
|||
|
||||
let (mut device, new_device) = get_device(&data, conn, &user).await;
|
||||
|
||||
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
|
||||
let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() && new_device {
|
||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
|
||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
|
||||
error!("Error sending new device email: {:#?}", e);
|
||||
|
||||
if CONFIG.require_device_email() {
|
||||
|
@ -259,11 +276,51 @@ async fn _password_login(
|
|||
}
|
||||
}
|
||||
|
||||
// register push device
|
||||
if !new_device {
|
||||
register_push_device(&mut device, conn).await?;
|
||||
}
|
||||
|
||||
// Common
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
|
||||
// ---
|
||||
// Disabled this variable, it was used to generate the JWT
|
||||
// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
|
||||
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
|
||||
// ---
|
||||
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
|
||||
device.save(conn).await?;
|
||||
|
||||
// Fetch all valid Master Password Policies and merge them into one with all true's and larges numbers as one policy
|
||||
let master_password_policies: Vec<MasterPasswordPolicy> =
|
||||
OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
|
||||
&user.uuid,
|
||||
OrgPolicyType::MasterPassword,
|
||||
conn,
|
||||
)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|p| serde_json::from_str(&p.data).ok())
|
||||
.collect();
|
||||
|
||||
let master_password_policy = if !master_password_policies.is_empty() {
|
||||
let mut mpp_json = json!(master_password_policies.into_iter().reduce(|acc, policy| {
|
||||
MasterPasswordPolicy {
|
||||
min_complexity: acc.min_complexity.max(policy.min_complexity),
|
||||
min_length: acc.min_length.max(policy.min_length),
|
||||
require_lower: acc.require_lower || policy.require_lower,
|
||||
require_upper: acc.require_upper || policy.require_upper,
|
||||
require_numbers: acc.require_numbers || policy.require_numbers,
|
||||
require_special: acc.require_special || policy.require_special,
|
||||
enforce_on_login: acc.enforce_on_login || policy.enforce_on_login,
|
||||
}
|
||||
}));
|
||||
mpp_json["object"] = json!("masterPasswordPolicy");
|
||||
mpp_json
|
||||
} else {
|
||||
json!({"object": "masterPasswordPolicy"})
|
||||
};
|
||||
|
||||
let mut result = json!({
|
||||
"access_token": access_token,
|
||||
"expires_in": expires_in,
|
||||
|
@ -277,9 +334,11 @@ async fn _password_login(
|
|||
"KdfIterations": user.client_kdf_iter,
|
||||
"KdfMemory": user.client_kdf_memory,
|
||||
"KdfParallelism": user.client_kdf_parallelism,
|
||||
"ResetMasterPassword": false,// TODO: Same as above
|
||||
"ResetMasterPassword": false, // TODO: Same as above
|
||||
"ForcePasswordReset": false,
|
||||
"MasterPasswordPolicy": master_password_policy,
|
||||
|
||||
"scope": scope,
|
||||
"unofficialServer": true,
|
||||
"UserDecryptionOptions": {
|
||||
"HasMasterPassword": !user.password_hash.is_empty(),
|
||||
"Object": "userDecryptionOptions"
|
||||
|
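The reduce above merges every applicable master-password policy into a single strictest policy: numeric limits take the maximum, booleans are OR-ed. A minimal sketch of the same fold, assuming nothing but the standard library (the Policy type here is illustrative, not vaultwarden's OrgPolicy):

    #[derive(Debug, PartialEq)]
    struct Policy {
        min_length: u32,
        require_special: bool,
    }

    fn main() {
        let policies = vec![
            Policy { min_length: 8, require_special: false },
            Policy { min_length: 12, require_special: true },
        ];
        // Same shape as the reduce in _password_login: max() for numbers, || for booleans.
        let merged = policies
            .into_iter()
            .reduce(|acc, p| Policy {
                min_length: acc.min_length.max(p.min_length),
                require_special: acc.require_special || p.require_special,
            })
            .unwrap();
        assert_eq!(merged, Policy { min_length: 12, require_special: true });
    }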
@@ -358,7 +417,7 @@ async fn _user_api_key_login(
 if CONFIG.mail_enabled() && new_device {
 let now = Utc::now().naive_utc();
-if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
+if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
 error!("Error sending new device email: {:#?}", e);

 if CONFIG.require_device_email() {

@@ -374,8 +433,13 @@ async fn _user_api_key_login(
 // Common
 let scope_vec = vec!["api".into()];
-let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+// ---
+// Disabled this variable, it was used to generate the JWT
+// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+// See: https://github.com/dani-garcia/vaultwarden/issues/4156
+// ---
+// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
 device.save(conn).await?;

 info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);

@@ -393,9 +457,8 @@ async fn _user_api_key_login(
 "KdfIterations": user.client_kdf_iter,
 "KdfMemory": user.client_kdf_memory,
 "KdfParallelism": user.client_kdf_parallelism,
-"ResetMasterPassword": false, // TODO: Same as above
+"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
 "scope": "api",
 "unofficialServer": true,
 });

 Ok(Json(result))

@@ -427,7 +490,6 @@ async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &
 "expires_in": 3600,
 "token_type": "Bearer",
 "scope": "api.organization",
 "unofficialServer": true,
 })))
 }

@@ -453,32 +515,34 @@ async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Devi
 }

 async fn twofactor_auth(
-user_uuid: &str,
+user: &User,
 data: &ConnectData,
 device: &mut Device,
 ip: &ClientIp,
 conn: &mut DbConn,
 ) -> ApiResult<Option<String>> {
-let twofactors = TwoFactor::find_by_user(user_uuid, conn).await;
+let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;

 // No twofactor token if twofactor is disabled
 if twofactors.is_empty() {
+enforce_2fa_policy(user, &user.uuid, device.atype, &ip.ip, conn).await?;
 return Ok(None);
 }

-TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;
+TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, device.atype, ip, conn).await?;

 let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
 let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one

 let twofactor_code = match data.two_factor_token {
 Some(ref code) => code,
-None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"),
+None => {
+err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?, "2FA token not provided")
+}
 };

 let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);

-use crate::api::core::two_factor as _tf;
 use crate::crypto::ct_eq;

 let selected_data = _selected_data(selected_twofactor);

@@ -486,17 +550,31 @@ async fn twofactor_auth(
 match TwoFactorType::from_i32(selected_id) {
 Some(TwoFactorType::Authenticator) => {
-_tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await?
+authenticator::validate_totp_code_str(&user.uuid, twofactor_code, &selected_data?, ip, conn).await?
 }
-Some(TwoFactorType::Webauthn) => {
-_tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?
-}
-Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
+Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
+Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
 Some(TwoFactorType::Duo) => {
-_tf::duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
+match CONFIG.duo_use_iframe() {
+true => {
+// Legacy iframe prompt flow
+duo::validate_duo_login(&user.email, twofactor_code, conn).await?
+}
+false => {
+// OIDC based flow
+duo_oidc::validate_duo_login(
+&user.email,
+twofactor_code,
+data.client_id.as_ref().unwrap(),
+data.device_identifier.as_ref().unwrap(),
+conn,
+)
+.await?
+}
+}
 }
 Some(TwoFactorType::Email) => {
-_tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await?
+email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
 }

 Some(TwoFactorType::Remember) => {

@@ -506,7 +584,7 @@ async fn twofactor_auth(
 }
 _ => {
 err_json!(
-_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?,
+_json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?,
 "2FA Remember token not provided"
 )
 }

@@ -520,7 +598,7 @@ async fn twofactor_auth(
 ),
 }

-TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;
+TwoFactorIncomplete::mark_complete(&user.uuid, &device.uuid, conn).await?;

 if !CONFIG.disable_2fa_remember() && remember == 1 {
 Ok(Some(device.refresh_twofactor_remember()))

@@ -534,14 +612,20 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
 tf.map(|t| t.data).map_res("Two factor doesn't exist")
 }

-async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> {
-use crate::api::core::two_factor;
-
+async fn _json_err_twofactor(
+providers: &[i32],
+user_uuid: &str,
+data: &ConnectData,
+conn: &mut DbConn,
+) -> ApiResult<Value> {
 let mut result = json!({
 "error" : "invalid_grant",
 "error_description" : "Two factor required.",
-"TwoFactorProviders" : providers,
-"TwoFactorProviders2" : {} // { "0" : null }
+"TwoFactorProviders" : providers.iter().map(ToString::to_string).collect::<Vec<String>>(),
+"TwoFactorProviders2" : {}, // { "0" : null }
+"MasterPasswordPolicy": {
+"Object": "masterPasswordPolicy"
+}
 });

 for provider in providers {

@@ -551,7 +635,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
 Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }

 Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
-let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
+let request = webauthn::generate_webauthn_login(user_uuid, conn).await?;
 result["TwoFactorProviders2"][provider.to_string()] = request.0;
 }

@@ -561,12 +645,30 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
 None => err!("User does not exist"),
 };

-let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
-
-result["TwoFactorProviders2"][provider.to_string()] = json!({
-"Host": host,
-"Signature": signature,
-});
+match CONFIG.duo_use_iframe() {
+true => {
+// Legacy iframe prompt flow
+let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
+result["TwoFactorProviders2"][provider.to_string()] = json!({
+"Host": host,
+"Signature": signature,
+})
+}
+false => {
+// OIDC based flow
+let auth_url = duo_oidc::get_duo_auth_url(
+&email,
+data.client_id.as_ref().unwrap(),
+data.device_identifier.as_ref().unwrap(),
+conn,
+)
+.await?;
+
+result["TwoFactorProviders2"][provider.to_string()] = json!({
+"AuthUrl": auth_url,
+})
+}
+}
 }

 Some(tf_type @ TwoFactorType::YubiKey) => {

@@ -578,13 +680,11 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
 let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;

 result["TwoFactorProviders2"][provider.to_string()] = json!({
-"Nfc": yubikey_metadata.Nfc,
+"Nfc": yubikey_metadata.nfc,
 })
 }

 Some(tf_type @ TwoFactorType::Email) => {
-use crate::api::core::two_factor as _tf;
-
 let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
 Some(tf) => tf,
 None => err!("No twofactor email registered"),

@@ -592,10 +692,10 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
 // Send email immediately if email is the only 2FA option
 if providers.len() == 1 {
-_tf::email::send_token(user_uuid, conn).await?
+email::send_token(user_uuid, conn).await?
 }

-let email_data = EmailTokenData::from_json(&twofactor.data)?;
+let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
 result["TwoFactorProviders2"][provider.to_string()] = json!({
 "Email": email::obscure_email(&email_data.email),
 })

@@ -609,19 +709,18 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
 }

 #[post("/accounts/prelogin", data = "<data>")]
-async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
+async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
 _prelogin(data, conn).await
 }

 #[post("/accounts/register", data = "<data>")]
-async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
+async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
 _register(data, conn).await
 }

 // https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
 // https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
 #[derive(Debug, Clone, Default, FromForm)]
 #[allow(non_snake_case)]
 struct ConnectData {
 #[field(name = uncased("grant_type"))]
 #[field(name = uncased("granttype"))]
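The stacked `#[field(name = uncased(...))]` attributes on ConnectData are Rocket 0.5 form-field aliases: each one adds a case-insensitive spelling that binds to the same field. A sketch of the pattern, assuming a Rocket dependency (struct and field names are illustrative):

    use rocket::FromForm;

    #[derive(FromForm)]
    struct TokenRequest {
        // Both `grant_type=...` and `GrantType=...` in the form body bind here.
        #[field(name = uncased("grant_type"))]
        #[field(name = uncased("granttype"))]
        grant_type: String,
    }

    fn main() {}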
src/api/mod.rs

@@ -23,7 +23,7 @@ pub use crate::api::{
 icons::routes as icons_routes,
 identity::routes as identity_routes,
 notifications::routes as notifications_routes,
-notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS},
+notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
 push::{
 push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
 unregister_push_device,

@@ -32,47 +32,39 @@ pub use crate::api::{
 web::routes as web_routes,
 web::static_files,
 };
-use crate::util;
+use crate::db::{models::User, DbConn};

 // Type aliases for API methods results
 type ApiResult<T> = Result<T, crate::error::Error>;
 pub type JsonResult = ApiResult<Json<Value>>;
 pub type EmptyResult = ApiResult<()>;

-type JsonUpcase<T> = Json<util::UpCase<T>>;
-type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
-type JsonVec<T> = Json<Vec<T>>;
-
 // Common structs representing JSON data received
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
-struct PasswordData {
-MasterPasswordHash: String,
+#[serde(rename_all = "camelCase")]
+struct PasswordOrOtpData {
+master_password_hash: Option<String>,
+otp: Option<String>,
 }

-#[derive(Deserialize, Debug, Clone)]
-#[serde(untagged)]
-enum NumberOrString {
-Number(i32),
-String(String),
-}
-
-impl NumberOrString {
-fn into_string(self) -> String {
-match self {
-NumberOrString::Number(n) => n.to_string(),
-NumberOrString::String(s) => s,
-}
-}
-
-#[allow(clippy::wrong_self_convention)]
-fn into_i32(&self) -> ApiResult<i32> {
-use std::num::ParseIntError as PIE;
-match self {
-NumberOrString::Number(n) => Ok(*n),
-NumberOrString::String(s) => {
-s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
+impl PasswordOrOtpData {
+/// Tokens used via this struct can be used multiple times during the process
+/// First for the validation to continue, after that to enable or validate the following actions
+/// This is different per caller, so it can be adjusted to delete the token or not
+pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
+use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
+
+match (self.master_password_hash.as_deref(), self.otp.as_deref()) {
+(Some(pw_hash), None) => {
+if !user.check_valid_password(pw_hash) {
+err!("Invalid password");
+}
+}
+(None, Some(otp)) => {
+validate_protected_action_otp(otp, &user.uuid, delete_if_valid, conn).await?;
+}
+_ => err!("No validation provided"),
 }
+Ok(())
+}
 }
src/api/notifications.rs

@@ -1,23 +1,11 @@
-use std::{
-net::{IpAddr, SocketAddr},
-sync::Arc,
-time::Duration,
-};
+use std::{net::IpAddr, sync::Arc, time::Duration};

 use chrono::{NaiveDateTime, Utc};
 use rmpv::Value;
-use rocket::{
-futures::{SinkExt, StreamExt},
-Route,
-};
-use tokio::{
-net::{TcpListener, TcpStream},
-sync::mpsc::Sender,
-};
-use tokio_tungstenite::{
-accept_hdr_async,
-tungstenite::{handshake, Message},
-};
+use rocket::{futures::StreamExt, Route};
+use tokio::sync::mpsc::Sender;
+
+use rocket_ws::{Message, WebSocket};

 use crate::{
 auth::{ClientIp, WsAccessTokenHeader},

@@ -30,7 +18,7 @@ use crate::{
 use once_cell::sync::Lazy;

-static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
+pub static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
 Arc::new(WebSocketUsers {
 map: Arc::new(dashmap::DashMap::new()),
 })

@@ -47,8 +35,15 @@ use super::{
 push_send_update, push_user_update,
 };

+static NOTIFICATIONS_DISABLED: Lazy<bool> = Lazy::new(|| !CONFIG.enable_websocket() && !CONFIG.push_enabled());
+
 pub fn routes() -> Vec<Route> {
-routes![websockets_hub, anonymous_websockets_hub]
+if CONFIG.enable_websocket() {
+routes![websockets_hub, anonymous_websockets_hub]
+} else {
+info!("WebSocket are disabled, realtime sync functionality will not work!");
+routes![]
+}
 }
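NOTIFICATIONS_DISABLED is computed once from the config and then used as a cheap early-out at the top of every send_* method below. A small sketch of that Lazy-gate pattern, assuming once_cell (the config lookup is replaced by a constant):

    use once_cell::sync::Lazy;

    // Evaluated on first deref; stands in for !enable_websocket() && !push_enabled().
    static NOTIFICATIONS_DISABLED: Lazy<bool> = Lazy::new(|| false);

    fn send_user_update() {
        if *NOTIFICATIONS_DISABLED {
            return; // skip building the payload entirely
        }
        // ... build and dispatch the update here
    }

    fn main() {
        send_user_update();
    }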
 #[derive(FromForm, Debug)]

@@ -108,7 +103,7 @@ impl Drop for WSAnonymousEntryMapGuard {
 #[get("/hub?<data..>")]
 fn websockets_hub<'r>(
-ws: rocket_ws::WebSocket,
+ws: WebSocket,
 data: WsAccessToken,
 ip: ClientIp,
 header_token: WsAccessTokenHeader,

@@ -164,6 +159,11 @@ fn websockets_hub<'r>(
 continue;
 }
 }
+
+// Prevent sending anything back when a `Close` Message is received.
+// Just break the loop
+Message::Close(_) => break,
+
 // Just echo anything else the client sends
 _ => yield message,
 }

@@ -187,11 +187,7 @@ fn websockets_hub<'r>(
 }

 #[get("/anonymous-hub?<token..>")]
-fn anonymous_websockets_hub<'r>(
-ws: rocket_ws::WebSocket,
-token: String,
-ip: ClientIp,
-) -> Result<rocket_ws::Stream!['r], Error> {
+fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> {
 let addr = ip.ip;
 info!("Accepting Anonymous Rocket WS connection from {addr}");

@@ -230,6 +226,11 @@ fn anonymous_websockets_hub<'r>(
 continue;
 }
 }
+
+// Prevent sending anything back when a `Close` Message is received.
+// Just break the loop
+Message::Close(_) => break,
+
 // Just echo anything else the client sends
 _ => yield message,
 }

@@ -287,8 +288,8 @@ fn serialize(val: Value) -> Vec<u8> {
 }

 fn serialize_date(date: NaiveDateTime) -> Value {
-let seconds: i64 = date.timestamp();
-let nanos: i64 = date.timestamp_subsec_nanos().into();
+let seconds: i64 = date.and_utc().timestamp();
+let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
 let timestamp = nanos << 34 | seconds;

 let bs = timestamp.to_be_bytes();
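The `nanos << 34 | seconds` line in serialize_date packs both values into one 64-bit big-endian integer, the MessagePack "timestamp 64" extension layout (30 bits of nanoseconds above 34 bits of seconds). A std-only worked example:

    // Packs a timestamp the way serialize_date does: nanoseconds in the top
    // 30 bits, seconds in the bottom 34 bits, serialized big-endian.
    fn pack_timestamp64(seconds: i64, nanos: i64) -> [u8; 8] {
        let timestamp = nanos << 34 | seconds;
        timestamp.to_be_bytes()
    }

    fn main() {
        // 2021-01-01T00:00:00.5Z: seconds = 1_609_459_200, nanos = 500_000_000
        let bytes = pack_timestamp64(1_609_459_200, 500_000_000);
        println!("{bytes:02x?}");
    }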
@@ -339,13 +340,19 @@ impl WebSocketUsers {
 // NOTE: The last modified date needs to be updated before calling these methods
 pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
+// Skip any processing if both WebSockets and Push are not active
+if *NOTIFICATIONS_DISABLED {
+return;
+}
 let data = create_update(
 vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
 ut,
 None,
 );

-self.send_update(&user.uuid, &data).await;
+if CONFIG.enable_websocket() {
+self.send_update(&user.uuid, &data).await;
+}

 if CONFIG.push_enabled() {
 push_user_update(ut, user);

@@ -353,13 +360,19 @@ impl WebSocketUsers {
 }

 pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
+// Skip any processing if both WebSockets and Push are not active
+if *NOTIFICATIONS_DISABLED {
+return;
+}
 let data = create_update(
 vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
 UpdateType::LogOut,
 acting_device_uuid.clone(),
 );

-self.send_update(&user.uuid, &data).await;
+if CONFIG.enable_websocket() {
+self.send_update(&user.uuid, &data).await;
+}

 if CONFIG.push_enabled() {
 push_logout(user, acting_device_uuid);

@@ -373,6 +386,10 @@ impl WebSocketUsers {
 acting_device_uuid: &String,
 conn: &mut DbConn,
 ) {
+// Skip any processing if both WebSockets and Push are not active
+if *NOTIFICATIONS_DISABLED {
+return;
+}
 let data = create_update(
 vec![
 ("Id".into(), folder.uuid.clone().into()),

@@ -383,7 +400,9 @@ impl WebSocketUsers {
 Some(acting_device_uuid.into()),
 );

-self.send_update(&folder.user_uuid, &data).await;
+if CONFIG.enable_websocket() {
+self.send_update(&folder.user_uuid, &data).await;
+}

 if CONFIG.push_enabled() {
 push_folder_update(ut, folder, acting_device_uuid, conn).await;

@@ -399,13 +418,17 @@ impl WebSocketUsers {
 collection_uuids: Option<Vec<String>>,
 conn: &mut DbConn,
 ) {
+// Skip any processing if both WebSockets and Push are not active
+if *NOTIFICATIONS_DISABLED {
+return;
+}
 let org_uuid = convert_option(cipher.organization_uuid.clone());
 // Depending if there are collections provided or not, we need to have different values for the following variables.
 // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.
 let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
 (
 Value::Nil,
-Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<rmpv::Value>>()),
+Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<Value>>()),
 serialize_date(Utc::now().naive_utc()),
 )
 } else {

@@ -424,8 +447,10 @@ impl WebSocketUsers {
 Some(acting_device_uuid.into()),
 );

-for uuid in user_uuids {
-self.send_update(uuid, &data).await;
+if CONFIG.enable_websocket() {
+for uuid in user_uuids {
+self.send_update(uuid, &data).await;
+}
 }

 if CONFIG.push_enabled() && user_uuids.len() == 1 {

@@ -441,6 +466,10 @@ impl WebSocketUsers {
 acting_device_uuid: &String,
 conn: &mut DbConn,
 ) {
+// Skip any processing if both WebSockets and Push are not active
+if *NOTIFICATIONS_DISABLED {
+return;
+}
 let user_uuid = convert_option(send.user_uuid.clone());

 let data = create_update(

@@ -453,8 +482,10 @@ impl WebSocketUsers {
 None,
 );

-for uuid in user_uuids {
-self.send_update(uuid, &data).await;
+if CONFIG.enable_websocket() {
+for uuid in user_uuids {
+self.send_update(uuid, &data).await;
+}
 }
 if CONFIG.push_enabled() && user_uuids.len() == 1 {
 push_send_update(ut, send, acting_device_uuid, conn).await;

@@ -468,12 +499,18 @@ impl WebSocketUsers {
 acting_device_uuid: &String,
 conn: &mut DbConn,
 ) {
+// Skip any processing if both WebSockets and Push are not active
+if *NOTIFICATIONS_DISABLED {
+return;
+}
 let data = create_update(
 vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
 UpdateType::AuthRequest,
 Some(acting_device_uuid.to_string()),
 );
-self.send_update(user_uuid, &data).await;
+if CONFIG.enable_websocket() {
+self.send_update(user_uuid, &data).await;
+}

 if CONFIG.push_enabled() {
 push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;

@@ -487,12 +524,18 @@ impl WebSocketUsers {
 approving_device_uuid: String,
 conn: &mut DbConn,
 ) {
+// Skip any processing if both WebSockets and Push are not active
+if *NOTIFICATIONS_DISABLED {
+return;
+}
 let data = create_update(
 vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
 UpdateType::AuthRequestResponse,
 approving_device_uuid.clone().into(),
 );
-self.send_update(auth_response_uuid, &data).await;
+if CONFIG.enable_websocket() {
+self.send_update(auth_response_uuid, &data).await;
+}

 if CONFIG.push_enabled() {
 push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)

@@ -516,6 +559,9 @@ impl AnonymousWebSocketSubscriptions {
 }

 pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
+if !CONFIG.enable_websocket() {
+return;
+}
 let data = create_anonymous_update(
 vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
 UpdateType::AuthRequestResponse,

@@ -610,127 +656,3 @@ pub enum UpdateType {
 pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
 pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;
-
-pub fn start_notification_server() -> Arc<WebSocketUsers> {
-let users = Arc::clone(&WS_USERS);
-if CONFIG.websocket_enabled() {
-let users2 = Arc::<WebSocketUsers>::clone(&users);
-tokio::spawn(async move {
-let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
-info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
-let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port");
-
-let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>();
-CONFIG.set_ws_shutdown_handle(shutdown_tx);
-
-loop {
-tokio::select! {
-Ok((stream, addr)) = listener.accept() => {
-tokio::spawn(handle_connection(stream, Arc::<WebSocketUsers>::clone(&users2), addr));
-}
-
-_ = &mut shutdown_rx => {
-break;
-}
-}
-}
-
-info!("Shutting down WebSockets server!")
-});
-}
-
-users
-}
-
-async fn handle_connection(stream: TcpStream, users: Arc<WebSocketUsers>, addr: SocketAddr) -> Result<(), Error> {
-let mut user_uuid: Option<String> = None;
-
-info!("Accepting WS connection from {addr}");
-
-// Accept connection, do initial handshake, validate auth token and get the user ID
-use handshake::server::{Request, Response};
-let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| {
-if let Some(token) = get_request_token(req) {
-if let Ok(claims) = crate::auth::decode_login(&token) {
-user_uuid = Some(claims.sub);
-return Ok(res);
-}
-}
-Err(Response::builder().status(401).body(None).unwrap())
-})
-.await?;
-
-let user_uuid = user_uuid.expect("User UUID should be set after the handshake");
-
-let (mut rx, guard) = {
-// Add a channel to send messages to this client to the map
-let entry_uuid = uuid::Uuid::new_v4();
-let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
-users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));
-
-// Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
-(rx, WSEntryMapGuard::new(users, user_uuid, entry_uuid, addr.ip()))
-};
-
-let _guard = guard;
-let mut interval = tokio::time::interval(Duration::from_secs(15));
-loop {
-tokio::select! {
-res = stream.next() => {
-match res {
-Some(Ok(message)) => {
-match message {
-// Respond to any pings
-Message::Ping(ping) => stream.send(Message::Pong(ping)).await?,
-Message::Pong(_) => {/* Ignored */},
-
-// We should receive an initial message with the protocol and version, and we will reply to it
-Message::Text(ref message) => {
-let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
-
-if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
-stream.send(Message::binary(INITIAL_RESPONSE)).await?;
-continue;
-}
-}
-// Just echo anything else the client sends
-_ => stream.send(message).await?,
-}
-}
-_ => break,
-}
-}
-
-res = rx.recv() => {
-match res {
-Some(res) => stream.send(res).await?,
-None => break,
-}
-}
-
-_ = interval.tick() => stream.send(Message::Ping(create_ping())).await?
-}
-}
-
-Ok(())
-}
-
-fn get_request_token(req: &handshake::server::Request) -> Option<String> {
-const ACCESS_TOKEN_KEY: &str = "access_token=";
-
-if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) {
-if let Some(token_part) = auth.strip_prefix("Bearer ") {
-return Some(token_part.to_owned());
-}
-}
-
-if let Some(params) = req.uri().query() {
-let params_iter = params.split('&').take(1);
-for val in params_iter {
-if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
-return Some(stripped.to_owned());
-}
-}
-}
-None
-}
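The removed get_request_token looked for a Bearer token in the Authorization header first, then fell back to the query string, and deliberately inspected only the first `&`-separated parameter (`.take(1)`). A std-only sketch of that query-string behaviour:

    // Mirrors the removed helper: only the leading query parameter is examined,
    // so an access_token that is not first in the query string is ignored.
    fn token_from_query(query: &str) -> Option<String> {
        const ACCESS_TOKEN_KEY: &str = "access_token=";
        query.split('&').take(1).find_map(|kv| kv.strip_prefix(ACCESS_TOKEN_KEY).map(str::to_owned))
    }

    fn main() {
        assert_eq!(token_from_query("access_token=abc&x=1").as_deref(), Some("abc"));
        assert_eq!(token_from_query("x=1&access_token=abc"), None);
    }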
src/api/push.rs

@@ -1,11 +1,14 @@
-use reqwest::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE};
+use reqwest::{
+header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
+Method,
+};
 use serde_json::Value;
 use tokio::sync::RwLock;

 use crate::{
 api::{ApiResult, EmptyResult, UpdateType},
 db::models::{Cipher, Device, Folder, Send, User},
-util::get_reqwest_client,
+http_client::make_http_request,
 CONFIG,
 };

@@ -50,7 +53,10 @@ async fn get_auth_push_token() -> ApiResult<String> {
 ("client_secret", &client_secret),
 ];

-let res = match get_reqwest_client().post("https://identity.bitwarden.com/connect/token").form(&params).send().await
+let res = match make_http_request(Method::POST, &format!("{}/connect/token", CONFIG.push_identity_uri()))?
+.form(&params)
+.send()
+.await
 {
 Ok(r) => r,
 Err(e) => err!(format!("Error getting push token from bitwarden server: {e}")),

@@ -72,45 +78,62 @@ async fn get_auth_push_token() -> ApiResult<String> {
 Ok(push_token.access_token.clone())
 }

-pub async fn register_push_device(user_uuid: String, device: Device) -> EmptyResult {
-if !CONFIG.push_enabled() {
+pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbConn) -> EmptyResult {
+if !CONFIG.push_enabled() || !device.is_push_device() || device.is_registered() {
 return Ok(());
 }
-let auth_push_token = get_auth_push_token().await?;

+if device.push_token.is_none() {
+warn!("Skipping the registration of the device {} because the push_token field is empty.", device.uuid);
+warn!("To get rid of this message you need to clear the app data and reconnect the device.");
+return Ok(());
+}
+
 debug!("Registering Device {}", device.uuid);

+// generate a random push_uuid so we know the device is registered
+device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
+
 //Needed to register a device for push to bitwarden :
 let data = json!({
-"userId": user_uuid,
+"userId": device.user_uuid,
 "deviceId": device.push_uuid,
 "identifier": device.uuid,
 "type": device.atype,
 "pushToken": device.push_token
 });

+let auth_push_token = get_auth_push_token().await?;
 let auth_header = format!("Bearer {}", &auth_push_token);

-get_reqwest_client()
-.post(CONFIG.push_relay_uri() + "/push/register")
+if let Err(e) = make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/register"))?
 .header(CONTENT_TYPE, "application/json")
 .header(ACCEPT, "application/json")
 .header(AUTHORIZATION, auth_header)
 .json(&data)
 .send()
 .await?
-.error_for_status()?;
+.error_for_status()
+{
+err!(format!("An error occurred while proceeding registration of a device: {e}"));
+}
+
+if let Err(e) = device.save(conn).await {
+err!(format!("An error occurred while trying to save the (registered) device push uuid: {e}"));
+}
+
 Ok(())
 }

-pub async fn unregister_push_device(uuid: String) -> EmptyResult {
-if !CONFIG.push_enabled() {
+pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
+if !CONFIG.push_enabled() || push_uuid.is_none() {
 return Ok(());
 }
 let auth_push_token = get_auth_push_token().await?;

 let auth_header = format!("Bearer {}", &auth_push_token);

-match get_reqwest_client()
-.delete(CONFIG.push_relay_uri() + "/push/" + &uuid)
+match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap()))?
 .header(AUTHORIZATION, auth_header)
 .send()
 .await

@@ -243,8 +266,15 @@ async fn send_to_push_relay(notification_data: Value) {
 let auth_header = format!("Bearer {}", &auth_push_token);

-if let Err(e) = get_reqwest_client()
-.post(CONFIG.push_relay_uri() + "/push/send")
+let req = match make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/send")) {
+Ok(r) => r,
+Err(e) => {
+error!("An error occurred while sending a send update to the push relay: {}", e);
+return;
+}
+};
+
+if let Err(e) = req
 .header(ACCEPT, "application/json")
 .header(CONTENT_TYPE, "application/json")
 .header(AUTHORIZATION, &auth_header)
src/api/web.rs (109 changed lines)

@@ -1,13 +1,20 @@
+use once_cell::sync::Lazy;
 use std::path::{Path, PathBuf};

-use rocket::{fs::NamedFile, http::ContentType, response::content::RawHtml as Html, serde::json::Json, Catcher, Route};
+use rocket::{
+fs::NamedFile,
+http::ContentType,
+response::{content::RawCss as Css, content::RawHtml as Html, Redirect},
+serde::json::Json,
+Catcher, Route,
+};
 use serde_json::Value;

 use crate::{
 api::{core::now, ApiResult, EmptyResult},
 auth::decode_file_download,
 error::Error,
-util::{Cached, SafeString},
+util::{get_web_vault_version, Cached, SafeString},
 CONFIG,
 };

@@ -16,7 +23,7 @@ pub fn routes() -> Vec<Route> {
 // crate::utils::LOGGED_ROUTES to make sure they appear in the log
 let mut routes = routes![attachments, alive, alive_head, static_files];
 if CONFIG.web_vault_enabled() {
-routes.append(&mut routes![web_index, web_index_head, app_id, web_files]);
+routes.append(&mut routes![web_index, web_index_direct, web_index_head, app_id, web_files, vaultwarden_css]);
 }

 #[cfg(debug_assertions)]

@@ -45,11 +52,101 @@ fn not_found() -> ApiResult<Html<String>> {
 Ok(Html(text))
 }

+#[get("/css/vaultwarden.css")]
+fn vaultwarden_css() -> Cached<Css<String>> {
+// Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then.
+// The default is based upon the version since this feature is added.
+static WEB_VAULT_VERSION: Lazy<u32> = Lazy::new(|| {
+let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
+let vault_version = get_web_vault_version();
+
+let (major, minor, patch) = match re.captures(&vault_version) {
+Some(c) if c.len() == 4 => (
+c.get(1).unwrap().as_str().parse().unwrap(),
+c.get(2).unwrap().as_str().parse().unwrap(),
+c.get(3).unwrap().as_str().parse().unwrap(),
+),
+_ => (2024, 6, 2),
+};
+format!("{major}{minor:02}{patch:02}").parse::<u32>().unwrap()
+});
+
+// Configure the Vaultwarden version as an integer so it can be used as a comparison smaller or greater then.
+// The default is based upon the version since this feature is added.
+static VW_VERSION: Lazy<u32> = Lazy::new(|| {
+let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap();
+let vw_version = crate::VERSION.unwrap_or("1.32.1");
+
+let (major, minor, patch) = match re.captures(vw_version) {
+Some(c) if c.len() == 4 => (
+c.get(1).unwrap().as_str().parse().unwrap(),
+c.get(2).unwrap().as_str().parse().unwrap(),
+c.get(3).unwrap().as_str().parse().unwrap(),
+),
+_ => (1, 32, 1),
+};
+format!("{major}{minor:02}{patch:02}").parse::<u32>().unwrap()
+});
+
+let css_options = json!({
+"web_vault_version": *WEB_VAULT_VERSION,
+"vw_version": *VW_VERSION,
+"signup_disabled": !CONFIG.signups_allowed() && CONFIG.signups_domains_whitelist().is_empty(),
+"mail_enabled": CONFIG.mail_enabled(),
+"yubico_enabled": CONFIG._enable_yubico() && (CONFIG.yubico_client_id().is_some() == CONFIG.yubico_secret_key().is_some()),
+"emergency_access_allowed": CONFIG.emergency_access_allowed(),
+"sends_allowed": CONFIG.sends_allowed(),
+"load_user_scss": true,
+});
+
+let scss = match CONFIG.render_template("scss/vaultwarden.scss", &css_options) {
+Ok(t) => t,
+Err(e) => {
+// Something went wrong loading the template. Use the fallback
+warn!("Loading scss/vaultwarden.scss.hbs or scss/user.vaultwarden.scss.hbs failed. {e}");
+CONFIG
+.render_fallback_template("scss/vaultwarden.scss", &css_options)
+.expect("Fallback scss/vaultwarden.scss.hbs to render")
+}
+};
+
+let css = match grass_compiler::from_string(
+scss,
+&grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
+) {
+Ok(css) => css,
+Err(e) => {
+// Something went wrong compiling the scss. Use the fallback
+warn!("Compiling the Vaultwarden SCSS styles failed. {e}");
+let mut css_options = css_options;
+css_options["load_user_scss"] = json!(false);
+let scss = CONFIG
+.render_fallback_template("scss/vaultwarden.scss", &css_options)
+.expect("Fallback scss/vaultwarden.scss.hbs to render");
+grass_compiler::from_string(
+scss,
+&grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
+)
+.expect("SCSS to compile")
+}
+};
+
+// Cache for one day should be enough and not too much
+Cached::ttl(Css(css), 86_400, false)
+}
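Both Lazy blocks in vaultwarden_css turn a dotted version string into an integer (minor and patch zero-padded to two digits) so versions can be compared with plain `<`/`>`. The same conversion in isolation, with the regex swapped for simple splitting:

    // "2024.6.2" -> 20240602, comparable by ordinary integer ordering.
    fn version_to_u32(v: &str) -> Option<u32> {
        let mut parts = v.split('.');
        let major: u32 = parts.next()?.parse().ok()?;
        let minor: u32 = parts.next()?.parse().ok()?;
        let patch: u32 = parts.next()?.parse().ok()?;
        format!("{major}{minor:02}{patch:02}").parse().ok()
    }

    fn main() {
        assert_eq!(version_to_u32("2024.6.2"), Some(20240602));
        assert!(version_to_u32("2024.6.2") < version_to_u32("2024.10.1"));
    }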
 #[get("/")]
 async fn web_index() -> Cached<Option<NamedFile>> {
 Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false)
 }

+// Make sure that `/index.html` redirect to actual domain path.
+// If not, this might cause issues with the web-vault
+#[get("/index.html")]
+fn web_index_direct() -> Redirect {
+Redirect::to(format!("{}/", CONFIG.domain_path()))
+}
+
 #[head("/")]
 fn web_index_head() -> EmptyResult {
 // Add an explicit HEAD route to prevent uptime monitoring services from

@@ -170,11 +267,11 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
 }
 "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
 "bootstrap.bundle.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap.bundle.js"))),
-"jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
+"jdenticon-3.3.0.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon-3.3.0.js"))),
 "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
 "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
-"jquery-3.7.0.slim.js" => {
-Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.0.slim.js")))
+"jquery-3.7.1.slim.js" => {
+Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.1.slim.js")))
 }
 _ => err!(format!("Static file not found: {filename}")),
 }
src/auth.rs (180 changed lines)

@@ -1,18 +1,24 @@
 // JWT Handling
 //
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
-use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
 use num_traits::FromPrimitive;
-use once_cell::sync::Lazy;
+use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
+use once_cell::sync::{Lazy, OnceCell};
+use openssl::rsa::Rsa;
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;
+use std::{
+env,
+fs::File,
+io::{Read, Write},
+net::IpAddr,
+};

 use crate::{error::Error, CONFIG};

 const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

-pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2));
+pub static DEFAULT_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
 static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));

 pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));

@@ -26,23 +32,55 @@ static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.do
 static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin()));
 static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin()));

-static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
-let key =
-std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}"));
-EncodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}"))
-});
-static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
-let key = std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}"));
-DecodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}"))
-});
+static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
+static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();

-pub fn load_keys() {
-Lazy::force(&PRIVATE_RSA_KEY);
-Lazy::force(&PUBLIC_RSA_KEY);
+pub fn initialize_keys() -> Result<(), Error> {
+fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
+let mut priv_key_buffer = Vec::with_capacity(2048);
+
+let mut priv_key_file = File::options()
+.create(create_if_missing)
+.truncate(false)
+.read(true)
+.write(create_if_missing)
+.open(CONFIG.private_rsa_key())?;
+
+#[allow(clippy::verbose_file_reads)]
+let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
+
+let rsa_key = if bytes_read > 0 {
+Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
+} else if create_if_missing {
+// Only create the key if the file doesn't exist or is empty
+let rsa_key = Rsa::generate(2048)?;
+priv_key_buffer = rsa_key.private_key_to_pem()?;
+priv_key_file.write_all(&priv_key_buffer)?;
+info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
+rsa_key
+} else {
+err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
+};
+
+Ok((rsa_key, priv_key_buffer))
+}
+
+let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
+let pub_key_buffer = priv_key.public_key_to_pem()?;
+
+let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
+let dec: DecodingKey = DecodingKey::from_rsa_pem(&pub_key_buffer)?;
+if PRIVATE_RSA_KEY.set(enc).is_err() {
+err!("PRIVATE_RSA_KEY must only be initialized once")
+}
+if PUBLIC_RSA_KEY.set(dec).is_err() {
+err!("PUBLIC_RSA_KEY must only be initialized once")
+}
+Ok(())
 }
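initialize_keys replaces the old panicking Lazy statics with OnceCells that are filled exactly once at startup and read later with wait(). The cell handling in isolation, assuming once_cell (key material replaced by a string):

    use once_cell::sync::OnceCell;

    static KEY: OnceCell<String> = OnceCell::new();

    fn initialize() -> Result<(), &'static str> {
        // set() fails if the cell is already filled, like the err! guards above.
        KEY.set("pem-data".to_string()).map_err(|_| "KEY must only be initialized once")
    }

    fn main() {
        initialize().unwrap();
        // wait() blocks until the cell has been set; here it returns immediately.
        assert_eq!(KEY.wait(), "pem-data");
    }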
 pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
-match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
+match jsonwebtoken::encode(&JWT_HEADER, claims, PRIVATE_RSA_KEY.wait()) {
 Ok(token) => token,
 Err(e) => panic!("Error encoding jwt {e}"),
 }

@@ -56,7 +94,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
 validation.set_issuer(&[issuer]);

 let token = token.replace(char::is_whitespace, "");
-match jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation) {
+match jsonwebtoken::decode(&token, PUBLIC_RSA_KEY.wait(), &validation) {
 Ok(d) => Ok(d.claims),
 Err(err) => match *err.kind() {
 ErrorKind::InvalidToken => err!("Token is invalid"),

@@ -119,10 +157,16 @@ pub struct LoginJwtClaims {
 pub email: String,
 pub email_verified: bool,

-pub orgowner: Vec<String>,
-pub orgadmin: Vec<String>,
-pub orguser: Vec<String>,
-pub orgmanager: Vec<String>,
+// ---
+// Disabled these keys to be added to the JWT since they could cause the JWT to get too large
+// Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients
+// Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out
+// See: https://github.com/dani-garcia/vaultwarden/issues/4156
+// ---
+// pub orgowner: Vec<String>,
+// pub orgadmin: Vec<String>,
+// pub orguser: Vec<String>,
+// pub orgmanager: Vec<String>,

 // user security_stamp
 pub sstamp: String,

@@ -158,11 +202,11 @@ pub fn generate_invite_claims(
 user_org_id: Option<String>,
 invited_by_email: Option<String>,
 ) -> InviteJwtClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
 InviteJwtClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_INVITE_ISSUER.to_string(),
 sub: uuid,
 email,

@@ -196,11 +240,11 @@ pub fn generate_emergency_access_invite_claims(
 grantor_name: String,
 grantor_email: String,
 ) -> EmergencyAccessInviteJwtClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
 EmergencyAccessInviteJwtClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
 sub: uuid,
 email,

@@ -227,10 +271,10 @@ pub struct OrgApiKeyLoginJwtClaims {
 }

 pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 OrgApiKeyLoginJwtClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::hours(1)).timestamp(),
+exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
 iss: JWT_ORG_API_KEY_ISSUER.to_string(),
 sub: uuid,
 client_id: format!("organization.{org_id}"),

@@ -254,10 +298,10 @@ pub struct FileDownloadClaims {
 }

 pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 FileDownloadClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::minutes(5)).timestamp(),
+exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
 iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
 sub: uuid,
 file_id,

@@ -277,42 +321,42 @@ pub struct BasicJwtClaims {
 }

 pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_DELETE_ISSUER.to_string(),
 sub: uuid,
 }
 }

 pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
 sub: uuid,
 }
 }

 pub fn generate_admin_claims() -> BasicJwtClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
+exp: (time_now + TimeDelta::try_minutes(CONFIG.admin_session_lifetime()).unwrap()).timestamp(),
 iss: JWT_ADMIN_ISSUER.to_string(),
 sub: "admin_panel".to_string(),
 }
 }

 pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
-let time_now = Utc::now().naive_utc();
+let time_now = Utc::now();
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-exp: (time_now + Duration::minutes(2)).timestamp(),
+exp: (time_now + TimeDelta::try_minutes(2).unwrap()).timestamp(),
 iss: JWT_SEND_ISSUER.to_string(),
 sub: format!("{send_id}/{file_id}"),
 }
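Every claim generator now derives nbf/exp from a timezone-aware Utc::now() plus a checked TimeDelta; chrono's old Duration::hours panics on out-of-range values, while try_hours surfaces that as an Option. A minimal sketch, assuming a chrono dependency:

    use chrono::{TimeDelta, Utc};

    fn main() {
        let time_now = Utc::now();
        // try_hours returns None instead of panicking on overflow.
        let exp = (time_now + TimeDelta::try_hours(2).unwrap()).timestamp();
        assert!(exp > time_now.timestamp());
    }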
@@ -349,8 +393,6 @@ impl<'r> FromRequest<'r> for Host {
             referer.to_string()
         } else {
-            // Try to guess from the headers
-            use std::env;
-
             let protocol = if let Some(proto) = headers.get_one("X-Forwarded-Proto") {
                 proto
             } else if env::var("ROCKET_TLS").is_ok() {
@@ -361,10 +403,8 @@ impl<'r> FromRequest<'r> for Host {
             let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
                 host
-            } else if let Some(host) = headers.get_one("Host") {
-                host
             } else {
-                ""
+                headers.get_one("Host").unwrap_or_default()
             };

             format!("{protocol}://{host}")
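The rewritten else-branch collapses a two-step fallback into one Option chain. A tiny sketch of the same fallback order (function name invented for illustration):

// Prefer the reverse proxy's X-Forwarded-Host, then the plain Host
// header, then an empty string, mirroring the rewritten guard above.
fn effective_host<'a>(x_forwarded_host: Option<&'a str>, host: Option<&'a str>) -> &'a str {
    x_forwarded_host.or(host).unwrap_or_default()
}

fn main() {
    assert_eq!(effective_host(Some("vault.example.com"), Some("10.0.0.5")), "vault.example.com");
    assert_eq!(effective_host(None, None), "");
}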
@@ -377,7 +417,6 @@ impl<'r> FromRequest<'r> for Host {
 }

 pub struct ClientHeaders {
-    pub host: String,
     pub device_type: i32,
     pub ip: ClientIp,
 }
@@ -387,7 +426,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
     type Error = &'static str;

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
-        let host = try_outcome!(Host::from_request(request).await).host;
         let ip = match ClientIp::from_request(request).await {
             Outcome::Success(ip) => ip,
             _ => err_handler!("Error getting Client IP"),
@@ -397,7 +435,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
             request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);

         Outcome::Success(ClientHeaders {
-            host,
             device_type,
             ip,
         })
@@ -469,7 +506,7 @@ impl<'r> FromRequest<'r> for Headers {
             // Check if the stamp exception has expired first.
             // Then, check if the current route matches any of the allowed routes.
             // After that check the stamp in exception matches the one in the claims.
-            if Utc::now().naive_utc().timestamp() > stamp_exception.expire {
+            if Utc::now().timestamp() > stamp_exception.expire {
                 // If the stamp exception has been expired remove it from the database.
                 // This prevents checking this stamp exception for new requests.
                 let mut user = user;
@@ -503,7 +540,6 @@ pub struct OrgHeaders {
     pub user: User,
     pub org_user_type: UserOrgType,
     pub org_user: UserOrganization,
     pub org_id: String,
     pub ip: ClientIp,
 }
@@ -566,7 +602,6 @@ impl<'r> FromRequest<'r> for OrgHeaders {
                 }
             },
             org_user,
             org_id: String::from(org_id),
             ip: headers.ip,
         })
     }
@@ -643,7 +678,6 @@ pub struct ManagerHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
     pub org_user_type: UserOrgType,
     pub ip: ClientIp,
 }
@@ -661,7 +695,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                 _ => err_handler!("Error getting DB"),
             };

-            if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
+            if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
                 err_handler!("The current user isn't a manager for this collection")
             }
         }
@@ -672,7 +706,6 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
             host: headers.host,
             device: headers.device,
             user: headers.user,
             org_user_type: headers.org_user_type,
             ip: headers.ip,
         })
     } else {
@@ -699,7 +732,6 @@ pub struct ManagerHeadersLoose {
     pub device: Device,
     pub user: User,
     pub org_user: UserOrganization,
     pub org_user_type: UserOrgType,
     pub ip: ClientIp,
 }
@@ -715,7 +747,6 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
             device: headers.device,
             user: headers.user,
             org_user: headers.org_user,
             org_user_type: headers.org_user_type,
             ip: headers.ip,
         })
     } else {
@@ -734,10 +765,6 @@ impl From<ManagerHeadersLoose> for Headers {
         }
     }
 }
-async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
-    org_user.has_full_access()
-        || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
-}

 impl ManagerHeaders {
     pub async fn from_loose(
@@ -749,7 +776,7 @@ impl ManagerHeaders {
         if uuid::Uuid::parse_str(col_id).is_err() {
             err!("Collection Id is malformed!");
         }
-        if !can_access_collection(&h.org_user, col_id, conn).await {
+        if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
             err!("You don't have access to all collections!");
         }
     }
@@ -758,14 +785,12 @@ impl ManagerHeaders {
             host: h.host,
             device: h.device,
             user: h.user,
             org_user_type: h.org_user_type,
             ip: h.ip,
         })
     }
 }

 pub struct OwnerHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
     pub ip: ClientIp,
@@ -779,7 +804,6 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
         let headers = try_outcome!(OrgHeaders::from_request(request).await);
         if headers.org_user_type == UserOrgType::Owner {
             Outcome::Success(Self {
                 host: headers.host,
                 device: headers.device,
                 user: headers.user,
                 ip: headers.ip,
@@ -793,7 +817,6 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
 //
 // Client IP address detection
 //
 use std::net::IpAddr;

 pub struct ClientIp {
     pub ip: IpAddr,
@@ -826,6 +849,35 @@ impl<'r> FromRequest<'r> for ClientIp {
     }
 }

+pub struct Secure {
+    pub https: bool,
+}
+
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for Secure {
+    type Error = ();
+
+    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+        let headers = request.headers();
+
+        // Try to guess from the headers
+        let protocol = match headers.get_one("X-Forwarded-Proto") {
+            Some(proto) => proto,
+            None => {
+                if env::var("ROCKET_TLS").is_ok() {
+                    "https"
+                } else {
+                    "http"
+                }
+            }
+        };
+
+        Outcome::Success(Secure {
+            https: protocol == "https",
+        })
+    }
+}

 pub struct WsAccessTokenHeader {
     pub access_token: Option<String>,
 }
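The new Secure guard encodes a simple scheme-guessing rule. Sketched standalone (function name invented for illustration): trust X-Forwarded-Proto when a reverse proxy sets it, otherwise assume https only if Rocket itself terminates TLS.

use std::env;

fn is_https(x_forwarded_proto: Option<&str>) -> bool {
    match x_forwarded_proto {
        Some(proto) => proto == "https",
        // No proxy header: https only when ROCKET_TLS is configured.
        None => env::var("ROCKET_TLS").is_ok(),
    }
}

fn main() {
    assert!(is_https(Some("https")));
    assert!(!is_https(Some("http")));
}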
src/config.rs (268 changed lines)
@@ -1,6 +1,9 @@
 use std::env::consts::EXE_SUFFIX;
 use std::process::exit;
-use std::sync::RwLock;
+use std::sync::{
+    atomic::{AtomicBool, Ordering},
+    RwLock,
+};

 use job_scheduler_ng::Schedule;
 use once_cell::sync::Lazy;
@@ -9,7 +12,7 @@ use reqwest::Url;
 use crate::{
     db::DbConnType,
     error::Error,
-    util::{get_env, get_env_bool},
+    util::{get_env, get_env_bool, parse_experimental_client_feature_flags},
 };

 static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
@@ -17,6 +20,8 @@ static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
     get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
 });

+pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);
+
 pub static CONFIG: Lazy<Config> = Lazy::new(|| {
     Config::load().unwrap_or_else(|e| {
         println!("Error loading config:\n  {e:?}\n");
@@ -39,7 +44,6 @@ macro_rules! make_config {

     struct Inner {
         rocket_shutdown_handle: Option<rocket::Shutdown>,
-        ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>,

         templates: Handlebars<'static>,
         config: ConfigItems,
@@ -147,6 +151,12 @@ macro_rules! make_config {
             config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
             config.org_creation_users = config.org_creation_users.trim().to_lowercase();

+            // Copy the values from the deprecated flags to the new ones
+            if config.http_request_block_regex.is_none() {
+                config.http_request_block_regex = config.icon_blacklist_regex.clone();
+            }
+
             config
         }
     }
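A minimal sketch of this deprecation shim outside the macro (the Cfg struct and function name here are illustrative stand-ins, not the real ConfigItems): if the new option is unset, inherit the legacy one so existing .env files keep working.

#[derive(Default)]
struct Cfg {
    icon_blacklist_regex: Option<String>,     // deprecated option
    http_request_block_regex: Option<String>, // its replacement
}

fn apply_legacy_fallback(mut config: Cfg) -> Cfg {
    // Only fall back when the new option was not provided explicitly.
    if config.http_request_block_regex.is_none() {
        config.http_request_block_regex = config.icon_blacklist_regex.clone();
    }
    config
}

fn main() {
    let cfg = apply_legacy_fallback(Cfg {
        icon_blacklist_regex: Some("^192\\.168\\.".into()),
        ..Cfg::default()
    });
    assert_eq!(cfg.http_request_block_regex.as_deref(), Some("^192\\.168\\."));
}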
@@ -326,7 +336,7 @@ macro_rules! make_config {
             }
         }
     }};
-    ( @build $value:expr, $config:expr, gen, $default_fn:expr ) => {{
+    ( @build $value:expr, $config:expr, generated, $default_fn:expr ) => {{
         let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
         f($config)
     }};
@@ -344,10 +354,10 @@ macro_rules! make_config {
 // }
 //
 // Where action applied when the value wasn't provided and can be:
-//  def:    Use a default value
-//  auto:   Value is auto generated based on other values
-//  option: Value is optional
-//  gen:    Value is always autogenerated and it's original value ignored
+//  def:       Use a default value
+//  auto:      Value is auto generated based on other values
+//  option:    Value is optional
+//  generated: Value is always autogenerated and it's original value ignored
 make_config! {
     folders {
         /// Data folder |> Main data folder
@@ -361,7 +371,7 @@ make_config! {
         /// Sends folder
         sends_folder:       String, false, auto, |c| format!("{}/{}", c.data_folder, "sends");
         /// Temp folder |> Used for storing temporary file uploads
         tmp_folder:         String, false, auto, |c| format!("{}/{}", c.data_folder, "tmp");
         /// Templates folder
         templates_folder:   String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
         /// Session JWT key
@@ -371,17 +381,15 @@ make_config! {
     },
     ws {
         /// Enable websocket notifications
-        websocket_enabled:  bool,   false, def, false;
-        /// Websocket address
-        websocket_address:  String, false, def, "0.0.0.0".to_string();
-        /// Websocket port
-        websocket_port:     u16,    false, def, 3012;
+        enable_websocket:   bool,   false, def, true;
     },
     push {
         /// Enable push notifications
         push_enabled:       bool,   false, def, false;
-        /// Push relay base uri
+        /// Push relay uri
         push_relay_uri:     String, false, def, "https://push.bitwarden.com".to_string();
         /// Push identity uri
         push_identity_uri:  String, false, def, "https://identity.bitwarden.com".to_string();
         /// Installation id |> The installation id from https://bitwarden.com/host
         push_installation_id: Pass, false, def, String::new();
         /// Installation key |> The installation key from https://bitwarden.com/host
@@ -412,7 +420,9 @@ make_config! {
         /// Auth Request cleanup schedule |> Cron schedule of the job that cleans old auth requests from the auth request.
         /// Defaults to every minute. Set blank to disable this job.
         auth_request_purge_schedule: String, false, def, "30 * * * * *".to_string();
+
+        /// Duo Auth context cleanup schedule |> Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
+        /// Defaults to once every minute. Set blank to disable this job.
+        duo_context_purge_schedule: String, false, def, "30 * * * * *".to_string();
     },

     /// General settings
@@ -440,6 +450,8 @@ make_config! {
         user_attachment_limit: i64, true, option;
         /// Per-organization attachment storage limit (KB) |> Max kilobytes of attachment storage allowed per org. When this limit is reached, org members will not be allowed to upload further attachments for ciphers owned by that org.
         org_attachment_limit: i64, true, option;
+        /// Per-user send storage limit (KB) |> Max kilobytes of sends storage allowed per user. When this limit is reached, the user will not be allowed to upload further sends.
+        user_send_limit: i64, true, option;

         /// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
         /// If unset, trashed items are not auto-deleted. This setting applies globally, so make
@@ -478,18 +490,18 @@ make_config! {
         /// Invitation token expiration time (in hours) |> The number of hours after which an organization invite token, emergency access invite token,
         /// email verification token and deletion request token will expire (must be at least 1)
         invitation_expiration_hours: u32, false, def, 120;
-        /// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
+        /// Enable emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
         emergency_access_allowed: bool, true, def, true;
         /// Allow email change |> Controls whether users can change their email. This setting applies globally to all users.
         email_change_allowed: bool, true, def, true;
         /// Password iterations |> Number of server-side passwords hashing iterations for the password hash.
         /// The default for new users. If changed, it will be updated during login for existing users.
         password_iterations: i32, true, def, 600_000;
-        /// Allow password hints |> Controls whether users can set password hints. This setting applies globally to all users.
+        /// Allow password hints |> Controls whether users can set or show password hints. This setting applies globally to all users.
         password_hints_allowed: bool, true, def, true;
-        /// Show password hint |> Controls whether a password hint should be shown directly in the web page
-        /// if SMTP service is not configured. Not recommended for publicly-accessible instances as this
-        /// provides unauthenticated access to potentially sensitive data.
+        /// Show password hint (Know the risks!) |> Controls whether a password hint should be shown directly in the web page
+        /// if SMTP service is not configured and password hints are allowed. Not recommended for publicly-accessible instances
+        /// because this provides unauthenticated access to potentially sensitive data.
         show_password_hint: bool, true, def, false;

         /// Admin token/Argon2 PHC |> The plain text token or Argon2 PHC string used to authenticate in this very same page. Changing it here will not deauthorize the current session!
@@ -508,7 +520,7 @@ make_config! {
         /// Set to the string "none" (without quotes), to disable any headers and just use the remote IP
         ip_header: String, true, def, "X-Real-IP".to_string();
         /// Internal IP header property, used to avoid recomputing each time
-        _ip_header_enabled: bool, false, gen, |c| &c.ip_header.trim().to_lowercase() != "none";
+        _ip_header_enabled: bool, false, generated, |c| &c.ip_header.trim().to_lowercase() != "none";
         /// Icon service |> The predefined icon services are: internal, bitwarden, duckduckgo, google.
         /// To specify a custom icon service, set a URL template with exactly one instance of `{}`,
         /// which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
@@ -517,9 +529,9 @@ make_config! {
         /// corresponding icon at the external service.
         icon_service: String, false, def, "internal".to_string();
         /// _icon_service_url
-        _icon_service_url: String, false, gen, |c| generate_icon_service_url(&c.icon_service);
+        _icon_service_url: String, false, generated, |c| generate_icon_service_url(&c.icon_service);
         /// _icon_service_csp
-        _icon_service_csp: String, false, gen, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
+        _icon_service_csp: String, false, generated, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
         /// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
         /// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
         /// Temporary redirects are useful while testing different icon services, but once a service
@@ -532,12 +544,18 @@ make_config! {
         icon_cache_negttl: u64, true, def, 259_200;
         /// Icon download timeout |> Number of seconds when to stop attempting to download an icon.
         icon_download_timeout: u64, true, def, 10;
-        /// Icon blacklist Regex |> Any domains or IPs that match this regex won't be fetched by the icon service.
-        icon_blacklist_regex: String, true, option;
-        /// Icon blacklist non global IPs |> Any IP which is not defined as a global IP will be blacklisted.
-        icon_blacklist_non_global_ips: bool, true, def, true;
+
+        /// [Deprecated] Icon blacklist Regex |> Use `http_request_block_regex` instead
+        icon_blacklist_regex: String, false, option;
+        /// [Deprecated] Icon blacklist non global IPs |> Use `http_request_block_non_global_ips` instead
+        icon_blacklist_non_global_ips: bool, false, def, true;
+
+        /// Block HTTP domains/IPs by Regex |> Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
+        /// Useful to hide other servers in the local network. Check the WIKI for more details
+        http_request_block_regex: String, true, option;
+        /// Block non global IPs |> Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
+        /// Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+        http_request_block_non_global_ips: bool, true, auto, |c| c.icon_blacklist_non_global_ips;

         /// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
         /// Note that the checkbox would still be present, but ignored.
@@ -547,6 +565,9 @@ make_config! {
         /// TOTP codes of the previous and next 30 seconds will be invalid.
         authenticator_disable_time_drift: bool, true, def, false;

+        /// Customize the enabled feature flags on the clients |> This is a comma separated list of feature flags to enable.
+        experimental_client_feature_flags: String, false, def, "fido2-vault-credentials".to_string();
+
         /// Require new device emails |> When a user logs in an email is required to be sent.
         /// If sending the email fails the login attempt will fail.
         require_device_email: bool, true, def, false;
@@ -562,8 +583,9 @@ make_config! {
         use_syslog: bool, false, def, false;
         /// Log file path
         log_file: String, false, option;
-        /// Log level
-        log_level: String, false, def, "Info".to_string();
+        /// Log level |> Valid values are "trace", "debug", "info", "warn", "error" and "off"
+        /// For a specific module append it as a comma separated value "info,path::to::module=debug"
+        log_level: String, false, def, "info".to_string();

         /// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using vaultwarden on some exotic filesystems,
         /// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
@@ -601,7 +623,18 @@ make_config! {
         admin_session_lifetime: i64, true, def, 20;

         /// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
         org_groups_enabled: bool, false, def, false;

+        /// Increase note size limit (Know the risks!) |> Sets the secure note size limit to 100_000 instead of the default 10_000.
+        /// WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
+        increase_note_size_limit: bool, true, def, false;
+        /// Generated max_note_size value to prevent if..else matching during every check
+        _max_note_size: usize, false, generated, |c| if c.increase_note_size_limit {100_000} else {10_000};
+
+        /// Enforce Single Org with Reset Password Policy |> Enforce that the Single Org policy is enabled before setting the Reset Password policy
+        /// Bitwarden enforces this by default. In Vaultwarden we encouraged to use multiple organizations because groups were not available.
+        /// Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
+        enforce_single_org_with_reset_pw_policy: bool, false, def, false;
     },

     /// Yubikey settings
@@ -620,6 +653,8 @@ make_config! {
     duo: _enable_duo {
         /// Enabled
         _enable_duo: bool, true, def, true;
+        /// Attempt to use deprecated iframe-based Traditional Prompt (Duo WebSDK 2)
+        duo_use_iframe: bool, false, def, false;
         /// Integration Key
         duo_ikey: String, true, option;
         /// Secret Key
@@ -665,7 +700,7 @@ make_config! {
         /// Embed images as email attachments.
         smtp_embed_images: bool, true, def, true;
         /// _smtp_img_src
-        _smtp_img_src: String, false, gen, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
+        _smtp_img_src: String, false, generated, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
         /// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
         smtp_debug: bool, false, def, false;
         /// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
@@ -684,6 +719,10 @@ make_config! {
         email_expiration_time: u64, true, def, 600;
         /// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
         email_attempts_limit: u64, true, def, 3;
+        /// Automatically enforce at login |> Setup email 2FA provider regardless of any organization policy
+        email_2fa_enforce_on_verified_invite: bool, true, def, false;
+        /// Auto-enable 2FA (Know the risks!) |> Automatically setup email 2FA as fallback provider when needed
+        email_2fa_auto_fallback: bool, true, def, false;
     },
 }
@@ -751,6 +790,57 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         )
     }

+    if cfg.push_enabled {
+        let push_relay_uri = cfg.push_relay_uri.to_lowercase();
+        if !push_relay_uri.starts_with("https://") {
+            err!("`PUSH_RELAY_URI` must start with 'https://'.")
+        }
+
+        if Url::parse(&push_relay_uri).is_err() {
+            err!("Invalid URL format for `PUSH_RELAY_URI`.");
+        }
+
+        let push_identity_uri = cfg.push_identity_uri.to_lowercase();
+        if !push_identity_uri.starts_with("https://") {
+            err!("`PUSH_IDENTITY_URI` must start with 'https://'.")
+        }
+
+        if Url::parse(&push_identity_uri).is_err() {
+            err!("Invalid URL format for `PUSH_IDENTITY_URI`.");
+        }
+    }
+
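The same prefix-then-parse check, sketched standalone with the url crate (an assumed dependency; reqwest::Url used above is a re-export of url::Url, and the function name is illustrative):

fn validate_https_uri(name: &str, raw: &str) -> Result<(), String> {
    let uri = raw.to_lowercase();
    // Reject plain-http endpoints outright...
    if !uri.starts_with("https://") {
        return Err(format!("`{name}` must start with 'https://'."));
    }
    // ...then make sure the remainder is structurally a valid URL.
    url::Url::parse(&uri).map_err(|e| format!("Invalid URL format for `{name}`: {e}"))?;
    Ok(())
}

fn main() {
    assert!(validate_https_uri("PUSH_RELAY_URI", "https://push.bitwarden.com").is_ok());
    assert!(validate_https_uri("PUSH_RELAY_URI", "http://insecure.example").is_err());
}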
+    // TODO: deal with deprecated flags so they can be removed from this list, cf. #4263
+    const KNOWN_FLAGS: &[&str] =
+        &["autofill-overlay", "autofill-v2", "browser-fileless-import", "extension-refresh", "fido2-vault-credentials"];
+    let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
+    let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
+    if !invalid_flags.is_empty() {
+        err!(format!("Unrecognized experimental client feature flags: {invalid_flags:?}.\n\n\
+                      Please ensure all feature flags are spelled correctly and that they are supported in this version.\n\
+                      Supported flags: {KNOWN_FLAGS:?}"));
+    }
+
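A minimal stand-in for this flag check (parse_flags below is a guess at what util::parse_experimental_client_feature_flags does; the real helper may normalize differently):

use std::collections::HashMap;

// Hypothetical parser: comma-separated list -> map of enabled flags.
fn parse_flags(raw: &str) -> HashMap<String, bool> {
    raw.split(',').map(|f| (f.trim().to_string(), true)).collect()
}

fn main() {
    const KNOWN_FLAGS: &[&str] = &["autofill-v2", "fido2-vault-credentials"];
    let configured = parse_flags("fido2-vault-credentials, not-a-real-flag");
    let invalid: Vec<&str> =
        configured.keys().map(|s| s.as_str()).filter(|f| !KNOWN_FLAGS.contains(f)).collect();
    assert_eq!(invalid, ["not-a-real-flag"]); // rejected at startup
}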
+    const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;
+
+    if let Some(limit) = cfg.user_attachment_limit {
+        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
+            err!("`USER_ATTACHMENT_LIMIT` is out of bounds");
+        }
+    }
+
+    if let Some(limit) = cfg.org_attachment_limit {
+        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
+            err!("`ORG_ATTACHMENT_LIMIT` is out of bounds");
+        }
+    }
+
+    if let Some(limit) = cfg.user_send_limit {
+        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
+            err!("`USER_SEND_LIMIT` is out of bounds");
+        }
+    }
+
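Why i64::MAX >> 10: the limits are configured in kilobytes, so the largest safe value is one that still fits in an i64 after multiplying by 1024. A quick check of that bound:

fn main() {
    const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;
    // Converting the KB limit to bytes can never overflow...
    assert!(MAX_FILESIZE_KB.checked_mul(1024).is_some());
    // ...while one KB more already would.
    assert!((MAX_FILESIZE_KB + 1).checked_mul(1024).is_none());
}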
     if cfg._enable_duo
         && (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
         && !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
@@ -835,12 +925,19 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         err!("To enable email 2FA, a mail transport must be configured")
     }

-    // Check if the icon blacklist regex is valid
-    if let Some(ref r) = cfg.icon_blacklist_regex {
+    if !cfg._enable_email_2fa && cfg.email_2fa_enforce_on_verified_invite {
+        err!("To enforce email 2FA on verified invitations, email 2fa has to be enabled!");
+    }
+    if !cfg._enable_email_2fa && cfg.email_2fa_auto_fallback {
+        err!("To use email 2FA as automatic fallback, email 2fa has to be enabled!");
+    }
+
+    // Check if the HTTP request block regex is valid
+    if let Some(ref r) = cfg.http_request_block_regex {
         let validate_regex = regex::Regex::new(r);
         match validate_regex {
             Ok(_) => (),
-            Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {e:#?}")),
+            Err(e) => err!(format!("`HTTP_REQUEST_BLOCK_REGEX` is invalid: {e:#?}")),
         }
     }
@@ -920,6 +1017,11 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
             _ => {}
         }
     }

+    if cfg.increase_note_size_limit {
+        println!("[WARNING] Secure Note size limit is increased to 100_000!");
+        println!("[WARNING] This could cause issues with clients. Also exports will not work on Bitwarden servers!.");
+    }
     Ok(())
 }
@@ -1008,12 +1110,13 @@ impl Config {

         // Fill any missing with defaults
         let config = builder.build();
-        validate_config(&config)?;
+        if !SKIP_CONFIG_VALIDATION.load(Ordering::Relaxed) {
+            validate_config(&config)?;
+        }

         Ok(Config {
             inner: RwLock::new(Inner {
                 rocket_shutdown_handle: None,
-                ws_shutdown_handle: None,
                 templates: load_templates(&config.templates_folder),
                 config,
                 _env,
@@ -1106,7 +1209,7 @@ impl Config {
     }

     pub fn delete_user_config(&self) -> Result<(), Error> {
-        crate::util::delete_file(&CONFIG_FILE)?;
+        std::fs::remove_file(&*CONFIG_FILE)?;

         // Empty user config
         let usr = ConfigBuilder::default();
@@ -1129,10 +1232,7 @@ impl Config {
     }

     pub fn private_rsa_key(&self) -> String {
-        format!("{}.pem", CONFIG.rsa_key_filename())
-    }
-    pub fn public_rsa_key(&self) -> String {
-        format!("{}.pub.pem", CONFIG.rsa_key_filename())
+        format!("{}.pem", self.rsa_key_filename())
     }
     pub fn mail_enabled(&self) -> bool {
         let inner = &self.inner.read().unwrap().config;
@@ -1163,35 +1263,28 @@ impl Config {
         token.is_some() && !token.unwrap().trim().is_empty()
     }

-    pub fn render_template<T: serde::ser::Serialize>(
-        &self,
-        name: &str,
-        data: &T,
-    ) -> Result<String, crate::error::Error> {
-        if CONFIG.reload_templates() {
+    pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
+        if self.reload_templates() {
             warn!("RELOADING TEMPLATES");
             let hb = load_templates(CONFIG.templates_folder());
             hb.render(name, data).map_err(Into::into)
         } else {
-            let hb = &CONFIG.inner.read().unwrap().templates;
+            let hb = &self.inner.read().unwrap().templates;
             hb.render(name, data).map_err(Into::into)
         }
     }

+    pub fn render_fallback_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
+        let hb = &self.inner.read().unwrap().templates;
+        hb.render(&format!("fallback_{name}"), data).map_err(Into::into)
+    }
+
     pub fn set_rocket_shutdown_handle(&self, handle: rocket::Shutdown) {
         self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
     }

-    pub fn set_ws_shutdown_handle(&self, handle: tokio::sync::oneshot::Sender<()>) {
-        self.inner.write().unwrap().ws_shutdown_handle = Some(handle);
-    }
-
     pub fn shutdown(&self) {
         if let Ok(mut c) = self.inner.write() {
-            if let Some(handle) = c.ws_shutdown_handle.take() {
-                handle.send(()).ok();
-            }
-
             if let Some(handle) = c.rocket_shutdown_handle.take() {
                 handle.notify();
             }
@@ -1199,7 +1292,10 @@ impl Config {
         }
     }
 }

-use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable};
+use handlebars::{
+    Context, DirectorySourceOptions, Handlebars, Helper, HelperResult, Output, RenderContext, RenderErrorReason,
+    Renderable,
+};

 fn load_templates<P>(path: P) -> Handlebars<'static>
 where
@@ -1210,7 +1306,6 @@ where
     hb.set_strict_mode(true);
     // Register helpers
     hb.register_helper("case", Box::new(case_helper));
-    hb.register_helper("jsesc", Box::new(js_escape_helper));
     hb.register_helper("to_json", Box::new(to_json));

     macro_rules! reg {
@@ -1222,6 +1317,11 @@ where
             reg!($name);
             reg!(concat!($name, $ext));
         }};
+        (@withfallback $name:expr) => {{
+            let template = include_str!(concat!("static/templates/", $name, ".hbs"));
+            hb.register_template_string($name, template).unwrap();
+            hb.register_template_string(concat!("fallback_", $name), template).unwrap();
+        }};
     }

     // First register default templates here
@@ -1243,17 +1343,18 @@ where
     reg!("email/invite_accepted", ".html");
     reg!("email/invite_confirmed", ".html");
     reg!("email/new_device_logged_in", ".html");
+    reg!("email/protected_action", ".html");
     reg!("email/pw_hint_none", ".html");
     reg!("email/pw_hint_some", ".html");
     reg!("email/send_2fa_removed_from_org", ".html");
-    reg!("email/send_single_org_removed_from_org", ".html");
-    reg!("email/send_org_invite", ".html");
     reg!("email/send_emergency_access_invite", ".html");
+    reg!("email/send_org_invite", ".html");
+    reg!("email/send_single_org_removed_from_org", ".html");
+    reg!("email/smtp_test", ".html");
     reg!("email/twofactor_email", ".html");
     reg!("email/verify_email", ".html");
+    reg!("email/welcome", ".html");
     reg!("email/welcome_must_verify", ".html");
-    reg!("email/smtp_test", ".html");
-    reg!("email/welcome", ".html");

     reg!("admin/base");
     reg!("admin/login");
@@ -1264,22 +1365,26 @@ where

     reg!("404");

+    reg!(@withfallback "scss/vaultwarden.scss");
+    reg!("scss/user.vaultwarden.scss");
+
     // And then load user templates to overwrite the defaults
     // Use .hbs extension for the files
     // Templates get registered with their relative name
-    hb.register_templates_directory(".hbs", path).unwrap();
+    hb.register_templates_directory(path, DirectorySourceOptions::default()).unwrap();

     hb
 }

 fn case_helper<'reg, 'rc>(
-    h: &Helper<'reg, 'rc>,
+    h: &Helper<'rc>,
     r: &'reg Handlebars<'_>,
     ctx: &'rc Context,
     rc: &mut RenderContext<'reg, 'rc>,
     out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
+    let param =
+        h.param(0).ok_or_else(|| RenderErrorReason::Other(String::from("Param not found for helper \"case\"")))?;
     let value = param.value().clone();

     if h.params().iter().skip(1).any(|x| x.value() == &value) {
@@ -1289,38 +1394,19 @@ fn case_helper<'reg, 'rc>(
     }
 }

-fn js_escape_helper<'reg, 'rc>(
-    h: &Helper<'reg, 'rc>,
-    _r: &'reg Handlebars<'_>,
-    _ctx: &'rc Context,
-    _rc: &mut RenderContext<'reg, 'rc>,
-    out: &mut dyn Output,
-) -> HelperResult {
-    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"jsesc\""))?;
-
-    let no_quote = h.param(1).is_some();
-
-    let value = param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"jsesc\" is not a String"))?;
-
-    let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
-    if !no_quote {
-        escaped_value = format!("&quot;{escaped_value}&quot;");
-    }
-
-    out.write(&escaped_value)?;
-    Ok(())
-}
-
 fn to_json<'reg, 'rc>(
-    h: &Helper<'reg, 'rc>,
+    h: &Helper<'rc>,
     _r: &'reg Handlebars<'_>,
     _ctx: &'rc Context,
     _rc: &mut RenderContext<'reg, 'rc>,
     out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h.param(0).ok_or_else(|| RenderError::new("Expected 1 parameter for \"to_json\""))?.value();
+    let param = h
+        .param(0)
+        .ok_or_else(|| RenderErrorReason::Other(String::from("Expected 1 parameter for \"to_json\"")))?
+        .value();
     let json = serde_json::to_string(param)
-        .map_err(|e| RenderError::new(format!("Can't serialize parameter to JSON: {e}")))?;
+        .map_err(|e| RenderErrorReason::Other(format!("Can't serialize parameter to JSON: {e}")))?;
     out.write(&json)?;
     Ok(())
 }
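For reference, a self-contained helper in the handlebars 5 style this diff migrates to (the helper name and behavior are invented for illustration; the error plumbing mirrors the code above, since RenderError::new was removed in handlebars 5 in favor of RenderErrorReason):

use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderErrorReason};

fn upper_helper(
    h: &Helper<'_>,
    _r: &Handlebars<'_>,
    _ctx: &Context,
    _rc: &mut RenderContext<'_, '_>,
    out: &mut dyn Output,
) -> HelperResult {
    // `?` converts RenderErrorReason into RenderError, replacing the
    // removed RenderError::new constructor.
    let param = h
        .param(0)
        .and_then(|p| p.value().as_str())
        .ok_or_else(|| RenderErrorReason::Other(String::from("Expected 1 string parameter for \"upper\"")))?;
    out.write(&param.to_uppercase())?;
    Ok(())
}

fn main() {
    let mut hb = Handlebars::new();
    hb.register_helper("upper", Box::new(upper_helper));
    hb.register_template_string("t", "{{upper name}}").unwrap();
    let data = serde_json::json!({"name": "vaultwarden"});
    assert_eq!(hb.render("t", &data).unwrap(), "VAULTWARDEN");
}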
src/db/mod.rs

@@ -7,7 +7,6 @@ use diesel::{

 use rocket::{
     http::Status,
-    outcome::IntoOutcome,
     request::{FromRequest, Outcome},
     Request,
 };
@@ -301,19 +300,17 @@ pub trait FromDb {

 impl<T: FromDb> FromDb for Vec<T> {
     type Output = Vec<T::Output>;
-    #[allow(clippy::wrong_self_convention)]
     #[inline(always)]
     fn from_db(self) -> Self::Output {
-        self.into_iter().map(crate::db::FromDb::from_db).collect()
+        self.into_iter().map(FromDb::from_db).collect()
     }
 }

 impl<T: FromDb> FromDb for Option<T> {
     type Output = Option<T::Output>;
-    #[allow(clippy::wrong_self_convention)]
     #[inline(always)]
     fn from_db(self) -> Self::Output {
-        self.map(crate::db::FromDb::from_db)
+        self.map(FromDb::from_db)
     }
 }
@@ -369,34 +366,42 @@ pub mod models;

 /// Creates a back-up of the sqlite database
 /// MySQL/MariaDB and PostgreSQL are not supported.
-pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
+pub async fn backup_database(conn: &mut DbConn) -> Result<String, Error> {
     db_run! {@raw conn:
         postgresql, mysql {
             let _ = conn;
             err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
         }
         sqlite {
-            use std::path::Path;
-            let db_url = CONFIG.database_url();
-            let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
-            let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
-            diesel::sql_query(format!("VACUUM INTO '{db_path}/db_{file_date}.sqlite3'")).execute(conn)?;
-            Ok(())
+            backup_sqlite_database(conn)
         }
     }
 }

+#[cfg(sqlite)]
+pub fn backup_sqlite_database(conn: &mut diesel::sqlite::SqliteConnection) -> Result<String, Error> {
+    use diesel::RunQueryDsl;
+    let db_url = CONFIG.database_url();
+    let db_path = std::path::Path::new(&db_url).parent().unwrap();
+    let backup_file = db_path
+        .join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S")))
+        .to_string_lossy()
+        .into_owned();
+    diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?;
+    Ok(backup_file)
+}
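The same VACUUM INTO technique, sketched outside diesel with the rusqlite crate (an assumed dependency; names are illustrative). SQLite's VACUUM INTO writes a compacted, consistent copy of the live database to a fresh file without taking the database offline:

fn backup(conn: &rusqlite::Connection, dest: &str) -> rusqlite::Result<()> {
    // Single quotes in SQL string literals are escaped by doubling them,
    // in case the destination path contains one.
    let path = dest.replace('\'', "''");
    conn.execute_batch(&format!("VACUUM INTO '{path}'"))
}

fn main() -> rusqlite::Result<()> {
    let conn = rusqlite::Connection::open("db.sqlite3")?;
    backup(&conn, "db_backup.sqlite3")
}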
 /// Get the SQL Server version
 pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
     db_run! {@raw conn:
         postgresql, mysql {
-            sql_function!{
+            define_sql_function!{
                 fn version() -> diesel::sql_types::Text;
             }
             diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
         sqlite {
-            sql_function!{
+            define_sql_function!{
                 fn sqlite_version() -> diesel::sql_types::Text;
             }
             diesel::select(sqlite_version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
@@ -413,8 +418,11 @@ impl<'r> FromRequest<'r> for DbConn {

     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         match request.rocket().state::<DbPool>() {
-            Some(p) => p.get().await.map_err(|_| ()).into_outcome(Status::ServiceUnavailable),
-            None => Outcome::Failure((Status::InternalServerError, ())),
+            Some(p) => match p.get().await {
+                Ok(dbconn) => Outcome::Success(dbconn),
+                _ => Outcome::Error((Status::ServiceUnavailable, ())),
+            },
+            None => Outcome::Error((Status::InternalServerError, ())),
         }
     }
 }
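Rocket 0.5 renamed the guard failure variant from Outcome::Failure to Outcome::Error, which is what forces this rewrite. A standalone sketch of the mapping, generic over the success type so it compiles without vaultwarden's DbPool (request::Outcome<S, E> is Outcome<S, (Status, E), Status> under the hood):

use rocket::http::Status;
use rocket::outcome::Outcome;

fn pool_result_to_outcome<T>(res: Result<T, ()>) -> Outcome<T, (Status, ()), Status> {
    match res {
        // Pool handed out a connection: the guard succeeds.
        Ok(conn) => Outcome::Success(conn),
        // Pool exhausted or unavailable: fail the request with 503.
        Err(_) => Outcome::Error((Status::ServiceUnavailable, ())),
    }
}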
src/db/models/attachment.rs

@@ -1,5 +1,6 @@
 use std::io::ErrorKind;

+use bigdecimal::{BigDecimal, ToPrimitive};
 use serde_json::Value;

 use crate::CONFIG;
@@ -13,14 +14,14 @@ db_object! {
         pub id: String,
         pub cipher_uuid: String,
         pub file_name: String, // encrypted
-        pub file_size: i32,
+        pub file_size: i64,
         pub akey: Option<String>,
     }
 }

 /// Local methods
 impl Attachment {
-    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i32, akey: Option<String>) -> Self {
+    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option<String>) -> Self {
         Self {
             id,
             cipher_uuid,
@@ -41,13 +42,13 @@ impl Attachment {

     pub fn to_json(&self, host: &str) -> Value {
         json!({
-            "Id": self.id,
-            "Url": self.get_url(host),
-            "FileName": self.file_name,
-            "Size": self.file_size.to_string(),
-            "SizeName": crate::util::get_display_size(self.file_size),
-            "Key": self.akey,
-            "Object": "attachment"
+            "id": self.id,
+            "url": self.get_url(host),
+            "fileName": self.file_name,
+            "size": self.file_size.to_string(),
+            "sizeName": crate::util::get_display_size(self.file_size),
+            "key": self.akey,
+            "object": "attachment"
         })
     }
 }
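This and the cipher hunks further down move the hand-built json! maps from PascalCase to camelCase keys. With a typed struct the same wire format would come from serde's rename_all attribute; a sketch (the struct is illustrative, not vaultwarden code, and assumes serde/serde_json dependencies):

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct AttachmentJson {
    id: String,
    file_name: String, // serialized as "fileName"
    size_name: String, // serialized as "sizeName"
}

fn main() {
    let a = AttachmentJson {
        id: "abc".into(),
        file_name: "encrypted-name".into(),
        size_name: "2.4 KB".into(),
    };
    println!("{}", serde_json::to_string(&a).unwrap());
    // {"id":"abc","fileName":"encrypted-name","sizeName":"2.4 KB"}
}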
@@ -94,7 +95,7 @@ impl Attachment {

     pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
-            crate::util::retry(
+            let _: () = crate::util::retry(
                 || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
                 10,
             )
@@ -102,7 +103,7 @@ impl Attachment {

             let file_path = &self.get_file_path();

-            match crate::util::delete_file(file_path) {
+            match std::fs::remove_file(file_path) {
                 // Ignore "file not found" errors. This can happen when the
                 // upstream caller has already cleaned up the file as part of
                 // its own error handling.
@@ -145,13 +146,18 @@ impl Attachment {

     pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
-            let result: Option<i64> = attachments::table
+            let result: Option<BigDecimal> = attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                 .filter(ciphers::user_uuid.eq(user_uuid))
                 .select(diesel::dsl::sum(attachments::file_size))
                 .first(conn)
                 .expect("Error loading user attachment total size");
-            result.unwrap_or(0)
+
+            match result.map(|r| r.to_i64()) {
+                Some(Some(r)) => r,
+                Some(None) => i64::MAX,
+                None => 0
+            }
         }}
     }
@@ -168,13 +174,18 @@ impl Attachment {

     pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
-            let result: Option<i64> = attachments::table
+            let result: Option<BigDecimal> = attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                 .filter(ciphers::organization_uuid.eq(org_uuid))
                 .select(diesel::dsl::sum(attachments::file_size))
                 .first(conn)
                 .expect("Error loading user attachment total size");
-            result.unwrap_or(0)
+
+            match result.map(|r| r.to_i64()) {
+                Some(Some(r)) => r,
+                Some(None) => i64::MAX,
+                None => 0
+            }
         }}
     }
src/db/models/auth_request.rs

@@ -140,7 +140,7 @@ impl AuthRequest {
     }

     pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
-        let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); //after 5 minutes, clients reject the request
+        let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); //after 5 minutes, clients reject the request
         for auth_request in Self::find_created_before(&expiry_time, conn).await {
             auth_request.delete(conn).await.ok();
         }

src/db/models/cipher.rs

@@ -1,5 +1,6 @@
+use crate::util::LowerCase;
 use crate::CONFIG;
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{NaiveDateTime, TimeDelta, Utc};
 use serde_json::Value;

 use super::{
@@ -78,21 +79,39 @@ impl Cipher {
         }
     }

-    pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
+    pub fn validate_cipher_data(cipher_data: &[CipherData]) -> EmptyResult {
         let mut validation_errors = serde_json::Map::new();
+        let max_note_size = CONFIG._max_note_size();
+        let max_note_size_msg =
+            format!("The field Notes exceeds the maximum encrypted value length of {} characters.", &max_note_size);
         for (index, cipher) in cipher_data.iter().enumerate() {
-            if let Some(note) = &cipher.Notes {
-                if note.len() > 10_000 {
-                    validation_errors.insert(
-                        format!("Ciphers[{index}].Notes"),
-                        serde_json::to_value([
-                            "The field Notes exceeds the maximum encrypted value length of 10000 characters.",
-                        ])
-                        .unwrap(),
-                    );
+            // Validate the note size and if it is exceeded return a warning
+            if let Some(note) = &cipher.notes {
+                if note.len() > max_note_size {
+                    validation_errors
+                        .insert(format!("Ciphers[{index}].Notes"), serde_json::to_value([&max_note_size_msg]).unwrap());
                 }
             }
+
+            // Validate the password history if it contains `null` values and if so, return a warning
+            if let Some(Value::Array(password_history)) = &cipher.password_history {
+                for pwh in password_history {
+                    if let Value::Object(pwo) = pwh {
+                        if pwo.get("password").is_some_and(|p| !p.is_string()) {
+                            validation_errors.insert(
+                                format!("Ciphers[{index}].Notes"),
+                                serde_json::to_value([
+                                    "The password history contains a `null` value. Only strings are allowed.",
+                                ])
+                                .unwrap(),
+                            );
+                            break;
+                        }
+                    }
+                }
+            }
         }

         if !validation_errors.is_empty() {
             let err_json = json!({
                 "message": "The model state is invalid.",
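The new password-history check, extracted into a self-contained sketch (function name invented for illustration): flag any history entry whose "password" key is present but not a string, e.g. null.

use serde_json::{json, Value};

fn has_invalid_history(password_history: &Value) -> bool {
    match password_history {
        Value::Array(items) => items.iter().any(|pwh| {
            pwh.as_object()
                .and_then(|o| o.get("password"))
                .is_some_and(|p| !p.is_string())
        }),
        _ => false,
    }
}

fn main() {
    let ok = json!([{"password": "old-secret"}]);
    let bad = json!([{"password": null}]);
    assert!(!has_invalid_history(&ok));
    assert!(has_invalid_history(&bad));
}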
|
@ -135,10 +154,6 @@ impl Cipher {
|
|||
}
|
||||
}
|
||||
|
||||
let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
|
||||
let password_history_json =
|
||||
self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
|
||||
|
||||
// We don't need these values at all for Organizational syncs
|
||||
// Skip any other database calls if this is the case and just return false.
|
||||
let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
|
||||
|
@ -153,20 +168,96 @@ impl Cipher {
|
|||
(false, false)
|
||||
};
|
||||
|
||||
let fields_json: Vec<_> = self
|
||||
.fields
|
||||
.as_ref()
|
||||
.and_then(|s| {
|
||||
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
|
||||
.inspect_err(|e| warn!("Error parsing fields {e:?} for {}", self.uuid))
|
||||
.ok()
|
||||
})
|
||||
.map(|d| {
|
||||
d.into_iter()
|
||||
.map(|mut f| {
|
||||
// Check if the `type` key is a number, strings break some clients
|
||||
// The fallback type is the hidden type `1`. this should prevent accidental data disclosure
|
||||
// If not try to convert the string value to a number and fallback to `1`
|
||||
// If it is both not a number and not a string, fallback to `1`
|
||||
match f.data.get("type") {
|
||||
Some(t) if t.is_number() => {}
|
||||
Some(t) if t.is_string() => {
|
||||
let type_num = &t.as_str().unwrap_or("1").parse::<u8>().unwrap_or(1);
|
||||
f.data["type"] = json!(type_num);
|
||||
}
|
||||
_ => {
|
||||
f.data["type"] = json!(1);
|
||||
}
|
||||
}
|
||||
f.data
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let password_history_json: Vec<_> = self
|
||||
.password_history
|
||||
.as_ref()
|
||||
.and_then(|s| {
|
||||
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
|
||||
.inspect_err(|e| warn!("Error parsing password history {e:?} for {}", self.uuid))
|
||||
.ok()
|
||||
})
|
||||
.map(|d| {
|
||||
// Check every password history item if they are valid and return it.
|
||||
// If a password field has the type `null` skip it, it breaks newer Bitwarden clients
|
||||
// A second check is done to verify the lastUsedDate exists and is a valid DateTime string, if not the epoch start time will be used
|
||||
d.into_iter()
|
||||
.filter_map(|d| match d.data.get("password") {
|
||||
Some(p) if p.is_string() => Some(d.data),
|
||||
_ => None,
|
||||
})
|
||||
.map(|mut d| match d.get("lastUsedDate").and_then(|l| l.as_str()) {
|
||||
Some(l) => {
|
||||
d["lastUsedDate"] = json!(crate::util::validate_and_format_date(l));
|
||||
d
|
||||
}
|
||||
_ => {
|
||||
d["lastUsedDate"] = json!("1970-01-01T00:00:00.000000Z");
|
||||
d
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
// Get the type_data or a default to an empty json object '{}'.
|
||||
// If not passing an empty object, mobile clients will crash.
|
||||
let mut type_data_json: Value =
|
||||
serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
|
||||
let mut type_data_json =
|
||||
serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_else(|_| {
|
||||
warn!("Error parsing data field for {}", self.uuid);
|
||||
Value::Object(serde_json::Map::new())
|
||||
});
|
||||
|
||||
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
|
||||
// Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
|
||||
if self.atype == 1 {
|
||||
if type_data_json["Uris"].is_array() {
|
||||
let uri = type_data_json["Uris"][0]["Uri"].clone();
|
||||
type_data_json["Uri"] = uri;
|
||||
if type_data_json["uris"].is_array() {
|
||||
let uri = type_data_json["uris"][0]["uri"].clone();
|
||||
type_data_json["uri"] = uri;
|
||||
} else {
|
||||
// Upstream always has an Uri key/value
|
||||
type_data_json["Uri"] = Value::Null;
|
||||
type_data_json["uri"] = Value::Null;
|
||||
}
|
||||
}
|
||||
|
||||
// Fix secure note issues when data is invalid
|
||||
// This breaks at least the native mobile clients
|
||||
if self.atype == 2 {
|
||||
match type_data_json {
|
||||
Value::Object(ref t) if t.get("type").is_some_and(|t| t.is_number()) => {}
|
||||
_ => {
|
||||
type_data_json = json!({"type": 0});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
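The custom-field "type" normalization above, isolated into a sketch (function name invented for illustration): numbers pass through, numeric strings are parsed, and anything else falls back to the hidden type 1 so no value is accidentally revealed.

use serde_json::{json, Value};

fn normalize_field_type(f: &mut Value) {
    match f.get("type") {
        Some(t) if t.is_number() => {}
        Some(t) if t.is_string() => {
            let type_num = t.as_str().unwrap_or("1").parse::<u8>().unwrap_or(1);
            f["type"] = json!(type_num);
        }
        _ => f["type"] = json!(1), // missing or wrong-typed: hide it
    }
}

fn main() {
    let mut f = json!({"name": "pin", "type": "0"});
    normalize_field_type(&mut f);
    assert_eq!(f["type"], json!(0));

    let mut g = json!({"name": "pin", "type": null});
    normalize_field_type(&mut g);
    assert_eq!(g["type"], json!(1));
}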
|
@ -175,10 +266,10 @@ impl Cipher {
|
|||
|
||||
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
|
||||
// data_json should always contain the following keys with every atype
|
||||
data_json["Fields"] = fields_json.clone();
|
||||
data_json["Name"] = json!(self.name);
|
||||
data_json["Notes"] = json!(self.notes);
|
||||
data_json["PasswordHistory"] = password_history_json.clone();
|
||||
data_json["fields"] = json!(fields_json);
|
||||
data_json["name"] = json!(self.name);
|
||||
data_json["notes"] = json!(self.notes);
|
||||
data_json["passwordHistory"] = Value::Array(password_history_json.clone());
|
||||
|
||||
let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
|
||||
|
@ -187,7 +278,7 @@ impl Cipher {
|
|||
Cow::from(Vec::with_capacity(0))
|
||||
}
|
||||
} else {
|
||||
Cow::from(self.get_collections(user_uuid.to_string(), conn).await)
|
||||
Cow::from(self.get_admin_collections(user_uuid.to_string(), conn).await)
|
||||
};
|
||||
|
||||
// There are three types of cipher response models in upstream
|
||||
|
@ -198,48 +289,48 @@ impl Cipher {
|
|||
//
|
||||
// Ref: https://github.com/bitwarden/server/blob/master/src/Core/Models/Api/Response/CipherResponseModel.cs
|
||||
let mut json_object = json!({
|
||||
"Object": "cipherDetails",
|
||||
"Id": self.uuid,
|
||||
"Type": self.atype,
|
||||
"CreationDate": format_date(&self.created_at),
|
||||
"RevisionDate": format_date(&self.updated_at),
|
||||
"DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
|
||||
"Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
|
||||
"OrganizationId": self.organization_uuid,
|
||||
"Key": self.key,
|
||||
"Attachments": attachments_json,
|
||||
"object": "cipherDetails",
|
||||
"id": self.uuid,
|
||||
"type": self.atype,
|
||||
"creationDate": format_date(&self.created_at),
|
||||
"revisionDate": format_date(&self.updated_at),
|
||||
"deletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
|
||||
"reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
|
||||
"organizationId": self.organization_uuid,
|
||||
"key": self.key,
|
||||
"attachments": attachments_json,
|
||||
// We have UseTotp set to true by default within the Organization model.
|
||||
// This variable together with UsersGetPremium is used to show or hide the TOTP counter.
|
||||
"OrganizationUseTotp": true,
|
||||
"organizationUseTotp": true,
|
||||
|
||||
// This field is specific to the cipherDetails type.
|
||||
"CollectionIds": collection_ids,
|
||||
"collectionIds": collection_ids,
|
||||
|
||||
"Name": self.name,
|
||||
"Notes": self.notes,
|
||||
"Fields": fields_json,
|
||||
"name": self.name,
|
||||
"notes": self.notes,
|
||||
"fields": fields_json,
|
||||
|
||||
"Data": data_json,
|
||||
"data": data_json,
|
||||
|
||||
"PasswordHistory": password_history_json,
|
||||
"passwordHistory": password_history_json,
|
||||
|
||||
// All Cipher types are included by default as null, but only the matching one will be populated
|
||||
"Login": null,
|
||||
"SecureNote": null,
|
||||
"Card": null,
|
||||
"Identity": null,
|
||||
"login": null,
|
||||
"secureNote": null,
|
||||
"card": null,
|
||||
"identity": null,
|
||||
});
|
||||
|
||||
// These values are only needed for user/default syncs
|
||||
// Not during an organizational sync like `get_org_details`
|
||||
// Skip adding these fields in that case
|
||||
if sync_type == CipherSyncType::User {
|
||||
json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
|
||||
} else {
|
||||
self.get_folder_uuid(user_uuid, conn).await
|
||||
});
|
||||
json_object["Favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
json_object["favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
cipher_sync_data.cipher_favorites.contains(&self.uuid)
|
||||
} else {
|
||||
self.is_favorite(user_uuid, conn).await
|
||||
|
@ -247,15 +338,15 @@ impl Cipher {
|
|||
// These values are true by default, but can be false if the
|
||||
// cipher belongs to a collection or group where the org owner has enabled
|
||||
// the "Read Only" or "Hide Passwords" restrictions for the user.
|
||||
json_object["Edit"] = json!(!read_only);
|
||||
json_object["ViewPassword"] = json!(!hide_passwords);
|
||||
json_object["edit"] = json!(!read_only);
|
||||
json_object["viewPassword"] = json!(!hide_passwords);
|
||||
}
|
||||
|
||||
let key = match self.atype {
|
||||
1 => "Login",
|
||||
2 => "SecureNote",
|
||||
3 => "Card",
|
||||
4 => "Identity",
|
||||
1 => "login",
|
||||
2 => "secureNote",
|
||||
3 => "card",
|
||||
4 => "identity",
|
||||
_ => panic!("Wrong type"),
|
||||
};
|
||||
|
||||
|
@ -273,7 +364,16 @@ impl Cipher {
|
|||
None => {
|
||||
// Belongs to Organization, need to update affected users
|
||||
if let Some(ref org_uuid) = self.organization_uuid {
|
||||
for user_org in UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await.iter() {
|
||||
// users having access to the collection
|
||||
let mut collection_users =
|
||||
UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await;
|
||||
if CONFIG.org_groups_enabled() {
|
||||
// members of a group having access to the collection
|
||||
let group_users =
|
||||
UserOrganization::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await;
|
||||
collection_users.extend(group_users);
|
||||
}
|
||||
for user_org in collection_users {
|
||||
User::update_uuid_revision(&user_org.user_uuid, conn).await;
|
||||
user_uuids.push(user_org.user_uuid.clone())
|
||||
}
|
||||
|
@ -352,7 +452,7 @@ impl Cipher {
|
|||
pub async fn purge_trash(conn: &mut DbConn) {
|
||||
if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
|
||||
let now = Utc::now().naive_utc();
|
||||
let dt = now - Duration::days(auto_delete_days);
|
||||
let dt = now - TimeDelta::try_days(auto_delete_days).unwrap();
|
||||
for cipher in Self::find_deleted_before(&dt, conn).await {
|
||||
cipher.delete(conn).await.ok();
|
||||
}
|
||||
|
@ -417,9 +517,12 @@ impl Cipher {
|
|||
cipher_sync_data: Option<&CipherSyncData>,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
if !CONFIG.org_groups_enabled() {
|
||||
return false;
|
||||
}
|
||||
if let Some(ref org_uuid) = self.organization_uuid {
|
||||
if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
|
||||
return cipher_sync_data.user_group_full_access_for_organizations.contains(org_uuid);
|
||||
} else {
|
||||
return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
|
||||
}
|
||||
|
@ -512,6 +615,9 @@ impl Cipher {
|
|||
}
|
||||
|
||||
async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> {
|
||||
if !CONFIG.org_groups_enabled() {
|
||||
return Vec::new();
|
||||
}
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::uuid.eq(&self.uuid))
|
||||
|
@ -580,6 +686,17 @@ impl Cipher {
|
|||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::uuid.eq(cipher_uuid))
|
||||
.filter(ciphers::organization_uuid.eq(org_uuid))
|
||||
.first::<CipherDb>(conn)
|
||||
.ok()
|
||||
.from_db()
|
||||
}}
|
||||
}
|
||||
|
||||
// Find all ciphers accessible or visible to the specified user.
|
||||
//
|
||||
// "Accessible" means the user has read access to the cipher, either via
|
||||
|
@@ -593,50 +710,84 @@ impl Cipher {
     // result, those ciphers will not appear in "My Vault" for the org
     // owner/admin, but they can still be accessed via the org vault view.
     pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
-        db_run! {conn: {
-            let mut query = ciphers::table
-                .left_join(ciphers_collections::table.on(
-                    ciphers::uuid.eq(ciphers_collections::cipher_uuid)
-                ))
-                .left_join(users_organizations::table.on(
-                    ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
-                        .and(users_organizations::user_uuid.eq(user_uuid))
-                        .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
-                ))
-                .left_join(users_collections::table.on(
-                    ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
-                        // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
-                        .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
-                ))
-                .left_join(groups_users::table.on(
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-                ))
-                .left_join(groups::table.on(
-                    groups::uuid.eq(groups_users::groups_uuid)
-                ))
-                .left_join(collections_groups::table.on(
-                    collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
-                        collections_groups::groups_uuid.eq(groups::uuid)
-                    )
-                ))
-                .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
-                .or_filter(users_organizations::access_all.eq(true)) // access_all in org
-                .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
-                .or_filter(groups::access_all.eq(true)) // Access via groups
-                .or_filter(collections_groups::collections_uuid.is_not_null()) // Access via groups
-                .into_boxed();
-
-            if !visible_only {
-                query = query.or_filter(
-                    users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
-                );
-            }
-
-            query
-                .select(ciphers::all_columns)
-                .distinct()
-                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! {conn: {
+                let mut query = ciphers::table
+                    .left_join(ciphers_collections::table.on(
+                        ciphers::uuid.eq(ciphers_collections::cipher_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
+                            .and(users_organizations::user_uuid.eq(user_uuid))
+                            .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                    ))
+                    .left_join(users_collections::table.on(
+                        ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
+                            // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
+                            .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
+                    ))
+                    .left_join(groups_users::table.on(
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                    ))
+                    .left_join(groups::table.on(
+                        groups::uuid.eq(groups_users::groups_uuid)
+                    ))
+                    .left_join(collections_groups::table.on(
+                        collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
+                            collections_groups::groups_uuid.eq(groups::uuid)
+                        )
+                    ))
+                    .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
+                    .or_filter(users_organizations::access_all.eq(true)) // access_all in org
+                    .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
+                    .or_filter(groups::access_all.eq(true)) // Access via groups
+                    .or_filter(collections_groups::collections_uuid.is_not_null()) // Access via groups
+                    .into_boxed();
+
+                if !visible_only {
+                    query = query.or_filter(
+                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
+                    );
+                }
+
+                query
+                    .select(ciphers::all_columns)
+                    .distinct()
+                    .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+            }}
+        } else {
+            db_run! {conn: {
+                let mut query = ciphers::table
+                    .left_join(ciphers_collections::table.on(
+                        ciphers::uuid.eq(ciphers_collections::cipher_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
+                            .and(users_organizations::user_uuid.eq(user_uuid))
+                            .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                    ))
+                    .left_join(users_collections::table.on(
+                        ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
+                            // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
+                            .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
+                    ))
+                    .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
+                    .or_filter(users_organizations::access_all.eq(true)) // access_all in org
+                    .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
+                    .into_boxed();
+
+                if !visible_only {
+                    query = query.or_filter(
+                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
+                    );
+                }
+
+                query
+                    .select(ciphers::all_columns)
+                    .distinct()
+                    .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+            }}
+        }
     }

     // Find all ciphers visible to the specified user.
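Both branches rely on diesel's `into_boxed()`, which erases the query's concrete type so filters can be appended behind runtime checks such as `visible_only`. A self-contained sketch of the pattern on a hypothetical `items` table (SQLite backend chosen for brevity):

use diesel::prelude::*;
use diesel::sqlite::{Sqlite, SqliteConnection};

diesel::table! {
    items (id) {
        id -> Integer,
        owner -> Text,
        visible -> Bool,
    }
}

// into_boxed() erases the query's concrete type, so filters can be
// appended conditionally at runtime, as both branches above do.
fn find_owners(conn: &mut SqliteConnection, owner: &str, visible_only: bool) -> QueryResult<Vec<String>> {
    let mut query = items::table.select(items::owner).distinct().into_boxed::<Sqlite>();
    query = query.filter(items::owner.eq(owner.to_string()));
    if visible_only {
        query = query.filter(items::visible.eq(true));
    }
    query.load::<String>(conn)
}

fn main() -> QueryResult<()> {
    let mut conn = SqliteConnection::establish(":memory:").expect("in-memory db");
    diesel::sql_query("CREATE TABLE items (id INTEGER PRIMARY KEY, owner TEXT NOT NULL, visible BOOLEAN NOT NULL)")
        .execute(&mut conn)?;
    diesel::insert_into(items::table)
        .values((items::owner.eq("alice"), items::visible.eq(false)))
        .execute(&mut conn)?;
    assert!(find_owners(&mut conn, "alice", true)?.is_empty()); // filtered out at runtime
    assert_eq!(find_owners(&mut conn, "alice", false)?.len(), 1);
    Ok(())
}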
@@ -705,30 +856,123 @@ impl Cipher {
     }

     pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
-        db_run! {conn: {
-            ciphers_collections::table
-                .inner_join(collections::table.on(
-                    collections::uuid.eq(ciphers_collections::collection_uuid)
-                ))
-                .inner_join(users_organizations::table.on(
-                    users_organizations::org_uuid.eq(collections::org_uuid).and(
-                        users_organizations::user_uuid.eq(user_id.clone())
-                    )
-                ))
-                .left_join(users_collections::table.on(
-                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
-                        users_collections::user_uuid.eq(user_id.clone())
-                    )
-                ))
-                .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
-                .filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
-                    users_organizations::access_all.eq(true).or( // User has access all
-                        users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
-                    )
-                ))
-                .select(ciphers_collections::collection_uuid)
-                .load::<String>(conn).unwrap_or_default()
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(groups_users::table.on(
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                    ))
+                    .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                    .left_join(collections_groups::table.on(
+                        collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(collections_groups::groups_uuid.eq(groups::uuid))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                        .or(groups::access_all.eq(true)) // Access via groups
+                        .or(collections_groups::collections_uuid.is_not_null() // Access via groups
+                            .and(collections_groups::read_only.eq(false)))
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        } else {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .inner_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        }
+    }
+
+    pub async fn get_admin_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
+        if CONFIG.org_groups_enabled() {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(groups_users::table.on(
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                    ))
+                    .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                    .left_join(collections_groups::table.on(
+                        collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(collections_groups::groups_uuid.eq(groups::uuid))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                        .or(groups::access_all.eq(true)) // Access via groups
+                        .or(collections_groups::collections_uuid.is_not_null() // Access via groups
+                            .and(collections_groups::read_only.eq(false)))
+                        .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        } else {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .inner_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                        .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        }
+    }

     /// Return a Vec with (cipher_uuid, collection_uuid)
src/db/models/collection.rs

@@ -1,6 +1,7 @@
 use serde_json::Value;

-use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
 use crate::CONFIG;

 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -48,11 +49,11 @@ impl Collection {

     pub fn to_json(&self) -> Value {
         json!({
-            "ExternalId": self.external_id,
-            "Id": self.uuid,
-            "OrganizationId": self.org_uuid,
-            "Name": self.name,
-            "Object": "collection",
+            "externalId": self.external_id,
+            "id": self.uuid,
+            "organizationId": self.org_uuid,
+            "name": self.name,
+            "object": "collection",
         })
     }

@@ -77,30 +78,57 @@ impl Collection {
         cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
         conn: &mut DbConn,
     ) -> Value {
-        let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
+        let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) = cipher_sync_data {
             match cipher_sync_data.user_organizations.get(&self.org_uuid) {
-                Some(uo) if uo.has_full_access() => (false, false),
-                Some(_) => {
+                // Only for Manager types Bitwarden returns true for the can_manage option
+                // Owners and Admins always have true
+                Some(uo) if uo.has_full_access() => (false, false, uo.atype >= UserOrgType::Manager),
+                Some(uo) => {
+                    // Only let a manager manage collections when the have full read/write access
+                    let is_manager = uo.atype == UserOrgType::Manager;
                     if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
-                        (uc.read_only, uc.hide_passwords)
+                        (uc.read_only, uc.hide_passwords, is_manager && !uc.read_only && !uc.hide_passwords)
                     } else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
-                        (cg.read_only, cg.hide_passwords)
+                        (cg.read_only, cg.hide_passwords, is_manager && !cg.read_only && !cg.hide_passwords)
                     } else {
-                        (false, false)
+                        (false, false, false)
                     }
                 }
-                _ => (true, true),
+                _ => (true, true, false),
             }
         } else {
-            (!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
+            match UserOrganization::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
+                Some(ou) if ou.has_full_access() => (false, false, ou.atype >= UserOrgType::Manager),
+                Some(ou) => {
+                    let is_manager = ou.atype == UserOrgType::Manager;
+                    let read_only = !self.is_writable_by_user(user_uuid, conn).await;
+                    let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await;
+                    (read_only, hide_passwords, is_manager && !read_only && !hide_passwords)
+                }
+                _ => (
+                    !self.is_writable_by_user(user_uuid, conn).await,
+                    self.hide_passwords_for_user(user_uuid, conn).await,
+                    false,
+                ),
+            }
         };

         let mut json_object = self.to_json();
-        json_object["Object"] = json!("collectionDetails");
-        json_object["ReadOnly"] = json!(read_only);
-        json_object["HidePasswords"] = json!(hide_passwords);
+        json_object["object"] = json!("collectionDetails");
+        json_object["readOnly"] = json!(read_only);
+        json_object["hidePasswords"] = json!(hide_passwords);
+        json_object["manage"] = json!(can_manage);
         json_object
     }

+    pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
+        org_user.has_status(UserOrgStatus::Confirmed)
+            && (org_user.has_full_access()
+                || CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
+                || (CONFIG.org_groups_enabled()
+                    && (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
+                        || GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
+    }
 }

 use crate::db::DbConn;
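The third tuple element computed above feeds the `manage` flag that newer Bitwarden clients expect on `collectionDetails`. Distilled from the branches, the rule looks roughly like this (types simplified, names hypothetical):

#[derive(PartialEq)]
enum OrgType { User, Manager, Admin } // simplified stand-in for UserOrgType

// Admins (and Owners, via full access) always manage; a Manager only
// manages a collection it can fully read and write.
fn can_manage(atype: &OrgType, read_only: bool, hide_passwords: bool) -> bool {
    *atype == OrgType::Admin || (*atype == OrgType::Manager && !read_only && !hide_passwords)
}

fn main() {
    assert!(can_manage(&OrgType::Manager, false, false));
    assert!(!can_manage(&OrgType::Manager, true, false)); // read-only blocks manage
    assert!(!can_manage(&OrgType::User, false, false));
    assert!(can_manage(&OrgType::Admin, true, true));
}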
@@ -181,58 +209,74 @@ impl Collection {
     }

     pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> {
-        db_run! { conn: {
-            collections::table
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(collections::uuid).and(
-                    users_collections::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(users_organizations::table.on(
-                collections::org_uuid.eq(users_organizations::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(groups_users::table.on(
-                groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-            ))
-            .left_join(groups::table.on(
-                groups::uuid.eq(groups_users::groups_uuid)
-            ))
-            .left_join(collections_groups::table.on(
-                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
-                    collections_groups::collections_uuid.eq(collections::uuid)
-                )
-            ))
-            .filter(
-                users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
-            )
-            .filter(
-                users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
-                    users_organizations::access_all.eq(true) // access_all in Organization
-                ).or(
-                    groups::access_all.eq(true) // access_all in groups
-                ).or( // access via groups
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
-                        collections_groups::collections_uuid.is_not_null()
-                    )
-                )
-            )
-            .select(collections::all_columns)
-            .distinct()
-            .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! { conn: {
+                collections::table
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid).and(
+                        users_collections::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid).and(
+                        users_organizations::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .left_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
+                        collections_groups::collections_uuid.eq(collections::uuid)
+                    )
+                ))
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .filter(
+                    users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
+                        users_organizations::access_all.eq(true) // access_all in Organization
+                    ).or(
+                        groups::access_all.eq(true) // access_all in groups
+                    ).or( // access via groups
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
+                            collections_groups::collections_uuid.is_not_null()
+                        )
+                    )
+                )
+                .select(collections::all_columns)
+                .distinct()
+                .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
+            }}
+        } else {
+            db_run! { conn: {
+                collections::table
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid).and(
+                        users_collections::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid).and(
+                        users_organizations::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .filter(
+                    users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
+                        users_organizations::access_all.eq(true) // access_all in Organization
+                    )
+                )
+                .select(collections::all_columns)
+                .distinct()
+                .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
+            }}
+        }
     }

+    // Check if a user has access to a specific collection
+    // FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
+    // For now this is a good solution without making to much changes.
+    pub async fn has_access_by_collection_and_user_uuid(
+        collection_uuid: &str,
+        user_uuid: &str,
+        conn: &mut DbConn,
+    ) -> bool {
+        Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
+    }
+
     pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
@@ -277,91 +321,132 @@ impl Collection {
     }

     pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
-        db_run! { conn: {
-            collections::table
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(collections::uuid).and(
-                    users_collections::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(users_organizations::table.on(
-                collections::org_uuid.eq(users_organizations::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_uuid)
-                )
-            ))
-            .left_join(groups_users::table.on(
-                groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-            ))
-            .left_join(groups::table.on(
-                groups::uuid.eq(groups_users::groups_uuid)
-            ))
-            .left_join(collections_groups::table.on(
-                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
-                    collections_groups::collections_uuid.eq(collections::uuid)
-                )
-            ))
-            .filter(collections::uuid.eq(uuid))
-            .filter(
-                users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
-                    users_organizations::access_all.eq(true).or( // access_all in Organization
-                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
-                )).or(
-                    groups::access_all.eq(true) // access_all in groups
-                ).or( // access via groups
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
-                        collections_groups::collections_uuid.is_not_null()
-                    )
-                )
-            ).select(collections::all_columns)
-            .first::<CollectionDb>(conn).ok()
-            .from_db()
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! { conn: {
+                collections::table
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid).and(
+                        users_collections::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid).and(
+                        users_organizations::user_uuid.eq(user_uuid)
+                    )
+                ))
+                .left_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .left_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
+                        collections_groups::collections_uuid.eq(collections::uuid)
+                    )
+                ))
+                .filter(collections::uuid.eq(uuid))
+                .filter(
+                    users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
+                        users_organizations::access_all.eq(true).or( // access_all in Organization
+                            users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                    )).or(
+                        groups::access_all.eq(true) // access_all in groups
+                    ).or( // access via groups
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
+                            collections_groups::collections_uuid.is_not_null()
+                        )
+                    )
+                ).select(collections::all_columns)
+                .first::<CollectionDb>(conn).ok()
+                .from_db()
+            }}
+        } else {
+            db_run! { conn: {
+                collections::table
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid).and(
+                        users_collections::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid).and(
+                        users_organizations::user_uuid.eq(user_uuid)
+                    )
+                ))
+                .filter(collections::uuid.eq(uuid))
+                .filter(
+                    users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
+                        users_organizations::access_all.eq(true).or( // access_all in Organization
+                            users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                    ))
+                ).select(collections::all_columns)
+                .first::<CollectionDb>(conn).ok()
+                .from_db()
+            }}
+        }
     }

     pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
         let user_uuid = user_uuid.to_string();
-        db_run! { conn: {
-            collections::table
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(collections::uuid).and(
-                    users_collections::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(users_organizations::table.on(
-                collections::org_uuid.eq(users_organizations::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_uuid)
-                )
-            ))
-            .left_join(groups_users::table.on(
-                groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-            ))
-            .left_join(groups::table.on(
-                groups::uuid.eq(groups_users::groups_uuid)
-            ))
-            .left_join(collections_groups::table.on(
-                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
-                    collections_groups::collections_uuid.eq(collections::uuid)
-                )
-            ))
-            .filter(collections::uuid.eq(&self.uuid))
-            .filter(
-                users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or(// Directly accessed collection
-                    users_organizations::access_all.eq(true).or( // access_all in Organization
-                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
-                )).or(
-                    groups::access_all.eq(true) // access_all in groups
-                ).or( // access via groups
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
-                        collections_groups::collections_uuid.is_not_null().and(
-                            collections_groups::read_only.eq(false))
-                    )
-                )
-            )
-            .count()
-            .first::<i64>(conn)
-            .ok()
-            .unwrap_or(0) != 0
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! { conn: {
+                collections::table
+                    .filter(collections::uuid.eq(&self.uuid))
+                    .inner_join(users_organizations::table.on(
+                        collections::org_uuid.eq(users_organizations::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_uuid.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(collections::uuid)
+                            .and(users_collections::user_uuid.eq(user_uuid))
+                    ))
+                    .left_join(groups_users::table.on(
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                    ))
+                    .left_join(groups::table.on(
+                        groups::uuid.eq(groups_users::groups_uuid)
+                    ))
+                    .left_join(collections_groups::table.on(
+                        collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                            .and(collections_groups::collections_uuid.eq(collections::uuid))
+                    ))
+                    .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                        .or(users_organizations::access_all.eq(true)) // access_all via membership
+                        .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
+                            .and(users_collections::read_only.eq(false)))
+                        .or(groups::access_all.eq(true)) // access_all via group
+                        .or(collections_groups::collections_uuid.is_not_null() // write access given via group
+                            .and(collections_groups::read_only.eq(false)))
+                    )
+                    .count()
+                    .first::<i64>(conn)
+                    .ok()
+                    .unwrap_or(0) != 0
+            }}
+        } else {
+            db_run! { conn: {
+                collections::table
+                    .filter(collections::uuid.eq(&self.uuid))
+                    .inner_join(users_organizations::table.on(
+                        collections::org_uuid.eq(users_organizations::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_uuid.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(collections::uuid)
+                            .and(users_collections::user_uuid.eq(user_uuid))
+                    ))
+                    .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                        .or(users_organizations::access_all.eq(true)) // access_all via membership
+                        .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
+                            .and(users_collections::read_only.eq(false)))
+                    )
+                    .count()
+                    .first::<i64>(conn)
+                    .ok()
+                    .unwrap_or(0) != 0
+            }}
+        }
     }

     pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
@@ -581,7 +666,7 @@ impl CollectionUser {

         db_run! { conn: {
             for user in collectionusers {
-                diesel::delete(users_collections::table.filter(
+                let _: () = diesel::delete(users_collections::table.filter(
                     users_collections::user_uuid.eq(user_uuid)
                         .and(users_collections::collection_uuid.eq(user.collection_uuid))
                 ))
@@ -591,6 +676,10 @@ impl CollectionUser {
             Ok(())
         }}
     }
+
+    pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
+        Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
+    }
 }

 /// Database methods
src/db/models/device.rs

@@ -16,7 +16,7 @@ db_object! {
         pub user_uuid: String,

         pub name: String,
-        pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
+        pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs
         pub push_uuid: Option<String>,
         pub push_token: Option<String>,
@@ -59,12 +59,7 @@ impl Device {
         self.twofactor_remember = None;
     }

-    pub fn refresh_tokens(
-        &mut self,
-        user: &super::User,
-        orgs: Vec<super::UserOrganization>,
-        scope: Vec<String>,
-    ) -> (String, i64) {
+    pub fn refresh_tokens(&mut self, user: &super::User, scope: Vec<String>) -> (String, i64) {
         // If there is no refresh token, we create one
         if self.refresh_token.is_empty() {
             use data_encoding::BASE64URL;
@@ -72,13 +67,20 @@ impl Device {
         }

         // Update the expiration of the device and the last update date
-        let time_now = Utc::now().naive_utc();
-        self.updated_at = time_now;
+        let time_now = Utc::now();
+        self.updated_at = time_now.naive_utc();

-        let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
-        let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
-        let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
-        let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
+        // ---
+        // Disabled these keys to be added to the JWT since they could cause the JWT to get too large
+        // Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients
+        // Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out
+        // ---
+        // fn arg: orgs: Vec<super::UserOrganization>,
+        // ---
+        // let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
+        // let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
+        // let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
+        // let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();

         // Create the JWT claims struct, to send to the client
         use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
@@ -93,11 +95,16 @@ impl Device {
             email: user.email.clone(),
             email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(),

-            orgowner,
-            orgadmin,
-            orguser,
-            orgmanager,
-
+            // ---
+            // Disabled these keys to be added to the JWT since they could cause the JWT to get too large
+            // Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients
+            // Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out
+            // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+            // ---
+            // orgowner,
+            // orgadmin,
+            // orguser,
+            // orgmanager,
             sstamp: user.security_stamp.clone(),
             device: self.uuid.clone(),
             scope,
@@ -106,6 +113,14 @@ impl Device {

         (encode_jwt(&claims), DEFAULT_VALIDITY.num_seconds())
     }
+
+    pub fn is_push_device(&self) -> bool {
+        matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
+    }
+
+    pub fn is_registered(&self) -> bool {
+        self.push_uuid.is_some()
+    }
 }

 use crate::db::DbConn;
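With the per-organization role vectors commented out, the login JWT stays small even for members of many organizations (see the issue 4156 reference above). A rough sketch of encoding such a slimmed claims struct with the `jsonwebtoken` crate; the field set is abridged, and vaultwarden actually signs with an RSA key pair rather than the HS256 secret used here:

use jsonwebtoken::{encode, EncodingKey, Header};
use serde::Serialize;

#[derive(Serialize)]
struct LoginClaims {
    sub: String,    // user uuid
    email: String,
    device: String, // device uuid
    // org role vectors intentionally omitted: with many orgs they could
    // push the encoded token past common header-size limits.
}

fn main() -> Result<(), jsonwebtoken::errors::Error> {
    let claims = LoginClaims {
        sub: "user-uuid".into(),
        email: "user@example.com".into(),
        device: "device-uuid".into(),
    };
    // HS256 with a static secret, purely for the sketch.
    let token = encode(&Header::default(), &claims, &EncodingKey::from_secret(b"secret"))?;
    println!("token is {} bytes", token.len());
    Ok(())
}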
@@ -203,6 +218,7 @@ impl Device {
             .from_db()
         }}
     }

+    pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             devices::table
@@ -251,6 +267,9 @@ pub enum DeviceType {
     SafariExtension = 20,
     Sdk = 21,
     Server = 22,
+    WindowsCLI = 23,
+    MacOsCLI = 24,
+    LinuxCLI = 25,
 }

 impl fmt::Display for DeviceType {
@@ -262,23 +281,26 @@ impl fmt::Display for DeviceType {
             DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
             DeviceType::OperaExtension => write!(f, "Opera Extension"),
             DeviceType::EdgeExtension => write!(f, "Edge Extension"),
-            DeviceType::WindowsDesktop => write!(f, "Windows Desktop"),
-            DeviceType::MacOsDesktop => write!(f, "MacOS Desktop"),
-            DeviceType::LinuxDesktop => write!(f, "Linux Desktop"),
-            DeviceType::ChromeBrowser => write!(f, "Chrome Browser"),
-            DeviceType::FirefoxBrowser => write!(f, "Firefox Browser"),
-            DeviceType::OperaBrowser => write!(f, "Opera Browser"),
-            DeviceType::EdgeBrowser => write!(f, "Edge Browser"),
+            DeviceType::WindowsDesktop => write!(f, "Windows"),
+            DeviceType::MacOsDesktop => write!(f, "macOS"),
+            DeviceType::LinuxDesktop => write!(f, "Linux"),
+            DeviceType::ChromeBrowser => write!(f, "Chrome"),
+            DeviceType::FirefoxBrowser => write!(f, "Firefox"),
+            DeviceType::OperaBrowser => write!(f, "Opera"),
+            DeviceType::EdgeBrowser => write!(f, "Edge"),
             DeviceType::IEBrowser => write!(f, "Internet Explorer"),
             DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
-            DeviceType::AndroidAmazon => write!(f, "Android Amazon"),
+            DeviceType::AndroidAmazon => write!(f, "Android"),
             DeviceType::Uwp => write!(f, "UWP"),
-            DeviceType::SafariBrowser => write!(f, "Safari Browser"),
-            DeviceType::VivaldiBrowser => write!(f, "Vivaldi Browser"),
+            DeviceType::SafariBrowser => write!(f, "Safari"),
+            DeviceType::VivaldiBrowser => write!(f, "Vivaldi"),
             DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
             DeviceType::SafariExtension => write!(f, "Safari Extension"),
             DeviceType::Sdk => write!(f, "SDK"),
             DeviceType::Server => write!(f, "Server"),
+            DeviceType::WindowsCLI => write!(f, "Windows CLI"),
+            DeviceType::MacOsCLI => write!(f, "macOS CLI"),
+            DeviceType::LinuxCLI => write!(f, "Linux CLI"),
         }
     }
 }
@@ -309,6 +331,9 @@ impl DeviceType {
             20 => DeviceType::SafariExtension,
             21 => DeviceType::Sdk,
             22 => DeviceType::Server,
+            23 => DeviceType::WindowsCLI,
+            24 => DeviceType::MacOsCLI,
+            25 => DeviceType::LinuxCLI,
             _ => DeviceType::UnknownBrowser,
         }
     }
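The new CLI variants round-trip through the hand-written `from_i32`, with unknown codes degrading to `UnknownBrowser`. A compact stand-in showing the conversion and `Display` behaviour (enum abridged):

use std::fmt;

enum DeviceType { WindowsCli, Unknown } // abridged stand-in for the real enum

impl DeviceType {
    fn from_i32(value: i32) -> Self {
        match value {
            23 => DeviceType::WindowsCli,
            _ => DeviceType::Unknown, // unknown codes degrade gracefully
        }
    }
}

impl fmt::Display for DeviceType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            DeviceType::WindowsCli => write!(f, "Windows CLI"),
            DeviceType::Unknown => write!(f, "Unknown Browser"),
        }
    }
}

fn main() {
    assert_eq!(DeviceType::from_i32(23).to_string(), "Windows CLI");
    assert_eq!(DeviceType::from_i32(999).to_string(), "Unknown Browser");
}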
src/db/models/emergency_access.rs

@@ -26,7 +26,7 @@ db_object! {
     }
 }

-/// Local methods
+// Local methods

 impl EmergencyAccess {
     pub fn new(grantor_uuid: String, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self {
@@ -58,11 +58,11 @@ impl EmergencyAccess {

     pub fn to_json(&self) -> Value {
         json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "Object": "emergencyAccess",
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "object": "emergencyAccess",
         })
     }

@@ -70,36 +70,43 @@ impl EmergencyAccess {
         let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");

         json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "GrantorId": grantor_user.uuid,
-            "Email": grantor_user.email,
-            "Name": grantor_user.name,
-            "Object": "emergencyAccessGrantorDetails",
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "grantorId": grantor_user.uuid,
+            "email": grantor_user.email,
+            "name": grantor_user.name,
+            "object": "emergencyAccessGrantorDetails",
         })
     }

-    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
+    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
         let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
-            Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
+            User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
         } else if let Some(email) = self.email.as_deref() {
-            Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
+            match User::find_by_mail(email, conn).await {
+                Some(user) => user,
+                None => {
+                    // remove outstanding invitations which should not exist
+                    Self::delete_all_by_grantee_email(email, conn).await.ok();
+                    return None;
+                }
+            }
         } else {
-            None
+            return None;
         };

-        json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
-            "Email": grantee_user.as_ref().map_or("", |u| &u.email),
-            "Name": grantee_user.as_ref().map_or("", |u| &u.name),
-            "Object": "emergencyAccessGranteeDetails",
-        })
+        Some(json!({
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "granteeId": grantee_user.uuid,
+            "email": grantee_user.email,
+            "name": grantee_user.name,
+            "object": "emergencyAccessGranteeDetails",
+        }))
     }
 }

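Returning `Option<Value>` lets callers drop grantees whose user record no longer exists instead of panicking on the old `expect`. A caller would typically flatten the `None`s away, as this plain-data sketch shows:

fn main() {
    // Stand-ins for EmergencyAccess entries whose grantee may be gone.
    let entries: Vec<Option<String>> = vec![
        Some("grantee-a".to_string()),
        None, // dangling invitation: silently dropped
        Some("grantee-b".to_string()),
    ];
    // flatten() keeps only the Some values, mirroring how an API route
    // would collect to_json_grantee_details() results.
    let granted: Vec<String> = entries.into_iter().flatten().collect();
    assert_eq!(granted.len(), 2);
}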
@@ -174,7 +181,7 @@ impl EmergencyAccess {
         // Update the grantee so that it will refresh it's status.
         User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
         self.status = status;
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);

         db_run! {conn: {
             crate::util::retry(|| {
@@ -192,7 +199,7 @@ impl EmergencyAccess {
         conn: &mut DbConn,
     ) -> EmptyResult {
         self.last_notification_at = Some(date.to_owned());
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);

         db_run! {conn: {
             crate::util::retry(|| {
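`clone_into` is the form clippy's `assigning_clones` lint suggests for `*target = source.clone()`; for buffer-owning types it can reuse the target's existing allocation. Illustrated with `String`, where the difference is visible:

fn main() {
    let source = String::from("2024-05-01 12:00:00");
    let mut target = String::with_capacity(32);
    // Equivalent to `target = source.clone()`, but may reuse target's
    // existing buffer instead of allocating a new one.
    source.clone_into(&mut target);
    assert_eq!(source, target);
}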
@@ -214,6 +221,13 @@ impl EmergencyAccess {
         Ok(())
     }

+    pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
+            ea.delete(conn).await?;
+        }
+        Ok(())
+    }
+
     pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.grantor_uuid, conn).await;

@@ -224,15 +238,6 @@ impl EmergencyAccess {
         }}
     }

-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
-        db_run! { conn: {
-            emergency_access::table
-                .filter(emergency_access::uuid.eq(uuid))
-                .first::<EmergencyAccessDb>(conn)
-                .ok().from_db()
-        }}
-    }
-
     pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
         grantor_uuid: &str,
         grantee_uuid: &str,
@@ -267,6 +272,26 @@ impl EmergencyAccess {
         }}
     }

+    pub async fn find_by_uuid_and_grantee_uuid(uuid: &str, grantee_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::uuid.eq(uuid))
+                .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
+                .first::<EmergencyAccessDb>(conn)
+                .ok().from_db()
+        }}
+    }
+
+    pub async fn find_by_uuid_and_grantee_email(uuid: &str, grantee_email: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::uuid.eq(uuid))
+                .filter(emergency_access::email.eq(grantee_email))
+                .first::<EmergencyAccessDb>(conn)
+                .ok().from_db()
+        }}
+    }
+
     pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
@@ -285,6 +310,15 @@ impl EmergencyAccess {
         }}
     }

+    pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::email.eq(grantee_email))
+                .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
+                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
+        }}
+    }
+
     pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
@@ -292,6 +326,21 @@ impl EmergencyAccess {
             .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
         }}
     }

+    pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
+            err!("User email does not match invite.");
+        }
+
+        if self.status == EmergencyAccessStatus::Accepted as i32 {
+            err!("Emergency contact already accepted.");
+        }
+
+        self.status = EmergencyAccessStatus::Accepted as i32;
+        self.grantee_uuid = Some(String::from(grantee_uuid));
+        self.email = None;
+        self.save(conn).await
+    }
 }

 // endregion
src/db/models/event.rs

@@ -3,7 +3,7 @@ use serde_json::Value;

 use crate::{api::EmptyResult, error::MapResult, CONFIG};

-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{NaiveDateTime, TimeDelta, Utc};

 // https://bitwarden.com/help/event-logs/
@@ -316,7 +316,7 @@ impl Event {

     pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
         if let Some(days_to_retain) = CONFIG.events_days_retain() {
-            let dt = Utc::now().naive_utc() - Duration::days(days_to_retain);
+            let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap();
             db_run! { conn: {
                 diesel::delete(event::table.filter(event::event_date.lt(dt)))
                     .execute(conn)
src/db/models/folder.rs

@@ -43,10 +43,10 @@ impl Folder {
         use crate::util::format_date;

         json!({
-            "Id": self.uuid,
-            "RevisionDate": format_date(&self.updated_at),
-            "Name": self.name,
-            "Object": "folder",
+            "id": self.uuid,
+            "revisionDate": format_date(&self.updated_at),
+            "name": self.name,
+            "object": "folder",
         })
     }
 }
src/db/models/group.rs

@@ -1,3 +1,7 @@
+use super::{User, UserOrgType, UserOrganization};
+use crate::api::EmptyResult;
+use crate::db::DbConn;
+use crate::error::MapResult;
 use chrono::{NaiveDateTime, Utc};
 use serde_json::Value;

@@ -58,38 +62,39 @@ impl Group {
         use crate::util::format_date;

         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.organizations_uuid,
-            "Name": self.name,
-            "AccessAll": self.access_all,
-            "ExternalId": self.external_id,
-            "CreationDate": format_date(&self.creation_date),
-            "RevisionDate": format_date(&self.revision_date),
-            "Object": "group"
+            "id": self.uuid,
+            "organizationId": self.organizations_uuid,
+            "name": self.name,
+            "accessAll": self.access_all,
+            "externalId": self.external_id,
+            "creationDate": format_date(&self.creation_date),
+            "revisionDate": format_date(&self.revision_date),
+            "object": "group"
         })
     }

-    pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
+    pub async fn to_json_details(&self, user_org_type: &i32, conn: &mut DbConn) -> Value {
         let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
             .await
             .iter()
             .map(|entry| {
                 json!({
-                    "Id": entry.collections_uuid,
-                    "ReadOnly": entry.read_only,
-                    "HidePasswords": entry.hide_passwords
+                    "id": entry.collections_uuid,
+                    "readOnly": entry.read_only,
+                    "hidePasswords": entry.hide_passwords,
+                    "manage": *user_org_type >= UserOrgType::Admin || (*user_org_type == UserOrgType::Manager && !entry.read_only && !entry.hide_passwords)
                 })
             })
             .collect();

         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.organizations_uuid,
-            "Name": self.name,
-            "AccessAll": self.access_all,
-            "ExternalId": self.external_id,
-            "Collections": collections_groups,
-            "Object": "groupDetails"
+            "id": self.uuid,
+            "organizationId": self.organizations_uuid,
+            "name": self.name,
+            "accessAll": self.access_all,
+            "externalId": self.external_id,
+            "collections": collections_groups,
+            "object": "groupDetails"
         })
     }

@@ -122,13 +127,6 @@ impl GroupUser {
     }
 }

-use crate::db::DbConn;
-
-use crate::api::EmptyResult;
-use crate::error::MapResult;
-
-use super::{User, UserOrganization};
-
 /// Database methods
 impl Group {
     pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
@@ -203,10 +201,11 @@ impl Group {
         }}
     }

-    pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_external_id_and_org(external_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             groups::table
-                .filter(groups::external_id.eq(id))
+                .filter(groups::external_id.eq(external_id))
+                .filter(groups::organizations_uuid.eq(org_uuid))
                 .first::<GroupDb>(conn)
                 .ok()
                 .from_db()
@@ -486,6 +485,39 @@ impl GroupUser {
         }}
     }

+    pub async fn has_access_to_collection_by_member(
+        collection_uuid: &str,
+        member_uuid: &str,
+        conn: &mut DbConn,
+    ) -> bool {
+        db_run! { conn: {
+            groups_users::table
+                .inner_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                ))
+                .filter(collections_groups::collections_uuid.eq(collection_uuid))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0) != 0
+        }}
+    }
+
+    pub async fn has_full_access_by_member(org_uuid: &str, member_uuid: &str, conn: &mut DbConn) -> bool {
+        db_run! { conn: {
+            groups_users::table
+                .inner_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .filter(groups::organizations_uuid.eq(org_uuid))
+                .filter(groups::access_all.eq(true))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0) != 0
+        }}
+    }
+
     pub async fn update_user_revision(&self, conn: &mut DbConn) {
         match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await {
             Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
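Both helpers above use the `count().first::<i64>(..).unwrap_or(0) != 0` idiom as an existence test, treating a query error as "no access". The same shape on a hypothetical membership table:

use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;

diesel::table! {
    group_members (id) {
        id -> Integer,
        group_id -> Text,
        member_id -> Text,
    }
}

// Returns true when at least one row links the member to the group;
// unwrap_or(0) maps errors to false, as the originals do.
fn is_member(conn: &mut SqliteConnection, group: &str, member: &str) -> bool {
    group_members::table
        .filter(group_members::group_id.eq(group))
        .filter(group_members::member_id.eq(member))
        .count()
        .first::<i64>(conn)
        .unwrap_or(0) != 0
}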
src/db/models/mod.rs

@@ -12,6 +12,7 @@ mod org_policy;
 mod organization;
 mod send;
 mod two_factor;
+mod two_factor_duo_context;
 mod two_factor_incomplete;
 mod user;

@@ -29,5 +30,6 @@ pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
 pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
 pub use self::send::{Send, SendType};
 pub use self::two_factor::{TwoFactor, TwoFactorType};
+pub use self::two_factor_duo_context::TwoFactorDuoContext;
 pub use self::two_factor_incomplete::TwoFactorIncomplete;
 pub use self::user::{Invitation, User, UserKdfType, UserStampException};
src/db/models/org_policy.rs

@@ -4,7 +4,6 @@ use serde_json::Value;
 use crate::api::EmptyResult;
 use crate::db::DbConn;
 use crate::error::MapResult;
-use crate::util::UpCase;

 use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};

@@ -39,16 +38,18 @@ pub enum OrgPolicyType {

 // https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct SendOptionsPolicyData {
-    pub DisableHideEmail: bool,
+    #[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
+    pub disable_hide_email: bool,
 }

 // https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct ResetPasswordDataModel {
-    pub AutoEnrollEnabled: bool,
+    #[serde(rename = "autoEnrollEnabled", alias = "AutoEnrollEnabled")]
+    pub auto_enroll_enabled: bool,
 }

 pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
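Dropping the `UpCase` wrapper works because serde can accept both spellings directly: `rename_all = "camelCase"` for the new form plus an `alias` for the legacy PascalCase data already stored in the database. A quick behavioural check:

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct SendOptionsPolicyData {
    // alias keeps policies stored by older versions readable
    #[serde(alias = "DisableHideEmail")]
    disable_hide_email: bool,
}

fn main() {
    let new_style: SendOptionsPolicyData =
        serde_json::from_str(r#"{"disableHideEmail": true}"#).unwrap();
    let old_style: SendOptionsPolicyData =
        serde_json::from_str(r#"{"DisableHideEmail": true}"#).unwrap();
    assert!(new_style.disable_hide_email && old_style.disable_hide_email);
}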
@@ -78,12 +79,12 @@ impl OrgPolicy {
     pub fn to_json(&self) -> Value {
         let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.org_uuid,
-            "Type": self.atype,
-            "Data": data_json,
-            "Enabled": self.enabled,
-            "Object": "policy",
+            "id": self.uuid,
+            "organizationId": self.org_uuid,
+            "type": self.atype,
+            "data": data_json,
+            "enabled": self.enabled,
+            "object": "policy",
         })
     }
 }
@@ -114,7 +115,7 @@ impl OrgPolicy {
         // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
         // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
         // not support multiple constraints on ON CONFLICT clauses.
-        diesel::delete(
+        let _: () = diesel::delete(
             org_policies::table
                 .filter(org_policies::org_uuid.eq(&self.org_uuid))
                 .filter(org_policies::atype.eq(&self.atype)),
@@ -307,9 +308,9 @@ impl OrgPolicy {

     pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
         match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
-            Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) {
+            Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
                 Ok(opts) => {
-                    return policy.enabled && opts.data.AutoEnrollEnabled;
+                    return policy.enabled && opts.auto_enroll_enabled;
                 }
                 _ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data),
             },
@@ -327,9 +328,9 @@ impl OrgPolicy {
         {
             if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
                 if user.atype < UserOrgType::Admin {
-                    match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
+                    match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
                         Ok(opts) => {
-                            if opts.data.DisableHideEmail {
+                            if opts.disable_hide_email {
                                 return true;
                             }
                         }
@@ -340,4 +341,13 @@ impl OrgPolicy {
         }
         false
     }

+    pub async fn is_enabled_for_member(org_user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool {
+        if let Some(membership) = UserOrganization::find_by_uuid(org_user_uuid, conn).await {
+            if let Some(policy) = OrgPolicy::find_by_org_and_type(&membership.org_uuid, policy_type, conn).await {
+                return policy.enabled;
+            }
+        }
+        false
+    }
 }
src/db/models/organization.rs

@@ -1,9 +1,13 @@
 use chrono::{NaiveDateTime, Utc};
 use num_traits::FromPrimitive;
 use serde_json::Value;
-use std::cmp::Ordering;
+use std::{
+    cmp::Ordering,
+    collections::{HashMap, HashSet},
+};

 use super::{CollectionUser, Group, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, User};
+use crate::db::models::{Collection, CollectionGroup};
 use crate::CONFIG;

 db_object! {
@@ -112,7 +116,7 @@ impl PartialOrd<i32> for UserOrgType {
     }

     fn ge(&self, other: &i32) -> bool {
-        matches!(self.partial_cmp(other), Some(Ordering::Greater) | Some(Ordering::Equal))
+        matches!(self.partial_cmp(other), Some(Ordering::Greater | Ordering::Equal))
     }
 }

@@ -135,7 +139,7 @@ impl PartialOrd<UserOrgType> for i32 {
     }

     fn le(&self, other: &UserOrgType) -> bool {
-        matches!(self.partial_cmp(other), Some(Ordering::Less) | Some(Ordering::Equal) | None)
+        matches!(self.partial_cmp(other), Some(Ordering::Less | Ordering::Equal) | None)
     }
 }

@@ -153,39 +157,38 @@ impl Organization {
     // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
     pub fn to_json(&self) -> Value {
         json!({
-            "Id": self.uuid,
-            "Identifier": null, // not supported by us
-            "Name": self.name,
-            "Seats": 10, // The value doesn't matter, we don't check server-side
-            // "MaxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
-            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
-            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
-            "Use2fa": true,
-            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": CONFIG.org_events_enabled(),
-            "UseGroups": CONFIG.org_groups_enabled(),
-            "UseTotp": true,
-            "UsePolicies": true,
-            // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
-            "UseSso": false, // Not supported
-            // "UseKeyConnector": false, // Not supported
-            "SelfHost": true,
-            "UseApi": true,
-            "HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
-            "UseResetPassword": CONFIG.mail_enabled(),
+            "id": self.uuid,
+            "identifier": null, // not supported by us
+            "name": self.name,
+            "seats": null,
+            "maxCollections": null,
+            "maxStorageGb": i16::MAX, // The value doesn't matter, we don't check server-side
+            "use2fa": true,
+            "useCustomPermissions": false,
+            "useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
+            "useEvents": CONFIG.org_events_enabled(),
+            "useGroups": CONFIG.org_groups_enabled(),
+            "useTotp": true,
+            "usePolicies": true,
+            // "useScim": false, // Not supported (Not AGPLv3 Licensed)
+            "useSso": false, // Not supported
+            // "useKeyConnector": false, // Not supported
+            "selfHost": true,
+            "useApi": true,
+            "hasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
+            "useResetPassword": CONFIG.mail_enabled(),
 
-            "BusinessName": null,
-            "BusinessAddress1": null,
-            "BusinessAddress2": null,
-            "BusinessAddress3": null,
-            "BusinessCountry": null,
-            "BusinessTaxNumber": null,
+            "businessName": null,
+            "businessAddress1": null,
+            "businessAddress2": null,
+            "businessAddress3": null,
+            "businessCountry": null,
+            "businessTaxNumber": null,
 
-            "BillingEmail": self.billing_email,
-            "Plan": "TeamsAnnually",
-            "PlanType": 5, // TeamsAnnually plan
-            "UsersGetPremium": true,
-            "Object": "organization",
+            "billingEmail": self.billing_email,
+            "planType": 6, // Custom plan
+            "usersGetPremium": true,
+            "object": "organization",
         })
     }
 }
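This and the following hunks migrate every response key from PascalCase to camelCase, which current Bitwarden clients expect. With `json!` literals each key has to be renamed by hand; for comparison, a typed response model gets the same casing from a single serde attribute. An illustrative sketch only, not how vaultwarden actually structures its responses:

use serde::Serialize;

// Hypothetical typed alternative: one attribute renames every field,
// so per-key `Id`/`id` drift cannot happen.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct OrganizationResponse {
    id: String,
    name: String,
    max_storage_gb: i16,     // serialized as "maxStorageGb"
    users_get_premium: bool, // serialized as "usersGetPremium"
    object: &'static str,
}

fn main() {
    let org = OrganizationResponse {
        id: "uuid".into(),
        name: "My Org".into(),
        max_storage_gb: i16::MAX,
        users_get_premium: true,
        object: "organization",
    };
    println!("{}", serde_json::to_string_pretty(&org).unwrap());
}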
@@ -214,7 +217,7 @@ impl UserOrganization {
     }
 
     pub fn restore(&mut self) -> bool {
-        if self.status < UserOrgStatus::Accepted as i32 {
+        if self.status < UserOrgStatus::Invited as i32 {
             self.status += ACTIVATE_REVOKE_DIFF;
             return true;
         }
@@ -229,6 +232,14 @@ impl UserOrganization {
         false
     }
 
+    /// Return the status of the user in an unrevoked state
+    pub fn get_unrevoked_status(&self) -> i32 {
+        if self.status <= UserOrgStatus::Revoked as i32 {
+            return self.status + ACTIVATE_REVOKE_DIFF;
+        }
+        self.status
+    }
+
     pub fn set_external_id(&mut self, external_id: Option<String>) -> bool {
         // Check if external id is empty. We don't want to have
         // empty strings in the database
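`get_unrevoked_status` relies on revoked memberships being stored as the ordinary status shifted down by `ACTIVATE_REVOKE_DIFF`, so the pre-revocation status can be recovered arithmetically without touching the row. A toy model of that arithmetic; the constant values here are illustrative, not the crate's:

// Illustrative constants: in vaultwarden the statuses live in the
// UserOrgStatus enum and ACTIVATE_REVOKE_DIFF shifts a status into
// (or out of) the revoked range.
const ACTIVATE_REVOKE_DIFF: i32 = 128;
const REVOKED: i32 = -1;
const CONFIRMED: i32 = 2;

fn get_unrevoked_status(status: i32) -> i32 {
    if status <= REVOKED {
        status + ACTIVATE_REVOKE_DIFF // undo the revoke shift
    } else {
        status
    }
}

fn main() {
    let revoked_confirmed = CONFIRMED - ACTIVATE_REVOKE_DIFF; // a revoked, previously confirmed member
    assert!(revoked_confirmed <= REVOKED);
    assert_eq!(get_unrevoked_status(revoked_confirmed), CONFIRMED);
    assert_eq!(get_unrevoked_status(CONFIRMED), CONFIRMED);
}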
@@ -316,6 +327,7 @@ impl Organization {
         UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
         OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;
         Group::delete_all_by_organization(&self.uuid, conn).await?;
+        OrganizationApiKey::delete_all_by_organization(&self.uuid, conn).await?;
 
         db_run! { conn: {
             diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
@@ -344,65 +356,83 @@ impl UserOrganization {
     pub async fn to_json(&self, conn: &mut DbConn) -> Value {
         let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
 
+        let permissions = json!({
+            // TODO: Add support for Custom User Roles
+            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
+            "accessEventLogs": false,
+            "accessImportExport": false,
+            "accessReports": false,
+            "createNewCollections": false,
+            "editAnyCollection": false,
+            "deleteAnyCollection": false,
+            "editAssignedCollections": false,
+            "deleteAssignedCollections": false,
+            "manageGroups": false,
+            "managePolicies": false,
+            "manageSso": false, // Not supported
+            "manageUsers": false,
+            "manageResetPassword": false,
+            "manageScim": false // Not supported (Not AGPLv3 Licensed)
+        });
+
         // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
         json!({
-            "Id": self.org_uuid,
-            "Identifier": null, // Not supported
-            "Name": org.name,
-            "Seats": 10, // The value doesn't matter, we don't check server-side
-            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
-            "UsersGetPremium": true,
-            "Use2fa": true,
-            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": CONFIG.org_events_enabled(),
-            "UseGroups": CONFIG.org_groups_enabled(),
-            "UseTotp": true,
-            // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
-            "UsePolicies": true,
-            "UseApi": true,
-            "SelfHost": true,
-            "HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
-            "ResetPasswordEnrolled": self.reset_password_key.is_some(),
-            "UseResetPassword": CONFIG.mail_enabled(),
-            "SsoBound": false, // Not supported
-            "UseSso": false, // Not supported
-            "ProviderId": null,
-            "ProviderName": null,
-            // "KeyConnectorEnabled": false,
-            // "KeyConnectorUrl": null,
+            "id": self.org_uuid,
+            "identifier": null, // Not supported
+            "name": org.name,
+            "seats": null,
+            "maxCollections": null,
+            "usersGetPremium": true,
+            "use2fa": true,
+            "useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
+            "useEvents": CONFIG.org_events_enabled(),
+            "useGroups": CONFIG.org_groups_enabled(),
+            "useTotp": true,
+            "useScim": false, // Not supported (Not AGPLv3 Licensed)
+            "usePolicies": true,
+            "useApi": true,
+            "selfHost": true,
+            "hasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
+            "resetPasswordEnrolled": self.reset_password_key.is_some(),
+            "useResetPassword": CONFIG.mail_enabled(),
+            "ssoBound": false, // Not supported
+            "useSso": false, // Not supported
+            "useKeyConnector": false,
+            "useSecretsManager": false,
+            "usePasswordManager": true,
+            "useCustomPermissions": false,
+            "useActivateAutofillPolicy": false,
 
-            // TODO: Add support for Custom User Roles
-            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
-            // "Permissions": {
-            //     "AccessEventLogs": false,
-            //     "AccessImportExport": false,
-            //     "AccessReports": false,
-            //     "ManageAllCollections": false,
-            //     "CreateNewCollections": false,
-            //     "EditAnyCollection": false,
-            //     "DeleteAnyCollection": false,
-            //     "ManageAssignedCollections": false,
-            //     "editAssignedCollections": false,
-            //     "deleteAssignedCollections": false,
-            //     "ManageCiphers": false,
-            //     "ManageGroups": false,
-            //     "ManagePolicies": false,
-            //     "ManageResetPassword": false,
-            //     "ManageSso": false, // Not supported
-            //     "ManageUsers": false,
-            //     "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
-            // },
+            "organizationUserId": self.uuid,
+            "providerId": null,
+            "providerName": null,
+            "providerType": null,
+            "familySponsorshipFriendlyName": null,
+            "familySponsorshipAvailable": false,
+            "planProductType": 3,
+            "productTierType": 3, // Enterprise tier
+            "keyConnectorEnabled": false,
+            "keyConnectorUrl": null,
+            "familySponsorshipLastSyncDate": null,
+            "familySponsorshipValidUntil": null,
+            "familySponsorshipToDelete": null,
+            "accessSecretsManager": false,
+            "limitCollectionCreationDeletion": false, // This should be set to true only when we can handle roles like createNewCollections
+            "allowAdminAccessToAllCollectionItems": true,
+            "flexibleCollections": false,
 
-            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
+            "permissions": permissions,
+
+            "maxStorageGb": i16::MAX, // The value doesn't matter, we don't check server-side
 
             // These are per user
-            "UserId": self.user_uuid,
-            "Key": self.akey,
-            "Status": self.status,
-            "Type": self.atype,
-            "Enabled": true,
+            "userId": self.user_uuid,
+            "key": self.akey,
+            "status": self.status,
+            "type": self.atype,
+            "enabled": true,
 
-            "Object": "profileOrganization",
+            "object": "profileOrganization",
         })
     }
@@ -433,45 +463,104 @@ impl UserOrganization {
         };
 
         let collections: Vec<Value> = if include_collections {
-            CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
-                .await
-                .iter()
-                .map(|cu| {
-                    json!({
-                        "Id": cu.collection_uuid,
-                        "ReadOnly": cu.read_only,
-                        "HidePasswords": cu.hide_passwords,
-                    })
-                })
-                .collect()
+            // Get all collections for the user here already to prevent more queries
+            let cu: HashMap<String, CollectionUser> =
+                CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
+                    .await
+                    .into_iter()
+                    .map(|cu| (cu.collection_uuid.clone(), cu))
+                    .collect();
+
+            // Get all collection groups for this user to prevent their inclusion
+            let cg: HashSet<String> = CollectionGroup::find_by_user(&self.user_uuid, conn)
+                .await
+                .into_iter()
+                .map(|cg| cg.collections_uuid)
+                .collect();
+
+            Collection::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
+                .await
+                .into_iter()
+                .filter_map(|c| {
+                    let (read_only, hide_passwords, can_manage) = if self.has_full_access() {
+                        (false, false, self.atype >= UserOrgType::Manager)
+                    } else if let Some(cu) = cu.get(&c.uuid) {
+                        (
+                            cu.read_only,
+                            cu.hide_passwords,
+                            self.atype == UserOrgType::Manager && !cu.read_only && !cu.hide_passwords,
+                        )
+                    // If previous checks failed it might be that this user has access via a group, but we should not return those elements here
+                    // Those are returned via a special group endpoint
+                    } else if cg.contains(&c.uuid) {
+                        return None;
+                    } else {
+                        (true, true, false)
+                    };
+
+                    Some(json!({
+                        "id": c.uuid,
+                        "readOnly": read_only,
+                        "hidePasswords": hide_passwords,
+                        "manage": can_manage,
+                    }))
+                })
+                .collect()
         } else {
             Vec::with_capacity(0)
         };
 
+        let permissions = json!({
+            // TODO: Add support for Custom User Roles
+            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
+            "accessEventLogs": false,
+            "accessImportExport": false,
+            "accessReports": false,
+            "createNewCollections": false,
+            "editAnyCollection": false,
+            "deleteAnyCollection": false,
+            "editAssignedCollections": false,
+            "deleteAssignedCollections": false,
+            "manageGroups": false,
+            "managePolicies": false,
+            "manageSso": false, // Not supported
+            "manageUsers": false,
+            "manageResetPassword": false,
+            "manageScim": false // Not supported (Not AGPLv3 Licensed)
+        });
+
         json!({
-            "Id": self.uuid,
-            "UserId": self.user_uuid,
-            "Name": user.name,
-            "Email": user.email,
-            "ExternalId": self.external_id,
-            "Groups": groups,
-            "Collections": collections,
+            "id": self.uuid,
+            "userId": self.user_uuid,
+            "name": if self.get_unrevoked_status() >= UserOrgStatus::Accepted as i32 { Some(user.name) } else { None },
+            "email": user.email,
+            "externalId": self.external_id,
+            "avatarColor": user.avatar_color,
+            "groups": groups,
+            "collections": collections,
 
-            "Status": status,
-            "Type": self.atype,
-            "AccessAll": self.access_all,
-            "TwoFactorEnabled": twofactor_enabled,
-            "ResetPasswordEnrolled": self.reset_password_key.is_some(),
+            "status": status,
+            "type": self.atype,
+            "accessAll": self.access_all,
+            "twoFactorEnabled": twofactor_enabled,
+            "resetPasswordEnrolled": self.reset_password_key.is_some(),
+            "hasMasterPassword": !user.password_hash.is_empty(),
 
-            "Object": "organizationUserUserDetails",
+            "permissions": permissions,
+
+            "ssoBound": false, // Not supported
+            "usesKeyConnector": false, // Not supported
+            "accessSecretsManager": false, // Not supported (Not AGPLv3 Licensed)
+
+            "object": "organizationUserUserDetails",
         })
     }
 
     pub fn to_json_user_access_restrictions(&self, col_user: &CollectionUser) -> Value {
         json!({
-            "Id": self.uuid,
-            "ReadOnly": col_user.read_only,
-            "HidePasswords": col_user.hide_passwords,
+            "id": self.uuid,
+            "readOnly": col_user.read_only,
+            "hidePasswords": col_user.hide_passwords,
         })
     }
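The rewritten branch above trades one lookup per collection for two up-front queries whose results are indexed into a `HashMap` and a `HashSet`, after which access is resolved with O(1) membership tests inside `filter_map` — the classic N+1 fix. The shape of the pattern, reduced to plain types as a sketch:

use std::collections::{HashMap, HashSet};

struct CollectionUser {
    collection_uuid: String,
    read_only: bool,
}

// Fetch once, index by uuid, then do constant-time lookups per collection
// instead of issuing one query per collection.
fn build_access(
    user_rows: Vec<CollectionUser>,
    group_collection_uuids: Vec<String>,
    all_collections: Vec<String>,
) -> Vec<(String, bool)> {
    let cu: HashMap<String, CollectionUser> =
        user_rows.into_iter().map(|r| (r.collection_uuid.clone(), r)).collect();
    let cg: HashSet<String> = group_collection_uuids.into_iter().collect();

    all_collections
        .into_iter()
        .filter_map(|uuid| {
            if let Some(r) = cu.get(&uuid) {
                Some((uuid, r.read_only))
            } else if cg.contains(&uuid) {
                None // group-based access is reported via a separate endpoint
            } else {
                Some((uuid, true)) // default: read-only
            }
        })
        .collect()
}

fn main() {
    let rows = vec![CollectionUser { collection_uuid: "a".into(), read_only: false }];
    let access = build_access(rows, vec!["b".into()], vec!["a".into(), "b".into(), "c".into()]);
    assert_eq!(access, vec![("a".to_string(), false), ("c".to_string(), true)]);
}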
@@ -485,9 +574,9 @@ impl UserOrganization {
             .iter()
             .map(|c| {
                 json!({
-                    "Id": c.collection_uuid,
-                    "ReadOnly": c.read_only,
-                    "HidePasswords": c.hide_passwords,
+                    "id": c.collection_uuid,
+                    "readOnly": c.read_only,
+                    "hidePasswords": c.hide_passwords,
                 })
             })
             .collect()
@@ -502,15 +591,15 @@ impl UserOrganization {
         };
 
         json!({
-            "Id": self.uuid,
-            "UserId": self.user_uuid,
+            "id": self.uuid,
+            "userId": self.user_uuid,
 
-            "Status": status,
-            "Type": self.atype,
-            "AccessAll": self.access_all,
-            "Collections": coll_uuids,
+            "status": status,
+            "type": self.atype,
+            "accessAll": self.access_all,
+            "collections": coll_uuids,
 
-            "Object": "organizationUserDetails",
+            "object": "organizationUserDetails",
         })
     }
     pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
@@ -575,7 +664,7 @@ impl UserOrganization {
     }
 
     pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &mut DbConn) -> Option<UserOrganization> {
-        if let Some(user) = super::User::find_by_mail(email, conn).await {
+        if let Some(user) = User::find_by_mail(email, conn).await {
             if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await {
                 return Some(user_org);
             }
@@ -648,8 +737,7 @@ impl UserOrganization {
         db_run! { conn: {
             users_organizations::table
                 .filter(users_organizations::user_uuid.eq(user_uuid))
-                .filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32))
-                .or_filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                .filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32).or(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)))
                 .count()
                 .first::<i64>(conn)
                 .unwrap_or(0)
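The collapse of the two stacked filters into one `.filter(...)` containing `.or(...)` is a behavioral fix, not just style: diesel's `.or_filter(...)` ORs its argument against the entire WHERE clause built so far, so the old query matched `(user_uuid = ? AND status = Accepted) OR status = Confirmed` — counting confirmed rows of any user. Nesting the `.or(...)` keeps the disjunction scoped to the status column. A standalone sketch with a hypothetical minimal schema:

use diesel::prelude::*;

diesel::table! {
    users_organizations (uuid) {
        uuid -> Text,
        user_uuid -> Text,
        status -> Integer,
    }
}

fn main() {
    // Scoped OR: WHERE user_uuid = ? AND (status = 1 OR status = 2).
    // Chaining `.or_filter(status.eq(2))` instead would produce
    // WHERE (user_uuid = ? AND status = 1) OR status = 2.
    let _query = users_organizations::table
        .filter(users_organizations::user_uuid.eq("some-uuid"))
        .filter(users_organizations::status.eq(1).or(users_organizations::status.eq(2)));
    // Query built but not executed in this sketch.
}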
@@ -665,6 +753,16 @@ impl UserOrganization {
         }}
     }
 
+    pub async fn find_confirmed_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                .load::<UserOrganizationDb>(conn)
+                .unwrap_or_default().from_db()
+        }}
+    }
+
     pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             users_organizations::table
@@ -708,6 +806,19 @@ impl UserOrganization {
         }}
     }
 
+    pub async fn find_confirmed_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .first::<UserOrganizationDb>(conn)
+                .ok().from_db()
+        }}
+    }
+
     pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             users_organizations::table
@@ -769,6 +880,32 @@ impl UserOrganization {
         }}
     }
 
+    pub async fn find_by_cipher_and_org_with_group(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .inner_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                ))
+                .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                .left_join(ciphers_collections::table.on(
+                    ciphers_collections::collection_uuid.eq(collections_groups::collections_uuid).and(ciphers_collections::cipher_uuid.eq(&cipher_uuid))
+                ))
+                .filter(
+                    groups::access_all.eq(true).or( // AccessAll via groups
+                        ciphers_collections::cipher_uuid.eq(&cipher_uuid) // ..or access to collection via group
+                    )
+                )
+                .select(users_organizations::all_columns)
+                .distinct()
+                .load::<UserOrganizationDb>(conn).expect("Error loading user organizations with groups").from_db()
+        }}
+    }
+
     pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> bool {
         db_run! { conn: {
             users_organizations::table
@@ -852,6 +989,14 @@ impl OrganizationApiKey {
             .ok().from_db()
         }}
     }
+
+    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+        db_run! { conn: {
+            diesel::delete(organization_api_key::table.filter(organization_api_key::org_uuid.eq(org_uuid)))
+                .execute(conn)
+                .map_res("Error removing organization api key from organization")
+        }}
+    }
 }
 
 #[cfg(test)]
@@ -1,6 +1,8 @@
 use chrono::{NaiveDateTime, Utc};
 use serde_json::Value;
 
+use crate::util::LowerCase;
+
 use super::User;
 
 db_object! {
@@ -122,48 +124,58 @@ impl Send {
         use data_encoding::BASE64URL_NOPAD;
         use uuid::Uuid;
 
-        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
+        let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
+
+        // Mobile clients expect size to be a string instead of a number
+        if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
+            data["size"] = Value::String(size.to_string());
+        }
 
         json!({
-            "Id": self.uuid,
-            "AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
-            "Type": self.atype,
+            "id": self.uuid,
+            "accessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
+            "type": self.atype,
 
-            "Name": self.name,
-            "Notes": self.notes,
-            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
-            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
+            "name": self.name,
+            "notes": self.notes,
+            "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
+            "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
 
-            "Key": self.akey,
-            "MaxAccessCount": self.max_access_count,
-            "AccessCount": self.access_count,
-            "Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
-            "Disabled": self.disabled,
-            "HideEmail": self.hide_email,
+            "key": self.akey,
+            "maxAccessCount": self.max_access_count,
+            "accessCount": self.access_count,
+            "password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
+            "disabled": self.disabled,
+            "hideEmail": self.hide_email,
 
-            "RevisionDate": format_date(&self.revision_date),
-            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
-            "DeletionDate": format_date(&self.deletion_date),
-            "Object": "send",
+            "revisionDate": format_date(&self.revision_date),
+            "expirationDate": self.expiration_date.as_ref().map(format_date),
+            "deletionDate": format_date(&self.deletion_date),
+            "object": "send",
         })
     }
 
     pub async fn to_json_access(&self, conn: &mut DbConn) -> Value {
         use crate::util::format_date;
 
-        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
+        let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
+
+        // Mobile clients expect size to be a string instead of a number
+        if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
+            data["size"] = Value::String(size.to_string());
+        }
 
         json!({
-            "Id": self.uuid,
-            "Type": self.atype,
+            "id": self.uuid,
+            "type": self.atype,
 
-            "Name": self.name,
-            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
-            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
+            "name": self.name,
+            "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
+            "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
 
-            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
-            "CreatorIdentifier": self.creator_identifier(conn).await,
-            "Object": "send-access",
+            "expirationDate": self.expiration_date.as_ref().map(format_date),
+            "creatorIdentifier": self.creator_identifier(conn).await,
+            "object": "send-access",
        })
    }
 }
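`LowerCase<T>` comes from `crate::util` and is not shown in this diff; its role is to let send data stored under the old PascalCase keys deserialize under the new camelCase lookups (`data.get("size")`). One way such a normalizer can work, as an illustrative standalone sketch rather than vaultwarden's actual implementation:

use serde_json::{Map, Value};

// Illustrative stand-in for a case-normalizing wrapper: recursively
// lowercase the first character of every object key so stored
// PascalCase data matches camelCase lookups.
fn lowercase_keys(v: Value) -> Value {
    match v {
        Value::Object(map) => {
            let mut out = Map::with_capacity(map.len());
            for (k, val) in map {
                let mut chars = k.chars();
                let key = match chars.next() {
                    Some(first) => first.to_lowercase().collect::<String>() + chars.as_str(),
                    None => k,
                };
                out.insert(key, lowercase_keys(val));
            }
            Value::Object(out)
        }
        Value::Array(arr) => Value::Array(arr.into_iter().map(lowercase_keys).collect()),
        other => other,
    }
}

fn main() {
    let data: Value = serde_json::from_str(r#"{"Size": 123, "FileName": "a.txt"}"#).unwrap();
    let data = lowercase_keys(data);
    assert_eq!(data["size"], 123);
    assert_eq!(data["fileName"], "a.txt");
}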
@@ -172,6 +184,7 @@ use crate::db::DbConn;
 
 use crate::api::EmptyResult;
 use crate::error::MapResult;
+use crate::util::NumberOrString;
 
 impl Send {
     pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
@@ -286,6 +299,29 @@ impl Send {
         }}
     }
 
+    pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
+        let sends = Self::find_by_user(user_uuid, conn).await;
+
+        #[derive(serde::Deserialize)]
+        struct FileData {
+            #[serde(rename = "size", alias = "Size")]
+            size: NumberOrString,
+        }
+
+        let mut total: i64 = 0;
+        for send in sends {
+            if send.atype == SendType::File as i32 {
+                if let Ok(size) =
+                    serde_json::from_str::<FileData>(&send.data).map_err(Into::into).and_then(|d| d.size.into_i64())
+                {
+                    total = total.checked_add(size)?;
+                };
+            }
+        }
+
+        Some(total)
+    }
+
     pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             sends::table
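`size_by_user` sums file-send sizes with `checked_add` and lets the `?` operator propagate an (unlikely) i64 overflow as `None` instead of wrapping or panicking. The same pattern in isolation:

// Overflow-aware accumulation: `checked_add` returns None on overflow,
// and `?` short-circuits that None out of the function.
fn total_size(sizes: &[i64]) -> Option<i64> {
    let mut total: i64 = 0;
    for s in sizes {
        total = total.checked_add(*s)?;
    }
    Some(total)
}

fn main() {
    assert_eq!(total_size(&[1, 2, 3]), Some(6));
    assert_eq!(total_size(&[i64::MAX, 1]), None); // overflow surfaces as None
}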
@@ -12,7 +12,7 @@ db_object! {
         pub atype: i32,
         pub enabled: bool,
         pub data: String,
-        pub last_used: i32,
+        pub last_used: i64,
     }
 }
@@ -34,6 +34,9 @@ pub enum TwoFactorType {
     EmailVerificationChallenge = 1002,
     WebauthnRegisterChallenge = 1003,
     WebauthnLoginChallenge = 1004,
+
+    // Special type for Protected Actions verification via email
+    ProtectedActions = 2000,
 }
 
 /// Local methods
@@ -51,17 +54,17 @@ impl TwoFactor {
 
     pub fn to_json(&self) -> Value {
         json!({
-            "Enabled": self.enabled,
-            "Key": "", // This key and value vary
-            "Object": "twoFactorAuthenticator" // This value varies
+            "enabled": self.enabled,
+            "key": "", // This key and value vary
+            "object": "twoFactorAuthenticator" // This value varies
        })
    }
 
     pub fn to_json_provider(&self) -> Value {
         json!({
-            "Enabled": self.enabled,
-            "Type": self.atype,
-            "Object": "twoFactorProvider"
+            "enabled": self.enabled,
+            "type": self.atype,
+            "object": "twoFactorProvider"
         })
     }
 }
@@ -92,7 +95,7 @@ impl TwoFactor {
                 // We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
                 // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
                 // not support multiple constraints on ON CONFLICT clauses.
-                diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
+                let _: () = diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
                     .execute(conn)
                     .map_res("Error deleting twofactor for insert")?;
src/db/models/two_factor_duo_context.rs (new file, 84 lines)

@@ -0,0 +1,84 @@
+use chrono::Utc;
+
+use crate::{api::EmptyResult, db::DbConn, error::MapResult};
+
+db_object! {
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
+    #[diesel(table_name = twofactor_duo_ctx)]
+    #[diesel(primary_key(state))]
+    pub struct TwoFactorDuoContext {
+        pub state: String,
+        pub user_email: String,
+        pub nonce: String,
+        pub exp: i64,
+    }
+}
+
+impl TwoFactorDuoContext {
+    pub async fn find_by_state(state: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! {
+            conn: {
+                twofactor_duo_ctx::table
+                    .filter(twofactor_duo_ctx::state.eq(state))
+                    .first::<TwoFactorDuoContextDb>(conn)
+                    .ok()
+                    .from_db()
+            }
+        }
+    }
+
+    pub async fn save(state: &str, user_email: &str, nonce: &str, ttl: i64, conn: &mut DbConn) -> EmptyResult {
+        // A saved context should never be changed, only created or deleted.
+        let exists = Self::find_by_state(state, conn).await;
+        if exists.is_some() {
+            return Ok(());
+        };
+
+        let exp = Utc::now().timestamp() + ttl;
+
+        db_run! {
+            conn: {
+                diesel::insert_into(twofactor_duo_ctx::table)
+                    .values((
+                        twofactor_duo_ctx::state.eq(state),
+                        twofactor_duo_ctx::user_email.eq(user_email),
+                        twofactor_duo_ctx::nonce.eq(nonce),
+                        twofactor_duo_ctx::exp.eq(exp)
+                    ))
+                    .execute(conn)
+                    .map_res("Error saving context to twofactor_duo_ctx")
+            }
+        }
+    }
+
+    pub async fn find_expired(conn: &mut DbConn) -> Vec<Self> {
+        let now = Utc::now().timestamp();
+        db_run! {
+            conn: {
+                twofactor_duo_ctx::table
+                    .filter(twofactor_duo_ctx::exp.lt(now))
+                    .load::<TwoFactorDuoContextDb>(conn)
+                    .expect("Error finding expired contexts in twofactor_duo_ctx")
+                    .from_db()
+            }
+        }
+    }
+
+    pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
+        db_run! {
+            conn: {
+                diesel::delete(
+                    twofactor_duo_ctx::table
+                        .filter(twofactor_duo_ctx::state.eq(&self.state)))
+                    .execute(conn)
+                    .map_res("Error deleting from twofactor_duo_ctx")
+            }
+        }
+    }
+
+    pub async fn purge_expired_duo_contexts(conn: &mut DbConn) {
+        for context in Self::find_expired(conn).await {
+            context.delete(conn).await.ok();
+        }
+    }
+}
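Taken together, this new table gives the Duo 2FA flow server-side state: `state` correlates the browser's return trip, `nonce` is bound into the verified token, and `exp` lets stale handshakes be swept by `purge_expired_duo_contexts`. A hypothetical call sequence inside the crate, as a sketch only — state/nonce generation and the actual Duo exchange are assumed to live in the auth code, which is not part of this diff:

// Sketch of the intended usage of the new context table.
async fn duo_flow_sketch(conn: &mut DbConn) -> EmptyResult {
    let state = "random-state"; // per-login correlation token (assumed generated elsewhere)
    let nonce = "random-nonce"; // bound into the returned token (assumed)

    // 5 minutes of validity; rows past `exp` are removed by the purge job.
    TwoFactorDuoContext::save(state, "user@example.com", nonce, 300, conn).await?;

    // Later, in the callback handler: look the context up and consume it.
    if let Some(ctx) = TwoFactorDuoContext::find_by_state(state, conn).await {
        ctx.delete(conn).await?;
    }
    Ok(())
}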
@@ -13,6 +13,7 @@ db_object! {
         // must complete 2FA login before being added into the devices table.
         pub device_uuid: String,
         pub device_name: String,
+        pub device_type: i32,
         pub login_time: NaiveDateTime,
         pub ip_address: String,
     }
@@ -23,6 +24,7 @@ impl TwoFactorIncomplete {
         user_uuid: &str,
         device_uuid: &str,
         device_name: &str,
+        device_type: i32,
         ip: &ClientIp,
         conn: &mut DbConn,
     ) -> EmptyResult {
@@ -44,6 +46,7 @@ impl TwoFactorIncomplete {
             twofactor_incomplete::user_uuid.eq(user_uuid),
             twofactor_incomplete::device_uuid.eq(device_uuid),
             twofactor_incomplete::device_name.eq(device_name),
+            twofactor_incomplete::device_type.eq(device_type),
             twofactor_incomplete::login_time.eq(Utc::now().naive_utc()),
             twofactor_incomplete::ip_address.eq(ip.ip.to_string()),
         ))
@@ -1,4 +1,5 @@
-use chrono::{Duration, NaiveDateTime, Utc};
+use crate::util::{format_date, get_uuid, retry};
+use chrono::{NaiveDateTime, TimeDelta, Utc};
 use serde_json::Value;
 
 use crate::crypto;
@@ -90,7 +91,7 @@ impl User {
         let email = email.to_lowercase();
 
         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: get_uuid(),
             enabled: true,
             created_at: now,
             updated_at: now,
@@ -107,7 +108,7 @@ impl User {
             salt: crypto::get_random_bytes::<64>().to_vec(),
             password_iterations: CONFIG.password_iterations(),
 
-            security_stamp: crate::util::get_uuid(),
+            security_stamp: get_uuid(),
             stamp_exception: None,
 
             password_hint: None,
@@ -144,14 +145,14 @@ impl User {
 
     pub fn check_valid_recovery_code(&self, recovery_code: &str) -> bool {
         if let Some(ref totp_recover) = self.totp_recover {
-            crate::crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
+            crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
         } else {
             false
         }
     }
 
     pub fn check_valid_api_key(&self, key: &str) -> bool {
-        matches!(self.api_key, Some(ref api_key) if crate::crypto::ct_eq(api_key, key))
+        matches!(self.api_key, Some(ref api_key) if crypto::ct_eq(api_key, key))
     }
 
     /// Set the password hash generated
@@ -188,7 +189,7 @@ impl User {
     }
 
     pub fn reset_security_stamp(&mut self) {
-        self.security_stamp = crate::util::get_uuid();
+        self.security_stamp = get_uuid();
     }
 
     /// Set the stamp_exception to only allow a subsequent request matching a specific route using the current security-stamp.
@@ -202,7 +203,7 @@ impl User {
         let stamp_exception = UserStampException {
             routes: route_exception,
             security_stamp: self.security_stamp.clone(),
-            expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
+            expire: (Utc::now() + TimeDelta::try_minutes(2).unwrap()).timestamp(),
         };
         self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
     }
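Recent chrono releases expose fallible `try_*` constructors on `TimeDelta` (of which `Duration` is now an alias); the hunk adopts them, trading the old constructor's implicit panic on out-of-range values for an explicit `unwrap` at the call site. A minimal illustration:

use chrono::{TimeDelta, Utc};

fn main() {
    // try_minutes returns None if the value would overflow the delta range,
    // making the failure case visible where the delta is constructed.
    let two_minutes = TimeDelta::try_minutes(2).unwrap();
    let expire = (Utc::now() + two_minutes).timestamp();
    println!("expires at unix time {expire}");
}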
@@ -240,24 +241,27 @@ impl User {
         };
 
         json!({
-            "_Status": status as i32,
-            "Id": self.uuid,
-            "Name": self.name,
-            "Email": self.email,
-            "EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
-            "Premium": true,
-            "MasterPasswordHint": self.password_hint,
-            "Culture": "en-US",
-            "TwoFactorEnabled": twofactor_enabled,
-            "Key": self.akey,
-            "PrivateKey": self.private_key,
-            "SecurityStamp": self.security_stamp,
-            "Organizations": orgs_json,
-            "Providers": [],
-            "ProviderOrganizations": [],
-            "ForcePasswordReset": false,
-            "AvatarColor": self.avatar_color,
-            "Object": "profile",
+            "_status": status as i32,
+            "id": self.uuid,
+            "name": self.name,
+            "email": self.email,
+            "emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
+            "premium": true,
+            "premiumFromOrganization": false,
+            "masterPasswordHint": self.password_hint,
+            "culture": "en-US",
+            "twoFactorEnabled": twofactor_enabled,
+            "key": self.akey,
+            "privateKey": self.private_key,
+            "securityStamp": self.security_stamp,
+            "organizations": orgs_json,
+            "providers": [],
+            "providerOrganizations": [],
+            "forcePasswordReset": false,
+            "avatarColor": self.avatar_color,
+            "usesKeyConnector": false,
+            "creationDate": format_date(&self.created_at),
+            "object": "profile",
         })
     }
@@ -311,6 +315,7 @@ impl User {
 
         Send::delete_all_by_user(&self.uuid, conn).await?;
         EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
+        EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
         UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
         Cipher::delete_all_by_user(&self.uuid, conn).await?;
         Favorite::delete_all_by_user(&self.uuid, conn).await?;
@@ -337,7 +342,7 @@ impl User {
         let updated_at = Utc::now().naive_utc();
 
         db_run! {conn: {
-            crate::util::retry(|| {
+            retry(|| {
                 diesel::update(users::table)
                     .set(users::updated_at.eq(updated_at))
                     .execute(conn)
@@ -354,7 +359,7 @@ impl User {
 
     async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
         db_run! {conn: {
-            crate::util::retry(|| {
+            retry(|| {
                 diesel::update(users::table.filter(users::uuid.eq(uuid)))
                     .set(users::updated_at.eq(date))
                     .execute(conn)
@@ -3,7 +3,7 @@ table! {
         id -> Text,
         cipher_uuid -> Text,
         file_name -> Text,
-        file_size -> Integer,
+        file_size -> BigInt,
         akey -> Nullable<Text>,
     }
 }
@@ -160,7 +160,7 @@ table! {
         atype -> Integer,
         enabled -> Bool,
         data -> Text,
-        last_used -> Integer,
+        last_used -> BigInt,
     }
 }
 
@@ -169,11 +169,21 @@ table! {
         user_uuid -> Text,
         device_uuid -> Text,
         device_name -> Text,
+        device_type -> Integer,
         login_time -> Timestamp,
         ip_address -> Text,
     }
 }
 
+table! {
+    twofactor_duo_ctx (state) {
+        state -> Text,
+        user_email -> Text,
+        nonce -> Text,
+        exp -> BigInt,
+    }
+}
+
 table! {
     users (uuid) {
         uuid -> Text,
@@ -3,7 +3,7 @@ table! {
         id -> Text,
         cipher_uuid -> Text,
         file_name -> Text,
-        file_size -> Integer,
+        file_size -> BigInt,
         akey -> Nullable<Text>,
     }
 }
@@ -160,7 +160,7 @@ table! {
         atype -> Integer,
         enabled -> Bool,
         data -> Text,
-        last_used -> Integer,
+        last_used -> BigInt,
     }
 }
 
@@ -169,11 +169,21 @@ table! {
         user_uuid -> Text,
         device_uuid -> Text,
         device_name -> Text,
+        device_type -> Integer,
         login_time -> Timestamp,
         ip_address -> Text,
     }
 }
 
+table! {
+    twofactor_duo_ctx (state) {
+        state -> Text,
+        user_email -> Text,
+        nonce -> Text,
+        exp -> BigInt,
+    }
+}
+
 table! {
     users (uuid) {
         uuid -> Text,
@@ -3,7 +3,7 @@ table! {
         id -> Text,
         cipher_uuid -> Text,
         file_name -> Text,
-        file_size -> Integer,
+        file_size -> BigInt,
         akey -> Nullable<Text>,
     }
 }
@@ -160,7 +160,7 @@ table! {
         atype -> Integer,
         enabled -> Bool,
         data -> Text,
-        last_used -> Integer,
+        last_used -> BigInt,
     }
 }
 
@@ -169,11 +169,21 @@ table! {
         user_uuid -> Text,
         device_uuid -> Text,
         device_name -> Text,
+        device_type -> Integer,
         login_time -> Timestamp,
         ip_address -> Text,
     }
 }
 
+table! {
+    twofactor_duo_ctx (state) {
+        state -> Text,
+        user_email -> Text,
+        nonce -> Text,
+        exp -> BigInt,
+    }
+}
+
 table! {
     users (uuid) {
         uuid -> Text,
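The three identical hunks above appear to be the per-backend schema files (vaultwarden ships one `table!` schema per supported database; the file paths are not shown in this diff). The `Integer -> BigInt` change widens the Rust-side type from i32 to i64, so `file_size` is no longer capped at roughly 2 GiB and `last_used` can hold full 64-bit unix timestamps. A sketch of the mapping:

use diesel::prelude::*;

diesel::table! {
    attachments (id) {
        id -> Text,
        file_size -> BigInt, // was `Integer`
    }
}

// `BigInt` maps to i64 on the Rust side; `Integer` mapped to i32.
// The Queryable struct must use the matching width.
#[derive(Queryable)]
struct Attachment {
    id: String,
    file_size: i64,
}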
src/error.rs (31 changed lines)

@@ -2,6 +2,7 @@
 // Error generator macro
 //
 use crate::db::models::EventType;
+use crate::http_client::CustomHttpClientError;
 use std::error::Error as StdError;
 
 macro_rules! make_error {
@@ -52,7 +53,6 @@ use rocket::error::Error as RocketErr;
 use serde_json::{Error as SerdeErr, Value};
 use std::io::Error as IoErr;
 use std::time::SystemTimeError as TimeErr;
-use tokio_tungstenite::tungstenite::Error as TungstError;
 use webauthn_rs::error::WebauthnError as WebauthnErr;
 use yubico::yubicoerror::YubicoError as YubiErr;
@@ -69,6 +69,10 @@ make_error! {
     Empty(Empty): _no_source, _serialize,
     // Used to represent err! calls
     Simple(String): _no_source, _api_error,
+
+    // Used in our custom http client to handle non-global IPs and blocked domains
+    CustomHttpClient(CustomHttpClientError): _has_source, _api_error,
+
     // Used for special return values, like 2FA errors
     Json(Value): _no_source, _serialize,
     Db(DieselErr): _has_source, _api_error,
@@ -91,7 +95,6 @@ make_error! {
 
     DieselCon(DieselConErr): _has_source, _api_error,
     Webauthn(WebauthnErr): _has_source, _api_error,
-    WebSocket(TungstError): _has_source, _api_error,
 }
 
 impl std::fmt::Debug for Error {
@@ -181,18 +184,18 @@ fn _serialize(e: &impl serde::Serialize, _msg: &str) -> String {
 
 fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
     let json = json!({
-        "Message": msg,
+        "message": msg,
         "error": "",
         "error_description": "",
-        "ValidationErrors": {"": [ msg ]},
-        "ErrorModel": {
-            "Message": msg,
-            "Object": "error"
+        "validationErrors": {"": [ msg ]},
+        "errorModel": {
+            "message": msg,
+            "object": "error"
         },
-        "ExceptionMessage": null,
-        "ExceptionStackTrace": null,
-        "InnerExceptionMessage": null,
-        "Object": "error"
+        "exceptionMessage": null,
+        "exceptionStackTrace": null,
+        "innerExceptionMessage": null,
+        "object": "error"
     });
     _serialize(&json, "")
 }
@@ -206,7 +209,7 @@ use rocket::http::{ContentType, Status};
 use rocket::request::Request;
 use rocket::response::{self, Responder, Response};
 
-impl<'r> Responder<'r, 'static> for Error {
+impl Responder<'_, 'static> for Error {
     fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
         match self.error {
             ErrorKind::Empty(_) => {} // Don't print the error in this situation
@@ -291,10 +294,10 @@ macro_rules! err_json {
 macro_rules! err_handler {
     ($expr:expr) => {{
         error!(target: "auth", "Unauthorized Error: {}", $expr);
-        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $expr));
+        return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $expr));
     }};
     ($usr_msg:expr, $log_value:expr) => {{
        error!(target: "auth", "Unauthorized Error: {}. {}", $usr_msg, $log_value);
-        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $usr_msg));
+        return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $usr_msg));
     }};
 }
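The `Outcome::Failure` to `Outcome::Error` rename tracks rocket 0.5, which renamed the request-guard failure variant. A standalone request guard in the 0.5 style, as a sketch (the header name and token type are illustrative):

use rocket::http::Status;
use rocket::request::{FromRequest, Outcome, Request};

struct ApiToken(String);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ApiToken {
    type Error = &'static str;

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match req.headers().get_one("x-api-token") {
            Some(t) => Outcome::Success(ApiToken(t.to_string())),
            // 0.5-rc code would have written Outcome::Failure here.
            None => Outcome::Error((Status::Unauthorized, "missing token")),
        }
    }
}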
src/http_client.rs (new file, 246 lines)

@@ -0,0 +1,246 @@
+use std::{
+    fmt,
+    net::{IpAddr, SocketAddr},
+    str::FromStr,
+    sync::{Arc, Mutex},
+    time::Duration,
+};
+
+use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
+use once_cell::sync::Lazy;
+use regex::Regex;
+use reqwest::{
+    dns::{Name, Resolve, Resolving},
+    header, Client, ClientBuilder,
+};
+use url::Host;
+
+use crate::{util::is_global, CONFIG};
+
+pub fn make_http_request(method: reqwest::Method, url: &str) -> Result<reqwest::RequestBuilder, crate::Error> {
+    let Ok(url) = url::Url::parse(url) else {
+        err!("Invalid URL");
+    };
+    let Some(host) = url.host() else {
+        err!("Invalid host");
+    };
+
+    should_block_host(host)?;
+
+    static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
+
+    Ok(INSTANCE.request(method, url))
+}
+
+pub fn get_reqwest_client_builder() -> ClientBuilder {
+    let mut headers = header::HeaderMap::new();
+    headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Vaultwarden"));
+
+    let redirect_policy = reqwest::redirect::Policy::custom(|attempt| {
+        if attempt.previous().len() >= 5 {
+            return attempt.error("Too many redirects");
+        }
+
+        let Some(host) = attempt.url().host() else {
+            return attempt.error("Invalid host");
+        };
+
+        if let Err(e) = should_block_host(host) {
+            return attempt.error(e);
+        }
+
+        attempt.follow()
+    });
+
+    Client::builder()
+        .default_headers(headers)
+        .redirect(redirect_policy)
+        .dns_resolver(CustomDnsResolver::instance())
+        .timeout(Duration::from_secs(10))
+}
+
+pub fn should_block_address(domain_or_ip: &str) -> bool {
+    if let Ok(ip) = IpAddr::from_str(domain_or_ip) {
+        if should_block_ip(ip) {
+            return true;
+        }
+    }
+
+    should_block_address_regex(domain_or_ip)
+}
+
+fn should_block_ip(ip: IpAddr) -> bool {
+    if !CONFIG.http_request_block_non_global_ips() {
+        return false;
+    }
+
+    !is_global(ip)
+}
+
+fn should_block_address_regex(domain_or_ip: &str) -> bool {
+    let Some(block_regex) = CONFIG.http_request_block_regex() else {
+        return false;
+    };
+
+    static COMPILED_REGEX: Mutex<Option<(String, Regex)>> = Mutex::new(None);
+    let mut guard = COMPILED_REGEX.lock().unwrap();
+
+    // If the stored regex is up to date, use it
+    if let Some((value, regex)) = &*guard {
+        if value == &block_regex {
+            return regex.is_match(domain_or_ip);
+        }
+    }
+
+    // If we don't have a regex stored, or it's not up to date, recreate it
+    let regex = Regex::new(&block_regex).unwrap();
+    let is_match = regex.is_match(domain_or_ip);
+    *guard = Some((block_regex, regex));
+
+    is_match
+}
+
+fn should_block_host(host: Host<&str>) -> Result<(), CustomHttpClientError> {
+    let (ip, host_str): (Option<IpAddr>, String) = match host {
+        Host::Ipv4(ip) => (Some(ip.into()), ip.to_string()),
+        Host::Ipv6(ip) => (Some(ip.into()), ip.to_string()),
+        Host::Domain(d) => (None, d.to_string()),
+    };
+
+    if let Some(ip) = ip {
+        if should_block_ip(ip) {
+            return Err(CustomHttpClientError::NonGlobalIp {
+                domain: None,
+                ip,
+            });
+        }
+    }
+
+    if should_block_address_regex(&host_str) {
+        return Err(CustomHttpClientError::Blocked {
+            domain: host_str,
+        });
+    }
+
+    Ok(())
+}
+
+#[derive(Debug, Clone)]
+pub enum CustomHttpClientError {
+    Blocked {
+        domain: String,
+    },
+    NonGlobalIp {
+        domain: Option<String>,
+        ip: IpAddr,
+    },
+}
+
+impl CustomHttpClientError {
+    pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
+        let mut source = e.source();
+
+        while let Some(err) = source {
+            source = err.source();
+            if let Some(err) = err.downcast_ref::<CustomHttpClientError>() {
+                return Some(err);
+            }
+        }
+        None
+    }
+}
+
+impl fmt::Display for CustomHttpClientError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Blocked {
+                domain,
+            } => write!(f, "Blocked domain: {domain} matched HTTP_REQUEST_BLOCK_REGEX"),
+            Self::NonGlobalIp {
+                domain: Some(domain),
+                ip,
+            } => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
+            Self::NonGlobalIp {
+                domain: None,
+                ip,
+            } => write!(f, "IP {ip} is not a global IP!"),
+        }
+    }
+}
+
+impl std::error::Error for CustomHttpClientError {}
+
+#[derive(Debug, Clone)]
+enum CustomDnsResolver {
+    Default(),
+    Hickory(Arc<TokioAsyncResolver>),
+}
+type BoxError = Box<dyn std::error::Error + Send + Sync>;
+
+impl CustomDnsResolver {
+    fn instance() -> Arc<Self> {
+        static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
+        Arc::clone(&*INSTANCE)
+    }
+
+    fn new() -> Arc<Self> {
+        match read_system_conf() {
+            Ok((config, opts)) => {
+                let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
+                Arc::new(Self::Hickory(Arc::new(resolver)))
+            }
+            Err(e) => {
+                warn!("Error creating Hickory resolver, falling back to default: {e:?}");
+                Arc::new(Self::Default())
+            }
+        }
+    }
+
+    // Note that we get an iterator of addresses, but we only grab the first one for convenience
+    async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
+        pre_resolve(name)?;
+
+        let result = match self {
+            Self::Default() => tokio::net::lookup_host(name).await?.next(),
+            Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
+        };
+
+        if let Some(addr) = &result {
+            post_resolve(name, addr.ip())?;
+        }
+
+        Ok(result)
+    }
+}
+
+fn pre_resolve(name: &str) -> Result<(), CustomHttpClientError> {
+    if should_block_address(name) {
+        return Err(CustomHttpClientError::Blocked {
+            domain: name.to_string(),
+        });
+    }
+
+    Ok(())
+}
+
+fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomHttpClientError> {
+    if should_block_ip(ip) {
+        Err(CustomHttpClientError::NonGlobalIp {
+            domain: Some(name.to_string()),
+            ip,
+        })
+    } else {
+        Ok(())
+    }
+}
+
+impl Resolve for CustomDnsResolver {
+    fn resolve(&self, name: Name) -> Resolving {
+        let this = self.clone();
+        Box::pin(async move {
+            let name = name.as_str();
+            let result = this.resolve_domain(name).await?;
+            Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
+        })
+    }
+}
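The new module makes `make_http_request` the single entry point for outbound requests: it validates the URL, applies the non-global-IP and regex blocklists, and hands back a `RequestBuilder` from a shared client whose redirect policy and DNS resolver re-check every hop and every resolved address. A sketch of a hypothetical caller inside the crate — the URL is illustrative, and the `?` conversions assume the crate's `Error` type as extended in the error.rs hunk above:

// Hypothetical caller: blocking happens before the request is built,
// then again at each redirect and each DNS answer.
async fn fetch_sketch() -> Result<String, crate::Error> {
    let res = crate::http_client::make_http_request(reqwest::Method::GET, "https://example.com/icon.png")?
        .send()
        .await?;
    Ok(res.text().await?)
}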
Some files are not shown because too many files changed in this diff.