From 2f9d7060bd966007bb3ac6435463a1389073d9b3 Mon Sep 17 00:00:00 2001 From: Stefan Melmuk Date: Tue, 22 Nov 2022 04:40:20 +0100 Subject: [PATCH 1/4] check if sqlite folder exists instead of creating the parent folders to a sqlite database vaultwarden should just exit if it does not. this should fix issues like #2835 when a wrongly configured `DATABASE_URL` falls back to using sqlite --- src/config.rs | 10 +++++++++- src/db/mod.rs | 21 +++++++-------------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/config.rs b/src/config.rs index 8427c37f..b8485af5 100644 --- a/src/config.rs +++ b/src/config.rs @@ -630,7 +630,15 @@ make_config! { fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { // Validate connection URL is valid and DB feature is enabled - DbConnType::from_url(&cfg.database_url)?; + let url = &cfg.database_url; + if DbConnType::from_url(url)? == DbConnType::sqlite { + let path = std::path::Path::new(&url); + if let Some(parent) = path.parent() { + if !parent.exists() { + err!(format!("SQLite database directory `{}` does not exist", parent.display())); + } + } + } let limit = 256; if cfg.database_max_conns < 1 || cfg.database_max_conns > limit { diff --git a/src/db/mod.rs b/src/db/mod.rs index a84002cd..c2570d9d 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -424,22 +424,15 @@ mod sqlite_migrations { pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/sqlite"); pub fn run_migrations() -> Result<(), super::Error> { - // Make sure the directory exists - let url = crate::CONFIG.database_url(); - let path = std::path::Path::new(&url); - - if let Some(parent) = path.parent() { - if std::fs::create_dir_all(parent).is_err() { - error!("Error creating database directory"); - std::process::exit(1); - } - } - use diesel::{Connection, RunQueryDsl}; - // Make sure the database is up to date (create if it doesn't exist, or run the migrations) - let mut connection = 
diesel::sqlite::SqliteConnection::establish(&crate::CONFIG.database_url())?; - // Disable Foreign Key Checks during migration + let url = crate::CONFIG.database_url(); + // Establish a connection to the sqlite database (this will create a new one, if it does + // not exist, and exit if there is an error). + let mut connection = diesel::sqlite::SqliteConnection::establish(&url)?; + + // Run the migrations after successfully establishing a connection + // Disable Foreign Key Checks during migration // Scoped to a connection. diesel::sql_query("PRAGMA foreign_keys = OFF") .execute(&mut connection) From 5a13efefd3e551e4fd47e2d748f6dab31ef1118d Mon Sep 17 00:00:00 2001 From: Stefan Melmuk Date: Tue, 22 Nov 2022 05:46:51 +0100 Subject: [PATCH 2/4] only check sqlite parent if there could be one --- src/config.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/config.rs b/src/config.rs index b8485af5..4aa8d649 100644 --- a/src/config.rs +++ b/src/config.rs @@ -631,11 +631,11 @@ make_config! { fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { // Validate connection URL is valid and DB feature is enabled let url = &cfg.database_url; - if DbConnType::from_url(url)? == DbConnType::sqlite { + if DbConnType::from_url(url)? == DbConnType::sqlite && url.contains('/') { let path = std::path::Path::new(&url); if let Some(parent) = path.parent() { - if !parent.exists() { - err!(format!("SQLite database directory `{}` does not exist", parent.display())); + if !parent.is_dir() { + err!(format!("SQLite database directory `{}` does not exist or is not a directory", parent.display())); } } } From b186813049a5c1c92aa44d118d7339f9ceaf9bb8 Mon Sep 17 00:00:00 2001 From: BlackDex Date: Sun, 20 Nov 2022 19:15:45 +0100 Subject: [PATCH 3/4] Add Organizational event logging feature This PR adds event/audit logging support for organizations. By default this feature is disabled, since it does log a lot and adds extra database transactions. 
All events are touched except a few, since we do not support those features (yet), like SSO for example. This feature is tested with multiple clients and all database types. Fixes #229 --- .env.template | 27 +- .../2022-10-18-170602_add_events/down.sql | 1 + .../mysql/2022-10-18-170602_add_events/up.sql | 19 + .../2022-10-18-170602_add_events/down.sql | 1 + .../2022-10-18-170602_add_events/up.sql | 19 + .../2022-10-18-170602_add_events/down.sql | 1 + .../2022-10-18-170602_add_events/up.sql | 19 + src/api/admin.rs | 44 +- src/api/core/accounts.rs | 25 +- src/api/core/ciphers.rs | 334 ++++++++++--- src/api/core/events.rs | 341 +++++++++++++ src/api/core/mod.rs | 10 + src/api/core/organizations.rs | 467 +++++++++++++++--- src/api/core/two_factor/authenticator.rs | 21 +- src/api/core/two_factor/duo.rs | 31 +- src/api/core/two_factor/email.rs | 37 +- src/api/core/two_factor/mod.rs | 25 +- src/api/core/two_factor/webauthn.rs | 39 +- src/api/core/two_factor/yubikey.rs | 27 +- src/api/identity.rs | 147 ++++-- src/api/mod.rs | 1 + src/config.rs | 19 +- src/db/models/event.rs | 318 ++++++++++++ src/db/models/mod.rs | 2 + src/db/models/organization.rs | 28 +- src/db/schemas/mysql/schema.rs | 23 + src/db/schemas/postgresql/schema.rs | 23 + src/db/schemas/sqlite/schema.rs | 24 + src/error.rs | 34 +- src/main.rs | 11 + src/util.rs | 9 +- 31 files changed, 1887 insertions(+), 240 deletions(-) create mode 100644 migrations/mysql/2022-10-18-170602_add_events/down.sql create mode 100644 migrations/mysql/2022-10-18-170602_add_events/up.sql create mode 100644 migrations/postgresql/2022-10-18-170602_add_events/down.sql create mode 100644 migrations/postgresql/2022-10-18-170602_add_events/up.sql create mode 100644 migrations/sqlite/2022-10-18-170602_add_events/down.sql create mode 100644 migrations/sqlite/2022-10-18-170602_add_events/up.sql create mode 100644 src/api/core/events.rs create mode 100644 src/db/models/event.rs diff --git a/.env.template b/.env.template index 
e06c09bd..736b6463 100644 --- a/.env.template +++ b/.env.template @@ -1,13 +1,14 @@ +# shellcheck disable=SC2034,SC2148 ## Vaultwarden Configuration File ## Uncomment any of the following lines to change the defaults ## ## Be aware that most of these settings will be overridden if they were changed ## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json . ## -## By default, vaultwarden expects for this file to be named ".env" and located +## By default, Vaultwarden expects for this file to be named ".env" and located ## in the current working directory. If this is not the case, the environment ## variable ENV_FILE can be set to the location of this file prior to starting -## vaultwarden. +## Vaultwarden. ## Main data folder # DATA_FOLDER=data @@ -80,11 +81,27 @@ ## This setting applies globally to all users. # EMERGENCY_ACCESS_ALLOWED=true +## Controls whether event logging is enabled for organizations +## This setting applies to organizations. +## Default this is disabled. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings. +# ORG_EVENTS_ENABLED=false + +## Number of days to retain events stored in the database. +## If unset (the default), events are kept indefently and also disables the scheduled job! +# EVENTS_DAYS_RETAIN= + ## Job scheduler settings ## ## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron), ## and are always in terms of UTC time (regardless of your local time zone settings). ## +## The schedule format is a bit different from crontab as crontab does not contains seconds. +## You can test the the format here: https://crontab.guru, but remove the first digit! +## SEC MIN HOUR DAY OF MONTH MONTH DAY OF WEEK +## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri" +## "0 30 * * * * " +## "0 30 1 * * * " +## ## How often (in ms) the job scheduler thread checks for jobs that need running. ## Set to 0 to globally disable scheduled jobs. 
# JOB_POLL_INTERVAL_MS=30000 @@ -108,6 +125,10 @@ ## Cron schedule of the job that grants emergency access requests that have met the required wait time. ## Defaults to hourly (5 minutes after the hour). Set blank to disable this job. # EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 5 * * * *" +## +## Cron schedule of the job that cleans old events from the event table. +## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start. +# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *" ## Enable extended logging, which shows timestamps and targets in the logs # EXTENDED_LOGGING=true @@ -133,7 +154,7 @@ ## Enable WAL for the DB ## Set to false to avoid enabling WAL during startup. ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB, -## this setting only prevents vaultwarden from automatically enabling it on start. +## this setting only prevents Vaultwarden from automatically enabling it on start. ## Please read project wiki page about this setting first before changing the value as it can ## cause performance degradation or might render the service unable to start. 
# ENABLE_DB_WAL=true diff --git a/migrations/mysql/2022-10-18-170602_add_events/down.sql b/migrations/mysql/2022-10-18-170602_add_events/down.sql new file mode 100644 index 00000000..8b975bc3 --- /dev/null +++ b/migrations/mysql/2022-10-18-170602_add_events/down.sql @@ -0,0 +1 @@ +DROP TABLE event; diff --git a/migrations/mysql/2022-10-18-170602_add_events/up.sql b/migrations/mysql/2022-10-18-170602_add_events/up.sql new file mode 100644 index 00000000..24e1c8cd --- /dev/null +++ b/migrations/mysql/2022-10-18-170602_add_events/up.sql @@ -0,0 +1,19 @@ +CREATE TABLE event ( + uuid CHAR(36) NOT NULL PRIMARY KEY, + event_type INTEGER NOT NULL, + user_uuid CHAR(36), + org_uuid CHAR(36), + cipher_uuid CHAR(36), + collection_uuid CHAR(36), + group_uuid CHAR(36), + org_user_uuid CHAR(36), + act_user_uuid CHAR(36), + device_type INTEGER, + ip_address TEXT, + event_date DATETIME NOT NULL, + policy_uuid CHAR(36), + provider_uuid CHAR(36), + provider_user_uuid CHAR(36), + provider_org_uuid CHAR(36), + UNIQUE (uuid) +); diff --git a/migrations/postgresql/2022-10-18-170602_add_events/down.sql b/migrations/postgresql/2022-10-18-170602_add_events/down.sql new file mode 100644 index 00000000..8b975bc3 --- /dev/null +++ b/migrations/postgresql/2022-10-18-170602_add_events/down.sql @@ -0,0 +1 @@ +DROP TABLE event; diff --git a/migrations/postgresql/2022-10-18-170602_add_events/up.sql b/migrations/postgresql/2022-10-18-170602_add_events/up.sql new file mode 100644 index 00000000..2d107b41 --- /dev/null +++ b/migrations/postgresql/2022-10-18-170602_add_events/up.sql @@ -0,0 +1,19 @@ +CREATE TABLE event ( + uuid CHAR(36) NOT NULL PRIMARY KEY, + event_type INTEGER NOT NULL, + user_uuid CHAR(36), + org_uuid CHAR(36), + cipher_uuid CHAR(36), + collection_uuid CHAR(36), + group_uuid CHAR(36), + org_user_uuid CHAR(36), + act_user_uuid CHAR(36), + device_type INTEGER, + ip_address TEXT, + event_date TIMESTAMP NOT NULL, + policy_uuid CHAR(36), + provider_uuid CHAR(36), + provider_user_uuid 
CHAR(36), + provider_org_uuid CHAR(36), + UNIQUE (uuid) +); diff --git a/migrations/sqlite/2022-10-18-170602_add_events/down.sql b/migrations/sqlite/2022-10-18-170602_add_events/down.sql new file mode 100644 index 00000000..8b975bc3 --- /dev/null +++ b/migrations/sqlite/2022-10-18-170602_add_events/down.sql @@ -0,0 +1 @@ +DROP TABLE event; diff --git a/migrations/sqlite/2022-10-18-170602_add_events/up.sql b/migrations/sqlite/2022-10-18-170602_add_events/up.sql new file mode 100644 index 00000000..e6b32722 --- /dev/null +++ b/migrations/sqlite/2022-10-18-170602_add_events/up.sql @@ -0,0 +1,19 @@ +CREATE TABLE event ( + uuid TEXT NOT NULL PRIMARY KEY, + event_type INTEGER NOT NULL, + user_uuid TEXT, + org_uuid TEXT, + cipher_uuid TEXT, + collection_uuid TEXT, + group_uuid TEXT, + org_user_uuid TEXT, + act_user_uuid TEXT, + device_type INTEGER, + ip_address TEXT, + event_date DATETIME NOT NULL, + policy_uuid TEXT, + provider_uuid TEXT, + provider_user_uuid TEXT, + provider_org_uuid TEXT, + UNIQUE (uuid) +); diff --git a/src/api/admin.rs b/src/api/admin.rs index 30ffdec2..6e10f0b6 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -13,7 +13,7 @@ use rocket::{ }; use crate::{ - api::{ApiResult, EmptyResult, JsonResult, NumberOrString}, + api::{core::log_event, ApiResult, EmptyResult, JsonResult, NumberOrString}, auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp}, config::ConfigBuilder, db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType}, @@ -81,6 +81,8 @@ const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z"; const BASE_TEMPLATE: &str = "admin/base"; +const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000"; + fn admin_path() -> String { format!("{}{}", CONFIG.domain_path(), ADMIN_PATH) } @@ -361,9 +363,27 @@ async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> Js } #[post("/users//delete")] -async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult { +async fn 
delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: ClientIp) -> EmptyResult { let user = get_user_or_404(&uuid, &mut conn).await?; - user.delete(&mut conn).await + + // Get the user_org records before deleting the actual user + let user_orgs = UserOrganization::find_any_state_by_user(&uuid, &mut conn).await; + let res = user.delete(&mut conn).await; + + for user_org in user_orgs { + log_event( + EventType::OrganizationUserRemoved as i32, + &user_org.uuid, + user_org.org_uuid, + String::from(ACTING_ADMIN_USER), + 14, // Use UnknownBrowser type + &ip.ip, + &mut conn, + ) + .await; + } + + res } #[post("/users//deauth")] @@ -409,7 +429,12 @@ struct UserOrgTypeData { } #[post("/users/org_type", data = "")] -async fn update_user_org_type(data: Json, _token: AdminToken, mut conn: DbConn) -> EmptyResult { +async fn update_user_org_type( + data: Json, + _token: AdminToken, + mut conn: DbConn, + ip: ClientIp, +) -> EmptyResult { let data: UserOrgTypeData = data.into_inner(); let mut user_to_edit = @@ -444,6 +469,17 @@ async fn update_user_org_type(data: Json, _token: AdminToken, m } } + log_event( + EventType::OrganizationUserUpdated as i32, + &user_to_edit.uuid, + data.org_uuid, + String::from(ACTING_ADMIN_USER), + 14, // Use UnknownBrowser type + &ip.ip, + &mut conn, + ) + .await; + user_to_edit.atype = new_type; user_to_edit.save(&mut conn).await } diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs index 2cd500d1..3315fbce 100644 --- a/src/api/core/accounts.rs +++ b/src/api/core/accounts.rs @@ -3,8 +3,10 @@ use rocket::serde::json::Json; use serde_json::Value; use crate::{ - api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType}, - auth::{decode_delete, decode_invite, decode_verify_email, Headers}, + api::{ + core::log_user_event, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType, + }, + auth::{decode_delete, decode_invite, decode_verify_email, ClientIp, Headers}, 
crypto, db::{models::*, DbConn}, mail, CONFIG, @@ -268,7 +270,12 @@ struct ChangePassData { } #[post("/accounts/password", data = "")] -async fn post_password(data: JsonUpcase, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn post_password( + data: JsonUpcase, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, +) -> EmptyResult { let data: ChangePassData = data.into_inner().data; let mut user = headers.user; @@ -279,6 +286,8 @@ async fn post_password(data: JsonUpcase, headers: Headers, mut c user.password_hint = clean_password_hint(&data.MasterPasswordHint); enforce_password_hint_setting(&user.password_hint)?; + log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; + user.set_password( &data.NewMasterPasswordHash, Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]), @@ -334,7 +343,13 @@ struct KeyData { } #[post("/accounts/key", data = "")] -async fn post_rotatekey(data: JsonUpcase, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn post_rotatekey( + data: JsonUpcase, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> EmptyResult { let data: KeyData = data.into_inner().data; if !headers.user.check_valid_password(&data.MasterPasswordHash) { @@ -373,7 +388,7 @@ async fn post_rotatekey(data: JsonUpcase, headers: Headers, mut conn: D // Prevent triggering cipher updates via WebSockets by settings UpdateType::None // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues. - update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None) + update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None) .await? 
} diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index 39635efb..c8c741d4 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -10,8 +10,8 @@ use rocket::{ use serde_json::Value; use crate::{ - api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType}, - auth::Headers, + api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType}, + auth::{ClientIp, Headers}, crypto, db::{models::*, DbConn, DbPool}, CONFIG, @@ -247,9 +247,10 @@ async fn post_ciphers_admin( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - post_ciphers_create(data, headers, conn, nt).await + post_ciphers_create(data, headers, conn, ip, nt).await } /// Called when creating a new org-owned cipher, or cloning a cipher (whether @@ -260,6 +261,7 @@ async fn post_ciphers_create( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { let mut data: ShareCipherData = data.into_inner().data; @@ -287,12 +289,18 @@ async fn post_ciphers_create( // or otherwise), we can just ignore this field entirely. data.Cipher.LastKnownRevisionDate = None; - share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt).await + share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &ip, &nt).await } /// Called when creating a new user-owned cipher. 
#[post("/ciphers", data = "")] -async fn post_ciphers(data: JsonUpcase, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn post_ciphers( + data: JsonUpcase, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> JsonResult { let mut data: CipherData = data.into_inner().data; // The web/browser clients set this field to null as expected, but the @@ -302,7 +310,7 @@ async fn post_ciphers(data: JsonUpcase, headers: Headers, mut conn: data.LastKnownRevisionDate = None; let mut cipher = Cipher::new(data.Type, data.Name.clone()); - update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::CipherCreate).await?; + update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &ip, &nt, UpdateType::CipherCreate).await?; Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await)) } @@ -329,12 +337,14 @@ async fn enforce_personal_ownership_policy( Ok(()) } +#[allow(clippy::too_many_arguments)] pub async fn update_cipher_from_data( cipher: &mut Cipher, data: CipherData, headers: &Headers, shared_to_collection: bool, conn: &mut DbConn, + ip: &ClientIp, nt: &Notify<'_>, ut: UpdateType, ) -> EmptyResult { @@ -356,6 +366,9 @@ pub async fn update_cipher_from_data( err!("Organization mismatch. 
Please resync the client before updating the cipher") } + // Check if this cipher is being transferred from a personal to an organization vault + let transfer_cipher = cipher.organization_uuid.is_none() && data.OrganizationId.is_some(); + if let Some(org_id) = data.OrganizationId { match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await { None => err!("You don't have permission to add item to organization"), @@ -460,6 +473,26 @@ pub async fn update_cipher_from_data( cipher.set_favorite(data.Favorite, &headers.user.uuid, conn).await?; if ut != UpdateType::None { + // Only log events for organizational ciphers + if let Some(org_uuid) = &cipher.organization_uuid { + let event_type = match (&ut, transfer_cipher) { + (UpdateType::CipherCreate, true) => EventType::CipherCreated, + (UpdateType::CipherUpdate, true) => EventType::CipherShared, + (_, _) => EventType::CipherUpdated, + }; + + log_event( + event_type as i32, + &cipher.uuid, + String::from(org_uuid), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + conn, + ) + .await; + } + nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await).await; } @@ -488,6 +521,7 @@ async fn post_ciphers_import( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { enforce_personal_ownership_policy(None, &headers, &mut conn).await?; @@ -516,7 +550,8 @@ async fn post_ciphers_import( cipher_data.FolderId = folder_uuid; let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone()); - update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?; + update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None) + .await?; } let mut user = headers.user; @@ -532,9 +567,10 @@ async fn put_cipher_admin( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - put_cipher(uuid, data, 
headers, conn, nt).await + put_cipher(uuid, data, headers, conn, ip, nt).await } #[post("/ciphers//admin", data = "")] @@ -543,9 +579,10 @@ async fn post_cipher_admin( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - post_cipher(uuid, data, headers, conn, nt).await + post_cipher(uuid, data, headers, conn, ip, nt).await } #[post("/ciphers/", data = "")] @@ -554,9 +591,10 @@ async fn post_cipher( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - put_cipher(uuid, data, headers, conn, nt).await + put_cipher(uuid, data, headers, conn, ip, nt).await } #[put("/ciphers/", data = "")] @@ -565,6 +603,7 @@ async fn put_cipher( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { let data: CipherData = data.into_inner().data; @@ -583,7 +622,7 @@ async fn put_cipher( err!("Cipher is not write accessible") } - update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::CipherUpdate).await?; + update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &ip, &nt, UpdateType::CipherUpdate).await?; Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await)) } @@ -600,8 +639,9 @@ async fn put_collections_update( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - post_collections_admin(uuid, data, headers, conn).await + post_collections_admin(uuid, data, headers, conn, ip).await } #[post("/ciphers//collections", data = "")] @@ -610,8 +650,9 @@ async fn post_collections_update( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - post_collections_admin(uuid, data, headers, conn).await + post_collections_admin(uuid, data, headers, conn, ip).await } #[put("/ciphers//collections-admin", data = "")] @@ -620,8 +661,9 @@ async fn put_collections_admin( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: 
ClientIp, ) -> EmptyResult { - post_collections_admin(uuid, data, headers, conn).await + post_collections_admin(uuid, data, headers, conn, ip).await } #[post("/ciphers//collections-admin", data = "")] @@ -630,6 +672,7 @@ async fn post_collections_admin( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { let data: CollectionsAdminData = data.into_inner().data; @@ -665,6 +708,17 @@ async fn post_collections_admin( } } + log_event( + EventType::CipherUpdatedCollections as i32, + &cipher.uuid, + cipher.organization_uuid.unwrap(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + Ok(()) } @@ -681,11 +735,12 @@ async fn post_cipher_share( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { let data: ShareCipherData = data.into_inner().data; - share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &nt).await + share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &ip, &nt).await } #[put("/ciphers//share", data = "")] @@ -694,11 +749,12 @@ async fn put_cipher_share( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { let data: ShareCipherData = data.into_inner().data; - share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &nt).await + share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &ip, &nt).await } #[derive(Deserialize)] @@ -713,6 +769,7 @@ async fn put_cipher_share_selected( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { let mut data: ShareSelectedCipherData = data.into_inner().data; @@ -740,7 +797,7 @@ async fn put_cipher_share_selected( }; match shared_cipher_data.Cipher.Id.take() { - Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await?, + Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &ip, &nt).await?, None => err!("Request missing ids field"), }; } @@ 
-753,6 +810,7 @@ async fn share_cipher_by_uuid( data: ShareCipherData, headers: &Headers, conn: &mut DbConn, + ip: &ClientIp, nt: &Notify<'_>, ) -> JsonResult { let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { @@ -768,37 +826,30 @@ async fn share_cipher_by_uuid( let mut shared_to_collection = false; - match data.Cipher.OrganizationId.clone() { - // If we don't get an organization ID, we don't do anything - // No error because this is used when using the Clone functionality - None => {} - Some(organization_uuid) => { - for uuid in &data.CollectionIds { - match Collection::find_by_uuid_and_org(uuid, &organization_uuid, conn).await { - None => err!("Invalid collection ID provided"), - Some(collection) => { - if collection.is_writable_by_user(&headers.user.uuid, conn).await { - CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?; - shared_to_collection = true; - } else { - err!("No rights to modify the collection") - } + if let Some(organization_uuid) = &data.Cipher.OrganizationId { + for uuid in &data.CollectionIds { + match Collection::find_by_uuid_and_org(uuid, organization_uuid, conn).await { + None => err!("Invalid collection ID provided"), + Some(collection) => { + if collection.is_writable_by_user(&headers.user.uuid, conn).await { + CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?; + shared_to_collection = true; + } else { + err!("No rights to modify the collection") } } } } }; - update_cipher_from_data( - &mut cipher, - data.Cipher, - headers, - shared_to_collection, - conn, - nt, - UpdateType::CipherUpdate, - ) - .await?; + // When LastKnownRevisionDate is None, it is a new cipher, so send CipherCreate. 
+ let ut = if data.Cipher.LastKnownRevisionDate.is_some() { + UpdateType::CipherUpdate + } else { + UpdateType::CipherCreate + }; + + update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, ip, nt, ut).await?; Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await)) } @@ -893,6 +944,7 @@ async fn save_attachment( data: Form>, headers: &Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> Result<(Cipher, DbConn), crate::error::Error> { let cipher = match Cipher::find_by_uuid(&cipher_uuid, &mut conn).await { @@ -1011,6 +1063,19 @@ async fn save_attachment( nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&mut conn).await).await; + if let Some(org_uuid) = &cipher.organization_uuid { + log_event( + EventType::CipherAttachmentCreated as i32, + &cipher.uuid, + String::from(org_uuid), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + } + Ok((cipher, conn)) } @@ -1025,6 +1090,7 @@ async fn post_attachment_v2_data( data: Form>, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { let attachment = match Attachment::find_by_id(&attachment_id, &mut conn).await { @@ -1033,7 +1099,7 @@ async fn post_attachment_v2_data( None => err!("Attachment doesn't exist"), }; - save_attachment(attachment, uuid, data, &headers, conn, nt).await?; + save_attachment(attachment, uuid, data, &headers, conn, ip, nt).await?; Ok(()) } @@ -1045,13 +1111,14 @@ async fn post_attachment( data: Form>, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { // Setting this as None signifies to save_attachment() that it should create // the attachment database record as well as saving the data to disk. 
let attachment = None; - let (cipher, mut conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?; + let (cipher, mut conn) = save_attachment(attachment, uuid, data, &headers, conn, ip, nt).await?; Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await)) } @@ -1062,9 +1129,10 @@ async fn post_attachment_admin( data: Form>, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - post_attachment(uuid, data, headers, conn, nt).await + post_attachment(uuid, data, headers, conn, ip, nt).await } #[post("/ciphers//attachment//share", format = "multipart/form-data", data = "")] @@ -1074,10 +1142,11 @@ async fn post_attachment_share( data: Form>, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &nt).await?; - post_attachment(uuid, data, headers, conn, nt).await + _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &ip, &nt).await?; + post_attachment(uuid, data, headers, conn, ip, nt).await } #[post("/ciphers//attachment//delete-admin")] @@ -1086,9 +1155,10 @@ async fn delete_attachment_post_admin( attachment_id: String, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - delete_attachment(uuid, attachment_id, headers, conn, nt).await + delete_attachment(uuid, attachment_id, headers, conn, ip, nt).await } #[post("/ciphers//attachment//delete")] @@ -1097,9 +1167,10 @@ async fn delete_attachment_post( attachment_id: String, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - delete_attachment(uuid, attachment_id, headers, conn, nt).await + delete_attachment(uuid, attachment_id, headers, conn, ip, nt).await } #[delete("/ciphers//attachment/")] @@ -1108,9 +1179,10 @@ async fn delete_attachment( attachment_id: String, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - 
_delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &nt).await + _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &ip, &nt).await } #[delete("/ciphers//attachment//admin")] @@ -1119,39 +1191,70 @@ async fn delete_attachment_admin( attachment_id: String, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &nt).await + _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &ip, &nt).await } #[post("/ciphers//delete")] -async fn delete_cipher_post(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await +async fn delete_cipher_post( + uuid: String, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete } #[post("/ciphers//delete-admin")] -async fn delete_cipher_post_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await +async fn delete_cipher_post_admin( + uuid: String, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete } #[put("/ciphers//delete")] -async fn delete_cipher_put(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &nt).await +async fn delete_cipher_put( + uuid: String, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &ip, &nt).await // soft delete } #[put("/ciphers//delete-admin")] -async fn 
delete_cipher_put_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &nt).await +async fn delete_cipher_put_admin( + uuid: String, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &ip, &nt).await } #[delete("/ciphers/")] -async fn delete_cipher(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await +async fn delete_cipher(uuid: String, headers: Headers, mut conn: DbConn, ip: ClientIp, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete } #[delete("/ciphers//admin")] -async fn delete_cipher_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await +async fn delete_cipher_admin( + uuid: String, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete } #[delete("/ciphers", data = "")] @@ -1159,9 +1262,10 @@ async fn delete_cipher_selected( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - _delete_multiple_ciphers(data, headers, conn, false, nt).await + _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete } #[post("/ciphers/delete", data = "")] @@ -1169,9 +1273,10 @@ async fn delete_cipher_selected_post( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - _delete_multiple_ciphers(data, headers, conn, false, nt).await + _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete } #[put("/ciphers/delete", 
data = "")] @@ -1179,9 +1284,10 @@ async fn delete_cipher_selected_put( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - _delete_multiple_ciphers(data, headers, conn, true, nt).await // soft delete + _delete_multiple_ciphers(data, headers, conn, true, ip, nt).await // soft delete } #[delete("/ciphers/admin", data = "")] @@ -1189,9 +1295,10 @@ async fn delete_cipher_selected_admin( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - delete_cipher_selected(data, headers, conn, nt).await + _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete } #[post("/ciphers/delete-admin", data = "")] @@ -1199,9 +1306,10 @@ async fn delete_cipher_selected_post_admin( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - delete_cipher_selected_post(data, headers, conn, nt).await + _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete } #[put("/ciphers/delete-admin", data = "")] @@ -1209,19 +1317,32 @@ async fn delete_cipher_selected_put_admin( data: JsonUpcase, headers: Headers, conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { - delete_cipher_selected_put(data, headers, conn, nt).await + _delete_multiple_ciphers(data, headers, conn, true, ip, nt).await // soft delete } #[put("/ciphers//restore")] -async fn restore_cipher_put(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - _restore_cipher_by_uuid(&uuid, &headers, &mut conn, &nt).await +async fn restore_cipher_put( + uuid: String, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> JsonResult { + _restore_cipher_by_uuid(&uuid, &headers, &mut conn, &ip, &nt).await } #[put("/ciphers//restore-admin")] -async fn restore_cipher_put_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - 
_restore_cipher_by_uuid(&uuid, &headers, &mut conn, &nt).await +async fn restore_cipher_put_admin( + uuid: String, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, + nt: Notify<'_>, +) -> JsonResult { + _restore_cipher_by_uuid(&uuid, &headers, &mut conn, &ip, &nt).await } #[put("/ciphers/restore", data = "")] @@ -1229,9 +1350,10 @@ async fn restore_cipher_selected( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - _restore_multiple_ciphers(data, &headers, &mut conn, &nt).await + _restore_multiple_ciphers(data, &headers, &mut conn, ip, &nt).await } #[derive(Deserialize)] @@ -1303,6 +1425,7 @@ async fn delete_all( data: JsonUpcase, headers: Headers, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { let data: PasswordData = data.into_inner().data; @@ -1323,6 +1446,18 @@ async fn delete_all( if user_org.atype == UserOrgType::Owner { Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?; nt.send_user_update(UpdateType::Vault, &user).await; + + log_event( + EventType::OrganizationPurgedVault as i32, + &org_data.org_id, + org_data.org_id.clone(), + user.uuid, + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + Ok(()) } else { err!("You don't have permission to purge the organization vault"); @@ -1354,6 +1489,7 @@ async fn _delete_cipher_by_uuid( headers: &Headers, conn: &mut DbConn, soft_delete: bool, + ip: &ClientIp, nt: &Notify<'_>, ) -> EmptyResult { let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { @@ -1374,6 +1510,16 @@ async fn _delete_cipher_by_uuid( nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(conn).await).await; } + if let Some(org_uuid) = cipher.organization_uuid { + let event_type = match soft_delete { + true => EventType::CipherSoftDeleted as i32, + false => EventType::CipherDeleted as i32, + }; + + log_event(event_type, &cipher.uuid, org_uuid, headers.user.uuid.clone(), headers.device.atype, 
&ip.ip, conn) + .await; + } + Ok(()) } @@ -1382,6 +1528,7 @@ async fn _delete_multiple_ciphers( headers: Headers, mut conn: DbConn, soft_delete: bool, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { let data: Value = data.into_inner().data; @@ -1395,7 +1542,7 @@ async fn _delete_multiple_ciphers( }; for uuid in uuids { - if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &mut conn, soft_delete, &nt).await { + if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &mut conn, soft_delete, &ip, &nt).await { return error; }; } @@ -1403,7 +1550,13 @@ async fn _delete_multiple_ciphers( Ok(()) } -async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbConn, nt: &Notify<'_>) -> JsonResult { +async fn _restore_cipher_by_uuid( + uuid: &str, + headers: &Headers, + conn: &mut DbConn, + ip: &ClientIp, + nt: &Notify<'_>, +) -> JsonResult { let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), @@ -1417,6 +1570,19 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbCon cipher.save(conn).await?; nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await).await; + if let Some(org_uuid) = &cipher.organization_uuid { + log_event( + EventType::CipherRestored as i32, + &cipher.uuid.clone(), + String::from(org_uuid), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + conn, + ) + .await; + } + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await)) } @@ -1424,6 +1590,7 @@ async fn _restore_multiple_ciphers( data: JsonUpcase, headers: &Headers, conn: &mut DbConn, + ip: ClientIp, nt: &Notify<'_>, ) -> JsonResult { let data: Value = data.into_inner().data; @@ -1438,7 +1605,7 @@ async fn _restore_multiple_ciphers( let mut ciphers: Vec = Vec::new(); for uuid in uuids { - match _restore_cipher_by_uuid(uuid, headers, conn, nt).await { + match _restore_cipher_by_uuid(uuid, 
headers, conn, &ip, nt).await { Ok(json) => ciphers.push(json.into_inner()), err => return err, } @@ -1456,6 +1623,7 @@ async fn _delete_cipher_attachment_by_id( attachment_id: &str, headers: &Headers, conn: &mut DbConn, + ip: &ClientIp, nt: &Notify<'_>, ) -> EmptyResult { let attachment = match Attachment::find_by_id(attachment_id, conn).await { @@ -1479,6 +1647,18 @@ async fn _delete_cipher_attachment_by_id( // Delete attachment attachment.delete(conn).await?; nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await).await; + if let Some(org_uuid) = cipher.organization_uuid { + log_event( + EventType::CipherAttachmentDeleted as i32, + &cipher.uuid, + org_uuid, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + conn, + ) + .await; + } Ok(()) } diff --git a/src/api/core/events.rs b/src/api/core/events.rs new file mode 100644 index 00000000..43102712 --- /dev/null +++ b/src/api/core/events.rs @@ -0,0 +1,341 @@ +use std::net::IpAddr; + +use chrono::NaiveDateTime; +use rocket::{form::FromForm, serde::json::Json, Route}; +use serde_json::Value; + +use crate::{ + api::{EmptyResult, JsonResult, JsonUpcaseVec}, + auth::{AdminHeaders, ClientIp, Headers}, + db::{ + models::{Cipher, Event, UserOrganization}, + DbConn, DbPool, + }, + util::parse_date, + CONFIG, +}; + +/// ############################################################################################################### +/// /api routes +pub fn routes() -> Vec { + routes![get_org_events, get_cipher_events, get_user_events,] +} + +#[derive(FromForm)] +#[allow(non_snake_case)] +struct EventRange { + start: String, + end: String, + #[field(name = "continuationToken")] + continuation_token: Option, +} + +// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41 +#[get("/organizations//events?")] +async fn get_org_events(org_id: String, data: EventRange, _headers: 
AdminHeaders, mut conn: DbConn) -> JsonResult { + // Return an empty vec when we org events are disabled. + // This prevents client errors + let events_json: Vec = if !CONFIG.org_events_enabled() { + Vec::with_capacity(0) + } else { + let start_date = parse_date(&data.start); + let end_date = if let Some(before_date) = &data.continuation_token { + parse_date(before_date) + } else { + parse_date(&data.end) + }; + + Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn) + .await + .iter() + .map(|e| e.to_json()) + .collect() + }; + + Ok(Json(json!({ + "Data": events_json, + "Object": "list", + "ContinuationToken": get_continuation_token(&events_json), + }))) +} + +#[get("/ciphers//events?")] +async fn get_cipher_events(cipher_id: String, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult { + // Return an empty vec when we org events are disabled. + // This prevents client errors + let events_json: Vec = if !CONFIG.org_events_enabled() { + Vec::with_capacity(0) + } else { + let mut events_json = Vec::with_capacity(0); + if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await { + let start_date = parse_date(&data.start); + let end_date = if let Some(before_date) = &data.continuation_token { + parse_date(before_date) + } else { + parse_date(&data.end) + }; + + events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn) + .await + .iter() + .map(|e| e.to_json()) + .collect() + } + events_json + }; + + Ok(Json(json!({ + "Data": events_json, + "Object": "list", + "ContinuationToken": get_continuation_token(&events_json), + }))) +} + +#[get("/organizations//users//events?")] +async fn get_user_events( + org_id: String, + user_org_id: String, + data: EventRange, + _headers: AdminHeaders, + mut conn: DbConn, +) -> JsonResult { + // Return an empty vec when we org events are disabled. 
+ // This prevents client errors + let events_json: Vec = if !CONFIG.org_events_enabled() { + Vec::with_capacity(0) + } else { + let start_date = parse_date(&data.start); + let end_date = if let Some(before_date) = &data.continuation_token { + parse_date(before_date) + } else { + parse_date(&data.end) + }; + + Event::find_by_org_and_user_org(&org_id, &user_org_id, &start_date, &end_date, &mut conn) + .await + .iter() + .map(|e| e.to_json()) + .collect() + }; + + Ok(Json(json!({ + "Data": events_json, + "Object": "list", + "ContinuationToken": get_continuation_token(&events_json), + }))) +} + +fn get_continuation_token(events_json: &Vec) -> Option<&str> { + // When the length of the vec equals the max page_size there probably is more data + // When it is less, then all events are loaded. + if events_json.len() as i64 == Event::PAGE_SIZE { + if let Some(last_event) = events_json.last() { + last_event["date"].as_str() + } else { + None + } + } else { + None + } +} + +/// ############################################################################################################### +/// /events routes +pub fn main_routes() -> Vec { + routes![post_events_collect,] +} + +#[derive(Deserialize, Debug)] +#[allow(non_snake_case)] +struct EventCollection { + // Mandatory + Type: i32, + Date: String, + + // Optional + CipherId: Option, + OrganizationId: Option, +} + +// Upstream: +// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs +// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs +#[post("/collect", format = "application/json", data = "")] +async fn post_events_collect( + data: JsonUpcaseVec, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, +) -> EmptyResult { + if !CONFIG.org_events_enabled() { + return Ok(()); + } + + for event in data.iter().map(|d| &d.data) { + let event_date = parse_date(&event.Date); + 
match event.Type { + 1000..=1099 => { + _log_user_event( + event.Type, + &headers.user.uuid, + headers.device.atype, + Some(event_date), + &ip.ip, + &mut conn, + ) + .await; + } + 1600..=1699 => { + if let Some(org_uuid) = &event.OrganizationId { + _log_event( + event.Type, + org_uuid, + String::from(org_uuid), + &headers.user.uuid, + headers.device.atype, + Some(event_date), + &ip.ip, + &mut conn, + ) + .await; + } + } + _ => { + if let Some(cipher_uuid) = &event.CipherId { + if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await { + if let Some(org_uuid) = cipher.organization_uuid { + _log_event( + event.Type, + cipher_uuid, + org_uuid, + &headers.user.uuid, + headers.device.atype, + Some(event_date), + &ip.ip, + &mut conn, + ) + .await; + } + } + } + } + } + } + Ok(()) +} + +pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) { + if !CONFIG.org_events_enabled() { + return; + } + _log_user_event(event_type, user_uuid, device_type, None, ip, conn).await; +} + +async fn _log_user_event( + event_type: i32, + user_uuid: &str, + device_type: i32, + event_date: Option, + ip: &IpAddr, + conn: &mut DbConn, +) { + let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await; + let mut events: Vec = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org + + // Upstream saves the event also without any org_uuid. 
+ let mut event = Event::new(event_type, event_date); + event.user_uuid = Some(String::from(user_uuid)); + event.act_user_uuid = Some(String::from(user_uuid)); + event.device_type = Some(device_type); + event.ip_address = Some(ip.to_string()); + events.push(event); + + // For each org a user is a member of store these events per org + for org_uuid in orgs { + let mut event = Event::new(event_type, event_date); + event.user_uuid = Some(String::from(user_uuid)); + event.org_uuid = Some(org_uuid); + event.act_user_uuid = Some(String::from(user_uuid)); + event.device_type = Some(device_type); + event.ip_address = Some(ip.to_string()); + events.push(event); + } + + Event::save_user_event(events, conn).await.unwrap_or(()); +} + +pub async fn log_event( + event_type: i32, + source_uuid: &str, + org_uuid: String, + act_user_uuid: String, + device_type: i32, + ip: &IpAddr, + conn: &mut DbConn, +) { + if !CONFIG.org_events_enabled() { + return; + } + _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await; +} + +#[allow(clippy::too_many_arguments)] +async fn _log_event( + event_type: i32, + source_uuid: &str, + org_uuid: String, + act_user_uuid: &str, + device_type: i32, + event_date: Option, + ip: &IpAddr, + conn: &mut DbConn, +) { + // Create a new empty event + let mut event = Event::new(event_type, event_date); + match event_type { + // 1000..=1099 Are user events, they need to be logged via log_user_event() + // Collection Events + 1100..=1199 => { + event.cipher_uuid = Some(String::from(source_uuid)); + } + // Collection Events + 1300..=1399 => { + event.collection_uuid = Some(String::from(source_uuid)); + } + // Group Events + 1400..=1499 => { + event.group_uuid = Some(String::from(source_uuid)); + } + // Org User Events + 1500..=1599 => { + event.org_user_uuid = Some(String::from(source_uuid)); + } + // 1600..=1699 Are organizational events, and they do not need the source_uuid + // Policy Events + 1700..=1799 => { + 
event.policy_uuid = Some(String::from(source_uuid)); + } + // Ignore others + _ => {} + } + + event.org_uuid = Some(org_uuid); + event.act_user_uuid = Some(String::from(act_user_uuid)); + event.device_type = Some(device_type); + event.ip_address = Some(ip.to_string()); + event.save(conn).await.unwrap_or(()); +} + +pub async fn event_cleanup_job(pool: DbPool) { + debug!("Start events cleanup job"); + if CONFIG.events_days_retain().is_none() { + debug!("events_days_retain is not configured, abort"); + return; + } + + if let Ok(mut conn) = pool.get().await { + Event::clean_events(&mut conn).await.ok(); + } else { + error!("Failed to get DB connection while trying to cleanup the events table") + } +} diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index 0df9a9dc..885fae81 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -1,6 +1,7 @@ pub mod accounts; mod ciphers; mod emergency_access; +mod events; mod folders; mod organizations; mod sends; @@ -9,6 +10,7 @@ pub mod two_factor; pub use ciphers::purge_trashed_ciphers; pub use ciphers::{CipherSyncData, CipherSyncType}; pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job}; +pub use events::{event_cleanup_job, log_event, log_user_event}; pub use sends::purge_sends; pub use two_factor::send_incomplete_2fa_notifications; @@ -22,6 +24,7 @@ pub fn routes() -> Vec { routes.append(&mut accounts::routes()); routes.append(&mut ciphers::routes()); routes.append(&mut emergency_access::routes()); + routes.append(&mut events::routes()); routes.append(&mut folders::routes()); routes.append(&mut organizations::routes()); routes.append(&mut two_factor::routes()); @@ -34,6 +37,13 @@ pub fn routes() -> Vec { routes } +pub fn events_routes() -> Vec { + let mut routes = Vec::new(); + routes.append(&mut events::main_routes()); + + routes +} + // // Move this somewhere else // diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index 744b0a12..57a982f9 100644 
--- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -5,11 +5,11 @@ use serde_json::Value; use crate::{ api::{ - core::{CipherSyncData, CipherSyncType}, + core::{log_event, CipherSyncData, CipherSyncType}, ApiResult, EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData, UpdateType, }, - auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders}, + auth::{decode_invite, AdminHeaders, ClientIp, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders}, db::{models::*, DbConn}, error::Error, mail, @@ -203,7 +203,7 @@ async fn post_delete_organization( } #[post("/organizations//leave")] -async fn leave_organization(org_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn leave_organization(org_id: String, headers: Headers, mut conn: DbConn, ip: ClientIp) -> EmptyResult { match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await { None => err!("User not part of organization"), Some(user_org) => { @@ -213,6 +213,17 @@ async fn leave_organization(org_id: String, headers: Headers, mut conn: DbConn) err!("The last owner can't leave") } + log_event( + EventType::OrganizationUserRemoved as i32, + &user_org.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + user_org.delete(&mut conn).await } } @@ -232,16 +243,18 @@ async fn put_organization( headers: OwnerHeaders, data: JsonUpcase, conn: DbConn, + ip: ClientIp, ) -> JsonResult { - post_organization(org_id, headers, data, conn).await + post_organization(org_id, headers, data, conn, ip).await } #[post("/organizations/", data = "")] async fn post_organization( org_id: String, - _headers: OwnerHeaders, + headers: OwnerHeaders, data: JsonUpcase, mut conn: DbConn, + ip: ClientIp, ) -> JsonResult { let data: OrganizationUpdateData = data.into_inner().data; @@ -254,6 +267,18 @@ async fn post_organization( 
org.billing_email = data.BillingEmail; org.save(&mut conn).await?; + + log_event( + EventType::OrganizationUpdated as i32, + &org_id, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + Ok(Json(org.to_json())) } @@ -290,6 +315,7 @@ async fn post_organization_collections( headers: ManagerHeadersLoose, data: JsonUpcase, mut conn: DbConn, + ip: ClientIp, ) -> JsonResult { let data: NewCollectionData = data.into_inner().data; @@ -307,6 +333,17 @@ async fn post_organization_collections( let collection = Collection::new(org.uuid, data.Name); collection.save(&mut conn).await?; + log_event( + EventType::CollectionCreated as i32, + &collection.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + for group in data.Groups { CollectionGroup::new(collection.uuid.clone(), group.Id, group.ReadOnly, group.HidePasswords) .save(&mut conn) @@ -330,17 +367,19 @@ async fn put_organization_collection_update( headers: ManagerHeaders, data: JsonUpcase, conn: DbConn, + ip: ClientIp, ) -> JsonResult { - post_organization_collection_update(org_id, col_id, headers, data, conn).await + post_organization_collection_update(org_id, col_id, headers, data, conn, ip).await } #[post("/organizations//collections/", data = "")] async fn post_organization_collection_update( org_id: String, col_id: String, - _headers: ManagerHeaders, + headers: ManagerHeaders, data: JsonUpcase, mut conn: DbConn, + ip: ClientIp, ) -> JsonResult { let data: NewCollectionData = data.into_inner().data; @@ -361,6 +400,17 @@ async fn post_organization_collection_update( collection.name = data.Name; collection.save(&mut conn).await?; + log_event( + EventType::CollectionUpdated as i32, + &collection.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?; for group in data.Groups { @@ -415,13 
+465,24 @@ async fn post_organization_collection_delete_user( async fn delete_organization_collection( org_id: String, col_id: String, - _headers: ManagerHeaders, + headers: ManagerHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { match Collection::find_by_uuid(&col_id, &mut conn).await { None => err!("Collection not found"), Some(collection) => { if collection.org_uuid == org_id { + log_event( + EventType::CollectionDeleted as i32, + &collection.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; collection.delete(&mut conn).await } else { err!("Collection and Organization id do not match") @@ -444,8 +505,9 @@ async fn post_organization_collection_delete( headers: ManagerHeaders, _data: JsonUpcase, conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - delete_organization_collection(org_id, col_id, headers, conn).await + delete_organization_collection(org_id, col_id, headers, conn, ip).await } #[get("/organizations//collections//details")] @@ -632,6 +694,7 @@ async fn send_invite( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { let data: InviteData = data.into_inner().data; @@ -700,6 +763,17 @@ async fn send_invite( new_user.save(&mut conn).await?; + log_event( + EventType::OrganizationUserInvited as i32, + &new_user.uuid, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + if CONFIG.mail_enabled() { let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await { Some(org) => org.name, @@ -882,6 +956,7 @@ async fn bulk_confirm_invite( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> Json { let data = data.into_inner().data; @@ -891,7 +966,7 @@ async fn bulk_confirm_invite( for invite in keys { let org_user_id = invite["Id"].as_str().unwrap_or_default(); let user_key = invite["Key"].as_str().unwrap_or_default(); - let err_msg = match _confirm_invite(&org_id, 
org_user_id, user_key, &headers, &mut conn).await { + let err_msg = match _confirm_invite(&org_id, org_user_id, user_key, &headers, &mut conn, &ip).await { Ok(_) => String::new(), Err(e) => format!("{:?}", e), }; @@ -922,10 +997,11 @@ async fn confirm_invite( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { let data = data.into_inner().data; let user_key = data["Key"].as_str().unwrap_or_default(); - _confirm_invite(&org_id, &org_user_id, user_key, &headers, &mut conn).await + _confirm_invite(&org_id, &org_user_id, user_key, &headers, &mut conn, &ip).await } async fn _confirm_invite( @@ -934,6 +1010,7 @@ async fn _confirm_invite( key: &str, headers: &AdminHeaders, conn: &mut DbConn, + ip: &ClientIp, ) -> EmptyResult { if key.is_empty() || org_user_id.is_empty() { err!("Key or UserId is not set, unable to process request"); @@ -969,6 +1046,17 @@ async fn _confirm_invite( user_to_confirm.status = UserOrgStatus::Confirmed as i32; user_to_confirm.akey = key.to_string(); + log_event( + EventType::OrganizationUserConfirmed as i32, + &user_to_confirm.uuid, + String::from(org_id), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + conn, + ) + .await; + if CONFIG.mail_enabled() { let org_name = match Organization::find_by_uuid(org_id, conn).await { Some(org) => org.name, @@ -1009,8 +1097,9 @@ async fn put_organization_user( data: JsonUpcase, headers: AdminHeaders, conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - edit_user(org_id, org_user_id, data, headers, conn).await + edit_user(org_id, org_user_id, data, headers, conn, ip).await } #[post("/organizations//users/", data = "", rank = 1)] @@ -1020,6 +1109,7 @@ async fn edit_user( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { let data: EditUserData = data.into_inner().data; @@ -1095,6 +1185,17 @@ async fn edit_user( } } + log_event( + EventType::OrganizationUserUpdated as i32, + &user_to_edit.uuid, + org_id.clone(), + 
headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + user_to_edit.save(&mut conn).await } @@ -1104,12 +1205,13 @@ async fn bulk_delete_user( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> Json { let data: OrgBulkIds = data.into_inner().data; let mut bulk_response = Vec::new(); for org_user_id in data.Ids { - let err_msg = match _delete_user(&org_id, &org_user_id, &headers, &mut conn).await { + let err_msg = match _delete_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await { Ok(_) => String::new(), Err(e) => format!("{:?}", e), }; @@ -1131,11 +1233,34 @@ async fn bulk_delete_user( } #[delete("/organizations//users/")] -async fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { - _delete_user(&org_id, &org_user_id, &headers, &mut conn).await +async fn delete_user( + org_id: String, + org_user_id: String, + headers: AdminHeaders, + mut conn: DbConn, + ip: ClientIp, +) -> EmptyResult { + _delete_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await } -async fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, conn: &mut DbConn) -> EmptyResult { +#[post("/organizations//users//delete")] +async fn post_delete_user( + org_id: String, + org_user_id: String, + headers: AdminHeaders, + mut conn: DbConn, + ip: ClientIp, +) -> EmptyResult { + _delete_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await +} + +async fn _delete_user( + org_id: &str, + org_user_id: &str, + headers: &AdminHeaders, + conn: &mut DbConn, + ip: &ClientIp, +) -> EmptyResult { let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { Some(user) => user, None => err!("User to delete isn't member of the organization"), @@ -1152,12 +1277,18 @@ async fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, c } } - user_to_delete.delete(conn).await -} + log_event( + 
EventType::OrganizationUserRemoved as i32, + &user_to_delete.uuid, + String::from(org_id), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + conn, + ) + .await; -#[post("/organizations//users//delete")] -async fn post_delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult { - delete_user(org_id, org_user_id, headers, conn).await + user_to_delete.delete(conn).await } #[post("/organizations//users/public-keys", data = "")] @@ -1223,6 +1354,7 @@ async fn post_org_import( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, nt: Notify<'_>, ) -> EmptyResult { let data: ImportData = data.into_inner().data; @@ -1249,7 +1381,9 @@ async fn post_org_import( let mut ciphers = Vec::new(); for cipher_data in data.Ciphers { let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone()); - update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok(); + update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None) + .await + .ok(); ciphers.push(cipher); } @@ -1333,8 +1467,9 @@ async fn put_policy( org_id: String, pol_type: i32, data: Json, - _headers: AdminHeaders, + headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> JsonResult { let data: PolicyData = data.into_inner(); @@ -1360,6 +1495,18 @@ async fn put_policy( mail::send_2fa_removed_from_org(&user.email, &org.name).await?; } + + log_event( + EventType::OrganizationUserRemoved as i32, + &member.uuid, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + member.delete(&mut conn).await?; } } @@ -1382,6 +1529,18 @@ async fn put_policy( mail::send_single_org_removed_from_org(&user.email, &org.name).await?; } + + log_event( + EventType::OrganizationUserRemoved as i32, + &member.uuid, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + 
.await; + member.delete(&mut conn).await?; } } @@ -1389,13 +1548,24 @@ async fn put_policy( let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &mut conn).await { Some(p) => p, - None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()), + None => OrgPolicy::new(org_id.clone(), pol_type_enum, "{}".to_string()), }; policy.enabled = data.enabled; policy.data = serde_json::to_string(&data.data)?; policy.save(&mut conn).await?; + log_event( + EventType::PolicyUpdated as i32, + &policy.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + Ok(Json(policy.to_json())) } @@ -1467,7 +1637,13 @@ struct OrgImportData { } #[post("/organizations//import", data = "")] -async fn import(org_id: String, data: JsonUpcase, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn import( + org_id: String, + data: JsonUpcase, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, +) -> EmptyResult { let data = data.into_inner().data; // TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way @@ -1487,6 +1663,17 @@ async fn import(org_id: String, data: JsonUpcase, headers: Header // If user is marked for deletion and it exists, delete it if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await { + log_event( + EventType::OrganizationUserRemoved as i32, + &user_org.uuid, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + user_org.delete(&mut conn).await?; } @@ -1506,6 +1693,17 @@ async fn import(org_id: String, data: JsonUpcase, headers: Header new_org_user.save(&mut conn).await?; + log_event( + EventType::OrganizationUserInvited as i32, + &new_org_user.uuid, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + if CONFIG.mail_enabled() { let org_name = match Organization::find_by_uuid(&org_id, &mut 
conn).await { Some(org) => org.name, @@ -1531,6 +1729,17 @@ async fn import(org_id: String, data: JsonUpcase, headers: Header for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User, &mut conn).await { if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.email) { if !data.Users.iter().any(|u| u.Email == user_email) { + log_event( + EventType::OrganizationUserRemoved as i32, + &user_org.uuid, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + user_org.delete(&mut conn).await?; } } @@ -1547,8 +1756,9 @@ async fn deactivate_organization_user( org_user_id: String, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn).await + _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await } // Pre web-vault v2022.9.x endpoint @@ -1558,8 +1768,9 @@ async fn bulk_deactivate_organization_user( data: JsonUpcase, headers: AdminHeaders, conn: DbConn, + ip: ClientIp, ) -> Json { - bulk_revoke_organization_user(org_id, data, headers, conn).await + bulk_revoke_organization_user(org_id, data, headers, conn, ip).await } #[put("/organizations//users//revoke")] @@ -1568,8 +1779,9 @@ async fn revoke_organization_user( org_user_id: String, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn).await + _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await } #[put("/organizations//users/revoke", data = "")] @@ -1578,6 +1790,7 @@ async fn bulk_revoke_organization_user( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> Json { let data = data.into_inner().data; @@ -1586,7 +1799,7 @@ async fn bulk_revoke_organization_user( Some(org_users) => { for org_user_id in org_users { let org_user_id = 
org_user_id.as_str().unwrap_or_default(); - let err_msg = match _revoke_organization_user(&org_id, org_user_id, &headers, &mut conn).await { + let err_msg = match _revoke_organization_user(&org_id, org_user_id, &headers, &mut conn, &ip).await { Ok(_) => String::new(), Err(e) => format!("{:?}", e), }; @@ -1615,6 +1828,7 @@ async fn _revoke_organization_user( org_user_id: &str, headers: &AdminHeaders, conn: &mut DbConn, + ip: &ClientIp, ) -> EmptyResult { match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { Some(mut user_org) if user_org.status > UserOrgStatus::Revoked as i32 => { @@ -1632,6 +1846,17 @@ async fn _revoke_organization_user( user_org.revoke(); user_org.save(conn).await?; + + log_event( + EventType::OrganizationUserRevoked as i32, + &user_org.uuid, + org_id.to_string(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + conn, + ) + .await; } Some(_) => err!("User is already revoked"), None => err!("User not found in organization"), @@ -1646,8 +1871,9 @@ async fn activate_organization_user( org_user_id: String, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn).await + _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await } // Pre web-vault v2022.9.x endpoint @@ -1657,8 +1883,9 @@ async fn bulk_activate_organization_user( data: JsonUpcase, headers: AdminHeaders, conn: DbConn, + ip: ClientIp, ) -> Json { - bulk_restore_organization_user(org_id, data, headers, conn).await + bulk_restore_organization_user(org_id, data, headers, conn, ip).await } #[put("/organizations//users//restore")] @@ -1667,8 +1894,9 @@ async fn restore_organization_user( org_user_id: String, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn).await + _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn, 
&ip).await } #[put("/organizations//users/restore", data = "")] @@ -1677,6 +1905,7 @@ async fn bulk_restore_organization_user( data: JsonUpcase, headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> Json { let data = data.into_inner().data; @@ -1685,7 +1914,7 @@ async fn bulk_restore_organization_user( Some(org_users) => { for org_user_id in org_users { let org_user_id = org_user_id.as_str().unwrap_or_default(); - let err_msg = match _restore_organization_user(&org_id, org_user_id, &headers, &mut conn).await { + let err_msg = match _restore_organization_user(&org_id, org_user_id, &headers, &mut conn, &ip).await { Ok(_) => String::new(), Err(e) => format!("{:?}", e), }; @@ -1714,6 +1943,7 @@ async fn _restore_organization_user( org_user_id: &str, headers: &AdminHeaders, conn: &mut DbConn, + ip: &ClientIp, ) -> EmptyResult { match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { Some(mut user_org) if user_org.status < UserOrgStatus::Accepted as i32 => { @@ -1740,6 +1970,17 @@ async fn _restore_organization_user( user_org.restore(); user_org.save(conn).await?; + + log_event( + EventType::OrganizationUserRestored as i32, + &user_org.uuid, + org_id.to_string(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + conn, + ) + .await; } Some(_) => err!("User is already active"), None => err!("User not found in organization"), @@ -1828,37 +2069,51 @@ impl SelectionReadOnly { } } -#[post("/organizations/<_org_id>/groups/", data = "")] +#[post("/organizations//groups/", data = "")] async fn post_group( - _org_id: String, + org_id: String, group_id: String, data: JsonUpcase, - _headers: AdminHeaders, + headers: AdminHeaders, conn: DbConn, + ip: ClientIp, ) -> JsonResult { - put_group(_org_id, group_id, data, _headers, conn).await + put_group(org_id, group_id, data, headers, conn, ip).await } #[post("/organizations//groups", data = "")] async fn post_groups( org_id: String, - _headers: AdminHeaders, + headers: AdminHeaders, data: 
JsonUpcase, mut conn: DbConn, + ip: ClientIp, ) -> JsonResult { let group_request = data.into_inner().data; let group = group_request.to_group(&org_id)?; + log_event( + EventType::GroupCreated as i32, + &group.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + add_update_group(group, group_request.Collections, &mut conn).await } -#[put("/organizations/<_org_id>/groups/", data = "")] +#[put("/organizations//groups/", data = "")] async fn put_group( - _org_id: String, + org_id: String, group_id: String, data: JsonUpcase, - _headers: AdminHeaders, + headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> JsonResult { let group = match Group::find_by_uuid(&group_id, &mut conn).await { Some(group) => group, @@ -1870,6 +2125,17 @@ async fn put_group( CollectionGroup::delete_all_by_group(&group_id, &mut conn).await?; + log_event( + EventType::GroupUpdated as i32, + &updated_group.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + add_update_group(updated_group, group_request.Collections, &mut conn).await } @@ -1915,17 +2181,40 @@ async fn get_group_details(_org_id: String, group_id: String, _headers: AdminHea } #[post("/organizations//groups//delete")] -async fn post_delete_group(org_id: String, group_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult { - delete_group(org_id, group_id, _headers, conn).await +async fn post_delete_group( + org_id: String, + group_id: String, + headers: AdminHeaders, + conn: DbConn, + ip: ClientIp, +) -> EmptyResult { + delete_group(org_id, group_id, headers, conn, ip).await } -#[delete("/organizations/<_org_id>/groups/")] -async fn delete_group(_org_id: String, group_id: String, _headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { +#[delete("/organizations//groups/")] +async fn delete_group( + org_id: String, + group_id: String, + headers: AdminHeaders, + mut conn: DbConn, + ip: ClientIp, +) -> 
EmptyResult { let group = match Group::find_by_uuid(&group_id, &mut conn).await { Some(group) => group, _ => err!("Group not found"), }; + log_event( + EventType::GroupDeleted as i32, + &group.uuid, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + group.delete(&mut conn).await } @@ -1955,13 +2244,14 @@ async fn get_group_users(_org_id: String, group_id: String, _headers: AdminHeade Ok(Json(json!(group_users))) } -#[put("/organizations/<_org_id>/groups//users", data = "")] +#[put("/organizations//groups//users", data = "")] async fn put_group_users( - _org_id: String, + org_id: String, group_id: String, - _headers: AdminHeaders, + headers: AdminHeaders, data: JsonVec, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { match Group::find_by_uuid(&group_id, &mut conn).await { Some(_) => { /* Do nothing */ } @@ -1972,8 +2262,19 @@ async fn put_group_users( let assigned_user_ids = data.into_inner(); for assigned_user_id in assigned_user_ids { - let mut user_entry = GroupUser::new(group_id.clone(), assigned_user_id); + let mut user_entry = GroupUser::new(group_id.clone(), assigned_user_id.clone()); user_entry.save(&mut conn).await?; + + log_event( + EventType::OrganizationUserUpdatedGroups as i32, + &assigned_user_id, + org_id.clone(), + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; } Ok(()) @@ -1998,61 +2299,76 @@ struct OrganizationUserUpdateGroupsRequest { GroupIds: Vec, } -#[post("/organizations/<_org_id>/users//groups", data = "")] +#[post("/organizations//users//groups", data = "")] async fn post_user_groups( - _org_id: String, - user_id: String, + org_id: String, + org_user_id: String, data: JsonUpcase, - _headers: AdminHeaders, + headers: AdminHeaders, conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - put_user_groups(_org_id, user_id, data, _headers, conn).await + put_user_groups(org_id, org_user_id, data, headers, conn, ip).await } 
-#[put("/organizations/<_org_id>/users//groups", data = "")] +#[put("/organizations//users//groups", data = "")] async fn put_user_groups( - _org_id: String, - user_id: String, + org_id: String, + org_user_id: String, data: JsonUpcase, - _headers: AdminHeaders, + headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - match UserOrganization::find_by_uuid(&user_id, &mut conn).await { + match UserOrganization::find_by_uuid(&org_user_id, &mut conn).await { Some(_) => { /* Do nothing */ } _ => err!("User could not be found!"), }; - GroupUser::delete_all_by_user(&user_id, &mut conn).await?; + GroupUser::delete_all_by_user(&org_user_id, &mut conn).await?; let assigned_group_ids = data.into_inner().data; for assigned_group_id in assigned_group_ids.GroupIds { - let mut group_user = GroupUser::new(assigned_group_id.clone(), user_id.clone()); + let mut group_user = GroupUser::new(assigned_group_id.clone(), org_user_id.clone()); group_user.save(&mut conn).await?; } + log_event( + EventType::OrganizationUserUpdatedGroups as i32, + &org_user_id, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + Ok(()) } -#[post("/organizations//groups//delete-user/")] +#[post("/organizations//groups//delete-user/")] async fn post_delete_group_user( org_id: String, group_id: String, - user_id: String, + org_user_id: String, headers: AdminHeaders, conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - delete_group_user(org_id, group_id, user_id, headers, conn).await + delete_group_user(org_id, group_id, org_user_id, headers, conn, ip).await } -#[delete("/organizations/<_org_id>/groups//users/")] +#[delete("/organizations//groups//users/")] async fn delete_group_user( - _org_id: String, + org_id: String, group_id: String, - user_id: String, - _headers: AdminHeaders, + org_user_id: String, + headers: AdminHeaders, mut conn: DbConn, + ip: ClientIp, ) -> EmptyResult { - match UserOrganization::find_by_uuid(&user_id, &mut 
conn).await { + match UserOrganization::find_by_uuid(&org_user_id, &mut conn).await { Some(_) => { /* Do nothing */ } _ => err!("User could not be found!"), }; @@ -2062,7 +2378,18 @@ async fn delete_group_user( _ => err!("Group could not be found!"), }; - GroupUser::delete_by_group_id_and_user_id(&group_id, &user_id, &mut conn).await + log_event( + EventType::OrganizationUserUpdatedGroups as i32, + &org_user_id, + org_id, + headers.user.uuid.clone(), + headers.device.atype, + &ip.ip, + &mut conn, + ) + .await; + + GroupUser::delete_by_group_id_and_user_id(&group_id, &org_user_id, &mut conn).await } // This is a new function active since the v2022.9.x clients. diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs index 68342e95..fa1792f2 100644 --- a/src/api/core/two_factor/authenticator.rs +++ b/src/api/core/two_factor/authenticator.rs @@ -4,12 +4,13 @@ use rocket::Route; use crate::{ api::{ - core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData, + core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, + NumberOrString, PasswordData, }, auth::{ClientIp, Headers}, crypto, db::{ - models::{TwoFactor, TwoFactorType}, + models::{EventType, TwoFactor, TwoFactorType}, DbConn, }, }; @@ -85,6 +86,8 @@ async fn activate_authenticator( _generate_recover_code(&mut user, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; + Ok(Json(json!({ "Enabled": true, "Key": key, @@ -167,10 +170,20 @@ pub async fn validate_totp_code( return Ok(()); } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) { warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps); - err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip)); + err!( + format!("Invalid TOTP code! 
Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip), + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ); } } // Else no valide code received, deny access - err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip)); + err!( + format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip), + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ); } diff --git a/src/api/core/two_factor/duo.rs b/src/api/core/two_factor/duo.rs index 42cc709e..06210d23 100644 --- a/src/api/core/two_factor/duo.rs +++ b/src/api/core/two_factor/duo.rs @@ -4,11 +4,14 @@ use rocket::serde::json::Json; use rocket::Route; use crate::{ - api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData}, - auth::Headers, + api::{ + core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, + PasswordData, + }, + auth::{ClientIp, Headers}, crypto, db::{ - models::{TwoFactor, TwoFactorType, User}, + models::{EventType, TwoFactor, TwoFactorType, User}, DbConn, }, error::MapResult, @@ -152,7 +155,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool { } #[post("/two-factor/duo", data = "")] -async fn activate_duo(data: JsonUpcase, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_duo(data: JsonUpcase, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult { let data: EnableDuoData = data.into_inner().data; let mut user = headers.user; @@ -175,6 +178,8 @@ async fn activate_duo(data: JsonUpcase, headers: Headers, mut con _generate_recover_code(&mut user, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; + Ok(Json(json!({ "Enabled": true, "Host": data.host, @@ -185,8 +190,8 @@ async fn activate_duo(data: JsonUpcase, headers: Headers, mut con } #[put("/two-factor/duo", data = "")] -async fn 
activate_duo_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - activate_duo(data, headers, conn).await +async fn activate_duo_put(data: JsonUpcase, headers: Headers, conn: DbConn, ip: ClientIp) -> JsonResult { + activate_duo(data, headers, conn, ip).await } async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult { @@ -282,7 +287,12 @@ pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) let split: Vec<&str> = response.split(':').collect(); if split.len() != 2 { - err!("Invalid response length"); + err!( + "Invalid response length", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ); } let auth_sig = split[0]; @@ -296,7 +306,12 @@ pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?; if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) { - err!("Error validating duo authentication") + err!( + "Error validating duo authentication", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ) } Ok(()) diff --git a/src/api/core/two_factor/email.rs b/src/api/core/two_factor/email.rs index 90247f53..9a95c465 100644 --- a/src/api/core/two_factor/email.rs +++ b/src/api/core/two_factor/email.rs @@ -3,11 +3,14 @@ use rocket::serde::json::Json; use rocket::Route; use crate::{ - api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData}, - auth::Headers, + api::{ + core::{log_user_event, two_factor::_generate_recover_code}, + EmptyResult, JsonResult, JsonUpcase, PasswordData, + }, + auth::{ClientIp, Headers}, crypto, db::{ - models::{TwoFactor, TwoFactorType}, + models::{EventType, TwoFactor, TwoFactorType}, DbConn, }, error::{Error, MapResult}, @@ -147,7 +150,7 @@ struct EmailData { /// Verify email belongs to user and can be used for 2FA email codes. 
#[put("/two-factor/email", data = "")] -async fn email(data: JsonUpcase, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn email(data: JsonUpcase, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult { let data: EmailData = data.into_inner().data; let mut user = headers.user; @@ -177,6 +180,8 @@ async fn email(data: JsonUpcase, headers: Headers, mut conn: DbConn) _generate_recover_code(&mut user, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; + Ok(Json(json!({ "Email": email_data.email, "Enabled": "true", @@ -192,7 +197,12 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c .map_res("Two factor not found")?; let issued_token = match &email_data.last_token { Some(t) => t, - _ => err!("No token available"), + _ => err!( + "No token available", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ), }; if !crypto::ct_eq(issued_token, token) { @@ -203,21 +213,32 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c twofactor.data = email_data.to_json(); twofactor.save(conn).await?; - err!("Token is invalid") + err!( + "Token is invalid", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ) } email_data.reset_token(); twofactor.data = email_data.to_json(); twofactor.save(conn).await?; - let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0); + let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid."); let max_time = CONFIG.email_expiration_time() as i64; if date + Duration::seconds(max_time) < Utc::now().naive_utc() { - err!("Token has expired") + err!( + "Token has expired", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ) } Ok(()) } + /// Data stored in the TwoFactor table in the db #[derive(Serialize, Deserialize)] pub struct EmailTokenData { diff --git a/src/api/core/two_factor/mod.rs 
b/src/api/core/two_factor/mod.rs index 3d5eee83..ce3cfb72 100644 --- a/src/api/core/two_factor/mod.rs +++ b/src/api/core/two_factor/mod.rs @@ -5,8 +5,8 @@ use rocket::Route; use serde_json::Value; use crate::{ - api::{JsonResult, JsonUpcase, NumberOrString, PasswordData}, - auth::Headers, + api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData}, + auth::{ClientIp, Headers}, crypto, db::{models::*, DbConn, DbPool}, mail, CONFIG, @@ -73,7 +73,7 @@ struct RecoverTwoFactor { } #[post("/two-factor/recover", data = "")] -async fn recover(data: JsonUpcase, mut conn: DbConn) -> JsonResult { +async fn recover(data: JsonUpcase, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult { let data: RecoverTwoFactor = data.into_inner().data; use crate::db::models::User; @@ -97,6 +97,8 @@ async fn recover(data: JsonUpcase, mut conn: DbConn) -> JsonRe // Remove all twofactors from the user TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?; + log_user_event(EventType::UserRecovered2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; + // Remove the recovery code, not needed without twofactors user.totp_recover = None; user.save(&mut conn).await?; @@ -119,7 +121,12 @@ struct DisableTwoFactorData { } #[post("/two-factor/disable", data = "")] -async fn disable_twofactor(data: JsonUpcase, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn disable_twofactor( + data: JsonUpcase, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, +) -> JsonResult { let data: DisableTwoFactorData = data.into_inner().data; let password_hash = data.MasterPasswordHash; let user = headers.user; @@ -132,6 +139,7 @@ async fn disable_twofactor(data: JsonUpcase, headers: Head if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { twofactor.delete(&mut conn).await?; + log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; } let 
twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty(); @@ -160,8 +168,13 @@ async fn disable_twofactor(data: JsonUpcase, headers: Head } #[put("/two-factor/disable", data = "")] -async fn disable_twofactor_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - disable_twofactor(data, headers, conn).await +async fn disable_twofactor_put( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + ip: ClientIp, +) -> JsonResult { + disable_twofactor(data, headers, conn, ip).await } pub async fn send_incomplete_2fa_notifications(pool: DbPool) { diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs index 0d9e5542..97711c75 100644 --- a/src/api/core/two_factor/webauthn.rs +++ b/src/api/core/two_factor/webauthn.rs @@ -6,11 +6,12 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, use crate::{ api::{ - core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData, + core::{log_user_event, two_factor::_generate_recover_code}, + EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData, }, - auth::Headers, + auth::{ClientIp, Headers}, db::{ - models::{TwoFactor, TwoFactorType}, + models::{EventType, TwoFactor, TwoFactorType}, DbConn, }, error::Error, @@ -241,7 +242,12 @@ impl From for PublicKeyCredential { } #[post("/two-factor/webauthn", data = "")] -async fn activate_webauthn(data: JsonUpcase, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_webauthn( + data: JsonUpcase, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, +) -> JsonResult { let data: EnableWebauthnData = data.into_inner().data; let mut user = headers.user; @@ -280,6 +286,8 @@ async fn activate_webauthn(data: JsonUpcase, headers: Header .await?; _generate_recover_code(&mut user, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; + let 
keys_json: Vec = registrations.iter().map(WebauthnRegistration::to_json).collect(); Ok(Json(json!({ "Enabled": true, @@ -289,8 +297,13 @@ async fn activate_webauthn(data: JsonUpcase, headers: Header } #[put("/two-factor/webauthn", data = "")] -async fn activate_webauthn_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - activate_webauthn(data, headers, conn).await +async fn activate_webauthn_put( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + ip: ClientIp, +) -> JsonResult { + activate_webauthn(data, headers, conn, ip).await } #[derive(Deserialize, Debug)] @@ -391,7 +404,12 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut tf.delete(conn).await?; state } - None => err!("Can't recover login challenge"), + None => err!( + "Can't recover login challenge", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ), }; let rsp: crate::util::UpCase = serde_json::from_str(response)?; @@ -414,5 +432,10 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut } } - err!("Credential not present") + err!( + "Credential not present", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ) } diff --git a/src/api/core/two_factor/yubikey.rs b/src/api/core/two_factor/yubikey.rs index 7994bea0..00ef7df2 100644 --- a/src/api/core/two_factor/yubikey.rs +++ b/src/api/core/two_factor/yubikey.rs @@ -4,10 +4,13 @@ use serde_json::Value; use yubico::{config::Config, verify}; use crate::{ - api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData}, - auth::Headers, + api::{ + core::{log_user_event, two_factor::_generate_recover_code}, + EmptyResult, JsonResult, JsonUpcase, PasswordData, + }, + auth::{ClientIp, Headers}, db::{ - models::{TwoFactor, TwoFactorType}, + models::{EventType, TwoFactor, TwoFactorType}, DbConn, }, error::{Error, MapResult}, @@ -113,7 +116,12 @@ async fn generate_yubikey(data: JsonUpcase, headers: Headers, mut } 
#[post("/two-factor/yubikey", data = "")] -async fn activate_yubikey(data: JsonUpcase, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_yubikey( + data: JsonUpcase, + headers: Headers, + mut conn: DbConn, + ip: ClientIp, +) -> JsonResult { let data: EnableYubikeyData = data.into_inner().data; let mut user = headers.user; @@ -159,6 +167,8 @@ async fn activate_yubikey(data: JsonUpcase, headers: Headers, _generate_recover_code(&mut user, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await; + let mut result = jsonify_yubikeys(yubikey_metadata.Keys); result["Enabled"] = Value::Bool(true); @@ -169,8 +179,13 @@ async fn activate_yubikey(data: JsonUpcase, headers: Headers, } #[put("/two-factor/yubikey", data = "")] -async fn activate_yubikey_put(data: JsonUpcase, headers: Headers, conn: DbConn) -> JsonResult { - activate_yubikey(data, headers, conn).await +async fn activate_yubikey_put( + data: JsonUpcase, + headers: Headers, + conn: DbConn, + ip: ClientIp, +) -> JsonResult { + activate_yubikey(data, headers, conn, ip).await } pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult { diff --git a/src/api/identity.rs b/src/api/identity.rs index 9e747c7d..6499ee38 100644 --- a/src/api/identity.rs +++ b/src/api/identity.rs @@ -10,6 +10,7 @@ use serde_json::Value; use crate::{ api::{ core::accounts::{PreloginData, RegisterData, _prelogin, _register}, + core::log_user_event, core::two_factor::{duo, email, email::EmailTokenData, yubikey}, ApiResult, EmptyResult, JsonResult, JsonUpcase, }, @@ -24,13 +25,16 @@ pub fn routes() -> Vec { } #[post("/connect/token", data = "")] -async fn login(data: Form, conn: DbConn, ip: ClientIp) -> JsonResult { +async fn login(data: Form, mut conn: DbConn, ip: ClientIp) -> JsonResult { let data: ConnectData = data.into_inner(); - match data.grant_type.as_ref() { + let mut user_uuid: Option = None; + let device_type = 
data.device_type.clone(); + + let login_result = match data.grant_type.as_ref() { "refresh_token" => { _check_is_some(&data.refresh_token, "refresh_token cannot be blank")?; - _refresh_login(data, conn).await + _refresh_login(data, &mut conn).await } "password" => { _check_is_some(&data.client_id, "client_id cannot be blank")?; @@ -42,34 +46,51 @@ async fn login(data: Form, conn: DbConn, ip: ClientIp) -> JsonResul _check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?; - _password_login(data, conn, &ip).await + _password_login(data, &mut user_uuid, &mut conn, &ip).await } "client_credentials" => { _check_is_some(&data.client_id, "client_id cannot be blank")?; _check_is_some(&data.client_secret, "client_secret cannot be blank")?; _check_is_some(&data.scope, "scope cannot be blank")?; - _api_key_login(data, conn, &ip).await + _api_key_login(data, &mut user_uuid, &mut conn, &ip).await } t => err!("Invalid type", t), + }; + + if let Some(user_uuid) = user_uuid { + // When unknown or unable to parse, return 14, which is 'Unknown Browser' + let device_type = util::try_parse_string(device_type).unwrap_or(14); + match &login_result { + Ok(_) => { + log_user_event(EventType::UserLoggedIn as i32, &user_uuid, device_type, &ip.ip, &mut conn).await; + } + Err(e) => { + if let Some(ev) = e.get_event() { + log_user_event(ev.event as i32, &user_uuid, device_type, &ip.ip, &mut conn).await + } + } + } } + + login_result } -async fn _refresh_login(data: ConnectData, mut conn: DbConn) -> JsonResult { +async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult { // Extract token let token = data.refresh_token.unwrap(); // Get device by refresh token - let mut device = Device::find_by_refresh_token(&token, &mut conn).await.map_res("Invalid refresh token")?; + let mut device = Device::find_by_refresh_token(&token, conn).await.map_res("Invalid refresh token")?; let scope = "api offline_access"; 
let scope_vec = vec!["api".into(), "offline_access".into()]; // Common - let user = User::find_by_uuid(&device.user_uuid, &mut conn).await.unwrap(); - let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &mut conn).await; + let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap(); + let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); - device.save(&mut conn).await?; + device.save(conn).await?; Ok(Json(json!({ "access_token": access_token, @@ -87,7 +108,12 @@ async fn _refresh_login(data: ConnectData, mut conn: DbConn) -> JsonResult { }))) } -async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> JsonResult { +async fn _password_login( + data: ConnectData, + user_uuid: &mut Option, + conn: &mut DbConn, + ip: &ClientIp, +) -> JsonResult { // Validate scope let scope = data.scope.as_ref().unwrap(); if scope != "api offline_access" { @@ -100,20 +126,35 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> // Get the user let username = data.username.as_ref().unwrap().trim(); - let user = match User::find_by_mail(username, &mut conn).await { + let user = match User::find_by_mail(username, conn).await { Some(user) => user, None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)), }; + // Set the user_uuid here to be passed back used for event logging. + *user_uuid = Some(user.uuid.clone()); + // Check password let password = data.password.as_ref().unwrap(); if !user.check_valid_password(password) { - err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)) + err!( + "Username or password is incorrect. Try again", + format!("IP: {}. 
Username: {}.", ip.ip, username), + ErrorEvent { + event: EventType::UserFailedLogIn, + } + ) } // Check if the user is disabled if !user.enabled { - err!("This user has been disabled", format!("IP: {}. Username: {}.", ip.ip, username)) + err!( + "This user has been disabled", + format!("IP: {}. Username: {}.", ip.ip, username), + ErrorEvent { + event: EventType::UserFailedLogIn + } + ) } let now = Utc::now().naive_utc(); @@ -131,7 +172,7 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> user.last_verifying_at = Some(now); user.login_verify_count += 1; - if let Err(e) = user.save(&mut conn).await { + if let Err(e) = user.save(conn).await { error!("Error updating user: {:#?}", e); } @@ -142,27 +183,38 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> } // We still want the login to fail until they actually verified the email address - err!("Please verify your email before trying again.", format!("IP: {}. Username: {}.", ip.ip, username)) + err!( + "Please verify your email before trying again.", + format!("IP: {}. Username: {}.", ip.ip, username), + ErrorEvent { + event: EventType::UserFailedLogIn + } + ) } - let (mut device, new_device) = get_device(&data, &mut conn, &user).await; + let (mut device, new_device) = get_device(&data, conn, &user).await; - let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &mut conn).await?; + let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?; if CONFIG.mail_enabled() && new_device { if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await { error!("Error sending new device email: {:#?}", e); if CONFIG.require_device_email() { - err!("Could not send login notification email. Please contact your administrator.") + err!( + "Could not send login notification email. 
Please contact your administrator.", + ErrorEvent { + event: EventType::UserFailedLogIn + } + ) } } } // Common - let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &mut conn).await; + let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); - device.save(&mut conn).await?; + device.save(conn).await?; let mut result = json!({ "access_token": access_token, @@ -188,7 +240,12 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> Ok(Json(result)) } -async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> JsonResult { +async fn _api_key_login( + data: ConnectData, + user_uuid: &mut Option, + conn: &mut DbConn, + ip: &ClientIp, +) -> JsonResult { // Validate scope let scope = data.scope.as_ref().unwrap(); if scope != "api" { @@ -201,27 +258,42 @@ async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> J // Get the user via the client_id let client_id = data.client_id.as_ref().unwrap(); - let user_uuid = match client_id.strip_prefix("user.") { + let client_user_uuid = match client_id.strip_prefix("user.") { Some(uuid) => uuid, None => err!("Malformed client_id", format!("IP: {}.", ip.ip)), }; - let user = match User::find_by_uuid(user_uuid, &mut conn).await { + let user = match User::find_by_uuid(client_user_uuid, conn).await { Some(user) => user, None => err!("Invalid client_id", format!("IP: {}.", ip.ip)), }; + // Set the user_uuid here to be passed back used for event logging. + *user_uuid = Some(user.uuid.clone()); + // Check if the user is disabled if !user.enabled { - err!("This user has been disabled (API key login)", format!("IP: {}. Username: {}.", ip.ip, user.email)) + err!( + "This user has been disabled (API key login)", + format!("IP: {}. Username: {}.", ip.ip, user.email), + ErrorEvent { + event: EventType::UserFailedLogIn + } + ) } // Check API key. 
Note that API key logins bypass 2FA. let client_secret = data.client_secret.as_ref().unwrap(); if !user.check_valid_api_key(client_secret) { - err!("Incorrect client_secret", format!("IP: {}. Username: {}.", ip.ip, user.email)) + err!( + "Incorrect client_secret", + format!("IP: {}. Username: {}.", ip.ip, user.email), + ErrorEvent { + event: EventType::UserFailedLogIn + } + ) } - let (mut device, new_device) = get_device(&data, &mut conn, &user).await; + let (mut device, new_device) = get_device(&data, conn, &user).await; if CONFIG.mail_enabled() && new_device { let now = Utc::now().naive_utc(); @@ -229,15 +301,20 @@ async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> J error!("Error sending new device email: {:#?}", e); if CONFIG.require_device_email() { - err!("Could not send login notification email. Please contact your administrator.") + err!( + "Could not send login notification email. Please contact your administrator.", + ErrorEvent { + event: EventType::UserFailedLogIn + } + ) } } } // Common - let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &mut conn).await; + let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); - device.save(&mut conn).await?; + device.save(conn).await?; info!("User {} logged in successfully via API key. 
IP: {}", user.email, ip.ip); @@ -261,7 +338,8 @@ async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> J /// Retrieves an existing device or creates a new device from ConnectData and the User async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Device, bool) { // On iOS, device_type sends "iOS", on others it sends a number - let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0); + // When unknown or unable to parse, return 14, which is 'Unknown Browser' + let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(14); let device_id = data.device_identifier.clone().expect("No device id provided"); let device_name = data.device_name.clone().expect("No device name provided"); @@ -338,7 +416,12 @@ async fn twofactor_auth( } } } - _ => err!("Invalid two factor provider"), + _ => err!( + "Invalid two factor provider", + ErrorEvent { + event: EventType::UserFailedLogIn2fa + } + ), } TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?; diff --git a/src/api/mod.rs b/src/api/mod.rs index 49283dd2..0861ea2d 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -16,6 +16,7 @@ pub use crate::api::{ core::routes as core_routes, core::two_factor::send_incomplete_2fa_notifications, core::{emergency_notification_reminder_job, emergency_request_timeout_job}, + core::{event_cleanup_job, events_routes as core_events_routes}, icons::routes as icons_routes, identity::routes as identity_routes, notifications::routes as notifications_routes, diff --git a/src/config.rs b/src/config.rs index 4aa8d649..fe98d2df 100644 --- a/src/config.rs +++ b/src/config.rs @@ -371,6 +371,9 @@ make_config! { /// Emergency request timeout schedule |> Cron schedule of the job that grants emergency access requests that have met the required wait time. /// Defaults to hourly. Set blank to disable this job. 
emergency_request_timeout_schedule: String, false, def, "0 5 * * * *".to_string(); + /// Event cleanup schedule |> Cron schedule of the job that cleans old events from the event table. + /// Defaults to daily. Set blank to disable this job. + event_cleanup_schedule: String, false, def, "0 10 0 * * *".to_string(); + }, /// General settings @@ -426,6 +429,8 @@ make_config! { signups_verify_resend_limit: u32, true, def, 6; /// Email domain whitelist |> Allow signups only from this list of comma-separated domains, even when signups are otherwise disabled signups_domains_whitelist: String, true, def, String::new(); + /// Enable event logging |> Enables event logging for organizations. + org_events_enabled: bool, false, def, false; /// Org creation users |> Allow org creation only by this list of comma-separated user emails. /// Blank or 'all' means all users can create orgs; 'none' means no users can create orgs. org_creation_users: String, true, def, String::new(); @@ -451,6 +456,9 @@ make_config! { /// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization invitation_org_name: String, true, def, "Vaultwarden".to_string(); + + /// Events days retain |> Number of days to retain events stored in the database. If unset, events are kept indefinitely. 
+ events_days_retain: i64, false, option; }, /// Advanced settings @@ -746,26 +754,35 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { err!("`INVITATION_EXPIRATION_HOURS` has a minimum duration of 1 hour") } + // Validate schedule crontab format if !cfg.send_purge_schedule.is_empty() && cfg.send_purge_schedule.parse::().is_err() { err!("`SEND_PURGE_SCHEDULE` is not a valid cron expression") } + if !cfg.trash_purge_schedule.is_empty() && cfg.trash_purge_schedule.parse::().is_err() { err!("`TRASH_PURGE_SCHEDULE` is not a valid cron expression") } + if !cfg.incomplete_2fa_schedule.is_empty() && cfg.incomplete_2fa_schedule.parse::().is_err() { err!("`INCOMPLETE_2FA_SCHEDULE` is not a valid cron expression") } + if !cfg.emergency_notification_reminder_schedule.is_empty() && cfg.emergency_notification_reminder_schedule.parse::().is_err() { err!("`EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE` is not a valid cron expression") } + if !cfg.emergency_request_timeout_schedule.is_empty() && cfg.emergency_request_timeout_schedule.parse::().is_err() { err!("`EMERGENCY_REQUEST_TIMEOUT_SCHEDULE` is not a valid cron expression") } + if !cfg.event_cleanup_schedule.is_empty() && cfg.event_cleanup_schedule.parse::().is_err() { + err!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression") + } + Ok(()) } @@ -1125,7 +1142,7 @@ fn case_helper<'reg, 'rc>( let value = param.value().clone(); if h.params().iter().skip(1).any(|x| x.value() == &value) { - h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or(Ok(())) + h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or_else(|| Ok(())) } else { Ok(()) } diff --git a/src/db/models/event.rs b/src/db/models/event.rs new file mode 100644 index 00000000..9196b8a8 --- /dev/null +++ b/src/db/models/event.rs @@ -0,0 +1,318 @@ +use crate::db::DbConn; +use serde_json::Value; + +use crate::{api::EmptyResult, error::MapResult, CONFIG}; + +use chrono::{Duration, NaiveDateTime, Utc}; + +// https://bitwarden.com/help/event-logs/ + 
+db_object! { + // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs + // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Api/Models/Public/Response/EventResponseModel.cs + // Upstream SQL: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Sql/dbo/Tables/Event.sql + #[derive(Identifiable, Queryable, Insertable, AsChangeset)] + #[diesel(table_name = event)] + #[diesel(primary_key(uuid))] + pub struct Event { + pub uuid: String, + pub event_type: i32, // EventType + pub user_uuid: Option, + pub org_uuid: Option, + pub cipher_uuid: Option, + pub collection_uuid: Option, + pub group_uuid: Option, + pub org_user_uuid: Option, + pub act_user_uuid: Option, + // Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs + pub device_type: Option, + pub ip_address: Option, + pub event_date: NaiveDateTime, + pub policy_uuid: Option, + pub provider_uuid: Option, + pub provider_user_uuid: Option, + pub provider_org_uuid: Option, + } +} + +// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/EventType.cs +#[derive(Debug, Copy, Clone)] +pub enum EventType { + // User + UserLoggedIn = 1000, + UserChangedPassword = 1001, + UserUpdated2fa = 1002, + UserDisabled2fa = 1003, + UserRecovered2fa = 1004, + UserFailedLogIn = 1005, + UserFailedLogIn2fa = 1006, + UserClientExportedVault = 1007, + // UserUpdatedTempPassword = 1008, // Not supported + // UserMigratedKeyToKeyConnector = 1009, // Not supported + + // Cipher + CipherCreated = 1100, + CipherUpdated = 1101, + CipherDeleted = 1102, + CipherAttachmentCreated = 1103, + CipherAttachmentDeleted = 1104, + CipherShared = 1105, + CipherUpdatedCollections = 1106, + CipherClientViewed = 1107, + CipherClientToggledPasswordVisible = 1108, + 
CipherClientToggledHiddenFieldVisible = 1109, + CipherClientToggledCardCodeVisible = 1110, + CipherClientCopiedPassword = 1111, + CipherClientCopiedHiddenField = 1112, + CipherClientCopiedCardCode = 1113, + CipherClientAutofilled = 1114, + CipherSoftDeleted = 1115, + CipherRestored = 1116, + CipherClientToggledCardNumberVisible = 1117, + + // Collection + CollectionCreated = 1300, + CollectionUpdated = 1301, + CollectionDeleted = 1302, + + // Group + GroupCreated = 1400, + GroupUpdated = 1401, + GroupDeleted = 1402, + + // OrganizationUser + OrganizationUserInvited = 1500, + OrganizationUserConfirmed = 1501, + OrganizationUserUpdated = 1502, + OrganizationUserRemoved = 1503, + OrganizationUserUpdatedGroups = 1504, + // OrganizationUserUnlinkedSso = 1505, // Not supported + // OrganizationUserResetPasswordEnroll = 1506, // Not supported + // OrganizationUserResetPasswordWithdraw = 1507, // Not supported + // OrganizationUserAdminResetPassword = 1508, // Not supported + // OrganizationUserResetSsoLink = 1509, // Not supported + // OrganizationUserFirstSsoLogin = 1510, // Not supported + OrganizationUserRevoked = 1511, + OrganizationUserRestored = 1512, + + // Organization + OrganizationUpdated = 1600, + OrganizationPurgedVault = 1601, + OrganizationClientExportedVault = 1602, + // OrganizationVaultAccessed = 1603, + // OrganizationEnabledSso = 1604, // Not supported + // OrganizationDisabledSso = 1605, // Not supported + // OrganizationEnabledKeyConnector = 1606, // Not supported + // OrganizationDisabledKeyConnector = 1607, // Not supported + // OrganizationSponsorshipsSynced = 1608, // Not supported + + // Policy + PolicyUpdated = 1700, + // Provider (Not yet supported) + // ProviderUserInvited = 1800, // Not supported + // ProviderUserConfirmed = 1801, // Not supported + // ProviderUserUpdated = 1802, // Not supported + // ProviderUserRemoved = 1803, // Not supported + // ProviderOrganizationCreated = 1900, // Not supported + // ProviderOrganizationAdded = 1901, 
// Not supported + // ProviderOrganizationRemoved = 1902, // Not supported + // ProviderOrganizationVaultAccessed = 1903, // Not supported +} + +/// Local methods +impl Event { + pub fn new(event_type: i32, event_date: Option) -> Self { + let event_date = match event_date { + Some(d) => d, + None => Utc::now().naive_utc(), + }; + + Self { + uuid: crate::util::get_uuid(), + event_type, + user_uuid: None, + org_uuid: None, + cipher_uuid: None, + collection_uuid: None, + group_uuid: None, + org_user_uuid: None, + act_user_uuid: None, + device_type: None, + ip_address: None, + event_date, + policy_uuid: None, + provider_uuid: None, + provider_user_uuid: None, + provider_org_uuid: None, + } + } + + pub fn to_json(&self) -> Value { + use crate::util::format_date; + + json!({ + "type": self.event_type, + "userId": self.user_uuid, + "organizationId": self.org_uuid, + "cipherId": self.cipher_uuid, + "collectionId": self.collection_uuid, + "groupId": self.group_uuid, + "organizationUserId": self.org_user_uuid, + "actingUserId": self.act_user_uuid, + "date": format_date(&self.event_date), + "deviceType": self.device_type, + "ipAddress": self.ip_address, + "policyId": self.policy_uuid, + "providerId": self.provider_uuid, + "providerUserId": self.provider_user_uuid, + "providerOrganizationId": self.provider_org_uuid, + // "installationId": null, // Not supported + }) + } +} + +/// Database methods +/// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs +impl Event { + pub const PAGE_SIZE: i64 = 30; + + /// ############# + /// Basic Queries + pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + db_run! 
{ conn: + sqlite, mysql { + diesel::replace_into(event::table) + .values(EventDb::to_db(self)) + .execute(conn) + .map_res("Error saving event") + } + postgresql { + diesel::insert_into(event::table) + .values(EventDb::to_db(self)) + .on_conflict(event::uuid) + .do_update() + .set(EventDb::to_db(self)) + .execute(conn) + .map_res("Error saving event") + } + } + } + + pub async fn save_user_event(events: Vec, conn: &mut DbConn) -> EmptyResult { + // Special save function which is able to handle multiple events. + // SQLite doesn't support the DEFAULT argument, and does not support inserting multiple values at the same time. + // MySQL and PostgreSQL do. + // We also ignore duplicate if they ever will exists, else it could break the whole flow. + db_run! { conn: + // Unfortunately SQLite does not support inserting multiple records at the same time + // We loop through the events here and insert them one at a time. + sqlite { + for event in events { + diesel::insert_or_ignore_into(event::table) + .values(EventDb::to_db(&event)) + .execute(conn) + .unwrap_or_default(); + } + Ok(()) + } + mysql { + let events: Vec = events.iter().map(EventDb::to_db).collect(); + diesel::insert_or_ignore_into(event::table) + .values(&events) + .execute(conn) + .unwrap_or_default(); + Ok(()) + } + postgresql { + let events: Vec = events.iter().map(EventDb::to_db).collect(); + diesel::insert_into(event::table) + .values(&events) + .on_conflict_do_nothing() + .execute(conn) + .unwrap_or_default(); + Ok(()) + } + } + } + + pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + db_run! { conn: { + diesel::delete(event::table.filter(event::uuid.eq(self.uuid))) + .execute(conn) + .map_res("Error deleting event") + }} + } + + /// ############## + /// Custom Queries + pub async fn find_by_organization_uuid( + org_uuid: &str, + start: &NaiveDateTime, + end: &NaiveDateTime, + conn: &mut DbConn, + ) -> Vec { + db_run! 
{ conn: { + event::table + .filter(event::org_uuid.eq(org_uuid)) + .filter(event::event_date.between(start, end)) + .order_by(event::event_date.desc()) + .limit(Self::PAGE_SIZE) + .load::(conn) + .expect("Error filtering events") + .from_db() + }} + } + + pub async fn find_by_org_and_user_org( + org_uuid: &str, + user_org_uuid: &str, + start: &NaiveDateTime, + end: &NaiveDateTime, + conn: &mut DbConn, + ) -> Vec { + db_run! { conn: { + event::table + .inner_join(users_organizations::table.on(users_organizations::uuid.eq(user_org_uuid))) + .filter(event::org_uuid.eq(org_uuid)) + .filter(event::event_date.between(start, end)) + .filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable()))) + .select(event::all_columns) + .order_by(event::event_date.desc()) + .limit(Self::PAGE_SIZE) + .load::(conn) + .expect("Error filtering events") + .from_db() + }} + } + + pub async fn find_by_cipher_uuid( + cipher_uuid: &str, + start: &NaiveDateTime, + end: &NaiveDateTime, + conn: &mut DbConn, + ) -> Vec { + db_run! { conn: { + event::table + .filter(event::cipher_uuid.eq(cipher_uuid)) + .filter(event::event_date.between(start, end)) + .order_by(event::event_date.desc()) + .limit(Self::PAGE_SIZE) + .load::(conn) + .expect("Error filtering events") + .from_db() + }} + } + + pub async fn clean_events(conn: &mut DbConn) -> EmptyResult { + if let Some(days_to_retain) = CONFIG.events_days_retain() { + let dt = Utc::now().naive_utc() - Duration::days(days_to_retain); + db_run! 
{ conn: { + diesel::delete(event::table.filter(event::event_date.lt(dt))) + .execute(conn) + .map_res("Error cleaning old events") + }} + } else { + Ok(()) + } + } +} diff --git a/src/db/models/mod.rs b/src/db/models/mod.rs index 20e659c6..274d48e8 100644 --- a/src/db/models/mod.rs +++ b/src/db/models/mod.rs @@ -3,6 +3,7 @@ mod cipher; mod collection; mod device; mod emergency_access; +mod event; mod favorite; mod folder; mod group; @@ -18,6 +19,7 @@ pub use self::cipher::Cipher; pub use self::collection::{Collection, CollectionCipher, CollectionUser}; pub use self::device::Device; pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType}; +pub use self::event::{Event, EventType}; pub use self::favorite::Favorite; pub use self::folder::{Folder, FolderCipher}; pub use self::group::{CollectionGroup, Group, GroupUser}; diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs index 0c4cadc4..3bc2ddad 100644 --- a/src/db/models/organization.rs +++ b/src/db/models/organization.rs @@ -3,6 +3,7 @@ use serde_json::Value; use std::cmp::Ordering; use super::{CollectionUser, GroupUser, OrgPolicy, OrgPolicyType, User}; +use crate::CONFIG; db_object! 
{ #[derive(Identifiable, Queryable, Insertable, AsChangeset)] @@ -147,7 +148,7 @@ impl Organization { "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side "Use2fa": true, "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet) - "UseEvents": false, // Not supported + "UseEvents": CONFIG.org_events_enabled(), "UseGroups": true, "UseTotp": true, "UsePolicies": true, @@ -300,10 +301,9 @@ impl UserOrganization { "Seats": 10, // The value doesn't matter, we don't check server-side "MaxCollections": 10, // The value doesn't matter, we don't check server-side "UsersGetPremium": true, - "Use2fa": true, "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet) - "UseEvents": false, // Not supported + "UseEvents": CONFIG.org_events_enabled(), "UseGroups": true, "UseTotp": true, // "UseScim": false, // Not supported (Not AGPLv3 Licensed) @@ -629,6 +629,16 @@ impl UserOrganization { }} } + pub async fn get_org_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + db_run! { conn: { + users_organizations::table + .filter(users_organizations::user_uuid.eq(user_uuid)) + .select(users_organizations::org_uuid) + .load::(conn) + .unwrap_or_default() + }} + } + pub async fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table @@ -670,6 +680,18 @@ impl UserOrganization { }} } + pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> bool { + db_run! 
{ conn: { + users_organizations::table + .inner_join(ciphers::table.on(ciphers::uuid.eq(cipher_uuid).and(ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())))) + .filter(users_organizations::user_uuid.eq(user_uuid)) + .filter(users_organizations::atype.eq_any(vec![UserOrgType::Owner as i32, UserOrgType::Admin as i32])) + .count() + .first::(conn) + .ok().unwrap_or(0) != 0 + }} + } + pub async fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table diff --git a/src/db/schemas/mysql/schema.rs b/src/db/schemas/mysql/schema.rs index 514bc67a..0073a9d5 100644 --- a/src/db/schemas/mysql/schema.rs +++ b/src/db/schemas/mysql/schema.rs @@ -55,6 +55,27 @@ table! { } } +table! { + event (uuid) { + uuid -> Varchar, + event_type -> Integer, + user_uuid -> Nullable, + org_uuid -> Nullable, + cipher_uuid -> Nullable, + collection_uuid -> Nullable, + group_uuid -> Nullable, + org_user_uuid -> Nullable, + act_user_uuid -> Nullable, + device_type -> Nullable, + ip_address -> Nullable, + event_date -> Timestamp, + policy_uuid -> Nullable, + provider_uuid -> Nullable, + provider_user_uuid -> Nullable, + provider_org_uuid -> Nullable, + } +} + table! 
{ favorites (user_uuid, cipher_uuid) { user_uuid -> Text, @@ -272,6 +293,7 @@ joinable!(groups_users -> users_organizations (users_organizations_uuid)); joinable!(groups_users -> groups (groups_uuid)); joinable!(collections_groups -> collections (collections_uuid)); joinable!(collections_groups -> groups (groups_uuid)); +joinable!(event -> users_organizations (uuid)); allow_tables_to_appear_in_same_query!( attachments, @@ -293,4 +315,5 @@ allow_tables_to_appear_in_same_query!( groups, groups_users, collections_groups, + event, ); diff --git a/src/db/schemas/postgresql/schema.rs b/src/db/schemas/postgresql/schema.rs index 23f9af7e..1421513c 100644 --- a/src/db/schemas/postgresql/schema.rs +++ b/src/db/schemas/postgresql/schema.rs @@ -55,6 +55,27 @@ table! { } } +table! { + event (uuid) { + uuid -> Text, + event_type -> Integer, + user_uuid -> Nullable, + org_uuid -> Nullable, + cipher_uuid -> Nullable, + collection_uuid -> Nullable, + group_uuid -> Nullable, + org_user_uuid -> Nullable, + act_user_uuid -> Nullable, + device_type -> Nullable, + ip_address -> Nullable, + event_date -> Timestamp, + policy_uuid -> Nullable, + provider_uuid -> Nullable, + provider_user_uuid -> Nullable, + provider_org_uuid -> Nullable, + } +} + table! { favorites (user_uuid, cipher_uuid) { user_uuid -> Text, @@ -272,6 +293,7 @@ joinable!(groups_users -> users_organizations (users_organizations_uuid)); joinable!(groups_users -> groups (groups_uuid)); joinable!(collections_groups -> collections (collections_uuid)); joinable!(collections_groups -> groups (groups_uuid)); +joinable!(event -> users_organizations (uuid)); allow_tables_to_appear_in_same_query!( attachments, @@ -293,4 +315,5 @@ allow_tables_to_appear_in_same_query!( groups, groups_users, collections_groups, + event, ); diff --git a/src/db/schemas/sqlite/schema.rs b/src/db/schemas/sqlite/schema.rs index 23f9af7e..0fedcf1d 100644 --- a/src/db/schemas/sqlite/schema.rs +++ b/src/db/schemas/sqlite/schema.rs @@ -55,6 +55,27 @@ table! 
{ } } +table! { + event (uuid) { + uuid -> Text, + event_type -> Integer, + user_uuid -> Nullable, + org_uuid -> Nullable, + cipher_uuid -> Nullable, + collection_uuid -> Nullable, + group_uuid -> Nullable, + org_user_uuid -> Nullable, + act_user_uuid -> Nullable, + device_type -> Nullable, + ip_address -> Nullable, + event_date -> Timestamp, + policy_uuid -> Nullable, + provider_uuid -> Nullable, + provider_user_uuid -> Nullable, + provider_org_uuid -> Nullable, + } +} + table! { favorites (user_uuid, cipher_uuid) { user_uuid -> Text, @@ -266,12 +287,14 @@ joinable!(users_collections -> collections (collection_uuid)); joinable!(users_collections -> users (user_uuid)); joinable!(users_organizations -> organizations (org_uuid)); joinable!(users_organizations -> users (user_uuid)); +joinable!(users_organizations -> ciphers (org_uuid)); joinable!(emergency_access -> users (grantor_uuid)); joinable!(groups -> organizations (organizations_uuid)); joinable!(groups_users -> users_organizations (users_organizations_uuid)); joinable!(groups_users -> groups (groups_uuid)); joinable!(collections_groups -> collections (collections_uuid)); joinable!(collections_groups -> groups (groups_uuid)); +joinable!(event -> users_organizations (uuid)); allow_tables_to_appear_in_same_query!( attachments, @@ -293,4 +316,5 @@ allow_tables_to_appear_in_same_query!( groups, groups_users, collections_groups, + event, ); diff --git a/src/error.rs b/src/error.rs index d42ecd20..decae01e 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,6 +1,7 @@ // // Error generator macro // +use crate::db::models::EventType; use std::error::Error as StdError; macro_rules! make_error { @@ -8,14 +9,17 @@ macro_rules! 
make_error { const BAD_REQUEST: u16 = 400; pub enum ErrorKind { $($name( $ty )),+ } - pub struct Error { message: String, error: ErrorKind, error_code: u16 } + + #[derive(Debug)] + pub struct ErrorEvent { pub event: EventType } + pub struct Error { message: String, error: ErrorKind, error_code: u16, event: Option } $(impl From<$ty> for Error { fn from(err: $ty) -> Self { Error::from((stringify!($name), err)) } })+ $(impl> From<(S, $ty)> for Error { fn from(val: (S, $ty)) -> Self { - Error { message: val.0.into(), error: ErrorKind::$name(val.1), error_code: BAD_REQUEST } + Error { message: val.0.into(), error: ErrorKind::$name(val.1), error_code: BAD_REQUEST, event: None } } })+ impl StdError for Error { @@ -130,6 +134,16 @@ impl Error { self.error_code = code; self } + + #[must_use] + pub fn with_event(mut self, event: ErrorEvent) -> Self { + self.event = Some(event); + self + } + + pub fn get_event(&self) -> &Option { + &self.event + } } pub trait MapResult { @@ -216,12 +230,21 @@ macro_rules! err { error!("{}", $msg); return Err($crate::error::Error::new($msg, $msg)); }}; + ($msg:expr, ErrorEvent $err_event:tt) => {{ + error!("{}", $msg); + return Err($crate::error::Error::new($msg, $msg).with_event($crate::error::ErrorEvent $err_event)); + }}; ($usr_msg:expr, $log_value:expr) => {{ error!("{}. {}", $usr_msg, $log_value); return Err($crate::error::Error::new($usr_msg, $log_value)); }}; + ($usr_msg:expr, $log_value:expr, ErrorEvent $err_event:tt) => {{ + error!("{}. {}", $usr_msg, $log_value); + return Err($crate::error::Error::new($usr_msg, $log_value).with_event($crate::error::ErrorEvent $err_event)); + }}; } +#[macro_export] macro_rules! err_silent { ($msg:expr) => {{ return Err($crate::error::Error::new($msg, $msg)); @@ -233,11 +256,11 @@ macro_rules! err_silent { #[macro_export] macro_rules! 
err_code { - ($msg:expr, $err_code: expr) => {{ + ($msg:expr, $err_code:expr) => {{ error!("{}", $msg); return Err($crate::error::Error::new($msg, $msg).with_code($err_code)); }}; - ($usr_msg:expr, $log_value:expr, $err_code: expr) => {{ + ($usr_msg:expr, $log_value:expr, $err_code:expr) => {{ error!("{}. {}", $usr_msg, $log_value); return Err($crate::error::Error::new($usr_msg, $log_value).with_code($err_code)); }}; @@ -260,6 +283,9 @@ macro_rules! err_json { ($expr:expr, $log_value:expr) => {{ return Err(($log_value, $expr).into()); }}; + ($expr:expr, $log_value:expr, $err_event:expr, ErrorEvent) => {{ + return Err(($log_value, $expr).into().with_event($err_event)); + }}; } #[macro_export] diff --git a/src/main.rs b/src/main.rs index 83b3b64d..70cd5d9f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -430,6 +430,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> .mount([basepath, "/"].concat(), api::web_routes()) .mount([basepath, "/api"].concat(), api::core_routes()) .mount([basepath, "/admin"].concat(), api::admin_routes()) + .mount([basepath, "/events"].concat(), api::core_events_routes()) .mount([basepath, "/identity"].concat(), api::identity_routes()) .mount([basepath, "/icons"].concat(), api::icons_routes()) .mount([basepath, "/notifications"].concat(), api::notifications_routes()) @@ -511,6 +512,16 @@ async fn schedule_jobs(pool: db::DbPool) { })); } + // Cleanup the event table of records x days old. + if CONFIG.org_events_enabled() + && !CONFIG.event_cleanup_schedule().is_empty() + && CONFIG.events_days_retain().is_some() + { + sched.add(Job::new(CONFIG.event_cleanup_schedule().parse().unwrap(), || { + runtime.spawn(api::event_cleanup_job(pool.clone())); + })); + } + // Periodically check for jobs to run. We probably won't need any // jobs that run more often than once a minute, so a default poll // interval of 30 seconds should be sufficient. 
Users who want to diff --git a/src/util.rs b/src/util.rs index 41de7304..c3dde2bb 100644 --- a/src/util.rs +++ b/src/util.rs @@ -456,10 +456,13 @@ pub fn get_env_bool(key: &str) -> Option { use chrono::{DateTime, Local, NaiveDateTime, TimeZone}; +// Format used by Bitwarden API +const DATETIME_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.6fZ"; + /// Formats a UTC-offset `NaiveDateTime` in the format used by Bitwarden API /// responses with "date" fields (`CreationDate`, `RevisionDate`, etc.). pub fn format_date(dt: &NaiveDateTime) -> String { - dt.format("%Y-%m-%dT%H:%M:%S%.6fZ").to_string() + dt.format(DATETIME_FORMAT).to_string() } /// Formats a `DateTime` using the specified format string. @@ -500,6 +503,10 @@ pub fn format_datetime_http(dt: &DateTime) -> String { expiry_time.to_rfc2822().replace("+0000", "GMT") } +pub fn parse_date(date: &str) -> NaiveDateTime { + NaiveDateTime::parse_from_str(date, DATETIME_FORMAT).unwrap() +} + // // Deployment environment methods // From 7d506f3633c4acc576a5e752b342b6b5bd120c37 Mon Sep 17 00:00:00 2001 From: BlackDex Date: Thu, 1 Dec 2022 11:45:26 +0100 Subject: [PATCH 4/4] Update Vaultwarden Logo's Updated the logo's so the `V` is better visible. Also the cog it self is better now, the previous version wasn't fully round. These versions also are used with the PR to update the web-vault and use these logo's. Also updated the images in the static folder. 
--- resources/vaultwarden-icon-white.svg | 147 +++++----- resources/vaultwarden-icon.svg | 134 +++++----- resources/vaultwarden-logo-white.svg | 355 ++++++------------------- resources/vaultwarden-logo.svg | 235 ++++++---------- src/static/images/logo-gray.png | Bin 2569 -> 2406 bytes src/static/images/vaultwarden-icon.png | Bin 945 -> 1459 bytes 6 files changed, 296 insertions(+), 575 deletions(-) diff --git a/resources/vaultwarden-icon-white.svg b/resources/vaultwarden-icon-white.svg index e0aebef1..bb241d51 100644 --- a/resources/vaultwarden-icon-white.svg +++ b/resources/vaultwarden-icon-white.svg @@ -1,91 +1,70 @@ - - Vaultwarden Icon - White - - - - - - - - - - - + + + + Vaultwarden Icon - White + - + - + Vaultwarden Icon - White diff --git a/resources/vaultwarden-icon.svg b/resources/vaultwarden-icon.svg index 67d617e6..91abbd6a 100644 --- a/resources/vaultwarden-icon.svg +++ b/resources/vaultwarden-icon.svg @@ -1,33 +1,66 @@ - - Vaultwarden Icon - + + + + Vaultwarden Icon + + - - image/svg+xml - + Vaultwarden Icon @@ -38,49 +71,4 @@ - - - - - - - - diff --git a/resources/vaultwarden-logo-white.svg b/resources/vaultwarden-logo-white.svg index ade44f23..49a75eb6 100644 --- a/resources/vaultwarden-logo-white.svg +++ b/resources/vaultwarden-logo-white.svg @@ -1,271 +1,88 @@ -Vaultwarden Logo - Whiteimage/svg+xmlVaultwarden Logo - WhiteMathijs van VeluwRust Logo - - - - -aultwarden - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + Vaultwarden Logo - White + + + + + + + + + + + aultwarden + + + + + + + image/svg+xml + + Vaultwarden Logo - White + + + Mathijs van Veluw + + + Rust Logo + + + diff --git a/resources/vaultwarden-logo.svg b/resources/vaultwarden-logo.svg index 330456cf..000cf2e9 100644 --- a/resources/vaultwarden-logo.svg +++ b/resources/vaultwarden-logo.svg @@ -1,151 +1,88 @@ -Vaultwarden Logoimage/svg+xmlVaultwarden LogoMathijs van VeluwRust Logo - - - - -aultwarden - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + 
Vaultwarden Logo + + + + + + + + + + + aultwarden + + + + + + + image/svg+xml + + Vaultwarden Logo + + + Mathijs van Veluw + + + Rust Logo + + + diff --git a/src/static/images/logo-gray.png b/src/static/images/logo-gray.png index 21caaf52c43a4a64cd7d365e64fbfd54e0756453..8fb1148ee0863de197f2148401500882800cf7eb 100644 GIT binary patch delta 2395 zcmV-h38eOk6y_3;B!4YQL_t(|+U=VKaHF{nfd6urnR)k^nejVjW@cE1V`k=MW@cvQ z%`!7Hm)mRNH={{4_Dk-y(>C`g(xR+5{=dkw>^k1R7GCBGJQ{F-Gw`^O#&{8ys8F4z z#FI=0X>BMn-+|7Ja=)_$7 zR(n5c^E9Usg;!MJDJQ$9^9{1Bayt7k9OgXvUv0i1)tLdW6&Ze1sN=w&fVOZ7zi^iI z|6UIKGm8X2Du2Apw{#&FqnblS!uuw2jDQ8d2j3QV^+Qb_5xt%NL|REQwz3pUZyL*v zzD#)E`@;B+C}#NgVno;fN5Zd)!EknQ!9QIwuGNU1MtI+W>ajszbzu~KVW;>@^wBNU zqgJg`i%{=Uq^rks^u~w||1JOPb%Pv*&HvN3ERF~aZ%{V&2V$p8$m z;nAOMBmzeDbX?ym{HbmC5XV|ZA+v!Il*#j@1b6TW{Tahh+VhOIh;S>fik^?!cra*d z#9zSYe19^MKex4a23xvPYx)Q{0Z4 z=&U$KB4=WlPboeSTQ-uW_=!}EqsMp*?StV#>wnvp%@?#i9u7Wb_(eW+S4;5dPYmU>3(k^@nx1UK-HWN7o zH?4m}65o@iy)5SvJdaCBQBwxr5r@YQ+RYYR`1AB*X@^G)pIM;OxmF&rBPK-s+)Ux& zKYyvLkkD3vKalYl7SoPj7%Ox5B#mT9L7)?EM9t^)Y@O@-5l4@8hqP+8#yktTQA7R|cmvGxa9rEhI83H;M zg^!;s?p6OYIfkKw$0^Lkh~8kmS$T0u6>`a;JXc}Z9nA90B>{v3 zUxDfZafbu{sD_Wdiga^rT<5?aFNv*!qa!8HD|~X?ez^vN5vT;TMo?jvLVxRm`(qZr zTmSQ*o%4yIDF-l)<_~{qA>fNJEw3iyMGTYih~+@uTE!R)U!dNc!I#Y9ptW|n@K@rw z!Zz(eh0`4P%P?Vess(?i+XGi)Fm|=`jf3=6O*|(^lFDX>4!MQn`LSK?@C!zDAIq3o zpvYfWX!vhqm@S~kVt8Ht)qfR9j3khP=4)d4mRw1Wv+2Y4e8_+h_)9|I@7L#5p`rtS zD~3%>3DF-c_-n$#*E(O99-Wri3h_QXuKlYsA7)~-&abcOYPhcc{_ya5&w_un(C{yi zcP>R4vhY(KStOAdmIQPnZ>vNL47s$iDxoI}0Dp_@fn3`u4*W&#et*c4eBjHjP;V({ z8RkGT^v9#E&V)&hR}G1#;0~9kbjMIaOHt#(X9!00H?%sy6b`=DGq-XdhB@SIdD00` zn-3%i>;%%@8T;!6fIkaEPuI%zHhkeUu(0ssG~<3}yYP@ANu?zFHGMqmczrFl;Moo@ zhh);%L*Tc^h;Ak>Xn$#20Qe6CE!&B4AB5hr?fC;kOZ6U!;YB7$A`%PnGXBS$v3Dx~ z{2wuFb>EW~Hhg*JiF&@tx7IGWjO9~{bmxx49XdZ=B#EjtC(VKH zzTbEr=3#na3IP8~^=Z!?Ji+&zip~ipu-u-xviSCrWY$1!X@9yQ6XA3+3IP9742CCB z3(0J9;8)z&wswX&^Z{7S$xPO43*K16*(Buwvt->%;3FXw)fLL-0cP8@j|2!KA|GhzX(Di z5qu>H6zSg1cs$NTPB}Omr5wP7*XS+f$luTJm=F3P 
zk-eWE{btGv`f7~p-wa|=^qVPv-*>VuVg>;gadz}OS%0HM_S=Y2`3KK&Y5LaFAM*eJ N002ovPDHLkV1hr8u4@1Q delta 2559 zcmV|xy5{VR$B!AFJL_t(|+U=VKoZGk(##3fyX51@8pPU>sGv5`CnVFff%*@Qp z%*@Qp% z@JD(tz5a4|OjZ9a|BbY6%&u$;=1nj7ol-5Pn|&D~>*DqZOK-P0xxl&qjqroTlx_Z+ z>C_$h^%O5qc}*JdyQGG1)lI;Ub(ixJTH6=P9ZLH&rhnW~rnAY}_lTE+rQ-|#svNQ} z`pYl=ui)eDFie;GIfk4Ov>$Vg`S;mOc`v^3QC~~O{$IhrF|(MZFY6rrcEKrkaOEu+ z!qD$RSMz!vvcwtwxc@`=*W{5c5fZ~FVfHbOVgQ>5qq&~F%Rd`FPUoGKuhc4l#me!7 zSAXP`_kW8YMIVe517%Cmpmr|ahrC<7tg4m7U_X9IhO%kFx2{IAmaMWhU-bEE7qu#) z1n)a3w^h-qcw=TR3skTwUXzErS?g9jChw;$at8OcSryeaDEs}l$a=V)4<}JsK7jhx z4VAO0_hs-o$JTYJWI~Ud1WL!;1#={|_E&)hQJr;by96 zOeS1qX2WR(gj&sYkGvib5AhLfpZx}tt+h4pi3Gn^0g9zBdip#^XML$w9hZGS>z?lm zORO?VA_JE&h|jTx^|9Kzfeng}>CBJle?9EgM=vC0wR^3F8Rnd$_%N8)3qP1wwq&vY zIDb)@*+i~cd_x)Hri{yCzf^A-z+p!~r*HJv#13ZOhaUKo*|=YTo9NsFa<(lwBCbqp zH-rn~y4c~%SL~Jtz6@vx<-GlfPE7I)Ne=ol+QDLqo;!W;2mKyGh;UTr@X<)TL^@|N z1*04X=dXh!bmRnj<5#rDW;M%kp2J5zEPvF4uODf2#D03cFHn`k$4KfighjF)no`ea z^om+ZjSb6&F<(5a5(@Hnc8M!FdVfhIGx%@P1k5Ggzy^D}d9^)#tB}?i48eEu1@R3I z+4|#ShmU*21T$g}iV3R9F*@8k#M2uza6;5m3ii=;*ah9l(!Nehyes|YNC_My{eM;* zJ`JeTa*S1+uZUPqTI*5PX^Y>q9UOi)r4guLT=QJ+FzrprDRHlPr)oH%K0@+k4h(r$ z_!3wZ4ABRCy7|rJ-fJH{>&qA$d|UE{YI_%4^rwEho7R_Vf@2v% zY@X3PPV4@g{7KDs4qpvHayG2s9)FUi$N&>%z+4WWP^KWoFPW=L(#NlN!T*%^sz1Q> zK5R5NZ#QqakU?(73|YkFgUxum%E-1FRkqwXN3SZF{_w?1B-z32BkVdU{-?=G5jwVV zrQTC*bHi7w0552-=7UG}x(oid)LQ1zP_Fw24&S{o5KpMNpgy-r_Q-$1oPYWjgM82| zJJnq?;z6S=Y{?W+!%WTV2-Yv;;kt@}&KbdC=?~vuPRZ(4(bK&=y*`k!QbO~|C>|~+~@yj zHfM!g8_Eal6TaggN#$(Dlz&@XK0hNq!~yT}7#04y7~q?aO}?Tbyr(wIYNLo#B)dY% z^v1c7%OU(qV2&`m7<@K@O~E$h%G@%mB8RWmgot${gTE>we74(}899`agE%1XJuEg) zjvz>_Hb&$@!y2`4naQ9P%D8@s>4NJcVw-uJmawwdjN=`%WfF;yj9{?g|UqSlbu z;G5hI)lK2RoZrh8^W8U4#7Ev6?#k?>tSm{l)?CtiUihMqEq__&g!iITRQM*gHkV&kUy&YSnWg=ot;Z9rbLmE#)`~K6AS#Y88kHKF?}~?@0M& z>g=U)Po>!eJj;VF`G}(<8dS~}EGFK?X)k|S+chd1@{*pGH zF9>uC>_u|zqJQCFN`+4<)-)NU40?us!p3xMoI`Z@etm8ATuJo}96kkA$Ee-ammvoD zl+SV2E`zDP2?=jHWlsVD+=ya1??4t*L@tm-X5M$rE@gu$j|5P*~y>i 
zB*Ot}PxW}^!z>1%B1UVy;3dmtJ?zKMDDX`nhg0%tHGi}9Ix>YUN#+r*a(ProAV`rl zMg9JoG#e&SToV<(_fFPOUxvpri5h`I%9u!1)(aH61QKodoh&v9x%N-uUIX9gc_SwTYI(&3wm$gJ2xrS$QSUpC@?56+QF*))*T=9^cjSET>awq%59hE_exV{23<;;r<ACzoTA3~jJc zU-x0GMjPu^CtMQ^$dcZkC0i=%oKv4dBlGkEO)>m}5RHqm5#V>wtHF2Tb2EC|ppkes zwZXoqCAQlyyyw67d}N8wHSyklF9Q5)#Tz(if{Vm+JL!DuE*Xt<;51h} zgm@z!^jqZJSgdvz^7q6OYw>+nQ+dbpID=EVp3$#Ef9tcFiKmoJO+2L>-y&Z?7yf)c zukPpbMJMrcg4h4oH&YUYeJAU%42TITeiXn#=Q~++ZzxgXUGR+kZNv?UZzHCE`4`;M Vyoe63+T;KL002ovPDHLkV1fWd7U2K@ diff --git a/src/static/images/vaultwarden-icon.png b/src/static/images/vaultwarden-icon.png index 36c8c79eba23c3b769a54b7a88efcbac6e9daafd..3a6c066854659172c2ba9cac811e1910a48aedbc 100644 GIT binary patch delta 1442 zcmV;T1zq~F2eS*1BYy>YNkl81)MLeVWj zKXQbr3nt(#pkp5wDCa;;O$RIIJbUYaw=9@|)r@1K{ABc9yvKBBV4kBLV;;^h!@9odQavfV+SZ?3ZQptVu2Rm+{$Q=4 z6g|U$GltS^ZGZH#a;~Z;;7`i9eDFdh63$4ne5s1G{Zl;w+bZK8jbvoY$9zmnByo*0 zwjC{CsyAE18|4Mfd&lX7kgQan1V9F_Isb z3s_ReXg(ln#QH|lv1C;e?roUk)1$YRfWIhmqG9$bPr?c9+~&E;G>^;kqG@7Gyvif; zJZVO5SDdG9A}?S^cba3EIo>J!Rgs;>3V6RFe-Ykhc47TH?I;uQeMOEG!g`7vJyyU0 z3iQjjV}BI+zDz*N)fI>49r8?iofr`<&&qRG|Hn18Ze<%&Beyn^$bjkBq4u zi~L9kTY#IKn$=co6_fB&bJ*NC8JFvW^h0#0eV0E_quD;q^X|<^NP6 zd=6|Sgv*jjU^!6{uhEs~QXy;wJ|l!_fb)IA>wiVqNVo6;x10@rlGXlszEmmTBZ_=p z2tNU5YY87Cqidt^HXSm0u5dqireDb|W41h(RSGymo;jB7SC`L#`(2?&M#tMiSl81@ zuK&&Q*|Mj|)3b(<#xmx}b8_}3wQ%Ix~_nryKB`WrPS#U+}z)vcl0}io8Qw#P{Sm zbj&&Vyf0{m^Uxbzx7l2g-y~yxpvZ?r5`WI+66e(u&`f;s!K6aGIq|?H>Ig{Rg8gcG z{3f~Q?=1De8gD_Ggynv>t|I@;J^yW~2Y;qkDZ^!48aY@DW)E9Tl%r?&#Dpt4>iguiKb3JR9PEURDC?Omo&wp&o zl+JIbC0|;S0e?)Vm-^x>rkn6=r3aQCLTO2s7Nwnv-y!wI?QK!&foUx5WhUwu$}FbQ~bz1Bo wo-T(jq07*qoM6N<$f;)uYfdBvi delta 924 zcmV;N17rNN3$X`~BYy*UNklMU9H8 zf*{6-5iDx4M#Ngwf?8EYu_B0y2q3Cf6-2FyT1Bl<1%p_%7OSc%s`g(VlgxW_X3p7b z@&iQXzIWfb$2s@t^|8%{a%2Ex%F$w5mya3%9ZBL8fM=3K2Y-M$T@DWThpHs;6ml(= z>06-*xvnHp<%|PO66Dq-91?5fG&#(WVrDXrU6g2(qAjsV`m3;x=VwX6*Hj8t@ytpR z_wjtB6ugILH~0mXQ%?L5kFo7=oq5VCEO3bJF-PPS!6?tE>m1h@pmV&*Z;Ye}Y?YD8 
z`L{~A3N}S>gnwmPL&!MfQyH)wd3oQW!3xJ%4iv^bwqsUk&|*dw917!U(pq?y6og9_ z78$Rn5bAgqrArI~f2HleN*hm00r=Wf%k4&xT(pSVS086YWpZHKW7wJcm}c0wSiaU+ zRIoj88Wz?F(pcOhYE4ChDZ(=VPO;ob2*5W0JYtH^jem_$qf6h(z=wQ+<%vcj^S>Pz zy3{1GHT-Y9lNJPau)G|srxe|>(&h<$yI4Na2-xcMJv6&Twy@2UVUmeiOcEVzTgr(I zie9mx9JVVm@I99Alw+H+4fU}+s`X$K%V#oRyQXtAj8>J59tDwQHH0}#&T4PS{mCC# zuB(JDmVdk6cKjDu!tz5}E6Wg(aY{jK=};5$%314_%s7O>E_o%;gnXzH;}9A(yru%G zSpG~ps@9d24n?~v!Q|G`pWIaK*c}WiVGGM`=K#KC%$LfV_TrV-`36+N0obSPjwEjI zi;=wr{5$%(#$7C5x&e5}f-X2gcz^VD4cnATxPNIs!9GTBM}rWrm=ragL{kYrVp-xL zmUV0YCDri}em7L@dkOnD+VcI#BE;}PSm(_@UJVRbrtSe-tuPq(ZTtTHL9O2Iml^8ccS=A;$n({{sR;u3S}_6jjm yw?Ng4Xf=zhLjTEY%zjcB*6I6*|0SfG1{DB1H0x%#5bv-60000