From 233f03ca2b9a43c07c9aba19615a8e054193cf3b Mon Sep 17 00:00:00 2001
From: Jake Howard
Date: Thu, 1 Apr 2021 20:44:58 +0100
Subject: [PATCH 1/8] Just ignore scripts

Nothing else in `src/static` consists of vendored external scripts, so only
ignore those. This also fixes the glob, which previously wasn't matching
anything.
---
 .gitattributes | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitattributes b/.gitattributes
index 20701478..b33a6211 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,3 @@
 # Ignore vendored scripts in GitHub stats
-src/static/* linguist-vendored
+src/static/scripts/* linguist-vendored

From 3565bfc939ac456b64bd8c1d0b9a9a2b3e65996c Mon Sep 17 00:00:00 2001
From: Jeremy Lin
Date: Thu, 1 Apr 2021 21:57:08 -0700
Subject: [PATCH 2/8] Sync global_domains.json to bitwarden/server@261916d
 (Stack Exchange)
---
 src/static/global_domains.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/static/global_domains.json b/src/static/global_domains.json
index 4a475f92..f7809f51 100644
--- a/src/static/global_domains.json
+++ b/src/static/global_domains.json
@@ -772,7 +772,8 @@
       "stackoverflow.com",
       "serverfault.com",
       "mathoverflow.net",
-      "askubuntu.com"
+      "askubuntu.com",
+      "stackapps.com"
     ],
     "Excluded": false
   },

From 1d0eaac260d251abed23106e6356cb07e5b6e994 Mon Sep 17 00:00:00 2001
From: BlackDex
Date: Sat, 3 Apr 2021 22:51:44 +0200
Subject: [PATCH 3/8] Updated icon fetching.

- Added image type checking to prevent downloading non-images. We didn't
  check this before, which in turn could allow someone to download an
  arbitrary file.
- This also prevents SVG images from being used; while they work in the
  web-vault and desktop client, they didn't work in the mobile apps.
- Because of this image type checking, we can return a valid content type
  instead of always 'x-icon' (which is still used as the fallback).
- Prevent rel values containing `mask-icon`, since these are not valid
  favicons.
---
 src/api/icons.rs | 58 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 45 insertions(+), 13 deletions(-)

diff --git a/src/api/icons.rs b/src/api/icons.rs
index 6da3af0b..3f8a41e1 100644
--- a/src/api/icons.rs
+++ b/src/api/icons.rs
@@ -37,6 +37,7 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
 // Build Regex only once since this takes a lot of time.
 static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
+static ICON_REL_BLACKLIST: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap());
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

 // Special HashMap which holds the user-defined Regex to speed up matching the regex.
@@ -52,7 +53,9 @@ fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
     }

     match get_icon(&domain) {
-        Some(i) => Cached::ttl(Content(ContentType::new("image", "x-icon"), i), CONFIG.icon_cache_ttl()),
+        Some((icon, icon_type)) => {
+            Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl())
+        },
         _ => Cached::ttl(Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl()),
     }
 }
@@ -243,7 +246,7 @@ fn is_domain_blacklisted(domain: &str) -> bool {
     is_blacklisted
 }

-fn get_icon(domain: &str) -> Option<Vec<u8>> {
+fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

     // Check for expiration of negatively cached copy
@@ -252,7 +255,11 @@ fn get_icon(domain: &str) -> Option<Vec<u8>> {
     }

     if let Some(icon) = get_cached_icon(&path) {
-        return Some(icon);
+        let icon_type = match get_icon_type(&icon) {
+            Some(x) => x,
+            _ => "x-icon",
+        };
+        return Some((icon, icon_type.to_string()));
     }

     if CONFIG.disable_icon_download() {
@@ -261,9 +268,9 @@
     }

     // Get the icon, or None in case of error
     match download_icon(&domain) {
-        Ok(icon) => {
+        Ok((icon, icon_type)) => {
             save_icon(&path, &icon);
-            Some(icon)
+            Some((icon, icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
             error!("Error downloading icon: {:?}", e);
@@ -324,7 +331,6 @@ fn icon_is_expired(path: &str) -> bool {
     expired.unwrap_or(true)
 }

-#[derive(Debug)]
 struct Icon {
     priority: u8,
     href: String,
@@ -348,7 +354,7 @@ fn get_favicons_node(node: &std::rc::Rc, icons: &mut Vec<Icon>
                 let attr_name = attr.name.local.as_ref();
                 let attr_value = attr.value.as_ref();

-                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) {
+                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value) {
                     has_rel = true;
                 } else if attr_name == "href" {
                     href = Some(attr_value);
@@ -597,7 +603,7 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
     (width, height)
 }

-fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
+fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
     if is_domain_blacklisted(domain) {
         err!("Domain is blacklisted", domain)
     }
@@ -605,6 +611,7 @@
     let icon_result = get_icon_url(&domain)?;

     let mut buffer = Vec::new();
+    let mut icon_type: Option<&str> = None;

     use data_url::DataUrl;

@@ -616,17 +623,31 @@
                 Ok((body, _fragment)) => {
                     // Also check if the size is at least 67 bytes, which seems to be the smallest PNG I could create
                     if body.len() >= 67 {
+                        // Check if the icon type is allowed, else try an icon from the list.
+                        icon_type = get_icon_type(&body);
+                        if icon_type.is_none() {
+                            debug!("Icon from {} data:image uri is not a valid image type", domain);
+                            continue;
+                        }
+                        info!("Extracted icon from data:image uri for {}", domain);
                         buffer = body;
                         break;
                     }
                 }
-                _ => warn!("data uri is invalid"),
+                _ => warn!("Extracted icon from data:image uri is invalid"),
             };
         } else {
             match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
                 Ok(mut res) => {
-                    info!("Downloaded icon from {}", icon.href);
                     res.copy_to(&mut buffer)?;
+                    // Check if the icon type is allowed, else try an icon from the list.
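+                    // get_icon_type() inspects the leading magic bytes of the body; anything
+                    // unrecognized (including SVG, which has no binary signature) yields None,
+                    // so the candidate is rejected and the next icon in the list is tried.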
+                    icon_type = get_icon_type(&buffer);
+                    if icon_type.is_none() {
+                        buffer.clear();
+                        debug!("Icon from {} is not a valid image type", icon.href);
+                        continue;
+                    }
+                    info!("Downloaded icon from {}", icon.href);
                     break;
                 }
                 _ => warn!("Download failed for {}", icon.href),
@@ -635,10 +656,10 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
     }

     if buffer.is_empty() {
-        err!("Empty response")
+        err!("Empty response downloading icon")
     }

-    Ok(buffer)
+    Ok((buffer, icon_type))
 }

 fn save_icon(path: &str, icon: &[u8]) {
@@ -650,7 +671,18 @@ fn save_icon(path: &str, icon: &[u8]) {
             create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
         }
         Err(e) => {
-            info!("Icon save error: {:?}", e);
+            warn!("Icon save error: {:?}", e);
         }
     }
 }
+
+fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
+    match bytes {
+        [137, 80, 78, 71, ..] => Some("png"),  // \x89PNG
+        [0, 0, 1, 0, ..] => Some("x-icon"),    // ICO header
+        [82, 73, 70, 70, ..] => Some("webp"),  // "RIFF" (WebP container)
+        [255, 216, 255, ..] => Some("jpeg"),   // JPEG SOI marker
+        [66, 77, ..] => Some("bmp"),           // "BM"
+        _ => None
+    }
+}

From 95fc88ae5bef5f4d1e9a8da4f5de7c955fb75a19 Mon Sep 17 00:00:00 2001
From: BlackDex
Date: Mon, 5 Apr 2021 15:09:16 +0200
Subject: [PATCH 4/8] Some admin interface updates.

- Fixed a bug when the web-vault is disabled.
- Updated the SQL server version check to be simpler, thanks to @weiznich
  ( https://github.com/dani-garcia/bitwarden_rs/pull/1548#discussion_r604767196 )
- Use `VACUUM INTO` to create a SQLite backup instead of invoking the
  external sqlite3 application.
- This also removes the dependency on having the sqlite3 package installed
  in the final image, so it has been dropped.
- Updated the backup filename to also include the current time.
- Added a specific bitwarden_rs web-vault version check (to match
  letter-patched versions). This will work once
  https://github.com/dani-garcia/bw_web_builds/pull/33 is built
  (but it still works without it, too).
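For illustration, the backup now comes down to a single SQL statement rather
than an external process; with the timestamped filename it issues something
of the form (folder and timestamp are hypothetical examples):

    VACUUM INTO '/data/db_20210405_150916.sqlite3'

`VACUUM INTO` writes a transactionally consistent copy of the database into a
fresh file, so it is safe to run while the server is live.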
---
 docker/Dockerfile.j2                       |  4 --
 docker/amd64/Dockerfile                    |  1 -
 docker/amd64/Dockerfile.alpine             |  1 -
 docker/arm64/Dockerfile                    |  1 -
 docker/armv6/Dockerfile                    |  1 -
 docker/armv7/Dockerfile                    |  1 -
 docker/armv7/Dockerfile.alpine             |  1 -
 src/api/admin.rs                           | 27 +++++++----
 src/db/mod.rs                              | 56 ++++++++--------------
 src/static/templates/admin/diagnostics.hbs | 13 ++++-
 10 files changed, 50 insertions(+), 56 deletions(-)

diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2
index be4b4151..71630fa6 100644
--- a/docker/Dockerfile.j2
+++ b/docker/Dockerfile.j2
@@ -215,9 +215,6 @@ RUN apk add --no-cache \
     openssl \
     curl \
     dumb-init \
-{% if "sqlite" in features %}
-    sqlite \
-{% endif %}
{% if "mysql" in features %}
     mariadb-connector-c \
{% endif %}
@@ -232,7 +229,6 @@ RUN apt-get update && apt-get install -y \
     ca-certificates \
     curl \
     dumb-init \
-    sqlite3 \
     libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*

diff --git a/docker/amd64/Dockerfile b/docker/amd64/Dockerfile
index e24cca81..f524e21a 100644
--- a/docker/amd64/Dockerfile
+++ b/docker/amd64/Dockerfile
@@ -86,7 +86,6 @@ RUN apt-get update && apt-get install -y \
     ca-certificates \
     curl \
     dumb-init \
-    sqlite3 \
     libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*

diff --git a/docker/amd64/Dockerfile.alpine b/docker/amd64/Dockerfile.alpine
index eed79fc1..a7923d30 100644
--- a/docker/amd64/Dockerfile.alpine
+++ b/docker/amd64/Dockerfile.alpine
@@ -82,7 +82,6 @@ RUN apk add --no-cache \
     openssl \
     curl \
     dumb-init \
-    sqlite \
     postgresql-libs \
     ca-certificates

diff --git a/docker/arm64/Dockerfile b/docker/arm64/Dockerfile
index b6b50fbd..5ef151c5 100644
--- a/docker/arm64/Dockerfile
+++ b/docker/arm64/Dockerfile
@@ -129,7 +129,6 @@ RUN apt-get update && apt-get install -y \
     ca-certificates \
     curl \
     dumb-init \
-    sqlite3 \
     libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*

diff --git a/docker/armv6/Dockerfile b/docker/armv6/Dockerfile
index c6dc75e1..d86bc5d1 100644
--- a/docker/armv6/Dockerfile
+++ b/docker/armv6/Dockerfile
@@ -129,7 +129,6 @@ RUN apt-get update && apt-get install -y \
     ca-certificates \
     curl \
     dumb-init \
-    sqlite3 \
     libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*

diff --git a/docker/armv7/Dockerfile b/docker/armv7/Dockerfile
index 51d4c75c..ab95f629 100644
--- a/docker/armv7/Dockerfile
+++ b/docker/armv7/Dockerfile
@@ -129,7 +129,6 @@ RUN apt-get update && apt-get install -y \
     ca-certificates \
     curl \
     dumb-init \
-    sqlite3 \
     libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*

diff --git a/docker/armv7/Dockerfile.alpine b/docker/armv7/Dockerfile.alpine
index 14b7e9b8..07895816 100644
--- a/docker/armv7/Dockerfile.alpine
+++ b/docker/armv7/Dockerfile.alpine
@@ -86,7 +86,6 @@ RUN apk add --no-cache \
     openssl \
     curl \
     dumb-init \
-    sqlite \
     ca-certificates

 RUN mkdir /data

diff --git a/src/api/admin.rs b/src/api/admin.rs
index d484407a..d5a743c9 100644
--- a/src/api/admin.rs
+++ b/src/api/admin.rs
@@ -1,7 +1,7 @@
 use once_cell::sync::Lazy;
 use serde::de::DeserializeOwned;
 use serde_json::Value;
-use std::{env, process::Command, time::Duration};
+use std::{env, time::Duration};

 use reqwest::{blocking::Client, header::USER_AGENT};
 use rocket::{
@@ -68,7 +68,6 @@ static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
     DbConnType::from_url(&CONFIG.database_url())
         .map(|t| t == DbConnType::sqlite)
         .unwrap_or(false)
-        && Command::new("sqlite3").arg("-version").status().is_ok()
 });

 #[get("/")]
@@ -502,9 +501,17 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
     use std::net::ToSocketAddrs;

     // Get current running versions
-    let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
-    let vault_version_str = read_file_string(&vault_version_path)?;
-    let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;
+    let web_vault_version: WebVaultVersion = match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
+        Ok(s) => serde_json::from_str(&s)?,
+        _ => {
+            match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
+                Ok(s) => serde_json::from_str(&s)?,
+                _ => {
+                    WebVaultVersion { version: String::from("Version file missing") }
+                },
+            }
+        },
+    };

     // Execute some environment checks
     let running_within_docker = is_running_in_docker();
@@ -557,9 +564,10 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {

     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
-        "web_vault_version": web_vault_version.version,
         "latest_release": latest_release,
         "latest_commit": latest_commit,
+        "web_vault_enabled": &CONFIG.web_vault_enabled(),
+        "web_vault_version": web_vault_version.version,
         "latest_web_build": latest_web_build,
         "running_within_docker": running_within_docker,
         "has_http_access": has_http_access,
@@ -571,6 +579,7 @@
         "db_type": *DB_TYPE,
         "db_version": get_sql_server_version(&conn),
         "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
+        "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
         "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
     });

@@ -596,11 +605,11 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
 }

 #[post("/config/backup_db")]
-fn backup_db(_token: AdminToken) -> EmptyResult {
+fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
     if *CAN_BACKUP {
-        backup_database()
+        backup_database(&conn)
     } else {
-        err!("Can't back up current DB (either it's not SQLite or the 'sqlite' binary is not present)");
+        err!("Can't back up current DB (only SQLite supports this feature)");
     }
 }

diff --git a/src/db/mod.rs b/src/db/mod.rs
index 2472caa6..83532ce2 100644
--- a/src/db/mod.rs
+++ b/src/db/mod.rs
@@ -1,5 +1,3 @@
-use std::process::Command;
-
 use chrono::prelude::*;
 use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
 use rocket::{
@@ -144,6 +142,7 @@ macro_rules! db_run {
     // Different code for each db
     ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
         #[allow(unused)] use diesel::prelude::*;
+        #[allow(unused_variables)]
         match $conn {
             $($(
                 #[cfg($db)]
@@ -221,21 +220,21 @@ macro_rules!
db_object {

 // Reexport the models, needs to be after the macros are defined so it can access them
 pub mod models;

-/// Creates a back-up of the database using sqlite3
-pub fn backup_database() -> Result<(), Error> {
-    use std::path::Path;
-    let db_url = CONFIG.database_url();
-    let db_path = Path::new(&db_url).parent().unwrap();
-
-    let now: DateTime<Utc> = Utc::now();
-    let file_date = now.format("%Y%m%d").to_string();
-    let backup_command: String = format!("{}{}{}", ".backup 'db_", file_date, ".sqlite3'");
-
-    Command::new("sqlite3")
-        .current_dir(db_path)
-        .args(&["db.sqlite3", &backup_command])
-        .output()
-        .expect("Can't open database, sqlite3 is not available, make sure it's installed and available on the PATH");
+/// Creates a backup of the SQLite database.
+/// MySQL/MariaDB and PostgreSQL are not supported.
+pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
+    db_run! {@raw conn:
+        postgresql, mysql {
+            err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
+        }
+        sqlite {
+            use std::path::Path;
+            let db_url = CONFIG.database_url();
+            let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
+            let file_date = Utc::now().format("%Y%m%d_%H%M%S").to_string();
+            diesel::sql_query(format!("VACUUM INTO '{}/db_{}.sqlite3'", db_path, file_date)).execute(conn)?;
+        }
+    }

     Ok(())
 }

@@ -243,29 +242,14 @@
 /// Get the SQL Server version
 pub fn get_sql_server_version(conn: &DbConn) -> String {
-    use diesel::sql_types::Text;
-    #[derive(QueryableByName)]
-    struct SqlVersion {
-        #[sql_type = "Text"]
-        version: String,
-    }
-
     db_run! {@raw conn:
         postgresql, mysql {
-            match diesel::sql_query("SELECT version() AS version;").get_result::<SqlVersion>(conn).ok() {
-                Some(v) => {
-                    v.version
-                },
-                _ => "Unknown".to_string()
-            }
+            no_arg_sql_function!(version, diesel::sql_types::Text);
+            diesel::select(version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
         sqlite {
-            match diesel::sql_query("SELECT sqlite_version() AS version;").get_result::<SqlVersion>(conn).ok() {
-                Some(v) => {
-                    v.version
-                },
-                _ => "Unknown".to_string()
-            }
+            no_arg_sql_function!(sqlite_version, diesel::sql_types::Text);
+            diesel::select(sqlite_version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
     }
 }

diff --git a/src/static/templates/admin/diagnostics.hbs b/src/static/templates/admin/diagnostics.hbs
index 8d7901db..1d5ca711 100644
--- a/src/static/templates/admin/diagnostics.hbs
+++ b/src/static/templates/admin/diagnostics.hbs
@@ -20,6 +20,7 @@
{{diagnostics.latest_release}}-{{diagnostics.latest_commit}}
+ {{#if diagnostics.web_vault_enabled}}
Web Installed Ok Update @@ -35,6 +36,13 @@ {{diagnostics.latest_web_build}} {{/unless}} + {{/if}} + {{#unless diagnostics.web_vault_enabled}} +
Web Installed
+
+ Web Vault is disabled +
+ {{/unless}}
Database
{{diagnostics.db_type}}: {{diagnostics.db_version}} @@ -118,7 +126,10 @@
{{diagnostics.dns_resolved}}
- +
Date & Time (Local)
+
+ Server: {{diagnostics.server_time_local}} +
Date & Time (UTC) Ok Error From 73ff8d79f70b36483d1d33587cdc9549c8e472bd Mon Sep 17 00:00:00 2001 From: Jeremy Lin Date: Fri, 2 Apr 2021 20:16:49 -0700 Subject: [PATCH 5/8] Add a generic job scheduler Also rewrite deletion of old sends using the job scheduler. --- .env.template | 13 +++++++++++++ Cargo.lock | 33 ++++++++++++++++++++++++++++++++- Cargo.toml | 10 ++++++++++ src/api/core/mod.rs | 2 +- src/api/core/sends.rs | 24 ++++++++---------------- src/api/mod.rs | 2 +- src/config.rs | 8 ++++++++ src/db/models/send.rs | 22 ++++++++++++++++------ src/main.rs | 43 +++++++++++++++++++++++++++++++++++++------ 9 files changed, 126 insertions(+), 31 deletions(-) diff --git a/.env.template b/.env.template index a85ce22d..ce571ff6 100644 --- a/.env.template +++ b/.env.template @@ -56,6 +56,19 @@ # WEBSOCKET_ADDRESS=0.0.0.0 # WEBSOCKET_PORT=3012 +## Job scheduler settings +## +## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron), +## and are always in terms of UTC time (regardless of your local time zone settings). +## +## How often (in ms) the job scheduler thread checks for jobs that need running. +## Set to 0 to globally disable scheduled jobs. +# JOB_POLL_INTERVAL_MS=30000 +## +## Cron schedule of the job that checks for Sends past their deletion date. +## Defaults to hourly. Set blank to disable this job. +# SEND_PURGE_SCHEDULE="0 0 * * * *" + ## Enable extended logging, which shows timestamps and targets in the logs # EXTENDED_LOGGING=true diff --git a/Cargo.lock b/Cargo.lock index e60c8d0d..7a18fadc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -161,6 +161,7 @@ dependencies = [ "handlebars", "html5ever", "idna 0.2.2", + "job_scheduler", "jsonwebtoken", "lettre", "libsqlite3-sys", @@ -401,6 +402,17 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "cron" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e009ed0b762cf7a967a34dfdc67d5967d3f828f12901d37081432c3dd1668f8f" +dependencies = [ + "chrono", + "nom 4.1.1", + "once_cell", +] + [[package]] name = "crypto-mac" version = "0.3.0" @@ -1097,6 +1109,16 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +[[package]] +name = "job_scheduler" +version = "1.2.1" +source = "git+https://github.com/jjlin/job_scheduler?rev=ee023418dbba2bfe1e30a5fd7d937f9e33739806#ee023418dbba2bfe1e30a5fd7d937f9e33739806" +dependencies = [ + "chrono", + "cron", + "uuid", +] + [[package]] name = "js-sys" version = "0.3.49" @@ -1160,7 +1182,7 @@ dependencies = [ "idna 0.2.2", "mime 0.3.16", "native-tls", - "nom", + "nom 6.1.2", "once_cell", "quoted_printable", "rand 0.8.3", @@ -1475,6 +1497,15 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" +[[package]] +name = "nom" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c349f68f25f596b9f44cf0e7c69752a5c633b0550c3ff849518bfba0233774a" +dependencies = [ + "memchr", +] + [[package]] name = "nom" version = "6.1.2" diff --git a/Cargo.toml b/Cargo.toml index 24c24eba..4edffc42 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -73,6 +73,9 @@ chrono = { version = "0.4.19", features = ["serde"] } chrono-tz = "0.5.3" time = "0.2.26" +# Job scheduler +job_scheduler = "1.2.1" + # TOTP library oath = "0.10.2" @@ -136,3 +139,10 @@ rocket_contrib = { git = 
'https://github.com/SergioBenitez/Rocket', rev = '263e3

 # For favicon extraction from main website
 data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
+
+# The maintainer of the `job_scheduler` crate doesn't seem to have responded
+# to any issues or PRs for almost a year (as of April 2021). This hopefully
+# temporary fork updates Cargo.toml to use more up-to-date dependencies.
+# In particular, `cron` has since implemented parsing of some common syntax
+# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
+job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }

diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs
index 36e83f0e..8a7e5f9b 100644
--- a/src/api/core/mod.rs
+++ b/src/api/core/mod.rs
@@ -5,7 +5,7 @@ mod organizations;
 pub mod two_factor;
 mod sends;

-pub use sends::start_send_deletion_scheduler;
+pub use sends::purge_sends;

 pub fn routes() -> Vec<Route> {
     let mut mod_routes = routes![

diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs
index ec6809a2..3cd568c5 100644
--- a/src/api/core/sends.rs
+++ b/src/api/core/sends.rs
@@ -9,7 +9,7 @@ use serde_json::Value;
 use crate::{
     api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
     auth::{Headers, Host},
-    db::{models::*, DbConn},
+    db::{models::*, DbConn, DbPool},
     CONFIG,
 };

@@ -27,21 +27,13 @@ pub fn routes() -> Vec<Route> {
     ]
 }

-pub fn start_send_deletion_scheduler(pool: crate::db::DbPool) {
-    std::thread::spawn(move || {
-        loop {
-            if let Ok(conn) = pool.get() {
-                info!("Initiating send deletion");
-                for send in Send::find_all(&conn) {
-                    if chrono::Utc::now().naive_utc() >= send.deletion_date {
-                        send.delete(&conn).ok();
-                    }
-                }
-            }
-
-            std::thread::sleep(std::time::Duration::from_secs(3600));
-        }
-    });
+pub fn purge_sends(pool: DbPool) {
+    debug!("Purging sends");
+    if let Ok(conn) = pool.get() {
+        Send::purge(&conn);
+    } else {
+        error!("Failed to get DB connection while purging sends")
+    }
 }

 #[derive(Deserialize)]

diff --git a/src/api/mod.rs b/src/api/mod.rs
index 840c65ff..f417751c 100644
--- a/src/api/mod.rs
+++ b/src/api/mod.rs
@@ -10,8 +10,8 @@ use serde_json::Value;

 pub use crate::api::{
     admin::routes as admin_routes,
+    core::purge_sends,
     core::routes as core_routes,
-    core::start_send_deletion_scheduler,
     icons::routes as icons_routes,
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,

diff --git a/src/config.rs b/src/config.rs
index 6c41c975..7c3c5461 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -316,6 +316,14 @@ make_config! {
         /// Websocket port
         websocket_port: u16, false, def, 3012;
     },
+    jobs {
+        /// Job scheduler poll interval |> How often the job scheduler thread checks for jobs to run.
+        /// Set to 0 to globally disable scheduled jobs.
+        job_poll_interval_ms: u64, false, def, 30_000;
+        /// Send purge schedule |> Cron schedule of the job that checks for Sends past their deletion date.
+        /// Defaults to hourly. Set blank to disable this job.
+        send_purge_schedule: String, false, def, "0 0 * * * *".to_string();
+    },

     /// General settings
     settings {

diff --git a/src/db/models/send.rs b/src/db/models/send.rs
index 0356d818..0644b1e1 100644
--- a/src/db/models/send.rs
+++ b/src/db/models/send.rs
@@ -205,6 +205,13 @@ impl Send {
         }}
     }

+    /// Purge all sends that are past their deletion date.
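+    /// Runs via `api::purge_sends`, which the job scheduler in src/main.rs
+    /// invokes on the `send_purge_schedule` cron.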
+    pub fn purge(conn: &DbConn) {
+        for send in Self::find_by_past_deletion_date(&conn) {
+            send.delete(&conn).ok();
+        }
+    }
+
     pub fn update_users_revision(&self, conn: &DbConn) {
         match &self.user_uuid {
             Some(user_uuid) => {
@@ -223,12 +230,6 @@ impl Send {
         Ok(())
     }

-    pub fn find_all(conn: &DbConn) -> Vec<Self> {
-        db_run! {conn: {
-            sends::table.load::<SendDb>(conn).expect("Error loading sends").from_db()
-        }}
-    }
-
     pub fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option<Self> {
         use data_encoding::BASE64URL_NOPAD;
         use uuid::Uuid;
@@ -271,4 +272,13 @@ impl Send {
             .load::<SendDb>(conn).expect("Error loading sends").from_db()
         }}
     }
+
+    pub fn find_by_past_deletion_date(conn: &DbConn) -> Vec<Self> {
+        let now = Utc::now().naive_utc();
+        db_run! {conn: {
+            sends::table
+                .filter(sends::deletion_date.lt(now))
+                .load::<SendDb>(conn).expect("Error loading sends").from_db()
+        }}
+    }
 }

diff --git a/src/main.rs b/src/main.rs
index 50975c66..4cdf4ff2 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -16,6 +16,7 @@ extern crate diesel;
 #[macro_use]
 extern crate diesel_migrations;

+use job_scheduler::{JobScheduler, Job};
 use std::{
     fs::create_dir_all,
     panic,
     path::Path,
     process::{exit, Command},
     str::FromStr,
     thread,
+    time::Duration,
 };

 #[macro_use]
@@ -56,7 +58,9 @@ fn main() {

     create_icon_cache_folder();

-    launch_rocket(extra_debug);
+    let pool = create_db_pool();
+    schedule_jobs(pool.clone());
+    launch_rocket(pool, extra_debug); // Blocks until program termination.
 }

 const HELP: &str = "\
@@ -301,17 +305,17 @@ fn check_web_vault() {
     }
 }

-fn launch_rocket(extra_debug: bool) {
-    let pool = match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
+fn create_db_pool() -> db::DbPool {
+    match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
         Ok(p) => p,
         Err(e) => {
             error!("Error creating database pool: {:?}", e);
             exit(1);
         }
-    };
-
-    api::start_send_deletion_scheduler(pool.clone());
+    }
+}

+fn launch_rocket(pool: db::DbPool, extra_debug: bool) {
     let basepath = &CONFIG.domain_path();

     // If adding more paths here, consider also adding them to
@@ -334,3 +338,30 @@
     // The launch will restore the original logging level
     error!("Launch error {:#?}", result);
 }
+
+fn schedule_jobs(pool: db::DbPool) {
+    if CONFIG.job_poll_interval_ms() == 0 {
+        info!("Job scheduler disabled.");
+        return;
+    }
+    thread::Builder::new().name("job-scheduler".to_string()).spawn(move || {
+        let mut sched = JobScheduler::new();
+
+        // Purge sends that are past their deletion date.
+        if !CONFIG.send_purge_schedule().is_empty() {
+            sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || {
+                api::purge_sends(pool.clone());
+            }));
+        }
+
+        // Periodically check for jobs to run. We probably won't need any
+        // jobs that run more often than once a minute, so a default poll
+        // interval of 30 seconds should be sufficient. Users who want to
+        // schedule jobs to run more frequently for some reason can reduce
+        // the poll interval accordingly.
+ loop { + sched.tick(); + thread::sleep(Duration::from_millis(CONFIG.job_poll_interval_ms())); + } + }).expect("Error spawning job scheduler thread"); +} From d77333576b1268cd24f17348ffe6d72e07855f54 Mon Sep 17 00:00:00 2001 From: Jeremy Lin Date: Fri, 2 Apr 2021 20:52:15 -0700 Subject: [PATCH 6/8] Add support for auto-deleting trashed items Upstream will soon auto-delete trashed items after 30 days, but some people use the trash as an archive folder, so to avoid unexpected data loss, this implementation requires the user to explicitly enable auto-deletion. --- .env.template | 4 ++++ src/api/core/ciphers.rs | 11 ++++++++++- src/api/core/mod.rs | 1 + src/api/mod.rs | 1 + src/config.rs | 8 ++++++++ src/db/models/cipher.rs | 24 +++++++++++++++++++++++- src/main.rs | 7 +++++++ 7 files changed, 54 insertions(+), 2 deletions(-) diff --git a/.env.template b/.env.template index ce571ff6..e5665296 100644 --- a/.env.template +++ b/.env.template @@ -68,6 +68,10 @@ ## Cron schedule of the job that checks for Sends past their deletion date. ## Defaults to hourly. Set blank to disable this job. # SEND_PURGE_SCHEDULE="0 0 * * * *" +## +## Cron schedule of the job that checks for trashed items to delete permanently. +## Defaults to daily. Set blank to disable this job. +# TRASH_PURGE_SCHEDULE="0 0 0 * * *" ## Enable extended logging, which shows timestamps and targets in the logs # EXTENDED_LOGGING=true diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index 7b0de205..58ae80b1 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -13,7 +13,7 @@ use crate::{ api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType}, auth::Headers, crypto, - db::{models::*, DbConn}, + db::{models::*, DbConn, DbPool}, CONFIG, }; @@ -77,6 +77,15 @@ pub fn routes() -> Vec { ] } +pub fn purge_trashed_ciphers(pool: DbPool) { + debug!("Purging trashed ciphers"); + if let Ok(conn) = pool.get() { + Cipher::purge_trash(&conn); + } else { + error!("Failed to get DB connection while purging trashed ciphers") + } +} + #[derive(FromForm, Default)] struct SyncData { #[form(field = "excludeDomains")] diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index 8a7e5f9b..2964d4fb 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -5,6 +5,7 @@ mod organizations; pub mod two_factor; mod sends; +pub use ciphers::purge_trashed_ciphers; pub use sends::purge_sends; pub fn routes() -> Vec { diff --git a/src/api/mod.rs b/src/api/mod.rs index f417751c..2132b30b 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -11,6 +11,7 @@ use serde_json::Value; pub use crate::api::{ admin::routes as admin_routes, core::purge_sends, + core::purge_trashed_ciphers, core::routes as core_routes, icons::routes as icons_routes, identity::routes as identity_routes, diff --git a/src/config.rs b/src/config.rs index 7c3c5461..bc2f359e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -323,6 +323,9 @@ make_config! { /// Send purge schedule |> Cron schedule of the job that checks for Sends past their deletion date. /// Defaults to hourly. Set blank to disable this job. send_purge_schedule: String, false, def, "0 0 * * * *".to_string(); + /// Trash purge schedule |> Cron schedule of the job that checks for trashed items to delete permanently. + /// Defaults to daily. Set blank to disable this job. + trash_purge_schedule: String, false, def, "0 0 0 * * *".to_string(); }, /// General settings @@ -347,6 +350,11 @@ make_config! 
{
     /// Per-organization attachment limit (KB) |> Limit in kilobytes for an organization's attachments; once the limit is exceeded, it won't be possible to upload more
     org_attachment_limit: i64, true, option;

+    /// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
+    /// If unset, trashed items are not auto-deleted. This setting applies globally, so make
+    /// sure to inform all users of any changes to this setting.
+    trash_auto_delete_days: i64, true, option;
+
     /// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
     /// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
     /// otherwise it will delete them and they won't be downloaded again.

diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs
index 365865f8..e4ae04c8 100644
--- a/src/db/models/cipher.rs
+++ b/src/db/models/cipher.rs
@@ -1,6 +1,8 @@
-use chrono::{NaiveDateTime, Utc};
+use chrono::{Duration, NaiveDateTime, Utc};
 use serde_json::Value;

+use crate::CONFIG;
+
 use super::{
     Attachment,
     CollectionCipher,
@@ -271,6 +273,17 @@ impl Cipher {
         Ok(())
     }

+    /// Purge all ciphers that are old enough to be auto-deleted.
+    pub fn purge_trash(conn: &DbConn) {
+        if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
+            let now = Utc::now().naive_utc();
+            let dt = now - Duration::days(auto_delete_days);
+            for cipher in Self::find_deleted_before(&dt, conn) {
+                cipher.delete(&conn).ok();
+            }
+        }
+    }
+
     pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(user_uuid, conn);

@@ -511,6 +524,15 @@ impl Cipher {
         }}
     }

+    /// Find all ciphers that were deleted before the specified datetime.
+    pub fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
+        db_run! {conn: {
+            ciphers::table
+                .filter(ciphers::deleted_at.lt(dt))
+                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+        }}
+    }
+
     pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
         db_run! {conn: {
             ciphers_collections::table

diff --git a/src/main.rs b/src/main.rs
index 4cdf4ff2..d5985bac 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -354,6 +354,13 @@ fn schedule_jobs(pool: db::DbPool) {
             }));
         }

+        // Purge trashed items that are old enough to be auto-deleted.
+        if !CONFIG.trash_purge_schedule().is_empty() {
+            sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || {
+                api::purge_trashed_ciphers(pool.clone());
+            }));
+        }
+
         // Periodically check for jobs to run. We probably won't need any
         // jobs that run more often than once a minute, so a default poll
         // interval of 30 seconds should be sufficient. Users who want to

From 90e0b7fec6cc025561f9f732fb06d15f72e5c892 Mon Sep 17 00:00:00 2001
From: Jeremy Lin
Date: Mon, 5 Apr 2021 23:12:36 -0700
Subject: [PATCH 7/8] Offset scheduled jobs by 5 minutes

This is intended to avoid contention with database backups that many users
probably schedule to start at exactly the top of an hour.
---
 .env.template | 8 ++++----
 src/config.rs | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/.env.template b/.env.template
index e5665296..e4d0b1e1 100644
--- a/.env.template
+++ b/.env.template
@@ -66,12 +66,12 @@
 # JOB_POLL_INTERVAL_MS=30000
 ##
 ## Cron schedule of the job that checks for Sends past their deletion date.
-## Defaults to hourly. Set blank to disable this job.
-# SEND_PURGE_SCHEDULE="0 0 * * * *"
+## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
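+## The schedule fields are: sec min hour day-of-month month day-of-week,
+## so "0 5 * * * *" fires at minute 5 of every hour (in UTC).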
+# SEND_PURGE_SCHEDULE="0 5 * * * *" ## ## Cron schedule of the job that checks for trashed items to delete permanently. -## Defaults to daily. Set blank to disable this job. -# TRASH_PURGE_SCHEDULE="0 0 0 * * *" +## Defaults to daily (5 minutes after midnight). Set blank to disable this job. +# TRASH_PURGE_SCHEDULE="0 5 0 * * *" ## Enable extended logging, which shows timestamps and targets in the logs # EXTENDED_LOGGING=true diff --git a/src/config.rs b/src/config.rs index bc2f359e..86031c72 100644 --- a/src/config.rs +++ b/src/config.rs @@ -322,10 +322,10 @@ make_config! { job_poll_interval_ms: u64, false, def, 30_000; /// Send purge schedule |> Cron schedule of the job that checks for Sends past their deletion date. /// Defaults to hourly. Set blank to disable this job. - send_purge_schedule: String, false, def, "0 0 * * * *".to_string(); + send_purge_schedule: String, false, def, "0 5 * * * *".to_string(); /// Trash purge schedule |> Cron schedule of the job that checks for trashed items to delete permanently. /// Defaults to daily. Set blank to disable this job. - trash_purge_schedule: String, false, def, "0 0 0 * * *".to_string(); + trash_purge_schedule: String, false, def, "0 5 0 * * *".to_string(); }, /// General settings From b268c3dd1cfda78f113cc5c3bf06e08324590379 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Garc=C3=ADa?= Date: Tue, 6 Apr 2021 20:38:22 +0200 Subject: [PATCH 8/8] Update web vault and add unnoficialserver response --- docker/Dockerfile.j2 | 4 ++-- docker/amd64/Dockerfile | 12 ++++++------ docker/amd64/Dockerfile.alpine | 12 ++++++------ docker/arm64/Dockerfile | 12 ++++++------ docker/armv6/Dockerfile | 12 ++++++------ docker/armv7/Dockerfile | 12 ++++++------ docker/armv7/Dockerfile.alpine | 12 ++++++------ src/api/core/ciphers.rs | 1 + src/api/identity.rs | 6 ++++-- 9 files changed, 43 insertions(+), 40 deletions(-) diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 index 71630fa6..f003dd0f 100644 --- a/docker/Dockerfile.j2 +++ b/docker/Dockerfile.j2 @@ -44,8 +44,8 @@ # https://docs.docker.com/develop/develop-images/multistage-build/ # https://whitfin.io/speeding-up-rust-docker-builds/ ####################### VAULT BUILD IMAGE ####################### -{% set vault_version = "2.19.0" %} -{% set vault_image_digest = "sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4" %} +{% set vault_version = "2.19.0b" %} +{% set vault_image_digest = "sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e" %} # The web-vault digest specifies a particular web-vault build on Docker Hub. # Using the digest instead of the tag name provides better security, # as the digest of an image is immutable, whereas a tag name can later diff --git a/docker/amd64/Dockerfile b/docker/amd64/Dockerfile index f524e21a..e0f6c70b 100644 --- a/docker/amd64/Dockerfile +++ b/docker/amd64/Dockerfile @@ -14,15 +14,15 @@ # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags, # click the tag name to view the digest of the image it currently points to. 
# - From the command line: -# $ docker pull bitwardenrs/web-vault:v2.19.0 -# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0 -# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4] +# $ docker pull bitwardenrs/web-vault:v2.19.0b +# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b +# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e] # # - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 -# [bitwardenrs/web-vault:v2.19.0] +# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e +# [bitwardenrs/web-vault:v2.19.0b] # -FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault +FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault ########################## BUILD IMAGE ########################## FROM rust:1.50 as build diff --git a/docker/amd64/Dockerfile.alpine b/docker/amd64/Dockerfile.alpine index a7923d30..71b3130f 100644 --- a/docker/amd64/Dockerfile.alpine +++ b/docker/amd64/Dockerfile.alpine @@ -14,15 +14,15 @@ # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags, # click the tag name to view the digest of the image it currently points to. # - From the command line: -# $ docker pull bitwardenrs/web-vault:v2.19.0 -# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0 -# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4] +# $ docker pull bitwardenrs/web-vault:v2.19.0b +# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b +# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e] # # - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 -# [bitwardenrs/web-vault:v2.19.0] +# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e +# [bitwardenrs/web-vault:v2.19.0b] # -FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault +FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault ########################## BUILD IMAGE ########################## FROM clux/muslrust:nightly-2021-02-22 as build diff --git a/docker/arm64/Dockerfile b/docker/arm64/Dockerfile index 5ef151c5..937d192f 100644 --- a/docker/arm64/Dockerfile +++ b/docker/arm64/Dockerfile @@ -14,15 +14,15 @@ # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags, # click the tag name to view the digest of the image it currently points to. 
# - From the command line: -# $ docker pull bitwardenrs/web-vault:v2.19.0 -# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0 -# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4] +# $ docker pull bitwardenrs/web-vault:v2.19.0b +# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b +# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e] # # - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 -# [bitwardenrs/web-vault:v2.19.0] +# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e +# [bitwardenrs/web-vault:v2.19.0b] # -FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault +FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault ########################## BUILD IMAGE ########################## FROM rust:1.50 as build diff --git a/docker/armv6/Dockerfile b/docker/armv6/Dockerfile index d86bc5d1..2423ee16 100644 --- a/docker/armv6/Dockerfile +++ b/docker/armv6/Dockerfile @@ -14,15 +14,15 @@ # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags, # click the tag name to view the digest of the image it currently points to. # - From the command line: -# $ docker pull bitwardenrs/web-vault:v2.19.0 -# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0 -# [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4] +# $ docker pull bitwardenrs/web-vault:v2.19.0b +# $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b +# [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e] # # - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 -# [bitwardenrs/web-vault:v2.19.0] +# $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e +# [bitwardenrs/web-vault:v2.19.0b] # -FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault +FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault ########################## BUILD IMAGE ########################## FROM rust:1.50 as build diff --git a/docker/armv7/Dockerfile b/docker/armv7/Dockerfile index ab95f629..4f612f72 100644 --- a/docker/armv7/Dockerfile +++ b/docker/armv7/Dockerfile @@ -14,15 +14,15 @@ # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags, # click the tag name to view the digest of the image it currently points to. 
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     $ docker pull bitwardenrs/web-vault:v2.19.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+#     [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-#     [bitwardenrs/web-vault:v2.19.0]
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+#     [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault

 ########################## BUILD IMAGE ##########################
 FROM rust:1.50 as build

diff --git a/docker/armv7/Dockerfile.alpine b/docker/armv7/Dockerfile.alpine
index 07895816..430e043e 100644
--- a/docker/armv7/Dockerfile.alpine
+++ b/docker/armv7/Dockerfile.alpine
@@ -14,15 +14,15 @@
 # - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull bitwardenrs/web-vault:v2.19.0
-#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0
-#     [bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4]
+#     $ docker pull bitwardenrs/web-vault:v2.19.0b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.19.0b
+#     [bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4
-#     [bitwardenrs/web-vault:v2.19.0]
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e
+#     [bitwardenrs/web-vault:v2.19.0b]
 #
-FROM bitwardenrs/web-vault@sha256:8747cfaa2c6d87d1749e119dd884697e8099389aa9aca30a4d73d4ff796fe0e4 as vault
+FROM bitwardenrs/web-vault@sha256:27631b913f5858895a3e109c5e701341b9d01e69818f5283e72a49fa545eb40e as vault

 ########################## BUILD IMAGE ##########################
 FROM messense/rust-musl-cross:armv7-musleabihf as build

diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs
index 58ae80b1..a46ecb9c 100644
--- a/src/api/core/ciphers.rs
+++ b/src/api/core/ciphers.rs
@@ -133,6 +133,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
         "Ciphers": ciphers_json,
         "Domains": domains_json,
         "Sends": sends_json,
+        "unofficialServer": true,
         "Object": "sync"
     }))
 }

diff --git a/src/api/identity.rs b/src/api/identity.rs
index dcfe607a..630c1781 100644
--- a/src/api/identity.rs
+++ b/src/api/identity.rs
@@ -72,7 +72,8 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
         "ResetMasterPassword": false, // TODO: according to official server seems something like:
user.password_hash.is_empty(), but would need testing
-        "scope": "api offline_access"
+        "scope": "api offline_access",
+        "unofficialServer": true,
     })))
 }

@@ -172,7 +173,8 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
         "Kdf": user.client_kdf_type,
         "KdfIterations": user.client_kdf_iter,
         "ResetMasterPassword": false, // TODO: Same as above
-        "scope": "api offline_access"
+        "scope": "api offline_access",
+        "unofficialServer": true,
     });

     if let Some(token) = twofactor_token {