Mirror of https://github.com/dani-garcia/vaultwarden.git, synchronized 2024-11-24 05:30:28 +01:00
Improve file limit handling (#4242)
* Improve file limit handling
* Oops
* Update PostgreSQL migration
* Review comments

Co-authored-by: BlackDex <black.dex@gmail.com>
This commit is contained in:
Parent 8b66e34415
Commit edf7484a70
26 changed files with 281 additions and 98 deletions
@@ -308,6 +308,10 @@
## Max kilobytes of attachment storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further attachments.
# USER_ATTACHMENT_LIMIT=
## Per-user send storage limit (KB)
## Max kilobytes of send storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further sends.
# USER_SEND_LIMIT=

## Number of days to wait before auto-deleting a trashed item.
## If unset (the default), trashed items are not auto-deleted.
Cargo.lock (generated, 28 lines changed)
@@ -373,6 +373,19 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"

[[package]]
name = "bigdecimal"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06619be423ea5bb86c95f087d5707942791a08a85530df0db2209a3ecfb8bc9"
dependencies = [
 "autocfg",
 "libm",
 "num-bigint",
 "num-integer",
 "num-traits",
]

[[package]]
name = "binascii"
version = "0.1.4"

@@ -800,6 +813,7 @@ version = "2.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8"
dependencies = [
 "bigdecimal",
 "bitflags 2.4.2",
 "byteorder",
 "chrono",

@@ -807,6 +821,9 @@ dependencies = [
 "itoa",
 "libsqlite3-sys",
 "mysqlclient-sys",
 "num-bigint",
 "num-integer",
 "num-traits",
 "percent-encoding",
 "pq-sys",
 "r2d2",

@@ -1669,6 +1686,12 @@ version = "0.2.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"

[[package]]
name = "libm"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"

[[package]]
name = "libmimalloc-sys"
version = "0.1.35"

@@ -3690,6 +3713,7 @@ name = "vaultwarden"
version = "1.0.0"
dependencies = [
 "argon2",
 "bigdecimal",
 "bytes",
 "cached",
 "chrono",

@@ -4099,9 +4123,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"

[[package]]
name = "winnow"
version = "0.5.34"
version = "0.5.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16"
checksum = "1931d78a9c73861da0134f453bb1f790ce49b2e30eba8410b4b79bac72b46a2d"
dependencies = [
 "memchr",
]
@@ -53,6 +53,7 @@ once_cell = "1.19.0"
# Numerical libraries
num-traits = "0.2.17"
num-derive = "0.4.1"
bigdecimal = "0.4.2"

# Web framework
rocket = { version = "0.5.0", features = ["tls", "json"], default-features = false }

@@ -74,7 +75,7 @@ serde = { version = "1.0.195", features = ["derive"] }
serde_json = "1.0.111"

# A safe, extensible ORM and Query builder
diesel = { version = "2.1.4", features = ["chrono", "r2d2"] }
diesel = { version = "2.1.4", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.1.0"
diesel_logger = { version = "0.3.0", optional = true }
migrations/mysql/2024-01-12-210182_change_attachment_size/up.sql (new file, 1 line)
@@ -0,0 +1 @@
ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;

The corresponding PostgreSQL migration (new file):

@@ -0,0 +1,3 @@
ALTER TABLE attachments
ALTER COLUMN file_size TYPE BIGINT,
ALTER COLUMN file_size SET NOT NULL;

The corresponding SQLite migration (new file):

@@ -0,0 +1 @@
-- Integer size in SQLite is already i64, so we don't need to do anything
@@ -15,7 +15,7 @@ use rocket::{
use crate::{
    api::{
        core::{log_event, two_factor},
        unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString,
        unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
    },
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,

@@ -24,6 +24,7 @@ use crate::{
    mail,
    util::{
        docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
        NumberOrString,
    },
    CONFIG, VERSION,
};

@@ -345,7 +346,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
        let mut usr = u.to_json(&mut conn).await;
        usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
        usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
        usr["user_enabled"] = json!(u.enabled);
        usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        usr["last_active"] = match u.last_active(&mut conn).await {

@@ -549,7 +550,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
        org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
        org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
        organizations_json.push(org);
    }
@@ -6,12 +6,14 @@ use serde_json::Value;
use crate::{
    api::{
        core::log_user_event, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
        JsonUpcase, Notify, NumberOrString, PasswordOrOtpData, UpdateType,
        JsonUpcase, Notify, PasswordOrOtpData, UpdateType,
    },
    auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
    crypto,
    db::{models::*, DbConn},
    mail, CONFIG,
    mail,
    util::NumberOrString,
    CONFIG,
};

use rocket::{
@@ -1,6 +1,7 @@
use std::collections::{HashMap, HashSet};

use chrono::{NaiveDateTime, Utc};
use num_traits::ToPrimitive;
use rocket::fs::TempFile;
use rocket::serde::json::Json;
use rocket::{

@@ -956,7 +957,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
struct AttachmentRequestData {
    Key: String,
    FileName: String,
    FileSize: i32,
    FileSize: i64,
    AdminRequest: Option<bool>, // true when attaching from an org vault view
}

@@ -985,8 +986,11 @@ async fn post_attachment_v2(
        err!("Cipher is not write accessible")
    }

    let attachment_id = crypto::generate_attachment_id();
    let data: AttachmentRequestData = data.into_inner().data;
    if data.FileSize < 0 {
        err!("Attachment size can't be negative")
    }
    let attachment_id = crypto::generate_attachment_id();
    let attachment =
        Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
    attachment.save(&mut conn).await.expect("Error saving attachment");

@@ -1028,6 +1032,15 @@ async fn save_attachment(
    mut conn: DbConn,
    nt: Notify<'_>,
) -> Result<(Cipher, DbConn), crate::error::Error> {
    let mut data = data.into_inner();

    let Some(size) = data.data.len().to_i64() else {
        err!("Attachment data size overflow");
    };
    if size < 0 {
        err!("Attachment size can't be negative")
    }

    let cipher = match Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),

@@ -1040,19 +1053,29 @@ async fn save_attachment(
    // In the v2 API, the attachment record has already been created,
    // so the size limit needs to be adjusted to account for that.
    let size_adjust = match &attachment {
        None => 0,                         // Legacy API
        Some(a) => i64::from(a.file_size), // v2 API
        None => 0,              // Legacy API
        Some(a) => a.file_size, // v2 API
    };

    let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
        match CONFIG.user_attachment_limit() {
            Some(0) => err!("Attachments are disabled"),
            Some(limit_kb) => {
                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &mut conn).await + size_adjust;
                let already_used = Attachment::size_by_user(user_uuid, &mut conn).await;
                let left = limit_kb
                    .checked_mul(1024)
                    .and_then(|l| l.checked_sub(already_used))
                    .and_then(|l| l.checked_add(size_adjust));

                let Some(left) = left else {
                    err!("Attachment size overflow");
                };

                if left <= 0 {
                    err!("Attachment storage limit reached! Delete some attachments to free up space")
                }
                Some(left as u64)

                Some(left)
            }
            None => None,
        }

@@ -1060,11 +1083,21 @@ async fn save_attachment(
        match CONFIG.org_attachment_limit() {
            Some(0) => err!("Attachments are disabled"),
            Some(limit_kb) => {
                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &mut conn).await + size_adjust;
                let already_used = Attachment::size_by_org(org_uuid, &mut conn).await;
                let left = limit_kb
                    .checked_mul(1024)
                    .and_then(|l| l.checked_sub(already_used))
                    .and_then(|l| l.checked_add(size_adjust));

                let Some(left) = left else {
                    err!("Attachment size overflow");
                };

                if left <= 0 {
                    err!("Attachment storage limit reached! Delete some attachments to free up space")
                }
                Some(left as u64)

                Some(left)
            }
            None => None,
        }
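The two hunks above replace the old `(limit_kb * 1024) - used + adjust` arithmetic with checked operations. A minimal, self-contained sketch of that pattern (hypothetical helper name, not part of the diff):

```rust
// Any multiply/subtract/add that would overflow yields None instead of a
// wrapped value, so the caller can reject the upload with a clear error.
fn remaining_quota(limit_kb: i64, already_used: i64, size_adjust: i64) -> Option<i64> {
    limit_kb
        .checked_mul(1024)
        .and_then(|l| l.checked_sub(already_used))
        .and_then(|l| l.checked_add(size_adjust))
}

fn main() {
    assert_eq!(remaining_quota(1, 512, 0), Some(512)); // 1 KB limit, 512 bytes used
    assert_eq!(remaining_quota(i64::MAX, 0, 0), None); // limit_kb * 1024 overflows
}
```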
@@ -1072,10 +1105,8 @@ async fn save_attachment(
        err!("Cipher is neither owned by a user nor an organization");
    };

    let mut data = data.into_inner();

    if let Some(size_limit) = size_limit {
        if data.data.len() > size_limit {
        if size > size_limit {
            err!("Attachment storage limit exceeded with this file");
        }
    }

@@ -1085,20 +1116,19 @@ async fn save_attachment(
        None => crypto::generate_attachment_id(), // Legacy API
    };

    let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
    let file_path = folder_path.join(&file_id);
    tokio::fs::create_dir_all(&folder_path).await?;

    let size = data.data.len() as i32;
    if let Some(attachment) = &mut attachment {
        // v2 API

        // Check the actual size against the size initially provided by
        // the client. Upstream allows +/- 1 MiB deviation from this
        // size, but it's not clear when or why this is needed.
        const LEEWAY: i32 = 1024 * 1024; // 1 MiB
        let min_size = attachment.file_size - LEEWAY;
        let max_size = attachment.file_size + LEEWAY;
        const LEEWAY: i64 = 1024 * 1024; // 1 MiB
        let Some(min_size) = attachment.file_size.checked_sub(LEEWAY) else {
            err!("Invalid attachment size min")
        };
        let Some(max_size) = attachment.file_size.checked_add(LEEWAY) else {
            err!("Invalid attachment size max")
        };

        if min_size <= size && size <= max_size {
            if size != attachment.file_size {
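A small sketch of the ±1 MiB acceptance window enforced above, with hypothetical sizes (the real check uses the size from the v2 pre-request):

```rust
fn main() {
    const LEEWAY: i64 = 1024 * 1024; // 1 MiB
    let declared: i64 = 5 * 1024 * 1024; // size the client announced up front
    let uploaded: i64 = declared + 512 * 1024; // bytes actually received, 0.5 MiB more
    let (min_size, max_size) = (declared - LEEWAY, declared + LEEWAY);
    assert!(min_size <= uploaded && uploaded <= max_size); // within the allowed deviation
}
```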
@@ -1113,6 +1143,10 @@ async fn save_attachment(
        }
    } else {
        // Legacy API

        // SAFETY: This value is only stored in the database and is not used to access the file system.
        // As a result, the conditions specified by Rocket [0] are met and this is safe to use.
        // [0]: https://docs.rs/rocket/latest/rocket/fs/struct.FileName.html#-danger-
        let encrypted_filename = data.data.raw_name().map(|s| s.dangerous_unsafe_unsanitized_raw().to_string());

        if encrypted_filename.is_none() {

@@ -1122,10 +1156,14 @@ async fn save_attachment(
            err!("No attachment key provided")
        }
        let attachment =
            Attachment::new(file_id, String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
            Attachment::new(file_id.clone(), String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
        attachment.save(&mut conn).await.expect("Error saving attachment");
    }

    let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
    let file_path = folder_path.join(&file_id);
    tokio::fs::create_dir_all(&folder_path).await?;

    if let Err(_err) = data.data.persist_to(&file_path).await {
        data.data.move_copy_to(file_path).await?
    }
@@ -5,11 +5,13 @@ use serde_json::Value;
use crate::{
    api::{
        core::{CipherSyncData, CipherSyncType},
        EmptyResult, JsonResult, JsonUpcase, NumberOrString,
        EmptyResult, JsonResult, JsonUpcase,
    },
    auth::{decode_emergency_access_invite, Headers},
    db::{models::*, DbConn, DbPool},
    mail, CONFIG,
    mail,
    util::NumberOrString,
    CONFIG,
};

pub fn routes() -> Vec<Route> {
@@ -6,14 +6,13 @@ use serde_json::Value;
use crate::{
    api::{
        core::{log_event, two_factor, CipherSyncData, CipherSyncType},
        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordOrOtpData,
        UpdateType,
        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, PasswordOrOtpData, UpdateType,
    },
    auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
    db::{models::*, DbConn},
    error::Error,
    mail,
    util::convert_json_key_lcase_first,
    util::{convert_json_key_lcase_first, NumberOrString},
    CONFIG,
};
@@ -1,6 +1,7 @@
use std::path::Path;

use chrono::{DateTime, Duration, Utc};
use num_traits::ToPrimitive;
use rocket::form::Form;
use rocket::fs::NamedFile;
use rocket::fs::TempFile;

@@ -8,17 +9,17 @@ use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::{ClientIp, Headers, Host},
    db::{models::*, DbConn, DbPool},
    util::SafeString,
    util::{NumberOrString, SafeString},
    CONFIG,
};

const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

// The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
const SIZE_525_MB: u64 = 550_502_400;
const SIZE_525_MB: i64 = 550_502_400;

pub fn routes() -> Vec<rocket::Route> {
    routes![

@@ -216,30 +217,41 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
    } = data.into_inner();
    let model = model.into_inner().data;

    let Some(size) = data.len().to_i64() else {
        err!("Invalid send size");
    };
    if size < 0 {
        err!("Send size can't be negative")
    }

    enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;

    let size_limit = match CONFIG.user_attachment_limit() {
    let size_limit = match CONFIG.user_send_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
            let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
                err!("Existing sends overflow")
            };
            let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
                err!("Send size overflow");
            };
            if left <= 0 {
                err!("Attachment storage limit reached! Delete some attachments to free up space")
                err!("Send storage limit reached! Delete some sends to free up space")
            }
            std::cmp::Ord::max(left as u64, SIZE_525_MB)
            i64::clamp(left, 0, SIZE_525_MB)
        }
        None => SIZE_525_MB,
    };

    if size > size_limit {
        err!("Send storage limit exceeded with this file");
    }

    let mut send = create_send(model, headers.user.uuid)?;
    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
    }

    let size = data.len();
    if size > size_limit {
        err!("Attachment storage limit exceeded with this file");
    }

    let file_id = crate::crypto::generate_send_id();
    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
    let file_path = folder_path.join(&file_id);

@@ -253,7 +265,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
    }
    send.data = serde_json::to_string(&data_value)?;

@@ -285,24 +297,32 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

    let file_length = match &data.FileLength {
        Some(m) => Some(m.into_i32()?),
        _ => None,
        Some(m) => m.into_i64()?,
        _ => err!("Invalid send length"),
    };
    if file_length < 0 {
        err!("Send size can't be negative")
    }

    let size_limit = match CONFIG.user_attachment_limit() {
    let size_limit = match CONFIG.user_send_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
            let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
                err!("Existing sends overflow")
            };
            let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
                err!("Send size overflow");
            };
            if left <= 0 {
                err!("Attachment storage limit reached! Delete some attachments to free up space")
                err!("Send storage limit reached! Delete some sends to free up space")
            }
            std::cmp::Ord::max(left as u64, SIZE_525_MB)
            i64::clamp(left, 0, SIZE_525_MB)
        }
        None => SIZE_525_MB,
    };

    if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
        err!("Attachment storage limit exceeded with this file");
    if file_length > size_limit {
        err!("Send storage limit exceeded with this file");
    }

    let mut send = create_send(data, headers.user.uuid)?;

@@ -312,8 +332,8 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id.clone()));
        o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
        o.insert(String::from("Size"), Value::Number(file_length.into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length)));
    }
    send.data = serde_json::to_string(&data_value)?;
    send.save(&mut conn).await?;
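The `i64::clamp(left, 0, SIZE_525_MB)` call above caps the per-upload allowance: a user may have plenty of send quota left, but a single upload is never allowed to exceed the ~525 MB client limit, and a negative value can never slip through. A tiny illustration with hypothetical values:

```rust
fn main() {
    const SIZE_525_MB: i64 = 550_502_400;
    assert_eq!(i64::clamp(10_000, 0, SIZE_525_MB), 10_000); // little quota left: use it
    assert_eq!(i64::clamp(i64::MAX, 0, SIZE_525_MB), SIZE_525_MB); // otherwise cap at ~525 MB
}
```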
@@ -5,7 +5,7 @@ use rocket::Route;
use crate::{
    api::{
        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
        NumberOrString, PasswordOrOtpData,
        PasswordOrOtpData,
    },
    auth::{ClientIp, Headers},
    crypto,

@@ -13,6 +13,7 @@ use crate::{
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
    },
    util::NumberOrString,
};

pub use crate::config::CONFIG;
@@ -7,12 +7,14 @@ use serde_json::Value;
use crate::{
    api::{
        core::{log_event, log_user_event},
        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData,
        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
    },
    auth::{ClientHeaders, Headers},
    crypto,
    db::{models::*, DbConn, DbPool},
    mail, CONFIG,
    mail,
    util::NumberOrString,
    CONFIG,
};

pub mod authenticator;
@@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
use crate::{
    api::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData,
        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
    },
    auth::Headers,
    db::{

@@ -15,6 +15,7 @@ use crate::{
        DbConn,
    },
    error::Error,
    util::NumberOrString,
    CONFIG,
};
@@ -73,30 +73,3 @@ impl PasswordOrOtpData {
        Ok(())
    }
}

#[derive(Deserialize, Debug, Clone)]
#[serde(untagged)]
enum NumberOrString {
    Number(i32),
    String(String),
}

impl NumberOrString {
    fn into_string(self) -> String {
        match self {
            NumberOrString::Number(n) => n.to_string(),
            NumberOrString::String(s) => s,
        }
    }

    #[allow(clippy::wrong_self_convention)]
    fn into_i32(&self) -> ApiResult<i32> {
        use std::num::ParseIntError as PIE;
        match self {
            NumberOrString::Number(n) => Ok(*n),
            NumberOrString::String(s) => {
                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
            }
        }
    }
}
@@ -442,6 +442,8 @@ make_config! {
    user_attachment_limit: i64, true, option;
    /// Per-organization attachment storage limit (KB) |> Max kilobytes of attachment storage allowed per org. When this limit is reached, org members will not be allowed to upload further attachments for ciphers owned by that org.
    org_attachment_limit: i64, true, option;
    /// Per-user send storage limit (KB) |> Max kilobytes of sends storage allowed per user. When this limit is reached, the user will not be allowed to upload further sends.
    user_send_limit: i64, true, option;

    /// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
    /// If unset, trashed items are not auto-deleted. This setting applies globally, so make

@@ -784,6 +786,26 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
        }
    }

    const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;

    if let Some(limit) = cfg.user_attachment_limit {
        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
            err!("`USER_ATTACHMENT_LIMIT` is out of bounds");
        }
    }

    if let Some(limit) = cfg.org_attachment_limit {
        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
            err!("`ORG_ATTACHMENT_LIMIT` is out of bounds");
        }
    }

    if let Some(limit) = cfg.user_send_limit {
        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
            err!("`USER_SEND_LIMIT` is out of bounds");
        }
    }

    if cfg._enable_duo
        && (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
        && !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
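The bound used by the validation above follows from the later multiplication by 1024: `i64::MAX >> 10` is the largest KB value whose byte count still fits in an i64. A quick check of that reasoning (sketch, not part of the diff):

```rust
fn main() {
    const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;
    assert!(MAX_FILESIZE_KB.checked_mul(1024).is_some()); // still representable in bytes
    assert!((MAX_FILESIZE_KB + 1).checked_mul(1024).is_none()); // one more KB would overflow
}
```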
@@ -1,5 +1,6 @@
use std::io::ErrorKind;

use bigdecimal::{BigDecimal, ToPrimitive};
use serde_json::Value;

use crate::CONFIG;

@@ -13,14 +14,14 @@ db_object! {
        pub id: String,
        pub cipher_uuid: String,
        pub file_name: String, // encrypted
        pub file_size: i32,
        pub file_size: i64,
        pub akey: Option<String>,
    }
}

/// Local methods
impl Attachment {
    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i32, akey: Option<String>) -> Self {
    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option<String>) -> Self {
        Self {
            id,
            cipher_uuid,

@@ -145,13 +146,18 @@ impl Attachment {
    pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
        db_run! { conn: {
            let result: Option<i64> = attachments::table
            let result: Option<BigDecimal> = attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::user_uuid.eq(user_uuid))
                .select(diesel::dsl::sum(attachments::file_size))
                .first(conn)
                .expect("Error loading user attachment total size");
            result.unwrap_or(0)

            match result.map(|r| r.to_i64()) {
                Some(Some(r)) => r,
                Some(None) => i64::MAX,
                None => 0
            }
        }}
    }

@@ -168,13 +174,18 @@ impl Attachment {
    pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
        db_run! { conn: {
            let result: Option<i64> = attachments::table
            let result: Option<BigDecimal> = attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                .filter(ciphers::organization_uuid.eq(org_uuid))
                .select(diesel::dsl::sum(attachments::file_size))
                .first(conn)
                .expect("Error loading user attachment total size");
            result.unwrap_or(0)

            match result.map(|r| r.to_i64()) {
                Some(Some(r)) => r,
                Some(None) => i64::MAX,
                None => 0
            }
        }}
    }
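Summing the now-BigInt `file_size` column yields an optional `BigDecimal` from diesel (hence the `numeric` feature and the `bigdecimal` crate added in Cargo.toml), which the hunks above fold back into an i64. A standalone sketch of that conversion, assuming only the `bigdecimal` crate:

```rust
use bigdecimal::{BigDecimal, ToPrimitive};

// Saturate to i64::MAX if the sum no longer fits, and treat "no rows" as 0,
// mirroring the match in size_by_user / size_by_org.
fn sum_to_i64(sum: Option<BigDecimal>) -> i64 {
    match sum.map(|r| r.to_i64()) {
        Some(Some(n)) => n,
        Some(None) => i64::MAX,
        None => 0,
    }
}

fn main() {
    assert_eq!(sum_to_i64(None), 0);
    assert_eq!(sum_to_i64(Some(BigDecimal::from(42))), 42);
}
```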
@@ -172,6 +172,7 @@ use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;
use crate::util::NumberOrString;

impl Send {
    pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {

@@ -286,6 +287,36 @@ impl Send {
        }}
    }

    pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
        let sends = Self::find_by_user(user_uuid, conn).await;

        #[allow(non_snake_case)]
        #[derive(serde::Deserialize, Default)]
        struct FileData {
            Size: Option<NumberOrString>,
            size: Option<NumberOrString>,
        }

        let mut total: i64 = 0;
        for send in sends {
            if send.atype == SendType::File as i32 {
                let data: FileData = serde_json::from_str(&send.data).unwrap_or_default();

                let size = match (data.size, data.Size) {
                    (Some(s), _) => s.into_i64(),
                    (_, Some(s)) => s.into_i64(),
                    (None, None) => continue,
                };

                if let Ok(size) = size {
                    total = total.checked_add(size)?;
                };
            }
        }

        Some(total)
    }

    pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            sends::table
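`Send::size_by_user` above returns `Option<i64>` so that `total.checked_add(size)?` can abort the whole sum as soon as it would overflow. The same pattern in isolation (hypothetical helper, not from the diff):

```rust
fn sum_sizes(sizes: &[i64]) -> Option<i64> {
    let mut total: i64 = 0;
    for s in sizes {
        total = total.checked_add(*s)?; // bail out with None on overflow
    }
    Some(total)
}

fn main() {
    assert_eq!(sum_sizes(&[1, 2, 3]), Some(6));
    assert_eq!(sum_sizes(&[i64::MAX, 1]), None);
}
```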
The attachments table definition changes `file_size` from Integer to BigInt; the same hunk is repeated for each database backend's schema file:

@@ -3,7 +3,7 @@ table! {
        id -> Text,
        cipher_uuid -> Text,
        file_name -> Text,
        file_size -> Integer,
        file_size -> BigInt,
        akey -> Nullable<Text>,
    }
}

@@ -3,7 +3,7 @@ table! {
        id -> Text,
        cipher_uuid -> Text,
        file_name -> Text,
        file_size -> Integer,
        file_size -> BigInt,
        akey -> Nullable<Text>,
    }
}

@@ -3,7 +3,7 @@ table! {
        id -> Text,
        cipher_uuid -> Text,
        file_name -> Text,
        file_size -> Integer,
        file_size -> BigInt,
        akey -> Nullable<Text>,
    }
}
src/util.rs (50 lines changed)

@@ -7,6 +7,7 @@ use std::{
    ops::Deref,
};

use num_traits::ToPrimitive;
use rocket::{
    fairing::{Fairing, Info, Kind},
    http::{ContentType, Header, HeaderMap, Method, Status},

@@ -367,10 +368,14 @@ pub fn delete_file(path: &str) -> IOResult<()> {
    fs::remove_file(path)
}

pub fn get_display_size(size: i32) -> String {
pub fn get_display_size(size: i64) -> String {
    const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];

    let mut size: f64 = size.into();
    // If we're somehow too big for a f64, just return the size in bytes
    let Some(mut size) = size.to_f64() else {
        return format!("{size} bytes");
    };

    let mut unit_counter = 0;

    loop {

@@ -638,6 +643,47 @@ fn _process_key(key: &str) -> String {
    }
}

#[derive(Deserialize, Debug, Clone)]
#[serde(untagged)]
pub enum NumberOrString {
    Number(i64),
    String(String),
}

impl NumberOrString {
    pub fn into_string(self) -> String {
        match self {
            NumberOrString::Number(n) => n.to_string(),
            NumberOrString::String(s) => s,
        }
    }

    #[allow(clippy::wrong_self_convention)]
    pub fn into_i32(&self) -> Result<i32, crate::Error> {
        use std::num::ParseIntError as PIE;
        match self {
            NumberOrString::Number(n) => match n.to_i32() {
                Some(n) => Ok(n),
                None => err!("Number does not fit in i32"),
            },
            NumberOrString::String(s) => {
                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
            }
        }
    }

    #[allow(clippy::wrong_self_convention)]
    pub fn into_i64(&self) -> Result<i64, crate::Error> {
        use std::num::ParseIntError as PIE;
        match self {
            NumberOrString::Number(n) => Ok(*n),
            NumberOrString::String(s) => {
                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
            }
        }
    }
}

//
// Retry methods
//