diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs
index 9750e4b6..79e7c768 100644
--- a/src/api/core/ciphers.rs
+++ b/src/api/core/ciphers.rs
@@ -205,7 +205,7 @@ pub struct CipherData {
     */
     pub Type: i32,
     pub Name: String,
-    Notes: Option<String>,
+    pub Notes: Option<String>,
     Fields: Option<Value>,
 
     // Only one of these should exist, depending on type
@@ -542,6 +542,12 @@ async fn post_ciphers_import(
 
     let data: ImportData = data.into_inner().data;
 
+    // Validate the import before continuing
+    // Bitwarden does not process the import if there is one invalid item.
+    // Since we check the length of the encrypted note, we need to do that here to pre-validate it.
+    // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
+    Cipher::validate_notes(&data.Ciphers)?;
+
     // Read and create the folders
     let mut folders: Vec<_> = Vec::new();
     for folder in data.Folders.into_iter() {
diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs
index 3393a4d0..fb303de4 100644
--- a/src/api/core/mod.rs
+++ b/src/api/core/mod.rs
@@ -7,7 +7,7 @@ mod organizations;
 mod sends;
 pub mod two_factor;
 
-pub use ciphers::{purge_trashed_ciphers, CipherSyncData, CipherSyncType};
+pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
 pub use sends::purge_sends;
diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs
index 60d6f714..94ef3db3 100644
--- a/src/api/core/organizations.rs
+++ b/src/api/core/organizations.rs
@@ -1378,6 +1378,12 @@ async fn post_org_import(
     let data: ImportData = data.into_inner().data;
     let org_id = query.organization_id;
 
+    // Validate the import before continuing
+    // Bitwarden does not process the import if there is one invalid item.
+    // Since we check the length of the encrypted note, we need to do that here to pre-validate it.
+    // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
+    Cipher::validate_notes(&data.Ciphers)?;
+
     let mut collections = Vec::new();
     for coll in data.Collections {
         let collection = Collection::new(org_id.clone(), coll.Name);
diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs
index da40af90..b7d26bd3 100644
--- a/src/db/models/cipher.rs
+++ b/src/db/models/cipher.rs
@@ -6,7 +6,7 @@ use super::{
     Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
 };
 
-use crate::api::core::CipherSyncData;
+use crate::api::core::{CipherData, CipherSyncData};
 
 use std::borrow::Cow;
 
@@ -73,6 +73,33 @@ impl Cipher {
             reprompt: None,
         }
     }
+
+    pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
+        let mut validation_errors = serde_json::Map::new();
+        for (index, cipher) in cipher_data.iter().enumerate() {
+            if let Some(note) = &cipher.Notes {
+                if note.len() > 10_000 {
+                    validation_errors.insert(
+                        format!("Ciphers[{index}].Notes"),
+                        serde_json::to_value([
+                            "The field Notes exceeds the maximum encrypted value length of 10000 characters.",
+                        ])
+                        .unwrap(),
+                    );
+                }
+            }
+        }
+        if !validation_errors.is_empty() {
+            let err_json = json!({
+                "message": "The model state is invalid.",
+                "validationErrors" : validation_errors,
+                "object": "error"
+            });
+            err_json!(err_json, "Import validation errors")
+        } else {
+            Ok(())
+        }
+    }
 }
 
 use crate::db::DbConn;
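
The validation pattern introduced above is easiest to see in isolation. Below is a minimal, standalone sketch of the same idea, assuming a hypothetical `NoteLike` struct and a plain `Result<(), serde_json::Value>` in place of vaultwarden's `CipherData`, `EmptyResult`, and `err_json!`; it only illustrates how per-item errors are collected into the `validationErrors` map that the API returns.

```rust
use serde_json::json;

// Hypothetical stand-in for the imported cipher items; only the field
// relevant to the length check is modelled here.
struct NoteLike {
    notes: Option<String>,
}

fn validate_notes(items: &[NoteLike]) -> Result<(), serde_json::Value> {
    let mut validation_errors = serde_json::Map::new();
    for (index, item) in items.iter().enumerate() {
        if let Some(note) = &item.notes {
            // The limit is checked against the encrypted note string as
            // received, so no decryption or database access is needed.
            if note.len() > 10_000 {
                validation_errors.insert(
                    format!("Ciphers[{index}].Notes"),
                    json!(["The field Notes exceeds the maximum encrypted value length of 10000 characters."]),
                );
            }
        }
    }
    if validation_errors.is_empty() {
        Ok(())
    } else {
        // Mirrors the error object built with `err_json!` in the diff.
        Err(json!({
            "message": "The model state is invalid.",
            "validationErrors": validation_errors,
            "object": "error",
        }))
    }
}

fn main() {
    // An import containing one oversized note is rejected as a whole.
    let items = vec![NoteLike { notes: Some("x".repeat(10_001)) }];
    assert!(validate_notes(&items).is_err());
}
```

Because the check runs over the raw request payload before folders, collections, or ciphers are created, a single oversized note rejects the whole import up front, matching the upstream Bitwarden behaviour described in the added comments.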