Forked from mirrored/vaultwarden
Fix failing large note imports
When importing to Vaultwarden (or Bitwarden), notes larger than 10_000 encrypted characters are invalid: they are not compatible with Bitwarden, and some clients tend to break on very large notes. We already check this limit when adding a single cipher, but during an import that check could fail partway through and leave a partially imported vault. Bitwarden validates the data before actually running it through the import process and generates a special error message that tells the user which items are invalid. This PR adds the same validation check and returns the same kind of error. Fixes #3048
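For illustration, the error response produced by this pre-validation has roughly the shape shown below; the keys and message text come from the validation code added in this commit, while the cipher index 3 is only an example and depends on which item in the import payload is too large:

    {
      "message": "The model state is invalid.",
      "validationErrors": {
        "Ciphers[3].Notes": [
          "The field Notes exceeds the maximum encrypted value length of 10000 characters."
        ]
      },
      "object": "error"
    }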
This commit is contained in:
Parent
988d24927e
Commit
6be26f0a38
4 changed files with 42 additions and 3 deletions
@@ -205,7 +205,7 @@ pub struct CipherData {
     */
     pub Type: i32,
     pub Name: String,
-    Notes: Option<String>,
+    pub Notes: Option<String>,
     Fields: Option<Value>,
 
     // Only one of these should exist, depending on type
@@ -542,6 +542,12 @@ async fn post_ciphers_import(
 
     let data: ImportData = data.into_inner().data;
 
+    // Validate the import before continuing
+    // Bitwarden does not process the import if there is one item invalid.
+    // Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
+    // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
+    Cipher::validate_notes(&data.Ciphers)?;
+
     // Read and create the folders
     let mut folders: Vec<_> = Vec::new();
     for folder in data.Folders.into_iter() {
@@ -7,7 +7,7 @@ mod organizations;
 mod sends;
 pub mod two_factor;
 
-pub use ciphers::{purge_trashed_ciphers, CipherSyncData, CipherSyncType};
+pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use events::{event_cleanup_job, log_event, log_user_event};
 pub use sends::purge_sends;
@@ -1378,6 +1378,12 @@ async fn post_org_import(
     let data: ImportData = data.into_inner().data;
     let org_id = query.organization_id;
 
+    // Validate the import before continuing
+    // Bitwarden does not process the import if there is one item invalid.
+    // Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
+    // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
+    Cipher::validate_notes(&data.Ciphers)?;
+
     let mut collections = Vec::new();
     for coll in data.Collections {
         let collection = Collection::new(org_id.clone(), coll.Name);
@@ -6,7 +6,7 @@ use super::{
     Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
 };
 
-use crate::api::core::CipherSyncData;
+use crate::api::core::{CipherData, CipherSyncData};
 
 use std::borrow::Cow;
 
@@ -73,6 +73,33 @@ impl Cipher {
             reprompt: None,
         }
     }
+
+    pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
+        let mut validation_errors = serde_json::Map::new();
+        for (index, cipher) in cipher_data.iter().enumerate() {
+            if let Some(note) = &cipher.Notes {
+                if note.len() > 10_000 {
+                    validation_errors.insert(
+                        format!("Ciphers[{index}].Notes"),
+                        serde_json::to_value([
+                            "The field Notes exceeds the maximum encrypted value length of 10000 characters.",
+                        ])
+                        .unwrap(),
+                    );
+                }
+            }
+        }
+        if !validation_errors.is_empty() {
+            let err_json = json!({
+                "message": "The model state is invalid.",
+                "validationErrors" : validation_errors,
+                "object": "error"
+            });
+            err_json!(err_json, "Import validation errors")
+        } else {
+            Ok(())
+        }
+    }
 }
 
 use crate::db::DbConn;