Mirror of
https://github.com/dani-garcia/vaultwarden.git
synchronized 2025-01-08 11:55:42 +01:00
introduce folder_id newtype
This commit is contained in:
Parent
8b8507f8cc
Commit
5dc05d6ba4
7 changed files with 111 additions and 49 deletions
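The commit replaces the plain String folder ids used across the API and database models with a dedicated FolderId newtype (alongside the existing CipherId, UserId, OrganizationId, and similar types), so a folder id can no longer be passed where another kind of id is expected. A minimal sketch of the idea, not the vaultwarden code itself (the ids and the helper function below are made up for illustration):

    // Sketch only: wrapping the raw String in a newtype gives each kind of id its own type.
    #[derive(Clone, Debug, Hash, PartialEq, Eq)]
    pub struct FolderId(String);

    #[derive(Clone, Debug, Hash, PartialEq, Eq)]
    pub struct CipherId(String);

    // A function signature can now state exactly which id it expects.
    fn move_to_folder(cipher: &CipherId, folder: &FolderId) {
        println!("moving {cipher:?} into {folder:?}");
    }

    fn main() {
        let folder = FolderId("00000000-0000-0000-0000-000000000001".into()); // made-up id
        let cipher = CipherId("00000000-0000-0000-0000-000000000002".into()); // made-up id
        move_to_folder(&cipher, &folder);
        // move_to_folder(&folder, &cipher); // would no longer compile: mismatched types
    }

With plain strings both calls would compile; the newtype turns the mix-up into a type error, which is what the diff below threads through the folder routes, the sync code, and the Diesel models.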
@@ -445,7 +445,7 @@ struct UpdateFolderData {
     // There is a bug in 2024.3.x which adds a `null` item.
     // To bypass this we allow a Option here, but skip it during the updates
     // See: https://github.com/bitwarden/clients/issues/8453
-    id: Option<String>,
+    id: Option<FolderId>,
     name: String,
 }

@@ -500,8 +500,8 @@ fn validate_keydata(
     }

     // Check that we're correctly rotating all the user's folders
-    let existing_folder_ids = existing_folders.iter().map(|f| f.uuid.as_str()).collect::<HashSet<_>>();
-    let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_deref()).collect::<HashSet<_>>();
+    let existing_folder_ids = existing_folders.iter().map(|f| &f.uuid).collect::<HashSet<&FolderId>>();
+    let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_ref()).collect::<HashSet<&FolderId>>();
     if !provided_folder_ids.is_superset(&existing_folder_ids) {
         err!("All existing folders must be included in the rotation")
     }
@@ -221,7 +221,7 @@ pub struct CipherData {
     // Id is optional as it is included only in bulk share
     pub id: Option<CipherId>,
     // Folder id is not included in import
-    pub folder_id: Option<String>,
+    pub folder_id: Option<FolderId>,
     // TODO: Some of these might appear all the time, no need for Option
     #[serde(alias = "organizationID")]
     pub organization_id: Option<OrganizationId>,
@@ -270,7 +270,7 @@ pub struct CipherData {
 #[derive(Debug, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct PartialCipherData {
-    folder_id: Option<String>,
+    folder_id: Option<FolderId>,
     favorite: bool,
 }

@@ -579,9 +579,9 @@ async fn post_ciphers_import(
     Cipher::validate_cipher_data(&data.ciphers)?;

     // Read and create the folders
-    let existing_folders: HashSet<Option<String>> =
+    let existing_folders: HashSet<Option<FolderId>> =
         Folder::find_by_user(&headers.user.uuid, &mut conn).await.into_iter().map(|f| Some(f.uuid)).collect();
-    let mut folders: Vec<String> = Vec::with_capacity(data.folders.len());
+    let mut folders: Vec<FolderId> = Vec::with_capacity(data.folders.len());
     for folder in data.folders.into_iter() {
         let folder_uuid = if existing_folders.contains(&folder.id) {
             folder.id.unwrap()
@@ -1526,7 +1526,7 @@ async fn restore_cipher_selected(
 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct MoveCipherData {
-    folder_id: Option<String>,
+    folder_id: Option<FolderId>,
     ids: Vec<CipherId>,
 }

@@ -1843,7 +1843,7 @@ async fn _delete_cipher_attachment_by_id(
 /// This will not improve the speed of a single cipher.to_json() call that much, so better not to use it for those calls.
 pub struct CipherSyncData {
     pub cipher_attachments: HashMap<CipherId, Vec<Attachment>>,
-    pub cipher_folders: HashMap<CipherId, String>,
+    pub cipher_folders: HashMap<CipherId, FolderId>,
     pub cipher_favorites: HashSet<CipherId>,
     pub cipher_collections: HashMap<CipherId, Vec<CollectionId>>,
     pub members: HashMap<OrganizationId, Membership>,
@@ -1860,7 +1860,7 @@ pub enum CipherSyncType {

 impl CipherSyncData {
     pub async fn new(user_uuid: &UserId, sync_type: CipherSyncType, conn: &mut DbConn) -> Self {
-        let cipher_folders: HashMap<CipherId, String>;
+        let cipher_folders: HashMap<CipherId, FolderId>;
         let cipher_favorites: HashSet<CipherId>;
         match sync_type {
             // User Sync supports Folders and Favorites
@@ -24,8 +24,8 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
 }

 #[get("/folders/<uuid>")]
-async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    match Folder::find_by_uuid_and_user(uuid, &headers.user.uuid, &mut conn).await {
+async fn get_folder(uuid: FolderId, headers: Headers, mut conn: DbConn) -> JsonResult {
+    match Folder::find_by_uuid_and_user(&uuid, &headers.user.uuid, &mut conn).await {
         Some(folder) => Ok(Json(folder.to_json())),
         _ => err!("Invalid folder", "Folder does not exist or belongs to another user"),
     }
@@ -35,7 +35,7 @@ async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
 #[serde(rename_all = "camelCase")]
 pub struct FolderData {
     pub name: String,
-    pub id: Option<String>,
+    pub id: Option<FolderId>,
 }

 #[post("/folders", data = "<data>")]
@@ -51,13 +51,19 @@ async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn
 }

 #[post("/folders/<uuid>", data = "<data>")]
-async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
+async fn post_folder(
+    uuid: FolderId,
+    data: Json<FolderData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
     put_folder(uuid, data, headers, conn, nt).await
 }

 #[put("/folders/<uuid>", data = "<data>")]
 async fn put_folder(
-    uuid: &str,
+    uuid: FolderId,
     data: Json<FolderData>,
     headers: Headers,
     mut conn: DbConn,
@@ -65,7 +71,7 @@ async fn put_folder(
 ) -> JsonResult {
     let data: FolderData = data.into_inner();

-    let Some(mut folder) = Folder::find_by_uuid_and_user(uuid, &headers.user.uuid, &mut conn).await else {
+    let Some(mut folder) = Folder::find_by_uuid_and_user(&uuid, &headers.user.uuid, &mut conn).await else {
         err!("Invalid folder", "Folder does not exist or belongs to another user")
     };

@@ -78,13 +84,13 @@ async fn put_folder(
 }

 #[post("/folders/<uuid>/delete")]
-async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+async fn delete_folder_post(uuid: FolderId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
     delete_folder(uuid, headers, conn, nt).await
 }

 #[delete("/folders/<uuid>")]
-async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    let Some(folder) = Folder::find_by_uuid_and_user(uuid, &headers.user.uuid, &mut conn).await else {
+async fn delete_folder(uuid: FolderId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let Some(folder) = Folder::find_by_uuid_and_user(&uuid, &headers.user.uuid, &mut conn).await else {
         err!("Invalid folder", "Folder does not exist or belongs to another user")
     };

@@ -392,7 +392,7 @@ impl WebSocketUsers {
         }
         let data = create_update(
             vec![
-                ("Id".into(), folder.uuid.clone().into()),
+                ("Id".into(), folder.uuid.to_string().into()),
                 ("UserId".into(), folder.user_uuid.to_string().into()),
                 ("RevisionDate".into(), serialize_date(folder.updated_at)),
             ],
@@ -10,7 +10,7 @@ use std::{
 };

 use super::{
-    Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, Group, Membership, MembershipStatus,
+    Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus,
     MembershipType, OrganizationId, User, UserId,
 };

@@ -334,7 +334,7 @@ impl Cipher {
         // Skip adding these fields in that case
         if sync_type == CipherSyncType::User {
             json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
-                cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
+                cipher_sync_data.cipher_folders.get(&self.uuid).cloned()
             } else {
                 self.get_folder_uuid(user_uuid, conn).await
             });
@@ -469,7 +469,7 @@ impl Cipher {

     pub async fn move_to_folder(
         &self,
-        folder_uuid: Option<String>,
+        folder_uuid: Option<FolderId>,
         user_uuid: &UserId,
         conn: &mut DbConn,
     ) -> EmptyResult {
@@ -478,23 +478,25 @@ impl Cipher {
         match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) {
             // No changes
             (None, None) => Ok(()),
-            (Some(ref old), Some(ref new)) if old == new => Ok(()),
+            (Some(ref old_folder), Some(ref new_folder)) if old_folder == new_folder => Ok(()),

             // Add to folder
-            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await,
+            (None, Some(new_folder)) => FolderCipher::new(new_folder, self.uuid.clone()).save(conn).await,

             // Remove from folder
-            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
-                Some(old) => old.delete(conn).await,
-                None => err!("Couldn't move from previous folder"),
-            },
+            (Some(old_folder), None) => {
+                match FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await {
+                    Some(old_folder) => old_folder.delete(conn).await,
+                    None => err!("Couldn't move from previous folder"),
+                }
+            }

             // Move to another folder
-            (Some(old), Some(new)) => {
-                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
-                    old.delete(conn).await?;
+            (Some(old_folder), Some(new_folder)) => {
+                if let Some(old_folder) = FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await {
+                    old_folder.delete(conn).await?;
                 }
-                FolderCipher::new(&new, &self.uuid).save(conn).await
+                FolderCipher::new(new_folder, self.uuid.clone()).save(conn).await
             }
         }
     }
@@ -677,14 +679,14 @@ impl Cipher {
         }
     }

-    pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &mut DbConn) -> Option<String> {
+    pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &mut DbConn) -> Option<FolderId> {
         db_run! {conn: {
             folders_ciphers::table
                 .inner_join(folders::table)
                 .filter(folders::user_uuid.eq(&user_uuid))
                 .filter(folders_ciphers::cipher_uuid.eq(&self.uuid))
                 .select(folders_ciphers::folder_uuid)
-                .first::<String>(conn)
+                .first::<FolderId>(conn)
                 .ok()
         }}
     }
@@ -850,7 +852,7 @@ impl Cipher {
         }}
     }

-    pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             folders_ciphers::table.inner_join(ciphers::table)
                 .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -1,5 +1,11 @@
 use chrono::{NaiveDateTime, Utc};
+use rocket::request::FromParam;
 use serde_json::Value;
+use std::{
+    borrow::Borrow,
+    fmt::{Display, Formatter},
+    ops::Deref,
+};

 use super::{CipherId, User, UserId};

@@ -8,7 +14,7 @@ db_object! {
     #[diesel(table_name = folders)]
     #[diesel(primary_key(uuid))]
     pub struct Folder {
-        pub uuid: String,
+        pub uuid: FolderId,
         pub created_at: NaiveDateTime,
         pub updated_at: NaiveDateTime,
         pub user_uuid: UserId,
@@ -20,7 +26,7 @@ db_object! {
     #[diesel(primary_key(cipher_uuid, folder_uuid))]
     pub struct FolderCipher {
         pub cipher_uuid: CipherId,
-        pub folder_uuid: String,
+        pub folder_uuid: FolderId,
     }
 }

@@ -30,7 +36,7 @@ impl Folder {
         let now = Utc::now().naive_utc();

         Self {
-            uuid: crate::util::get_uuid(),
+            uuid: FolderId(crate::util::get_uuid()),
             created_at: now,
             updated_at: now,

@@ -52,10 +58,10 @@ impl Folder {
 }

 impl FolderCipher {
-    pub fn new(folder_uuid: &str, cipher_uuid: &CipherId) -> Self {
+    pub fn new(folder_uuid: FolderId, cipher_uuid: CipherId) -> Self {
         Self {
-            folder_uuid: folder_uuid.to_string(),
-            cipher_uuid: cipher_uuid.clone(),
+            folder_uuid,
+            cipher_uuid,
         }
     }
 }
@@ -120,7 +126,7 @@ impl Folder {
         Ok(())
     }

-    pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &FolderId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             folders::table
                 .filter(folders::uuid.eq(uuid))
@@ -185,7 +191,7 @@ impl FolderCipher {
         }}
     }

-    pub async fn delete_all_by_folder(folder_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    pub async fn delete_all_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
             diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
                 .execute(conn)
@@ -194,7 +200,7 @@ impl FolderCipher {
     }

     pub async fn find_by_folder_and_cipher(
-        folder_uuid: &str,
+        folder_uuid: &FolderId,
         cipher_uuid: &CipherId,
         conn: &mut DbConn,
     ) -> Option<Self> {
@@ -208,7 +214,7 @@ impl FolderCipher {
         }}
     }

-    pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             folders_ciphers::table
                 .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -220,14 +226,62 @@ impl FolderCipher {

     /// Return a vec with (cipher_uuid, folder_uuid)
     /// This is used during a full sync so we only need one query for all folder matches.
-    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<(CipherId, String)> {
+    pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<(CipherId, FolderId)> {
         db_run! { conn: {
             folders_ciphers::table
                 .inner_join(folders::table)
                 .filter(folders::user_uuid.eq(user_uuid))
                 .select(folders_ciphers::all_columns)
-                .load::<(CipherId, String)>(conn)
+                .load::<(CipherId, FolderId)>(conn)
                 .unwrap_or_default()
         }}
     }
 }
+
+#[derive(DieselNewType, FromForm, Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
+pub struct FolderId(String);
+
+impl AsRef<str> for FolderId {
+    fn as_ref(&self) -> &str {
+        &self.0
+    }
+}
+
+impl Deref for FolderId {
+    type Target = str;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl Borrow<str> for FolderId {
+    fn borrow(&self) -> &str {
+        &self.0
+    }
+}
+
+impl Display for FolderId {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl From<String> for FolderId {
+    fn from(raw: String) -> Self {
+        Self(raw)
+    }
+}
+
+impl<'r> FromParam<'r> for FolderId {
+    type Error = ();
+
+    #[inline(always)]
+    fn from_param(param: &'r str) -> Result<Self, Self::Error> {
+        if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) {
+            Ok(Self(param.to_string()))
+        } else {
+            Err(())
+        }
+    }
+}
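One detail worth noting in the new FolderId type above is the Borrow<str> impl: combined with the derived Hash and Eq, it lets collections keyed by FolderId be looked up with a plain &str, without allocating a temporary id. A small self-contained sketch of that mechanism (the type here is a stand-in, not the full vaultwarden definition):

    use std::borrow::Borrow;
    use std::collections::HashSet;

    // Stand-in newtype; the real FolderId also derives DieselNewType, FromForm, Serialize, etc.
    #[derive(Clone, Debug, Hash, PartialEq, Eq)]
    struct FolderId(String);

    impl Borrow<str> for FolderId {
        fn borrow(&self) -> &str {
            &self.0
        }
    }

    fn main() {
        let mut known: HashSet<FolderId> = HashSet::new();
        known.insert(FolderId("made-up-folder-id".to_string()));

        // HashSet::contains accepts any Q where FolderId: Borrow<Q> and Q: Hash + Eq,
        // so a &str probe works without constructing a FolderId.
        assert!(known.contains("made-up-folder-id"));
    }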
@@ -24,7 +24,7 @@ pub use self::device::{Device, DeviceType};
 pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
 pub use self::event::{Event, EventType};
 pub use self::favorite::Favorite;
-pub use self::folder::{Folder, FolderCipher};
+pub use self::folder::{Folder, FolderCipher, FolderId};
 pub use self::group::{CollectionGroup, Group, GroupId, GroupUser};
 pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
 pub use self::organization::{