Forked from mirrored/vaultwarden
Merge pull request #493 from endyman/feature/initial_mysql_support
Initial support for mysql
Commit 1322b876e9
75 changed files with 610 additions and 134 deletions
@@ -6,4 +6,6 @@ cache: cargo

 # Nothing to install
 install: true
-script: cargo build --all-features
+script:
+- cargo build --features "sqlite enable_syslog"
+- cargo build --features "mysql enable_syslog"
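The CI now builds each backend separately because the two Diesel backends are mutually exclusive. The guard that enforces this lives in src/main.rs further down in this diff; for reference, it is simply:

#[cfg(all(feature = "sqlite", feature = "mysql"))]
compile_error!("Can't enable both backends");

#[cfg(not(any(feature = "sqlite", feature = "mysql")))]
compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");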
25 Cargo.lock (generated)
@@ -116,7 +116,6 @@ dependencies = [
 "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "lettre 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "lettre_email 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"libsqlite3-sys 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "multipart 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -429,8 +428,9 @@ dependencies = [
 "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "diesel_derives 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"libsqlite3-sys 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"mysqlclient-sys 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "r2d2 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -1044,16 +1044,6 @@ name = "libc"
 version = "0.2.55"
 source = "registry+https://github.com/rust-lang/crates.io-index"

-[[package]]
-name = "libsqlite3-sys"
-version = "0.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)",
- "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "lock_api"
 version = "0.1.5"
@@ -1250,6 +1240,15 @@ dependencies = [
 "twoway 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "mysqlclient-sys"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "native-tls"
 version = "0.2.3"
@@ -2861,7 +2860,6 @@ dependencies = [
 "checksum lettre 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "646aee0a55545eaffdf0df1ac19b500b51adb3095ec4dfdc704134e56ea23531"
 "checksum lettre_email 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1b3d43e4bb7beb9974a359cbb3ea4f93dfba6c1c0c6e9c9f82e538e0f9ab9f"
 "checksum libc 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "42914d39aad277d9e176efbdad68acb1d5443ab65afe0e0e4f0d49352a950880"
-"checksum libsqlite3-sys 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fd6457c70bbff456d9fe49deaba35ec47c3e598bf8d7950ff0575ceb7a8a6ad1"
 "checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"
 "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
 "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
@@ -2883,6 +2881,7 @@ dependencies = [
 "checksum mio-extras 2.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "46e73a04c2fa6250b8d802134d56d554a9ec2922bf977777c805ea5def61ce40"
 "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
 "checksum multipart 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)" = "136eed74cadb9edd2651ffba732b19a450316b680e4f48d6c79e905799e19d01"
+"checksum mysqlclient-sys 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7e9637d93448044078aaafea7419aed69d301b4a12bcc4aa0ae856eb169bef85"
 "checksum native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e"
 "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
 "checksum new_debug_unreachable 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f40f005c60db6e03bae699e414c58bf9aa7ea02a2d0b9bfbcf19286cc4c82b30"
@@ -13,6 +13,8 @@ build = "build.rs"
 [features]
 # Empty to keep compatibility, prefer to set USE_SYSLOG=true
 enable_syslog = []
+mysql = ["diesel/mysql", "diesel_migrations/mysql"]
+sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]

 [target."cfg(not(windows))".dependencies]
 syslog = "4.0.1"
@@ -47,11 +49,11 @@ log = "0.4.6"
 fern = { version = "0.5.8", features = ["syslog-4"] }

 # A safe, extensible ORM and Query builder
-diesel = { version = "1.4.2", features = ["sqlite", "chrono", "r2d2"] }
-diesel_migrations = { version = "1.4.0", features = ["sqlite"] }
+diesel = { version = "1.4.2", features = [ "chrono", "r2d2"] }
+diesel_migrations = { version = "1.4.0" }

 # Bundled SQLite
-libsqlite3-sys = { version = "0.12.0", features = ["bundled"] }
+libsqlite3-sys = { version = "0.12.0", features = ["bundled"], optional = true }

 # Crypto library
 ring = "0.14.6"
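These new feature flags do more than forward to Diesel's own backend features: as the src/db/mod.rs hunk later in this diff shows, they also decide which connection type the whole crate is compiled against, e.g.:

#[cfg(feature = "sqlite")]
type Connection = diesel::sqlite::SqliteConnection;
#[cfg(feature = "mysql")]
type Connection = diesel::mysql::MysqlConnection;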
14 Dockerfile
@@ -23,12 +23,21 @@ RUN ls
 # we need the Rust compiler and Cargo tooling
 FROM rust as build

+# set sqlite as default for DB ARG for backward comaptibility
+ARG DB=sqlite
+
 # Using bundled SQLite, no need to install it
 # RUN apt-get update && apt-get install -y\
 # sqlite3\
 # --no-install-recommends\
 # && rm -rf /var/lib/apt/lists/*

+# Install MySQL package
+RUN apt-get update && apt-get install -y \
+    libmariadb-dev\
+    --no-install-recommends\
+    && rm -rf /var/lib/apt/lists/*
+
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin app
 WORKDIR /app
@@ -41,7 +50,7 @@ COPY ./build.rs ./build.rs
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --release
+RUN cargo build --features ${DB} --release
 RUN find . -not -path "./target*" -delete

 # Copies the complete project
@@ -53,7 +62,7 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-RUN cargo build --release
+RUN cargo build --features ${DB} --release

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
@@ -68,6 +77,7 @@ ENV ROCKET_WORKERS=10
 RUN apt-get update && apt-get install -y\
     openssl\
     ca-certificates\
+    libmariadbclient-dev\
    --no-install-recommends\
     && rm -rf /var/lib/apt/lists/*

@@ -23,6 +23,9 @@ RUN ls
 # we need the Rust compiler and Cargo tooling
 FROM rust as build

+# set sqlite as default for DB ARG for backward comaptibility
+ARG DB=sqlite
+
 RUN apt-get update \
     && apt-get install -y \
         gcc-aarch64-linux-gnu \
@@ -42,7 +45,8 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && apt-get update \
     && apt-get install -y \
         libssl-dev:arm64 \
-        libc6-dev:arm64
+        libc6-dev:arm64 \
+        libmariadb-dev:arm64

 ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
 ENV CROSS_COMPILE="1"
@@ -55,7 +59,7 @@ COPY . .

 # Build
 RUN rustup target add aarch64-unknown-linux-gnu
-RUN cargo build --release --target=aarch64-unknown-linux-gnu -v
+RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu -v

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
@@ -72,6 +76,7 @@ RUN [ "cross-build-start" ]
 RUN apt-get update && apt-get install -y\
     openssl\
     ca-certificates\
+    libmariadbclient-dev\
    --no-install-recommends\
     && rm -rf /var/lib/apt/lists/*

@@ -22,8 +22,17 @@ RUN ls
 # Musl build image for statically compiled binary
 FROM clux/muslrust:nightly-2018-12-01 as build

+# set sqlite as default for DB ARG for backward comaptibility
+ARG DB=sqlite
+
 ENV USER "root"

+# Install needed libraries
+RUN apt-get update && apt-get install -y\
+    libmysqlclient-dev\
+    --no-install-recommends\
+    && rm -rf /var/lib/apt/lists/*
+
 WORKDIR /app

 # Copies the complete project
@@ -32,8 +41,11 @@ COPY . .

 RUN rustup target add x86_64-unknown-linux-musl

+# Make sure that we actually build the project
+RUN touch src/main.rs
+
 # Build
-RUN cargo build --release
+RUN cargo build --features ${DB} --release

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
@@ -47,7 +59,8 @@ ENV SSL_CERT_DIR=/etc/ssl/certs

 # Install needed libraries
 RUN apk add \
-    openssl\
+    openssl \
+    mariadb-connector-c \
     ca-certificates \
     && rm /var/cache/apk/*

@@ -23,6 +23,9 @@ RUN ls
 # we need the Rust compiler and Cargo tooling
 FROM rust as build

+# set sqlite as default for DB ARG for backward comaptibility
+ARG DB=sqlite
+
 RUN apt-get update \
     && apt-get install -y \
         gcc-arm-linux-gnueabi \
@@ -42,7 +45,8 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && apt-get update \
     && apt-get install -y \
         libssl-dev:armel \
-        libc6-dev:armel
+        libc6-dev:armel \
+        libmariadb-dev:armel

 ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
 ENV CROSS_COMPILE="1"
@@ -55,7 +59,7 @@ COPY . .

 # Build
 RUN rustup target add arm-unknown-linux-gnueabi
-RUN cargo build --release --target=arm-unknown-linux-gnueabi -v
+RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi -v

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
@@ -72,6 +76,7 @@ RUN [ "cross-build-start" ]
 RUN apt-get update && apt-get install -y\
     openssl\
     ca-certificates\
+    libmariadbclient-dev\
    --no-install-recommends\
     && ln -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3\
     && rm -rf /var/lib/apt/lists/*
@@ -23,6 +23,9 @@ RUN ls
 # we need the Rust compiler and Cargo tooling
 FROM rust as build

+# set sqlite as default for DB ARG for backward comaptibility
+ARG DB=sqlite
+
 RUN apt-get update \
     && apt-get install -y \
         gcc-arm-linux-gnueabihf \
@@ -42,7 +45,9 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && apt-get update \
     && apt-get install -y \
         libssl-dev:armhf \
-        libc6-dev:armhf
+        libc6-dev:armhf \
+        libmariadb-dev:armhf
+

 ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
 ENV CROSS_COMPILE="1"
@@ -55,7 +60,7 @@ COPY . .

 # Build
 RUN rustup target add armv7-unknown-linux-gnueabihf
-RUN cargo build --release --target=armv7-unknown-linux-gnueabihf -v
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf -v

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
@@ -72,6 +77,7 @@ RUN [ "cross-build-start" ]
 RUN apt-get update && apt-get install -y\
     openssl\
     ca-certificates\
+    libmariadbclient-dev\
    --no-install-recommends\
     && rm -rf /var/lib/apt/lists/*

@@ -8,10 +8,18 @@ steps:
     echo "##vso[task.prependpath]$HOME/.cargo/bin"
   displayName: 'Install Rust'

+- script: |
+    sudo apt-get update
+    sudo apt-get install -y libmysql++-dev
+  displayName: Install libmysql
+
 - script: |
     rustc -Vv
     cargo -V
   displayName: Query rust and cargo versions

-- script : cargo build --all-features
-  displayName: 'Build project'
+- script : cargo build --features "sqlite"
+  displayName: 'Build project with sqlite backend'
+
+- script : cargo build --features "mysql"
+  displayName: 'Build project with mysql backend'
62 migrations/mysql/2018-01-14-171611_create_tables/up.sql (new file)
@@ -0,0 +1,62 @@
CREATE TABLE users (
  uuid CHAR(36) NOT NULL PRIMARY KEY,
  created_at DATETIME NOT NULL,
  updated_at DATETIME NOT NULL,
  email VARCHAR(255) NOT NULL UNIQUE,
  name TEXT NOT NULL,
  password_hash BLOB NOT NULL,
  salt BLOB NOT NULL,
  password_iterations INTEGER NOT NULL,
  password_hint TEXT,
  `key` TEXT NOT NULL,
  private_key TEXT,
  public_key TEXT,
  totp_secret TEXT,
  totp_recover TEXT,
  security_stamp TEXT NOT NULL,
  equivalent_domains TEXT NOT NULL,
  excluded_globals TEXT NOT NULL
);

CREATE TABLE devices (
  uuid CHAR(36) NOT NULL PRIMARY KEY,
  created_at DATETIME NOT NULL,
  updated_at DATETIME NOT NULL,
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  name TEXT NOT NULL,
  type INTEGER NOT NULL,
  push_token TEXT,
  refresh_token TEXT NOT NULL
);

CREATE TABLE ciphers (
  uuid CHAR(36) NOT NULL PRIMARY KEY,
  created_at DATETIME NOT NULL,
  updated_at DATETIME NOT NULL,
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  folder_uuid CHAR(36) REFERENCES folders (uuid),
  organization_uuid CHAR(36),
  type INTEGER NOT NULL,
  name TEXT NOT NULL,
  notes TEXT,
  fields TEXT,
  data TEXT NOT NULL,
  favorite BOOLEAN NOT NULL
);

CREATE TABLE attachments (
  id CHAR(36) NOT NULL PRIMARY KEY,
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  file_name TEXT NOT NULL,
  file_size INTEGER NOT NULL

);

CREATE TABLE folders (
  uuid CHAR(36) NOT NULL PRIMARY KEY,
  created_at DATETIME NOT NULL,
  updated_at DATETIME NOT NULL,
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  name TEXT NOT NULL
);
@@ -0,0 +1,30 @@
CREATE TABLE collections (
  uuid VARCHAR(40) NOT NULL PRIMARY KEY,
  org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
  name TEXT NOT NULL
);

CREATE TABLE organizations (
  uuid VARCHAR(40) NOT NULL PRIMARY KEY,
  name TEXT NOT NULL,
  billing_email TEXT NOT NULL
);

CREATE TABLE users_collections (
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
  PRIMARY KEY (user_uuid, collection_uuid)
);

CREATE TABLE users_organizations (
  uuid CHAR(36) NOT NULL PRIMARY KEY,
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),

  access_all BOOLEAN NOT NULL,
  `key` TEXT NOT NULL,
  status INTEGER NOT NULL,
  type INTEGER NOT NULL,

  UNIQUE (user_uuid, org_uuid)
);
34 migrations/mysql/2018-04-27-155151_create_users_ciphers/up.sql (new file)
@@ -0,0 +1,34 @@
ALTER TABLE ciphers RENAME TO oldCiphers;

CREATE TABLE ciphers (
  uuid CHAR(36) NOT NULL PRIMARY KEY,
  created_at DATETIME NOT NULL,
  updated_at DATETIME NOT NULL,
  user_uuid CHAR(36) REFERENCES users (uuid), -- Make this optional
  organization_uuid CHAR(36) REFERENCES organizations (uuid), -- Add reference to orgs table
  -- Remove folder_uuid
  type INTEGER NOT NULL,
  name TEXT NOT NULL,
  notes TEXT,
  fields TEXT,
  data TEXT NOT NULL,
  favorite BOOLEAN NOT NULL
);

CREATE TABLE folders_ciphers (
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid),

  PRIMARY KEY (cipher_uuid, folder_uuid)
);

INSERT INTO ciphers (uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite FROM oldCiphers;

INSERT INTO folders_ciphers (cipher_uuid, folder_uuid)
SELECT uuid, folder_uuid FROM oldCiphers WHERE folder_uuid IS NOT NULL;


DROP TABLE oldCiphers;

ALTER TABLE users_collections ADD COLUMN read_only BOOLEAN NOT NULL DEFAULT 0; -- False
@@ -0,0 +1,5 @@
CREATE TABLE ciphers_collections (
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid),
  PRIMARY KEY (cipher_uuid, collection_uuid)
);
@@ -0,0 +1,14 @@
ALTER TABLE attachments RENAME TO oldAttachments;

CREATE TABLE attachments (
  id CHAR(36) NOT NULL PRIMARY KEY,
  cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid),
  file_name TEXT NOT NULL,
  file_size INTEGER NOT NULL

);

INSERT INTO attachments (id, cipher_uuid, file_name, file_size)
SELECT id, cipher_uuid, file_name, file_size FROM oldAttachments;

DROP TABLE oldAttachments;
15 migrations/mysql/2018-07-11-181453_create_u2f_twofactor/up.sql (new file)
@@ -0,0 +1,15 @@
CREATE TABLE twofactor (
  uuid CHAR(36) NOT NULL PRIMARY KEY,
  user_uuid CHAR(36) NOT NULL REFERENCES users (uuid),
  type INTEGER NOT NULL,
  enabled BOOLEAN NOT NULL,
  data TEXT NOT NULL,

  UNIQUE (user_uuid, type)
);


INSERT INTO twofactor (uuid, user_uuid, type, enabled, data)
SELECT UUID(), uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL;

UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty
3 migrations/mysql/2018-09-10-111213_add_invites/up.sql (new file)
@@ -0,0 +1,3 @@
CREATE TABLE invitations (
  email VARCHAR(255) NOT NULL PRIMARY KEY
);
3 migrations/mysql/2018-11-27-152651_add_att_key_columns/up.sql (new file)
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ADD COLUMN
  `key` TEXT;
@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN akey `key` TEXT;
ALTER TABLE ciphers CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN atype type INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN akey `key` TEXT;
ALTER TABLE users_organizations CHANGE COLUMN atype type INTEGER NOT NULL;
@@ -0,0 +1,7 @@
ALTER TABLE attachments CHANGE COLUMN `key` akey TEXT;
ALTER TABLE ciphers CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE devices CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE twofactor CHANGE COLUMN type atype INTEGER NOT NULL;
ALTER TABLE users CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN `key` akey TEXT;
ALTER TABLE users_organizations CHANGE COLUMN type atype INTEGER NOT NULL;
9 migrations/sqlite/2018-01-14-171611_create_tables/down.sql (new file)
@@ -0,0 +1,9 @@
DROP TABLE users;

DROP TABLE devices;

DROP TABLE ciphers;

DROP TABLE attachments;

DROP TABLE folders;
@@ -0,0 +1,8 @@
DROP TABLE collections;

DROP TABLE organizations;


DROP TABLE users_collections;

DROP TABLE users_organizations;
@@ -0,0 +1 @@
DROP TABLE ciphers_collections;

@@ -0,0 +1 @@
-- This file should undo anything in `up.sql`

@@ -0,0 +1,3 @@
ALTER TABLE devices
ADD COLUMN
  twofactor_remember TEXT;

@@ -0,0 +1,8 @@
UPDATE users
SET totp_secret = (
  SELECT twofactor.data FROM twofactor
  WHERE twofactor.type = 0
  AND twofactor.user_uuid = users.uuid
);

DROP TABLE twofactor;
0 migrations/sqlite/2018-08-27-172114_update_ciphers/down.sql (new file)
3 migrations/sqlite/2018-08-27-172114_update_ciphers/up.sql (new file)
@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
  password_history TEXT;
1 migrations/sqlite/2018-09-10-111213_add_invites/down.sql (new file)
@@ -0,0 +1 @@
DROP TABLE invitations;
0 migrations/sqlite/2018-09-19-144557_add_kdf_columns/down.sql (new file)
7 migrations/sqlite/2018-09-19-144557_add_kdf_columns/up.sql (new file)
@@ -0,0 +1,7 @@
ALTER TABLE users
ADD COLUMN
  client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2

ALTER TABLE users
ADD COLUMN
  client_kdf_iter INTEGER NOT NULL DEFAULT 5000;
0 migrations/sqlite/2018-11-27-152651_add_att_key_columns/down.sql (new file)
@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN akey TO key;
ALTER TABLE ciphers RENAME COLUMN atype TO type;
ALTER TABLE devices RENAME COLUMN atype TO type;
ALTER TABLE twofactor RENAME COLUMN atype TO type;
ALTER TABLE users RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN akey TO key;
ALTER TABLE users_organizations RENAME COLUMN atype TO type;

@@ -0,0 +1,7 @@
ALTER TABLE attachments RENAME COLUMN key TO akey;
ALTER TABLE ciphers RENAME COLUMN type TO atype;
ALTER TABLE devices RENAME COLUMN type TO atype;
ALTER TABLE twofactor RENAME COLUMN type TO atype;
ALTER TABLE users RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN key TO akey;
ALTER TABLE users_organizations RENAME COLUMN type TO atype;
|
@ -106,7 +106,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
|||
}
|
||||
|
||||
user.set_password(&data.MasterPasswordHash);
|
||||
user.key = data.Key;
|
||||
user.akey = data.Key;
|
||||
|
||||
// Add extra fields if present
|
||||
if let Some(name) = data.Name {
|
||||
|
@ -204,7 +204,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
|
|||
}
|
||||
|
||||
user.set_password(&data.NewMasterPasswordHash);
|
||||
user.key = data.Key;
|
||||
user.akey = data.Key;
|
||||
user.save(&conn)
|
||||
}
|
||||
|
||||
|
@ -231,7 +231,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
|
|||
user.client_kdf_iter = data.KdfIterations;
|
||||
user.client_kdf_type = data.Kdf;
|
||||
user.set_password(&data.NewMasterPasswordHash);
|
||||
user.key = data.Key;
|
||||
user.akey = data.Key;
|
||||
user.save(&conn)
|
||||
}
|
||||
|
||||
|
@ -306,7 +306,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
|
|||
// Update user data
|
||||
let mut user = headers.user;
|
||||
|
||||
user.key = data.Key;
|
||||
user.akey = data.Key;
|
||||
user.private_key = Some(data.PrivateKey);
|
||||
user.reset_security_stamp();
|
||||
|
||||
|
@ -377,7 +377,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
|
|||
user.email = data.NewEmail;
|
||||
|
||||
user.set_password(&data.NewMasterPasswordHash);
|
||||
user.key = data.Key;
|
||||
user.akey = data.Key;
|
||||
|
||||
user.save(&conn)
|
||||
}
|
||||
|
|
|
@ -267,7 +267,7 @@ pub fn update_cipher_from_data(
|
|||
err!("Attachment is not owned by the cipher")
|
||||
}
|
||||
|
||||
saved_att.key = Some(attachment.Key);
|
||||
saved_att.akey = Some(attachment.Key);
|
||||
saved_att.file_name = attachment.FileName;
|
||||
|
||||
saved_att.save(&conn)?;
|
||||
|
@ -691,7 +691,7 @@ fn post_attachment(
|
|||
};
|
||||
|
||||
let mut attachment = Attachment::new(file_name, cipher.uuid.clone(), name, size);
|
||||
attachment.key = attachment_key.clone();
|
||||
attachment.akey = attachment_key.clone();
|
||||
attachment.save(&conn).expect("Error saving attachment");
|
||||
}
|
||||
_ => error!("Invalid multipart name"),
|
||||
|
@ -899,7 +899,7 @@ fn delete_all(
|
|||
match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn) {
|
||||
None => err!("You don't have permission to purge the organization vault"),
|
||||
Some(user_org) => {
|
||||
if user_org.type_ == UserOrgType::Owner {
|
||||
if user_org.atype == UserOrgType::Owner {
|
||||
Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
|
||||
Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
|
||||
nt.send_user_update(UpdateType::Vault, &user);
|
||||
|
|
|
@ -63,7 +63,7 @@ fn put_device_token(uuid: String, data: JsonUpcase<Value>, headers: Headers) ->
|
|||
Ok(Json(json!({
|
||||
"Id": headers.device.uuid,
|
||||
"Name": headers.device.name,
|
||||
"Type": headers.device.type_,
|
||||
"Type": headers.device.atype,
|
||||
"Identifier": headers.device.uuid,
|
||||
"CreationDate": crate::util::format_date(&headers.device.created_at),
|
||||
})))
|
||||
|
|
|
@ -80,9 +80,9 @@ fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn
|
|||
let mut user_org = UserOrganization::new(headers.user.uuid.clone(), org.uuid.clone());
|
||||
let collection = Collection::new(org.uuid.clone(), data.CollectionName);
|
||||
|
||||
user_org.key = data.Key;
|
||||
user_org.akey = data.Key;
|
||||
user_org.access_all = true;
|
||||
user_org.type_ = UserOrgType::Owner as i32;
|
||||
user_org.atype = UserOrgType::Owner as i32;
|
||||
user_org.status = UserOrgStatus::Confirmed as i32;
|
||||
|
||||
org.save(&conn)?;
|
||||
|
@ -127,7 +127,7 @@ fn leave_organization(org_id: String, headers: Headers, conn: DbConn) -> EmptyRe
|
|||
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
|
||||
None => err!("User not part of organization"),
|
||||
Some(user_org) => {
|
||||
if user_org.type_ == UserOrgType::Owner {
|
||||
if user_org.atype == UserOrgType::Owner {
|
||||
let num_owners =
|
||||
UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
|
||||
|
||||
|
@ -505,7 +505,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
|
|||
let mut new_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
|
||||
let access_all = data.AccessAll.unwrap_or(false);
|
||||
new_user.access_all = access_all;
|
||||
new_user.type_ = new_type;
|
||||
new_user.atype = new_type;
|
||||
new_user.status = user_org_status;
|
||||
|
||||
// If no accessAll, add the collections received
|
||||
|
@ -657,7 +657,7 @@ fn confirm_invite(
|
|||
None => err!("The specified user isn't a member of the organization"),
|
||||
};
|
||||
|
||||
if user_to_confirm.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
|
||||
if user_to_confirm.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
|
||||
err!("Only Owners can confirm Managers, Admins or Owners")
|
||||
}
|
||||
|
||||
|
@ -666,7 +666,7 @@ fn confirm_invite(
|
|||
}
|
||||
|
||||
user_to_confirm.status = UserOrgStatus::Confirmed as i32;
|
||||
user_to_confirm.key = match data["Key"].as_str() {
|
||||
user_to_confirm.akey = match data["Key"].as_str() {
|
||||
Some(key) => key.to_string(),
|
||||
None => err!("Invalid key provided"),
|
||||
};
|
||||
|
@ -735,18 +735,18 @@ fn edit_user(
|
|||
None => err!("The specified user isn't member of the organization"),
|
||||
};
|
||||
|
||||
if new_type != user_to_edit.type_
|
||||
&& (user_to_edit.type_ >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
|
||||
if new_type != user_to_edit.atype
|
||||
&& (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
|
||||
&& headers.org_user_type != UserOrgType::Owner
|
||||
{
|
||||
err!("Only Owners can grant and remove Admin or Owner privileges")
|
||||
}
|
||||
|
||||
if user_to_edit.type_ == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
|
||||
if user_to_edit.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
|
||||
err!("Only Owners can edit Owner users")
|
||||
}
|
||||
|
||||
if user_to_edit.type_ == UserOrgType::Owner && new_type != UserOrgType::Owner {
|
||||
if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
|
||||
// Removing owner permmission, check that there are at least another owner
|
||||
let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
|
||||
|
||||
|
@ -756,7 +756,7 @@ fn edit_user(
|
|||
}
|
||||
|
||||
user_to_edit.access_all = data.AccessAll;
|
||||
user_to_edit.type_ = new_type as i32;
|
||||
user_to_edit.atype = new_type as i32;
|
||||
|
||||
// Delete all the odd collections
|
||||
for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &user_to_edit.user_uuid, &conn) {
|
||||
|
@ -785,11 +785,11 @@ fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn:
|
|||
None => err!("User to delete isn't member of the organization"),
|
||||
};
|
||||
|
||||
if user_to_delete.type_ != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
|
||||
if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
|
||||
err!("Only Owners can delete Admins or Owners")
|
||||
}
|
||||
|
||||
if user_to_delete.type_ == UserOrgType::Owner {
|
||||
if user_to_delete.atype == UserOrgType::Owner {
|
||||
// Removing owner, check that there are at least another owner
|
||||
let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
|
||||
|
||||
|
@ -842,7 +842,7 @@ fn post_org_import(
|
|||
None => err!("User is not part of the organization"),
|
||||
};
|
||||
|
||||
if org_user.type_ < UserOrgType::Admin {
|
||||
if org_user.atype < UserOrgType::Admin {
|
||||
err!("Only admins or owners can import into an organization")
|
||||
}
|
||||
|
||||
|
|
|
@ -68,7 +68,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
|||
"expires_in": expires_in,
|
||||
"token_type": "Bearer",
|
||||
"refresh_token": device.refresh_token,
|
||||
"Key": user.key,
|
||||
"Key": user.akey,
|
||||
"PrivateKey": user.private_key,
|
||||
})))
|
||||
}
|
||||
|
@ -132,7 +132,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
|
|||
"expires_in": expires_in,
|
||||
"token_type": "Bearer",
|
||||
"refresh_token": device.refresh_token,
|
||||
"Key": user.key,
|
||||
"Key": user.akey,
|
||||
"PrivateKey": user.private_key,
|
||||
//"TwoFactorToken": "11122233333444555666777888999"
|
||||
});
|
||||
|
@ -158,7 +158,7 @@ fn twofactor_auth(
|
|||
return Ok(None);
|
||||
}
|
||||
|
||||
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.type_).collect();
|
||||
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
|
||||
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, asume the first one
|
||||
|
||||
let twofactor_code = match data.two_factor_token {
|
||||
|
@ -166,7 +166,7 @@ fn twofactor_auth(
|
|||
None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?),
|
||||
};
|
||||
|
||||
let selected_twofactor = twofactors.into_iter().filter(|tf| tf.type_ == selected_id).nth(0);
|
||||
let selected_twofactor = twofactors.into_iter().filter(|tf| tf.atype == selected_id).nth(0);
|
||||
|
||||
use crate::api::core::two_factor as _tf;
|
||||
use crate::crypto::ct_eq;
|
||||
|
|
|
@ -286,7 +286,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
|
|||
device: headers.device,
|
||||
user,
|
||||
org_user_type: {
|
||||
if let Some(org_usr_type) = UserOrgType::from_i32(org_user.type_) {
|
||||
if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
|
||||
org_usr_type
|
||||
} else {
|
||||
// This should only happen if the DB is corrupted
|
||||
|
|
|
@ -202,7 +202,6 @@ make_config! {
|
|||
folders {
|
||||
/// Data folder |> Main data folder
|
||||
data_folder: String, false, def, "data".to_string();
|
||||
|
||||
/// Database URL
|
||||
database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
|
||||
/// Icon chache folder
|
||||
|
|
|
@@ -2,7 +2,6 @@ use std::ops::Deref;

 use diesel::r2d2;
 use diesel::r2d2::ConnectionManager;
-use diesel::sqlite::SqliteConnection;
 use diesel::{Connection as DieselConnection, ConnectionError};

 use rocket::http::Status;
@@ -17,16 +16,25 @@ use crate::error::Error;
 use crate::CONFIG;

 /// An alias to the database connection used
-type Connection = SqliteConnection;
+#[cfg(feature = "sqlite")]
+type Connection = diesel::sqlite::SqliteConnection;
+#[cfg(feature = "mysql")]
+type Connection = diesel::mysql::MysqlConnection;

-/// An alias to the type for a pool of Diesel SQLite connections.
+/// An alias to the type for a pool of Diesel connections.
 type Pool = r2d2::Pool<ConnectionManager<Connection>>;

 /// Connection request guard type: a wrapper around an r2d2 pooled connection.
 pub struct DbConn(pub r2d2::PooledConnection<ConnectionManager<Connection>>);

 pub mod models;
+#[cfg(feature = "sqlite")]
+#[path = "schemas/sqlite/schema.rs"]
 pub mod schema;
+#[cfg(feature = "mysql")]
+#[path = "schemas/mysql/schema.rs"]
+pub mod schema;

 /// Initializes a database pool.
 pub fn init_pool() -> Pool {
@@ -36,7 +44,9 @@ pub fn init_pool() -> Pool {
 }

 pub fn get_connection() -> Result<Connection, ConnectionError> {
-    Connection::establish(&CONFIG.database_url())
+    let url = CONFIG.database_url();
+    println!("{}", url.to_string());
+    Connection::establish(&url)
 }

 /// Creates a back-up of the database using sqlite3
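Because both generated schemas expose identical table and column names, code built on top of this plumbing stays backend-agnostic. A minimal sketch under that assumption (the helper name is hypothetical; the query style and the &**conn deref follow the model code elsewhere in this diff):

use diesel::prelude::*;
use crate::db::{schema::twofactor, DbConn};

// Hypothetical helper: counts a user's twofactor rows on whichever backend
// the `Connection` alias resolved to at compile time.
pub fn count_twofactor(user_uuid: &str, conn: &DbConn) -> i64 {
    twofactor::table
        .filter(twofactor::user_uuid.eq(user_uuid))
        .count()
        .get_result(&**conn)
        .unwrap_or(0)
}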
|
@ -12,7 +12,7 @@ pub struct Attachment {
|
|||
pub cipher_uuid: String,
|
||||
pub file_name: String,
|
||||
pub file_size: i32,
|
||||
pub key: Option<String>,
|
||||
pub akey: Option<String>,
|
||||
}
|
||||
|
||||
/// Local methods
|
||||
|
@ -23,7 +23,7 @@ impl Attachment {
|
|||
cipher_uuid,
|
||||
file_name,
|
||||
file_size,
|
||||
key: None,
|
||||
akey: None,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ impl Attachment {
|
|||
"FileName": self.file_name,
|
||||
"Size": self.file_size.to_string(),
|
||||
"SizeName": display_size,
|
||||
"Key": self.key,
|
||||
"Key": self.akey,
|
||||
"Object": "attachment"
|
||||
})
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ pub struct Cipher {
|
|||
Card = 3,
|
||||
Identity = 4
|
||||
*/
|
||||
pub type_: i32,
|
||||
pub atype: i32,
|
||||
pub name: String,
|
||||
pub notes: Option<String>,
|
||||
pub fields: Option<String>,
|
||||
|
@ -37,7 +37,7 @@ pub struct Cipher {
|
|||
|
||||
/// Local methods
|
||||
impl Cipher {
|
||||
pub fn new(type_: i32, name: String) -> Self {
|
||||
pub fn new(atype: i32, name: String) -> Self {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
|
@ -48,7 +48,7 @@ impl Cipher {
|
|||
user_uuid: None,
|
||||
organization_uuid: None,
|
||||
|
||||
type_,
|
||||
atype,
|
||||
favorite: false,
|
||||
name,
|
||||
|
||||
|
@ -94,7 +94,7 @@ impl Cipher {
|
|||
// TODO: ******* Backwards compat start **********
|
||||
// To remove backwards compatibility, just remove this entire section
|
||||
// and remove the compat code from ciphers::update_cipher_from_data
|
||||
if self.type_ == 1 && data_json["Uris"].is_array() {
|
||||
if self.atype == 1 && data_json["Uris"].is_array() {
|
||||
let uri = data_json["Uris"][0]["Uri"].clone();
|
||||
data_json["Uri"] = uri;
|
||||
}
|
||||
|
@ -102,7 +102,7 @@ impl Cipher {
|
|||
|
||||
let mut json_object = json!({
|
||||
"Id": self.uuid,
|
||||
"Type": self.type_,
|
||||
"Type": self.atype,
|
||||
"RevisionDate": format_date(&self.updated_at),
|
||||
"FolderId": self.get_folder_uuid(&user_uuid, &conn),
|
||||
"Favorite": self.favorite,
|
||||
|
@ -123,7 +123,7 @@ impl Cipher {
|
|||
"PasswordHistory": password_history_json,
|
||||
});
|
||||
|
||||
let key = match self.type_ {
|
||||
let key = match self.atype {
|
||||
1 => "Login",
|
||||
2 => "SecureNote",
|
||||
3 => "Card",
|
||||
|
@ -237,7 +237,7 @@ impl Cipher {
|
|||
// Cipher owner
|
||||
users_organizations::access_all.eq(true).or(
|
||||
// access_all in Organization
|
||||
users_organizations::type_.le(UserOrgType::Admin as i32).or(
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32).or(
|
||||
// Org admin or owner
|
||||
users_collections::user_uuid.eq(user_uuid).and(
|
||||
users_collections::read_only.eq(false), //R/W access to collection
|
||||
|
@ -268,7 +268,7 @@ impl Cipher {
|
|||
// Cipher owner
|
||||
users_organizations::access_all.eq(true).or(
|
||||
// access_all in Organization
|
||||
users_organizations::type_.le(UserOrgType::Admin as i32).or(
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32).or(
|
||||
// Org admin or owner
|
||||
users_collections::user_uuid.eq(user_uuid), // Access to Collection
|
||||
),
|
||||
|
@ -315,7 +315,7 @@ impl Cipher {
|
|||
))
|
||||
.filter(ciphers::user_uuid.eq(user_uuid).or( // Cipher owner
|
||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||
users_organizations::type_.le(UserOrgType::Admin as i32).or( // Org admin or owner
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32).or( // Org admin or owner
|
||||
users_collections::user_uuid.eq(user_uuid).and( // Access to Collection
|
||||
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
|
||||
)
|
||||
|
@ -365,7 +365,7 @@ impl Cipher {
|
|||
.filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
|
||||
.filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
|
||||
users_organizations::access_all.eq(true).or( // User has access all
|
||||
users_organizations::type_.le(UserOrgType::Admin as i32) // User is admin or owner
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
|
||||
)
|
||||
))
|
||||
.select(ciphers_collections::collection_uuid)
|
||||
|
|
|
@ -146,7 +146,7 @@ impl Collection {
|
|||
.filter(
|
||||
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
|
||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||
users_organizations::type_.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||
)
|
||||
)
|
||||
).select(collections::all_columns)
|
||||
|
|
|
@ -15,7 +15,7 @@ pub struct Device {
|
|||
|
||||
pub name: String,
|
||||
/// https://github.com/bitwarden/core/tree/master/src/Core/Enums
|
||||
pub type_: i32,
|
||||
pub atype: i32,
|
||||
pub push_token: Option<String>,
|
||||
|
||||
pub refresh_token: String,
|
||||
|
@ -25,7 +25,7 @@ pub struct Device {
|
|||
|
||||
/// Local methods
|
||||
impl Device {
|
||||
pub fn new(uuid: String, user_uuid: String, name: String, type_: i32) -> Self {
|
||||
pub fn new(uuid: String, user_uuid: String, name: String, atype: i32) -> Self {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
|
@ -35,7 +35,7 @@ impl Device {
|
|||
|
||||
user_uuid,
|
||||
name,
|
||||
type_,
|
||||
atype,
|
||||
|
||||
push_token: None,
|
||||
refresh_token: String::new(),
|
||||
|
@ -70,10 +70,10 @@ impl Device {
|
|||
let time_now = Utc::now().naive_utc();
|
||||
self.updated_at = time_now;
|
||||
|
||||
let orgowner: Vec<_> = orgs.iter().filter(|o| o.type_ == 0).map(|o| o.org_uuid.clone()).collect();
|
||||
let orgadmin: Vec<_> = orgs.iter().filter(|o| o.type_ == 1).map(|o| o.org_uuid.clone()).collect();
|
||||
let orguser: Vec<_> = orgs.iter().filter(|o| o.type_ == 2).map(|o| o.org_uuid.clone()).collect();
|
||||
let orgmanager: Vec<_> = orgs.iter().filter(|o| o.type_ == 3).map(|o| o.org_uuid.clone()).collect();
|
||||
let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
|
||||
let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
|
||||
let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
|
||||
let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
|
||||
|
||||
|
||||
// Create the JWT claims struct, to send to the client
|
||||
|
|
|
@ -21,9 +21,9 @@ pub struct UserOrganization {
|
|||
pub org_uuid: String,
|
||||
|
||||
pub access_all: bool,
|
||||
pub key: String,
|
||||
pub akey: String,
|
||||
pub status: i32,
|
||||
pub type_: i32,
|
||||
pub atype: i32,
|
||||
}
|
||||
|
||||
pub enum UserOrgStatus {
|
||||
|
@ -196,9 +196,9 @@ impl UserOrganization {
|
|||
org_uuid,
|
||||
|
||||
access_all: false,
|
||||
key: String::new(),
|
||||
akey: String::new(),
|
||||
status: UserOrgStatus::Accepted as i32,
|
||||
type_: UserOrgType::User as i32,
|
||||
atype: UserOrgType::User as i32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -266,9 +266,9 @@ impl UserOrganization {
|
|||
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
|
||||
|
||||
// These are per user
|
||||
"Key": self.key,
|
||||
"Key": self.akey,
|
||||
"Status": self.status,
|
||||
"Type": self.type_,
|
||||
"Type": self.atype,
|
||||
"Enabled": true,
|
||||
|
||||
"Object": "profileOrganization",
|
||||
|
@ -285,7 +285,7 @@ impl UserOrganization {
|
|||
"Email": user.email,
|
||||
|
||||
"Status": self.status,
|
||||
"Type": self.type_,
|
||||
"Type": self.atype,
|
||||
"AccessAll": self.access_all,
|
||||
|
||||
"Object": "organizationUserUserDetails",
|
||||
|
@ -315,7 +315,7 @@ impl UserOrganization {
|
|||
"UserId": self.user_uuid,
|
||||
|
||||
"Status": self.status,
|
||||
"Type": self.type_,
|
||||
"Type": self.atype,
|
||||
"AccessAll": self.access_all,
|
||||
"Collections": coll_uuids,
|
||||
|
||||
|
@ -357,7 +357,7 @@ impl UserOrganization {
|
|||
}
|
||||
|
||||
pub fn has_full_access(self) -> bool {
|
||||
self.access_all || self.type_ >= UserOrgType::Admin
|
||||
self.access_all || self.atype >= UserOrgType::Admin
|
||||
}
|
||||
|
||||
pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
|
||||
|
@ -405,10 +405,10 @@ impl UserOrganization {
|
|||
.expect("Error loading user organizations")
|
||||
}
|
||||
|
||||
pub fn find_by_org_and_type(org_uuid: &str, type_: i32, conn: &DbConn) -> Vec<Self> {
|
||||
pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Vec<Self> {
|
||||
users_organizations::table
|
||||
.filter(users_organizations::org_uuid.eq(org_uuid))
|
||||
.filter(users_organizations::type_.eq(type_))
|
||||
.filter(users_organizations::atype.eq(atype))
|
||||
.load::<Self>(&**conn)
|
||||
.expect("Error loading user organizations")
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@ use super::User;
|
|||
pub struct TwoFactor {
|
||||
pub uuid: String,
|
||||
pub user_uuid: String,
|
||||
pub type_: i32,
|
||||
pub atype: i32,
|
||||
pub enabled: bool,
|
||||
pub data: String,
|
||||
}
|
||||
|
@ -32,11 +32,11 @@ pub enum TwoFactorType {
|
|||
|
||||
/// Local methods
|
||||
impl TwoFactor {
|
||||
pub fn new(user_uuid: String, type_: TwoFactorType, data: String) -> Self {
|
||||
pub fn new(user_uuid: String, atype: TwoFactorType, data: String) -> Self {
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
user_uuid,
|
||||
type_: type_ as i32,
|
||||
atype: atype as i32,
|
||||
enabled: true,
|
||||
data,
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ impl TwoFactor {
|
|||
pub fn to_json_list(&self) -> Value {
|
||||
json!({
|
||||
"Enabled": self.enabled,
|
||||
"Type": self.type_,
|
||||
"Type": self.atype,
|
||||
"Object": "twoFactorProvider"
|
||||
})
|
||||
}
|
||||
|
@ -85,15 +85,15 @@ impl TwoFactor {
|
|||
pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
|
||||
twofactor::table
|
||||
.filter(twofactor::user_uuid.eq(user_uuid))
|
||||
.filter(twofactor::type_.lt(1000)) // Filter implementation types
|
||||
.filter(twofactor::atype.lt(1000)) // Filter implementation types
|
||||
.load::<Self>(&**conn)
|
||||
.expect("Error loading twofactor")
|
||||
}
|
||||
|
||||
pub fn find_by_user_and_type(user_uuid: &str, type_: i32, conn: &DbConn) -> Option<Self> {
|
||||
pub fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
|
||||
twofactor::table
|
||||
.filter(twofactor::user_uuid.eq(user_uuid))
|
||||
.filter(twofactor::type_.eq(type_))
|
||||
.filter(twofactor::atype.eq(atype))
|
||||
.first::<Self>(&**conn)
|
||||
.ok()
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ pub struct User {
|
|||
pub password_iterations: i32,
|
||||
pub password_hint: Option<String>,
|
||||
|
||||
pub key: String,
|
||||
pub akey: String,
|
||||
pub private_key: Option<String>,
|
||||
pub public_key: Option<String>,
|
||||
|
||||
|
@ -58,7 +58,7 @@ impl User {
|
|||
updated_at: now,
|
||||
name: email.clone(),
|
||||
email,
|
||||
key: String::new(),
|
||||
akey: String::new(),
|
||||
|
||||
password_hash: Vec::new(),
|
||||
salt: crypto::get_random_64(),
|
||||
|
@ -140,7 +140,7 @@ impl User {
|
|||
"MasterPasswordHint": self.password_hint,
|
||||
"Culture": "en-US",
|
||||
"TwoFactorEnabled": twofactor_enabled,
|
||||
"Key": self.key,
|
||||
"Key": self.akey,
|
||||
"PrivateKey": self.private_key,
|
||||
"SecurityStamp": self.security_stamp,
|
||||
"Organizations": orgs_json,
|
||||
|
@ -163,7 +163,7 @@ impl User {
|
|||
|
||||
pub fn delete(self, conn: &DbConn) -> EmptyResult {
|
||||
for user_org in UserOrganization::find_by_user(&self.uuid, &*conn) {
|
||||
if user_org.type_ == UserOrgType::Owner {
|
||||
if user_org.atype == UserOrgType::Owner {
|
||||
let owner_type = UserOrgType::Owner as i32;
|
||||
if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, &conn).len() <= 1 {
|
||||
err!("Can't delete last owner")
|
||||
|
|
172 src/db/schemas/mysql/schema.rs (new file)
@@ -0,0 +1,172 @@
table! {
    attachments (id) {
        id -> Varchar,
        cipher_uuid -> Varchar,
        file_name -> Text,
        file_size -> Integer,
        akey -> Nullable<Text>,
    }
}

table! {
    ciphers (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        user_uuid -> Nullable<Varchar>,
        organization_uuid -> Nullable<Varchar>,
        atype -> Integer,
        name -> Text,
        notes -> Nullable<Text>,
        fields -> Nullable<Text>,
        data -> Text,
        favorite -> Bool,
        password_history -> Nullable<Text>,
    }
}

table! {
    ciphers_collections (cipher_uuid, collection_uuid) {
        cipher_uuid -> Varchar,
        collection_uuid -> Varchar,
    }
}

table! {
    collections (uuid) {
        uuid -> Varchar,
        org_uuid -> Varchar,
        name -> Text,
    }
}

table! {
    devices (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        user_uuid -> Varchar,
        name -> Text,
        atype -> Integer,
        push_token -> Nullable<Text>,
        refresh_token -> Text,
        twofactor_remember -> Nullable<Text>,
    }
}

table! {
    folders (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        user_uuid -> Varchar,
        name -> Text,
    }
}

table! {
    folders_ciphers (cipher_uuid, folder_uuid) {
        cipher_uuid -> Varchar,
        folder_uuid -> Varchar,
    }
}

table! {
    invitations (email) {
        email -> Varchar,
    }
}

table! {
    organizations (uuid) {
        uuid -> Varchar,
        name -> Text,
        billing_email -> Text,
    }
}

table! {
    twofactor (uuid) {
        uuid -> Varchar,
        user_uuid -> Varchar,
        atype -> Integer,
        enabled -> Bool,
        data -> Text,
    }
}

table! {
    users (uuid) {
        uuid -> Varchar,
        created_at -> Datetime,
        updated_at -> Datetime,
        email -> Varchar,
        name -> Text,
        password_hash -> Blob,
        salt -> Blob,
        password_iterations -> Integer,
        password_hint -> Nullable<Text>,
        akey -> Text,
        private_key -> Nullable<Text>,
        public_key -> Nullable<Text>,
        totp_secret -> Nullable<Text>,
        totp_recover -> Nullable<Text>,
        security_stamp -> Text,
        equivalent_domains -> Text,
        excluded_globals -> Text,
        client_kdf_type -> Integer,
        client_kdf_iter -> Integer,
    }
}

table! {
    users_collections (user_uuid, collection_uuid) {
        user_uuid -> Varchar,
        collection_uuid -> Varchar,
        read_only -> Bool,
    }
}

table! {
    users_organizations (uuid) {
        uuid -> Varchar,
        user_uuid -> Varchar,
        org_uuid -> Varchar,
        access_all -> Bool,
        akey -> Text,
        status -> Integer,
        atype -> Integer,
    }
}

joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
joinable!(ciphers_collections -> ciphers (cipher_uuid));
joinable!(ciphers_collections -> collections (collection_uuid));
joinable!(collections -> organizations (org_uuid));
joinable!(devices -> users (user_uuid));
joinable!(folders -> users (user_uuid));
joinable!(folders_ciphers -> ciphers (cipher_uuid));
joinable!(folders_ciphers -> folders (folder_uuid));
joinable!(twofactor -> users (user_uuid));
joinable!(users_collections -> collections (collection_uuid));
joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
joinable!(users_organizations -> users (user_uuid));

allow_tables_to_appear_in_same_query!(
    attachments,
    ciphers,
    ciphers_collections,
    collections,
    devices,
    folders,
    folders_ciphers,
    invitations,
    organizations,
    twofactor,
    users,
    users_collections,
    users_organizations,
);
@@ -4,7 +4,7 @@ table! {
         cipher_uuid -> Text,
         file_name -> Text,
         file_size -> Integer,
-        key -> Nullable<Text>,
+        akey -> Nullable<Text>,
     }
 }

@@ -15,8 +15,7 @@ table! {
         updated_at -> Timestamp,
         user_uuid -> Nullable<Text>,
         organization_uuid -> Nullable<Text>,
-        #[sql_name = "type"]
-        type_ -> Integer,
+        atype -> Integer,
         name -> Text,
         notes -> Nullable<Text>,
         fields -> Nullable<Text>,
@@ -48,8 +47,7 @@ table! {
         updated_at -> Timestamp,
         user_uuid -> Text,
         name -> Text,
-        #[sql_name = "type"]
-        type_ -> Integer,
+        atype -> Integer,
         push_token -> Nullable<Text>,
         refresh_token -> Text,
         twofactor_remember -> Nullable<Text>,
@@ -91,8 +89,7 @@ table! {
     twofactor (uuid) {
         uuid -> Text,
         user_uuid -> Text,
-        #[sql_name = "type"]
-        type_ -> Integer,
+        atype -> Integer,
         enabled -> Bool,
         data -> Text,
     }
@@ -109,7 +106,7 @@ table! {
         salt -> Binary,
         password_iterations -> Integer,
         password_hint -> Nullable<Text>,
-        key -> Text,
+        akey -> Text,
         private_key -> Nullable<Text>,
         public_key -> Nullable<Text>,
         totp_secret -> Nullable<Text>,
@@ -136,10 +133,9 @@ table! {
         user_uuid -> Text,
         org_uuid -> Text,
         access_all -> Bool,
-        key -> Text,
+        akey -> Text,
         status -> Integer,
-        #[sql_name = "type"]
-        type_ -> Integer,
+        atype -> Integer,
     }
 }
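With the #[sql_name = "type"] indirection gone, both schemas now expose atype/akey directly, so one Queryable struct deserializes rows from either backend. A sketch with field names taken from the twofactor table above (the struct name is hypothetical; the real model lives in src/db/models/two_factor.rs earlier in this diff):

use diesel::Queryable;

// Field order must match the column order declared in the table! macro.
#[derive(Queryable)]
pub struct TwoFactorRow {
    pub uuid: String,
    pub user_uuid: String,
    pub atype: i32,
    pub enabled: bool,
    pub data: String,
}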
46 src/main.rs
@@ -45,6 +45,12 @@ fn main() {
         init_logging().ok();
     }

+    #[cfg(all(feature = "sqlite", feature = "mysql"))]
+    compile_error!("Can't enable both backends");
+
+    #[cfg(not(any(feature = "sqlite", feature = "mysql")))]
+    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
+
     check_db();
     check_rsa_keys();
     check_web_vault();
@@ -123,24 +129,28 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {

 fn check_db() {
     let url = CONFIG.database_url();
-    let path = Path::new(&url);
+    if cfg!(feature = "sqlite") {
+        let path = Path::new(&url);

-    if let Some(parent) = path.parent() {
-        use std::fs;
-        if fs::create_dir_all(parent).is_err() {
-            error!("Error creating database directory");
-            exit(1);
+        if let Some(parent) = path.parent() {
+            use std::fs;
+            if fs::create_dir_all(parent).is_err() {
+                error!("Error creating database directory");
+                exit(1);
+            }
         }
-    }

-    // Turn on WAL in SQLite
-    if CONFIG.enable_db_wal() {
-        use diesel::RunQueryDsl;
-        let connection = db::get_connection().expect("Can't conect to DB");
-        diesel::sql_query("PRAGMA journal_mode=wal")
-            .execute(&connection)
-            .expect("Failed to turn on WAL");
+        // Turn on WAL in SQLite
+        if CONFIG.enable_db_wal() {
+            use diesel::RunQueryDsl;
+            let connection = db::get_connection().expect("Can't conect to DB");
+            diesel::sql_query("PRAGMA journal_mode=wal")
+                .execute(&connection)
+                .expect("Failed to turn on WAL");
+        }
     }
+    println!("{}", url.to_string());
     db::get_connection().expect("Can't connect to DB");
 }

 fn check_rsa_keys() {
@@ -207,7 +217,11 @@ fn check_web_vault() {
 // https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
 #[allow(unused_imports)]
 mod migrations {
-    embed_migrations!();
+
+    #[cfg(feature = "sqlite")]
+    embed_migrations!("migrations/sqlite");
+    #[cfg(feature = "mysql")]
+    embed_migrations!("migrations/mysql");

     pub fn run_migrations() {
         // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
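The body of run_migrations() is cut off by the diff context above; a hedged sketch of how the feature-gated embedded migrations are typically applied with diesel_migrations 1.4 (the error handling here is illustrative, not taken from this commit):

    pub fn run_migrations() {
        // `embedded_migrations` is generated by whichever embed_migrations!
        // invocation was compiled in: migrations/sqlite or migrations/mysql.
        let connection = crate::db::get_connection().expect("Can't connect to DB");
        embedded_migrations::run_with_output(&connection, &mut std::io::stdout())
            .expect("Error running migrations");
    }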