2018-12-07 02:05:45 +01:00
#![ feature(proc_macro_hygiene, decl_macro, vec_remove_item, try_trait) ]
2019-02-03 00:22:18 +01:00
#![ recursion_limit = " 256 " ]
2018-10-10 20:40:39 +02:00
2018-12-30 23:34:31 +01:00
#[ macro_use ]
extern crate rocket ;
#[ macro_use ]
extern crate serde_derive ;
#[ macro_use ]
extern crate serde_json ;
#[ macro_use ]
extern crate log ;
#[ macro_use ]
extern crate diesel ;
#[ macro_use ]
extern crate diesel_migrations ;
#[ macro_use ]
extern crate lazy_static ;
#[ macro_use ]
extern crate derive_more ;
#[ macro_use ]
extern crate num_derive ;
2019-01-11 14:18:13 +01:00
use rocket ::{ fairing ::AdHoc , Rocket } ;
2019-01-13 01:39:29 +01:00
2018-12-30 23:34:31 +01:00
use std ::{
path ::Path ,
process ::{ exit , Command } ,
} ;
2018-02-10 01:00:55 +01:00
2018-12-30 23:34:31 +01:00
#[ macro_use ]
mod error ;
2018-02-10 01:00:55 +01:00
mod api ;
mod auth ;
2019-01-25 18:23:51 +01:00
mod config ;
2018-12-30 23:34:31 +01:00
mod crypto ;
mod db ;
2018-08-15 08:32:19 +02:00
mod mail ;
2018-12-30 23:34:31 +01:00
mod util ;
2018-02-10 01:00:55 +01:00
2019-01-25 18:23:51 +01:00
pub use config ::CONFIG ;
2019-02-14 02:03:37 +01:00
pub use error ::{ Error , MapResult } ;
2019-01-25 18:23:51 +01:00
2019-02-13 00:03:16 +01:00
fn launch_rocket ( ) {
// Create Rocket object, this stores current log level and sets it's own
let rocket = rocket ::ignite ( ) ;
// If we aren't logging the mounts, we force the logging level down
if ! CONFIG . log_mounts ( ) {
log ::set_max_level ( log ::LevelFilter ::Warn ) ;
}
let rocket = rocket
2018-02-10 01:00:55 +01:00
. mount ( " / " , api ::web_routes ( ) )
. mount ( " /api " , api ::core_routes ( ) )
2018-12-23 22:37:02 +01:00
. mount ( " /admin " , api ::admin_routes ( ) )
2018-02-10 01:00:55 +01:00
. mount ( " /identity " , api ::identity_routes ( ) )
. mount ( " /icons " , api ::icons_routes ( ) )
2019-02-13 00:03:16 +01:00
. mount ( " /notifications " , api ::notifications_routes ( ) ) ;
// Force the level up for the fairings, managed state and lauch
if ! CONFIG . log_mounts ( ) {
log ::set_max_level ( log ::LevelFilter ::max ( ) ) ;
}
let rocket = rocket
2018-02-10 01:00:55 +01:00
. manage ( db ::init_pool ( ) )
2018-08-30 17:43:46 +02:00
. manage ( api ::start_notification_server ( ) )
2018-12-23 22:37:02 +01:00
. attach ( util ::AppHeaders ( ) )
2019-02-13 00:03:16 +01:00
. attach ( AdHoc ::on_launch ( " Launch Info " , launch_info ) ) ;
// Launch and print error if there is one
// The launch will restore the original logging level
error! ( " Launch error {:#?} " , rocket . launch ( ) ) ;
2018-02-10 01:00:55 +01:00
}
// Embed the migrations from the migrations folder into the application
// This way, the program automatically migrates the database to the latest version
// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
2018-06-12 17:24:29 +02:00
#[ allow(unused_imports) ]
mod migrations {
embed_migrations! ( ) ;
pub fn run_migrations ( ) {
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
2018-12-07 02:05:45 +01:00
let connection = crate ::db ::get_connection ( ) . expect ( " Can't conect to DB " ) ;
2018-06-12 17:24:29 +02:00
use std ::io ::stdout ;
embedded_migrations ::run_with_output ( & connection , & mut stdout ( ) ) . expect ( " Can't run migrations " ) ;
}
}
2018-02-10 01:00:55 +01:00
fn main ( ) {
2019-01-25 18:23:51 +01:00
if CONFIG . extended_logging ( ) {
2018-12-06 20:35:25 +01:00
init_logging ( ) . ok ( ) ;
}
2018-05-12 22:55:18 +02:00
check_db ( ) ;
check_rsa_keys ( ) ;
2018-09-13 20:59:51 +02:00
check_web_vault ( ) ;
2018-08-30 17:43:46 +02:00
migrations ::run_migrations ( ) ;
2018-02-10 01:00:55 +01:00
2019-02-13 00:03:16 +01:00
launch_rocket ( ) ;
2018-02-10 01:00:55 +01:00
}
2018-12-06 20:35:25 +01:00
fn init_logging ( ) -> Result < ( ) , fern ::InitError > {
let mut logger = fern ::Dispatch ::new ( )
2018-12-30 23:34:31 +01:00
. format ( | out , message , record | {
out . finish ( format_args! (
" {}[{}][{}] {} " ,
chrono ::Local ::now ( ) . format ( " [%Y-%m-%d %H:%M:%S] " ) ,
record . target ( ) ,
record . level ( ) ,
message
) )
} )
. level ( log ::LevelFilter ::Debug )
. level_for ( " hyper " , log ::LevelFilter ::Warn )
2019-01-12 15:23:46 +01:00
. level_for ( " rustls " , log ::LevelFilter ::Warn )
2019-01-19 21:36:34 +01:00
. level_for ( " handlebars " , log ::LevelFilter ::Warn )
2018-12-30 23:34:31 +01:00
. level_for ( " ws " , log ::LevelFilter ::Info )
. level_for ( " multipart " , log ::LevelFilter ::Info )
2019-01-27 15:39:19 +01:00
. level_for ( " html5ever " , log ::LevelFilter ::Info )
2018-12-30 23:34:31 +01:00
. chain ( std ::io ::stdout ( ) ) ;
2018-12-06 20:35:25 +01:00
2019-01-25 18:23:51 +01:00
if let Some ( log_file ) = CONFIG . log_file ( ) {
2018-12-06 20:35:25 +01:00
logger = logger . chain ( fern ::log_file ( log_file ) ? ) ;
}
logger = chain_syslog ( logger ) ;
logger . apply ( ) ? ;
Ok ( ( ) )
}
/// No-op fallback used when the `enable_syslog` feature is disabled:
/// returns the dispatcher unchanged, attaching no syslog sink.
#[cfg(not(feature = "enable_syslog"))]
fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
    logger
}
2018-12-06 20:35:25 +01:00
/// Attach a syslog sink to the dispatcher when the `enable_syslog` feature is
/// enabled. If connecting to the local syslog socket fails, the error is
/// logged and the dispatcher is returned unchanged.
#[cfg(feature = "enable_syslog")]
fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
    let formatter = syslog::Formatter3164 {
        facility: syslog::Facility::LOG_USER,
        hostname: None,
        process: "bitwarden_rs".into(),
        pid: 0,
    };

    match syslog::unix(formatter) {
        Ok(writer) => logger.chain(writer),
        Err(err) => {
            error!("Unable to connect to syslog: {:?}", err);
            logger
        }
    }
}
2018-05-12 22:55:18 +02:00
fn check_db ( ) {
2019-01-25 18:23:51 +01:00
let url = CONFIG . database_url ( ) ;
let path = Path ::new ( & url ) ;
2018-05-12 22:55:18 +02:00
if let Some ( parent ) = path . parent ( ) {
use std ::fs ;
if fs ::create_dir_all ( parent ) . is_err ( ) {
2018-12-06 20:35:25 +01:00
error! ( " Error creating database directory " ) ;
2018-05-12 22:55:18 +02:00
exit ( 1 ) ;
}
}
2018-07-31 16:07:17 +02:00
// Turn on WAL in SQLite
2019-02-18 11:48:48 +01:00
if CONFIG . enable_db_wal ( ) {
use diesel ::RunQueryDsl ;
let connection = db ::get_connection ( ) . expect ( " Can't conect to DB " ) ;
diesel ::sql_query ( " PRAGMA journal_mode=wal " )
. execute ( & connection )
. expect ( " Failed to turn on WAL " ) ;
}
2018-05-12 22:55:18 +02:00
}
2018-02-17 01:13:02 +01:00
fn check_rsa_keys ( ) {
// If the RSA keys don't exist, try to create them
2019-01-25 18:23:51 +01:00
if ! util ::file_exists ( & CONFIG . private_rsa_key ( ) ) | | ! util ::file_exists ( & CONFIG . public_rsa_key ( ) ) {
2018-12-06 20:35:25 +01:00
info! ( " JWT keys don't exist, checking if OpenSSL is available... " ) ;
2018-02-17 01:13:02 +01:00
2018-12-30 23:34:31 +01:00
Command ::new ( " openssl " ) . arg ( " version " ) . output ( ) . unwrap_or_else ( | _ | {
2018-12-06 20:35:25 +01:00
info! ( " Can't create keys because OpenSSL is not available, make sure it's installed and available on the PATH " ) ;
2018-02-17 01:13:02 +01:00
exit ( 1 ) ;
} ) ;
2018-12-06 20:35:25 +01:00
info! ( " OpenSSL detected, creating keys... " ) ;
2018-02-17 01:13:02 +01:00
2018-12-30 23:34:31 +01:00
let mut success = Command ::new ( " openssl " )
. arg ( " genrsa " )
. arg ( " -out " )
2019-01-25 18:23:51 +01:00
. arg ( & CONFIG . private_rsa_key_pem ( ) )
2018-12-30 23:34:31 +01:00
. output ( )
. expect ( " Failed to create private pem file " )
. status
. success ( ) ;
success & = Command ::new ( " openssl " )
. arg ( " rsa " )
. arg ( " -in " )
2019-01-25 18:23:51 +01:00
. arg ( & CONFIG . private_rsa_key_pem ( ) )
2018-12-30 23:34:31 +01:00
. arg ( " -outform " )
. arg ( " DER " )
. arg ( " -out " )
2019-01-25 18:23:51 +01:00
. arg ( & CONFIG . private_rsa_key ( ) )
2018-12-30 23:34:31 +01:00
. output ( )
. expect ( " Failed to create private der file " )
. status
. success ( ) ;
success & = Command ::new ( " openssl " )
. arg ( " rsa " )
. arg ( " -in " )
2019-01-25 18:23:51 +01:00
. arg ( & CONFIG . private_rsa_key ( ) )
2018-12-30 23:34:31 +01:00
. arg ( " -inform " )
. arg ( " DER " )
2018-02-17 01:13:02 +01:00
. arg ( " -RSAPublicKey_out " )
2018-12-30 23:34:31 +01:00
. arg ( " -outform " )
. arg ( " DER " )
. arg ( " -out " )
2019-01-25 18:23:51 +01:00
. arg ( & CONFIG . public_rsa_key ( ) )
2018-12-30 23:34:31 +01:00
. output ( )
. expect ( " Failed to create public der file " )
. status
. success ( ) ;
2018-02-17 01:13:02 +01:00
if success {
2018-12-06 20:35:25 +01:00
info! ( " Keys created correctly. " ) ;
2018-02-17 01:13:02 +01:00
} else {
2018-12-06 20:35:25 +01:00
error! ( " Error creating keys, exiting... " ) ;
2018-02-17 01:13:02 +01:00
exit ( 1 ) ;
}
}
}
2018-04-24 22:38:23 +02:00
fn check_web_vault ( ) {
2019-01-25 18:23:51 +01:00
if ! CONFIG . web_vault_enabled ( ) {
2018-06-12 21:09:42 +02:00
return ;
}
2019-01-25 18:23:51 +01:00
let index_path = Path ::new ( & CONFIG . web_vault_folder ( ) ) . join ( " index.html " ) ;
2018-04-24 22:38:23 +02:00
if ! index_path . exists ( ) {
2019-01-29 21:44:46 +01:00
error! ( " Web vault is not found. To install it, please follow the steps in https://github.com/dani-garcia/bitwarden_rs/wiki/Building-binary#install-the-web-vault " ) ;
2018-04-24 22:38:23 +02:00
exit ( 1 ) ;
}
}
2019-02-12 22:47:00 +01:00
fn launch_info ( _ : & Rocket ) {
// Remove the target to keep the message more centered
macro_rules ! w { ( $l :literal $(, $e :expr ) * ) = > { warn! ( target : " " , $l , $( $e ) , * ) } }
w! ( " /-------------------------------------------------------------------- \\ " ) ;
w! ( " | Starting Bitwarden_RS | " ) ;
if let Some ( version ) = option_env! ( " GIT_VERSION " ) {
w! ( " |{:^68}| " , format! ( " Version {} " , version ) ) ;
}
w! ( " |--------------------------------------------------------------------| " ) ;
w! ( " | This is an *unofficial* Bitwarden implementation, DO NOT use the | " ) ;
w! ( " | official channels to report bugs/features, regardless of client. | " ) ;
w! ( " | Report URL: https://github.com/dani-garcia/bitwarden_rs/issues/new | " ) ;
w! ( " \\ --------------------------------------------------------------------/ " ) ;
2019-01-11 14:18:13 +01:00
}