From 55d7c48b1da23770d8152724ad8b6c733739e64d Mon Sep 17 00:00:00 2001 From: BlackDex Date: Sun, 10 Jul 2022 16:39:38 +0200 Subject: [PATCH] Add more clippy checks for better code/readability A bit inspired by @paolobarbolini from this commit at lettre https://github.com/lettre/lettre/pull/784 . I added a few more clippy lints here, and fixed the resulting issues. Overall I think this could help in preventing future issues, and maybe even performance problems. It also makes some code a bit more clear. We could always add more if we want to, I left a few out which I think aren't that huge of an issue. Some like the `unused_async` are nice, which resulted in a few `async` removals. Some others are maybe a bit more aesthetic, like `string_to_string`, but I think it looks better to use `clone` in those cases instead of `to_string` while they already are a string. --- src/api/admin.rs | 2 +- src/api/core/ciphers.rs | 12 +++---- src/api/core/emergency_access.rs | 4 +-- src/api/core/sends.rs | 8 ++--- src/api/core/two_factor/authenticator.rs | 6 ++-- src/api/core/two_factor/yubikey.rs | 2 +- src/api/icons.rs | 45 ++++++++++++------------ src/db/models/device.rs | 10 +++--- src/db/models/send.rs | 2 +- src/db/models/user.rs | 2 +- src/main.rs | 30 ++++++++++++++-- src/util.rs | 14 ++------ 12 files changed, 77 insertions(+), 60 deletions(-) diff --git a/src/api/admin.rs b/src/api/admin.rs index 2565445a..19638ef3 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -427,7 +427,7 @@ async fn update_user_org_type(data: Json, _token: AdminToken, c } } - user_to_edit.atype = new_type as i32; + user_to_edit.atype = new_type; user_to_edit.save(&conn).await } diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index d70dd906..f8c4216d 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -913,8 +913,8 @@ async fn save_attachment( // In the v2 API, the attachment record has already been created, // so the size limit needs to be adjusted to account for 
that. let size_adjust = match &attachment { - None => 0, // Legacy API - Some(a) => a.file_size as i64, // v2 API + None => 0, // Legacy API + Some(a) => i64::from(a.file_size), // v2 API }; let size_limit = if let Some(ref user_uuid) = cipher.user_uuid { @@ -1494,7 +1494,7 @@ pub enum CipherSyncType { impl CipherSyncData { pub async fn new(user_uuid: &str, ciphers: &Vec, sync_type: CipherSyncType, conn: &DbConn) -> Self { // Generate a list of Cipher UUID's to be used during a query filter with an eq_any. - let cipher_uuids = stream::iter(ciphers).map(|c| c.uuid.to_string()).collect::>().await; + let cipher_uuids = stream::iter(ciphers).map(|c| c.uuid.clone()).collect::>().await; let mut cipher_folders: HashMap = HashMap::new(); let mut cipher_favorites: HashSet = HashSet::new(); @@ -1516,7 +1516,7 @@ impl CipherSyncData { // Generate a list of Cipher UUID's containing a Vec with one or more Attachment records let mut cipher_attachments: HashMap> = HashMap::new(); for attachment in Attachment::find_all_by_ciphers(&cipher_uuids, conn).await { - cipher_attachments.entry(attachment.cipher_uuid.to_string()).or_default().push(attachment); + cipher_attachments.entry(attachment.cipher_uuid.clone()).or_default().push(attachment); } // Generate a HashMap with the Cipher UUID as key and one or more Collection UUID's @@ -1528,14 +1528,14 @@ impl CipherSyncData { // Generate a HashMap with the Organization UUID as key and the UserOrganization record let user_organizations: HashMap = stream::iter(UserOrganization::find_by_user(user_uuid, conn).await) - .map(|uo| (uo.org_uuid.to_string(), uo)) + .map(|uo| (uo.org_uuid.clone(), uo)) .collect() .await; // Generate a HashMap with the User_Collections UUID as key and the CollectionUser record let user_collections: HashMap = stream::iter(CollectionUser::find_by_user(user_uuid, conn).await) - .map(|uc| (uc.collection_uuid.to_string(), uc)) + .map(|uc| (uc.collection_uuid.clone(), uc)) .collect() .await; diff --git 
a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs index 3c4b83c3..d01b599b 100644 --- a/src/api/core/emergency_access.rs +++ b/src/api/core/emergency_access.rs @@ -761,7 +761,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) { for mut emer in emergency_access_list { if emer.recovery_initiated_at.is_some() && Utc::now().naive_utc() - >= emer.recovery_initiated_at.unwrap() + Duration::days(emer.wait_time_days as i64) + >= emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days)) { emer.status = EmergencyAccessStatus::RecoveryApproved as i32; emer.save(&conn).await.expect("Cannot save emergency access on job"); @@ -812,7 +812,7 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) { for mut emer in emergency_access_list { if (emer.recovery_initiated_at.is_some() && Utc::now().naive_utc() - >= emer.recovery_initiated_at.unwrap() + Duration::days((emer.wait_time_days as i64) - 1)) + >= emer.recovery_initiated_at.unwrap() + Duration::days((i64::from(emer.wait_time_days)) - 1)) && (emer.last_notification_at.is_none() || (emer.last_notification_at.is_some() && Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1))) diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs index 9c53e936..f2fe73a3 100644 --- a/src/api/core/sends.rs +++ b/src/api/core/sends.rs @@ -95,7 +95,7 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c Ok(()) } -async fn create_send(data: SendData, user_uuid: String) -> ApiResult { +fn create_send(data: SendData, user_uuid: String) -> ApiResult { let data_val = if data.Type == SendType::Text as i32 { data.Text } else if data.Type == SendType::File as i32 { @@ -117,7 +117,7 @@ async fn create_send(data: SendData, user_uuid: String) -> ApiResult { ); } - let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc()).await; + let mut send = Send::new(data.Type, data.Name, data_str, data.Key, 
data.DeletionDate.naive_utc()); send.user_uuid = Some(user_uuid); send.notes = data.Notes; send.max_access_count = match data.MaxAccessCount { @@ -171,7 +171,7 @@ async fn post_send(data: JsonUpcase, headers: Headers, conn: DbConn, n err!("File sends should use /api/sends/file") } - let mut send = create_send(data, headers.user.uuid).await?; + let mut send = create_send(data, headers.user.uuid)?; send.save(&conn).await?; nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn).await).await; @@ -211,7 +211,7 @@ async fn post_send_file(data: Form>, headers: Headers, conn: DbCo None => SIZE_525_MB, }; - let mut send = create_send(model, headers.user.uuid).await?; + let mut send = create_send(model, headers.user.uuid)?; if send.atype != SendType::File as i32 { err!("Send content is not a file"); } diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs index 40ff233c..542651dd 100644 --- a/src/api/core/two_factor/authenticator.rs +++ b/src/api/core/two_factor/authenticator.rs @@ -139,7 +139,7 @@ pub async fn validate_totp_code( // The amount of steps back and forward in time // Also check if we need to disable time drifted TOTP codes. // If that is the case, we set the steps to 0 so only the current TOTP is valid. - let steps = !CONFIG.authenticator_disable_time_drift() as i64; + let steps = i64::from(!CONFIG.authenticator_disable_time_drift()); // Get the current system time in UNIX Epoch (UTC) let current_time = chrono::Utc::now(); @@ -154,7 +154,7 @@ pub async fn validate_totp_code( let generated = totp_custom::(30, 6, &decoded_secret, time); // Check the the given code equals the generated and if the time_step is larger then the one last used. - if generated == totp_code && time_step > twofactor.last_used as i64 { + if generated == totp_code && time_step > i64::from(twofactor.last_used) { // If the step does not equals 0 the time is drifted either server or client side. 
if step != 0 { warn!("TOTP Time drift detected. The step offset is {}", step); @@ -165,7 +165,7 @@ pub async fn validate_totp_code( twofactor.last_used = time_step as i32; twofactor.save(conn).await?; return Ok(()); - } else if generated == totp_code && time_step <= twofactor.last_used as i64 { + } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) { warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps); err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip)); } diff --git a/src/api/core/two_factor/yubikey.rs b/src/api/core/two_factor/yubikey.rs index c430d1ed..cadb04a9 100644 --- a/src/api/core/two_factor/yubikey.rs +++ b/src/api/core/two_factor/yubikey.rs @@ -147,7 +147,7 @@ async fn activate_yubikey(data: JsonUpcase, headers: Headers, verify_yubikey_otp(yubikey.to_owned()).map_res("Invalid Yubikey OTP provided")?; } - let yubikey_ids: Vec = yubikeys.into_iter().map(|x| (&x[..12]).to_owned()).collect(); + let yubikey_ids: Vec = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect(); let yubikey_metadata = YubikeyMetadata { Keys: yubikey_ids, diff --git a/src/api/icons.rs b/src/api/icons.rs index 90d306fb..6d4d3a5a 100644 --- a/src/api/icons.rs +++ b/src/api/icons.rs @@ -51,7 +51,7 @@ static CLIENT: Lazy = Lazy::new(|| { // Reuse the client between requests let client = get_reqwest_client_builder() - .cookie_provider(cookie_store.clone()) + .cookie_provider(Arc::clone(&cookie_store)) .timeout(Duration::from_secs(CONFIG.icon_download_timeout())) .default_headers(default_headers.clone()); @@ -77,7 +77,7 @@ static ICON_SIZE_REGEX: Lazy = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+ static ICON_BLACKLIST_REGEX: Lazy> = Lazy::new(dashmap::DashMap::new); async fn icon_redirect(domain: &str, template: &str) -> Option { - if !is_valid_domain(domain).await { + if !is_valid_domain(domain) { warn!("Invalid domain: {}", domain); return None; } @@ -123,7 +123,7 
@@ async fn icon_google(domain: String) -> Option { async fn icon_internal(domain: String) -> Cached<(ContentType, Vec)> { const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png"); - if !is_valid_domain(&domain).await { + if !is_valid_domain(&domain) { warn!("Invalid domain: {}", domain); return Cached::ttl( (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), @@ -144,7 +144,7 @@ async fn icon_internal(domain: String) -> Cached<(ContentType, Vec)> { /// /// This does some manual checks and makes use of Url to do some basic checking. /// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255. -async fn is_valid_domain(domain: &str) -> bool { +fn is_valid_domain(domain: &str) -> bool { const ALLOWED_CHARS: &str = "_-."; // If parsing the domain fails using Url, it will not work with reqwest. @@ -278,6 +278,7 @@ mod tests { use cached::proc_macro::cached; #[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)] +#[allow(clippy::unused_async)] // This is needed because cached causes a false-positive here. async fn is_domain_blacklisted(domain: &str) -> bool { if CONFIG.icon_blacklist_non_global_ips() { if let Ok(s) = lookup_host((domain, 0)).await { @@ -304,7 +305,7 @@ async fn is_domain_blacklisted(domain: &str) -> bool { // Generate the regex to store in too the Lazy Static HashMap. 
let blacklist_regex = Regex::new(&blacklist).unwrap(); let is_match = blacklist_regex.is_match(domain); - ICON_BLACKLIST_REGEX.insert(blacklist.to_string(), blacklist_regex); + ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex); is_match }; @@ -326,7 +327,7 @@ async fn get_icon(domain: &str) -> Option<(Vec, String)> { } if let Some(icon) = get_cached_icon(&path).await { - let icon_type = match get_icon_type(&icon).await { + let icon_type = match get_icon_type(&icon) { Some(x) => x, _ => "x-icon", }; @@ -416,7 +417,7 @@ impl Icon { } } -async fn get_favicons_node( +fn get_favicons_node( dom: InfallibleTokenizer, FaviconEmitter>, icons: &mut Vec, url: &url::Url, @@ -468,7 +469,7 @@ async fn get_favicons_node( } else { "" }; - let priority = get_icon_priority(full_href.as_str(), sizes).await; + let priority = get_icon_priority(full_href.as_str(), sizes); icons.push(Icon::new(priority, full_href.to_string())); } }; @@ -512,7 +513,7 @@ async fn get_icon_url(domain: &str) -> Result { tld = domain_parts.next_back().unwrap(), base = domain_parts.next_back().unwrap() ); - if is_valid_domain(&base_domain).await { + if is_valid_domain(&base_domain) { let sslbase = format!("https://{base_domain}"); let httpbase = format!("http://{base_domain}"); debug!("[get_icon_url]: Trying without subdomains '{base_domain}'"); @@ -523,7 +524,7 @@ async fn get_icon_url(domain: &str) -> Result { // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it. } else if is_ip.is_err() && domain.matches('.').count() < 2 { let www_domain = format!("www.{domain}"); - if is_valid_domain(&www_domain).await { + if is_valid_domain(&www_domain) { let sslwww = format!("https://{www_domain}"); let httpwww = format!("http://{www_domain}"); debug!("[get_icon_url]: Trying with www. 
prefix '{www_domain}'"); @@ -555,7 +556,7 @@ async fn get_icon_url(domain: &str) -> Result { let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec(); let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible(); - get_favicons_node(dom, &mut iconlist, &url).await; + get_favicons_node(dom, &mut iconlist, &url); } else { // Add the default favicon.ico to the list with just the given domain iconlist.push(Icon::new(35, format!("{ssldomain}/favicon.ico"))); @@ -603,12 +604,12 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result u8 { +fn get_icon_priority(href: &str, sizes: &str) -> u8 { // Check if there is a dimension set - let (width, height) = parse_sizes(sizes).await; + let (width, height) = parse_sizes(sizes); // Check if there is a size given if width != 0 && height != 0 { @@ -650,11 +651,11 @@ async fn get_icon_priority(href: &str, sizes: &str) -> u8 { /// /// # Example /// ``` -/// let (width, height) = parse_sizes("64x64").await; // (64, 64) -/// let (width, height) = parse_sizes("x128x128").await; // (128, 128) -/// let (width, height) = parse_sizes("32").await; // (0, 0) +/// let (width, height) = parse_sizes("64x64"); // (64, 64) +/// let (width, height) = parse_sizes("x128x128"); // (128, 128) +/// let (width, height) = parse_sizes("32"); // (0, 0) /// ``` -async fn parse_sizes(sizes: &str) -> (u16, u16) { +fn parse_sizes(sizes: &str) -> (u16, u16) { let mut width: u16 = 0; let mut height: u16 = 0; @@ -698,7 +699,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { // Also check if the size is atleast 67 bytes, which seems to be the smallest png i could create if body.len() >= 67 { // Check if the icon type is allowed, else try an icon from the list. 
- icon_type = get_icon_type(&body).await; + icon_type = get_icon_type(&body); if icon_type.is_none() { debug!("Icon from {} data:image uri, is not a valid image type", domain); continue; @@ -716,7 +717,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net) // Check if the icon type is allowed, else try an icon from the list. - icon_type = get_icon_type(&buffer).await; + icon_type = get_icon_type(&buffer); if icon_type.is_none() { buffer.clear(); debug!("Icon from {}, is not a valid image type", icon.href); @@ -751,7 +752,7 @@ async fn save_icon(path: &str, icon: &[u8]) { } } -async fn get_icon_type(bytes: &[u8]) -> Option<&'static str> { +fn get_icon_type(bytes: &[u8]) -> Option<&'static str> { match bytes { [137, 80, 78, 71, ..] => Some("png"), [0, 0, 1, 0, ..] => Some("x-icon"), diff --git a/src/db/models/device.rs b/src/db/models/device.rs index afd494c7..ce6fa638 100644 --- a/src/db/models/device.rs +++ b/src/db/models/device.rs @@ -87,11 +87,11 @@ impl Device { nbf: time_now.timestamp(), exp: (time_now + *DEFAULT_VALIDITY).timestamp(), iss: JWT_LOGIN_ISSUER.to_string(), - sub: user.uuid.to_string(), + sub: user.uuid.clone(), premium: true, - name: user.name.to_string(), - email: user.email.to_string(), + name: user.name.clone(), + email: user.email.clone(), email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(), orgowner, @@ -99,8 +99,8 @@ impl Device { orguser, orgmanager, - sstamp: user.security_stamp.to_string(), - device: self.uuid.to_string(), + sstamp: user.security_stamp.clone(), + device: self.uuid.clone(), scope, amr: vec!["Application".into()], }; diff --git a/src/db/models/send.rs b/src/db/models/send.rs index 8fff5143..687571d5 100644 --- a/src/db/models/send.rs +++ b/src/db/models/send.rs @@ -45,7 +45,7 @@ pub enum SendType { } impl Send { - pub async fn new(atype: i32, name: String, 
data: String, akey: String, deletion_date: NaiveDateTime) -> Self { + pub fn new(atype: i32, name: String, data: String, akey: String, deletion_date: NaiveDateTime) -> Self { let now = Utc::now().naive_utc(); Self { diff --git a/src/db/models/user.rs b/src/db/models/user.rs index 80a0cb8e..a8d27060 100644 --- a/src/db/models/user.rs +++ b/src/db/models/user.rs @@ -171,7 +171,7 @@ impl User { pub fn set_stamp_exception(&mut self, route_exception: Vec) { let stamp_exception = UserStampException { routes: route_exception, - security_stamp: self.security_stamp.to_string(), + security_stamp: self.security_stamp.clone(), expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(), }; self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default()); diff --git a/src/main.rs b/src/main.rs index b4f75c44..313fe77b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,30 @@ -#![forbid(unsafe_code)] -#![warn(rust_2018_idioms)] -#![warn(rust_2021_compatibility)] +#![forbid(unsafe_code, non_ascii_idents)] +#![deny( + rust_2018_idioms, + rust_2021_compatibility, + noop_method_call, + pointer_structural_match, + trivial_casts, + trivial_numeric_casts, + unused_import_braces, + clippy::cast_lossless, + clippy::clone_on_ref_ptr, + clippy::equatable_if_let, + clippy::float_cmp_const, + clippy::inefficient_to_string, + clippy::linkedlist, + clippy::macro_use_imports, + clippy::manual_assert, + clippy::match_wildcard_for_single_variants, + clippy::mem_forget, + clippy::string_add_assign, + clippy::string_to_string, + clippy::unnecessary_join, + clippy::unnecessary_self_imports, + clippy::unused_async, + clippy::verbose_file_reads, + clippy::zero_sized_map_values +)] #![cfg_attr(feature = "unstable", feature(ip))] // The recursion_limit is mainly triggered by the json!() macro. // The more key/value pairs there are the more recursion occurs. 
diff --git a/src/util.rs b/src/util.rs index 82cb328e..e00583b6 100644 --- a/src/util.rs +++ b/src/util.rs @@ -300,7 +300,7 @@ impl Fairing for BetterLogging { // use std::{ fs::{self, File}, - io::{Read, Result as IOResult}, + io::Result as IOResult, path::Path, }; @@ -309,11 +309,7 @@ pub fn file_exists(path: &str) -> bool { } pub fn read_file(path: &str) -> IOResult> { - let mut contents: Vec = Vec::new(); - - let mut file = File::open(Path::new(path))?; - file.read_to_end(&mut contents)?; - + let contents = fs::read(Path::new(path))?; Ok(contents) } @@ -326,11 +322,7 @@ pub fn write_file(path: &str, content: &[u8]) -> Result<(), crate::error::Error> } pub fn read_file_string(path: &str) -> IOResult { - let mut contents = String::new(); - - let mut file = File::open(Path::new(path))?; - file.read_to_string(&mut contents)?; - + let contents = fs::read_to_string(Path::new(path))?; Ok(contents) }