Fixing clippy

update_docker_images
Dessalines 2021-11-09 17:31:28 -05:00
parent 5b3be5c102
commit f035af3738
4 changed files with 7 additions and 6 deletions

@@ -25,7 +25,7 @@ lazy_static! {
   static ref CLIENT: Client = Client::builder()
     .user_agent(build_user_agent(&Settings::get()))
     .build()
-    .unwrap();
+    .expect("Couldn't build client");
 }

 /// We store Url on the heap because it is quite large (88 bytes).

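The hunk above swaps a bare .unwrap() for .expect(...) on the lazily built HTTP client, which is the change clippy's unwrap-related lints push toward: the panic message then says what actually failed. A minimal standalone sketch of the same pattern, using a placeholder user-agent string rather than Lemmy's build_user_agent(&Settings::get()):

// Sketch only; the user-agent value is a placeholder, not Lemmy's helper.
use lazy_static::lazy_static;
use reqwest::Client;

lazy_static! {
  static ref CLIENT: Client = Client::builder()
    .user_agent("example-agent/0.1")
    .build()
    // Still panics on failure, but with a readable message instead of
    // the bare `.unwrap()` that clippy flags.
    .expect("Couldn't build client");
}

fn main() {
  // Touch the lazy static so the builder actually runs.
  let _client: &Client = &*CLIENT;
}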
@@ -56,7 +56,7 @@ pub fn derive_activity_handler(input: proc_macro::TokenStream) -> proc_macro::To
     .collect();
   let attrs: &Vec<TokenStream> = &attrs
     .first()
-    .unwrap()
+    .expect("Could not decode first attribute from token stream")
     .tokens
     .clone()
     .into_iter()

@@ -65,7 +65,7 @@ async fn main() -> Result<(), LemmyError> {
   let pool2 = pool.clone();
   thread::spawn(move || {
-    scheduled_tasks::setup(pool2);
+    scheduled_tasks::setup(pool2).expect("Couldn't set up scheduled_tasks");
   });

   // Set up the rate limiter

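setup now returns a Result (see the last file below), but the closure handed to thread::spawn has no caller to propagate an error to with ?, so the commit settles for .expect(...) at the call site, which at least panics the background thread with a clear message. A self-contained sketch of that call-site shape, with stand-in types for the real pool and error:

use std::thread;

// Stand-ins for the real Lemmy types; only the call-site shape matters here.
type DbPool = ();
type LemmyError = std::io::Error;

fn setup(_pool: DbPool) -> Result<(), LemmyError> {
  Ok(())
}

fn main() {
  let pool: DbPool = ();
  let pool2 = pool;
  let handle = thread::spawn(move || {
    // `?` has nowhere to go inside this closure, so the error surfaces
    // as a panic with a descriptive message instead.
    setup(pool2).expect("Couldn't set up scheduled_tasks");
  });
  handle.join().expect("scheduled_tasks thread panicked");
}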
@@ -3,14 +3,15 @@ use clokwerk::{Scheduler, TimeUnits};
 // Import week days and WeekDay
 use diesel::{sql_query, PgConnection, RunQueryDsl};
 use lemmy_db_schema::{source::activity::Activity, DbPool};
+use lemmy_utils::LemmyError;
 use log::info;
 use std::{thread, time::Duration};

 /// Schedules various cleanup tasks for lemmy in a background thread
-pub fn setup(pool: DbPool) {
+pub fn setup(pool: DbPool) -> Result<(), LemmyError> {
   let mut scheduler = Scheduler::new();

-  let conn = pool.get().unwrap();
+  let conn = pool.get()?;
   active_counts(&conn);

   // On startup, reindex the tables non-concurrently
@@ -21,7 +22,7 @@ pub fn setup(pool: DbPool) {
     reindex_aggregates_tables(&conn, true);
   });

-  let conn = pool.get().unwrap();
+  let conn = pool.get()?;
   clear_old_activities(&conn);
   scheduler.every(1.weeks()).run(move || {
     clear_old_activities(&conn);
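This last file is the other half of the same change: giving setup a Result<(), LemmyError> return type so the pool.get().unwrap() calls can become pool.get()?. A simplified, runnable sketch of that refactor with stand-in pool and error types; the real code uses diesel's r2d2 pool, clokwerk's Scheduler, and lemmy_utils::LemmyError, and presumably ends the function with Ok(()):

// Stand-ins so the sketch compiles without a database or clokwerk.
#[derive(Debug)]
struct LemmyError;

struct DbConnection;
struct DbPool;

impl DbPool {
  // Stand-in for the pooled `get()`, which can fail just like r2d2's.
  fn get(&self) -> Result<DbConnection, LemmyError> {
    Ok(DbConnection)
  }
}

fn active_counts(_conn: &DbConnection) {}
fn clear_old_activities(_conn: &DbConnection) {}

// Before the commit this was `pub fn setup(pool: DbPool)` with
// `pool.get().unwrap()`; returning a Result lets `?` propagate instead.
pub fn setup(pool: DbPool) -> Result<(), LemmyError> {
  let conn = pool.get()?;
  active_counts(&conn);

  let conn = pool.get()?;
  clear_old_activities(&conn);

  // The real function also registers recurring clokwerk jobs here.
  Ok(())
}

fn main() {
  setup(DbPool).expect("Couldn't set up scheduled_tasks");
}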