Compare commits


8 Commits

Author SHA1 Message Date
dullbananas f56a9918ab
Merge 691bce0e71 into b4f9ef24a5 2024-05-08 19:38:17 -07:00
Nutomic b4f9ef24a5
Dont exit early when running only scheduled tasks (#4707)
* Dont exit early when running only scheduled tasks (fixes #4709)

* fix
2024-05-08 14:56:44 +02:00
Nutomic 866d752a3c
Instance.preferred_username should be optional (fixes #4701) (#4713) 2024-05-08 08:01:04 -04:00
Nutomic e0b1d0553d
Add timeout for processing incoming activities (#4708)
* Add timeout for processing incoming activities

* move to const
2024-05-08 08:00:55 -04:00
Dull Bananas 691bce0e71 stuff 2024-05-04 14:00:39 +00:00
dullbananas f2a6d73682
rename 2024-04-28 08:26:28 -07:00
dullbananas 277524298b
Merge branch 'LemmyNet:main' into migration-runner 2024-04-28 07:11:17 -07:00
dullbananas ca271eacf5
Update schema.rs 2024-04-20 14:31:33 -07:00
11 changed files with 1679 additions and 2042 deletions

File diff suppressed because it is too large

View File

@@ -20,7 +20,8 @@ use lemmy_db_schema::{
};
use lemmy_utils::error::{LemmyErrorType, LemmyResult};
use serde::{Deserialize, Serialize};
use std::ops::Deref;
use std::{ops::Deref, time::Duration};
use tokio::time::timeout;
use url::Url;
mod comment;
@@ -30,13 +31,22 @@ mod post;
pub mod routes;
pub mod site;
const INCOMING_ACTIVITY_TIMEOUT: Duration = Duration::from_secs(9);
pub async fn shared_inbox(
request: HttpRequest,
body: Bytes,
data: Data<LemmyContext>,
) -> LemmyResult<HttpResponse> {
receive_activity::<SharedInboxActivities, UserOrCommunity, LemmyContext>(request, body, &data)
let receive_fut =
receive_activity::<SharedInboxActivities, UserOrCommunity, LemmyContext>(request, body, &data);
// Set a timeout shorter than `REQWEST_TIMEOUT` for processing incoming activities. This is to
// avoid taking a long time to process an incoming activity when a required data fetch times out.
// In this case our own instance would timeout and be marked as dead by the sender. Better to
// consider the activity broken and move on.
timeout(INCOMING_ACTIVITY_TIMEOUT, receive_fut)
.await
.map_err(|_| LemmyErrorType::InboxTimeout)?
}
/// Convert the data to json and turn it into an HTTP Response with the correct ActivityPub
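
For context, a minimal standalone sketch (hypothetical names, not Lemmy code) of the pattern this hunk introduces: `tokio::time::timeout` races a future against a deadline and yields an error that can be mapped to an application error such as `InboxTimeout`.

    use std::time::Duration;
    use tokio::time::timeout;

    // Hypothetical stand-in for processing one incoming activity.
    async fn process_activity() -> Result<(), String> {
        tokio::time::sleep(Duration::from_secs(30)).await;
        Ok(())
    }

    #[tokio::main]
    async fn main() {
        // If the inner future does not finish within the limit, `timeout`
        // resolves to `Err(Elapsed)` and the inner future is dropped.
        match timeout(Duration::from_secs(9), process_activity()).await {
            Ok(result) => println!("finished in time: {result:?}"),
            Err(_) => println!("timed out, treating the activity as broken"),
        }
    }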

View File

@@ -100,7 +100,7 @@ impl Object for ApubSite {
kind: ApplicationType::Application,
id: self.id().into(),
name: self.name.clone(),
preferred_username: data.domain().to_string(),
preferred_username: Some(data.domain().to_string()),
content: self.sidebar.as_ref().map(|d| markdown_to_html(d)),
source: self.sidebar.clone().map(Source::new),
summary: self.description.clone(),

View File

@@ -22,7 +22,7 @@ pub struct Instance {
/// site name
pub(crate) name: String,
/// instance domain, necessary for mastodon authorized fetch
pub(crate) preferred_username: String,
pub(crate) preferred_username: Option<String>,
pub(crate) inbox: Url,
/// mandatory field in activitypub, lemmy currently serves an empty outbox
pub(crate) outbox: Url,
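
As an aside, a minimal sketch (plain serde types, not the actual Lemmy or activitypub-federation structs) of why `Option<String>` is what lets the field be absent in incoming JSON:

    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    struct Actor {
        name: String,
        // A field of type `Option` that is missing from the JSON becomes `None`
        // instead of failing with a "missing field" error.
        preferred_username: Option<String>,
    }

    fn main() -> Result<(), serde_json::Error> {
        // A remote instance that omits `preferred_username` entirely.
        let json = r#"{ "name": "example.org" }"#;
        let actor: Actor = serde_json::from_str(json)?;
        assert_eq!(actor.preferred_username, None);
        println!("{actor:?}");
        Ok(())
    }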

View File

@@ -800,6 +800,12 @@ diesel::table! {
}
}
diesel::table! {
previously_run_sql (content) {
content -> Text,
}
}
diesel::table! {
private_message (id) {
id -> Int4,

View File

@@ -9,15 +9,6 @@ const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
/// This SQL code sets up the `r` schema, which contains things that can be safely dropped and replaced
/// instead of being changed using migrations. It may not create or modify things outside of the `r` schema
/// (indicated by `r.` before the name), unless a comment says otherwise.
///
/// Currently, this code is only run after the server starts and there's at least 1 pending migration
/// to run. This means every time you change something here, you must also create a migration (a blank
/// up.sql file works fine). This behavior will be removed when we implement a better way to avoid
/// useless schema updates and locks.
///
/// If you add something that depends on something (such as a table) created in a new migration, then down.sql
/// must use `CASCADE` when dropping it. This doesn't need to be fixed in old migrations because the
/// "replaceable-schema" migration runs `DROP SCHEMA IF EXISTS r CASCADE` in down.sql.
const REPLACEABLE_SCHEMA: &[&str] = &[
"DROP SCHEMA IF EXISTS r CASCADE;",
"CREATE SCHEMA r;",
@@ -26,17 +17,25 @@ const REPLACEABLE_SCHEMA: &[&str] = &[
];
pub fn run(db_url: &str) -> Result<(), LemmyError> {
let test_enabled = std::env::var("LEMMY_TEST_MIGRATIONS")
.map(|s| !s.is_empty())
.unwrap_or(false);
// Migrations don't support async connection
let mut conn = PgConnection::establish(db_url).with_context(|| "Error connecting to database")?;
// Run all pending migrations except for the newest one, then run the newest one in the same transaction
as `REPLACEABLE_SCHEMA`. This code will become less hacky when the conditional setup of things in
// `REPLACEABLE_SCHEMA` is done without using the number of pending migrations.
info!("Running Database migrations (This may take a long time)...");
let migrations = conn
let unfiltered_migrations = conn
.pending_migrations(MIGRATIONS)
.map_err(|e| anyhow::anyhow!("Couldn't determine pending migrations: {e}"))?;
for migration in migrations.iter().rev().skip(1).rev() {
// Does not include the "forbid_diesel_cli" migration
let migrations = unfiltered_migrations.iter().filter(|m| m.name().version() != "000000000000000".into());
conn.transaction::<_, LemmyError, _>(|conn|) // left off here
for migration in migrations.clone() {
conn
.run_migration(migration)
.map_err(|e| anyhow::anyhow!("Couldn't run DB Migrations: {e}"))?;
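
The `conn.transaction` wrapper above is left unfinished ("left off here"). Purely as a hedged illustration of the stated intent, here is one way the filtered migrations and the replaceable schema might run inside a single transaction, assuming diesel's `Connection::transaction`, `MigrationHarness::run_migration`, and `SimpleConnection::batch_execute`; the helper name, signature, and error mapping are assumptions, not part of this diff.

    use diesel::{
        connection::SimpleConnection, migration::Migration, pg::Pg, Connection, PgConnection,
    };
    use diesel_migrations::MigrationHarness;
    use lemmy_utils::error::LemmyError;

    // Hypothetical helper: run the already-filtered migrations and rebuild the
    // replaceable `r` schema atomically.
    fn run_in_one_transaction(
        conn: &mut PgConnection,
        migrations: &[Box<dyn Migration<Pg>>],
        replaceable_schema: &[&str],
    ) -> Result<(), LemmyError> {
        conn.transaction::<_, LemmyError, _>(|conn| {
            for migration in migrations {
                conn
                    .run_migration(migration.as_ref())
                    .map_err(|e| anyhow::anyhow!("Couldn't run DB Migrations: {e}"))?;
            }
            conn
                .batch_execute(&replaceable_schema.join("\n"))
                .map_err(|e| anyhow::anyhow!("Couldn't run replaceable schema: {e}"))?;
            Ok(())
        })
    }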

View File

@@ -175,6 +175,7 @@ pub enum LemmyErrorType {
InvalidBotAction,
CantBlockLocalInstance,
UrlWithoutDomain,
InboxTimeout,
Unknown(String),
}

View File

@@ -0,0 +1,6 @@
DO $$
BEGIN
RAISE NOTICE 'migrations must be managed using lemmy_server instead of diesel CLI';
END
$$;

View File

@@ -0,0 +1 @@
drop table previously_run_sql;

View File

@@ -0,0 +1,4 @@
drop schema if exists r cascade;
create table previously_run_sql (content text primary key);
insert into previously_run_sql (content) values ('');
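
How this table will be consumed is not shown in this diff. Purely as an assumption, the runner might compare the stored content with the replaceable-schema SQL it is about to run and skip the rebuild when they match; a hedged diesel sketch (the schema path and helper are hypothetical):

    use diesel::prelude::*;
    use lemmy_db_schema::schema::previously_run_sql;

    // Hypothetical helper: true when the replaceable-schema SQL matches what was
    // last run, so the rebuild (and its locks) can be skipped.
    fn replaceable_schema_is_current(conn: &mut PgConnection, new_sql: &str) -> QueryResult<bool> {
        let stored: String = previously_run_sql::table
            .select(previously_run_sql::content)
            .first(conn)?;
        Ok(stored == new_sql)
    }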

View File

@@ -160,10 +160,10 @@ pub async fn start_lemmy_server(args: CmdArgs) -> LemmyResult<()> {
rate_limit_cell.clone(),
);
if !args.disable_scheduled_tasks {
let scheduled_tasks = (!args.disable_scheduled_tasks).then(|| {
// Schedules various cleanup tasks for the DB
let _scheduled_tasks = tokio::task::spawn(scheduled_tasks::setup(context.clone()));
}
tokio::task::spawn(scheduled_tasks::setup(context.clone()))
});
if let Some(prometheus) = SETTINGS.prometheus.clone() {
serve_prometheus(prometheus, context.clone())?;
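
A minimal standalone sketch (hypothetical task body, not Lemmy code) of the pattern in this hunk: `bool::then` turns the CLI flag into an `Option<JoinHandle>`, so the handle's presence can count toward the shutdown check shown in the next hunk.

    use tokio::time::{sleep, Duration};

    #[tokio::main]
    async fn main() {
        let disable_scheduled_tasks = false;

        // `Some(handle)` when the task is spawned, `None` when it is disabled.
        let scheduled_tasks = (!disable_scheduled_tasks).then(|| {
            tokio::spawn(async {
                loop {
                    // Stand-in for periodic DB cleanup work.
                    sleep(Duration::from_secs(60)).await;
                }
            })
        });

        // Counting the handle here keeps the process alive when only scheduled
        // tasks are enabled, instead of exiting early.
        if scheduled_tasks.is_some() {
            println!("scheduled tasks running; waiting for a shutdown signal");
        }
    }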
@@ -218,7 +218,7 @@ pub async fn start_lemmy_server(args: CmdArgs) -> LemmyResult<()> {
let mut interrupt = tokio::signal::unix::signal(SignalKind::interrupt())?;
let mut terminate = tokio::signal::unix::signal(SignalKind::terminate())?;
if server.is_some() || federate.is_some() {
if server.is_some() || federate.is_some() || scheduled_tasks.is_some() {
tokio::select! {
_ = tokio::signal::ctrl_c() => {
tracing::warn!("Received ctrl-c, shutting down gracefully...");