From 5f7f96f25a27028058b6b747fba5afca2fb735f8 Mon Sep 17 00:00:00 2001 From: Froidefond Julien Date: Wed, 25 Mar 2026 08:15:04 +0100 Subject: [PATCH] chore: bump version to 2.3.0 --- Cargo.lock | 10 +- Cargo.toml | 2 +- apps/api/src/anilist.rs | 4 +- apps/api/src/main.rs | 4 + apps/api/src/reading_status_match.rs | 594 ++++++++++++++++++ apps/backoffice/app/(app)/jobs/[id]/page.tsx | 122 +++- apps/backoffice/app/(app)/jobs/page.tsx | 57 +- .../app/api/jobs/[id]/replay/route.ts | 5 +- apps/backoffice/app/components/JobRow.tsx | 2 +- apps/backoffice/app/components/ui/Badge.tsx | 1 + apps/backoffice/lib/api.ts | 36 ++ apps/backoffice/lib/i18n/en.ts | 11 + apps/backoffice/lib/i18n/fr.ts | 11 + apps/backoffice/package.json | 2 +- crates/notifications/src/lib.rs | 49 ++ ...0055_add_reading_status_match_job_type.sql | 6 + .../0056_add_reading_status_match_results.sql | 16 + 17 files changed, 916 insertions(+), 16 deletions(-) create mode 100644 apps/api/src/reading_status_match.rs create mode 100644 infra/migrations/0055_add_reading_status_match_job_type.sql create mode 100644 infra/migrations/0056_add_reading_status_match_results.sql diff --git a/Cargo.lock b/Cargo.lock index d43d9ee..0f8fd10 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -64,7 +64,7 @@ checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "api" -version = "2.2.0" +version = "2.3.0" dependencies = [ "anyhow", "argon2", @@ -1233,7 +1233,7 @@ dependencies = [ [[package]] name = "indexer" -version = "2.2.0" +version = "2.3.0" dependencies = [ "anyhow", "axum", @@ -1667,7 +1667,7 @@ dependencies = [ [[package]] name = "notifications" -version = "2.2.0" +version = "2.3.0" dependencies = [ "anyhow", "reqwest", @@ -1786,7 +1786,7 @@ dependencies = [ [[package]] name = "parsers" -version = "2.2.0" +version = "2.3.0" dependencies = [ "anyhow", "flate2", @@ -2923,7 +2923,7 @@ dependencies = [ [[package]] name = "stripstream-core" -version = "2.2.0" +version = "2.3.0" 
dependencies = [ "anyhow", "serde", diff --git a/Cargo.toml b/Cargo.toml index a86ee92..91ad200 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,7 @@ resolver = "2" [workspace.package] edition = "2021" -version = "2.2.0" +version = "2.3.0" license = "MIT" [workspace.dependencies] diff --git a/apps/api/src/anilist.rs b/apps/api/src/anilist.rs index 4c3666a..bbe2782 100644 --- a/apps/api/src/anilist.rs +++ b/apps/api/src/anilist.rs @@ -13,7 +13,7 @@ use crate::{error::ApiError, state::AppState}; const ANILIST_API: &str = "https://graphql.anilist.co"; -async fn anilist_graphql( +pub(crate) async fn anilist_graphql( token: &str, query: &str, variables: Value, @@ -55,7 +55,7 @@ async fn anilist_graphql( } /// Load AniList settings from DB: (access_token, anilist_user_id, local_user_id) -async fn load_anilist_settings(pool: &sqlx::PgPool) -> Result<(String, Option, Option), ApiError> { +pub(crate) async fn load_anilist_settings(pool: &sqlx::PgPool) -> Result<(String, Option, Option), ApiError> { let row = sqlx::query("SELECT value FROM app_settings WHERE key = 'anilist'") .fetch_optional(pool) .await?; diff --git a/apps/api/src/main.rs b/apps/api/src/main.rs index 7d642a3..e1ee5f1 100644 --- a/apps/api/src/main.rs +++ b/apps/api/src/main.rs @@ -18,6 +18,7 @@ mod pages; mod prowlarr; mod qbittorrent; mod reading_progress; +mod reading_status_match; mod search; mod series; mod settings; @@ -145,6 +146,9 @@ async fn main() -> anyhow::Result<()> { .route("/metadata/batch/:id/results", get(metadata_batch::get_batch_results)) .route("/metadata/refresh", axum::routing::post(metadata_refresh::start_refresh)) .route("/metadata/refresh/:id/report", get(metadata_refresh::get_refresh_report)) + .route("/reading-status/match", axum::routing::post(reading_status_match::start_match)) + .route("/reading-status/match/:id/report", get(reading_status_match::get_match_report)) + .route("/reading-status/match/:id/results", get(reading_status_match::get_match_results)) 
.merge(settings::settings_routes()) .route_layer(middleware::from_fn_with_state( state.clone(), diff --git a/apps/api/src/reading_status_match.rs b/apps/api/src/reading_status_match.rs new file mode 100644 index 0000000..58902fc --- /dev/null +++ b/apps/api/src/reading_status_match.rs @@ -0,0 +1,594 @@ +use axum::{extract::State, Json}; +use serde::{Deserialize, Serialize}; +use sqlx::{PgPool, Row}; +use std::time::Duration; +use tracing::{info, warn}; +use utoipa::ToSchema; +use uuid::Uuid; + +use crate::{anilist, error::ApiError, state::AppState}; + +// --------------------------------------------------------------------------- +// DTOs +// --------------------------------------------------------------------------- + +#[derive(Deserialize, ToSchema)] +pub struct ReadingStatusMatchRequest { + pub library_id: String, +} + +#[derive(Serialize, ToSchema)] +pub struct ReadingStatusMatchReportDto { + #[schema(value_type = String)] + pub job_id: Uuid, + pub status: String, + pub total_series: i64, + pub linked: i64, + pub already_linked: i64, + pub no_results: i64, + pub ambiguous: i64, + pub errors: i64, +} + +#[derive(Serialize, ToSchema)] +pub struct ReadingStatusMatchResultDto { + #[schema(value_type = String)] + pub id: Uuid, + pub series_name: String, + /// 'linked' | 'already_linked' | 'no_results' | 'ambiguous' | 'error' + pub status: String, + pub anilist_id: Option, + pub anilist_title: Option, + pub anilist_url: Option, + pub error_message: Option, +} + +// --------------------------------------------------------------------------- +// POST /reading-status/match — Trigger a reading status match job +// --------------------------------------------------------------------------- + +#[utoipa::path( + post, + path = "/reading-status/match", + tag = "reading_status", + request_body = ReadingStatusMatchRequest, + responses( + (status = 200, description = "Job created"), + (status = 400, description = "Bad request"), + ), + security(("Bearer" = [])) +)] +pub async 
fn start_match( + State(state): State, + Json(body): Json, +) -> Result, ApiError> { + let library_id: Uuid = body + .library_id + .parse() + .map_err(|_| ApiError::bad_request("invalid library_id"))?; + + // Verify library exists and has a reading_status_provider configured + let lib_row = sqlx::query("SELECT reading_status_provider FROM libraries WHERE id = $1") + .bind(library_id) + .fetch_optional(&state.pool) + .await? + .ok_or_else(|| ApiError::not_found("library not found"))?; + + let provider: Option = lib_row.get("reading_status_provider"); + if provider.is_none() { + return Err(ApiError::bad_request( + "This library has no reading status provider configured", + )); + } + + // Check AniList is configured globally + anilist::load_anilist_settings(&state.pool).await?; + + // Check no existing running job for this library + let existing: Option = sqlx::query_scalar( + "SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'reading_status_match' AND status IN ('pending', 'running') LIMIT 1", + ) + .bind(library_id) + .fetch_optional(&state.pool) + .await?; + + if let Some(existing_id) = existing { + return Ok(Json(serde_json::json!({ + "id": existing_id.to_string(), + "status": "already_running", + }))); + } + + let job_id = Uuid::new_v4(); + sqlx::query( + "INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'reading_status_match', 'running', NOW())", + ) + .bind(job_id) + .bind(library_id) + .execute(&state.pool) + .await?; + + let pool = state.pool.clone(); + let library_name: Option = + sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1") + .bind(library_id) + .fetch_optional(&state.pool) + .await + .ok() + .flatten(); + + tokio::spawn(async move { + if let Err(e) = process_reading_status_match(&pool, job_id, library_id).await { + warn!("[READING_STATUS_MATCH] job {job_id} failed: {e}"); + let _ = sqlx::query( + "UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW() WHERE id = $1", + ) + 
.bind(job_id) + .bind(e.to_string()) + .execute(&pool) + .await; + notifications::notify( + pool.clone(), + notifications::NotificationEvent::ReadingStatusMatchFailed { + library_name, + error: e.to_string(), + }, + ); + } + }); + + Ok(Json(serde_json::json!({ + "id": job_id.to_string(), + "status": "running", + }))) +} + +// --------------------------------------------------------------------------- +// GET /reading-status/match/:id/report +// --------------------------------------------------------------------------- + +#[utoipa::path( + get, + path = "/reading-status/match/{id}/report", + tag = "reading_status", + params(("id" = String, Path, description = "Job UUID")), + responses( + (status = 200, body = ReadingStatusMatchReportDto), + (status = 404, description = "Job not found"), + ), + security(("Bearer" = [])) +)] +pub async fn get_match_report( + State(state): State, + axum::extract::Path(job_id): axum::extract::Path, +) -> Result, ApiError> { + let row = sqlx::query( + "SELECT status, total_files FROM index_jobs WHERE id = $1 AND type = 'reading_status_match'", + ) + .bind(job_id) + .fetch_optional(&state.pool) + .await? 
+ .ok_or_else(|| ApiError::not_found("job not found"))?; + + let job_status: String = row.get("status"); + let total_files: Option = row.get("total_files"); + + let counts = sqlx::query( + "SELECT status, COUNT(*) as cnt FROM reading_status_match_results WHERE job_id = $1 GROUP BY status", + ) + .bind(job_id) + .fetch_all(&state.pool) + .await?; + + let mut linked = 0i64; + let mut already_linked = 0i64; + let mut no_results = 0i64; + let mut ambiguous = 0i64; + let mut errors = 0i64; + + for r in &counts { + let status: String = r.get("status"); + let cnt: i64 = r.get("cnt"); + match status.as_str() { + "linked" => linked = cnt, + "already_linked" => already_linked = cnt, + "no_results" => no_results = cnt, + "ambiguous" => ambiguous = cnt, + "error" => errors = cnt, + _ => {} + } + } + + Ok(Json(ReadingStatusMatchReportDto { + job_id, + status: job_status, + total_series: total_files.unwrap_or(0) as i64, + linked, + already_linked, + no_results, + ambiguous, + errors, + })) +} + +// --------------------------------------------------------------------------- +// GET /reading-status/match/:id/results +// --------------------------------------------------------------------------- + +#[utoipa::path( + get, + path = "/reading-status/match/{id}/results", + tag = "reading_status", + params( + ("id" = String, Path, description = "Job UUID"), + ("status" = Option, Query, description = "Filter by status"), + ), + responses( + (status = 200, body = Vec), + ), + security(("Bearer" = [])) +)] +pub async fn get_match_results( + State(state): State, + axum::extract::Path(job_id): axum::extract::Path, + axum::extract::Query(query): axum::extract::Query, +) -> Result>, ApiError> { + let rows = if let Some(status_filter) = &query.status { + sqlx::query( + "SELECT id, series_name, status, anilist_id, anilist_title, anilist_url, error_message + FROM reading_status_match_results + WHERE job_id = $1 AND status = $2 + ORDER BY series_name", + ) + .bind(job_id) + .bind(status_filter) + 
.fetch_all(&state.pool) + .await? + } else { + sqlx::query( + "SELECT id, series_name, status, anilist_id, anilist_title, anilist_url, error_message + FROM reading_status_match_results + WHERE job_id = $1 + ORDER BY status, series_name", + ) + .bind(job_id) + .fetch_all(&state.pool) + .await? + }; + + let results = rows + .iter() + .map(|row| ReadingStatusMatchResultDto { + id: row.get("id"), + series_name: row.get("series_name"), + status: row.get("status"), + anilist_id: row.get("anilist_id"), + anilist_title: row.get("anilist_title"), + anilist_url: row.get("anilist_url"), + error_message: row.get("error_message"), + }) + .collect(); + + Ok(Json(results)) +} + +#[derive(Deserialize)] +pub struct ResultsQuery { + pub status: Option, +} + +// --------------------------------------------------------------------------- +// Background processing +// --------------------------------------------------------------------------- + +pub(crate) async fn process_reading_status_match( + pool: &PgPool, + job_id: Uuid, + library_id: Uuid, +) -> Result<(), String> { + let (token, _, _) = anilist::load_anilist_settings(pool) + .await + .map_err(|e| e.message)?; + + let series_names: Vec = sqlx::query_scalar( + r#" + SELECT DISTINCT COALESCE(NULLIF(series, ''), 'unclassified') + FROM books + WHERE library_id = $1 + ORDER BY 1 + "#, + ) + .bind(library_id) + .fetch_all(pool) + .await + .map_err(|e| e.to_string())?; + + let total = series_names.len() as i32; + sqlx::query("UPDATE index_jobs SET total_files = $2 WHERE id = $1") + .bind(job_id) + .bind(total) + .execute(pool) + .await + .map_err(|e| e.to_string())?; + + let already_linked: std::collections::HashSet = sqlx::query_scalar( + "SELECT series_name FROM anilist_series_links WHERE library_id = $1", + ) + .bind(library_id) + .fetch_all(pool) + .await + .map_err(|e| e.to_string())? 
+ .into_iter() + .collect(); + + let mut processed = 0i32; + + for series_name in &series_names { + if is_job_cancelled(pool, job_id).await { + sqlx::query( + "UPDATE index_jobs SET status = 'cancelled', finished_at = NOW() WHERE id = $1", + ) + .bind(job_id) + .execute(pool) + .await + .map_err(|e| e.to_string())?; + return Ok(()); + } + + processed += 1; + let progress = (processed * 100 / total.max(1)).min(100); + sqlx::query( + "UPDATE index_jobs SET processed_files = $2, progress_percent = $3, current_file = $4 WHERE id = $1", + ) + .bind(job_id) + .bind(processed) + .bind(progress) + .bind(series_name) + .execute(pool) + .await + .ok(); + + if series_name == "unclassified" { + insert_result(pool, job_id, library_id, series_name, "already_linked", None, None, None, None).await; + continue; + } + + if already_linked.contains(series_name) { + insert_result(pool, job_id, library_id, series_name, "already_linked", None, None, None, None).await; + continue; + } + + match search_and_link(pool, library_id, series_name, &token).await { + Ok(Outcome::Linked { anilist_id, anilist_title, anilist_url }) => { + insert_result(pool, job_id, library_id, series_name, "linked", Some(anilist_id), anilist_title.as_deref(), anilist_url.as_deref(), None).await; + } + Ok(Outcome::NoResults) => { + insert_result(pool, job_id, library_id, series_name, "no_results", None, None, None, None).await; + } + Ok(Outcome::Ambiguous) => { + insert_result(pool, job_id, library_id, series_name, "ambiguous", None, None, None, None).await; + } + Err(e) => { + warn!("[READING_STATUS_MATCH] series '{series_name}': {e}"); + insert_result(pool, job_id, library_id, series_name, "error", None, None, None, Some(&e)).await; + } + } + + // Respect AniList rate limit (~90 req/min) + tokio::time::sleep(Duration::from_millis(700)).await; + } + + // Build stats from results table + let counts = sqlx::query( + "SELECT status, COUNT(*) as cnt FROM reading_status_match_results WHERE job_id = $1 GROUP BY status", + 
) + .bind(job_id) + .fetch_all(pool) + .await + .map_err(|e| e.to_string())?; + + let mut count_linked = 0i64; + let mut count_already_linked = 0i64; + let mut count_no_results = 0i64; + let mut count_ambiguous = 0i64; + let mut count_errors = 0i64; + for row in &counts { + let s: String = row.get("status"); + let c: i64 = row.get("cnt"); + match s.as_str() { + "linked" => count_linked = c, + "already_linked" => count_already_linked = c, + "no_results" => count_no_results = c, + "ambiguous" => count_ambiguous = c, + "error" => count_errors = c, + _ => {} + } + } + + let stats = serde_json::json!({ + "total_series": total as i64, + "linked": count_linked, + "already_linked": count_already_linked, + "no_results": count_no_results, + "ambiguous": count_ambiguous, + "errors": count_errors, + }); + + sqlx::query( + "UPDATE index_jobs SET status = 'success', finished_at = NOW(), stats_json = $2, progress_percent = 100 WHERE id = $1", + ) + .bind(job_id) + .bind(&stats) + .execute(pool) + .await + .map_err(|e| e.to_string())?; + + info!( + "[READING_STATUS_MATCH] job={job_id} completed: {}/{} series, linked={count_linked}, ambiguous={count_ambiguous}, no_results={count_no_results}, errors={count_errors}", + processed, total + ); + + let library_name: Option = sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1") + .bind(library_id) + .fetch_optional(pool) + .await + .ok() + .flatten(); + + notifications::notify( + pool.clone(), + notifications::NotificationEvent::ReadingStatusMatchCompleted { + library_name, + total_series: total, + linked: count_linked as i32, + }, + ); + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +#[allow(clippy::too_many_arguments)] +async fn insert_result( + pool: &PgPool, + job_id: Uuid, + library_id: Uuid, + series_name: &str, + status: &str, + anilist_id: Option, + anilist_title: Option<&str>, + 
anilist_url: Option<&str>, + error_message: Option<&str>, +) { + let _ = sqlx::query( + r#" + INSERT INTO reading_status_match_results + (job_id, library_id, series_name, status, anilist_id, anilist_title, anilist_url, error_message) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + "#, + ) + .bind(job_id) + .bind(library_id) + .bind(series_name) + .bind(status) + .bind(anilist_id) + .bind(anilist_title) + .bind(anilist_url) + .bind(error_message) + .execute(pool) + .await; +} + +enum Outcome { + Linked { + anilist_id: i32, + anilist_title: Option, + anilist_url: Option, + }, + NoResults, + Ambiguous, +} + +async fn search_and_link( + pool: &PgPool, + library_id: Uuid, + series_name: &str, + token: &str, +) -> Result { + let gql = r#" + query SearchManga($search: String) { + Page(perPage: 10) { + media(search: $search, type: MANGA, sort: [SEARCH_MATCH]) { + id + title { romaji english native } + siteUrl + } + } + } + "#; + + let data = anilist::anilist_graphql(token, gql, serde_json::json!({ "search": series_name })) + .await + .map_err(|e| e.message)?; + + let media: Vec = match data["Page"]["media"].as_array() { + Some(arr) => arr.clone(), + None => return Ok(Outcome::NoResults), + }; + + if media.is_empty() { + return Ok(Outcome::NoResults); + } + + let normalized_query = normalize_title(series_name); + let exact_matches: Vec<_> = media + .iter() + .filter(|m| { + let romaji = m["title"]["romaji"].as_str().map(normalize_title); + let english = m["title"]["english"].as_str().map(normalize_title); + let native = m["title"]["native"].as_str().map(normalize_title); + romaji.as_deref() == Some(&normalized_query) + || english.as_deref() == Some(&normalized_query) + || native.as_deref() == Some(&normalized_query) + }) + .collect(); + + let candidate = if exact_matches.len() == 1 { + exact_matches[0] + } else if exact_matches.is_empty() && media.len() == 1 { + &media[0] + } else { + return Ok(Outcome::Ambiguous); + }; + + let anilist_id = candidate["id"].as_i64().unwrap_or(0) 
as i32; + let anilist_title = candidate["title"]["english"] + .as_str() + .or_else(|| candidate["title"]["romaji"].as_str()) + .map(String::from); + let anilist_url = candidate["siteUrl"].as_str().map(String::from); + + sqlx::query( + r#" + INSERT INTO anilist_series_links (library_id, series_name, provider, anilist_id, anilist_title, anilist_url, status, linked_at) + VALUES ($1, $2, 'anilist', $3, $4, $5, 'linked', NOW()) + ON CONFLICT (library_id, series_name, provider) DO NOTHING + "#, + ) + .bind(library_id) + .bind(series_name) + .bind(anilist_id) + .bind(&anilist_title) + .bind(&anilist_url) + .execute(pool) + .await + .map_err(|e| e.to_string())?; + + Ok(Outcome::Linked { + anilist_id, + anilist_title, + anilist_url, + }) +} + +fn normalize_title(s: &str) -> String { + s.to_lowercase() + .replace([':', '!', '?', '.', ',', '\'', '"', '-', '_'], " ") + .split_whitespace() + .collect::>() + .join(" ") +} + +async fn is_job_cancelled(pool: &PgPool, job_id: Uuid) -> bool { + sqlx::query_scalar::<_, String>("SELECT status FROM index_jobs WHERE id = $1") + .bind(job_id) + .fetch_optional(pool) + .await + .ok() + .flatten() + .as_deref() + == Some("cancelled") +} diff --git a/apps/backoffice/app/(app)/jobs/[id]/page.tsx b/apps/backoffice/app/(app)/jobs/[id]/page.tsx index 051853d..9d82ef4 100644 --- a/apps/backoffice/app/(app)/jobs/[id]/page.tsx +++ b/apps/backoffice/app/(app)/jobs/[id]/page.tsx @@ -2,7 +2,7 @@ export const dynamic = "force-dynamic"; import { notFound } from "next/navigation"; import Link from "next/link"; -import { apiFetch, getMetadataBatchReport, getMetadataBatchResults, getMetadataRefreshReport, MetadataBatchReportDto, MetadataBatchResultDto, MetadataRefreshReportDto } from "@/lib/api"; +import { apiFetch, getMetadataBatchReport, getMetadataBatchResults, getMetadataRefreshReport, getReadingStatusMatchReport, getReadingStatusMatchResults, MetadataBatchReportDto, MetadataBatchResultDto, MetadataRefreshReportDto, ReadingStatusMatchReportDto, 
ReadingStatusMatchResultDto } from "@/lib/api"; import { Card, CardHeader, CardTitle, CardDescription, CardContent, StatusBadge, JobTypeBadge, StatBox, ProgressBar @@ -132,10 +132,16 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { description: t("jobType.metadata_refreshDesc"), isThumbnailOnly: false, }, + reading_status_match: { + label: t("jobType.reading_status_matchLabel"), + description: t("jobType.reading_status_matchDesc"), + isThumbnailOnly: false, + }, }; const isMetadataBatch = job.type === "metadata_batch"; const isMetadataRefresh = job.type === "metadata_refresh"; + const isReadingStatusMatch = job.type === "reading_status_match"; // Fetch batch report & results for metadata_batch jobs let batchReport: MetadataBatchReportDto | null = null; @@ -153,6 +159,16 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { refreshReport = await getMetadataRefreshReport(id).catch(() => null); } + // Fetch reading status match report & results + let readingStatusReport: ReadingStatusMatchReportDto | null = null; + let readingStatusResults: ReadingStatusMatchResultDto[] = []; + if (isReadingStatusMatch) { + [readingStatusReport, readingStatusResults] = await Promise.all([ + getReadingStatusMatchReport(id).catch(() => null), + getReadingStatusMatchResults(id).catch(() => []), + ]); + } + const typeInfo = JOB_TYPE_INFO[job.type] ?? { label: job.type, description: null, @@ -177,6 +193,8 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { ? t("jobDetail.metadataSearch") : isMetadataRefresh ? t("jobDetail.metadataRefresh") + : isReadingStatusMatch + ? t("jobDetail.readingStatusMatch") : isThumbnailOnly ? t("jobType.thumbnail_rebuild") : isExtractingPages @@ -189,6 +207,8 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { ? t("jobDetail.metadataSearchDesc") : isMetadataRefresh ? t("jobDetail.metadataRefreshDesc") + : isReadingStatusMatch + ? 
t("jobDetail.readingStatusMatchDesc") : isThumbnailOnly ? undefined : isExtractingPages @@ -240,7 +260,12 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { — {refreshReport.refreshed} {t("jobDetail.refreshed").toLowerCase()}, {refreshReport.unchanged} {t("jobDetail.unchanged").toLowerCase()}, {refreshReport.errors} {t("jobDetail.errors").toLowerCase()} )} - {!isMetadataBatch && !isMetadataRefresh && job.stats_json && ( + {isReadingStatusMatch && readingStatusReport && ( + + — {readingStatusReport.linked} {t("jobDetail.linked").toLowerCase()}, {readingStatusReport.no_results} {t("jobDetail.noResults").toLowerCase()}, {readingStatusReport.ambiguous} {t("jobDetail.ambiguous").toLowerCase()}, {readingStatusReport.errors} {t("jobDetail.errors").toLowerCase()} + + )} + {!isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && job.stats_json && ( — {job.stats_json.scanned_files} {t("jobDetail.scanned").toLowerCase()}, {job.stats_json.indexed_files} {t("jobDetail.indexed").toLowerCase()} {job.stats_json.removed_files > 0 && `, ${job.stats_json.removed_files} ${t("jobDetail.removed").toLowerCase()}`} @@ -249,7 +274,7 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { {job.total_files != null && job.total_files > 0 && `, ${job.total_files} ${t("jobType.thumbnail_rebuild").toLowerCase()}`} )} - {!isMetadataBatch && !isMetadataRefresh && !job.stats_json && isThumbnailOnly && job.total_files != null && ( + {!isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && !job.stats_json && isThumbnailOnly && job.total_files != null && ( — {job.processed_files ?? 
job.total_files} {t("jobDetail.generated").toLowerCase()} @@ -514,7 +539,7 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { )} {/* Index Statistics — index jobs only */} - {job.stats_json && !isThumbnailOnly && !isMetadataBatch && !isMetadataRefresh && ( + {job.stats_json && !isThumbnailOnly && !isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && ( {t("jobDetail.indexStats")} @@ -713,6 +738,95 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { )} + {/* Reading status match — summary report */} + {isReadingStatusMatch && readingStatusReport && ( + + + {t("jobDetail.readingStatusMatchReport")} + {t("jobDetail.seriesAnalyzed", { count: String(readingStatusReport.total_series) })} + + +
+ + + + + 0 ? "error" : "default"} /> +
+
+
+ )} + + {/* Reading status match — per-series detail */} + {isReadingStatusMatch && readingStatusResults.length > 0 && ( + + + {t("jobDetail.resultsBySeries")} + {t("jobDetail.seriesProcessed", { count: String(readingStatusResults.length) })} + + + {readingStatusResults.map((r) => ( +
+
+ {job.library_id ? ( + + {r.series_name} + + ) : ( + {r.series_name} + )} + + {r.status === "linked" ? t("jobDetail.linked") : + r.status === "already_linked" ? t("jobDetail.alreadyLinked") : + r.status === "no_results" ? t("jobDetail.noResults") : + r.status === "ambiguous" ? t("jobDetail.ambiguous") : + r.status === "error" ? t("common.error") : + r.status} + +
+ {r.status === "linked" && r.anilist_title && ( +
+ + + + {r.anilist_url ? ( + + {r.anilist_title} + + ) : ( + {r.anilist_title} + )} + {r.anilist_id && #{r.anilist_id}} +
+ )} + {r.error_message && ( +

{r.error_message}

+ )} +
+ ))} +
+
+ )} + {/* Metadata batch results */} {isMetadataBatch && batchResults.length > 0 && ( diff --git a/apps/backoffice/app/(app)/jobs/page.tsx b/apps/backoffice/app/(app)/jobs/page.tsx index 0256abf..b6a072c 100644 --- a/apps/backoffice/app/(app)/jobs/page.tsx +++ b/apps/backoffice/app/(app)/jobs/page.tsx @@ -1,6 +1,6 @@ import { revalidatePath } from "next/cache"; import { redirect } from "next/navigation"; -import { listJobs, fetchLibraries, rebuildIndex, rebuildThumbnails, regenerateThumbnails, startMetadataBatch, startMetadataRefresh, IndexJobDto, LibraryDto } from "@/lib/api"; +import { listJobs, fetchLibraries, rebuildIndex, rebuildThumbnails, regenerateThumbnails, startMetadataBatch, startMetadataRefresh, startReadingStatusMatch, IndexJobDto, LibraryDto } from "@/lib/api"; import { JobsList } from "@/app/components/JobsList"; import { Card, CardHeader, CardTitle, CardDescription, CardContent, FormField, FormSelect } from "@/app/components/ui"; import { getServerTranslations } from "@/lib/i18n/server"; @@ -16,6 +16,7 @@ export default async function JobsPage({ searchParams }: { searchParams: Promise ]); const libraryMap = new Map(libraries.map(l => [l.id, l.name])); + const readingStatusLibraries = libraries.filter(l => l.reading_status_provider); async function triggerRebuild(formData: FormData) { "use server"; @@ -118,6 +119,36 @@ export default async function JobsPage({ searchParams }: { searchParams: Promise } } + async function triggerReadingStatusMatch(formData: FormData) { + "use server"; + const libraryId = formData.get("library_id") as string; + if (libraryId) { + let result; + try { + result = await startReadingStatusMatch(libraryId); + } catch { + return; + } + revalidatePath("/jobs"); + redirect(`/jobs?highlight=${result.id}`); + } else { + // All libraries — only those with reading_status_provider configured + const allLibraries = await fetchLibraries().catch(() => [] as LibraryDto[]); + let lastId: string | undefined; + for (const lib of 
allLibraries) { + if (!lib.reading_status_provider) continue; + try { + const result = await startReadingStatusMatch(lib.id); + if (result.status !== "already_running") lastId = result.id; + } catch { + // Skip libraries with errors + } + } + revalidatePath("/jobs"); + redirect(lastId ? `/jobs?highlight=${lastId}` : "/jobs"); + } + } + return ( <>
@@ -254,6 +285,30 @@ export default async function JobsPage({ searchParams }: { searchParams: Promise
+ {/* Reading status group — only shown if at least one library has a provider configured */} + {readingStatusLibraries.length > 0 && ( +
+
+ + + + {t("jobs.groupReadingStatus")} +
+
+ +
+
+ )} + diff --git a/apps/backoffice/app/api/jobs/[id]/replay/route.ts b/apps/backoffice/app/api/jobs/[id]/replay/route.ts index 3922c77..ae61478 100644 --- a/apps/backoffice/app/api/jobs/[id]/replay/route.ts +++ b/apps/backoffice/app/api/jobs/[id]/replay/route.ts @@ -1,5 +1,5 @@ import { NextRequest, NextResponse } from "next/server"; -import { apiFetch, IndexJobDto, rebuildIndex, rebuildThumbnails, regenerateThumbnails, startMetadataBatch, startMetadataRefresh } from "@/lib/api"; +import { apiFetch, IndexJobDto, rebuildIndex, rebuildThumbnails, regenerateThumbnails, startMetadataBatch, startMetadataRefresh, startReadingStatusMatch } from "@/lib/api"; export async function POST( _request: NextRequest, @@ -29,6 +29,9 @@ export async function POST( case "metadata_refresh": if (!libraryId) return NextResponse.json({ error: "Library ID required for metadata refresh" }, { status: 400 }); return NextResponse.json(await startMetadataRefresh(libraryId)); + case "reading_status_match": + if (!libraryId) return NextResponse.json({ error: "Library ID required for reading status match" }, { status: 400 }); + return NextResponse.json(await startReadingStatusMatch(libraryId)); default: return NextResponse.json({ error: `Cannot replay job type: ${job.type}` }, { status: 400 }); } diff --git a/apps/backoffice/app/components/JobRow.tsx b/apps/backoffice/app/components/JobRow.tsx index fdf3edd..fed94f4 100644 --- a/apps/backoffice/app/components/JobRow.tsx +++ b/apps/backoffice/app/components/JobRow.tsx @@ -35,7 +35,7 @@ interface JobRowProps { formatDuration: (start: string, end: string | null) => string; } -const REPLAYABLE_TYPES = new Set(["rebuild", "full_rebuild", "rescan", "scan", "thumbnail_rebuild", "thumbnail_regenerate", "metadata_batch", "metadata_refresh"]); +const REPLAYABLE_TYPES = new Set(["rebuild", "full_rebuild", "rescan", "scan", "thumbnail_rebuild", "thumbnail_regenerate", "metadata_batch", "metadata_refresh", "reading_status_match"]); export function JobRow({ 
job, libraryName, highlighted, onCancel, onReplay, formatDate, formatDuration }: JobRowProps) { const { t } = useTranslation(); diff --git a/apps/backoffice/app/components/ui/Badge.tsx b/apps/backoffice/app/components/ui/Badge.tsx index 0153204..0d9d31a 100644 --- a/apps/backoffice/app/components/ui/Badge.tsx +++ b/apps/backoffice/app/components/ui/Badge.tsx @@ -117,6 +117,7 @@ export function JobTypeBadge({ type, className = "" }: JobTypeBadgeProps) { cbr_to_cbz: t("jobType.cbr_to_cbz"), metadata_batch: t("jobType.metadata_batch"), metadata_refresh: t("jobType.metadata_refresh"), + reading_status_match: t("jobType.reading_status_match"), }; const label = jobTypeLabels[key] ?? type; return {label}; diff --git a/apps/backoffice/lib/api.ts b/apps/backoffice/lib/api.ts index 18eaeb8..1ab7e56 100644 --- a/apps/backoffice/lib/api.ts +++ b/apps/backoffice/lib/api.ts @@ -1066,6 +1066,42 @@ export async function startMetadataRefresh(libraryId: string) { }); } +export async function startReadingStatusMatch(libraryId: string) { + return apiFetch<{ id: string; status: string }>("/reading-status/match", { + method: "POST", + body: JSON.stringify({ library_id: libraryId }), + }); +} + +export type ReadingStatusMatchReportDto = { + job_id: string; + status: string; + total_series: number; + linked: number; + already_linked: number; + no_results: number; + ambiguous: number; + errors: number; +}; + +export type ReadingStatusMatchResultDto = { + id: string; + series_name: string; + status: "linked" | "already_linked" | "no_results" | "ambiguous" | "error"; + anilist_id: number | null; + anilist_title: string | null; + anilist_url: string | null; + error_message: string | null; +}; + +export async function getReadingStatusMatchReport(jobId: string) { + return apiFetch(`/reading-status/match/${jobId}/report`); +} + +export async function getReadingStatusMatchResults(jobId: string) { + return apiFetch(`/reading-status/match/${jobId}/results`); +} + export type RefreshFieldDiff = { 
field: string; old?: unknown; diff --git a/apps/backoffice/lib/i18n/en.ts b/apps/backoffice/lib/i18n/en.ts index d6ba002..6c9b78f 100644 --- a/apps/backoffice/lib/i18n/en.ts +++ b/apps/backoffice/lib/i18n/en.ts @@ -259,6 +259,9 @@ const en: Record<string, string> = { "jobs.generateThumbnailsDescription": "Generates thumbnails only for books that don't have one yet. Existing thumbnails are not affected. Useful after an import or if some thumbnails are missing.", "jobs.regenerateThumbnailsDescription": "Regenerates all thumbnails from scratch, replacing existing ones. Useful if thumbnail quality or size has changed in the configuration, or if thumbnails are corrupted.", "jobs.batchMetadataDescription": "Automatically searches metadata for each series in the library from the configured provider (with fallback if configured). Only results with a unique 100% confidence match are applied automatically. Already linked series are skipped. A detailed per-series report is available at the end of the job. Requires a specific library (does not work on \"All libraries\").", + "jobs.groupReadingStatus": "Reading status", + "jobs.matchReadingStatus": "Match series", + "jobs.matchReadingStatusShort": "Auto-link unmatched series to the reading status provider", // Jobs list "jobsList.id": "ID", @@ -360,6 +363,11 @@ const en: Record<string, string> = { "jobDetail.match": "Match: {{title}}", "jobDetail.fileErrors": "File errors ({{count}})", "jobDetail.fileErrorsDesc": "Errors encountered while processing files", + "jobDetail.readingStatusMatch": "Series matching", + "jobDetail.readingStatusMatchDesc": "Searching each series against the reading status provider", + "jobDetail.readingStatusMatchReport": "Match report", + "jobDetail.linked": "Linked", + "jobDetail.ambiguous": "Ambiguous", // Job types "jobType.rebuild": "Indexing", @@ -386,6 +394,9 @@ const en: Record<string, string> = { "jobType.metadata_batchDesc": "Searches external metadata providers for all series in the library and automatically applies 100% confidence
matches.", "jobType.metadata_refreshLabel": "Metadata refresh", "jobType.metadata_refreshDesc": "Re-downloads and updates metadata for all series already linked to an external provider.", + "jobType.reading_status_match": "Reading status match", + "jobType.reading_status_matchLabel": "Series matching (reading status)", + "jobType.reading_status_matchDesc": "Automatically searches each series in the library against the configured reading status provider (e.g. AniList) and creates links for unambiguously identified series.", // Status badges "statusBadge.extracting_pages": "Extracting pages", diff --git a/apps/backoffice/lib/i18n/fr.ts b/apps/backoffice/lib/i18n/fr.ts index fc1e63f..38f64aa 100644 --- a/apps/backoffice/lib/i18n/fr.ts +++ b/apps/backoffice/lib/i18n/fr.ts @@ -257,6 +257,9 @@ const fr = { "jobs.generateThumbnailsDescription": "Génère les miniatures uniquement pour les livres qui n'en ont pas encore. Les miniatures existantes ne sont pas touchées. Utile après un import ou si certaines miniatures sont manquantes.", "jobs.regenerateThumbnailsDescription": "Regénère toutes les miniatures depuis zéro, en remplaçant les existantes. Utile si la qualité ou la taille des miniatures a changé dans la configuration, ou si des miniatures sont corrompues.", "jobs.batchMetadataDescription": "Recherche automatiquement les métadonnées de chaque série de la bibliothèque auprès du provider configuré (avec fallback si configuré). Seuls les résultats avec un match unique à 100% de confiance sont appliqués automatiquement. Les séries déjà liées sont ignorées. Un rapport détaillé par série est disponible à la fin du job. 
Requiert une bibliothèque spécifique (ne fonctionne pas sur \u00ab Toutes les bibliothèques \u00bb).", + "jobs.groupReadingStatus": "Statut de lecture", + "jobs.matchReadingStatus": "Correspondance des séries", + "jobs.matchReadingStatusShort": "Lier automatiquement les séries non associées au provider", // Jobs list "jobsList.id": "ID", @@ -358,6 +361,11 @@ const fr = { "jobDetail.match": "Correspondance : {{title}}", "jobDetail.fileErrors": "Erreurs de fichiers ({{count}})", "jobDetail.fileErrorsDesc": "Erreurs rencontrées lors du traitement des fichiers", + "jobDetail.readingStatusMatch": "Correspondance des séries", + "jobDetail.readingStatusMatchDesc": "Recherche de chaque série sur le provider de statut de lecture", + "jobDetail.readingStatusMatchReport": "Rapport de correspondance", + "jobDetail.linked": "Liées", + "jobDetail.ambiguous": "Ambiguës", // Job types "jobType.rebuild": "Indexation", @@ -384,6 +392,9 @@ const fr = { "jobType.metadata_batchDesc": "Recherche les métadonnées auprès des fournisseurs externes pour toutes les séries de la bibliothèque et applique automatiquement les correspondances à 100% de confiance.", "jobType.metadata_refreshLabel": "Rafraîchissement métadonnées", "jobType.metadata_refreshDesc": "Re-télécharge et met à jour les métadonnées pour toutes les séries déjà liées à un fournisseur externe.", + "jobType.reading_status_match": "Correspondance statut lecture", + "jobType.reading_status_matchLabel": "Correspondance des séries (statut lecture)", + "jobType.reading_status_matchDesc": "Recherche automatiquement chaque série de la bibliothèque sur le provider de statut de lecture configuré (ex. 
AniList) et crée les liens pour les séries identifiées sans ambiguïté.", // Status badges "statusBadge.extracting_pages": "Extraction des pages", diff --git a/apps/backoffice/package.json b/apps/backoffice/package.json index 2bd4241..1c7bfd8 100644 --- a/apps/backoffice/package.json +++ b/apps/backoffice/package.json @@ -1,6 +1,6 @@ { "name": "stripstream-backoffice", - "version": "2.2.0", + "version": "2.3.0", "private": true, "scripts": { "dev": "next dev -p 7082", diff --git a/crates/notifications/src/lib.rs b/crates/notifications/src/lib.rs index e415e6c..b8b22da 100644 --- a/crates/notifications/src/lib.rs +++ b/crates/notifications/src/lib.rs @@ -43,6 +43,10 @@ pub struct EventToggles { pub metadata_refresh_completed: bool, #[serde(default = "default_true")] pub metadata_refresh_failed: bool, + #[serde(default = "default_true")] + pub reading_status_match_completed: bool, + #[serde(default = "default_true")] + pub reading_status_match_failed: bool, } fn default_true() -> bool { @@ -63,6 +67,8 @@ fn default_events() -> EventToggles { metadata_batch_failed: true, metadata_refresh_completed: true, metadata_refresh_failed: true, + reading_status_match_completed: true, + reading_status_match_failed: true, } } @@ -249,6 +255,16 @@ pub enum NotificationEvent { library_name: Option<String>, error: String, }, + // Reading status match (auto-link series to provider) + ReadingStatusMatchCompleted { + library_name: Option<String>, + total_series: i32, + linked: i32, + }, + ReadingStatusMatchFailed { + library_name: Option<String>, + error: String, + }, } /// Classify an indexer job_type string into the right event constructor category.
@@ -464,6 +480,37 @@ fn format_event(event: &NotificationEvent) -> String { ] .join("\n") } + NotificationEvent::ReadingStatusMatchCompleted { + library_name, + total_series, + linked, + } => { + let lib = library_name.as_deref().unwrap_or("All libraries"); + [ + format!("✅ Reading status match completed"), + format!("━━━━━━━━━━━━━━━━━━━━"), + format!("📂 Library: {lib}"), + String::new(), + format!("📊 Results"), + format!(" 🔗 Linked: {linked} / {total_series} series"), + ] + .join("\n") + } + NotificationEvent::ReadingStatusMatchFailed { + library_name, + error, + } => { + let lib = library_name.as_deref().unwrap_or("All libraries"); + let err = truncate(error, 200); + [ + format!("🚨 Reading status match failed"), + format!("━━━━━━━━━━━━━━━━━━━━"), + format!("📂 Library: {lib}"), + String::new(), + format!("💬 {err}"), + ] + .join("\n") + } } } @@ -504,6 +551,8 @@ fn is_event_enabled(config: &TelegramConfig, event: &NotificationEvent) -> bool NotificationEvent::MetadataBatchFailed { .. } => config.events.metadata_batch_failed, NotificationEvent::MetadataRefreshCompleted { .. } => config.events.metadata_refresh_completed, NotificationEvent::MetadataRefreshFailed { .. } => config.events.metadata_refresh_failed, + NotificationEvent::ReadingStatusMatchCompleted { .. } => config.events.reading_status_match_completed, + NotificationEvent::ReadingStatusMatchFailed { .. } => config.events.reading_status_match_failed, } } diff --git a/infra/migrations/0055_add_reading_status_match_job_type.sql b/infra/migrations/0055_add_reading_status_match_job_type.sql new file mode 100644 index 0000000..1a7ce85 --- /dev/null +++ b/infra/migrations/0055_add_reading_status_match_job_type.sql @@ -0,0 +1,6 @@ +-- Add reading_status_match job type: auto-matches library series against the +-- configured reading status provider (e.g. AniList) and creates links. 
+ALTER TABLE index_jobs + DROP CONSTRAINT IF EXISTS index_jobs_type_check, + ADD CONSTRAINT index_jobs_type_check + CHECK (type IN ('scan', 'rebuild', 'full_rebuild', 'rescan', 'thumbnail_rebuild', 'thumbnail_regenerate', 'cbr_to_cbz', 'metadata_batch', 'metadata_refresh', 'reading_status_match')); diff --git a/infra/migrations/0056_add_reading_status_match_results.sql b/infra/migrations/0056_add_reading_status_match_results.sql new file mode 100644 index 0000000..c86e267 --- /dev/null +++ b/infra/migrations/0056_add_reading_status_match_results.sql @@ -0,0 +1,16 @@ +-- Table to store per-series results for reading_status_match jobs +CREATE TABLE reading_status_match_results ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + job_id UUID NOT NULL REFERENCES index_jobs(id) ON DELETE CASCADE, + library_id UUID NOT NULL REFERENCES libraries(id) ON DELETE CASCADE, + series_name TEXT NOT NULL, + status TEXT NOT NULL, -- 'linked', 'already_linked', 'no_results', 'ambiguous', 'error' + anilist_id INTEGER, + anilist_title TEXT, + anilist_url TEXT, + error_message TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_rsmr_job_id ON reading_status_match_results(job_id); +CREATE INDEX idx_rsmr_status ON reading_status_match_results(status);