diff --git a/README.md b/README.md index e5fb522..e1ec20c 100644 --- a/README.md +++ b/README.md @@ -112,19 +112,20 @@ The backoffice will be available at http://localhost:7082 ### Notifications - **Telegram**: real-time notifications via Telegram Bot API -- 12 granular event toggles (scans, thumbnails, conversions, metadata) +- 16 granular event toggles (scans, thumbnails, conversions, metadata, reading status, download detection) - Book thumbnail images included in notifications where applicable - Test connection from settings ### External Integrations +- **AniList**: bidirectional reading status sync — pull progress from AniList or push local statuses (PLANNING/CURRENT/COMPLETED) with differential detection and configurable auto-push schedule - **Komga**: import reading progress -- **Prowlarr**: search for missing volumes +- **Prowlarr**: search for missing volumes manually from series pages, or run a **download detection job** to automatically scan all series with missing volumes and report available releases - **qBittorrent**: add torrents directly from search results ### Background Jobs -- Rebuild, rescan, thumbnail generation, metadata batch, CBR conversion +- Rebuild, rescan, thumbnail generation, metadata batch, CBR conversion, AniList reading status sync/push, download detection (Prowlarr) - Real-time progress via Server-Sent Events (SSE) -- Job history, error tracking, cancellation +- Job history, error tracking, cancellation, replay ### Page Rendering - On-demand page extraction from all formats diff --git a/apps/api/src/download_detection.rs b/apps/api/src/download_detection.rs new file mode 100644 index 0000000..bafa7cb --- /dev/null +++ b/apps/api/src/download_detection.rs @@ -0,0 +1,611 @@ +use axum::{extract::State, Json}; +use serde::{Deserialize, Serialize}; +use sqlx::{PgPool, Row}; +use tracing::{info, warn}; +use utoipa::ToSchema; +use uuid::Uuid; + +use crate::{error::ApiError, prowlarr, state::AppState}; + +// 
--------------------------------------------------------------------------- +// DTOs +// --------------------------------------------------------------------------- + +#[derive(Deserialize, ToSchema)] +pub struct StartDownloadDetectionRequest { + pub library_id: String, +} + +#[derive(Serialize, ToSchema)] +pub struct DownloadDetectionReportDto { + #[schema(value_type = String)] + pub job_id: Uuid, + pub status: String, + pub total_series: i64, + pub found: i64, + pub not_found: i64, + pub no_missing: i64, + pub no_metadata: i64, + pub errors: i64, +} + +#[derive(Serialize, ToSchema)] +pub struct DownloadDetectionResultDto { + #[schema(value_type = String)] + pub id: Uuid, + pub series_name: String, + /// 'found' | 'not_found' | 'no_missing' | 'no_metadata' | 'error' + pub status: String, + pub missing_count: i32, + pub available_releases: Option>, + pub error_message: Option, +} + +#[derive(Serialize, Deserialize, ToSchema)] +pub struct AvailableReleaseDto { + pub title: String, + pub size: i64, + pub download_url: Option, + pub indexer: Option, + pub seeders: Option, + pub matched_missing_volumes: Vec, +} + +// --------------------------------------------------------------------------- +// POST /download-detection/start +// --------------------------------------------------------------------------- + +#[utoipa::path( + post, + path = "/download-detection/start", + tag = "download_detection", + request_body = StartDownloadDetectionRequest, + responses( + (status = 200, description = "Job created"), + (status = 400, description = "Bad request"), + ), + security(("Bearer" = [])) +)] +pub async fn start_detection( + State(state): State, + Json(body): Json, +) -> Result, ApiError> { + let library_id: Uuid = body + .library_id + .parse() + .map_err(|_| ApiError::bad_request("invalid library_id"))?; + + // Verify library exists + sqlx::query("SELECT id FROM libraries WHERE id = $1") + .bind(library_id) + .fetch_optional(&state.pool) + .await? 
+ .ok_or_else(|| ApiError::not_found("library not found"))?; + + // Verify Prowlarr is configured + prowlarr::check_prowlarr_configured(&state.pool).await?; + + // Check no existing running job for this library + let existing: Option = sqlx::query_scalar( + "SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'download_detection' AND status IN ('pending', 'running') LIMIT 1", + ) + .bind(library_id) + .fetch_optional(&state.pool) + .await?; + + if let Some(existing_id) = existing { + return Ok(Json(serde_json::json!({ + "id": existing_id.to_string(), + "status": "already_running", + }))); + } + + let job_id = Uuid::new_v4(); + sqlx::query( + "INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'download_detection', 'running', NOW())", + ) + .bind(job_id) + .bind(library_id) + .execute(&state.pool) + .await?; + + let pool = state.pool.clone(); + let library_name: Option = + sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1") + .bind(library_id) + .fetch_optional(&state.pool) + .await + .ok() + .flatten(); + + tokio::spawn(async move { + match process_download_detection(&pool, job_id, library_id).await { + Ok((total_series, found)) => { + notifications::notify( + pool, + notifications::NotificationEvent::DownloadDetectionCompleted { + library_name, + total_series, + found, + }, + ); + } + Err(e) => { + warn!("[DOWNLOAD_DETECTION] job {job_id} failed: {e}"); + let _ = sqlx::query( + "UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW() WHERE id = $1", + ) + .bind(job_id) + .bind(e.to_string()) + .execute(&pool) + .await; + notifications::notify( + pool, + notifications::NotificationEvent::DownloadDetectionFailed { + library_name, + error: e.to_string(), + }, + ); + } + } + }); + + Ok(Json(serde_json::json!({ + "id": job_id.to_string(), + "status": "running", + }))) +} + +// --------------------------------------------------------------------------- +// GET /download-detection/:id/report +// 
--------------------------------------------------------------------------- + +#[utoipa::path( + get, + path = "/download-detection/{id}/report", + tag = "download_detection", + params(("id" = String, Path, description = "Job UUID")), + responses( + (status = 200, body = DownloadDetectionReportDto), + (status = 404, description = "Job not found"), + ), + security(("Bearer" = [])) +)] +pub async fn get_detection_report( + State(state): State, + axum::extract::Path(job_id): axum::extract::Path, +) -> Result, ApiError> { + let row = sqlx::query( + "SELECT status, total_files FROM index_jobs WHERE id = $1 AND type = 'download_detection'", + ) + .bind(job_id) + .fetch_optional(&state.pool) + .await? + .ok_or_else(|| ApiError::not_found("job not found"))?; + + let job_status: String = row.get("status"); + let total_files: Option = row.get("total_files"); + + let counts = sqlx::query( + "SELECT status, COUNT(*) as cnt FROM download_detection_results WHERE job_id = $1 GROUP BY status", + ) + .bind(job_id) + .fetch_all(&state.pool) + .await?; + + let mut found = 0i64; + let mut not_found = 0i64; + let mut no_missing = 0i64; + let mut no_metadata = 0i64; + let mut errors = 0i64; + + for r in &counts { + let status: String = r.get("status"); + let cnt: i64 = r.get("cnt"); + match status.as_str() { + "found" => found = cnt, + "not_found" => not_found = cnt, + "no_missing" => no_missing = cnt, + "no_metadata" => no_metadata = cnt, + "error" => errors = cnt, + _ => {} + } + } + + Ok(Json(DownloadDetectionReportDto { + job_id, + status: job_status, + total_series: total_files.unwrap_or(0) as i64, + found, + not_found, + no_missing, + no_metadata, + errors, + })) +} + +// --------------------------------------------------------------------------- +// GET /download-detection/:id/results +// --------------------------------------------------------------------------- + +#[derive(Deserialize)] +pub struct ResultsQuery { + pub status: Option, +} + +#[utoipa::path( + get, + path = 
"/download-detection/{id}/results", + tag = "download_detection", + params( + ("id" = String, Path, description = "Job UUID"), + ("status" = Option, Query, description = "Filter by status"), + ), + responses( + (status = 200, body = Vec), + ), + security(("Bearer" = [])) +)] +pub async fn get_detection_results( + State(state): State, + axum::extract::Path(job_id): axum::extract::Path, + axum::extract::Query(query): axum::extract::Query, +) -> Result>, ApiError> { + let rows = if let Some(status_filter) = &query.status { + sqlx::query( + "SELECT id, series_name, status, missing_count, available_releases, error_message + FROM download_detection_results + WHERE job_id = $1 AND status = $2 + ORDER BY series_name", + ) + .bind(job_id) + .bind(status_filter) + .fetch_all(&state.pool) + .await? + } else { + sqlx::query( + "SELECT id, series_name, status, missing_count, available_releases, error_message + FROM download_detection_results + WHERE job_id = $1 + ORDER BY status, series_name", + ) + .bind(job_id) + .fetch_all(&state.pool) + .await? 
+ }; + + let results = rows + .iter() + .map(|row| { + let releases_json: Option = row.get("available_releases"); + let available_releases = releases_json.and_then(|v| { + serde_json::from_value::>(v).ok() + }); + DownloadDetectionResultDto { + id: row.get("id"), + series_name: row.get("series_name"), + status: row.get("status"), + missing_count: row.get("missing_count"), + available_releases, + error_message: row.get("error_message"), + } + }) + .collect(); + + Ok(Json(results)) +} + +// --------------------------------------------------------------------------- +// Background processing +// --------------------------------------------------------------------------- + +pub(crate) async fn process_download_detection( + pool: &PgPool, + job_id: Uuid, + library_id: Uuid, +) -> Result<(i32, i64), String> { + let (prowlarr_url, prowlarr_api_key, categories) = + prowlarr::load_prowlarr_config_internal(pool) + .await + .map_err(|e| e.message)?; + + // Fetch all series with their metadata link status + let all_series: Vec = sqlx::query_scalar( + r#" + SELECT DISTINCT COALESCE(NULLIF(series, ''), 'unclassified') + FROM books + WHERE library_id = $1 + ORDER BY 1 + "#, + ) + .bind(library_id) + .fetch_all(pool) + .await + .map_err(|e| e.to_string())?; + + let total = all_series.len() as i32; + sqlx::query("UPDATE index_jobs SET total_files = $2 WHERE id = $1") + .bind(job_id) + .bind(total) + .execute(pool) + .await + .map_err(|e| e.to_string())?; + + // Fetch approved metadata links for this library (series_name -> link_id) + let links: Vec<(String, Uuid)> = sqlx::query( + "SELECT series_name, id FROM external_metadata_links WHERE library_id = $1 AND status = 'approved'", + ) + .bind(library_id) + .fetch_all(pool) + .await + .map_err(|e| e.to_string())? 
+ .into_iter() + .map(|row| { + let series_name: String = row.get("series_name"); + let link_id: Uuid = row.get("id"); + (series_name, link_id) + }) + .collect(); + + let link_map: std::collections::HashMap = links.into_iter().collect(); + + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .map_err(|e| format!("failed to build HTTP client: {e}"))?; + + let mut processed = 0i32; + + for series_name in &all_series { + if is_job_cancelled(pool, job_id).await { + sqlx::query( + "UPDATE index_jobs SET status = 'cancelled', finished_at = NOW() WHERE id = $1", + ) + .bind(job_id) + .execute(pool) + .await + .map_err(|e| e.to_string())?; + return Ok((total, 0)); + } + + processed += 1; + let progress = (processed * 100 / total.max(1)).min(100); + sqlx::query( + "UPDATE index_jobs SET processed_files = $2, progress_percent = $3, current_file = $4 WHERE id = $1", + ) + .bind(job_id) + .bind(processed) + .bind(progress) + .bind(series_name) + .execute(pool) + .await + .ok(); + + // Skip unclassified + if series_name == "unclassified" { + insert_result(pool, job_id, library_id, series_name, "no_metadata", 0, None, None).await; + continue; + } + + // Check if this series has an approved metadata link + let link_id = match link_map.get(series_name) { + Some(id) => *id, + None => { + insert_result(pool, job_id, library_id, series_name, "no_metadata", 0, None, None).await; + continue; + } + }; + + // Fetch missing books for this series + let missing_rows = sqlx::query( + "SELECT volume_number FROM external_book_metadata WHERE link_id = $1 AND book_id IS NULL ORDER BY volume_number NULLS LAST", + ) + .bind(link_id) + .fetch_all(pool) + .await + .map_err(|e| e.to_string())?; + + if missing_rows.is_empty() { + insert_result(pool, job_id, library_id, series_name, "no_missing", 0, None, None).await; + continue; + } + + let missing_volumes: Vec = missing_rows + .iter() + .filter_map(|row| row.get::, _>("volume_number")) + .collect(); + 
let missing_count = missing_rows.len() as i32; + + // Search Prowlarr + match search_prowlarr_for_series( + &client, + &prowlarr_url, + &prowlarr_api_key, + &categories, + series_name, + &missing_volumes, + ) + .await + { + Ok(matched_releases) if !matched_releases.is_empty() => { + let releases_json = serde_json::to_value(&matched_releases).ok(); + insert_result( + pool, + job_id, + library_id, + series_name, + "found", + missing_count, + releases_json, + None, + ) + .await; + } + Ok(_) => { + insert_result(pool, job_id, library_id, series_name, "not_found", missing_count, None, None).await; + } + Err(e) => { + warn!("[DOWNLOAD_DETECTION] series '{series_name}': {e}"); + insert_result(pool, job_id, library_id, series_name, "error", missing_count, None, Some(&e)).await; + } + } + } + + // Build final stats + let counts = sqlx::query( + "SELECT status, COUNT(*) as cnt FROM download_detection_results WHERE job_id = $1 GROUP BY status", + ) + .bind(job_id) + .fetch_all(pool) + .await + .map_err(|e| e.to_string())?; + + let mut count_found = 0i64; + let mut count_not_found = 0i64; + let mut count_no_missing = 0i64; + let mut count_no_metadata = 0i64; + let mut count_errors = 0i64; + for row in &counts { + let s: String = row.get("status"); + let c: i64 = row.get("cnt"); + match s.as_str() { + "found" => count_found = c, + "not_found" => count_not_found = c, + "no_missing" => count_no_missing = c, + "no_metadata" => count_no_metadata = c, + "error" => count_errors = c, + _ => {} + } + } + + let stats = serde_json::json!({ + "total_series": total as i64, + "found": count_found, + "not_found": count_not_found, + "no_missing": count_no_missing, + "no_metadata": count_no_metadata, + "errors": count_errors, + }); + + sqlx::query( + "UPDATE index_jobs SET status = 'success', finished_at = NOW(), stats_json = $2, progress_percent = 100 WHERE id = $1", + ) + .bind(job_id) + .bind(&stats) + .execute(pool) + .await + .map_err(|e| e.to_string())?; + + info!( + 
"[DOWNLOAD_DETECTION] job={job_id} completed: {total} series, found={count_found}, not_found={count_not_found}, no_missing={count_no_missing}, no_metadata={count_no_metadata}, errors={count_errors}" + ); + + Ok((total, count_found)) +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +async fn search_prowlarr_for_series( + client: &reqwest::Client, + url: &str, + api_key: &str, + categories: &[i32], + series_name: &str, + missing_volumes: &[i32], +) -> Result, String> { + let query = format!("\"{}\"", series_name); + + let mut params: Vec<(&str, String)> = vec![ + ("query", query), + ("type", "search".to_string()), + ]; + for cat in categories { + params.push(("categories", cat.to_string())); + } + + let resp = client + .get(format!("{url}/api/v1/search")) + .query(¶ms) + .header("X-Api-Key", api_key) + .send() + .await + .map_err(|e| format!("Prowlarr request failed: {e}"))?; + + if !resp.status().is_success() { + let status = resp.status(); + let text = resp.text().await.unwrap_or_default(); + return Err(format!("Prowlarr returned {status}: {text}")); + } + + let raw_releases: Vec = resp + .json() + .await + .map_err(|e| format!("Failed to parse Prowlarr response: {e}"))?; + + let matched: Vec = raw_releases + .into_iter() + .filter_map(|r| { + let title_volumes = prowlarr::extract_volumes_from_title_pub(&r.title); + let matched_vols: Vec = title_volumes + .into_iter() + .filter(|v| missing_volumes.contains(v)) + .collect(); + if matched_vols.is_empty() { + None + } else { + Some(AvailableReleaseDto { + title: r.title, + size: r.size, + download_url: r.download_url, + indexer: r.indexer, + seeders: r.seeders, + matched_missing_volumes: matched_vols, + }) + } + }) + .collect(); + + Ok(matched) +} + +#[allow(clippy::too_many_arguments)] +async fn insert_result( + pool: &PgPool, + job_id: Uuid, + library_id: Uuid, + series_name: &str, + 
status: &str, + missing_count: i32, + available_releases: Option, + error_message: Option<&str>, +) { + let _ = sqlx::query( + r#" + INSERT INTO download_detection_results + (job_id, library_id, series_name, status, missing_count, available_releases, error_message) + VALUES ($1, $2, $3, $4, $5, $6, $7) + "#, + ) + .bind(job_id) + .bind(library_id) + .bind(series_name) + .bind(status) + .bind(missing_count) + .bind(&available_releases) + .bind(error_message) + .execute(pool) + .await; +} + +async fn is_job_cancelled(pool: &PgPool, job_id: Uuid) -> bool { + sqlx::query_scalar::<_, String>("SELECT status FROM index_jobs WHERE id = $1") + .bind(job_id) + .fetch_optional(pool) + .await + .ok() + .flatten() + .as_deref() + == Some("cancelled") +} diff --git a/apps/api/src/main.rs b/apps/api/src/main.rs index 083b87e..68c8c66 100644 --- a/apps/api/src/main.rs +++ b/apps/api/src/main.rs @@ -2,6 +2,7 @@ mod anilist; mod auth; mod authors; mod books; +mod download_detection; mod error; mod handlers; mod index_jobs; @@ -153,6 +154,9 @@ async fn main() -> anyhow::Result<()> { .route("/reading-status/push", axum::routing::post(reading_status_push::start_push)) .route("/reading-status/push/:id/report", get(reading_status_push::get_push_report)) .route("/reading-status/push/:id/results", get(reading_status_push::get_push_results)) + .route("/download-detection/start", axum::routing::post(download_detection::start_detection)) + .route("/download-detection/:id/report", get(download_detection::get_detection_report)) + .route("/download-detection/:id/results", get(download_detection::get_detection_results)) .merge(settings::settings_routes()) .route_layer(middleware::from_fn_with_state( state.clone(), diff --git a/apps/api/src/prowlarr.rs b/apps/api/src/prowlarr.rs index 814f6d4..d5a8091 100644 --- a/apps/api/src/prowlarr.rs +++ b/apps/api/src/prowlarr.rs @@ -85,6 +85,20 @@ struct ProwlarrConfig { categories: Option>, } +pub(crate) async fn load_prowlarr_config_internal( + pool: 
&sqlx::PgPool, +) -> Result<(String, String, Vec), ApiError> { + load_prowlarr_config(pool).await +} + +pub(crate) async fn check_prowlarr_configured(pool: &sqlx::PgPool) -> Result<(), ApiError> { + load_prowlarr_config(pool).await.map(|_| ()) +} + +pub(crate) fn extract_volumes_from_title_pub(title: &str) -> Vec { + extract_volumes_from_title(title) +} + async fn load_prowlarr_config( pool: &sqlx::PgPool, ) -> Result<(String, String, Vec), ApiError> { diff --git a/apps/backoffice/app/(app)/jobs/[id]/page.tsx b/apps/backoffice/app/(app)/jobs/[id]/page.tsx index b8c1980..c4eacae 100644 --- a/apps/backoffice/app/(app)/jobs/[id]/page.tsx +++ b/apps/backoffice/app/(app)/jobs/[id]/page.tsx @@ -2,7 +2,7 @@ export const dynamic = "force-dynamic"; import { notFound } from "next/navigation"; import Link from "next/link"; -import { apiFetch, getMetadataBatchReport, getMetadataBatchResults, getMetadataRefreshReport, getReadingStatusMatchReport, getReadingStatusMatchResults, getReadingStatusPushReport, getReadingStatusPushResults, MetadataBatchReportDto, MetadataBatchResultDto, MetadataRefreshReportDto, ReadingStatusMatchReportDto, ReadingStatusMatchResultDto, ReadingStatusPushReportDto, ReadingStatusPushResultDto } from "@/lib/api"; +import { apiFetch, getMetadataBatchReport, getMetadataBatchResults, getMetadataRefreshReport, getReadingStatusMatchReport, getReadingStatusMatchResults, getReadingStatusPushReport, getReadingStatusPushResults, getDownloadDetectionReport, getDownloadDetectionResults, MetadataBatchReportDto, MetadataBatchResultDto, MetadataRefreshReportDto, ReadingStatusMatchReportDto, ReadingStatusMatchResultDto, ReadingStatusPushReportDto, ReadingStatusPushResultDto, DownloadDetectionReportDto, DownloadDetectionResultDto } from "@/lib/api"; import { Card, CardHeader, CardTitle, CardDescription, CardContent, StatusBadge, JobTypeBadge, StatBox, ProgressBar @@ -142,12 +142,18 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { 
description: t("jobType.reading_status_pushDesc"), isThumbnailOnly: false, }, + download_detection: { + label: t("jobType.download_detectionLabel"), + description: t("jobType.download_detectionDesc"), + isThumbnailOnly: false, + }, }; const isMetadataBatch = job.type === "metadata_batch"; const isMetadataRefresh = job.type === "metadata_refresh"; const isReadingStatusMatch = job.type === "reading_status_match"; const isReadingStatusPush = job.type === "reading_status_push"; + const isDownloadDetection = job.type === "download_detection"; // Fetch batch report & results for metadata_batch jobs let batchReport: MetadataBatchReportDto | null = null; @@ -185,6 +191,16 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { ]); } + // Fetch download detection report & results + let downloadDetectionReport: DownloadDetectionReportDto | null = null; + let downloadDetectionResults: DownloadDetectionResultDto[] = []; + if (isDownloadDetection) { + [downloadDetectionReport, downloadDetectionResults] = await Promise.all([ + getDownloadDetectionReport(id).catch(() => null), + getDownloadDetectionResults(id, "found").catch(() => []), + ]); + } + const typeInfo = JOB_TYPE_INFO[job.type] ?? { label: job.type, description: null, @@ -213,6 +229,8 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { ? t("jobDetail.readingStatusMatch") : isReadingStatusPush ? t("jobDetail.readingStatusPush") + : isDownloadDetection + ? t("jobDetail.downloadDetection") : isThumbnailOnly ? t("jobType.thumbnail_rebuild") : isExtractingPages @@ -229,6 +247,8 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { ? t("jobDetail.readingStatusMatchDesc") : isReadingStatusPush ? t("jobDetail.readingStatusPushDesc") + : isDownloadDetection + ? t("jobDetail.downloadDetectionDesc") : isThumbnailOnly ? 
undefined : isExtractingPages @@ -290,7 +310,12 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { — {readingStatusPushReport.pushed} {t("jobDetail.pushed").toLowerCase()}, {readingStatusPushReport.no_books} {t("jobDetail.noBooks").toLowerCase()}, {readingStatusPushReport.errors} {t("jobDetail.errors").toLowerCase()} )} - {!isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && !isReadingStatusPush && job.stats_json && ( + {isDownloadDetection && downloadDetectionReport && ( + + — {downloadDetectionReport.found} {t("jobDetail.downloadFound").toLowerCase()}, {downloadDetectionReport.not_found} {t("jobDetail.downloadNotFound").toLowerCase()}, {downloadDetectionReport.errors} {t("jobDetail.errors").toLowerCase()} + + )} + {!isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && !isReadingStatusPush && !isDownloadDetection && job.stats_json && ( — {job.stats_json.scanned_files} {t("jobDetail.scanned").toLowerCase()}, {job.stats_json.indexed_files} {t("jobDetail.indexed").toLowerCase()} {job.stats_json.removed_files > 0 && `, ${job.stats_json.removed_files} ${t("jobDetail.removed").toLowerCase()}`} @@ -564,7 +589,7 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { )} {/* Index Statistics — index jobs only */} - {job.stats_json && !isThumbnailOnly && !isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && !isReadingStatusPush && ( + {job.stats_json && !isThumbnailOnly && !isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && !isReadingStatusPush && !isDownloadDetection && ( {t("jobDetail.indexStats")} @@ -938,6 +963,85 @@ export default async function JobDetailPage({ params }: JobDetailPageProps) { )} + {/* Download detection — summary report */} + {isDownloadDetection && downloadDetectionReport && ( + + + {t("jobDetail.downloadDetectionReport")} + {t("jobDetail.seriesAnalyzed", { count: String(downloadDetectionReport.total_series) })} + + +
+ + + + + 0 ? "error" : "default"} /> +
+
+
+ )} + + {/* Download detection — available releases per series */} + {isDownloadDetection && downloadDetectionResults.length > 0 && ( + + + {t("jobDetail.downloadAvailableReleases")} + {t("jobDetail.downloadAvailableReleasesDesc", { count: String(downloadDetectionResults.length) })} + + + {downloadDetectionResults.map((r) => ( +
+
+ {job.library_id ? ( + + {r.series_name} + + ) : ( + {r.series_name} + )} + + {t("jobDetail.downloadMissingCount", { count: String(r.missing_count) })} + +
+ {r.available_releases && r.available_releases.length > 0 && ( +
+ {r.available_releases.map((release, idx) => ( +
+
+

{release.title}

+
+ {release.indexer && ( + {release.indexer} + )} + {release.seeders != null && ( + {release.seeders} {t("prowlarr.columnSeeders").toLowerCase()} + )} + + {(release.size / 1024 / 1024).toFixed(0)} MB + +
+ {release.matched_missing_volumes.map((vol) => ( + + T.{vol} + + ))} +
+
+
+
+ ))} +
+ )} +
+ ))} +
+
+ )} + {/* Metadata batch results */} {isMetadataBatch && batchResults.length > 0 && ( diff --git a/apps/backoffice/app/(app)/jobs/page.tsx b/apps/backoffice/app/(app)/jobs/page.tsx index 5b6f88f..663e3f6 100644 --- a/apps/backoffice/app/(app)/jobs/page.tsx +++ b/apps/backoffice/app/(app)/jobs/page.tsx @@ -1,6 +1,6 @@ import { revalidatePath } from "next/cache"; import { redirect } from "next/navigation"; -import { listJobs, fetchLibraries, rebuildIndex, rebuildThumbnails, regenerateThumbnails, startMetadataBatch, startMetadataRefresh, startReadingStatusMatch, startReadingStatusPush, IndexJobDto, LibraryDto } from "@/lib/api"; +import { listJobs, fetchLibraries, rebuildIndex, rebuildThumbnails, regenerateThumbnails, startMetadataBatch, startMetadataRefresh, startReadingStatusMatch, startReadingStatusPush, startDownloadDetection, apiFetch, IndexJobDto, LibraryDto } from "@/lib/api"; import { JobsList } from "@/app/components/JobsList"; import { Card, CardHeader, CardTitle, CardDescription, CardContent, FormField, FormSelect } from "@/app/components/ui"; import { getServerTranslations } from "@/lib/i18n/server"; @@ -10,10 +10,12 @@ export const dynamic = "force-dynamic"; export default async function JobsPage({ searchParams }: { searchParams: Promise<{ highlight?: string }> }) { const { highlight } = await searchParams; const { t } = await getServerTranslations(); - const [jobs, libraries] = await Promise.all([ + const [jobs, libraries, prowlarrSettings] = await Promise.all([ listJobs().catch(() => [] as IndexJobDto[]), - fetchLibraries().catch(() => [] as LibraryDto[]) + fetchLibraries().catch(() => [] as LibraryDto[]), + apiFetch<{ url?: string }>("/settings/prowlarr").catch(() => null), ]); + const prowlarrConfigured = !!prowlarrSettings?.url; const libraryMap = new Map(libraries.map(l => [l.id, l.name])); const readingStatusLibraries = libraries.filter(l => l.reading_status_provider); @@ -179,6 +181,35 @@ export default async function JobsPage({ searchParams }: { 
searchParams: Promise } } + async function triggerDownloadDetection(formData: FormData) { + "use server"; + const libraryId = formData.get("library_id") as string; + if (libraryId) { + let result; + try { + result = await startDownloadDetection(libraryId); + } catch { + return; + } + revalidatePath("/jobs"); + redirect(`/jobs?highlight=${result.id}`); + } else { + // All libraries + const allLibraries = await fetchLibraries().catch(() => [] as LibraryDto[]); + let lastId: string | undefined; + for (const lib of allLibraries) { + try { + const result = await startDownloadDetection(lib.id); + if (result.status !== "already_running") lastId = result.id; + } catch { + // Skip libraries with errors (e.g. Prowlarr not configured) + } + } + revalidatePath("/jobs"); + redirect(lastId ? `/jobs?highlight=${lastId}` : "/jobs"); + } + } + return ( <>
@@ -349,6 +380,28 @@ export default async function JobsPage({ searchParams }: { searchParams: Promise
)} + {/* Download group — only shown if Prowlarr is configured */} + {prowlarrConfigured &&
+
+ + + + {t("jobs.groupProwlarr")} +
+
+ +
+
} + diff --git a/apps/backoffice/app/(app)/settings/components/TelegramCard.tsx b/apps/backoffice/app/(app)/settings/components/TelegramCard.tsx index a01215b..7d73280 100644 --- a/apps/backoffice/app/(app)/settings/components/TelegramCard.tsx +++ b/apps/backoffice/app/(app)/settings/components/TelegramCard.tsx @@ -17,6 +17,12 @@ export const DEFAULT_EVENTS = { metadata_batch_failed: true, metadata_refresh_completed: true, metadata_refresh_failed: true, + reading_status_match_completed: true, + reading_status_match_failed: true, + reading_status_push_completed: true, + reading_status_push_failed: true, + download_detection_completed: true, + download_detection_failed: true, }; export function TelegramCard({ handleUpdateSetting }: { handleUpdateSetting: (key: string, value: unknown) => Promise }) { @@ -191,6 +197,24 @@ export function TelegramCard({ handleUpdateSetting }: { handleUpdateSetting: (ke { key: "metadata_refresh_failed" as const, label: t("settings.eventRefreshFailed") }, ], }, + { + category: t("settings.eventCategoryReadingStatus"), + icon: "books" as const, + items: [ + { key: "reading_status_match_completed" as const, label: t("settings.eventMatchCompleted") }, + { key: "reading_status_match_failed" as const, label: t("settings.eventMatchFailed") }, + { key: "reading_status_push_completed" as const, label: t("settings.eventPushCompleted") }, + { key: "reading_status_push_failed" as const, label: t("settings.eventPushFailed") }, + ], + }, + { + category: t("settings.eventCategoryDownloadDetection"), + icon: "download" as const, + items: [ + { key: "download_detection_completed" as const, label: t("settings.eventCompleted") }, + { key: "download_detection_failed" as const, label: t("settings.eventFailed") }, + ], + }, ]).map(({ category, icon, items }) => (

diff --git a/apps/backoffice/app/components/JobRow.tsx b/apps/backoffice/app/components/JobRow.tsx index 96f18dd..74ecc55 100644 --- a/apps/backoffice/app/components/JobRow.tsx +++ b/apps/backoffice/app/components/JobRow.tsx @@ -24,6 +24,7 @@ interface JobRowProps { refreshed?: number; linked?: number; pushed?: number; + found?: number; } | null; progress_percent: number | null; processed_files: number | null; @@ -69,6 +70,7 @@ export function JobRow({ job, libraryName, highlighted, onCancel, onReplay, form const isMetadataRefresh = job.type === "metadata_refresh"; const isReadingStatusMatch = job.type === "reading_status_match"; const isReadingStatusPush = job.type === "reading_status_push"; + const isDownloadDetection = job.type === "download_detection"; // Thumbnails progress (Phase 2: extracting_pages + generating_thumbnails) const thumbInProgress = hasThumbnailPhase && (job.status === "running" || isPhase2); @@ -210,6 +212,23 @@ export function JobRow({ job, libraryName, highlighted, onCancel, onReplay, form )} + {/* Download detection: total series + found count */} + {isDownloadDetection && job.total_files != null && job.total_files > 0 && ( + + + + {job.total_files} + + + )} + {isDownloadDetection && job.stats_json?.found != null && job.stats_json.found > 0 && ( + + + + {job.stats_json.found} + + + )} {/* Errors */} {errors > 0 && ( @@ -229,7 +248,7 @@ export function JobRow({ job, libraryName, highlighted, onCancel, onReplay, form )} {/* Nothing to show */} - {indexed === 0 && removed === 0 && errors === 0 && scanned === 0 && !hasThumbnailPhase && !isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && !isReadingStatusPush && ( + {indexed === 0 && removed === 0 && errors === 0 && scanned === 0 && !hasThumbnailPhase && !isMetadataBatch && !isMetadataRefresh && !isReadingStatusMatch && !isReadingStatusPush && !isDownloadDetection && ( )}

diff --git a/apps/backoffice/app/components/ui/Icon.tsx b/apps/backoffice/app/components/ui/Icon.tsx index 9413a42..b6a803e 100644 --- a/apps/backoffice/app/components/ui/Icon.tsx +++ b/apps/backoffice/app/components/ui/Icon.tsx @@ -37,7 +37,8 @@ type IconName = | "authors" | "bell" | "link" - | "eye"; + | "eye" + | "download"; type IconSize = "sm" | "md" | "lg" | "xl"; @@ -94,6 +95,7 @@ const icons: Record = { bell: "M15 17h5l-1.405-1.405A2.032 2.032 0 0118 14.158V11a6.002 6.002 0 00-4-5.659V5a2 2 0 10-4 0v.341C7.67 6.165 6 8.388 6 11v3.159c0 .538-.214 1.055-.595 1.436L4 17h5m6 0v1a3 3 0 11-6 0v-1m6 0H9", link: "M13.828 10.172a4 4 0 00-5.656 0l-4 4a4 4 0 105.656 5.656l1.102-1.101m-.758-4.899a4 4 0 005.656 0l4-4a4 4 0 00-5.656-5.656l-1.1 1.1", eye: "M15 12a3 3 0 11-6 0 3 3 0 016 0zm-3-9C7.477 3 3.268 6.11 1.5 12c1.768 5.89 5.977 9 10.5 9s8.732-3.11 10.5-9C20.732 6.11 16.523 3 12 3z", + download: "M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-4l-4 4m0 0l-4-4m4 4V4", }; const colorClasses: Partial> = { diff --git a/apps/backoffice/lib/api.ts b/apps/backoffice/lib/api.ts index c512fe7..638746c 100644 --- a/apps/backoffice/lib/api.ts +++ b/apps/backoffice/lib/api.ts @@ -1141,6 +1141,53 @@ export async function getReadingStatusPushResults(jobId: string) { return apiFetch(`/reading-status/push/${jobId}/results`); } +export async function startDownloadDetection(libraryId: string) { + return apiFetch<{ id: string; status: string }>("/download-detection/start", { + method: "POST", + body: JSON.stringify({ library_id: libraryId }), + }); +} + +export type AvailableReleaseDto = { + title: string; + size: number; + download_url: string | null; + indexer: string | null; + seeders: number | null; + matched_missing_volumes: number[]; +}; + +export type DownloadDetectionReportDto = { + job_id: string; + status: string; + total_series: number; + found: number; + not_found: number; + no_missing: number; + no_metadata: number; + errors: number; +}; + +export type 
DownloadDetectionResultDto = { + id: string; + series_name: string; + status: "found" | "not_found" | "no_missing" | "no_metadata" | "error"; + missing_count: number; + available_releases: AvailableReleaseDto[] | null; + error_message: string | null; +}; + +export async function getDownloadDetectionReport(jobId: string) { + return apiFetch(`/download-detection/${jobId}/report`); +} + +export async function getDownloadDetectionResults(jobId: string, status?: string) { + const url = status + ? `/download-detection/${jobId}/results?status=${encodeURIComponent(status)}` + : `/download-detection/${jobId}/results`; + return apiFetch(url); +} + export type RefreshFieldDiff = { field: string; old?: unknown; diff --git a/apps/backoffice/lib/i18n/en.ts b/apps/backoffice/lib/i18n/en.ts index d28182a..ac44cbe 100644 --- a/apps/backoffice/lib/i18n/en.ts +++ b/apps/backoffice/lib/i18n/en.ts @@ -266,6 +266,9 @@ const en: Record = { "jobs.matchReadingStatusShort": "Auto-link unmatched series to the reading status provider", "jobs.pushReadingStatus": "Push reading statuses", "jobs.pushReadingStatusShort": "Push changed reading statuses to AniList (differential push)", + "jobs.groupProwlarr": "Download", + "jobs.downloadDetection": "Download detection", + "jobs.downloadDetectionShort": "Search Prowlarr for available releases matching missing volumes", // Jobs list "jobsList.id": "ID", @@ -290,6 +293,7 @@ const en: Record = { "jobRow.seriesTotal": "{{count}} series total", "jobRow.seriesLinked": "{{count}} series linked", "jobRow.seriesPushed": "{{count}} series pushed", + "jobRow.downloadFound": "{{count}} releases found", "jobRow.errors": "{{count}} errors", "jobRow.view": "View", "jobRow.replay": "Replay", @@ -381,6 +385,16 @@ const en: Record = { "jobDetail.pushed": "Pushed", "jobDetail.skipped": "Skipped", "jobDetail.noBooks": "No books", + "jobDetail.downloadDetection": "Download detection", + "jobDetail.downloadDetectionDesc": "Scanning series with missing volumes via 
Prowlarr", + "jobDetail.downloadDetectionReport": "Detection report", + "jobDetail.downloadFound": "Available", + "jobDetail.downloadNotFound": "Not found", + "jobDetail.downloadNoMissing": "Complete", + "jobDetail.downloadNoMetadata": "No metadata", + "jobDetail.downloadAvailableReleases": "Available releases", + "jobDetail.downloadAvailableReleasesDesc": "{{count}} series with at least one release found", + "jobDetail.downloadMissingCount": "{{count}} missing", // Job types "jobType.rebuild": "Indexing", @@ -413,6 +427,9 @@ const en: Record = { "jobType.reading_status_push": "Reading status push", "jobType.reading_status_pushLabel": "Reading status push", "jobType.reading_status_pushDesc": "Differentially pushes changed reading statuses (or new series) to AniList.", + "jobType.download_detection": "Download detection", + "jobType.download_detectionLabel": "Available downloads detection", + "jobType.download_detectionDesc": "Scans series with missing volumes and queries Prowlarr to find available releases. Downloads nothing — produces a report of opportunities only.", // Status badges "statusBadge.extracting_pages": "Extracting pages", @@ -647,6 +664,12 @@ const en: Record = { "settings.eventBatchFailed": "Batch failed", "settings.eventRefreshCompleted": "Refresh completed", "settings.eventRefreshFailed": "Refresh failed", + "settings.eventCategoryReadingStatus": "Reading status", + "settings.eventMatchCompleted": "Sync completed", + "settings.eventMatchFailed": "Sync failed", + "settings.eventPushCompleted": "Push completed", + "settings.eventPushFailed": "Push failed", + "settings.eventCategoryDownloadDetection": "Download detection", "settings.telegramHelp": "How to get the required information?", "settings.telegramHelpBot": "Open Telegram, search for @BotFather, send /newbot and follow the instructions. 
Copy the token it gives you.", "settings.telegramHelpChat": "Send a message to your bot, then open https://api.telegram.org/bot<TOKEN>/getUpdates in your browser. The chat id is in message.chat.id.", diff --git a/apps/backoffice/lib/i18n/fr.ts b/apps/backoffice/lib/i18n/fr.ts index 9e07087..9eb4200 100644 --- a/apps/backoffice/lib/i18n/fr.ts +++ b/apps/backoffice/lib/i18n/fr.ts @@ -264,6 +264,9 @@ const fr = { "jobs.matchReadingStatusShort": "Lier automatiquement les séries non associées au provider", "jobs.pushReadingStatus": "Push des états de lecture", "jobs.pushReadingStatusShort": "Envoyer les états de lecture modifiés vers AniList (push différentiel)", + "jobs.groupProwlarr": "Téléchargement", + "jobs.downloadDetection": "Détection de téléchargements", + "jobs.downloadDetectionShort": "Cherche sur Prowlarr les releases disponibles pour les volumes manquants", // Jobs list "jobsList.id": "ID", @@ -288,6 +291,7 @@ const fr = { "jobRow.seriesTotal": "{{count}} séries au total", "jobRow.seriesLinked": "{{count}} séries liées", "jobRow.seriesPushed": "{{count}} séries synchronisées", + "jobRow.downloadFound": "{{count}} releases trouvées", "jobRow.errors": "{{count}} erreurs", "jobRow.view": "Voir", "jobRow.replay": "Rejouer", @@ -379,6 +383,16 @@ const fr = { "jobDetail.pushed": "Envoyés", "jobDetail.skipped": "Ignorés", "jobDetail.noBooks": "Sans livres", + "jobDetail.downloadDetection": "Détection de téléchargements", + "jobDetail.downloadDetectionDesc": "Analyse des séries avec volumes manquants via Prowlarr", + "jobDetail.downloadDetectionReport": "Rapport de détection", + "jobDetail.downloadFound": "Disponibles", + "jobDetail.downloadNotFound": "Non trouvés", + "jobDetail.downloadNoMissing": "Complets", + "jobDetail.downloadNoMetadata": "Sans métadonnées", + "jobDetail.downloadAvailableReleases": "Releases disponibles", + "jobDetail.downloadAvailableReleasesDesc": "{{count}} série(s) avec au moins une release trouvée", + "jobDetail.downloadMissingCount": 
"{{count}} manquant(s)", // Job types "jobType.rebuild": "Indexation", @@ -411,6 +425,9 @@ const fr = { "jobType.reading_status_push": "Push statut lecture", "jobType.reading_status_pushLabel": "Push des états de lecture", "jobType.reading_status_pushDesc": "Envoie les états de lecture modifiés (ou nouvelles séries) vers AniList de façon différentielle.", + "jobType.download_detection": "Détection téléchargements", + "jobType.download_detectionLabel": "Détection de téléchargements disponibles", + "jobType.download_detectionDesc": "Analyse les séries avec des volumes manquants et interroge Prowlarr pour trouver les releases disponibles. Ne télécharge rien — produit uniquement un rapport des opportunités.", // Status badges "statusBadge.extracting_pages": "Extraction des pages", @@ -645,6 +662,12 @@ const fr = { "settings.eventBatchFailed": "Batch échoué", "settings.eventRefreshCompleted": "Rafraîchissement terminé", "settings.eventRefreshFailed": "Rafraîchissement échoué", + "settings.eventCategoryReadingStatus": "État de lecture", + "settings.eventMatchCompleted": "Synchro. terminée", + "settings.eventMatchFailed": "Synchro. échouée", + "settings.eventPushCompleted": "Push terminé", + "settings.eventPushFailed": "Push échoué", + "settings.eventCategoryDownloadDetection": "Détection téléchargements", "settings.telegramHelp": "Comment obtenir les informations ?", "settings.telegramHelpBot": "Ouvrez Telegram, recherchez @BotFather, envoyez /newbot et suivez les instructions. Copiez le token fourni.", "settings.telegramHelpChat": "Envoyez un message à votre bot, puis ouvrez https://api.telegram.org/bot<TOKEN>/getUpdates dans votre navigateur. 
Le chat id apparaît dans message.chat.id.", diff --git a/crates/notifications/src/lib.rs b/crates/notifications/src/lib.rs index 8a4d683..72fcecf 100644 --- a/crates/notifications/src/lib.rs +++ b/crates/notifications/src/lib.rs @@ -51,6 +51,10 @@ pub struct EventToggles { pub reading_status_push_completed: bool, #[serde(default = "default_true")] pub reading_status_push_failed: bool, + #[serde(default = "default_true")] + pub download_detection_completed: bool, + #[serde(default = "default_true")] + pub download_detection_failed: bool, } fn default_true() -> bool { @@ -75,6 +79,8 @@ fn default_events() -> EventToggles { reading_status_match_failed: true, reading_status_push_completed: true, reading_status_push_failed: true, + download_detection_completed: true, + download_detection_failed: true, } } @@ -280,6 +286,16 @@ pub enum NotificationEvent { library_name: Option<String>, error: String, }, + // Download detection (Prowlarr search for missing volumes) + DownloadDetectionCompleted { + library_name: Option<String>, + total_series: i32, + found: i64, + }, + DownloadDetectionFailed { + library_name: Option<String>, + error: String, + }, } /// Classify an indexer job_type string into the right event constructor category.
@@ -557,6 +573,37 @@ fn format_event(event: &NotificationEvent) -> String { ] .join("\n") } + NotificationEvent::DownloadDetectionCompleted { + library_name, + total_series, + found, + } => { + let lib = library_name.as_deref().unwrap_or("All libraries"); + [ + format!("✅ Download detection completed"), + String::new(), + format!("📂 Library: {lib}"), + String::new(), + format!("📊 Results"), + format!(" 📥 Available: {found} / {total_series} series"), + ] + .join("\n") + } + NotificationEvent::DownloadDetectionFailed { + library_name, + error, + } => { + let lib = library_name.as_deref().unwrap_or("All libraries"); + let err = truncate(error, 200); + [ + format!("🚨 Download detection failed"), + String::new(), + format!("📂 Library: {lib}"), + String::new(), + format!("💬 {err}"), + ] + .join("\n") + } } } @@ -601,6 +648,8 @@ fn is_event_enabled(config: &TelegramConfig, event: &NotificationEvent) -> bool NotificationEvent::ReadingStatusMatchFailed { .. } => config.events.reading_status_match_failed, NotificationEvent::ReadingStatusPushCompleted { .. } => config.events.reading_status_push_completed, NotificationEvent::ReadingStatusPushFailed { .. } => config.events.reading_status_push_failed, + NotificationEvent::DownloadDetectionCompleted { .. } => config.events.download_detection_completed, + NotificationEvent::DownloadDetectionFailed { .. } => config.events.download_detection_failed, } } diff --git a/docs/FEATURES.md b/docs/FEATURES.md index 67f017e..08bca10 100644 --- a/docs/FEATURES.md +++ b/docs/FEATURES.md @@ -150,6 +150,29 @@ ### Field Locking - Individual book fields can be locked to prevent external sync from overwriting manual edits +### AniList Reading Status Sync + +Integration with AniList to synchronize reading progress in both directions for linked series. 
+ +#### Configuration +- AniList user ID required for pull/push operations +- Configured per library in the reading status provider settings +- Auto-push schedule configurable per library: `manual`, `hourly`, `daily`, `weekly` + +#### Reading Status Match (`reading_status_match`) +- Pull reading progress from AniList and update local book statuses +- Maps AniList list status: `PLANNING` → `unread`, `CURRENT` → `reading`, `COMPLETED` → `read` +- Detailed per-series report: matched, updated, skipped, errors +- Rate limit handling: waits 10s and retries once on HTTP 429, aborts on second 429 + +#### Reading Status Push (`reading_status_push`) +- Differential push: only syncs series that changed since last push, have new books, or have never been synced +- Maps local status to AniList: `unread` → `PLANNING`, `reading` → `CURRENT`, `read` → `COMPLETED` +- Never auto-completes a series on AniList based solely on owned books (requires all books read) +- Per-series result tracking: pushed, skipped, no_books, error +- Same 429 retry logic as `reading_status_match` +- Auto-push schedule is checked every minute by the indexer scheduler + --- ## External Integrations @@ -178,7 +201,7 @@ - Test connection button in settings ### Granular Event Toggles -12 individually configurable notification events grouped by category: +18 individually configurable notification events grouped by category: | Category | Events | |----------|--------| @@ -186,6 +209,8 @@ | Thumbnails | `thumbnail_completed`, `thumbnail_failed`, `thumbnail_cancelled` | | Conversion | `conversion_completed`, `conversion_failed`, `conversion_cancelled` | | Metadata | `metadata_approved`, `metadata_batch_completed`, `metadata_refresh_completed` | +| Reading status | `reading_status_match_completed`, `reading_status_match_failed`, `reading_status_push_completed`, `reading_status_push_failed` | +| Download detection | `download_detection_completed`, `download_detection_failed` | ### Thumbnail Images in Notifications - Book cover thumbnails attached to applicable notifications (conversion, metadata
approval) @@ -233,6 +257,9 @@ | `cbr_to_cbz` | Convert RAR to ZIP | | `metadata_batch` | Auto-match series to metadata | | `metadata_refresh` | Update approved metadata links | +| `reading_status_match` | Pull reading progress from AniList to local | +| `reading_status_push` | Differential push of reading statuses to AniList | +| `download_detection` | Search Prowlarr for releases matching missing volumes | ### Job Lifecycle - Status flow: `pending` → `running` → `success` | `failed` | `cancelled` diff --git a/infra/migrations/0060_add_download_detection_job.sql b/infra/migrations/0060_add_download_detection_job.sql new file mode 100644 index 0000000..9b61abe --- /dev/null +++ b/infra/migrations/0060_add_download_detection_job.sql @@ -0,0 +1,20 @@ +ALTER TABLE index_jobs + DROP CONSTRAINT IF EXISTS index_jobs_type_check, + ADD CONSTRAINT index_jobs_type_check + CHECK (type IN ('scan', 'rebuild', 'full_rebuild', 'rescan', 'thumbnail_rebuild', 'thumbnail_regenerate', 'cbr_to_cbz', 'metadata_batch', 'metadata_refresh', 'reading_status_match', 'reading_status_push', 'download_detection')); + +CREATE TABLE download_detection_results ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + job_id UUID NOT NULL REFERENCES index_jobs(id) ON DELETE CASCADE, + library_id UUID NOT NULL, + series_name TEXT NOT NULL, + -- 'found' | 'not_found' | 'no_missing' | 'no_metadata' | 'error' + status TEXT NOT NULL, + missing_count INTEGER NOT NULL DEFAULT 0, + -- JSON array of available Prowlarr releases (simplified) + available_releases JSONB, + error_message TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX download_detection_results_job_id_idx ON download_detection_results(job_id);