feat: table available_downloads découplée des jobs de détection

- Nouvelle table available_downloads (library_id, series_name) unique comme source de vérité pour les téléchargements disponibles
- Les jobs de détection font UPSERT (ajout/mise à jour) et DELETE (séries complètes ou sans résultat)
- Après import, mise à jour ciblée : retire les volumes importés des releases, supprime l'entrée si plus de releases
- Migration avec import des données existantes depuis detection_results
- Endpoint latest-found simplifié : une seule query sur la table

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -352,13 +352,20 @@ pub struct LatestFoundPerLibraryDto {
|
||||
#[schema(value_type = String)]
|
||||
pub library_id: Uuid,
|
||||
pub library_name: String,
|
||||
#[schema(value_type = String)]
|
||||
pub job_id: Uuid,
|
||||
pub job_date: String,
|
||||
pub results: Vec<DownloadDetectionResultDto>,
|
||||
pub results: Vec<AvailableDownloadDto>,
|
||||
}
|
||||
|
||||
/// Returns "found" results from the latest detection job per library.
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct AvailableDownloadDto {
|
||||
#[schema(value_type = String)]
|
||||
pub id: Uuid,
|
||||
pub series_name: String,
|
||||
pub missing_count: i32,
|
||||
pub available_releases: Option<Vec<AvailableReleaseDto>>,
|
||||
pub updated_at: String,
|
||||
}
|
||||
|
||||
/// Returns available downloads per library from the `available_downloads` table.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/download-detection/latest-found",
|
||||
@@ -371,67 +378,42 @@ pub struct LatestFoundPerLibraryDto {
|
||||
pub async fn get_latest_found(
|
||||
State(state): State<AppState>,
|
||||
) -> Result<Json<Vec<LatestFoundPerLibraryDto>>, ApiError> {
|
||||
// Get latest completed detection job per library
|
||||
let jobs = sqlx::query(
|
||||
"SELECT DISTINCT ON (j.library_id) j.id, j.library_id, j.created_at, l.name as library_name \
|
||||
FROM index_jobs j \
|
||||
JOIN libraries l ON l.id = j.library_id \
|
||||
WHERE j.type = 'download_detection' AND j.status = 'success' \
|
||||
ORDER BY j.library_id, j.created_at DESC",
|
||||
)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
|
||||
let mut output = Vec::new();
|
||||
|
||||
for job in &jobs {
|
||||
let job_id: Uuid = job.get("id");
|
||||
let library_id: Uuid = job.get("library_id");
|
||||
let library_name: String = job.get("library_name");
|
||||
let created_at: chrono::DateTime<chrono::Utc> = job.get("created_at");
|
||||
|
||||
let rows = sqlx::query(
|
||||
"SELECT id, series_name, status, missing_count, available_releases, error_message \
|
||||
FROM download_detection_results \
|
||||
WHERE job_id = $1 AND status = 'found' \
|
||||
ORDER BY series_name",
|
||||
"SELECT ad.id, ad.library_id, ad.series_name, ad.missing_count, ad.available_releases, ad.updated_at, \
|
||||
l.name as library_name \
|
||||
FROM available_downloads ad \
|
||||
JOIN libraries l ON l.id = ad.library_id \
|
||||
ORDER BY l.name, ad.series_name",
|
||||
)
|
||||
.bind(job_id)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
|
||||
if rows.is_empty() {
|
||||
continue;
|
||||
}
|
||||
let mut libs: std::collections::BTreeMap<Uuid, LatestFoundPerLibraryDto> = std::collections::BTreeMap::new();
|
||||
|
||||
let results = rows
|
||||
.iter()
|
||||
.map(|row| {
|
||||
for row in &rows {
|
||||
let library_id: Uuid = row.get("library_id");
|
||||
let updated_at: chrono::DateTime<chrono::Utc> = row.get("updated_at");
|
||||
let releases_json: Option<serde_json::Value> = row.get("available_releases");
|
||||
let available_releases = releases_json.and_then(|v| {
|
||||
serde_json::from_value::<Vec<AvailableReleaseDto>>(v).ok()
|
||||
});
|
||||
DownloadDetectionResultDto {
|
||||
|
||||
let entry = libs.entry(library_id).or_insert_with(|| LatestFoundPerLibraryDto {
|
||||
library_id,
|
||||
library_name: row.get("library_name"),
|
||||
results: Vec::new(),
|
||||
});
|
||||
|
||||
entry.results.push(AvailableDownloadDto {
|
||||
id: row.get("id"),
|
||||
series_name: row.get("series_name"),
|
||||
status: row.get("status"),
|
||||
missing_count: row.get("missing_count"),
|
||||
available_releases,
|
||||
error_message: row.get("error_message"),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
output.push(LatestFoundPerLibraryDto {
|
||||
library_id,
|
||||
library_name,
|
||||
job_id,
|
||||
job_date: created_at.to_rfc3339(),
|
||||
results,
|
||||
updated_at: updated_at.to_rfc3339(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Json(output))
|
||||
Ok(Json(libs.into_values().collect()))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -546,6 +528,9 @@ pub(crate) async fn process_download_detection(
|
||||
|
||||
if missing_rows.is_empty() {
|
||||
insert_result(pool, job_id, library_id, series_name, "no_missing", 0, None, None).await;
|
||||
// Series is complete, remove from available_downloads
|
||||
let _ = sqlx::query("DELETE FROM available_downloads WHERE library_id = $1 AND series_name = $2")
|
||||
.bind(library_id).bind(series_name).execute(pool).await;
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -575,13 +560,38 @@ pub(crate) async fn process_download_detection(
|
||||
series_name,
|
||||
"found",
|
||||
missing_count,
|
||||
releases_json,
|
||||
releases_json.clone(),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
// UPSERT into available_downloads
|
||||
if let Some(ref rj) = releases_json {
|
||||
let _ = sqlx::query(
|
||||
"INSERT INTO available_downloads (library_id, series_name, missing_count, available_releases, updated_at) \
|
||||
VALUES ($1, $2, $3, $4, NOW()) \
|
||||
ON CONFLICT (library_id, series_name) DO UPDATE SET \
|
||||
missing_count = EXCLUDED.missing_count, \
|
||||
available_releases = EXCLUDED.available_releases, \
|
||||
updated_at = NOW()",
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(series_name)
|
||||
.bind(missing_count)
|
||||
.bind(rj)
|
||||
.execute(pool)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
Ok(_) => {
|
||||
insert_result(pool, job_id, library_id, series_name, "not_found", missing_count, None, None).await;
|
||||
// Remove from available_downloads if previously found
|
||||
let _ = sqlx::query(
|
||||
"DELETE FROM available_downloads WHERE library_id = $1 AND series_name = $2",
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(series_name)
|
||||
.execute(pool)
|
||||
.await;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("[DOWNLOAD_DETECTION] series '{series_name}': {e}");
|
||||
|
||||
@@ -448,6 +448,57 @@ async fn process_torrent_import(pool: PgPool, torrent_id: Uuid) -> anyhow::Resul
|
||||
});
|
||||
}
|
||||
|
||||
// Update available_downloads: remove imported volumes
|
||||
let imported_vols: Vec<i32> = imported.iter().map(|f| f.volume).collect();
|
||||
if !imported_vols.is_empty() {
|
||||
let ad_row = sqlx::query(
|
||||
"SELECT id, missing_count, available_releases FROM available_downloads \
|
||||
WHERE library_id = $1 AND LOWER(series_name) = LOWER($2)",
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(&series_name)
|
||||
.fetch_optional(&pool)
|
||||
.await
|
||||
.unwrap_or(None);
|
||||
|
||||
if let Some(ad_row) = ad_row {
|
||||
let ad_id: Uuid = ad_row.get("id");
|
||||
let releases_json: Option<serde_json::Value> = ad_row.get("available_releases");
|
||||
if let Some(serde_json::Value::Array(releases)) = releases_json {
|
||||
let updated: Vec<serde_json::Value> = releases.into_iter().filter_map(|mut release| {
|
||||
if let Some(matched) = release.get_mut("matched_missing_volumes") {
|
||||
if let Some(arr) = matched.as_array() {
|
||||
let filtered: Vec<serde_json::Value> = arr.iter()
|
||||
.filter(|v| !imported_vols.contains(&(v.as_i64().unwrap_or(-1) as i32)))
|
||||
.cloned()
|
||||
.collect();
|
||||
if filtered.is_empty() {
|
||||
return None;
|
||||
}
|
||||
*matched = serde_json::Value::Array(filtered);
|
||||
}
|
||||
}
|
||||
Some(release)
|
||||
}).collect();
|
||||
|
||||
if updated.is_empty() {
|
||||
let _ = sqlx::query("DELETE FROM available_downloads WHERE id = $1")
|
||||
.bind(ad_id).execute(&pool).await;
|
||||
} else {
|
||||
let new_missing = ad_row.get::<i32, _>("missing_count") - imported_vols.len() as i32;
|
||||
let _ = sqlx::query(
|
||||
"UPDATE available_downloads SET available_releases = $1, missing_count = GREATEST($2, 0), updated_at = NOW() WHERE id = $3",
|
||||
)
|
||||
.bind(serde_json::Value::Array(updated))
|
||||
.bind(new_missing)
|
||||
.bind(ad_id)
|
||||
.execute(&pool)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up: remove source directory if it's a subdirectory of /downloads
|
||||
let physical_content = remap_downloads_path(&content_path);
|
||||
let downloads_root = remap_downloads_path("/downloads");
|
||||
|
||||
@@ -348,7 +348,7 @@ function AvailableLibraryCard({ lib }: { lib: LatestFoundPerLibraryDto }) {
|
||||
<div className="flex flex-col sm:flex-row sm:items-center sm:justify-between gap-1">
|
||||
<CardTitle className="text-sm sm:text-base">{lib.library_name}</CardTitle>
|
||||
<span className="text-[10px] sm:text-xs text-muted-foreground">
|
||||
{t("downloads.detectedSeries", { count: lib.results.length })} — {formatDate(lib.job_date)}
|
||||
{t("downloads.detectedSeries", { count: lib.results.length })}
|
||||
</span>
|
||||
</div>
|
||||
</CardHeader>
|
||||
|
||||
@@ -1184,12 +1184,18 @@ export type DownloadDetectionResultDto = {
|
||||
error_message: string | null;
|
||||
};
|
||||
|
||||
export type AvailableDownloadDto = {
|
||||
id: string;
|
||||
series_name: string;
|
||||
missing_count: number;
|
||||
available_releases: AvailableReleaseDto[] | null;
|
||||
updated_at: string;
|
||||
};
|
||||
|
||||
export type LatestFoundPerLibraryDto = {
|
||||
library_id: string;
|
||||
library_name: string;
|
||||
job_id: string;
|
||||
job_date: string;
|
||||
results: DownloadDetectionResultDto[];
|
||||
results: AvailableDownloadDto[];
|
||||
};
|
||||
|
||||
export async function getDownloadDetectionReport(jobId: string) {
|
||||
|
||||
20 lines — infra/migrations/0067_add_available_downloads.sql (new file)
@@ -0,0 +1,20 @@
|
||||
-- Source-of-truth table for currently available downloads, decoupled from
-- individual detection jobs: detection jobs UPSERT/DELETE rows, imports
-- trim releases in place.
CREATE TABLE available_downloads (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    library_id UUID NOT NULL REFERENCES libraries(id) ON DELETE CASCADE,
    series_name TEXT NOT NULL,
    missing_count INTEGER NOT NULL DEFAULT 0,
    available_releases JSONB,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- One row per (library, series); this is the conflict target for the
    -- detection-job UPSERTs.
    UNIQUE(library_id, series_name)
);

-- NOTE(review): the UNIQUE(library_id, series_name) constraint already backs
-- an index whose leading column is library_id, so this index is presumably
-- redundant — confirm before replicating the pattern in later migrations.
CREATE INDEX idx_available_downloads_library ON available_downloads(library_id);

-- Migrate existing detection results into the new table
-- (DISTINCT ON + ORDER BY ... created_at DESC keeps the most recent
-- 'found' row per (library, series); ON CONFLICT makes a re-run harmless).
INSERT INTO available_downloads (library_id, series_name, missing_count, available_releases, updated_at)
SELECT DISTINCT ON (library_id, series_name)
library_id, series_name, missing_count, available_releases, created_at
FROM download_detection_results
WHERE status = 'found' AND available_releases IS NOT NULL
ORDER BY library_id, series_name, created_at DESC
ON CONFLICT DO NOTHING;
|
||||
Reference in New Issue
Block a user