feat: unify job creation — tous les types créent N jobs par librairie côté backend
- metadata_batch, metadata_refresh, reading_status_match, reading_status_push, download_detection : library_id devient optionnel, la boucle passe côté API
- rebuild (index_jobs.rs), thumbnail_rebuild, thumbnail_regenerate : même logique, suppression du job unique library_id=NULL au profit d'un job par lib
- Backoffice simplifié : suppression des boucles frontend, les Server Actions appellent directement l'API sans library_id pour le cas "toutes les librairies"

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -13,7 +13,7 @@ use crate::{error::ApiError, prowlarr, state::AppState};
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct StartDownloadDetectionRequest {
|
||||
pub library_id: String,
|
||||
pub library_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
@@ -70,8 +70,68 @@ pub async fn start_detection(
|
||||
State(state): State<AppState>,
|
||||
Json(body): Json<StartDownloadDetectionRequest>,
|
||||
) -> Result<Json<serde_json::Value>, ApiError> {
|
||||
// All libraries case
|
||||
if body.library_id.is_none() {
|
||||
prowlarr::check_prowlarr_configured(&state.pool).await?;
|
||||
let library_ids: Vec<Uuid> = sqlx::query_scalar(
|
||||
"SELECT id FROM libraries ORDER BY name"
|
||||
)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
let mut last_job_id: Option<Uuid> = None;
|
||||
for library_id in library_ids {
|
||||
let existing: Option<Uuid> = sqlx::query_scalar(
|
||||
"SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'download_detection' AND status IN ('pending', 'running') LIMIT 1",
|
||||
)
|
||||
.bind(library_id)
|
||||
.fetch_optional(&state.pool)
|
||||
.await?;
|
||||
if existing.is_some() { continue; }
|
||||
let job_id = Uuid::new_v4();
|
||||
sqlx::query(
|
||||
"INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'download_detection', 'running', NOW())",
|
||||
)
|
||||
.bind(job_id)
|
||||
.bind(library_id)
|
||||
.execute(&state.pool)
|
||||
.await?;
|
||||
let pool = state.pool.clone();
|
||||
let library_name: Option<String> = sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1")
|
||||
.bind(library_id)
|
||||
.fetch_optional(&state.pool)
|
||||
.await
|
||||
.ok()
|
||||
.flatten();
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = process_download_detection(&pool, job_id, library_id).await {
|
||||
warn!("[DOWNLOAD_DETECTION] job {job_id} failed: {e}");
|
||||
let _ = sqlx::query(
|
||||
"UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW() WHERE id = $1",
|
||||
)
|
||||
.bind(job_id)
|
||||
.bind(e.to_string())
|
||||
.execute(&pool)
|
||||
.await;
|
||||
notifications::notify(
|
||||
pool,
|
||||
notifications::NotificationEvent::DownloadDetectionFailed {
|
||||
library_name,
|
||||
error: e.to_string(),
|
||||
},
|
||||
);
|
||||
}
|
||||
});
|
||||
last_job_id = Some(job_id);
|
||||
}
|
||||
return Ok(Json(serde_json::json!({
|
||||
"id": last_job_id.map(|id| id.to_string()),
|
||||
"status": "started",
|
||||
})));
|
||||
}
|
||||
|
||||
let library_id: Uuid = body
|
||||
.library_id
|
||||
.unwrap()
|
||||
.parse()
|
||||
.map_err(|_| ApiError::bad_request("invalid library_id"))?;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user