feat: unify job creation — tous les types créent N jobs par librairie côté backend

- metadata_batch, metadata_refresh, reading_status_match, reading_status_push,
  download_detection : library_id devient optionnel, la boucle passe côté API
- rebuild (index_jobs.rs), thumbnail_rebuild, thumbnail_regenerate : même logique,
  suppression du job unique library_id=NULL au profit d'un job par lib
- Backoffice simplifié : suppression des boucles frontend, les Server Actions
  appellent directement l'API sans library_id pour le cas "toutes les librairies"

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-26 09:16:24 +01:00
parent 8f48c6a876
commit f08fc6b6a6
9 changed files with 436 additions and 149 deletions

View File

@@ -13,7 +13,7 @@ use crate::{error::ApiError, prowlarr, state::AppState};
/// Request body for POST /download-detection/start.
#[derive(Deserialize, ToSchema)]
pub struct StartDownloadDetectionRequest {
// NOTE(review): unified-diff rendering — the `String` line is the pre-change
// declaration and the `Option<String>` line below is the post-change one.
// After this commit the field is optional: `None` means "run detection for
// every library" (see the `body.library_id.is_none()` branch in start_detection).
pub library_id: String,
pub library_id: Option<String>,
}
#[derive(Serialize, ToSchema)]
@@ -70,8 +70,68 @@ pub async fn start_detection(
State(state): State<AppState>,
Json(body): Json<StartDownloadDetectionRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
// All libraries case
if body.library_id.is_none() {
prowlarr::check_prowlarr_configured(&state.pool).await?;
let library_ids: Vec<Uuid> = sqlx::query_scalar(
"SELECT id FROM libraries ORDER BY name"
)
.fetch_all(&state.pool)
.await?;
let mut last_job_id: Option<Uuid> = None;
for library_id in library_ids {
let existing: Option<Uuid> = sqlx::query_scalar(
"SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'download_detection' AND status IN ('pending', 'running') LIMIT 1",
)
.bind(library_id)
.fetch_optional(&state.pool)
.await?;
if existing.is_some() { continue; }
let job_id = Uuid::new_v4();
sqlx::query(
"INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'download_detection', 'running', NOW())",
)
.bind(job_id)
.bind(library_id)
.execute(&state.pool)
.await?;
let pool = state.pool.clone();
let library_name: Option<String> = sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1")
.bind(library_id)
.fetch_optional(&state.pool)
.await
.ok()
.flatten();
tokio::spawn(async move {
if let Err(e) = process_download_detection(&pool, job_id, library_id).await {
warn!("[DOWNLOAD_DETECTION] job {job_id} failed: {e}");
let _ = sqlx::query(
"UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW() WHERE id = $1",
)
.bind(job_id)
.bind(e.to_string())
.execute(&pool)
.await;
notifications::notify(
pool,
notifications::NotificationEvent::DownloadDetectionFailed {
library_name,
error: e.to_string(),
},
);
}
});
last_job_id = Some(job_id);
}
return Ok(Json(serde_json::json!({
"id": last_job_id.map(|id| id.to_string()),
"status": "started",
})));
}
let library_id: Uuid = body
.library_id
.unwrap()
.parse()
.map_err(|_| ApiError::bad_request("invalid library_id"))?;

View File

@@ -123,8 +123,36 @@ pub async fn enqueue_rebuild(
let is_full = payload.as_ref().and_then(|p| p.0.full).unwrap_or(false);
let is_rescan = payload.as_ref().and_then(|p| p.0.rescan).unwrap_or(false);
let job_type = if is_full { "full_rebuild" } else if is_rescan { "rescan" } else { "rebuild" };
let id = Uuid::new_v4();
// When no library specified, create one job per library
if library_id.is_none() {
let library_ids: Vec<Uuid> = sqlx::query_scalar("SELECT id FROM libraries ORDER BY name")
.fetch_all(&state.pool)
.await?;
let mut last_id: Option<Uuid> = None;
for lib_id in library_ids {
let id = Uuid::new_v4();
sqlx::query(
"INSERT INTO index_jobs (id, library_id, type, status) VALUES ($1, $2, $3, 'pending')",
)
.bind(id)
.bind(lib_id)
.bind(job_type)
.execute(&state.pool)
.await?;
last_id = Some(id);
}
let last_id = last_id.ok_or_else(|| ApiError::bad_request("No libraries found"))?;
let row = sqlx::query(
"SELECT id, library_id, book_id, type, status, started_at, finished_at, stats_json, error_opt, created_at FROM index_jobs WHERE id = $1",
)
.bind(last_id)
.fetch_one(&state.pool)
.await?;
return Ok(Json(map_row(row)));
}
let id = Uuid::new_v4();
sqlx::query(
"INSERT INTO index_jobs (id, library_id, type, status) VALUES ($1, $2, $3, 'pending')",
)

View File

@@ -16,7 +16,7 @@ use crate::{error::ApiError, metadata_providers, state::AppState};
/// Request body for POST /metadata/batch.
#[derive(Deserialize, ToSchema)]
pub struct MetadataBatchRequest {
// NOTE(review): unified-diff rendering — `String` is the pre-change type,
// `Option<String>` the post-change one. `None` now means "one batch job per
// library with metadata enabled" (see the `is_none()` branch in start_batch,
// which filters on `metadata_provider IS DISTINCT FROM 'none'`).
pub library_id: String,
pub library_id: Option<String>,
}
#[derive(Serialize, ToSchema)]
@@ -76,8 +76,67 @@ pub async fn start_batch(
State(state): State<AppState>,
Json(body): Json<MetadataBatchRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
// All libraries case
if body.library_id.is_none() {
let library_ids: Vec<Uuid> = sqlx::query_scalar(
"SELECT id FROM libraries WHERE metadata_provider IS DISTINCT FROM 'none' ORDER BY name"
)
.fetch_all(&state.pool)
.await?;
let mut last_job_id: Option<Uuid> = None;
for library_id in library_ids {
let existing: Option<Uuid> = sqlx::query_scalar(
"SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'metadata_batch' AND status IN ('pending', 'running') LIMIT 1",
)
.bind(library_id)
.fetch_optional(&state.pool)
.await?;
if existing.is_some() { continue; }
let job_id = Uuid::new_v4();
sqlx::query(
"INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'metadata_batch', 'running', NOW())",
)
.bind(job_id)
.bind(library_id)
.execute(&state.pool)
.await?;
let pool = state.pool.clone();
let library_name: Option<String> = sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1")
.bind(library_id)
.fetch_optional(&state.pool)
.await
.ok()
.flatten();
tokio::spawn(async move {
if let Err(e) = process_metadata_batch(&pool, job_id, library_id).await {
warn!("[METADATA_BATCH] job {job_id} failed: {e}");
let _ = sqlx::query(
"UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW() WHERE id = $1",
)
.bind(job_id)
.bind(e.to_string())
.execute(&pool)
.await;
notifications::notify(
pool.clone(),
notifications::NotificationEvent::MetadataBatchFailed {
library_name,
error: e.to_string(),
},
);
}
});
last_job_id = Some(job_id);
}
return Ok(Json(serde_json::json!({
"id": last_job_id.map(|id| id.to_string()),
"status": "started",
})));
}
let library_id: Uuid = body
.library_id
.unwrap()
.parse()
.map_err(|_| ApiError::bad_request("invalid library_id"))?;

View File

@@ -17,7 +17,7 @@ use crate::metadata_batch::{load_provider_config_from_pool, is_job_cancelled, up
/// Request body for POST /metadata/refresh.
#[derive(Deserialize, ToSchema)]
pub struct MetadataRefreshRequest {
// NOTE(review): unified-diff rendering — pre-change `String` line kept above
// the post-change `Option<String>` line. `None` now means "refresh every
// eligible library"; start_refresh additionally skips libraries with zero
// approved, non-ended external metadata links (see the link_count check).
pub library_id: String,
pub library_id: Option<String>,
}
/// A single field change: old → new
@@ -83,8 +83,82 @@ pub async fn start_refresh(
State(state): State<AppState>,
Json(body): Json<MetadataRefreshRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
// All libraries case
if body.library_id.is_none() {
let library_ids: Vec<Uuid> = sqlx::query_scalar(
"SELECT id FROM libraries WHERE metadata_provider IS DISTINCT FROM 'none' ORDER BY name"
)
.fetch_all(&state.pool)
.await?;
let mut last_job_id: Option<Uuid> = None;
for library_id in library_ids {
let link_count: i64 = sqlx::query_scalar(
r#"
SELECT COUNT(*) FROM external_metadata_links eml
LEFT JOIN series_metadata sm
ON sm.library_id = eml.library_id AND sm.name = eml.series_name
WHERE eml.library_id = $1
AND eml.status = 'approved'
AND COALESCE(sm.status, 'ongoing') NOT IN ('ended', 'cancelled')
"#,
)
.bind(library_id)
.fetch_one(&state.pool)
.await
.unwrap_or(0);
if link_count == 0 { continue; }
let existing: Option<Uuid> = sqlx::query_scalar(
"SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'metadata_refresh' AND status IN ('pending', 'running') LIMIT 1",
)
.bind(library_id)
.fetch_optional(&state.pool)
.await?;
if existing.is_some() { continue; }
let job_id = Uuid::new_v4();
sqlx::query(
"INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'metadata_refresh', 'running', NOW())",
)
.bind(job_id)
.bind(library_id)
.execute(&state.pool)
.await?;
let pool = state.pool.clone();
let library_name: Option<String> = sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1")
.bind(library_id)
.fetch_optional(&state.pool)
.await
.ok()
.flatten();
tokio::spawn(async move {
if let Err(e) = process_metadata_refresh(&pool, job_id, library_id).await {
warn!("[METADATA_REFRESH] job {job_id} failed: {e}");
let _ = sqlx::query(
"UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW() WHERE id = $1",
)
.bind(job_id)
.bind(e.to_string())
.execute(&pool)
.await;
notifications::notify(
pool.clone(),
notifications::NotificationEvent::MetadataRefreshFailed {
library_name,
error: e.to_string(),
},
);
}
});
last_job_id = Some(job_id);
}
return Ok(Json(serde_json::json!({
"id": last_job_id.map(|id| id.to_string()),
"status": "started",
})));
}
let library_id: Uuid = body
.library_id
.unwrap()
.parse()
.map_err(|_| ApiError::bad_request("invalid library_id"))?;

View File

@@ -14,7 +14,7 @@ use crate::{anilist, error::ApiError, state::AppState};
/// Request body for POST /reading-status/match.
#[derive(Deserialize, ToSchema)]
pub struct ReadingStatusMatchRequest {
// NOTE(review): unified-diff rendering — old `String` declaration above the
// new `Option<String>` one. `None` now means "match every library whose
// `reading_status_provider` is configured" (see the `is_none()` branch in
// start_match); AniList settings are validated once up front.
pub library_id: String,
pub library_id: Option<String>,
}
#[derive(Serialize, ToSchema)]
@@ -62,8 +62,70 @@ pub async fn start_match(
State(state): State<AppState>,
Json(body): Json<ReadingStatusMatchRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
// All libraries case
if body.library_id.is_none() {
anilist::load_anilist_settings(&state.pool).await?;
let library_ids: Vec<Uuid> = sqlx::query_scalar(
"SELECT id FROM libraries WHERE reading_status_provider IS NOT NULL ORDER BY name"
)
.fetch_all(&state.pool)
.await?;
let mut last_job_id: Option<Uuid> = None;
for library_id in library_ids {
let existing: Option<Uuid> = sqlx::query_scalar(
"SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'reading_status_match' AND status IN ('pending', 'running') LIMIT 1",
)
.bind(library_id)
.fetch_optional(&state.pool)
.await?;
if existing.is_some() { continue; }
let job_id = Uuid::new_v4();
sqlx::query(
"INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'reading_status_match', 'running', NOW())",
)
.bind(job_id)
.bind(library_id)
.execute(&state.pool)
.await?;
let pool = state.pool.clone();
let library_name: Option<String> = sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1")
.bind(library_id)
.fetch_optional(&state.pool)
.await
.ok()
.flatten();
tokio::spawn(async move {
if let Err(e) = process_reading_status_match(&pool, job_id, library_id).await {
warn!("[READING_STATUS_MATCH] job {job_id} failed: {e}");
let partial_stats = build_match_stats(&pool, job_id).await;
let _ = sqlx::query(
"UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW(), stats_json = $3 WHERE id = $1",
)
.bind(job_id)
.bind(e.to_string())
.bind(&partial_stats)
.execute(&pool)
.await;
notifications::notify(
pool.clone(),
notifications::NotificationEvent::ReadingStatusMatchFailed {
library_name,
error: e.to_string(),
},
);
}
});
last_job_id = Some(job_id);
}
return Ok(Json(serde_json::json!({
"id": last_job_id.map(|id| id.to_string()),
"status": "started",
})));
}
let library_id: Uuid = body
.library_id
.unwrap()
.parse()
.map_err(|_| ApiError::bad_request("invalid library_id"))?;

View File

@@ -14,7 +14,7 @@ use crate::{anilist, error::ApiError, state::AppState};
/// Request body for POST /reading-status/push.
#[derive(Deserialize, ToSchema)]
pub struct ReadingStatusPushRequest {
// NOTE(review): unified-diff rendering — old `String` declaration above the
// new `Option<String>` one. `None` now means "push for every library with
// `reading_status_provider = 'anilist'`"; start_push rejects the request with
// 400 when the AniList local_user_id is not configured.
pub library_id: String,
pub library_id: Option<String>,
}
#[derive(Serialize, ToSchema)]
@@ -64,8 +64,75 @@ pub async fn start_push(
State(state): State<AppState>,
Json(body): Json<ReadingStatusPushRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
// All libraries case
if body.library_id.is_none() {
let (_, _, local_user_id) = anilist::load_anilist_settings(&state.pool).await?;
if local_user_id.is_none() {
return Err(ApiError::bad_request(
"AniList local_user_id not configured — required for reading status push",
));
}
let library_ids: Vec<Uuid> = sqlx::query_scalar(
"SELECT id FROM libraries WHERE reading_status_provider = 'anilist' ORDER BY name"
)
.fetch_all(&state.pool)
.await?;
let mut last_job_id: Option<Uuid> = None;
for library_id in library_ids {
let existing: Option<Uuid> = sqlx::query_scalar(
"SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'reading_status_push' AND status IN ('pending', 'running') LIMIT 1",
)
.bind(library_id)
.fetch_optional(&state.pool)
.await?;
if existing.is_some() { continue; }
let job_id = Uuid::new_v4();
sqlx::query(
"INSERT INTO index_jobs (id, library_id, type, status, started_at) VALUES ($1, $2, 'reading_status_push', 'running', NOW())",
)
.bind(job_id)
.bind(library_id)
.execute(&state.pool)
.await?;
let pool = state.pool.clone();
let library_name: Option<String> = sqlx::query_scalar("SELECT name FROM libraries WHERE id = $1")
.bind(library_id)
.fetch_optional(&state.pool)
.await
.ok()
.flatten();
tokio::spawn(async move {
if let Err(e) = process_reading_status_push(&pool, job_id, library_id).await {
warn!("[READING_STATUS_PUSH] job {job_id} failed: {e}");
let partial_stats = build_push_stats(&pool, job_id).await;
let _ = sqlx::query(
"UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW(), stats_json = $3 WHERE id = $1",
)
.bind(job_id)
.bind(e.to_string())
.bind(&partial_stats)
.execute(&pool)
.await;
notifications::notify(
pool.clone(),
notifications::NotificationEvent::ReadingStatusPushFailed {
library_name,
error: e.to_string(),
},
);
}
});
last_job_id = Some(job_id);
}
return Ok(Json(serde_json::json!({
"id": last_job_id.map(|id| id.to_string()),
"status": "started",
})));
}
let library_id: Uuid = body
.library_id
.unwrap()
.parse()
.map_err(|_| ApiError::bad_request("invalid library_id"))?;

View File

@@ -32,8 +32,32 @@ pub async fn start_thumbnails_rebuild(
payload: Option<Json<ThumbnailsRebuildRequest>>,
) -> Result<Json<index_jobs::IndexJobResponse>, ApiError> {
let library_id = payload.as_ref().and_then(|p| p.0.library_id);
let job_id = Uuid::new_v4();
if library_id.is_none() {
let library_ids: Vec<Uuid> = sqlx::query_scalar("SELECT id FROM libraries ORDER BY name")
.fetch_all(&state.pool)
.await
.map_err(|e| ApiError::internal(e.to_string()))?;
let mut last_row = None;
for lib_id in library_ids {
let job_id = Uuid::new_v4();
let row = sqlx::query(
r#"INSERT INTO index_jobs (id, library_id, type, status)
VALUES ($1, $2, 'thumbnail_rebuild', 'pending')
RETURNING id, library_id, type, status, started_at, finished_at, stats_json, error_opt, created_at"#,
)
.bind(job_id)
.bind(lib_id)
.fetch_one(&state.pool)
.await
.map_err(|e| ApiError::internal(e.to_string()))?;
last_row = Some(row);
}
let row = last_row.ok_or_else(|| ApiError::bad_request("No libraries found"))?;
return Ok(Json(index_jobs::map_row(row)));
}
let job_id = Uuid::new_v4();
let row = sqlx::query(
r#"INSERT INTO index_jobs (id, library_id, type, status)
VALUES ($1, $2, 'thumbnail_rebuild', 'pending')
@@ -66,8 +90,32 @@ pub async fn start_thumbnails_regenerate(
payload: Option<Json<ThumbnailsRebuildRequest>>,
) -> Result<Json<index_jobs::IndexJobResponse>, ApiError> {
let library_id = payload.as_ref().and_then(|p| p.0.library_id);
let job_id = Uuid::new_v4();
if library_id.is_none() {
let library_ids: Vec<Uuid> = sqlx::query_scalar("SELECT id FROM libraries ORDER BY name")
.fetch_all(&state.pool)
.await
.map_err(|e| ApiError::internal(e.to_string()))?;
let mut last_row = None;
for lib_id in library_ids {
let job_id = Uuid::new_v4();
let row = sqlx::query(
r#"INSERT INTO index_jobs (id, library_id, type, status)
VALUES ($1, $2, 'thumbnail_regenerate', 'pending')
RETURNING id, library_id, type, status, started_at, finished_at, stats_json, error_opt, created_at"#,
)
.bind(job_id)
.bind(lib_id)
.fetch_one(&state.pool)
.await
.map_err(|e| ApiError::internal(e.to_string()))?;
last_row = Some(row);
}
let row = last_row.ok_or_else(|| ApiError::bad_request("No libraries found"))?;
return Ok(Json(index_jobs::map_row(row)));
}
let job_id = Uuid::new_v4();
let row = sqlx::query(
r#"INSERT INTO index_jobs (id, library_id, type, status)
VALUES ($1, $2, 'thumbnail_regenerate', 'pending')

View File

@@ -63,152 +63,41 @@ export default async function JobsPage({ searchParams }: { searchParams: Promise
async function triggerMetadataBatch(formData: FormData) {
"use server";
const libraryId = formData.get("library_id") as string;
if (libraryId) {
let result;
try {
result = await startMetadataBatch(libraryId);
} catch {
// Library may have metadata disabled — ignore silently
return;
}
const result = await startMetadataBatch(libraryId || undefined);
revalidatePath("/jobs");
redirect(`/jobs?highlight=${result.id}`);
} else {
// All libraries — skip those with metadata disabled
const allLibraries = await fetchLibraries().catch(() => [] as LibraryDto[]);
let lastId: string | undefined;
for (const lib of allLibraries) {
if (lib.metadata_provider === "none") continue;
try {
const result = await startMetadataBatch(lib.id);
if (result.status !== "already_running") lastId = result.id;
} catch {
// Library may have metadata disabled or other issue — skip
}
}
revalidatePath("/jobs");
redirect(lastId ? `/jobs?highlight=${lastId}` : "/jobs");
}
redirect(result.id ? `/jobs?highlight=${result.id}` : "/jobs");
}
async function triggerMetadataRefresh(formData: FormData) {
"use server";
const libraryId = formData.get("library_id") as string;
if (libraryId) {
let result;
try {
result = await startMetadataRefresh(libraryId);
} catch {
const result = await startMetadataRefresh(libraryId || undefined);
revalidatePath("/jobs");
redirect("/jobs");
}
revalidatePath("/jobs");
redirect(`/jobs?highlight=${result.id}`);
} else {
// All libraries — skip those with metadata disabled
const allLibraries = await fetchLibraries().catch(() => [] as LibraryDto[]);
let lastId: string | undefined;
for (const lib of allLibraries) {
if (lib.metadata_provider === "none") continue;
try {
const result = await startMetadataRefresh(lib.id);
if (result.status !== "already_running") lastId = result.id;
} catch {
// Library may have metadata disabled or no approved links — skip
}
}
revalidatePath("/jobs");
redirect(lastId ? `/jobs?highlight=${lastId}` : "/jobs");
}
redirect(result.id ? `/jobs?highlight=${result.id}` : "/jobs");
}
async function triggerReadingStatusMatch(formData: FormData) {
"use server";
const libraryId = formData.get("library_id") as string;
if (libraryId) {
let result;
try {
result = await startReadingStatusMatch(libraryId);
} catch {
return;
}
const result = await startReadingStatusMatch(libraryId || undefined);
revalidatePath("/jobs");
redirect(`/jobs?highlight=${result.id}`);
} else {
// All libraries — only those with reading_status_provider configured
const allLibraries = await fetchLibraries().catch(() => [] as LibraryDto[]);
let lastId: string | undefined;
for (const lib of allLibraries) {
if (!lib.reading_status_provider) continue;
try {
const result = await startReadingStatusMatch(lib.id);
if (result.status !== "already_running") lastId = result.id;
} catch {
// Skip libraries with errors
}
}
revalidatePath("/jobs");
redirect(lastId ? `/jobs?highlight=${lastId}` : "/jobs");
}
redirect(result.id ? `/jobs?highlight=${result.id}` : "/jobs");
}
async function triggerReadingStatusPush(formData: FormData) {
"use server";
const libraryId = formData.get("library_id") as string;
if (libraryId) {
let result;
try {
result = await startReadingStatusPush(libraryId);
} catch {
return;
}
const result = await startReadingStatusPush(libraryId || undefined);
revalidatePath("/jobs");
redirect(`/jobs?highlight=${result.id}`);
} else {
// All libraries — only those with reading_status_provider configured
const allLibraries = await fetchLibraries().catch(() => [] as LibraryDto[]);
let lastId: string | undefined;
for (const lib of allLibraries) {
if (!lib.reading_status_provider) continue;
try {
const result = await startReadingStatusPush(lib.id);
if (result.status !== "already_running") lastId = result.id;
} catch {
// Skip libraries with errors
}
}
revalidatePath("/jobs");
redirect(lastId ? `/jobs?highlight=${lastId}` : "/jobs");
}
redirect(result.id ? `/jobs?highlight=${result.id}` : "/jobs");
}
async function triggerDownloadDetection(formData: FormData) {
"use server";
const libraryId = formData.get("library_id") as string;
if (libraryId) {
let result;
try {
result = await startDownloadDetection(libraryId);
} catch {
return;
}
const result = await startDownloadDetection(libraryId || undefined);
revalidatePath("/jobs");
redirect(`/jobs?highlight=${result.id}`);
} else {
// All libraries
const allLibraries = await fetchLibraries().catch(() => [] as LibraryDto[]);
let lastId: string | undefined;
for (const lib of allLibraries) {
try {
const result = await startDownloadDetection(lib.id);
if (result.status !== "already_running") lastId = result.id;
} catch {
// Skip libraries with errors (e.g. Prowlarr not configured)
}
}
revalidatePath("/jobs");
redirect(lastId ? `/jobs?highlight=${lastId}` : "/jobs");
}
redirect(result.id ? `/jobs?highlight=${result.id}` : "/jobs");
}
return (

View File

@@ -1061,24 +1061,24 @@ export type MetadataBatchResultDto = {
error_message: string | null;
};
export async function startMetadataBatch(libraryId: string) {
return apiFetch<{ id: string; status: string }>("/metadata/batch", {
export async function startMetadataBatch(libraryId?: string) {
return apiFetch<{ id: string | null; status: string }>("/metadata/batch", {
method: "POST",
body: JSON.stringify({ library_id: libraryId }),
body: JSON.stringify(libraryId ? { library_id: libraryId } : {}),
});
}
export async function startMetadataRefresh(libraryId: string) {
return apiFetch<{ id: string; status: string }>("/metadata/refresh", {
export async function startMetadataRefresh(libraryId?: string) {
return apiFetch<{ id: string | null; status: string }>("/metadata/refresh", {
method: "POST",
body: JSON.stringify({ library_id: libraryId }),
body: JSON.stringify(libraryId ? { library_id: libraryId } : {}),
});
}
export async function startReadingStatusMatch(libraryId: string) {
return apiFetch<{ id: string; status: string }>("/reading-status/match", {
export async function startReadingStatusMatch(libraryId?: string) {
return apiFetch<{ id: string | null; status: string }>("/reading-status/match", {
method: "POST",
body: JSON.stringify({ library_id: libraryId }),
body: JSON.stringify(libraryId ? { library_id: libraryId } : {}),
});
}
@@ -1111,10 +1111,10 @@ export async function getReadingStatusMatchResults(jobId: string) {
return apiFetch<ReadingStatusMatchResultDto[]>(`/reading-status/match/${jobId}/results`);
}
export async function startReadingStatusPush(libraryId: string) {
return apiFetch<{ id: string; status: string }>("/reading-status/push", {
export async function startReadingStatusPush(libraryId?: string) {
return apiFetch<{ id: string | null; status: string }>("/reading-status/push", {
method: "POST",
body: JSON.stringify({ library_id: libraryId }),
body: JSON.stringify(libraryId ? { library_id: libraryId } : {}),
});
}
@@ -1148,10 +1148,10 @@ export async function getReadingStatusPushResults(jobId: string) {
return apiFetch<ReadingStatusPushResultDto[]>(`/reading-status/push/${jobId}/results`);
}
export async function startDownloadDetection(libraryId: string) {
return apiFetch<{ id: string; status: string }>("/download-detection/start", {
export async function startDownloadDetection(libraryId?: string) {
return apiFetch<{ id: string | null; status: string }>("/download-detection/start", {
method: "POST",
body: JSON.stringify({ library_id: libraryId }),
body: JSON.stringify(libraryId ? { library_id: libraryId } : {}),
});
}