refactor: migrer tout le code Rust vers series_id (table series)
API (15 fichiers): - series.rs: helpers resolve_series_id/get_or_create_series, toutes les queries migrent de books.series TEXT vers series_id FK + JOIN series - Routes /series/:name → /series/:series_id (UUID) - books.rs: filtres série par series_id, SELECT s.name AS series via JOIN - metadata.rs: sync écrit dans series au lieu de series_metadata - metadata_refresh.rs: refresh_link et rematch via series_id - metadata_batch.rs: sync via series table - anilist.rs: liens par series_id au lieu de series_name - download_detection.rs: available_downloads via series_id - reading_progress.rs: mark_series_read par series_id - torrent_import.rs: import via series JOIN - search.rs, stats.rs, libraries.rs: JOINs series pour les noms - reading_status_match.rs, reading_status_push.rs: séries via JOIN Indexer (3 fichiers): - scanner.rs: get_or_create_series_id() avec cache HashMap - batch.rs: BookInsert/BookUpdate.series_id UUID au lieu de series String - job.rs: rematch_unlinked_books via series JOIN 4 nouveaux tests (SeriesItem, SeriesMetadata, UpdateSeriesResponse, BatchStructs avec series_id) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -9,7 +9,7 @@ pub struct BookUpdate {
|
||||
pub title: String,
|
||||
pub kind: String,
|
||||
pub format: String,
|
||||
pub series: Option<String>,
|
||||
pub series_id: Option<Uuid>,
|
||||
pub volume: Option<i32>,
|
||||
pub page_count: Option<i32>,
|
||||
}
|
||||
@@ -28,7 +28,7 @@ pub struct BookInsert {
|
||||
pub kind: String,
|
||||
pub format: String,
|
||||
pub title: String,
|
||||
pub series: Option<String>,
|
||||
pub series_id: Option<Uuid>,
|
||||
pub volume: Option<i32>,
|
||||
pub page_count: Option<i32>,
|
||||
pub thumbnail_path: Option<String>,
|
||||
@@ -73,7 +73,7 @@ pub async fn flush_all_batches(
|
||||
let titles: Vec<String> = books_update.iter().map(|b| b.title.clone()).collect();
|
||||
let kinds: Vec<String> = books_update.iter().map(|b| b.kind.clone()).collect();
|
||||
let formats: Vec<String> = books_update.iter().map(|b| b.format.clone()).collect();
|
||||
let series: Vec<Option<String>> = books_update.iter().map(|b| b.series.clone()).collect();
|
||||
let series_ids: Vec<Option<Uuid>> = books_update.iter().map(|b| b.series_id).collect();
|
||||
let volumes: Vec<Option<i32>> = books_update.iter().map(|b| b.volume).collect();
|
||||
let page_counts: Vec<Option<i32>> = books_update.iter().map(|b| b.page_count).collect();
|
||||
|
||||
@@ -83,13 +83,13 @@ pub async fn flush_all_batches(
|
||||
title = data.title,
|
||||
kind = data.kind,
|
||||
format = data.format,
|
||||
series = data.series,
|
||||
series_id = data.series_id,
|
||||
volume = data.volume,
|
||||
page_count = data.page_count,
|
||||
updated_at = NOW()
|
||||
FROM (
|
||||
SELECT * FROM UNNEST($1::uuid[], $2::text[], $3::text[], $4::text[], $5::text[], $6::int[], $7::int[])
|
||||
AS t(book_id, title, kind, format, series, volume, page_count)
|
||||
SELECT * FROM UNNEST($1::uuid[], $2::text[], $3::text[], $4::text[], $5::uuid[], $6::int[], $7::int[])
|
||||
AS t(book_id, title, kind, format, series_id, volume, page_count)
|
||||
) AS data
|
||||
WHERE books.id = data.book_id
|
||||
"#
|
||||
@@ -98,7 +98,7 @@ pub async fn flush_all_batches(
|
||||
.bind(&titles)
|
||||
.bind(&kinds)
|
||||
.bind(&formats)
|
||||
.bind(&series)
|
||||
.bind(&series_ids)
|
||||
.bind(&volumes)
|
||||
.bind(&page_counts)
|
||||
.execute(&mut *tx)
|
||||
@@ -150,16 +150,16 @@ pub async fn flush_all_batches(
|
||||
let kinds: Vec<String> = books_insert.iter().map(|b| b.kind.clone()).collect();
|
||||
let formats: Vec<String> = books_insert.iter().map(|b| b.format.clone()).collect();
|
||||
let titles: Vec<String> = books_insert.iter().map(|b| b.title.clone()).collect();
|
||||
let series: Vec<Option<String>> = books_insert.iter().map(|b| b.series.clone()).collect();
|
||||
let series_ids: Vec<Option<Uuid>> = books_insert.iter().map(|b| b.series_id).collect();
|
||||
let volumes: Vec<Option<i32>> = books_insert.iter().map(|b| b.volume).collect();
|
||||
let page_counts: Vec<Option<i32>> = books_insert.iter().map(|b| b.page_count).collect();
|
||||
let thumbnail_paths: Vec<Option<String>> = books_insert.iter().map(|b| b.thumbnail_path.clone()).collect();
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO books (id, library_id, kind, format, title, series, volume, page_count, thumbnail_path)
|
||||
SELECT * FROM UNNEST($1::uuid[], $2::uuid[], $3::text[], $4::text[], $5::text[], $6::text[], $7::int[], $8::int[], $9::text[])
|
||||
AS t(id, library_id, kind, format, title, series, volume, page_count, thumbnail_path)
|
||||
INSERT INTO books (id, library_id, kind, format, title, series_id, volume, page_count, thumbnail_path)
|
||||
SELECT * FROM UNNEST($1::uuid[], $2::uuid[], $3::text[], $4::text[], $5::text[], $6::uuid[], $7::int[], $8::int[], $9::text[])
|
||||
AS t(id, library_id, kind, format, title, series_id, volume, page_count, thumbnail_path)
|
||||
"#
|
||||
)
|
||||
.bind(&book_ids)
|
||||
@@ -167,7 +167,7 @@ pub async fn flush_all_batches(
|
||||
.bind(&kinds)
|
||||
.bind(&formats)
|
||||
.bind(&titles)
|
||||
.bind(&series)
|
||||
.bind(&series_ids)
|
||||
.bind(&volumes)
|
||||
.bind(&page_counts)
|
||||
.bind(&thumbnail_paths)
|
||||
|
||||
@@ -18,12 +18,13 @@ async fn rematch_unlinked_books(pool: &PgPool, library_id: Uuid) {
|
||||
FROM external_book_metadata ebm2
|
||||
JOIN external_metadata_links eml ON eml.id = ebm2.link_id
|
||||
JOIN books b ON b.library_id = eml.library_id
|
||||
AND LOWER(COALESCE(NULLIF(b.series, ''), 'unclassified')) = LOWER(eml.series_name)
|
||||
AND b.volume = ebm2.volume_number
|
||||
LEFT JOIN series s ON s.id = b.series_id
|
||||
WHERE eml.library_id = $1
|
||||
AND ebm2.book_id IS NULL
|
||||
AND ebm2.volume_number IS NOT NULL
|
||||
AND eml.status = 'approved'
|
||||
AND LOWER(COALESCE(s.name, 'unclassified')) = LOWER(eml.series_name)
|
||||
) matched
|
||||
WHERE ebm.id = matched.ebm_id
|
||||
"#,
|
||||
|
||||
@@ -28,6 +28,42 @@ pub struct JobStats {
|
||||
|
||||
const BATCH_SIZE: usize = 100;
|
||||
|
||||
/// Look up a series by name in the local cache, or INSERT INTO series ... ON CONFLICT DO NOTHING
|
||||
/// then SELECT to get the id. Updates the cache on creation.
|
||||
async fn get_or_create_series_id(
|
||||
pool: &sqlx::PgPool,
|
||||
library_id: Uuid,
|
||||
name: &str,
|
||||
cache: &mut HashMap<String, Uuid>,
|
||||
) -> Result<Uuid> {
|
||||
// Check local cache first
|
||||
if let Some(&id) = cache.get(name) {
|
||||
return Ok(id);
|
||||
}
|
||||
|
||||
// Try to insert; ON CONFLICT DO NOTHING handles races / existing rows
|
||||
sqlx::query(
|
||||
"INSERT INTO series (id, library_id, name) VALUES ($1, $2, $3) ON CONFLICT (library_id, name) DO NOTHING",
|
||||
)
|
||||
.bind(Uuid::new_v4())
|
||||
.bind(library_id)
|
||||
.bind(name)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
|
||||
// Always SELECT to get the actual id (whether we just inserted or it already existed)
|
||||
let id: Uuid = sqlx::query_scalar(
|
||||
"SELECT id FROM series WHERE library_id = $1 AND name = $2",
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(name)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
cache.insert(name.to_string(), id);
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
/// Phase 1 — Discovery: walk filesystem, extract metadata from filenames only (no archive I/O).
|
||||
/// New books are inserted with page_count = NULL so the analyzer phase can fill them in.
|
||||
/// Updated books (fingerprint changed) get page_count/thumbnail reset.
|
||||
@@ -108,22 +144,31 @@ pub async fn scan_library_discovery(
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
// Track existing series names for new_series counting
|
||||
let existing_series: HashSet<String> = sqlx::query_scalar(
|
||||
"SELECT DISTINCT COALESCE(NULLIF(series, ''), 'unclassified') FROM books WHERE library_id = $1",
|
||||
// Load existing series for this library: name → id
|
||||
let series_rows = sqlx::query(
|
||||
"SELECT id, name FROM series WHERE library_id = $1",
|
||||
)
|
||||
.bind(library_id)
|
||||
.fetch_all(&state.pool)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.collect();
|
||||
.unwrap_or_default();
|
||||
let mut series_map: HashMap<String, Uuid> = series_rows
|
||||
.into_iter()
|
||||
.map(|row| {
|
||||
let name: String = row.get("name");
|
||||
let id: Uuid = row.get("id");
|
||||
(name, id)
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Track existing series names for new_series counting
|
||||
let existing_series: HashSet<String> = series_map.keys().cloned().collect();
|
||||
let mut seen_new_series: HashSet<String> = HashSet::new();
|
||||
|
||||
// Load series rename mapping: original filesystem name → current DB name.
|
||||
// This prevents the scanner from recreating old series after a user rename.
|
||||
let rename_rows = sqlx::query(
|
||||
"SELECT original_name, name FROM series_metadata WHERE library_id = $1 AND original_name IS NOT NULL",
|
||||
"SELECT original_name, name FROM series WHERE library_id = $1 AND original_name IS NOT NULL",
|
||||
)
|
||||
.bind(library_id)
|
||||
.fetch_all(&state.pool)
|
||||
@@ -378,12 +423,22 @@ pub async fn scan_library_discovery(
|
||||
old_fingerprint != fingerprint
|
||||
);
|
||||
|
||||
// Resolve series name → series_id
|
||||
let update_series_id = if let Some(ref series_name) = parsed.series {
|
||||
Some(
|
||||
get_or_create_series_id(&state.pool, library_id, series_name, &mut series_map)
|
||||
.await?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
books_to_update.push(BookUpdate {
|
||||
book_id,
|
||||
title: parsed.title,
|
||||
kind: utils::kind_from_format(format).to_string(),
|
||||
format: format.as_str().to_string(),
|
||||
series: parsed.series,
|
||||
series_id: update_series_id,
|
||||
volume: parsed.volume,
|
||||
// Reset page_count so analyzer re-processes this book
|
||||
page_count: None,
|
||||
@@ -439,13 +494,23 @@ pub async fn scan_library_discovery(
|
||||
stats.new_series += 1;
|
||||
}
|
||||
|
||||
// Resolve series name → series_id
|
||||
let insert_series_id = if let Some(ref series_name) = parsed.series {
|
||||
Some(
|
||||
get_or_create_series_id(&state.pool, library_id, series_name, &mut series_map)
|
||||
.await?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
books_to_insert.push(BookInsert {
|
||||
book_id,
|
||||
library_id,
|
||||
kind: utils::kind_from_format(format).to_string(),
|
||||
format: format.as_str().to_string(),
|
||||
title: parsed.title,
|
||||
series: parsed.series,
|
||||
series_id: insert_series_id,
|
||||
volume: parsed.volume,
|
||||
page_count: None,
|
||||
thumbnail_path: None,
|
||||
@@ -642,4 +707,34 @@ mod tests {
|
||||
// No existing files in DB — nothing to delete anyway
|
||||
assert!(!should_skip_deletions(true, 10, 0, 0));
|
||||
}
|
||||
|
||||
#[test]
fn batch_structs_use_series_id() {
    use crate::batch::{BookInsert, BookUpdate};

    let sid = Uuid::new_v4();

    // After the migration, an insert payload carries the `series` FK as a
    // UUID rather than a free-text series name.
    let insert = BookInsert {
        book_id: Uuid::new_v4(),
        library_id: Uuid::new_v4(),
        kind: "comic".to_string(),
        format: "cbz".to_string(),
        title: "Test".to_string(),
        series_id: Some(sid),
        volume: Some(1),
        page_count: None,
        thumbnail_path: None,
    };
    assert_eq!(insert.series_id, Some(sid));

    // Updates may clear the series association entirely.
    let update = BookUpdate {
        book_id: Uuid::new_v4(),
        title: "Test".to_string(),
        kind: "comic".to_string(),
        format: "cbz".to_string(),
        series_id: None,
        volume: None,
        page_count: None,
    };
    assert_eq!(update.series_id, None);
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user