feat: bouton télécharger et remplacer + fix extraction volumes UTF-8

- Ajout d'un bouton "télécharger et remplacer" avec popup de
  confirmation, qui passe tous les volumes du pack (pas seulement
  les manquants) et replace_existing=true à l'API.
- Nouvelle colonne replace_existing dans torrent_downloads.
- Fix critique du parseur de volumes : le pass 2 mélangeait les
  indices d'octets (String::find) avec les indices de caractères
  (Vec<char>), causant un décalage quand le titre contenait des
  caractères multi-octets (é, à...). "Tome #097" extrayait 9
  au lieu de 97. Réécrit en indexation char pure.
- Le préfixe "tome" ignore désormais le caractère "#" (tome #097 → 97).
- Protection intra-batch : si une destination est déjà utilisée,
  le fichier garde son nom original au lieu d'écraser.
- Alerte WARN si N fichiers source donnent moins de N/3 volumes uniques.
- Nettoyage du répertoire sl-{id} et de la catégorie qBittorrent
  après import.
- Badges volumes en flex-wrap dans la page downloads.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-27 16:01:08 +01:00
parent 9c18802864
commit eabb88eb9d
11 changed files with 205 additions and 66 deletions

View File

@@ -49,6 +49,8 @@ pub struct AvailableReleaseDto {
pub indexer: Option<String>,
pub seeders: Option<i32>,
pub matched_missing_volumes: Vec<i32>,
#[serde(default)]
pub all_volumes: Vec<i32>,
}
// ---------------------------------------------------------------------------
@@ -714,7 +716,8 @@ async fn search_prowlarr_for_series(
.filter_map(|r| {
let title_volumes = prowlarr::extract_volumes_from_title_pub(&r.title);
let matched_vols: Vec<i32> = title_volumes
.into_iter()
.iter()
.copied()
.filter(|v| missing_volumes.contains(v))
.collect();
if matched_vols.is_empty() {
@@ -727,6 +730,7 @@ async fn search_prowlarr_for_series(
indexer: r.indexer,
seeders: r.seeders,
matched_missing_volumes: matched_vols,
all_volumes: title_volumes,
})
}
})

View File

@@ -175,26 +175,36 @@ fn extract_volumes_from_title(title: &str) -> Vec<i32> {
}
// Pass 2 — individual volumes not already captured by range expansion
let prefixes = ["tome", "vol.", "vol ", "t", "v", "#"];
// Note: work entirely with char indices (not byte offsets) to avoid
// mismatches when the title contains multi-byte UTF-8 characters.
let prefixes: &[(&[char], bool)] = &[
(&['t', 'o', 'm', 'e'], false),
(&['v', 'o', 'l', '.'], false),
(&['v', 'o', 'l', ' '], false),
(&['t'], true),
(&['v'], true),
(&['#'], false),
];
let len = chars.len();
for prefix in &prefixes {
let mut start = 0;
while let Some(pos) = lower[start..].find(prefix) {
let abs_pos = start + pos;
let after = abs_pos + prefix.len();
// For single-char prefixes (t, v), ensure it's at a word boundary
if prefix.len() == 1 && *prefix != "#" {
if abs_pos > 0 && chars[abs_pos - 1].is_alphanumeric() {
start = after;
continue;
}
for &(prefix, needs_boundary) in prefixes {
let plen = prefix.len();
let mut ci = 0usize;
while ci + plen <= len {
if chars[ci..ci + plen] != *prefix {
ci += 1;
continue;
}
// Skip optional spaces or dots after prefix
let mut i = after;
while i < len && (chars[i] == ' ' || chars[i] == '.') {
// For single-char prefixes (t, v), ensure it's at a word boundary
if needs_boundary && ci > 0 && chars[ci - 1].is_alphanumeric() {
ci += plen;
continue;
}
// Skip optional spaces, dots, or '#' after prefix
let mut i = ci + plen;
while i < len && (chars[i] == ' ' || chars[i] == '.' || chars[i] == '#') {
i += 1;
}
@@ -205,14 +215,15 @@ fn extract_volumes_from_title(title: &str) -> Vec<i32> {
}
if i > digit_start {
if let Ok(num) = lower[digit_start..i].parse::<i32>() {
let num_str: String = chars[digit_start..i].iter().collect();
if let Ok(num) = num_str.parse::<i32>() {
if !volumes.contains(&num) {
volumes.push(num);
}
}
}
start = after;
ci += plen;
}
}
@@ -535,4 +546,22 @@ mod tests {
let v = extract_volumes_from_title("tool v2.0 release");
assert!(!v.contains(&0) || v.len() == 1); // only v2 at most
}
#[test]
fn tome_hash_with_accented_chars() {
    // Regression test: the é in "Compressé" makes byte offsets diverge from
    // char offsets, so an offset-mixing parser would misread "Tome #097".
    let title = "[Compressé] One Piece [Team Chromatique] - Tome #097 - [V2].cbz";
    let vols = sorted(extract_volumes_from_title(title));
    assert!(vols.contains(&97), "expected 97 in {:?}", vols);
}
#[test]
fn tome_hash_single_digit() {
    // "Tome #003" must parse as volume 3 despite the '#' and leading zeros.
    let title = "[Compressé] One Piece [Team Chromatique] - Tome #003 (Perfect Edition).cbz";
    let vols = sorted(extract_volumes_from_title(title));
    assert!(vols.contains(&3), "expected 3 in {:?}", vols);
}
}

View File

@@ -17,6 +17,9 @@ pub struct QBittorrentAddRequest {
pub library_id: Option<Uuid>,
pub series_name: Option<String>,
pub expected_volumes: Option<Vec<i32>>,
/// When true, overwrite existing files at destination during import.
#[serde(default)]
pub replace_existing: bool,
}
#[derive(Serialize, ToSchema)]
@@ -203,14 +206,15 @@ pub async fn add_torrent(
let id = download_id.unwrap();
sqlx::query(
"INSERT INTO torrent_downloads (id, library_id, series_name, expected_volumes, qb_hash) \
VALUES ($1, $2, $3, $4, $5)",
"INSERT INTO torrent_downloads (id, library_id, series_name, expected_volumes, qb_hash, replace_existing) \
VALUES ($1, $2, $3, $4, $5, $6)",
)
.bind(id)
.bind(library_id)
.bind(series_name)
.bind(expected_volumes)
.bind(qb_hash.as_deref())
.bind(body.replace_existing)
.execute(&state.pool)
.await?;

View File

@@ -406,7 +406,7 @@ async fn is_torrent_import_enabled(pool: &PgPool) -> bool {
async fn process_torrent_import(pool: PgPool, torrent_id: Uuid) -> anyhow::Result<()> {
let row = sqlx::query(
"SELECT library_id, series_name, expected_volumes, content_path, qb_hash \
"SELECT library_id, series_name, expected_volumes, content_path, qb_hash, replace_existing \
FROM torrent_downloads WHERE id = $1",
)
.bind(torrent_id)
@@ -418,6 +418,7 @@ async fn process_torrent_import(pool: PgPool, torrent_id: Uuid) -> anyhow::Resul
let expected_volumes: Vec<i32> = row.get("expected_volumes");
let content_path: Option<String> = row.get("content_path");
let qb_hash: Option<String> = row.get("qb_hash");
let replace_existing: bool = row.get("replace_existing");
let content_path =
content_path.ok_or_else(|| anyhow::anyhow!("content_path not set on torrent_download"))?;
@@ -428,7 +429,7 @@ async fn process_torrent_import(pool: PgPool, torrent_id: Uuid) -> anyhow::Resul
.execute(&pool)
.await?;
match do_import(&pool, library_id, &series_name, &expected_volumes, &content_path).await {
match do_import(&pool, library_id, &series_name, &expected_volumes, &content_path, replace_existing).await {
Ok(imported) => {
let json = serde_json::to_value(&imported).unwrap_or(serde_json::json!([]));
sqlx::query(
@@ -526,19 +527,19 @@ async fn process_torrent_import(pool: PgPool, torrent_id: Uuid) -> anyhow::Resul
}
}
// Clean up: remove source directory if it's a subdirectory of /downloads
let physical_content = remap_downloads_path(&content_path);
// Clean up: remove the sl-{id} category directory and all its contents
let downloads_root = remap_downloads_path("/downloads");
let content_p = std::path::Path::new(&physical_content);
let category_dir = remap_downloads_path(&format!("/downloads/sl-{torrent_id}"));
let category_p = std::path::Path::new(&category_dir);
let downloads_p = std::path::Path::new(&downloads_root);
if content_p.is_dir() && content_p != downloads_p && content_p.starts_with(downloads_p) {
match std::fs::remove_dir_all(content_p) {
Ok(()) => info!("[IMPORT] Cleaned up source directory: {}", physical_content),
Err(e) => warn!("[IMPORT] Failed to clean up {}: {}", physical_content, e),
if category_p.is_dir() && category_p != downloads_p && category_p.starts_with(downloads_p) {
match std::fs::remove_dir_all(category_p) {
Ok(()) => info!("[IMPORT] Cleaned up category directory: {}", category_dir),
Err(e) => warn!("[IMPORT] Failed to clean up {}: {}", category_dir, e),
}
}
// Remove torrent from qBittorrent
// Remove torrent and category from qBittorrent
if let Some(ref hash) = qb_hash {
if let Ok((base_url, username, password)) = load_qbittorrent_config(&pool).await {
if let Ok(client) = reqwest::Client::builder().timeout(Duration::from_secs(10)).build() {
@@ -550,6 +551,15 @@ async fn process_torrent_import(pool: PgPool, torrent_id: Uuid) -> anyhow::Resul
.send()
.await;
info!("[IMPORT] Removed torrent {} from qBittorrent", hash);
// Remove the sl-{id} category
let cat = format!("sl-{torrent_id}");
let _ = client
.post(format!("{base_url}/api/v2/torrents/removeCategories"))
.header("Cookie", format!("SID={sid}"))
.form(&[("categories", cat.as_str())])
.send()
.await;
}
}
}
@@ -584,6 +594,7 @@ async fn do_import(
series_name: &str,
expected_volumes: &[i32],
content_path: &str,
replace_existing: bool,
) -> anyhow::Result<Vec<ImportedFile>> {
let physical_content = remap_downloads_path(content_path);
@@ -645,6 +656,7 @@ async fn do_import(
info!("[IMPORT] Final reference: {:?}", reference);
let mut imported = Vec::new();
let mut used_destinations: std::collections::HashSet<String> = std::collections::HashSet::new();
for source_path in collect_book_files(&physical_content)? {
let filename = std::path::Path::new(&source_path)
@@ -656,26 +668,37 @@ async fn do_import(
.and_then(|e| e.to_str())
.unwrap_or("");
let matched: Vec<i32> = extract_volumes_from_title_pub(filename)
.into_iter()
let all_extracted = extract_volumes_from_title_pub(filename);
let matched: Vec<i32> = all_extracted
.iter()
.copied()
.filter(|v| expected_set.contains(v))
.collect();
if matched.is_empty() {
info!("[IMPORT] Skipping '{}' (extracted volumes {:?}, none in expected set)", filename, all_extracted);
continue;
}
let target_filename = if matched.len() == 1 {
// Single volume: apply naming pattern from reference
let vol = matched[0];
if let Some((ref ref_path, ref_vol)) = reference {
let generated = if let Some((ref ref_path, ref_vol)) = reference {
let built = build_target_filename(ref_path, ref_vol, vol, ext);
info!("[IMPORT] build_target_filename(ref={}, ref_vol={}, new_vol={}, ext={}) => {:?}",
ref_path, ref_vol, vol, ext, built);
info!("[IMPORT] build_target_filename(ref={}, ref_vol={}, new_vol={}, ext={}) => {:?} (source='{}')",
ref_path, ref_vol, vol, ext, built, filename);
built.unwrap_or_else(|| default_filename(series_name, vol, ext))
} else {
info!("[IMPORT] No reference, using default_filename for vol {}", vol);
info!("[IMPORT] No reference, using default_filename for vol {} (source='{}')", vol, filename);
default_filename(series_name, vol, ext)
};
// If this destination was already used in this batch, keep original filename
if used_destinations.contains(&generated) {
info!("[IMPORT] Destination '{}' already used in this batch, keeping original filename '{}'", generated, filename);
filename.to_string()
} else {
generated
}
} else {
// Multi-volume pack: keep original filename (scanner handles ranges)
@@ -684,13 +707,14 @@ async fn do_import(
let dest = format!("{}/{}", target_dir, target_filename);
if std::path::Path::new(&dest).exists() {
info!("Skipping {} (already exists at destination)", dest);
if std::path::Path::new(&dest).exists() && !replace_existing {
info!("[IMPORT] Skipping '{}' → '{}' (already exists at destination)", filename, dest);
continue;
}
move_file(&source_path, &dest)?;
info!("Imported {:?} → {}", matched, dest);
used_destinations.insert(target_filename);
info!("[IMPORT] Imported '{}' [{:?}] → {}", filename, matched, dest);
imported.push(ImportedFile {
volume: *matched.iter().min().unwrap(),
@@ -699,6 +723,24 @@ async fn do_import(
});
}
// Sanity check: warn if many source files collapsed into few volumes
// (symptom of a volume extraction bug)
let source_count = collect_book_files(&physical_content).map(|f| f.len()).unwrap_or(0);
let unique_volumes: std::collections::HashSet<i32> = imported.iter().map(|f| f.volume).collect();
if source_count > 5 && unique_volumes.len() > 0 && source_count > unique_volumes.len() * 3 {
warn!(
"[IMPORT] Suspicious: {} source files mapped to only {} unique volumes ({:?}). \
Possible volume extraction issue for series '{}'",
source_count, unique_volumes.len(),
{
let mut v: Vec<i32> = unique_volumes.into_iter().collect();
v.sort();
v
},
series_name,
);
}
Ok(imported)
}