feat: add image optimization and settings page

- Add persistent disk cache for processed images
- Optimize image processing with short-circuit and quality settings
- Add WebP lossy encoding with configurable quality
- Add settings API endpoints (GET/POST /settings, cache management)
- Add database table for app configuration
- Add /settings page in backoffice for image/cache/limits config
- Add cache stats and clear functionality
- Update navigation with settings link
This commit is contained in:
2026-03-07 09:12:06 +01:00
parent 9141edfaa9
commit 292c61566c
19 changed files with 1038 additions and 66 deletions

View File

@@ -31,3 +31,4 @@ uuid.workspace = true
zip = { version = "2.2", default-features = false, features = ["deflate"] }
utoipa.workspace = true
utoipa-swagger-ui = { workspace = true, features = ["axum"] }
webp = "0.3"

View File

@@ -224,16 +224,33 @@ pub struct SeriesItem {
pub first_book_id: Uuid,
}
/// List all series in a library
#[derive(Serialize, ToSchema)]
pub struct SeriesPage {
pub items: Vec<SeriesItem>,
#[schema(value_type = Option<String>)]
pub next_cursor: Option<String>,
}
#[derive(Deserialize, ToSchema)]
pub struct ListSeriesQuery {
#[schema(value_type = Option<String>)]
pub cursor: Option<String>,
#[schema(value_type = Option<i64>, example = 50)]
pub limit: Option<i64>,
}
/// List all series in a library with pagination
#[utoipa::path(
get,
path = "/libraries/{library_id}/series",
tag = "books",
params(
("library_id" = String, Path, description = "Library UUID"),
("cursor" = Option<String>, Query, description = "Cursor for pagination (series name)"),
("limit" = Option<i64>, Query, description = "Max items to return (max 200)"),
),
responses(
(status = 200, body = Vec<SeriesItem>),
(status = 200, body = SeriesPage),
(status = 401, description = "Unauthorized"),
),
security(("Bearer" = []))
@@ -241,7 +258,10 @@ pub struct SeriesItem {
pub async fn list_series(
State(state): State<AppState>,
Path(library_id): Path<Uuid>,
) -> Result<Json<Vec<SeriesItem>>, ApiError> {
Query(query): Query<ListSeriesQuery>,
) -> Result<Json<SeriesPage>, ApiError> {
let limit = query.limit.unwrap_or(50).clamp(1, 200);
let rows = sqlx::query(
r#"
WITH sorted_books AS (
@@ -272,6 +292,7 @@ pub async fn list_series(
sb.id as first_book_id
FROM series_counts sc
JOIN sorted_books sb ON sb.name = sc.name AND sb.rn = 1
WHERE ($2::text IS NULL OR sc.name > $2)
ORDER BY
-- Natural sort: extract text part before numbers
REGEXP_REPLACE(LOWER(sc.name), '[0-9]+', '', 'g'),
@@ -281,14 +302,18 @@ pub async fn list_series(
0
),
sc.name ASC
LIMIT $3
"#,
)
.bind(library_id)
.bind(query.cursor.as_deref())
.bind(limit + 1)
.fetch_all(&state.pool)
.await?;
let series: Vec<SeriesItem> = rows
let mut items: Vec<SeriesItem> = rows
.iter()
.take(limit as usize)
.map(|row| SeriesItem {
name: row.get("name"),
book_count: row.get("book_count"),
@@ -296,5 +321,14 @@ pub async fn list_series(
})
.collect();
Ok(Json(series))
let next_cursor = if rows.len() > limit as usize {
items.last().map(|s| s.name.clone())
} else {
None
};
Ok(Json(SeriesPage {
items: std::mem::take(&mut items),
next_cursor,
}))
}

View File

@@ -6,6 +6,7 @@ mod libraries;
mod openapi;
mod pages;
mod search;
mod settings;
mod tokens;
use std::{
@@ -107,6 +108,7 @@ async fn main() -> anyhow::Result<()> {
.route("/folders", get(index_jobs::list_folders))
.route("/admin/tokens", get(tokens::list_tokens).post(tokens::create_token))
.route("/admin/tokens/:id", delete(tokens::revoke_token))
.merge(settings::settings_routes())
.route_layer(middleware::from_fn_with_state(
state.clone(),
auth::require_admin,

View File

@@ -1,6 +1,6 @@
use std::{
io::Read,
path::Path,
io::{Read, Write},
path::{Path, PathBuf},
sync::{atomic::Ordering, Arc},
time::Duration,
};
@@ -11,7 +11,7 @@ use axum::{
http::{header, HeaderMap, HeaderValue, StatusCode},
response::{IntoResponse, Response},
};
use image::{codecs::jpeg::JpegEncoder, codecs::png::PngEncoder, codecs::webp::WebPEncoder, ColorType, ImageEncoder};
use image::{codecs::jpeg::JpegEncoder, codecs::png::PngEncoder, ColorType, ImageEncoder, ImageFormat};
use serde::Deserialize;
use utoipa::ToSchema;
use sha2::{Digest, Sha256};
@@ -29,6 +29,43 @@ fn remap_libraries_path(path: &str) -> String {
path.to_string()
}
/// Root directory for the on-disk processed-image cache.
///
/// Read from the `IMAGE_CACHE_DIR` environment variable, falling back to
/// `/tmp/stripstream-image-cache` when it is unset (or not valid unicode).
fn get_image_cache_dir() -> PathBuf {
    match std::env::var("IMAGE_CACHE_DIR") {
        Ok(dir) => PathBuf::from(dir),
        Err(_) => PathBuf::from("/tmp/stripstream-image-cache"),
    }
}
/// Derives a stable hex cache key from every input that affects the rendered
/// bytes: source path, page number, output format, quality, and width.
///
/// NOTE(review): the source file's mtime/size is not part of the key, so a
/// replaced comic file would keep serving stale cached pages — confirm whether
/// library files are treated as immutable.
fn get_cache_key(abs_path: &str, page: u32, format: &str, quality: u8, width: u32) -> String {
    let mut digest = Sha256::new();
    // Fed in the same order every time so the key is deterministic.
    for chunk in [
        abs_path.as_bytes(),
        &page.to_le_bytes()[..],
        format.as_bytes(),
        &quality.to_le_bytes()[..],
        &width.to_le_bytes()[..],
    ] {
        digest.update(chunk);
    }
    format!("{:x}", digest.finalize())
}
/// Maps a cache key to its on-disk location, sharded into subdirectories by
/// the first two hex characters of the key to keep directory fan-out bounded.
/// (Keys are 64-char sha256 hex from `get_cache_key`, so `[..2]` is safe.)
fn get_cache_path(cache_key: &str, format: &OutputFormat) -> PathBuf {
    let file_name = format!("{}.{}", cache_key, format.extension());
    get_image_cache_dir().join(&cache_key[..2]).join(file_name)
}
/// Best-effort read of a cached image; any I/O error (including "not found")
/// is treated as a cache miss.
fn read_from_disk_cache(cache_path: &Path) -> Option<Vec<u8>> {
    match std::fs::read(cache_path) {
        Ok(bytes) => Some(bytes),
        Err(_) => None,
    }
}
/// Persists an encoded image into the disk cache.
///
/// Writes to a uniquely named temp file in the target directory and then
/// renames it into place. The rename is atomic on POSIX filesystems, so a
/// concurrent `read_from_disk_cache` can never observe a partially written
/// file (the previous version wrote the final path directly, which could).
/// Parent directories are created as needed.
fn write_to_disk_cache(cache_path: &Path, data: &[u8]) -> Result<(), std::io::Error> {
    let parent = cache_path.parent().unwrap_or_else(|| Path::new("."));
    std::fs::create_dir_all(parent)?;
    // Unique-enough temp name: pid + wall-clock nanos keeps concurrent
    // writers of the same key from clobbering each other's partial output.
    let nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_nanos());
    let tmp_path = parent.join(format!(".tmp-{}-{}", std::process::id(), nanos));
    let mut file = std::fs::File::create(&tmp_path)?;
    file.write_all(data)?;
    file.sync_data()?;
    drop(file);
    if let Err(e) = std::fs::rename(&tmp_path, cache_path) {
        // Don't leak the temp file if the rename fails.
        let _ = std::fs::remove_file(&tmp_path);
        return Err(e);
    }
    Ok(())
}
#[derive(Deserialize, ToSchema)]
pub struct PageQuery {
#[schema(value_type = Option<String>, example = "webp")]
@@ -109,10 +146,11 @@ pub async fn get_page(
return Err(ApiError::bad_request("width must be <= 2160"));
}
let cache_key = format!("{book_id}:{n}:{}:{quality}:{width}", format.extension());
if let Some(cached) = state.page_cache.lock().await.get(&cache_key).cloned() {
let memory_cache_key = format!("{book_id}:{n}:{}:{quality}:{width}", format.extension());
if let Some(cached) = state.page_cache.lock().await.get(&memory_cache_key).cloned() {
state.metrics.page_cache_hits.fetch_add(1, Ordering::Relaxed);
return Ok(image_response(cached, format.content_type()));
return Ok(image_response(cached, format.content_type(), None));
}
state.metrics.page_cache_misses.fetch_add(1, Ordering::Relaxed);
@@ -131,10 +169,18 @@ pub async fn get_page(
let row = row.ok_or_else(|| ApiError::not_found("book file not found"))?;
let abs_path: String = row.get("abs_path");
// Remap /libraries to LIBRARIES_ROOT_PATH for local development
let abs_path = remap_libraries_path(&abs_path);
let input_format: String = row.get("format");
let disk_cache_key = get_cache_key(&abs_path, n, format.extension(), quality, width);
let cache_path = get_cache_path(&disk_cache_key, &format);
if let Some(cached_bytes) = read_from_disk_cache(&cache_path) {
let bytes = Arc::new(cached_bytes);
state.page_cache.lock().await.put(memory_cache_key, bytes.clone());
return Ok(image_response(bytes, format.content_type(), Some(&disk_cache_key)));
}
let _permit = state
.page_render_limit
.clone()
@@ -142,27 +188,39 @@ pub async fn get_page(
.await
.map_err(|_| ApiError::internal("render limiter unavailable"))?;
let abs_path_clone = abs_path.clone();
let format_clone = format;
let bytes = tokio::time::timeout(
Duration::from_secs(12),
tokio::task::spawn_blocking(move || render_page(&abs_path, &input_format, n, &format, quality, width)),
tokio::task::spawn_blocking(move || {
render_page(&abs_path_clone, &input_format, n, &format_clone, quality, width)
}),
)
.await
.map_err(|_| ApiError::internal("page rendering timeout"))?
.map_err(|e| ApiError::internal(format!("render task failed: {e}")))??;
let bytes = Arc::new(bytes);
state.page_cache.lock().await.put(cache_key, bytes.clone());
let _ = write_to_disk_cache(&cache_path, &bytes);
Ok(image_response(bytes, format.content_type()))
let bytes = Arc::new(bytes);
state.page_cache.lock().await.put(memory_cache_key, bytes.clone());
Ok(image_response(bytes, format.content_type(), Some(&disk_cache_key)))
}
fn image_response(bytes: Arc<Vec<u8>>, content_type: &str) -> Response {
fn image_response(bytes: Arc<Vec<u8>>, content_type: &str, etag_suffix: Option<&str>) -> Response {
let mut headers = HeaderMap::new();
headers.insert(header::CONTENT_TYPE, HeaderValue::from_str(content_type).unwrap_or(HeaderValue::from_static("application/octet-stream")));
headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("public, max-age=300"));
let mut hasher = Sha256::new();
hasher.update(&*bytes);
let etag = format!("\"{:x}\"", hasher.finalize());
headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("public, max-age=31536000, immutable"));
let etag = if let Some(suffix) = etag_suffix {
format!("\"{}\"", suffix)
} else {
let mut hasher = Sha256::new();
hasher.update(&*bytes);
format!("\"{:x}\"", hasher.finalize())
};
if let Ok(v) = HeaderValue::from_str(&etag) {
headers.insert(header::ETAG, v);
}
@@ -271,6 +329,13 @@ fn render_pdf_page(abs_path: &str, page_number: u32, width: u32) -> Result<Vec<u
}
fn transcode_image(input: &[u8], out_format: &OutputFormat, quality: u8, width: u32) -> Result<Vec<u8>, ApiError> {
let source_format = image::guess_format(input).ok();
let needs_transcode = source_format.map(|f| !format_matches(&f, out_format)).unwrap_or(true);
if width == 0 && !needs_transcode {
return Ok(input.to_vec());
}
let mut image = image::load_from_memory(input).map_err(|e| ApiError::internal(format!("invalid source image: {e}")))?;
if width > 0 {
image = image.resize(width, u32::MAX, image::imageops::FilterType::Lanczos3);
@@ -293,15 +358,27 @@ fn transcode_image(input: &[u8], out_format: &OutputFormat, quality: u8, width:
.map_err(|e| ApiError::internal(format!("png encode failed: {e}")))?;
}
OutputFormat::Webp => {
let encoder = WebPEncoder::new_lossless(&mut out);
encoder
.write_image(&rgba, w, h, ColorType::Rgba8.into())
.map_err(|e| ApiError::internal(format!("webp encode failed: {e}")))?;
let rgb_data: Vec<u8> = rgba
.pixels()
.flat_map(|p| [p[0], p[1], p[2]])
.collect();
let webp_data = webp::Encoder::new(&rgb_data, webp::PixelLayout::Rgb, w, h)
.encode(f32::max(quality as f32, 85.0));
out.extend_from_slice(&webp_data);
}
}
Ok(out)
}
/// True when the decoded source format already matches the requested output
/// format, so `transcode_image` can pass the original bytes through untouched.
fn format_matches(source: &ImageFormat, target: &OutputFormat) -> bool {
    matches!(
        (source, target),
        (ImageFormat::Jpeg, OutputFormat::Jpeg)
            | (ImageFormat::Png, OutputFormat::Png)
            | (ImageFormat::WebP, OutputFormat::Webp)
    )
}
fn is_image_name(name: &str) -> bool {
name.ends_with(".jpg")
|| name.ends_with(".jpeg")

260
apps/api/src/settings.rs Normal file
View File

@@ -0,0 +1,260 @@
use axum::{
extract::{Query, State},
response::IntoResponse,
routing::{get, post},
Json, Router,
};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use sqlx::Row;
use crate::{error::ApiError, AppState};
/// Image transcoding configuration stored under the `image_processing`
/// settings key (defaults are applied in `get_settings_from_db`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImageProcessingSettings {
    /// Output encoding name; default "webp".
    pub format: String,
    /// Lossy encode quality; default 85.
    pub quality: u8,
    /// Resize filter name; default "lanczos3".
    pub filter: String,
    /// Maximum output width in pixels; default 2160.
    pub max_width: u32,
}
/// Disk image-cache configuration stored under the `cache` settings key.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheSettings {
    /// Whether the disk cache is enabled; default true.
    pub enabled: bool,
    /// Cache directory path; default "/tmp/stripstream-image-cache".
    pub directory: String,
    /// Maximum cache size in megabytes; default 10000.
    // NOTE(review): nothing in this change enforces the size cap — confirm
    // whether eviction is implemented elsewhere or still TODO.
    pub max_size_mb: u32,
}
/// Rendering and request-limit configuration stored under the `limits`
/// settings key (defaults applied in `get_settings_from_db`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LimitsSettings {
    /// Concurrent page renders; default 4.
    pub concurrent_renders: u8,
    /// Render timeout in seconds; default 12.
    pub timeout_seconds: u8,
    /// Rate limit in requests per second; default 120.
    pub rate_limit_per_second: u16,
}
/// Aggregated, typed view of all application settings; assembled with
/// per-section defaults by `get_settings_from_db`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppSettings {
    pub image_processing: ImageProcessingSettings,
    pub cache: CacheSettings,
    pub limits: LimitsSettings,
}
/// Request body for POST /settings/{key}: the raw JSON value to store.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateSettingRequest {
    pub value: Value,
}
/// Admin-only settings routes; merged behind the `require_admin` middleware
/// in main.
pub fn settings_routes() -> Router<AppState> {
    Router::new()
        // Full settings map, plus per-key read/write.
        .route("/settings", get(get_settings))
        .route("/settings/:key", get(get_setting).post(update_setting))
        // Disk image-cache management.
        .route("/settings/cache/clear", post(clear_cache))
        .route("/settings/cache/stats", get(get_cache_stats))
}
/// GET /settings — returns every row of `app_settings` as a single JSON
/// object keyed by setting name.
async fn get_settings(State(state): State<AppState>) -> Result<Json<Value>, ApiError> {
    let rows = sqlx::query(r#"SELECT key, value FROM app_settings"#)
        .fetch_all(&state.pool)
        .await?;
    let settings: serde_json::Map<String, Value> = rows
        .into_iter()
        .map(|row| (row.get::<String, _>("key"), row.get::<Value, _>("value")))
        .collect();
    Ok(Json(Value::Object(settings)))
}
/// GET /settings/{key} — returns a single setting's JSON value, or 404 when
/// the key has never been stored.
async fn get_setting(
    State(state): State<AppState>,
    axum::extract::Path(key): axum::extract::Path<String>,
) -> Result<Json<Value>, ApiError> {
    let found = sqlx::query(r#"SELECT value FROM app_settings WHERE key = $1"#)
        .bind(&key)
        .fetch_optional(&state.pool)
        .await?;
    let Some(row) = found else {
        return Err(ApiError::not_found(format!("setting '{}' not found", key)));
    };
    Ok(Json(row.get::<Value, _>("value")))
}
/// POST /settings/{key} — upserts one setting (insert-or-overwrite, stamping
/// `updated_at`) and echoes back the value as stored.
async fn update_setting(
    State(state): State<AppState>,
    axum::extract::Path(key): axum::extract::Path<String>,
    Json(body): Json<UpdateSettingRequest>,
) -> Result<Json<Value>, ApiError> {
    let updated = sqlx::query(
        r#"
    INSERT INTO app_settings (key, value, updated_at)
    VALUES ($1, $2, CURRENT_TIMESTAMP)
    ON CONFLICT (key)
    DO UPDATE SET value = $2, updated_at = CURRENT_TIMESTAMP
    RETURNING value
    "#,
    )
    .bind(&key)
    .bind(&body.value)
    .fetch_one(&state.pool)
    .await?;
    Ok(Json(updated.get::<Value, _>("value")))
}
/// Result payload for POST /settings/cache/clear.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClearCacheResponse {
    /// False when removing the cache directory failed; details in `message`.
    pub success: bool,
    pub message: String,
}
/// POST /settings/cache/clear — removes the on-disk image cache directory.
///
/// Removal failures are reported in the response body (`success: false`)
/// rather than as an HTTP error; a missing directory counts as success.
/// Filesystem work runs on the blocking thread pool.
async fn clear_cache(State(_state): State<AppState>) -> Result<Json<ClearCacheResponse>, ApiError> {
    let cache_dir = std::env::var("IMAGE_CACHE_DIR")
        .unwrap_or_else(|_| "/tmp/stripstream-image-cache".to_string());
    let result = tokio::task::spawn_blocking(move || {
        if !std::path::Path::new(&cache_dir).exists() {
            return ClearCacheResponse {
                success: true,
                message: format!("Cache directory '{}' does not exist, nothing to clear", cache_dir),
            };
        }
        match std::fs::remove_dir_all(&cache_dir) {
            Ok(()) => ClearCacheResponse {
                success: true,
                message: format!("Cache directory '{}' cleared successfully", cache_dir),
            },
            Err(e) => ClearCacheResponse {
                success: false,
                message: format!("Failed to clear cache: {}", e),
            },
        }
    })
    .await
    .map_err(|e| ApiError::internal(format!("cache clear failed: {}", e)))?;
    Ok(Json(result))
}
/// Payload for GET /settings/cache/stats.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheStats {
    /// Total size of all files, in megabytes (bytes / 1024 / 1024).
    pub total_size_mb: f64,
    /// Number of regular files found under the cache directory.
    pub file_count: u64,
    /// The directory that was scanned.
    pub directory: String,
}
/// GET /settings/cache/stats — walks the image-cache tree and reports total
/// size and file count.
///
/// The walk is best-effort: an I/O error anywhere aborts the remainder of
/// the scan and the counts gathered so far are returned. Runs on the
/// blocking thread pool.
async fn get_cache_stats(State(_state): State<AppState>) -> Result<Json<CacheStats>, ApiError> {
    let cache_dir = std::env::var("IMAGE_CACHE_DIR")
        .unwrap_or_else(|_| "/tmp/stripstream-image-cache".to_string());
    let cache_dir_clone = cache_dir.clone();
    let stats = tokio::task::spawn_blocking(move || {
        // Recursive directory walk accumulating into the two counters.
        fn walk(
            dir: &std::path::Path,
            size: &mut u64,
            count: &mut u64,
        ) -> std::io::Result<()> {
            if !dir.is_dir() {
                return Ok(());
            }
            for entry in std::fs::read_dir(dir)? {
                let entry = entry?;
                let child = entry.path();
                if child.is_dir() {
                    walk(&child, size, count)?;
                } else {
                    *size += entry.metadata()?.len();
                    *count += 1;
                }
            }
            Ok(())
        }
        let root = std::path::Path::new(&cache_dir_clone);
        if !root.exists() {
            return CacheStats {
                total_size_mb: 0.0,
                file_count: 0,
                directory: cache_dir_clone,
            };
        }
        let mut total_size: u64 = 0;
        let mut file_count: u64 = 0;
        // Errors intentionally ignored: partial stats beat a 500 here.
        let _ = walk(root, &mut total_size, &mut file_count);
        CacheStats {
            total_size_mb: total_size as f64 / 1024.0 / 1024.0,
            file_count,
            directory: cache_dir_clone,
        }
    })
    .await
    .map_err(|e| ApiError::internal(format!("cache stats failed: {}", e)))?;
    Ok(Json(stats))
}
pub async fn get_settings_from_db(
pool: &sqlx::PgPool,
) -> Result<AppSettings, ApiError> {
let settings = get_settings_from_db_raw(pool).await?;
let image_processing = settings
.get("image_processing")
.and_then(|v| serde_json::from_value(v.clone()).ok())
.unwrap_or_else(|| ImageProcessingSettings {
format: "webp".to_string(),
quality: 85,
filter: "lanczos3".to_string(),
max_width: 2160,
});
let cache = settings
.get("cache")
.and_then(|v| serde_json::from_value(v.clone()).ok())
.unwrap_or_else(|| CacheSettings {
enabled: true,
directory: "/tmp/stripstream-image-cache".to_string(),
max_size_mb: 10000,
});
let limits = settings
.get("limits")
.and_then(|v| serde_json::from_value(v.clone()).ok())
.unwrap_or_else(|| LimitsSettings {
concurrent_renders: 4,
timeout_seconds: 12,
rate_limit_per_second: 120,
});
Ok(AppSettings {
image_processing,
cache,
limits,
})
}
/// Fetches every `app_settings` row into a key → JSON-value map.
async fn get_settings_from_db_raw(
    pool: &sqlx::PgPool,
) -> Result<std::collections::HashMap<String, Value>, ApiError> {
    let rows = sqlx::query(r#"SELECT key, value FROM app_settings"#)
        .fetch_all(pool)
        .await?;
    Ok(rows
        .into_iter()
        .map(|row| (row.get::<String, _>("key"), row.get::<Value, _>("value")))
        .collect())
}