Compare commits
123 Commits
feat/thumb
...
8948f75d62
| Author | SHA1 | Date | |
|---|---|---|---|
| 8948f75d62 | |||
| d304877a83 | |||
| 9cec32ba3e | |||
| e8768dfad7 | |||
| cfc98819ab | |||
| bfc1c76fe2 | |||
| 39e9f35acb | |||
| 36987f59b9 | |||
| 931d0e06f4 | |||
| 741a4da878 | |||
| e28b78d0e6 | |||
| 163dc3698c | |||
| 818bd82e0f | |||
| 76c8bcbf2c | |||
| 00094b22c6 | |||
| 1e4d9acebe | |||
| b226aa3a35 | |||
| d913be9d2a | |||
| e9bb951d97 | |||
| 037ede2750 | |||
| 06a245d90a | |||
| 63d5fcaa13 | |||
| 020cb6baae | |||
| 6db8042ffe | |||
| d4f87c4044 | |||
| 055c376222 | |||
| 1cc5d049ea | |||
| b955c2697c | |||
| 9a8c1577af | |||
| 52b9b0e00e | |||
| 51ef2fa725 | |||
| 7d53babc84 | |||
| 00f4445924 | |||
| 1a91c051b5 | |||
| 48ca9d0a8b | |||
| f75d795215 | |||
| ac13f53124 | |||
| c9ccf5cd90 | |||
| a99bfb5a91 | |||
| 389d71b42f | |||
| 2985ef5561 | |||
| 4be8177683 | |||
| a675dcd2a4 | |||
| 127cd8a42c | |||
| 1b9f2d3915 | |||
| f095bf050b | |||
| b17718df9b | |||
| 5c3ddf7819 | |||
| c56d02a895 | |||
| bc98067871 | |||
| a085924f8a | |||
| 9fbdf793d0 | |||
| b14accbbe0 | |||
| 330239d2c3 | |||
| bf5a20882b | |||
| 44c6dd626a | |||
| 9153b0c750 | |||
| e18bbba4ce | |||
| 2870dd9dbc | |||
| cf2e7a0be7 | |||
| 82444cda02 | |||
| 1d25c8869f | |||
| fd277602c9 | |||
| 673777bc8d | |||
| 03af82d065 | |||
| 78e28a269d | |||
| ee05df26c4 | |||
| 96d9efdeed | |||
| 9f5183848b | |||
| 6f9dd108ef | |||
| 61bc307715 | |||
| c7f3ad981d | |||
| 0d60d46cae | |||
| 6947af10fe | |||
| fe54f55f47 | |||
| f71ca92e85 | |||
| 7cca7e40c2 | |||
| 5db2a7501b | |||
| 85e0945c9d | |||
| efc2773199 | |||
| 1d9a1c76d2 | |||
| 3e3e0154fa | |||
| e73498cc60 | |||
| 0f4025369c | |||
| 7d3670e951 | |||
| 09682f5836 | |||
| db11c62d2f | |||
| 7346f1d5b7 | |||
| 358896c7d5 | |||
| 1d10044d46 | |||
| 8d98056375 | |||
| 4aafed3d31 | |||
| 3bd2fb7c1f | |||
| 3b6cc2903d | |||
| 6abaa96fba | |||
| f2d9bedcc7 | |||
| 1c106a4ff2 | |||
| 3ab5b223a8 | |||
| 7cfb6cf001 | |||
| d2fe7f12ab | |||
| 64347edabc | |||
| 8261050943 | |||
| a2da5081ea | |||
| 648d86970f | |||
| 278f422206 | |||
| ff59ac1eff | |||
| 7eb9e2dcad | |||
| c81f7ce1b7 | |||
| 137e8ce11c | |||
| e0b80cae38 | |||
| e8bb014874 | |||
| 4c75e08056 | |||
| f1b3aec94a | |||
| 473e849dfa | |||
| cfc896e92f | |||
| 36af34443e | |||
| 85cad1a7e7 | |||
| 0f5094575a | |||
| 131c50b1a1 | |||
| 6d4c400017 | |||
| 539dc77d57 | |||
| 9c7120c3dc | |||
| b1844a4f01 |
152
.claude/commands/opsx/apply.md
Normal file
152
.claude/commands/opsx/apply.md
Normal file
@@ -0,0 +1,152 @@
|
||||
---
|
||||
name: "OPSX: Apply"
|
||||
description: Implement tasks from an OpenSpec change (Experimental)
|
||||
category: Workflow
|
||||
tags: [workflow, artifacts, experimental]
|
||||
---
|
||||
|
||||
Implement tasks from an OpenSpec change.
|
||||
|
||||
**Input**: Optionally specify a change name (e.g., `/opsx:apply add-auth`). If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **Select the change**
|
||||
|
||||
If a name is provided, use it. Otherwise:
|
||||
- Infer from conversation context if the user mentioned a change
|
||||
- Auto-select if only one active change exists
|
||||
- If ambiguous, run `openspec list --json` to get available changes and use the **AskUserQuestion tool** to let the user select
|
||||
|
||||
Always announce: "Using change: <name>" and how to override (e.g., `/opsx:apply <other>`).
|
||||
|
||||
2. **Check status to understand the schema**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used (e.g., "spec-driven")
|
||||
- Which artifact contains the tasks (typically "tasks" for spec-driven, check status for others)
|
||||
|
||||
3. **Get apply instructions**
|
||||
|
||||
```bash
|
||||
openspec instructions apply --change "<name>" --json
|
||||
```
|
||||
|
||||
This returns:
|
||||
- Context file paths (varies by schema)
|
||||
- Progress (total, complete, remaining)
|
||||
- Task list with status
|
||||
- Dynamic instruction based on current state
|
||||
|
||||
**Handle states:**
|
||||
- If `state: "blocked"` (missing artifacts): show message, suggest using `/opsx:continue`
|
||||
- If `state: "all_done"`: congratulate, suggest archive
|
||||
- Otherwise: proceed to implementation
|
||||
|
||||
4. **Read context files**
|
||||
|
||||
Read the files listed in `contextFiles` from the apply instructions output.
|
||||
The files depend on the schema being used:
|
||||
- **spec-driven**: proposal, specs, design, tasks
|
||||
- Other schemas: follow the contextFiles from CLI output
|
||||
|
||||
5. **Show current progress**
|
||||
|
||||
Display:
|
||||
- Schema being used
|
||||
- Progress: "N/M tasks complete"
|
||||
- Remaining tasks overview
|
||||
- Dynamic instruction from CLI
|
||||
|
||||
6. **Implement tasks (loop until done or blocked)**
|
||||
|
||||
For each pending task:
|
||||
- Show which task is being worked on
|
||||
- Make the code changes required
|
||||
- Keep changes minimal and focused
|
||||
- Mark task complete in the tasks file: `- [ ]` → `- [x]`
|
||||
- Continue to next task
|
||||
|
||||
**Pause if:**
|
||||
- Task is unclear → ask for clarification
|
||||
- Implementation reveals a design issue → suggest updating artifacts
|
||||
- Error or blocker encountered → report and wait for guidance
|
||||
- User interrupts
|
||||
|
||||
7. **On completion or pause, show status**
|
||||
|
||||
Display:
|
||||
- Tasks completed this session
|
||||
- Overall progress: "N/M tasks complete"
|
||||
- If all done: suggest archive
|
||||
- If paused: explain why and wait for guidance
|
||||
|
||||
**Output During Implementation**
|
||||
|
||||
```
|
||||
## Implementing: <change-name> (schema: <schema-name>)
|
||||
|
||||
Working on task 3/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
|
||||
Working on task 4/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
```
|
||||
|
||||
**Output On Completion**
|
||||
|
||||
```
|
||||
## Implementation Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 7/7 tasks complete ✓
|
||||
|
||||
### Completed This Session
|
||||
- [x] Task 1
|
||||
- [x] Task 2
|
||||
...
|
||||
|
||||
All tasks complete! You can archive this change with `/opsx:archive`.
|
||||
```
|
||||
|
||||
**Output On Pause (Issue Encountered)**
|
||||
|
||||
```
|
||||
## Implementation Paused
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 4/7 tasks complete
|
||||
|
||||
### Issue Encountered
|
||||
<description of the issue>
|
||||
|
||||
**Options:**
|
||||
1. <option 1>
|
||||
2. <option 2>
|
||||
3. Other approach
|
||||
|
||||
What would you like to do?
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Keep going through tasks until done or blocked
|
||||
- Always read context files before starting (from the apply instructions output)
|
||||
- If task is ambiguous, pause and ask before implementing
|
||||
- If implementation reveals issues, pause and suggest artifact updates
|
||||
- Keep code changes minimal and scoped to each task
|
||||
- Update task checkbox immediately after completing each task
|
||||
- Pause on errors, blockers, or unclear requirements - don't guess
|
||||
- Use contextFiles from CLI output, don't assume specific file names
|
||||
|
||||
**Fluid Workflow Integration**
|
||||
|
||||
This skill supports the "actions on a change" model:
|
||||
|
||||
- **Can be invoked anytime**: Before all artifacts are done (if tasks exist), after partial implementation, interleaved with other actions
|
||||
- **Allows artifact updates**: If implementation reveals design issues, suggest updating artifacts - not phase-locked, work fluidly
|
||||
157
.claude/commands/opsx/archive.md
Normal file
157
.claude/commands/opsx/archive.md
Normal file
@@ -0,0 +1,157 @@
|
||||
---
|
||||
name: "OPSX: Archive"
|
||||
description: Archive a completed change in the experimental workflow
|
||||
category: Workflow
|
||||
tags: [workflow, archive, experimental]
|
||||
---
|
||||
|
||||
Archive a completed change in the experimental workflow.
|
||||
|
||||
**Input**: Optionally specify a change name after `/opsx:archive` (e.g., `/opsx:archive add-auth`). If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no change name provided, prompt for selection**
|
||||
|
||||
Run `openspec list --json` to get available changes. Use the **AskUserQuestion tool** to let the user select.
|
||||
|
||||
Show only active changes (not already archived).
|
||||
Include the schema used for each change if available.
|
||||
|
||||
**IMPORTANT**: Do NOT guess or auto-select a change. Always let the user choose.
|
||||
|
||||
2. **Check artifact completion status**
|
||||
|
||||
Run `openspec status --change "<name>" --json` to check artifact completion.
|
||||
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used
|
||||
- `artifacts`: List of artifacts with their status (`done` or other)
|
||||
|
||||
**If any artifacts are not `done`:**
|
||||
- Display warning listing incomplete artifacts
|
||||
- Prompt user for confirmation to continue
|
||||
- Proceed if user confirms
|
||||
|
||||
3. **Check task completion status**
|
||||
|
||||
Read the tasks file (typically `tasks.md`) to check for incomplete tasks.
|
||||
|
||||
Count tasks marked with `- [ ]` (incomplete) vs `- [x]` (complete).
|
||||
|
||||
**If incomplete tasks found:**
|
||||
- Display warning showing count of incomplete tasks
|
||||
- Prompt user for confirmation to continue
|
||||
- Proceed if user confirms
|
||||
|
||||
**If no tasks file exists:** Proceed without task-related warning.
|
||||
|
||||
4. **Assess delta spec sync state**
|
||||
|
||||
Check for delta specs at `openspec/changes/<name>/specs/`. If none exist, proceed without sync prompt.
|
||||
|
||||
**If delta specs exist:**
|
||||
- Compare each delta spec with its corresponding main spec at `openspec/specs/<capability>/spec.md`
|
||||
- Determine what changes would be applied (adds, modifications, removals, renames)
|
||||
- Show a combined summary before prompting
|
||||
|
||||
**Prompt options:**
|
||||
- If changes needed: "Sync now (recommended)", "Archive without syncing"
|
||||
- If already synced: "Archive now", "Sync anyway", "Cancel"
|
||||
|
||||
If user chooses sync, use Task tool (subagent_type: "general-purpose", prompt: "Use Skill tool to invoke openspec-sync-specs for change '<name>'. Delta spec analysis: <include the analyzed delta spec summary>"). Then proceed to archive whether the user chose to sync or to skip syncing; only stop if the user chose "Cancel".
|
||||
|
||||
5. **Perform the archive**
|
||||
|
||||
Create the archive directory if it doesn't exist:
|
||||
```bash
|
||||
mkdir -p openspec/changes/archive
|
||||
```
|
||||
|
||||
Generate target name using current date: `YYYY-MM-DD-<change-name>`
|
||||
|
||||
**Check if target already exists:**
|
||||
- If yes: Fail with error, suggest renaming existing archive or using different date
|
||||
- If no: Move the change directory to archive
|
||||
|
||||
```bash
|
||||
mv openspec/changes/<name> openspec/changes/archive/YYYY-MM-DD-<name>
|
||||
```
|
||||
|
||||
6. **Display summary**
|
||||
|
||||
Show archive completion summary including:
|
||||
- Change name
|
||||
- Schema that was used
|
||||
- Archive location
|
||||
- Spec sync status (synced / sync skipped / no delta specs)
|
||||
- Note about any warnings (incomplete artifacts/tasks)
|
||||
|
||||
**Output On Success**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** ✓ Synced to main specs
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Output On Success (No Delta Specs)**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** No delta specs
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Output On Success With Warnings**
|
||||
|
||||
```
|
||||
## Archive Complete (with warnings)
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** Sync skipped (user chose to skip)
|
||||
|
||||
**Warnings:**
|
||||
- Archived with 2 incomplete artifacts
|
||||
- Archived with 3 incomplete tasks
|
||||
- Delta spec sync was skipped (user chose to skip)
|
||||
|
||||
Review the archive if this was not intentional.
|
||||
```
|
||||
|
||||
**Output On Error (Archive Exists)**
|
||||
|
||||
```
|
||||
## Archive Failed
|
||||
|
||||
**Change:** <change-name>
|
||||
**Target:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
|
||||
Target archive directory already exists.
|
||||
|
||||
**Options:**
|
||||
1. Rename the existing archive
|
||||
2. Delete the existing archive if it's a duplicate
|
||||
3. Wait until a different date to archive
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Always prompt for change selection if not provided
|
||||
- Use artifact graph (openspec status --json) for completion checking
|
||||
- Don't block archive on warnings - just inform and confirm
|
||||
- Preserve .openspec.yaml when moving to archive (it moves with the directory)
|
||||
- Show clear summary of what happened
|
||||
- If sync is requested, use the Skill tool to invoke `openspec-sync-specs` (agent-driven)
|
||||
- If delta specs exist, always run the sync assessment and show the combined summary before prompting
|
||||
173
.claude/commands/opsx/explore.md
Normal file
173
.claude/commands/opsx/explore.md
Normal file
@@ -0,0 +1,173 @@
|
||||
---
|
||||
name: "OPSX: Explore"
|
||||
description: "Enter explore mode - think through ideas, investigate problems, clarify requirements"
|
||||
category: Workflow
|
||||
tags: [workflow, explore, experimental, thinking]
|
||||
---
|
||||
|
||||
Enter explore mode. Think deeply. Visualize freely. Follow the conversation wherever it goes.
|
||||
|
||||
**IMPORTANT: Explore mode is for thinking, not implementing.** You may read files, search code, and investigate the codebase, but you must NEVER write code or implement features. If the user asks you to implement something, remind them to exit explore mode first and create a change proposal. You MAY create OpenSpec artifacts (proposals, designs, specs) if the user asks—that's capturing thinking, not implementing.
|
||||
|
||||
**This is a stance, not a workflow.** There are no fixed steps, no required sequence, no mandatory outputs. You're a thinking partner helping the user explore.
|
||||
|
||||
**Input**: The argument after `/opsx:explore` is whatever the user wants to think about. Could be:
|
||||
- A vague idea: "real-time collaboration"
|
||||
- A specific problem: "the auth system is getting unwieldy"
|
||||
- A change name: "add-dark-mode" (to explore in context of that change)
|
||||
- A comparison: "postgres vs sqlite for this"
|
||||
- Nothing (just enter explore mode)
|
||||
|
||||
---
|
||||
|
||||
## The Stance
|
||||
|
||||
- **Curious, not prescriptive** - Ask questions that emerge naturally, don't follow a script
|
||||
- **Open threads, not interrogations** - Surface multiple interesting directions and let the user follow what resonates. Don't funnel them through a single path of questions.
|
||||
- **Visual** - Use ASCII diagrams liberally when they'd help clarify thinking
|
||||
- **Adaptive** - Follow interesting threads, pivot when new information emerges
|
||||
- **Patient** - Don't rush to conclusions, let the shape of the problem emerge
|
||||
- **Grounded** - Explore the actual codebase when relevant, don't just theorize
|
||||
|
||||
---
|
||||
|
||||
## What You Might Do
|
||||
|
||||
Depending on what the user brings, you might:
|
||||
|
||||
**Explore the problem space**
|
||||
- Ask clarifying questions that emerge from what they said
|
||||
- Challenge assumptions
|
||||
- Reframe the problem
|
||||
- Find analogies
|
||||
|
||||
**Investigate the codebase**
|
||||
- Map existing architecture relevant to the discussion
|
||||
- Find integration points
|
||||
- Identify patterns already in use
|
||||
- Surface hidden complexity
|
||||
|
||||
**Compare options**
|
||||
- Brainstorm multiple approaches
|
||||
- Build comparison tables
|
||||
- Sketch tradeoffs
|
||||
- Recommend a path (if asked)
|
||||
|
||||
**Visualize**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Use ASCII diagrams liberally │
|
||||
├─────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌────────┐ ┌────────┐ │
|
||||
│ │ State │────────▶│ State │ │
|
||||
│ │ A │ │ B │ │
|
||||
│ └────────┘ └────────┘ │
|
||||
│ │
|
||||
│ System diagrams, state machines, │
|
||||
│ data flows, architecture sketches, │
|
||||
│ dependency graphs, comparison tables │
|
||||
│ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Surface risks and unknowns**
|
||||
- Identify what could go wrong
|
||||
- Find gaps in understanding
|
||||
- Suggest spikes or investigations
|
||||
|
||||
---
|
||||
|
||||
## OpenSpec Awareness
|
||||
|
||||
You have full context of the OpenSpec system. Use it naturally, don't force it.
|
||||
|
||||
### Check for context
|
||||
|
||||
At the start, quickly check what exists:
|
||||
```bash
|
||||
openspec list --json
|
||||
```
|
||||
|
||||
This tells you:
|
||||
- If there are active changes
|
||||
- Their names, schemas, and status
|
||||
- What the user might be working on
|
||||
|
||||
If the user mentioned a specific change name, read its artifacts for context.
|
||||
|
||||
### When no change exists
|
||||
|
||||
Think freely. When insights crystallize, you might offer:
|
||||
|
||||
- "This feels solid enough to start a change. Want me to create a proposal?"
|
||||
- Or keep exploring - no pressure to formalize
|
||||
|
||||
### When a change exists
|
||||
|
||||
If the user mentions a change or you detect one is relevant:
|
||||
|
||||
1. **Read existing artifacts for context**
|
||||
- `openspec/changes/<name>/proposal.md`
|
||||
- `openspec/changes/<name>/design.md`
|
||||
- `openspec/changes/<name>/tasks.md`
|
||||
- etc.
|
||||
|
||||
2. **Reference them naturally in conversation**
|
||||
- "Your design mentions using Redis, but we just realized SQLite fits better..."
|
||||
- "The proposal scopes this to premium users, but we're now thinking everyone..."
|
||||
|
||||
3. **Offer to capture when decisions are made**
|
||||
|
||||
| Insight Type | Where to Capture |
|
||||
|--------------|------------------|
|
||||
| New requirement discovered | `specs/<capability>/spec.md` |
|
||||
| Requirement changed | `specs/<capability>/spec.md` |
|
||||
| Design decision made | `design.md` |
|
||||
| Scope changed | `proposal.md` |
|
||||
| New work identified | `tasks.md` |
|
||||
| Assumption invalidated | Relevant artifact |
|
||||
|
||||
Example offers:
|
||||
- "That's a design decision. Capture it in design.md?"
|
||||
- "This is a new requirement. Add it to specs?"
|
||||
- "This changes scope. Update the proposal?"
|
||||
|
||||
4. **The user decides** - Offer and move on. Don't pressure. Don't auto-capture.
|
||||
|
||||
---
|
||||
|
||||
## What You Don't Have To Do
|
||||
|
||||
- Follow a script
|
||||
- Ask the same questions every time
|
||||
- Produce a specific artifact
|
||||
- Reach a conclusion
|
||||
- Stay on topic if a tangent is valuable
|
||||
- Be brief (this is thinking time)
|
||||
|
||||
---
|
||||
|
||||
## Ending Exploration
|
||||
|
||||
There's no required ending. Exploration might:
|
||||
|
||||
- **Flow into a proposal**: "Ready to start? I can create a change proposal."
|
||||
- **Result in artifact updates**: "Updated design.md with these decisions"
|
||||
- **Just provide clarity**: User has what they need, moves on
|
||||
- **Continue later**: "We can pick this up anytime"
|
||||
|
||||
When things crystallize, you might offer a summary - but it's optional. Sometimes the thinking IS the value.
|
||||
|
||||
---
|
||||
|
||||
## Guardrails
|
||||
|
||||
- **Don't implement** - Never write code or implement features. Creating OpenSpec artifacts is fine, writing application code is not.
|
||||
- **Don't fake understanding** - If something is unclear, dig deeper
|
||||
- **Don't rush** - Exploration is thinking time, not task time
|
||||
- **Don't force structure** - Let patterns emerge naturally
|
||||
- **Don't auto-capture** - Offer to save insights, don't just do it
|
||||
- **Do visualize** - A good diagram is worth many paragraphs
|
||||
- **Do explore the codebase** - Ground discussions in reality
|
||||
- **Do question assumptions** - Including the user's and your own
|
||||
106
.claude/commands/opsx/propose.md
Normal file
106
.claude/commands/opsx/propose.md
Normal file
@@ -0,0 +1,106 @@
|
||||
---
|
||||
name: "OPSX: Propose"
|
||||
description: Propose a new change - create it and generate all artifacts in one step
|
||||
category: Workflow
|
||||
tags: [workflow, artifacts, experimental]
|
||||
---
|
||||
|
||||
Propose a new change - create the change and generate all artifacts in one step.
|
||||
|
||||
I'll create a change with artifacts:
|
||||
- proposal.md (what & why)
|
||||
- design.md (how)
|
||||
- tasks.md (implementation steps)
|
||||
|
||||
When ready to implement, run /opsx:apply
|
||||
|
||||
---
|
||||
|
||||
**Input**: The argument after `/opsx:propose` is the change name (kebab-case), OR a description of what the user wants to build.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no input provided, ask what they want to build**
|
||||
|
||||
Use the **AskUserQuestion tool** (open-ended, no preset options) to ask:
|
||||
> "What change do you want to work on? Describe what you want to build or fix."
|
||||
|
||||
From their description, derive a kebab-case name (e.g., "add user authentication" → `add-user-auth`).
|
||||
|
||||
**IMPORTANT**: Do NOT proceed without understanding what the user wants to build.
|
||||
|
||||
2. **Create the change directory**
|
||||
```bash
|
||||
openspec new change "<name>"
|
||||
```
|
||||
This creates a scaffolded change at `openspec/changes/<name>/` with `.openspec.yaml`.
|
||||
|
||||
3. **Get the artifact build order**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to get:
|
||||
- `applyRequires`: array of artifact IDs needed before implementation (e.g., `["tasks"]`)
|
||||
- `artifacts`: list of all artifacts with their status and dependencies
|
||||
|
||||
4. **Create artifacts in sequence until apply-ready**
|
||||
|
||||
Use the **TodoWrite tool** to track progress through the artifacts.
|
||||
|
||||
Loop through artifacts in dependency order (artifacts with no pending dependencies first):
|
||||
|
||||
a. **For each artifact that is `ready` (dependencies satisfied)**:
|
||||
- Get instructions:
|
||||
```bash
|
||||
openspec instructions <artifact-id> --change "<name>" --json
|
||||
```
|
||||
- The instructions JSON includes:
|
||||
- `context`: Project background (constraints for you - do NOT include in output)
|
||||
- `rules`: Artifact-specific rules (constraints for you - do NOT include in output)
|
||||
- `template`: The structure to use for your output file
|
||||
- `instruction`: Schema-specific guidance for this artifact type
|
||||
- `outputPath`: Where to write the artifact
|
||||
- `dependencies`: Completed artifacts to read for context
|
||||
- Read any completed dependency files for context
|
||||
- Create the artifact file using `template` as the structure
|
||||
- Apply `context` and `rules` as constraints - but do NOT copy them into the file
|
||||
- Show brief progress: "Created <artifact-id>"
|
||||
|
||||
b. **Continue until all `applyRequires` artifacts are complete**
|
||||
- After creating each artifact, re-run `openspec status --change "<name>" --json`
|
||||
- Check if every artifact ID in `applyRequires` has `status: "done"` in the artifacts array
|
||||
- Stop when all `applyRequires` artifacts are done
|
||||
|
||||
c. **If an artifact requires user input** (unclear context):
|
||||
- Use **AskUserQuestion tool** to clarify
|
||||
- Then continue with creation
|
||||
|
||||
5. **Show final status**
|
||||
```bash
|
||||
openspec status --change "<name>"
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
After completing all artifacts, summarize:
|
||||
- Change name and location
|
||||
- List of artifacts created with brief descriptions
|
||||
- What's ready: "All artifacts created! Ready for implementation."
|
||||
- Prompt: "Run `/opsx:apply` to start implementing."
|
||||
|
||||
**Artifact Creation Guidelines**
|
||||
|
||||
- Follow the `instruction` field from `openspec instructions` for each artifact type
|
||||
- The schema defines what each artifact should contain - follow it
|
||||
- Read dependency artifacts for context before creating new ones
|
||||
- Use `template` as the structure for your output file - fill in its sections
|
||||
- **IMPORTANT**: `context` and `rules` are constraints for YOU, not content for the file
|
||||
- Do NOT copy `<context>`, `<rules>`, `<project_context>` blocks into the artifact
|
||||
- These guide what you write, but should never appear in the output
|
||||
|
||||
**Guardrails**
|
||||
- Create ALL artifacts needed for implementation (as defined by schema's `apply.requires`)
|
||||
- Always read dependency artifacts before creating a new one
|
||||
- If context is critically unclear, ask the user - but prefer making reasonable decisions to keep momentum
|
||||
- If a change with that name already exists, ask if user wants to continue it or create a new one
|
||||
- Verify each artifact file exists after writing before proceeding to next
|
||||
156
.claude/skills/openspec-apply-change/SKILL.md
Normal file
156
.claude/skills/openspec-apply-change/SKILL.md
Normal file
@@ -0,0 +1,156 @@
|
||||
---
|
||||
name: openspec-apply-change
|
||||
description: Implement tasks from an OpenSpec change. Use when the user wants to start implementing, continue implementation, or work through tasks.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Implement tasks from an OpenSpec change.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **Select the change**
|
||||
|
||||
If a name is provided, use it. Otherwise:
|
||||
- Infer from conversation context if the user mentioned a change
|
||||
- Auto-select if only one active change exists
|
||||
- If ambiguous, run `openspec list --json` to get available changes and use the **AskUserQuestion tool** to let the user select
|
||||
|
||||
Always announce: "Using change: <name>" and how to override (e.g., `/opsx:apply <other>`).
|
||||
|
||||
2. **Check status to understand the schema**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used (e.g., "spec-driven")
|
||||
- Which artifact contains the tasks (typically "tasks" for spec-driven, check status for others)
|
||||
|
||||
3. **Get apply instructions**
|
||||
|
||||
```bash
|
||||
openspec instructions apply --change "<name>" --json
|
||||
```
|
||||
|
||||
This returns:
|
||||
- Context file paths (varies by schema - could be proposal/specs/design/tasks or spec/tests/implementation/docs)
|
||||
- Progress (total, complete, remaining)
|
||||
- Task list with status
|
||||
- Dynamic instruction based on current state
|
||||
|
||||
**Handle states:**
|
||||
- If `state: "blocked"` (missing artifacts): show message, suggest using openspec-continue-change
|
||||
- If `state: "all_done"`: congratulate, suggest archive
|
||||
- Otherwise: proceed to implementation
|
||||
|
||||
4. **Read context files**
|
||||
|
||||
Read the files listed in `contextFiles` from the apply instructions output.
|
||||
The files depend on the schema being used:
|
||||
- **spec-driven**: proposal, specs, design, tasks
|
||||
- Other schemas: follow the contextFiles from CLI output
|
||||
|
||||
5. **Show current progress**
|
||||
|
||||
Display:
|
||||
- Schema being used
|
||||
- Progress: "N/M tasks complete"
|
||||
- Remaining tasks overview
|
||||
- Dynamic instruction from CLI
|
||||
|
||||
6. **Implement tasks (loop until done or blocked)**
|
||||
|
||||
For each pending task:
|
||||
- Show which task is being worked on
|
||||
- Make the code changes required
|
||||
- Keep changes minimal and focused
|
||||
- Mark task complete in the tasks file: `- [ ]` → `- [x]`
|
||||
- Continue to next task
|
||||
|
||||
**Pause if:**
|
||||
- Task is unclear → ask for clarification
|
||||
- Implementation reveals a design issue → suggest updating artifacts
|
||||
- Error or blocker encountered → report and wait for guidance
|
||||
- User interrupts
|
||||
|
||||
7. **On completion or pause, show status**
|
||||
|
||||
Display:
|
||||
- Tasks completed this session
|
||||
- Overall progress: "N/M tasks complete"
|
||||
- If all done: suggest archive
|
||||
- If paused: explain why and wait for guidance
|
||||
|
||||
**Output During Implementation**
|
||||
|
||||
```
|
||||
## Implementing: <change-name> (schema: <schema-name>)
|
||||
|
||||
Working on task 3/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
|
||||
Working on task 4/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
```
|
||||
|
||||
**Output On Completion**
|
||||
|
||||
```
|
||||
## Implementation Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 7/7 tasks complete ✓
|
||||
|
||||
### Completed This Session
|
||||
- [x] Task 1
|
||||
- [x] Task 2
|
||||
...
|
||||
|
||||
All tasks complete! Ready to archive this change.
|
||||
```
|
||||
|
||||
**Output On Pause (Issue Encountered)**
|
||||
|
||||
```
|
||||
## Implementation Paused
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 4/7 tasks complete
|
||||
|
||||
### Issue Encountered
|
||||
<description of the issue>
|
||||
|
||||
**Options:**
|
||||
1. <option 1>
|
||||
2. <option 2>
|
||||
3. Other approach
|
||||
|
||||
What would you like to do?
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Keep going through tasks until done or blocked
|
||||
- Always read context files before starting (from the apply instructions output)
|
||||
- If task is ambiguous, pause and ask before implementing
|
||||
- If implementation reveals issues, pause and suggest artifact updates
|
||||
- Keep code changes minimal and scoped to each task
|
||||
- Update task checkbox immediately after completing each task
|
||||
- Pause on errors, blockers, or unclear requirements - don't guess
|
||||
- Use contextFiles from CLI output, don't assume specific file names
|
||||
|
||||
**Fluid Workflow Integration**
|
||||
|
||||
This skill supports the "actions on a change" model:
|
||||
|
||||
- **Can be invoked anytime**: Before all artifacts are done (if tasks exist), after partial implementation, interleaved with other actions
|
||||
- **Allows artifact updates**: If implementation reveals design issues, suggest updating artifacts - not phase-locked, work fluidly
|
||||
114
.claude/skills/openspec-archive-change/SKILL.md
Normal file
114
.claude/skills/openspec-archive-change/SKILL.md
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
name: openspec-archive-change
|
||||
description: Archive a completed change in the experimental workflow. Use when the user wants to finalize and archive a change after implementation is complete.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Archive a completed change in the experimental workflow.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no change name provided, prompt for selection**
|
||||
|
||||
Run `openspec list --json` to get available changes. Use the **AskUserQuestion tool** to let the user select.
|
||||
|
||||
Show only active changes (not already archived).
|
||||
Include the schema used for each change if available.
|
||||
|
||||
**IMPORTANT**: Do NOT guess or auto-select a change. Always let the user choose.
|
||||
|
||||
2. **Check artifact completion status**
|
||||
|
||||
Run `openspec status --change "<name>" --json` to check artifact completion.
|
||||
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used
|
||||
- `artifacts`: List of artifacts with their status (`done` or other)
|
||||
|
||||
**If any artifacts are not `done`:**
|
||||
- Display warning listing incomplete artifacts
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
3. **Check task completion status**
|
||||
|
||||
Read the tasks file (typically `tasks.md`) to check for incomplete tasks.
|
||||
|
||||
Count tasks marked with `- [ ]` (incomplete) vs `- [x]` (complete).
|
||||
|
||||
**If incomplete tasks found:**
|
||||
- Display warning showing count of incomplete tasks
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
**If no tasks file exists:** Proceed without task-related warning.
|
||||
|
||||
4. **Assess delta spec sync state**
|
||||
|
||||
Check for delta specs at `openspec/changes/<name>/specs/`. If none exist, proceed without sync prompt.
|
||||
|
||||
**If delta specs exist:**
|
||||
- Compare each delta spec with its corresponding main spec at `openspec/specs/<capability>/spec.md`
|
||||
- Determine what changes would be applied (adds, modifications, removals, renames)
|
||||
- Show a combined summary before prompting
|
||||
|
||||
**Prompt options:**
|
||||
- If changes needed: "Sync now (recommended)", "Archive without syncing"
|
||||
- If already synced: "Archive now", "Sync anyway", "Cancel"
|
||||
|
||||
If user chooses sync, use Task tool (subagent_type: "general-purpose", prompt: "Use Skill tool to invoke openspec-sync-specs for change '<name>'. Delta spec analysis: <include the analyzed delta spec summary>"). Proceed to archive regardless of choice.
|
||||
|
||||
5. **Perform the archive**
|
||||
|
||||
Create the archive directory if it doesn't exist:
|
||||
```bash
|
||||
mkdir -p openspec/changes/archive
|
||||
```
|
||||
|
||||
Generate target name using current date: `YYYY-MM-DD-<change-name>`
|
||||
|
||||
**Check if target already exists:**
|
||||
- If yes: Fail with error, suggest renaming existing archive or using different date
|
||||
- If no: Move the change directory to archive
|
||||
|
||||
```bash
|
||||
mv openspec/changes/<name> openspec/changes/archive/YYYY-MM-DD-<name>
|
||||
```
|
||||
|
||||
6. **Display summary**
|
||||
|
||||
Show archive completion summary including:
|
||||
- Change name
|
||||
- Schema that was used
|
||||
- Archive location
|
||||
- Whether specs were synced (if applicable)
|
||||
- Note about any warnings (incomplete artifacts/tasks)
|
||||
|
||||
**Output On Success**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** ✓ Synced to main specs (or "No delta specs" or "Sync skipped")
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Always prompt for change selection if not provided
|
||||
- Use artifact graph (openspec status --json) for completion checking
|
||||
- Don't block archive on warnings - just inform and confirm
|
||||
- Preserve .openspec.yaml when moving to archive (it moves with the directory)
|
||||
- Show clear summary of what happened
|
||||
- If sync is requested, use openspec-sync-specs approach (agent-driven)
|
||||
- If delta specs exist, always run the sync assessment and show the combined summary before prompting
|
||||
288
.claude/skills/openspec-explore/SKILL.md
Normal file
288
.claude/skills/openspec-explore/SKILL.md
Normal file
@@ -0,0 +1,288 @@
|
||||
---
|
||||
name: openspec-explore
|
||||
description: Enter explore mode - a thinking partner for exploring ideas, investigating problems, and clarifying requirements. Use when the user wants to think through something before or during a change.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Enter explore mode. Think deeply. Visualize freely. Follow the conversation wherever it goes.
|
||||
|
||||
**IMPORTANT: Explore mode is for thinking, not implementing.** You may read files, search code, and investigate the codebase, but you must NEVER write code or implement features. If the user asks you to implement something, remind them to exit explore mode first and create a change proposal. You MAY create OpenSpec artifacts (proposals, designs, specs) if the user asks—that's capturing thinking, not implementing.
|
||||
|
||||
**This is a stance, not a workflow.** There are no fixed steps, no required sequence, no mandatory outputs. You're a thinking partner helping the user explore.
|
||||
|
||||
---
|
||||
|
||||
## The Stance
|
||||
|
||||
- **Curious, not prescriptive** - Ask questions that emerge naturally, don't follow a script
|
||||
- **Open threads, not interrogations** - Surface multiple interesting directions and let the user follow what resonates. Don't funnel them through a single path of questions.
|
||||
- **Visual** - Use ASCII diagrams liberally when they'd help clarify thinking
|
||||
- **Adaptive** - Follow interesting threads, pivot when new information emerges
|
||||
- **Patient** - Don't rush to conclusions, let the shape of the problem emerge
|
||||
- **Grounded** - Explore the actual codebase when relevant, don't just theorize
|
||||
|
||||
---
|
||||
|
||||
## What You Might Do
|
||||
|
||||
Depending on what the user brings, you might:
|
||||
|
||||
**Explore the problem space**
|
||||
- Ask clarifying questions that emerge from what they said
|
||||
- Challenge assumptions
|
||||
- Reframe the problem
|
||||
- Find analogies
|
||||
|
||||
**Investigate the codebase**
|
||||
- Map existing architecture relevant to the discussion
|
||||
- Find integration points
|
||||
- Identify patterns already in use
|
||||
- Surface hidden complexity
|
||||
|
||||
**Compare options**
|
||||
- Brainstorm multiple approaches
|
||||
- Build comparison tables
|
||||
- Sketch tradeoffs
|
||||
- Recommend a path (if asked)
|
||||
|
||||
**Visualize**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Use ASCII diagrams liberally │
|
||||
├─────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌────────┐ ┌────────┐ │
|
||||
│ │ State │────────▶│ State │ │
|
||||
│ │ A │ │ B │ │
|
||||
│ └────────┘ └────────┘ │
|
||||
│ │
|
||||
│ System diagrams, state machines, │
|
||||
│ data flows, architecture sketches, │
|
||||
│ dependency graphs, comparison tables │
|
||||
│ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Surface risks and unknowns**
|
||||
- Identify what could go wrong
|
||||
- Find gaps in understanding
|
||||
- Suggest spikes or investigations
|
||||
|
||||
---
|
||||
|
||||
## OpenSpec Awareness
|
||||
|
||||
You have full context of the OpenSpec system. Use it naturally, don't force it.
|
||||
|
||||
### Check for context
|
||||
|
||||
At the start, quickly check what exists:
|
||||
```bash
|
||||
openspec list --json
|
||||
```
|
||||
|
||||
This tells you:
|
||||
- If there are active changes
|
||||
- Their names, schemas, and status
|
||||
- What the user might be working on
|
||||
|
||||
### When no change exists
|
||||
|
||||
Think freely. When insights crystallize, you might offer:
|
||||
|
||||
- "This feels solid enough to start a change. Want me to create a proposal?"
|
||||
- Or keep exploring - no pressure to formalize
|
||||
|
||||
### When a change exists
|
||||
|
||||
If the user mentions a change or you detect one is relevant:
|
||||
|
||||
1. **Read existing artifacts for context**
|
||||
- `openspec/changes/<name>/proposal.md`
|
||||
- `openspec/changes/<name>/design.md`
|
||||
- `openspec/changes/<name>/tasks.md`
|
||||
- etc.
|
||||
|
||||
2. **Reference them naturally in conversation**
|
||||
- "Your design mentions using Redis, but we just realized SQLite fits better..."
|
||||
- "The proposal scopes this to premium users, but we're now thinking everyone..."
|
||||
|
||||
3. **Offer to capture when decisions are made**
|
||||
|
||||
| Insight Type | Where to Capture |
|
||||
|--------------|------------------|
|
||||
| New requirement discovered | `specs/<capability>/spec.md` |
|
||||
| Requirement changed | `specs/<capability>/spec.md` |
|
||||
| Design decision made | `design.md` |
|
||||
| Scope changed | `proposal.md` |
|
||||
| New work identified | `tasks.md` |
|
||||
| Assumption invalidated | Relevant artifact |
|
||||
|
||||
Example offers:
|
||||
- "That's a design decision. Capture it in design.md?"
|
||||
- "This is a new requirement. Add it to specs?"
|
||||
- "This changes scope. Update the proposal?"
|
||||
|
||||
4. **The user decides** - Offer and move on. Don't pressure. Don't auto-capture.
|
||||
|
||||
---
|
||||
|
||||
## What You Don't Have To Do
|
||||
|
||||
- Follow a script
|
||||
- Ask the same questions every time
|
||||
- Produce a specific artifact
|
||||
- Reach a conclusion
|
||||
- Stay on topic if a tangent is valuable
|
||||
- Be brief (this is thinking time)
|
||||
|
||||
---
|
||||
|
||||
## Handling Different Entry Points
|
||||
|
||||
**User brings a vague idea:**
|
||||
```
|
||||
User: I'm thinking about adding real-time collaboration
|
||||
|
||||
You: Real-time collab is a big space. Let me think about this...
|
||||
|
||||
COLLABORATION SPECTRUM
|
||||
════════════════════════════════════════════
|
||||
|
||||
Awareness Coordination Sync
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌────────┐ ┌────────┐ ┌────────┐
|
||||
│Presence│ │Cursors │ │ CRDT │
|
||||
│ "3 │ │ Multi │ │Conflict│
|
||||
│online" │ │ select │ │ free │
|
||||
└────────┘ └────────┘ └────────┘
|
||||
│ │ │
|
||||
trivial moderate complex
|
||||
|
||||
Where's your head at?
|
||||
```
|
||||
|
||||
**User brings a specific problem:**
|
||||
```
|
||||
User: The auth system is a mess
|
||||
|
||||
You: [reads codebase]
|
||||
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ CURRENT AUTH FLOW │
|
||||
└─────────────────────────────────────────────┘
|
||||
│
|
||||
┌─────────────┼─────────────┐
|
||||
▼ ▼ ▼
|
||||
┌─────────┐ ┌─────────┐ ┌─────────┐
|
||||
│ Google │ │ GitHub │ │ Email │
|
||||
│ OAuth │ │ OAuth │ │ Magic │
|
||||
└────┬────┘ └────┬────┘ └────┬────┘
|
||||
│ │ │
|
||||
└─────────────┼─────────────┘
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Session │
|
||||
└─────┬─────┘
|
||||
│
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Perms │
|
||||
└───────────┘
|
||||
|
||||
I see three tangles. Which one's burning?
|
||||
```
|
||||
|
||||
**User is stuck mid-implementation:**
|
||||
```
|
||||
User: /opsx:explore add-auth-system
|
||||
The OAuth integration is more complex than expected
|
||||
|
||||
You: [reads change artifacts]
|
||||
|
||||
You're on task 4: "Implement OAuth flow"
|
||||
|
||||
Let me trace what's involved...
|
||||
|
||||
[draws diagram, explores options, suggests paths]
|
||||
|
||||
Want to update the design to reflect this?
|
||||
Or add a spike task to investigate?
|
||||
```
|
||||
|
||||
**User wants to compare options:**
|
||||
```
|
||||
User: Should we use Postgres or SQLite?
|
||||
|
||||
You: Generic answer is boring. What's the context?
|
||||
|
||||
User: A CLI tool that tracks local dev environments
|
||||
|
||||
You: That changes everything.
|
||||
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ CLI TOOL DATA STORAGE │
|
||||
└─────────────────────────────────────────────────┘
|
||||
|
||||
Key constraints:
|
||||
• No daemon running
|
||||
• Must work offline
|
||||
• Single user
|
||||
|
||||
SQLite Postgres
|
||||
Deployment embedded ✓ needs server ✗
|
||||
Offline yes ✓ no ✗
|
||||
Single file yes ✓ no ✗
|
||||
|
||||
SQLite. Not even close.
|
||||
|
||||
Unless... is there a sync component?
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Ending Discovery
|
||||
|
||||
There's no required ending. Discovery might:
|
||||
|
||||
- **Flow into a proposal**: "Ready to start? I can create a change proposal."
|
||||
- **Result in artifact updates**: "Updated design.md with these decisions"
|
||||
- **Just provide clarity**: User has what they need, moves on
|
||||
- **Continue later**: "We can pick this up anytime"
|
||||
|
||||
When it feels like things are crystallizing, you might summarize:
|
||||
|
||||
```
|
||||
## What We Figured Out
|
||||
|
||||
**The problem**: [crystallized understanding]
|
||||
|
||||
**The approach**: [if one emerged]
|
||||
|
||||
**Open questions**: [if any remain]
|
||||
|
||||
**Next steps** (if ready):
|
||||
- Create a change proposal
|
||||
- Keep exploring: just keep talking
|
||||
```
|
||||
|
||||
But this summary is optional. Sometimes the thinking IS the value.
|
||||
|
||||
---
|
||||
|
||||
## Guardrails
|
||||
|
||||
- **Don't implement** - Never write code or implement features. Creating OpenSpec artifacts is fine, writing application code is not.
|
||||
- **Don't fake understanding** - If something is unclear, dig deeper
|
||||
- **Don't rush** - Discovery is thinking time, not task time
|
||||
- **Don't force structure** - Let patterns emerge naturally
|
||||
- **Don't auto-capture** - Offer to save insights, don't just do it
|
||||
- **Do visualize** - A good diagram is worth many paragraphs
|
||||
- **Do explore the codebase** - Ground discussions in reality
|
||||
- **Do question assumptions** - Including the user's and your own
|
||||
110
.claude/skills/openspec-propose/SKILL.md
Normal file
110
.claude/skills/openspec-propose/SKILL.md
Normal file
@@ -0,0 +1,110 @@
|
||||
---
|
||||
name: openspec-propose
|
||||
description: Propose a new change with all artifacts generated in one step. Use when the user wants to quickly describe what they want to build and get a complete proposal with design, specs, and tasks ready for implementation.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Propose a new change - create the change and generate all artifacts in one step.
|
||||
|
||||
I'll create a change with artifacts:
|
||||
- proposal.md (what & why)
|
||||
- design.md (how)
|
||||
- tasks.md (implementation steps)
|
||||
|
||||
When ready to implement, run /opsx:apply
|
||||
|
||||
---
|
||||
|
||||
**Input**: The user's request should include a change name (kebab-case) OR a description of what they want to build.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no clear input provided, ask what they want to build**
|
||||
|
||||
Use the **AskUserQuestion tool** (open-ended, no preset options) to ask:
|
||||
> "What change do you want to work on? Describe what you want to build or fix."
|
||||
|
||||
From their description, derive a kebab-case name (e.g., "add user authentication" → `add-user-auth`).
|
||||
|
||||
**IMPORTANT**: Do NOT proceed without understanding what the user wants to build.
|
||||
|
||||
2. **Create the change directory**
|
||||
```bash
|
||||
openspec new change "<name>"
|
||||
```
|
||||
This creates a scaffolded change at `openspec/changes/<name>/` with `.openspec.yaml`.
|
||||
|
||||
3. **Get the artifact build order**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to get:
|
||||
- `applyRequires`: array of artifact IDs needed before implementation (e.g., `["tasks"]`)
|
||||
- `artifacts`: list of all artifacts with their status and dependencies
|
||||
|
||||
4. **Create artifacts in sequence until apply-ready**
|
||||
|
||||
Use the **TodoWrite tool** to track progress through the artifacts.
|
||||
|
||||
Loop through artifacts in dependency order (artifacts with no pending dependencies first):
|
||||
|
||||
a. **For each artifact that is `ready` (dependencies satisfied)**:
|
||||
- Get instructions:
|
||||
```bash
|
||||
openspec instructions <artifact-id> --change "<name>" --json
|
||||
```
|
||||
- The instructions JSON includes:
|
||||
- `context`: Project background (constraints for you - do NOT include in output)
|
||||
- `rules`: Artifact-specific rules (constraints for you - do NOT include in output)
|
||||
- `template`: The structure to use for your output file
|
||||
- `instruction`: Schema-specific guidance for this artifact type
|
||||
- `outputPath`: Where to write the artifact
|
||||
- `dependencies`: Completed artifacts to read for context
|
||||
- Read any completed dependency files for context
|
||||
- Create the artifact file using `template` as the structure
|
||||
- Apply `context` and `rules` as constraints - but do NOT copy them into the file
|
||||
- Show brief progress: "Created <artifact-id>"
|
||||
|
||||
b. **Continue until all `applyRequires` artifacts are complete**
|
||||
- After creating each artifact, re-run `openspec status --change "<name>" --json`
|
||||
- Check if every artifact ID in `applyRequires` has `status: "done"` in the artifacts array
|
||||
- Stop when all `applyRequires` artifacts are done
|
||||
|
||||
c. **If an artifact requires user input** (unclear context):
|
||||
- Use **AskUserQuestion tool** to clarify
|
||||
- Then continue with creation
|
||||
|
||||
5. **Show final status**
|
||||
```bash
|
||||
openspec status --change "<name>"
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
After completing all artifacts, summarize:
|
||||
- Change name and location
|
||||
- List of artifacts created with brief descriptions
|
||||
- What's ready: "All artifacts created! Ready for implementation."
|
||||
- Prompt: "Run `/opsx:apply` or ask me to implement to start working on the tasks."
|
||||
|
||||
**Artifact Creation Guidelines**
|
||||
|
||||
- Follow the `instruction` field from `openspec instructions` for each artifact type
|
||||
- The schema defines what each artifact should contain - follow it
|
||||
- Read dependency artifacts for context before creating new ones
|
||||
- Use `template` as the structure for your output file - fill in its sections
|
||||
- **IMPORTANT**: `context` and `rules` are constraints for YOU, not content for the file
|
||||
- Do NOT copy `<context>`, `<rules>`, `<project_context>` blocks into the artifact
|
||||
- These guide what you write, but should never appear in the output
|
||||
|
||||
**Guardrails**
|
||||
- Create ALL artifacts needed for implementation (as defined by schema's `apply.requires`)
|
||||
- Always read dependency artifacts before creating a new one
|
||||
- If context is critically unclear, ask the user - but prefer making reasonable decisions to keep momentum
|
||||
- If a change with that name already exists, ask if user wants to continue it or create a new one
|
||||
- Verify each artifact file exists after writing before proceeding to next
|
||||
156
.codex/skills/openspec-apply-change/SKILL.md
Normal file
156
.codex/skills/openspec-apply-change/SKILL.md
Normal file
@@ -0,0 +1,156 @@
|
||||
---
|
||||
name: openspec-apply-change
|
||||
description: Implement tasks from an OpenSpec change. Use when the user wants to start implementing, continue implementation, or work through tasks.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Implement tasks from an OpenSpec change.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **Select the change**
|
||||
|
||||
If a name is provided, use it. Otherwise:
|
||||
- Infer from conversation context if the user mentioned a change
|
||||
- Auto-select if only one active change exists
|
||||
- If ambiguous, run `openspec list --json` to get available changes and use the **AskUserQuestion tool** to let the user select
|
||||
|
||||
Always announce: "Using change: <name>" and how to override (e.g., `/opsx:apply <other>`).
|
||||
|
||||
2. **Check status to understand the schema**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used (e.g., "spec-driven")
|
||||
- Which artifact contains the tasks (typically "tasks" for spec-driven, check status for others)
|
||||
|
||||
3. **Get apply instructions**
|
||||
|
||||
```bash
|
||||
openspec instructions apply --change "<name>" --json
|
||||
```
|
||||
|
||||
This returns:
|
||||
- Context file paths (varies by schema - could be proposal/specs/design/tasks or spec/tests/implementation/docs)
|
||||
- Progress (total, complete, remaining)
|
||||
- Task list with status
|
||||
- Dynamic instruction based on current state
|
||||
|
||||
**Handle states:**
|
||||
- If `state: "blocked"` (missing artifacts): show message, suggest using openspec-continue-change
|
||||
- If `state: "all_done"`: congratulate, suggest archive
|
||||
- Otherwise: proceed to implementation
|
||||
|
||||
4. **Read context files**
|
||||
|
||||
Read the files listed in `contextFiles` from the apply instructions output.
|
||||
The files depend on the schema being used:
|
||||
- **spec-driven**: proposal, specs, design, tasks
|
||||
- Other schemas: follow the contextFiles from CLI output
|
||||
|
||||
5. **Show current progress**
|
||||
|
||||
Display:
|
||||
- Schema being used
|
||||
- Progress: "N/M tasks complete"
|
||||
- Remaining tasks overview
|
||||
- Dynamic instruction from CLI
|
||||
|
||||
6. **Implement tasks (loop until done or blocked)**
|
||||
|
||||
For each pending task:
|
||||
- Show which task is being worked on
|
||||
- Make the code changes required
|
||||
- Keep changes minimal and focused
|
||||
- Mark task complete in the tasks file: `- [ ]` → `- [x]`
|
||||
- Continue to next task
|
||||
|
||||
**Pause if:**
|
||||
- Task is unclear → ask for clarification
|
||||
- Implementation reveals a design issue → suggest updating artifacts
|
||||
- Error or blocker encountered → report and wait for guidance
|
||||
- User interrupts
|
||||
|
||||
7. **On completion or pause, show status**
|
||||
|
||||
Display:
|
||||
- Tasks completed this session
|
||||
- Overall progress: "N/M tasks complete"
|
||||
- If all done: suggest archive
|
||||
- If paused: explain why and wait for guidance
|
||||
|
||||
**Output During Implementation**
|
||||
|
||||
```
|
||||
## Implementing: <change-name> (schema: <schema-name>)
|
||||
|
||||
Working on task 3/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
|
||||
Working on task 4/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
```
|
||||
|
||||
**Output On Completion**
|
||||
|
||||
```
|
||||
## Implementation Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 7/7 tasks complete ✓
|
||||
|
||||
### Completed This Session
|
||||
- [x] Task 1
|
||||
- [x] Task 2
|
||||
...
|
||||
|
||||
All tasks complete! Ready to archive this change.
|
||||
```
|
||||
|
||||
**Output On Pause (Issue Encountered)**
|
||||
|
||||
```
|
||||
## Implementation Paused
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 4/7 tasks complete
|
||||
|
||||
### Issue Encountered
|
||||
<description of the issue>
|
||||
|
||||
**Options:**
|
||||
1. <option 1>
|
||||
2. <option 2>
|
||||
3. Other approach
|
||||
|
||||
What would you like to do?
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Keep going through tasks until done or blocked
|
||||
- Always read context files before starting (from the apply instructions output)
|
||||
- If task is ambiguous, pause and ask before implementing
|
||||
- If implementation reveals issues, pause and suggest artifact updates
|
||||
- Keep code changes minimal and scoped to each task
|
||||
- Update task checkbox immediately after completing each task
|
||||
- Pause on errors, blockers, or unclear requirements - don't guess
|
||||
- Use contextFiles from CLI output, don't assume specific file names
|
||||
|
||||
**Fluid Workflow Integration**
|
||||
|
||||
This skill supports the "actions on a change" model:
|
||||
|
||||
- **Can be invoked anytime**: Before all artifacts are done (if tasks exist), after partial implementation, interleaved with other actions
|
||||
- **Allows artifact updates**: If implementation reveals design issues, suggest updating artifacts - not phase-locked, work fluidly
|
||||
114
.codex/skills/openspec-archive-change/SKILL.md
Normal file
114
.codex/skills/openspec-archive-change/SKILL.md
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
name: openspec-archive-change
|
||||
description: Archive a completed change in the experimental workflow. Use when the user wants to finalize and archive a change after implementation is complete.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Archive a completed change in the experimental workflow.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no change name provided, prompt for selection**
|
||||
|
||||
Run `openspec list --json` to get available changes. Use the **AskUserQuestion tool** to let the user select.
|
||||
|
||||
Show only active changes (not already archived).
|
||||
Include the schema used for each change if available.
|
||||
|
||||
**IMPORTANT**: Do NOT guess or auto-select a change. Always let the user choose.
|
||||
|
||||
2. **Check artifact completion status**
|
||||
|
||||
Run `openspec status --change "<name>" --json` to check artifact completion.
|
||||
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used
|
||||
- `artifacts`: List of artifacts with their status (`done` or other)
|
||||
|
||||
**If any artifacts are not `done`:**
|
||||
- Display warning listing incomplete artifacts
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
3. **Check task completion status**
|
||||
|
||||
Read the tasks file (typically `tasks.md`) to check for incomplete tasks.
|
||||
|
||||
Count tasks marked with `- [ ]` (incomplete) vs `- [x]` (complete).
|
||||
|
||||
**If incomplete tasks found:**
|
||||
- Display warning showing count of incomplete tasks
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
**If no tasks file exists:** Proceed without task-related warning.
|
||||
|
||||
4. **Assess delta spec sync state**
|
||||
|
||||
Check for delta specs at `openspec/changes/<name>/specs/`. If none exist, proceed without sync prompt.
|
||||
|
||||
**If delta specs exist:**
|
||||
- Compare each delta spec with its corresponding main spec at `openspec/specs/<capability>/spec.md`
|
||||
- Determine what changes would be applied (adds, modifications, removals, renames)
|
||||
- Show a combined summary before prompting
|
||||
|
||||
**Prompt options:**
|
||||
- If changes needed: "Sync now (recommended)", "Archive without syncing"
|
||||
- If already synced: "Archive now", "Sync anyway", "Cancel"
|
||||
|
||||
If user chooses sync, use Task tool (subagent_type: "general-purpose", prompt: "Use Skill tool to invoke openspec-sync-specs for change '<name>'. Delta spec analysis: <include the analyzed delta spec summary>"). Proceed to archive regardless of choice.
|
||||
|
||||
5. **Perform the archive**
|
||||
|
||||
Create the archive directory if it doesn't exist:
|
||||
```bash
|
||||
mkdir -p openspec/changes/archive
|
||||
```
|
||||
|
||||
Generate target name using current date: `YYYY-MM-DD-<change-name>`
|
||||
|
||||
**Check if target already exists:**
|
||||
- If yes: Fail with error, suggest renaming existing archive or using different date
|
||||
- If no: Move the change directory to archive
|
||||
|
||||
```bash
|
||||
mv openspec/changes/<name> openspec/changes/archive/YYYY-MM-DD-<name>
|
||||
```
|
||||
|
||||
6. **Display summary**
|
||||
|
||||
Show archive completion summary including:
|
||||
- Change name
|
||||
- Schema that was used
|
||||
- Archive location
|
||||
- Whether specs were synced (if applicable)
|
||||
- Note about any warnings (incomplete artifacts/tasks)
|
||||
|
||||
**Output On Success**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** ✓ Synced to main specs (or "No delta specs" or "Sync skipped")
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Always prompt for change selection if not provided
|
||||
- Use artifact graph (openspec status --json) for completion checking
|
||||
- Don't block archive on warnings - just inform and confirm
|
||||
- Preserve .openspec.yaml when moving to archive (it moves with the directory)
|
||||
- Show clear summary of what happened
|
||||
- If sync is requested, use openspec-sync-specs approach (agent-driven)
|
||||
- If delta specs exist, always run the sync assessment and show the combined summary before prompting
|
||||
288
.codex/skills/openspec-explore/SKILL.md
Normal file
288
.codex/skills/openspec-explore/SKILL.md
Normal file
@@ -0,0 +1,288 @@
|
||||
---
|
||||
name: openspec-explore
|
||||
description: Enter explore mode - a thinking partner for exploring ideas, investigating problems, and clarifying requirements. Use when the user wants to think through something before or during a change.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Enter explore mode. Think deeply. Visualize freely. Follow the conversation wherever it goes.
|
||||
|
||||
**IMPORTANT: Explore mode is for thinking, not implementing.** You may read files, search code, and investigate the codebase, but you must NEVER write code or implement features. If the user asks you to implement something, remind them to exit explore mode first and create a change proposal. You MAY create OpenSpec artifacts (proposals, designs, specs) if the user asks—that's capturing thinking, not implementing.
|
||||
|
||||
**This is a stance, not a workflow.** There are no fixed steps, no required sequence, no mandatory outputs. You're a thinking partner helping the user explore.
|
||||
|
||||
---
|
||||
|
||||
## The Stance
|
||||
|
||||
- **Curious, not prescriptive** - Ask questions that emerge naturally, don't follow a script
|
||||
- **Open threads, not interrogations** - Surface multiple interesting directions and let the user follow what resonates. Don't funnel them through a single path of questions.
|
||||
- **Visual** - Use ASCII diagrams liberally when they'd help clarify thinking
|
||||
- **Adaptive** - Follow interesting threads, pivot when new information emerges
|
||||
- **Patient** - Don't rush to conclusions, let the shape of the problem emerge
|
||||
- **Grounded** - Explore the actual codebase when relevant, don't just theorize
|
||||
|
||||
---
|
||||
|
||||
## What You Might Do
|
||||
|
||||
Depending on what the user brings, you might:
|
||||
|
||||
**Explore the problem space**
|
||||
- Ask clarifying questions that emerge from what they said
|
||||
- Challenge assumptions
|
||||
- Reframe the problem
|
||||
- Find analogies
|
||||
|
||||
**Investigate the codebase**
|
||||
- Map existing architecture relevant to the discussion
|
||||
- Find integration points
|
||||
- Identify patterns already in use
|
||||
- Surface hidden complexity
|
||||
|
||||
**Compare options**
|
||||
- Brainstorm multiple approaches
|
||||
- Build comparison tables
|
||||
- Sketch tradeoffs
|
||||
- Recommend a path (if asked)
|
||||
|
||||
**Visualize**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Use ASCII diagrams liberally │
|
||||
├─────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌────────┐ ┌────────┐ │
|
||||
│ │ State │────────▶│ State │ │
|
||||
│ │ A │ │ B │ │
|
||||
│ └────────┘ └────────┘ │
|
||||
│ │
|
||||
│ System diagrams, state machines, │
|
||||
│ data flows, architecture sketches, │
|
||||
│ dependency graphs, comparison tables │
|
||||
│ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Surface risks and unknowns**
|
||||
- Identify what could go wrong
|
||||
- Find gaps in understanding
|
||||
- Suggest spikes or investigations
|
||||
|
||||
---
|
||||
|
||||
## OpenSpec Awareness
|
||||
|
||||
You have full context of the OpenSpec system. Use it naturally, don't force it.
|
||||
|
||||
### Check for context
|
||||
|
||||
At the start, quickly check what exists:
|
||||
```bash
|
||||
openspec list --json
|
||||
```
|
||||
|
||||
This tells you:
|
||||
- If there are active changes
|
||||
- Their names, schemas, and status
|
||||
- What the user might be working on
|
||||
|
||||
### When no change exists
|
||||
|
||||
Think freely. When insights crystallize, you might offer:
|
||||
|
||||
- "This feels solid enough to start a change. Want me to create a proposal?"
|
||||
- Or keep exploring - no pressure to formalize
|
||||
|
||||
### When a change exists
|
||||
|
||||
If the user mentions a change or you detect one is relevant:
|
||||
|
||||
1. **Read existing artifacts for context**
|
||||
- `openspec/changes/<name>/proposal.md`
|
||||
- `openspec/changes/<name>/design.md`
|
||||
- `openspec/changes/<name>/tasks.md`
|
||||
- etc.
|
||||
|
||||
2. **Reference them naturally in conversation**
|
||||
- "Your design mentions using Redis, but we just realized SQLite fits better..."
|
||||
- "The proposal scopes this to premium users, but we're now thinking everyone..."
|
||||
|
||||
3. **Offer to capture when decisions are made**
|
||||
|
||||
| Insight Type | Where to Capture |
|
||||
|--------------|------------------|
|
||||
| New requirement discovered | `specs/<capability>/spec.md` |
|
||||
| Requirement changed | `specs/<capability>/spec.md` |
|
||||
| Design decision made | `design.md` |
|
||||
| Scope changed | `proposal.md` |
|
||||
| New work identified | `tasks.md` |
|
||||
| Assumption invalidated | Relevant artifact |
|
||||
|
||||
Example offers:
|
||||
- "That's a design decision. Capture it in design.md?"
|
||||
- "This is a new requirement. Add it to specs?"
|
||||
- "This changes scope. Update the proposal?"
|
||||
|
||||
4. **The user decides** - Offer and move on. Don't pressure. Don't auto-capture.
|
||||
|
||||
---
|
||||
|
||||
## What You Don't Have To Do
|
||||
|
||||
- Follow a script
|
||||
- Ask the same questions every time
|
||||
- Produce a specific artifact
|
||||
- Reach a conclusion
|
||||
- Stay on topic if a tangent is valuable
|
||||
- Be brief (this is thinking time)
|
||||
|
||||
---
|
||||
|
||||
## Handling Different Entry Points
|
||||
|
||||
**User brings a vague idea:**
|
||||
```
|
||||
User: I'm thinking about adding real-time collaboration
|
||||
|
||||
You: Real-time collab is a big space. Let me think about this...
|
||||
|
||||
COLLABORATION SPECTRUM
|
||||
════════════════════════════════════════════
|
||||
|
||||
Awareness Coordination Sync
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌────────┐ ┌────────┐ ┌────────┐
|
||||
│Presence│ │Cursors │ │ CRDT │
|
||||
│ "3 │ │ Multi │ │Conflict│
|
||||
│online" │ │ select │ │ free │
|
||||
└────────┘ └────────┘ └────────┘
|
||||
│ │ │
|
||||
trivial moderate complex
|
||||
|
||||
Where's your head at?
|
||||
```
|
||||
|
||||
**User brings a specific problem:**
|
||||
```
|
||||
User: The auth system is a mess
|
||||
|
||||
You: [reads codebase]
|
||||
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ CURRENT AUTH FLOW │
|
||||
└─────────────────────────────────────────────┘
|
||||
│
|
||||
┌─────────────┼─────────────┐
|
||||
▼ ▼ ▼
|
||||
┌─────────┐ ┌─────────┐ ┌─────────┐
|
||||
│ Google │ │ GitHub │ │ Email │
|
||||
│ OAuth │ │ OAuth │ │ Magic │
|
||||
└────┬────┘ └────┬────┘ └────┬────┘
|
||||
│ │ │
|
||||
└─────────────┼─────────────┘
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Session │
|
||||
└─────┬─────┘
|
||||
│
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Perms │
|
||||
└───────────┘
|
||||
|
||||
I see three tangles. Which one's burning?
|
||||
```
|
||||
|
||||
**User is stuck mid-implementation:**
|
||||
```
|
||||
User: /opsx:explore add-auth-system
|
||||
The OAuth integration is more complex than expected
|
||||
|
||||
You: [reads change artifacts]
|
||||
|
||||
You're on task 4: "Implement OAuth flow"
|
||||
|
||||
Let me trace what's involved...
|
||||
|
||||
[draws diagram, explores options, suggests paths]
|
||||
|
||||
Want to update the design to reflect this?
|
||||
Or add a spike task to investigate?
|
||||
```
|
||||
|
||||
**User wants to compare options:**
|
||||
```
|
||||
User: Should we use Postgres or SQLite?
|
||||
|
||||
You: Generic answer is boring. What's the context?
|
||||
|
||||
User: A CLI tool that tracks local dev environments
|
||||
|
||||
You: That changes everything.
|
||||
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ CLI TOOL DATA STORAGE │
|
||||
└─────────────────────────────────────────────────┘
|
||||
|
||||
Key constraints:
|
||||
• No daemon running
|
||||
• Must work offline
|
||||
• Single user
|
||||
|
||||
SQLite Postgres
|
||||
Deployment embedded ✓ needs server ✗
|
||||
Offline yes ✓ no ✗
|
||||
Single file yes ✓ no ✗
|
||||
|
||||
SQLite. Not even close.
|
||||
|
||||
Unless... is there a sync component?
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Ending Discovery
|
||||
|
||||
There's no required ending. Discovery might:
|
||||
|
||||
- **Flow into a proposal**: "Ready to start? I can create a change proposal."
|
||||
- **Result in artifact updates**: "Updated design.md with these decisions"
|
||||
- **Just provide clarity**: User has what they need, moves on
|
||||
- **Continue later**: "We can pick this up anytime"
|
||||
|
||||
When it feels like things are crystallizing, you might summarize:
|
||||
|
||||
```
|
||||
## What We Figured Out
|
||||
|
||||
**The problem**: [crystallized understanding]
|
||||
|
||||
**The approach**: [if one emerged]
|
||||
|
||||
**Open questions**: [if any remain]
|
||||
|
||||
**Next steps** (if ready):
|
||||
- Create a change proposal
|
||||
- Keep exploring: just keep talking
|
||||
```
|
||||
|
||||
But this summary is optional. Sometimes the thinking IS the value.
|
||||
|
||||
---
|
||||
|
||||
## Guardrails
|
||||
|
||||
- **Don't implement** - Never write code or implement features. Creating OpenSpec artifacts is fine, writing application code is not.
|
||||
- **Don't fake understanding** - If something is unclear, dig deeper
|
||||
- **Don't rush** - Discovery is thinking time, not task time
|
||||
- **Don't force structure** - Let patterns emerge naturally
|
||||
- **Don't auto-capture** - Offer to save insights, don't just do it
|
||||
- **Do visualize** - A good diagram is worth many paragraphs
|
||||
- **Do explore the codebase** - Ground discussions in reality
|
||||
- **Do question assumptions** - Including the user's and your own
|
||||
110
.codex/skills/openspec-propose/SKILL.md
Normal file
110
.codex/skills/openspec-propose/SKILL.md
Normal file
@@ -0,0 +1,110 @@
|
||||
---
|
||||
name: openspec-propose
|
||||
description: Propose a new change with all artifacts generated in one step. Use when the user wants to quickly describe what they want to build and get a complete proposal with design, specs, and tasks ready for implementation.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Propose a new change - create the change and generate all artifacts in one step.
|
||||
|
||||
I'll create a change with artifacts:
|
||||
- proposal.md (what & why)
|
||||
- design.md (how)
|
||||
- tasks.md (implementation steps)
|
||||
|
||||
When ready to implement, run /opsx:apply
|
||||
|
||||
---
|
||||
|
||||
**Input**: The user's request should include a change name (kebab-case) OR a description of what they want to build.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no clear input provided, ask what they want to build**
|
||||
|
||||
Use the **AskUserQuestion tool** (open-ended, no preset options) to ask:
|
||||
> "What change do you want to work on? Describe what you want to build or fix."
|
||||
|
||||
From their description, derive a kebab-case name (e.g., "add user authentication" → `add-user-auth`).
|
||||
|
||||
**IMPORTANT**: Do NOT proceed without understanding what the user wants to build.
|
||||
|
||||
2. **Create the change directory**
|
||||
```bash
|
||||
openspec new change "<name>"
|
||||
```
|
||||
This creates a scaffolded change at `openspec/changes/<name>/` with `.openspec.yaml`.
|
||||
|
||||
3. **Get the artifact build order**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to get:
|
||||
- `applyRequires`: array of artifact IDs needed before implementation (e.g., `["tasks"]`)
|
||||
- `artifacts`: list of all artifacts with their status and dependencies
|
||||
|
||||
4. **Create artifacts in sequence until apply-ready**
|
||||
|
||||
Use the **TodoWrite tool** to track progress through the artifacts.
|
||||
|
||||
Loop through artifacts in dependency order (artifacts with no pending dependencies first):
|
||||
|
||||
a. **For each artifact that is `ready` (dependencies satisfied)**:
|
||||
- Get instructions:
|
||||
```bash
|
||||
openspec instructions <artifact-id> --change "<name>" --json
|
||||
```
|
||||
- The instructions JSON includes:
|
||||
- `context`: Project background (constraints for you - do NOT include in output)
|
||||
- `rules`: Artifact-specific rules (constraints for you - do NOT include in output)
|
||||
- `template`: The structure to use for your output file
|
||||
- `instruction`: Schema-specific guidance for this artifact type
|
||||
- `outputPath`: Where to write the artifact
|
||||
- `dependencies`: Completed artifacts to read for context
|
||||
- Read any completed dependency files for context
|
||||
- Create the artifact file using `template` as the structure
|
||||
- Apply `context` and `rules` as constraints - but do NOT copy them into the file
|
||||
- Show brief progress: "Created <artifact-id>"
|
||||
|
||||
b. **Continue until all `applyRequires` artifacts are complete**
|
||||
- After creating each artifact, re-run `openspec status --change "<name>" --json`
|
||||
- Check if every artifact ID in `applyRequires` has `status: "done"` in the artifacts array
|
||||
- Stop when all `applyRequires` artifacts are done
|
||||
|
||||
c. **If an artifact requires user input** (unclear context):
|
||||
- Use **AskUserQuestion tool** to clarify
|
||||
- Then continue with creation
|
||||
|
||||
5. **Show final status**
|
||||
```bash
|
||||
openspec status --change "<name>"
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
After completing all artifacts, summarize:
|
||||
- Change name and location
|
||||
- List of artifacts created with brief descriptions
|
||||
- What's ready: "All artifacts created! Ready for implementation."
|
||||
- Prompt: "Run `/opsx:apply` or ask me to start implementing the tasks."
|
||||
|
||||
**Artifact Creation Guidelines**
|
||||
|
||||
- Follow the `instruction` field from `openspec instructions` for each artifact type
|
||||
- The schema defines what each artifact should contain - follow it
|
||||
- Read dependency artifacts for context before creating new ones
|
||||
- Use `template` as the structure for your output file - fill in its sections
|
||||
- **IMPORTANT**: `context` and `rules` are constraints for YOU, not content for the file
|
||||
- Do NOT copy `<context>`, `<rules>`, `<project_context>` blocks into the artifact
|
||||
- These guide what you write, but should never appear in the output
|
||||
|
||||
**Guardrails**
|
||||
- Create ALL artifacts needed for implementation (as defined by schema's `apply.requires`)
|
||||
- Always read dependency artifacts before creating a new one
|
||||
- If context is critically unclear, ask the user - but prefer making reasonable decisions to keep momentum
|
||||
- If a change with that name already exists, ask if user wants to continue it or create a new one
|
||||
- Verify each artifact file exists after writing before proceeding to next
|
||||
152
.cursor/commands/opsx-apply.md
Normal file
152
.cursor/commands/opsx-apply.md
Normal file
@@ -0,0 +1,152 @@
|
||||
---
|
||||
name: /opsx-apply
|
||||
id: opsx-apply
|
||||
category: Workflow
|
||||
description: Implement tasks from an OpenSpec change (Experimental)
|
||||
---
|
||||
|
||||
Implement tasks from an OpenSpec change.
|
||||
|
||||
**Input**: Optionally specify a change name (e.g., `/opsx:apply add-auth`). If omitted, check if it can be inferred from conversation context. If the context is vague or ambiguous, you MUST prompt the user with the available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **Select the change**
|
||||
|
||||
If a name is provided, use it. Otherwise:
|
||||
- Infer from conversation context if the user mentioned a change
|
||||
- Auto-select if only one active change exists
|
||||
- If ambiguous, run `openspec list --json` to get available changes and use the **AskUserQuestion tool** to let the user select
|
||||
|
||||
Always announce: "Using change: <name>" and how to override (e.g., `/opsx:apply <other>`).
|
||||
|
||||
2. **Check status to understand the schema**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used (e.g., "spec-driven")
|
||||
- Which artifact contains the tasks (typically "tasks" for spec-driven, check status for others)
|
||||
|
||||
3. **Get apply instructions**
|
||||
|
||||
```bash
|
||||
openspec instructions apply --change "<name>" --json
|
||||
```
|
||||
|
||||
This returns:
|
||||
- Context file paths (varies by schema)
|
||||
- Progress (total, complete, remaining)
|
||||
- Task list with status
|
||||
- Dynamic instruction based on current state
|
||||
|
||||
**Handle states:**
|
||||
- If `state: "blocked"` (missing artifacts): show message, suggest using `/opsx:continue`
|
||||
- If `state: "all_done"`: congratulate, suggest archive
|
||||
- Otherwise: proceed to implementation
|
||||
|
||||
4. **Read context files**
|
||||
|
||||
Read the files listed in `contextFiles` from the apply instructions output.
|
||||
The files depend on the schema being used:
|
||||
- **spec-driven**: proposal, specs, design, tasks
|
||||
- Other schemas: follow the contextFiles from CLI output
|
||||
|
||||
5. **Show current progress**
|
||||
|
||||
Display:
|
||||
- Schema being used
|
||||
- Progress: "N/M tasks complete"
|
||||
- Remaining tasks overview
|
||||
- Dynamic instruction from CLI
|
||||
|
||||
6. **Implement tasks (loop until done or blocked)**
|
||||
|
||||
For each pending task:
|
||||
- Show which task is being worked on
|
||||
- Make the code changes required
|
||||
- Keep changes minimal and focused
|
||||
- Mark task complete in the tasks file: `- [ ]` → `- [x]`
|
||||
- Continue to next task
|
||||
|
||||
**Pause if:**
|
||||
- Task is unclear → ask for clarification
|
||||
- Implementation reveals a design issue → suggest updating artifacts
|
||||
- Error or blocker encountered → report and wait for guidance
|
||||
- User interrupts
|
||||
|
||||
7. **On completion or pause, show status**
|
||||
|
||||
Display:
|
||||
- Tasks completed this session
|
||||
- Overall progress: "N/M tasks complete"
|
||||
- If all done: suggest archive
|
||||
- If paused: explain why and wait for guidance
|
||||
|
||||
**Output During Implementation**
|
||||
|
||||
```
|
||||
## Implementing: <change-name> (schema: <schema-name>)
|
||||
|
||||
Working on task 3/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
|
||||
Working on task 4/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
```
|
||||
|
||||
**Output On Completion**
|
||||
|
||||
```
|
||||
## Implementation Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 7/7 tasks complete ✓
|
||||
|
||||
### Completed This Session
|
||||
- [x] Task 1
|
||||
- [x] Task 2
|
||||
...
|
||||
|
||||
All tasks complete! You can archive this change with `/opsx:archive`.
|
||||
```
|
||||
|
||||
**Output On Pause (Issue Encountered)**
|
||||
|
||||
```
|
||||
## Implementation Paused
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 4/7 tasks complete
|
||||
|
||||
### Issue Encountered
|
||||
<description of the issue>
|
||||
|
||||
**Options:**
|
||||
1. <option 1>
|
||||
2. <option 2>
|
||||
3. Other approach
|
||||
|
||||
What would you like to do?
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Keep going through tasks until done or blocked
|
||||
- Always read context files before starting (from the apply instructions output)
|
||||
- If task is ambiguous, pause and ask before implementing
|
||||
- If implementation reveals issues, pause and suggest artifact updates
|
||||
- Keep code changes minimal and scoped to each task
|
||||
- Update task checkbox immediately after completing each task
|
||||
- Pause on errors, blockers, or unclear requirements - don't guess
|
||||
- Use contextFiles from CLI output, don't assume specific file names
|
||||
|
||||
**Fluid Workflow Integration**
|
||||
|
||||
This skill supports the "actions on a change" model:
|
||||
|
||||
- **Can be invoked anytime**: Before all artifacts are done (if tasks exist), after partial implementation, interleaved with other actions
|
||||
- **Allows artifact updates**: If implementation reveals design issues, suggest updating artifacts - not phase-locked, work fluidly
|
||||
157
.cursor/commands/opsx-archive.md
Normal file
157
.cursor/commands/opsx-archive.md
Normal file
@@ -0,0 +1,157 @@
|
||||
---
|
||||
name: /opsx-archive
|
||||
id: opsx-archive
|
||||
category: Workflow
|
||||
description: Archive a completed change in the experimental workflow
|
||||
---
|
||||
|
||||
Archive a completed change in the experimental workflow.
|
||||
|
||||
**Input**: Optionally specify a change name after `/opsx:archive` (e.g., `/opsx:archive add-auth`). If omitted, check if it can be inferred from conversation context. If the context is vague or ambiguous, you MUST prompt the user with the available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no change name provided, prompt for selection**
|
||||
|
||||
Run `openspec list --json` to get available changes. Use the **AskUserQuestion tool** to let the user select.
|
||||
|
||||
Show only active changes (not already archived).
|
||||
Include the schema used for each change if available.
|
||||
|
||||
**IMPORTANT**: Do NOT guess or auto-select a change. Always let the user choose.
|
||||
|
||||
2. **Check artifact completion status**
|
||||
|
||||
Run `openspec status --change "<name>" --json` to check artifact completion.
|
||||
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used
|
||||
- `artifacts`: List of artifacts with their status (`done` or other)
|
||||
|
||||
**If any artifacts are not `done`:**
|
||||
- Display warning listing incomplete artifacts
|
||||
- Prompt user for confirmation to continue
|
||||
- Proceed if user confirms
|
||||
|
||||
3. **Check task completion status**
|
||||
|
||||
Read the tasks file (typically `tasks.md`) to check for incomplete tasks.
|
||||
|
||||
Count tasks marked with `- [ ]` (incomplete) vs `- [x]` (complete).
|
||||
|
||||
**If incomplete tasks found:**
|
||||
- Display warning showing count of incomplete tasks
|
||||
- Prompt user for confirmation to continue
|
||||
- Proceed if user confirms
|
||||
|
||||
**If no tasks file exists:** Proceed without a task-related warning.
|
||||
|
||||
4. **Assess delta spec sync state**
|
||||
|
||||
Check for delta specs at `openspec/changes/<name>/specs/`. If none exist, proceed without sync prompt.
|
||||
|
||||
**If delta specs exist:**
|
||||
- Compare each delta spec with its corresponding main spec at `openspec/specs/<capability>/spec.md`
|
||||
- Determine what changes would be applied (adds, modifications, removals, renames)
|
||||
- Show a combined summary before prompting
|
||||
|
||||
**Prompt options:**
|
||||
- If changes needed: "Sync now (recommended)", "Archive without syncing"
|
||||
- If already synced: "Archive now", "Sync anyway", "Cancel"
|
||||
|
||||
If user chooses sync, use Task tool (subagent_type: "general-purpose", prompt: "Use Skill tool to invoke openspec-sync-specs for change '<name>'. Delta spec analysis: <include the analyzed delta spec summary>"). Proceed to archive regardless of choice.
|
||||
|
||||
5. **Perform the archive**
|
||||
|
||||
Create the archive directory if it doesn't exist:
|
||||
```bash
|
||||
mkdir -p openspec/changes/archive
|
||||
```
|
||||
|
||||
Generate target name using current date: `YYYY-MM-DD-<change-name>`
|
||||
|
||||
**Check if target already exists:**
|
||||
- If yes: Fail with error, suggest renaming existing archive or using different date
|
||||
- If no: Move the change directory to archive
|
||||
|
||||
```bash
|
||||
mv openspec/changes/<name> openspec/changes/archive/YYYY-MM-DD-<name>
|
||||
```
|
||||
|
||||
6. **Display summary**
|
||||
|
||||
Show archive completion summary including:
|
||||
- Change name
|
||||
- Schema that was used
|
||||
- Archive location
|
||||
- Spec sync status (synced / sync skipped / no delta specs)
|
||||
- Note about any warnings (incomplete artifacts/tasks)
|
||||
|
||||
**Output On Success**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** ✓ Synced to main specs
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Output On Success (No Delta Specs)**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** No delta specs
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Output On Success With Warnings**
|
||||
|
||||
```
|
||||
## Archive Complete (with warnings)
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** Sync skipped (user chose to skip)
|
||||
|
||||
**Warnings:**
|
||||
- Archived with 2 incomplete artifacts
|
||||
- Archived with 3 incomplete tasks
|
||||
- Delta spec sync was skipped (user chose to skip)
|
||||
|
||||
Review the archive if this was not intentional.
|
||||
```
|
||||
|
||||
**Output On Error (Archive Exists)**
|
||||
|
||||
```
|
||||
## Archive Failed
|
||||
|
||||
**Change:** <change-name>
|
||||
**Target:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
|
||||
Target archive directory already exists.
|
||||
|
||||
**Options:**
|
||||
1. Rename the existing archive
|
||||
2. Delete the existing archive if it's a duplicate
|
||||
3. Wait until a different date to archive
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Always prompt for change selection if not provided
|
||||
- Use artifact graph (openspec status --json) for completion checking
|
||||
- Don't block archive on warnings - just inform and confirm
|
||||
- Preserve .openspec.yaml when moving to archive (it moves with the directory)
|
||||
- Show clear summary of what happened
|
||||
- If sync is requested, use the Skill tool to invoke `openspec-sync-specs` (agent-driven)
|
||||
- If delta specs exist, always run the sync assessment and show the combined summary before prompting
|
||||
173
.cursor/commands/opsx-explore.md
Normal file
173
.cursor/commands/opsx-explore.md
Normal file
@@ -0,0 +1,173 @@
|
||||
---
|
||||
name: /opsx-explore
|
||||
id: opsx-explore
|
||||
category: Workflow
|
||||
description: "Enter explore mode - think through ideas, investigate problems, clarify requirements"
|
||||
---
|
||||
|
||||
Enter explore mode. Think deeply. Visualize freely. Follow the conversation wherever it goes.
|
||||
|
||||
**IMPORTANT: Explore mode is for thinking, not implementing.** You may read files, search code, and investigate the codebase, but you must NEVER write code or implement features. If the user asks you to implement something, remind them to exit explore mode first and create a change proposal. You MAY create OpenSpec artifacts (proposals, designs, specs) if the user asks—that's capturing thinking, not implementing.
|
||||
|
||||
**This is a stance, not a workflow.** There are no fixed steps, no required sequence, no mandatory outputs. You're a thinking partner helping the user explore.
|
||||
|
||||
**Input**: The argument after `/opsx:explore` is whatever the user wants to think about. Could be:
|
||||
- A vague idea: "real-time collaboration"
|
||||
- A specific problem: "the auth system is getting unwieldy"
|
||||
- A change name: "add-dark-mode" (to explore in context of that change)
|
||||
- A comparison: "postgres vs sqlite for this"
|
||||
- Nothing (just enter explore mode)
|
||||
|
||||
---
|
||||
|
||||
## The Stance
|
||||
|
||||
- **Curious, not prescriptive** - Ask questions that emerge naturally, don't follow a script
|
||||
- **Open threads, not interrogations** - Surface multiple interesting directions and let the user follow what resonates. Don't funnel them through a single path of questions.
|
||||
- **Visual** - Use ASCII diagrams liberally when they'd help clarify thinking
|
||||
- **Adaptive** - Follow interesting threads, pivot when new information emerges
|
||||
- **Patient** - Don't rush to conclusions, let the shape of the problem emerge
|
||||
- **Grounded** - Explore the actual codebase when relevant, don't just theorize
|
||||
|
||||
---
|
||||
|
||||
## What You Might Do
|
||||
|
||||
Depending on what the user brings, you might:
|
||||
|
||||
**Explore the problem space**
|
||||
- Ask clarifying questions that emerge from what they said
|
||||
- Challenge assumptions
|
||||
- Reframe the problem
|
||||
- Find analogies
|
||||
|
||||
**Investigate the codebase**
|
||||
- Map existing architecture relevant to the discussion
|
||||
- Find integration points
|
||||
- Identify patterns already in use
|
||||
- Surface hidden complexity
|
||||
|
||||
**Compare options**
|
||||
- Brainstorm multiple approaches
|
||||
- Build comparison tables
|
||||
- Sketch tradeoffs
|
||||
- Recommend a path (if asked)
|
||||
|
||||
**Visualize**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Use ASCII diagrams liberally │
|
||||
├─────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌────────┐ ┌────────┐ │
|
||||
│ │ State │────────▶│ State │ │
|
||||
│ │ A │ │ B │ │
|
||||
│ └────────┘ └────────┘ │
|
||||
│ │
|
||||
│ System diagrams, state machines, │
|
||||
│ data flows, architecture sketches, │
|
||||
│ dependency graphs, comparison tables │
|
||||
│ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Surface risks and unknowns**
|
||||
- Identify what could go wrong
|
||||
- Find gaps in understanding
|
||||
- Suggest spikes or investigations
|
||||
|
||||
---
|
||||
|
||||
## OpenSpec Awareness
|
||||
|
||||
You have full context of the OpenSpec system. Use it naturally, don't force it.
|
||||
|
||||
### Check for context
|
||||
|
||||
At the start, quickly check what exists:
|
||||
```bash
|
||||
openspec list --json
|
||||
```
|
||||
|
||||
This tells you:
|
||||
- If there are active changes
|
||||
- Their names, schemas, and status
|
||||
- What the user might be working on
|
||||
|
||||
If the user mentioned a specific change name, read its artifacts for context.
|
||||
|
||||
### When no change exists
|
||||
|
||||
Think freely. When insights crystallize, you might offer:
|
||||
|
||||
- "This feels solid enough to start a change. Want me to create a proposal?"
|
||||
- Or keep exploring - no pressure to formalize
|
||||
|
||||
### When a change exists
|
||||
|
||||
If the user mentions a change or you detect one is relevant:
|
||||
|
||||
1. **Read existing artifacts for context**
|
||||
- `openspec/changes/<name>/proposal.md`
|
||||
- `openspec/changes/<name>/design.md`
|
||||
- `openspec/changes/<name>/tasks.md`
|
||||
- etc.
|
||||
|
||||
2. **Reference them naturally in conversation**
|
||||
- "Your design mentions using Redis, but we just realized SQLite fits better..."
|
||||
- "The proposal scopes this to premium users, but we're now thinking everyone..."
|
||||
|
||||
3. **Offer to capture when decisions are made**
|
||||
|
||||
| Insight Type | Where to Capture |
|
||||
|--------------|------------------|
|
||||
| New requirement discovered | `specs/<capability>/spec.md` |
|
||||
| Requirement changed | `specs/<capability>/spec.md` |
|
||||
| Design decision made | `design.md` |
|
||||
| Scope changed | `proposal.md` |
|
||||
| New work identified | `tasks.md` |
|
||||
| Assumption invalidated | Relevant artifact |
|
||||
|
||||
Example offers:
|
||||
- "That's a design decision. Capture it in design.md?"
|
||||
- "This is a new requirement. Add it to specs?"
|
||||
- "This changes scope. Update the proposal?"
|
||||
|
||||
4. **The user decides** - Offer and move on. Don't pressure. Don't auto-capture.
|
||||
|
||||
---
|
||||
|
||||
## What You Don't Have To Do
|
||||
|
||||
- Follow a script
|
||||
- Ask the same questions every time
|
||||
- Produce a specific artifact
|
||||
- Reach a conclusion
|
||||
- Stay on topic if a tangent is valuable
|
||||
- Be brief (this is thinking time)
|
||||
|
||||
---
|
||||
|
||||
## Ending Discovery
|
||||
|
||||
There's no required ending. Discovery might:
|
||||
|
||||
- **Flow into a proposal**: "Ready to start? I can create a change proposal."
|
||||
- **Result in artifact updates**: "Updated design.md with these decisions"
|
||||
- **Just provide clarity**: User has what they need, moves on
|
||||
- **Continue later**: "We can pick this up anytime"
|
||||
|
||||
When things crystallize, you might offer a summary - but it's optional. Sometimes the thinking IS the value.
|
||||
|
||||
---
|
||||
|
||||
## Guardrails
|
||||
|
||||
- **Don't implement** - Never write code or implement features. Creating OpenSpec artifacts is fine, writing application code is not.
|
||||
- **Don't fake understanding** - If something is unclear, dig deeper
|
||||
- **Don't rush** - Discovery is thinking time, not task time
|
||||
- **Don't force structure** - Let patterns emerge naturally
|
||||
- **Don't auto-capture** - Offer to save insights, don't just do it
|
||||
- **Do visualize** - A good diagram is worth many paragraphs
|
||||
- **Do explore the codebase** - Ground discussions in reality
|
||||
- **Do question assumptions** - Including the user's and your own
|
||||
106
.cursor/commands/opsx-propose.md
Normal file
106
.cursor/commands/opsx-propose.md
Normal file
@@ -0,0 +1,106 @@
|
||||
---
|
||||
name: /opsx-propose
|
||||
id: opsx-propose
|
||||
category: Workflow
|
||||
description: Propose a new change - create it and generate all artifacts in one step
|
||||
---
|
||||
|
||||
Propose a new change - create the change and generate all artifacts in one step.
|
||||
|
||||
I'll create a change with artifacts:
|
||||
- proposal.md (what & why)
|
||||
- design.md (how)
|
||||
- tasks.md (implementation steps)
|
||||
|
||||
When ready to implement, run /opsx:apply
|
||||
|
||||
---
|
||||
|
||||
**Input**: The argument after `/opsx-propose` is the change name (kebab-case), OR a description of what the user wants to build.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no input provided, ask what they want to build**
|
||||
|
||||
Use the **AskUserQuestion tool** (open-ended, no preset options) to ask:
|
||||
> "What change do you want to work on? Describe what you want to build or fix."
|
||||
|
||||
From their description, derive a kebab-case name (e.g., "add user authentication" → `add-user-auth`).
|
||||
|
||||
**IMPORTANT**: Do NOT proceed without understanding what the user wants to build.
|
||||
|
||||
2. **Create the change directory**
|
||||
```bash
|
||||
openspec new change "<name>"
|
||||
```
|
||||
This creates a scaffolded change at `openspec/changes/<name>/` with `.openspec.yaml`.
|
||||
|
||||
3. **Get the artifact build order**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to get:
|
||||
- `applyRequires`: array of artifact IDs needed before implementation (e.g., `["tasks"]`)
|
||||
- `artifacts`: list of all artifacts with their status and dependencies
|
||||
|
||||
4. **Create artifacts in sequence until apply-ready**
|
||||
|
||||
Use the **TodoWrite tool** to track progress through the artifacts.
|
||||
|
||||
Loop through artifacts in dependency order (artifacts with no pending dependencies first):
|
||||
|
||||
a. **For each artifact that is `ready` (dependencies satisfied)**:
|
||||
- Get instructions:
|
||||
```bash
|
||||
openspec instructions <artifact-id> --change "<name>" --json
|
||||
```
|
||||
- The instructions JSON includes:
|
||||
- `context`: Project background (constraints for you - do NOT include in output)
|
||||
- `rules`: Artifact-specific rules (constraints for you - do NOT include in output)
|
||||
- `template`: The structure to use for your output file
|
||||
- `instruction`: Schema-specific guidance for this artifact type
|
||||
- `outputPath`: Where to write the artifact
|
||||
- `dependencies`: Completed artifacts to read for context
|
||||
- Read any completed dependency files for context
|
||||
- Create the artifact file using `template` as the structure
|
||||
- Apply `context` and `rules` as constraints - but do NOT copy them into the file
|
||||
- Show brief progress: "Created <artifact-id>"
|
||||
|
||||
b. **Continue until all `applyRequires` artifacts are complete**
|
||||
- After creating each artifact, re-run `openspec status --change "<name>" --json`
|
||||
- Check if every artifact ID in `applyRequires` has `status: "done"` in the artifacts array
|
||||
- Stop when all `applyRequires` artifacts are done
|
||||
|
||||
c. **If an artifact requires user input** (unclear context):
|
||||
- Use **AskUserQuestion tool** to clarify
|
||||
- Then continue with creation
|
||||
|
||||
5. **Show final status**
|
||||
```bash
|
||||
openspec status --change "<name>"
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
After completing all artifacts, summarize:
|
||||
- Change name and location
|
||||
- List of artifacts created with brief descriptions
|
||||
- What's ready: "All artifacts created! Ready for implementation."
|
||||
- Prompt: "Run `/opsx:apply` to start implementing."
|
||||
|
||||
**Artifact Creation Guidelines**
|
||||
|
||||
- Follow the `instruction` field from `openspec instructions` for each artifact type
|
||||
- The schema defines what each artifact should contain - follow it
|
||||
- Read dependency artifacts for context before creating new ones
|
||||
- Use `template` as the structure for your output file - fill in its sections
|
||||
- **IMPORTANT**: `context` and `rules` are constraints for YOU, not content for the file
|
||||
- Do NOT copy `<context>`, `<rules>`, `<project_context>` blocks into the artifact
|
||||
- These guide what you write, but should never appear in the output
|
||||
|
||||
**Guardrails**
|
||||
- Create ALL artifacts needed for implementation (as defined by schema's `apply.requires`)
|
||||
- Always read dependency artifacts before creating a new one
|
||||
- If context is critically unclear, ask the user - but prefer making reasonable decisions to keep momentum
|
||||
- If a change with that name already exists, ask if user wants to continue it or create a new one
|
||||
- Verify each artifact file exists after writing before proceeding to next
|
||||
156
.cursor/skills/openspec-apply-change/SKILL.md
Normal file
156
.cursor/skills/openspec-apply-change/SKILL.md
Normal file
@@ -0,0 +1,156 @@
|
||||
---
|
||||
name: openspec-apply-change
|
||||
description: Implement tasks from an OpenSpec change. Use when the user wants to start implementing, continue implementation, or work through tasks.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Implement tasks from an OpenSpec change.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check if it can be inferred from conversation context. If vague or ambiguous, you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **Select the change**
|
||||
|
||||
If a name is provided, use it. Otherwise:
|
||||
- Infer from conversation context if the user mentioned a change
|
||||
- Auto-select if only one active change exists
|
||||
- If ambiguous, run `openspec list --json` to get available changes and use the **AskUserQuestion tool** to let the user select
|
||||
|
||||
Always announce: "Using change: <name>" and how to override (e.g., `/opsx:apply <other>`).
|
||||
|
||||
2. **Check status to understand the schema**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used (e.g., "spec-driven")
|
||||
- Which artifact contains the tasks (typically "tasks" for spec-driven, check status for others)
|
||||
|
||||
3. **Get apply instructions**
|
||||
|
||||
```bash
|
||||
openspec instructions apply --change "<name>" --json
|
||||
```
|
||||
|
||||
This returns:
|
||||
- Context file paths (varies by schema - could be proposal/specs/design/tasks or spec/tests/implementation/docs)
|
||||
- Progress (total, complete, remaining)
|
||||
- Task list with status
|
||||
- Dynamic instruction based on current state
|
||||
|
||||
**Handle states:**
|
||||
- If `state: "blocked"` (missing artifacts): show message, suggest using openspec-continue-change
|
||||
- If `state: "all_done"`: congratulate, suggest archive
|
||||
- Otherwise: proceed to implementation
|
||||
|
||||
4. **Read context files**
|
||||
|
||||
Read the files listed in `contextFiles` from the apply instructions output.
|
||||
The files depend on the schema being used:
|
||||
- **spec-driven**: proposal, specs, design, tasks
|
||||
- Other schemas: follow the contextFiles from CLI output
|
||||
|
||||
5. **Show current progress**
|
||||
|
||||
Display:
|
||||
- Schema being used
|
||||
- Progress: "N/M tasks complete"
|
||||
- Remaining tasks overview
|
||||
- Dynamic instruction from CLI
|
||||
|
||||
6. **Implement tasks (loop until done or blocked)**
|
||||
|
||||
For each pending task:
|
||||
- Show which task is being worked on
|
||||
- Make the code changes required
|
||||
- Keep changes minimal and focused
|
||||
- Mark task complete in the tasks file: `- [ ]` → `- [x]`
|
||||
- Continue to next task
|
||||
|
||||
**Pause if:**
|
||||
- Task is unclear → ask for clarification
|
||||
- Implementation reveals a design issue → suggest updating artifacts
|
||||
- Error or blocker encountered → report and wait for guidance
|
||||
- User interrupts
|
||||
|
||||
7. **On completion or pause, show status**
|
||||
|
||||
Display:
|
||||
- Tasks completed this session
|
||||
- Overall progress: "N/M tasks complete"
|
||||
- If all done: suggest archive
|
||||
- If paused: explain why and wait for guidance
|
||||
|
||||
**Output During Implementation**
|
||||
|
||||
```
|
||||
## Implementing: <change-name> (schema: <schema-name>)
|
||||
|
||||
Working on task 3/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
|
||||
Working on task 4/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
```
|
||||
|
||||
**Output On Completion**
|
||||
|
||||
```
|
||||
## Implementation Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 7/7 tasks complete ✓
|
||||
|
||||
### Completed This Session
|
||||
- [x] Task 1
|
||||
- [x] Task 2
|
||||
...
|
||||
|
||||
All tasks complete! Ready to archive this change.
|
||||
```
|
||||
|
||||
**Output On Pause (Issue Encountered)**
|
||||
|
||||
```
|
||||
## Implementation Paused
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 4/7 tasks complete
|
||||
|
||||
### Issue Encountered
|
||||
<description of the issue>
|
||||
|
||||
**Options:**
|
||||
1. <option 1>
|
||||
2. <option 2>
|
||||
3. Other approach
|
||||
|
||||
What would you like to do?
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Keep going through tasks until done or blocked
|
||||
- Always read context files before starting (from the apply instructions output)
|
||||
- If task is ambiguous, pause and ask before implementing
|
||||
- If implementation reveals issues, pause and suggest artifact updates
|
||||
- Keep code changes minimal and scoped to each task
|
||||
- Update task checkbox immediately after completing each task
|
||||
- Pause on errors, blockers, or unclear requirements - don't guess
|
||||
- Use contextFiles from CLI output, don't assume specific file names
|
||||
|
||||
**Fluid Workflow Integration**
|
||||
|
||||
This skill supports the "actions on a change" model:
|
||||
|
||||
- **Can be invoked anytime**: Before all artifacts are done (if tasks exist), after partial implementation, interleaved with other actions
|
||||
- **Allows artifact updates**: If implementation reveals design issues, suggest updating artifacts - not phase-locked, work fluidly
|
||||
114
.cursor/skills/openspec-archive-change/SKILL.md
Normal file
114
.cursor/skills/openspec-archive-change/SKILL.md
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
name: openspec-archive-change
|
||||
description: Archive a completed change in the experimental workflow. Use when the user wants to finalize and archive a change after implementation is complete.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Archive a completed change in the experimental workflow.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check if it can be inferred from conversation context. If vague or ambiguous, you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no change name provided, prompt for selection**
|
||||
|
||||
Run `openspec list --json` to get available changes. Use the **AskUserQuestion tool** to let the user select.
|
||||
|
||||
Show only active changes (not already archived).
|
||||
Include the schema used for each change if available.
|
||||
|
||||
**IMPORTANT**: Do NOT guess or auto-select a change. Always let the user choose.
|
||||
|
||||
2. **Check artifact completion status**
|
||||
|
||||
Run `openspec status --change "<name>" --json` to check artifact completion.
|
||||
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used
|
||||
- `artifacts`: List of artifacts with their status (`done` or other)
|
||||
|
||||
**If any artifacts are not `done`:**
|
||||
- Display warning listing incomplete artifacts
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
3. **Check task completion status**
|
||||
|
||||
Read the tasks file (typically `tasks.md`) to check for incomplete tasks.
|
||||
|
||||
Count tasks marked with `- [ ]` (incomplete) vs `- [x]` (complete).
|
||||
|
||||
**If incomplete tasks found:**
|
||||
- Display warning showing count of incomplete tasks
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
**If no tasks file exists:** Proceed without task-related warning.
|
||||
|
||||
4. **Assess delta spec sync state**
|
||||
|
||||
Check for delta specs at `openspec/changes/<name>/specs/`. If none exist, proceed without sync prompt.
|
||||
|
||||
**If delta specs exist:**
|
||||
- Compare each delta spec with its corresponding main spec at `openspec/specs/<capability>/spec.md`
|
||||
- Determine what changes would be applied (adds, modifications, removals, renames)
|
||||
- Show a combined summary before prompting
|
||||
|
||||
**Prompt options:**
|
||||
- If changes needed: "Sync now (recommended)", "Archive without syncing"
|
||||
- If already synced: "Archive now", "Sync anyway", "Cancel"
|
||||
|
||||
If user chooses sync, use Task tool (subagent_type: "general-purpose", prompt: "Use Skill tool to invoke openspec-sync-specs for change '<name>'. Delta spec analysis: <include the analyzed delta spec summary>"). Proceed to archive regardless of choice.
|
||||
|
||||
5. **Perform the archive**
|
||||
|
||||
Create the archive directory if it doesn't exist:
|
||||
```bash
|
||||
mkdir -p openspec/changes/archive
|
||||
```
|
||||
|
||||
Generate target name using current date: `YYYY-MM-DD-<change-name>`
|
||||
|
||||
**Check if target already exists:**
|
||||
- If yes: Fail with error, suggest renaming existing archive or using different date
|
||||
- If no: Move the change directory to archive
|
||||
|
||||
```bash
|
||||
mv openspec/changes/<name> openspec/changes/archive/YYYY-MM-DD-<name>
|
||||
```
|
||||
|
||||
6. **Display summary**
|
||||
|
||||
Show archive completion summary including:
|
||||
- Change name
|
||||
- Schema that was used
|
||||
- Archive location
|
||||
- Whether specs were synced (if applicable)
|
||||
- Note about any warnings (incomplete artifacts/tasks)
|
||||
|
||||
**Output On Success**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** ✓ Synced to main specs (or "No delta specs" or "Sync skipped")
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Always prompt for change selection if not provided
|
||||
- Use artifact graph (openspec status --json) for completion checking
|
||||
- Don't block archive on warnings - just inform and confirm
|
||||
- Preserve .openspec.yaml when moving to archive (it moves with the directory)
|
||||
- Show clear summary of what happened
|
||||
- If sync is requested, use openspec-sync-specs approach (agent-driven)
|
||||
- If delta specs exist, always run the sync assessment and show the combined summary before prompting
|
||||
288
.cursor/skills/openspec-explore/SKILL.md
Normal file
288
.cursor/skills/openspec-explore/SKILL.md
Normal file
@@ -0,0 +1,288 @@
|
||||
---
|
||||
name: openspec-explore
|
||||
description: Enter explore mode - a thinking partner for exploring ideas, investigating problems, and clarifying requirements. Use when the user wants to think through something before or during a change.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Enter explore mode. Think deeply. Visualize freely. Follow the conversation wherever it goes.
|
||||
|
||||
**IMPORTANT: Explore mode is for thinking, not implementing.** You may read files, search code, and investigate the codebase, but you must NEVER write code or implement features. If the user asks you to implement something, remind them to exit explore mode first and create a change proposal. You MAY create OpenSpec artifacts (proposals, designs, specs) if the user asks—that's capturing thinking, not implementing.
|
||||
|
||||
**This is a stance, not a workflow.** There are no fixed steps, no required sequence, no mandatory outputs. You're a thinking partner helping the user explore.
|
||||
|
||||
---
|
||||
|
||||
## The Stance
|
||||
|
||||
- **Curious, not prescriptive** - Ask questions that emerge naturally, don't follow a script
|
||||
- **Open threads, not interrogations** - Surface multiple interesting directions and let the user follow what resonates. Don't funnel them through a single path of questions.
|
||||
- **Visual** - Use ASCII diagrams liberally when they'd help clarify thinking
|
||||
- **Adaptive** - Follow interesting threads, pivot when new information emerges
|
||||
- **Patient** - Don't rush to conclusions, let the shape of the problem emerge
|
||||
- **Grounded** - Explore the actual codebase when relevant, don't just theorize
|
||||
|
||||
---
|
||||
|
||||
## What You Might Do
|
||||
|
||||
Depending on what the user brings, you might:
|
||||
|
||||
**Explore the problem space**
|
||||
- Ask clarifying questions that emerge from what they said
|
||||
- Challenge assumptions
|
||||
- Reframe the problem
|
||||
- Find analogies
|
||||
|
||||
**Investigate the codebase**
|
||||
- Map existing architecture relevant to the discussion
|
||||
- Find integration points
|
||||
- Identify patterns already in use
|
||||
- Surface hidden complexity
|
||||
|
||||
**Compare options**
|
||||
- Brainstorm multiple approaches
|
||||
- Build comparison tables
|
||||
- Sketch tradeoffs
|
||||
- Recommend a path (if asked)
|
||||
|
||||
**Visualize**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Use ASCII diagrams liberally │
|
||||
├─────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌────────┐ ┌────────┐ │
|
||||
│ │ State │────────▶│ State │ │
|
||||
│ │ A │ │ B │ │
|
||||
│ └────────┘ └────────┘ │
|
||||
│ │
|
||||
│ System diagrams, state machines, │
|
||||
│ data flows, architecture sketches, │
|
||||
│ dependency graphs, comparison tables │
|
||||
│ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Surface risks and unknowns**
|
||||
- Identify what could go wrong
|
||||
- Find gaps in understanding
|
||||
- Suggest spikes or investigations
|
||||
|
||||
---
|
||||
|
||||
## OpenSpec Awareness
|
||||
|
||||
You have full context of the OpenSpec system. Use it naturally, don't force it.
|
||||
|
||||
### Check for context
|
||||
|
||||
At the start, quickly check what exists:
|
||||
```bash
|
||||
openspec list --json
|
||||
```
|
||||
|
||||
This tells you:
|
||||
- If there are active changes
|
||||
- Their names, schemas, and status
|
||||
- What the user might be working on
|
||||
|
||||
### When no change exists
|
||||
|
||||
Think freely. When insights crystallize, you might offer:
|
||||
|
||||
- "This feels solid enough to start a change. Want me to create a proposal?"
|
||||
- Or keep exploring - no pressure to formalize
|
||||
|
||||
### When a change exists
|
||||
|
||||
If the user mentions a change or you detect one is relevant:
|
||||
|
||||
1. **Read existing artifacts for context**
|
||||
- `openspec/changes/<name>/proposal.md`
|
||||
- `openspec/changes/<name>/design.md`
|
||||
- `openspec/changes/<name>/tasks.md`
|
||||
- etc.
|
||||
|
||||
2. **Reference them naturally in conversation**
|
||||
- "Your design mentions using Redis, but we just realized SQLite fits better..."
|
||||
- "The proposal scopes this to premium users, but we're now thinking everyone..."
|
||||
|
||||
3. **Offer to capture when decisions are made**
|
||||
|
||||
| Insight Type | Where to Capture |
|
||||
|--------------|------------------|
|
||||
| New requirement discovered | `specs/<capability>/spec.md` |
|
||||
| Requirement changed | `specs/<capability>/spec.md` |
|
||||
| Design decision made | `design.md` |
|
||||
| Scope changed | `proposal.md` |
|
||||
| New work identified | `tasks.md` |
|
||||
| Assumption invalidated | Relevant artifact |
|
||||
|
||||
Example offers:
|
||||
- "That's a design decision. Capture it in design.md?"
|
||||
- "This is a new requirement. Add it to specs?"
|
||||
- "This changes scope. Update the proposal?"
|
||||
|
||||
4. **The user decides** - Offer and move on. Don't pressure. Don't auto-capture.
|
||||
|
||||
---
|
||||
|
||||
## What You Don't Have To Do
|
||||
|
||||
- Follow a script
|
||||
- Ask the same questions every time
|
||||
- Produce a specific artifact
|
||||
- Reach a conclusion
|
||||
- Stay on topic if a tangent is valuable
|
||||
- Be brief (this is thinking time)
|
||||
|
||||
---
|
||||
|
||||
## Handling Different Entry Points
|
||||
|
||||
**User brings a vague idea:**
|
||||
```
|
||||
User: I'm thinking about adding real-time collaboration
|
||||
|
||||
You: Real-time collab is a big space. Let me think about this...
|
||||
|
||||
COLLABORATION SPECTRUM
|
||||
════════════════════════════════════════════
|
||||
|
||||
Awareness Coordination Sync
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌────────┐ ┌────────┐ ┌────────┐
|
||||
│Presence│ │Cursors │ │ CRDT │
|
||||
│ "3 │ │ Multi │ │Conflict│
|
||||
│online" │ │ select │ │ free │
|
||||
└────────┘ └────────┘ └────────┘
|
||||
│ │ │
|
||||
trivial moderate complex
|
||||
|
||||
Where's your head at?
|
||||
```
|
||||
|
||||
**User brings a specific problem:**
|
||||
```
|
||||
User: The auth system is a mess
|
||||
|
||||
You: [reads codebase]
|
||||
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ CURRENT AUTH FLOW │
|
||||
└─────────────────────────────────────────────┘
|
||||
│
|
||||
┌─────────────┼─────────────┐
|
||||
▼ ▼ ▼
|
||||
┌─────────┐ ┌─────────┐ ┌─────────┐
|
||||
│ Google │ │ GitHub │ │ Email │
|
||||
│ OAuth │ │ OAuth │ │ Magic │
|
||||
└────┬────┘ └────┬────┘ └────┬────┘
|
||||
│ │ │
|
||||
└─────────────┼─────────────┘
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Session │
|
||||
└─────┬─────┘
|
||||
│
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Perms │
|
||||
└───────────┘
|
||||
|
||||
I see three tangles. Which one's burning?
|
||||
```
|
||||
|
||||
**User is stuck mid-implementation:**
|
||||
```
|
||||
User: /opsx:explore add-auth-system
|
||||
The OAuth integration is more complex than expected
|
||||
|
||||
You: [reads change artifacts]
|
||||
|
||||
You're on task 4: "Implement OAuth flow"
|
||||
|
||||
Let me trace what's involved...
|
||||
|
||||
[draws diagram, explores options, suggests paths]
|
||||
|
||||
Want to update the design to reflect this?
|
||||
Or add a spike task to investigate?
|
||||
```
|
||||
|
||||
**User wants to compare options:**
|
||||
```
|
||||
User: Should we use Postgres or SQLite?
|
||||
|
||||
You: Generic answer is boring. What's the context?
|
||||
|
||||
User: A CLI tool that tracks local dev environments
|
||||
|
||||
You: That changes everything.
|
||||
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ CLI TOOL DATA STORAGE │
|
||||
└─────────────────────────────────────────────────┘
|
||||
|
||||
Key constraints:
|
||||
• No daemon running
|
||||
• Must work offline
|
||||
• Single user
|
||||
|
||||
SQLite Postgres
|
||||
Deployment embedded ✓ needs server ✗
|
||||
Offline yes ✓ no ✗
|
||||
Single file yes ✓ no ✗
|
||||
|
||||
SQLite. Not even close.
|
||||
|
||||
Unless... is there a sync component?
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Ending Discovery
|
||||
|
||||
There's no required ending. Discovery might:
|
||||
|
||||
- **Flow into a proposal**: "Ready to start? I can create a change proposal."
|
||||
- **Result in artifact updates**: "Updated design.md with these decisions"
|
||||
- **Just provide clarity**: User has what they need, moves on
|
||||
- **Continue later**: "We can pick this up anytime"
|
||||
|
||||
When it feels like things are crystallizing, you might summarize:
|
||||
|
||||
```
|
||||
## What We Figured Out
|
||||
|
||||
**The problem**: [crystallized understanding]
|
||||
|
||||
**The approach**: [if one emerged]
|
||||
|
||||
**Open questions**: [if any remain]
|
||||
|
||||
**Next steps** (if ready):
|
||||
- Create a change proposal
|
||||
- Keep exploring: just keep talking
|
||||
```
|
||||
|
||||
But this summary is optional. Sometimes the thinking IS the value.
|
||||
|
||||
---
|
||||
|
||||
## Guardrails
|
||||
|
||||
- **Don't implement** - Never write code or implement features. Creating OpenSpec artifacts is fine, writing application code is not.
|
||||
- **Don't fake understanding** - If something is unclear, dig deeper
|
||||
- **Don't rush** - Discovery is thinking time, not task time
|
||||
- **Don't force structure** - Let patterns emerge naturally
|
||||
- **Don't auto-capture** - Offer to save insights, don't just do it
|
||||
- **Do visualize** - A good diagram is worth many paragraphs
|
||||
- **Do explore the codebase** - Ground discussions in reality
|
||||
- **Do question assumptions** - Including the user's and your own
|
||||
110
.cursor/skills/openspec-propose/SKILL.md
Normal file
110
.cursor/skills/openspec-propose/SKILL.md
Normal file
@@ -0,0 +1,110 @@
|
||||
---
|
||||
name: openspec-propose
|
||||
description: Propose a new change with all artifacts generated in one step. Use when the user wants to quickly describe what they want to build and get a complete proposal with design, specs, and tasks ready for implementation.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Propose a new change - create the change and generate all artifacts in one step.
|
||||
|
||||
I'll create a change with artifacts:
|
||||
- proposal.md (what & why)
|
||||
- design.md (how)
|
||||
- tasks.md (implementation steps)
|
||||
|
||||
When ready to implement, run /opsx:apply
|
||||
|
||||
---
|
||||
|
||||
**Input**: The user's request should include a change name (kebab-case) OR a description of what they want to build.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no clear input provided, ask what they want to build**
|
||||
|
||||
Use the **AskUserQuestion tool** (open-ended, no preset options) to ask:
|
||||
> "What change do you want to work on? Describe what you want to build or fix."
|
||||
|
||||
From their description, derive a kebab-case name (e.g., "add user authentication" → `add-user-auth`).
|
||||
|
||||
**IMPORTANT**: Do NOT proceed without understanding what the user wants to build.
|
||||
|
||||
2. **Create the change directory**
|
||||
```bash
|
||||
openspec new change "<name>"
|
||||
```
|
||||
This creates a scaffolded change at `openspec/changes/<name>/` with `.openspec.yaml`.
|
||||
|
||||
3. **Get the artifact build order**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to get:
|
||||
- `applyRequires`: array of artifact IDs needed before implementation (e.g., `["tasks"]`)
|
||||
- `artifacts`: list of all artifacts with their status and dependencies
|
||||
|
||||
4. **Create artifacts in sequence until apply-ready**
|
||||
|
||||
Use the **TodoWrite tool** to track progress through the artifacts.
|
||||
|
||||
Loop through artifacts in dependency order (artifacts with no pending dependencies first):
|
||||
|
||||
a. **For each artifact that is `ready` (dependencies satisfied)**:
|
||||
- Get instructions:
|
||||
```bash
|
||||
openspec instructions <artifact-id> --change "<name>" --json
|
||||
```
|
||||
- The instructions JSON includes:
|
||||
- `context`: Project background (constraints for you - do NOT include in output)
|
||||
- `rules`: Artifact-specific rules (constraints for you - do NOT include in output)
|
||||
- `template`: The structure to use for your output file
|
||||
- `instruction`: Schema-specific guidance for this artifact type
|
||||
- `outputPath`: Where to write the artifact
|
||||
- `dependencies`: Completed artifacts to read for context
|
||||
- Read any completed dependency files for context
|
||||
- Create the artifact file using `template` as the structure
|
||||
- Apply `context` and `rules` as constraints - but do NOT copy them into the file
|
||||
- Show brief progress: "Created <artifact-id>"
|
||||
|
||||
b. **Continue until all `applyRequires` artifacts are complete**
|
||||
- After creating each artifact, re-run `openspec status --change "<name>" --json`
|
||||
- Check if every artifact ID in `applyRequires` has `status: "done"` in the artifacts array
|
||||
- Stop when all `applyRequires` artifacts are done
|
||||
|
||||
c. **If an artifact requires user input** (unclear context):
|
||||
- Use **AskUserQuestion tool** to clarify
|
||||
- Then continue with creation
|
||||
|
||||
5. **Show final status**
|
||||
```bash
|
||||
openspec status --change "<name>"
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
After completing all artifacts, summarize:
|
||||
- Change name and location
|
||||
- List of artifacts created with brief descriptions
|
||||
- What's ready: "All artifacts created! Ready for implementation."
|
||||
- Prompt: "Run `/opsx:apply`, or ask me to implement, to start working on the tasks."
|
||||
|
||||
**Artifact Creation Guidelines**
|
||||
|
||||
- Follow the `instruction` field from `openspec instructions` for each artifact type
|
||||
- The schema defines what each artifact should contain - follow it
|
||||
- Read dependency artifacts for context before creating new ones
|
||||
- Use `template` as the structure for your output file - fill in its sections
|
||||
- **IMPORTANT**: `context` and `rules` are constraints for YOU, not content for the file
|
||||
- Do NOT copy `<context>`, `<rules>`, `<project_context>` blocks into the artifact
|
||||
- These guide what you write, but should never appear in the output
|
||||
|
||||
**Guardrails**
|
||||
- Create ALL artifacts needed for implementation (as defined by schema's `apply.requires`)
|
||||
- Always read dependency artifacts before creating a new one
|
||||
- If context is critically unclear, ask the user - but prefer making reasonable decisions to keep momentum
|
||||
- If a change with that name already exists, ask if user wants to continue it or create a new one
|
||||
- Verify each artifact file exists after writing before proceeding to next
|
||||
41
.env.example
41
.env.example
@@ -9,9 +9,6 @@
|
||||
# REQUIRED - Change these values in production!
|
||||
# =============================================================================
|
||||
|
||||
# Master key for Meilisearch authentication (required)
|
||||
MEILI_MASTER_KEY=change-me-in-production
|
||||
|
||||
# Bootstrap token for initial API admin access (required)
|
||||
# Use this token for the first API calls before creating proper API tokens
|
||||
API_BOOTSTRAP_TOKEN=change-me-in-production
|
||||
@@ -21,19 +18,34 @@ API_BOOTSTRAP_TOKEN=change-me-in-production
|
||||
# =============================================================================
|
||||
|
||||
# API Service
|
||||
API_LISTEN_ADDR=0.0.0.0:8080
|
||||
API_BASE_URL=http://api:8080
|
||||
API_LISTEN_ADDR=0.0.0.0:7080
|
||||
API_BASE_URL=http://api:7080
|
||||
|
||||
# Indexer Service
|
||||
INDEXER_LISTEN_ADDR=0.0.0.0:8081
|
||||
INDEXER_LISTEN_ADDR=0.0.0.0:7081
|
||||
INDEXER_SCAN_INTERVAL_SECONDS=5
|
||||
|
||||
# Meilisearch Search Engine
|
||||
MEILI_URL=http://meilisearch:7700
|
||||
|
||||
# PostgreSQL Database
|
||||
DATABASE_URL=postgres://stripstream:stripstream@postgres:5432/stripstream
|
||||
|
||||
# =============================================================================
|
||||
# Logging
|
||||
# =============================================================================
|
||||
# Log levels per domain. Default: indexer=info,scan=info,extraction=info,thumbnail=warn,watcher=info
|
||||
# Domains:
|
||||
# scan — filesystem scan (discovery phase)
|
||||
# extraction — page extraction from archives (extracting_pages phase)
|
||||
# thumbnail — thumbnail generation (resize/encode)
|
||||
# watcher — file watcher polling
|
||||
# indexer — general indexer logs
|
||||
# Levels: error, warn, info, debug, trace
|
||||
# Examples:
|
||||
# RUST_LOG=indexer=info # default, quiet thumbnails
|
||||
# RUST_LOG=indexer=info,thumbnail=debug # enable thumbnail timing logs
|
||||
# RUST_LOG=indexer=info,extraction=debug # per-book extraction details
|
||||
# RUST_LOG=indexer=debug,scan=debug,extraction=debug,thumbnail=debug,watcher=debug # see everything (full verbosity)
|
||||
# RUST_LOG=indexer=info,scan=info,extraction=info,thumbnail=warn,watcher=info
|
||||
|
||||
# =============================================================================
|
||||
# Storage Configuration
|
||||
# =============================================================================
|
||||
@@ -46,18 +58,17 @@ LIBRARIES_ROOT_PATH=/libraries
|
||||
# Path to libraries directory on host machine (for Docker volume mount)
|
||||
# Default: ../libraries (relative to infra/docker-compose.yml)
|
||||
# You can change this to an absolute path on your machine
|
||||
LIBRARIES_HOST_PATH=../libraries
|
||||
LIBRARIES_HOST_PATH=./libraries
|
||||
|
||||
# Path to thumbnails directory on host machine (for Docker volume mount)
|
||||
# Default: ../data/thumbnails (relative to infra/docker-compose.yml)
|
||||
THUMBNAILS_HOST_PATH=../data/thumbnails
|
||||
THUMBNAILS_HOST_PATH=./data/thumbnails
|
||||
|
||||
# =============================================================================
|
||||
# Port Configuration
|
||||
# =============================================================================
|
||||
# To change ports, edit docker-compose.yml directly:
|
||||
# - API: change "7080:8080" to "YOUR_PORT:8080"
|
||||
# - Indexer: change "7081:8081" to "YOUR_PORT:8081"
|
||||
# - Backoffice: change "7082:8082" to "YOUR_PORT:8082"
|
||||
# - Meilisearch: change "7700:7700" to "YOUR_PORT:7700"
|
||||
# - API: change "7080:7080" to "YOUR_PORT:7080"
|
||||
# - Indexer: change "7081:7081" to "YOUR_PORT:7081"
|
||||
# - Backoffice: change "7082:7082" to "YOUR_PORT:7082"
|
||||
# - PostgreSQL: change "6432:5432" to "YOUR_PORT:5432"
|
||||
|
||||
17
.gitea/workflows/deploy.yml
Normal file
17
.gitea/workflows/deploy.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
name: Deploy with Docker Compose
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main # adjust to whichever branch you want to deploy
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: mac-orbstack-runner # the name you gave the runner
|
||||
steps:
|
||||
- name: Deploy stack
|
||||
env:
|
||||
DOCKER_BUILDKIT: 1
|
||||
COMPOSE_DOCKER_CLI_BUILD: 1
|
||||
run: |
|
||||
BUILDKIT_PROGRESS=plain cd /Users/julienfroidefond/Sites/docker-stack && docker pull julienfroidefond32/stripstream-backoffice && docker pull julienfroidefond32/stripstream-api && docker pull julienfroidefond32/stripstream-indexer && ./scripts/stack.sh up stripstream
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -2,7 +2,7 @@ target/
|
||||
.env
|
||||
.DS_Store
|
||||
tmp/
|
||||
libraries/
|
||||
/libraries/
|
||||
node_modules/
|
||||
.next/
|
||||
data/thumbnails
|
||||
|
||||
149
.opencode/command/opsx-apply.md
Normal file
149
.opencode/command/opsx-apply.md
Normal file
@@ -0,0 +1,149 @@
|
||||
---
|
||||
description: Implement tasks from an OpenSpec change (Experimental)
|
||||
---
|
||||
|
||||
Implement tasks from an OpenSpec change.
|
||||
|
||||
**Input**: Optionally specify a change name (e.g., `/opsx-apply add-auth`). If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **Select the change**
|
||||
|
||||
If a name is provided, use it. Otherwise:
|
||||
- Infer from conversation context if the user mentioned a change
|
||||
- Auto-select if only one active change exists
|
||||
- If ambiguous, run `openspec list --json` to get available changes and use the **AskUserQuestion tool** to let the user select
|
||||
|
||||
Always announce: "Using change: <name>" and how to override (e.g., `/opsx-apply <other>`).
|
||||
|
||||
2. **Check status to understand the schema**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used (e.g., "spec-driven")
|
||||
- Which artifact contains the tasks (typically "tasks" for spec-driven, check status for others)
|
||||
|
||||
3. **Get apply instructions**
|
||||
|
||||
```bash
|
||||
openspec instructions apply --change "<name>" --json
|
||||
```
|
||||
|
||||
This returns:
|
||||
- Context file paths (varies by schema)
|
||||
- Progress (total, complete, remaining)
|
||||
- Task list with status
|
||||
- Dynamic instruction based on current state
|
||||
|
||||
**Handle states:**
|
||||
- If `state: "blocked"` (missing artifacts): show message, suggest using `/opsx-continue`
|
||||
- If `state: "all_done"`: congratulate, suggest archive
|
||||
- Otherwise: proceed to implementation
|
||||
|
||||
4. **Read context files**
|
||||
|
||||
Read the files listed in `contextFiles` from the apply instructions output.
|
||||
The files depend on the schema being used:
|
||||
- **spec-driven**: proposal, specs, design, tasks
|
||||
- Other schemas: follow the contextFiles from CLI output
|
||||
|
||||
5. **Show current progress**
|
||||
|
||||
Display:
|
||||
- Schema being used
|
||||
- Progress: "N/M tasks complete"
|
||||
- Remaining tasks overview
|
||||
- Dynamic instruction from CLI
|
||||
|
||||
6. **Implement tasks (loop until done or blocked)**
|
||||
|
||||
For each pending task:
|
||||
- Show which task is being worked on
|
||||
- Make the code changes required
|
||||
- Keep changes minimal and focused
|
||||
- Mark task complete in the tasks file: `- [ ]` → `- [x]`
|
||||
- Continue to next task
|
||||
|
||||
**Pause if:**
|
||||
- Task is unclear → ask for clarification
|
||||
- Implementation reveals a design issue → suggest updating artifacts
|
||||
- Error or blocker encountered → report and wait for guidance
|
||||
- User interrupts
|
||||
|
||||
7. **On completion or pause, show status**
|
||||
|
||||
Display:
|
||||
- Tasks completed this session
|
||||
- Overall progress: "N/M tasks complete"
|
||||
- If all done: suggest archive
|
||||
- If paused: explain why and wait for guidance
|
||||
|
||||
**Output During Implementation**
|
||||
|
||||
```
|
||||
## Implementing: <change-name> (schema: <schema-name>)
|
||||
|
||||
Working on task 3/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
|
||||
Working on task 4/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
```
|
||||
|
||||
**Output On Completion**
|
||||
|
||||
```
|
||||
## Implementation Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 7/7 tasks complete ✓
|
||||
|
||||
### Completed This Session
|
||||
- [x] Task 1
|
||||
- [x] Task 2
|
||||
...
|
||||
|
||||
All tasks complete! You can archive this change with `/opsx-archive`.
|
||||
```
|
||||
|
||||
**Output On Pause (Issue Encountered)**
|
||||
|
||||
```
|
||||
## Implementation Paused
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 4/7 tasks complete
|
||||
|
||||
### Issue Encountered
|
||||
<description of the issue>
|
||||
|
||||
**Options:**
|
||||
1. <option 1>
|
||||
2. <option 2>
|
||||
3. Other approach
|
||||
|
||||
What would you like to do?
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Keep going through tasks until done or blocked
|
||||
- Always read context files before starting (from the apply instructions output)
|
||||
- If task is ambiguous, pause and ask before implementing
|
||||
- If implementation reveals issues, pause and suggest artifact updates
|
||||
- Keep code changes minimal and scoped to each task
|
||||
- Update task checkbox immediately after completing each task
|
||||
- Pause on errors, blockers, or unclear requirements - don't guess
|
||||
- Use contextFiles from CLI output, don't assume specific file names
|
||||
|
||||
**Fluid Workflow Integration**
|
||||
|
||||
This skill supports the "actions on a change" model:
|
||||
|
||||
- **Can be invoked anytime**: Before all artifacts are done (if tasks exist), after partial implementation, interleaved with other actions
|
||||
- **Allows artifact updates**: If implementation reveals design issues, suggest updating artifacts - not phase-locked, work fluidly
|
||||
154
.opencode/command/opsx-archive.md
Normal file
154
.opencode/command/opsx-archive.md
Normal file
@@ -0,0 +1,154 @@
|
||||
---
|
||||
description: Archive a completed change in the experimental workflow
|
||||
---
|
||||
|
||||
Archive a completed change in the experimental workflow.
|
||||
|
||||
**Input**: Optionally specify a change name after `/opsx-archive` (e.g., `/opsx-archive add-auth`). If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no change name provided, prompt for selection**
|
||||
|
||||
Run `openspec list --json` to get available changes. Use the **AskUserQuestion tool** to let the user select.
|
||||
|
||||
Show only active changes (not already archived).
|
||||
Include the schema used for each change if available.
|
||||
|
||||
**IMPORTANT**: Do NOT guess or auto-select a change. Always let the user choose.
|
||||
|
||||
2. **Check artifact completion status**
|
||||
|
||||
Run `openspec status --change "<name>" --json` to check artifact completion.
|
||||
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used
|
||||
- `artifacts`: List of artifacts with their status (`done` or other)
|
||||
|
||||
**If any artifacts are not `done`:**
|
||||
- Display warning listing incomplete artifacts
|
||||
- Prompt user for confirmation to continue
|
||||
- Proceed if user confirms
|
||||
|
||||
3. **Check task completion status**
|
||||
|
||||
Read the tasks file (typically `tasks.md`) to check for incomplete tasks.
|
||||
|
||||
Count tasks marked with `- [ ]` (incomplete) vs `- [x]` (complete).
|
||||
|
||||
**If incomplete tasks found:**
|
||||
- Display warning showing count of incomplete tasks
|
||||
- Prompt user for confirmation to continue
|
||||
- Proceed if user confirms
|
||||
|
||||
**If no tasks file exists:** Proceed without task-related warning.
|
||||
|
||||
4. **Assess delta spec sync state**
|
||||
|
||||
Check for delta specs at `openspec/changes/<name>/specs/`. If none exist, proceed without sync prompt.
|
||||
|
||||
**If delta specs exist:**
|
||||
- Compare each delta spec with its corresponding main spec at `openspec/specs/<capability>/spec.md`
|
||||
- Determine what changes would be applied (adds, modifications, removals, renames)
|
||||
- Show a combined summary before prompting
|
||||
|
||||
**Prompt options:**
|
||||
- If changes needed: "Sync now (recommended)", "Archive without syncing"
|
||||
- If already synced: "Archive now", "Sync anyway", "Cancel"
|
||||
|
||||
If user chooses sync, use Task tool (subagent_type: "general-purpose", prompt: "Use Skill tool to invoke openspec-sync-specs for change '<name>'. Delta spec analysis: <include the analyzed delta spec summary>"). Proceed to archive regardless of choice.
|
||||
|
||||
5. **Perform the archive**
|
||||
|
||||
Create the archive directory if it doesn't exist:
|
||||
```bash
|
||||
mkdir -p openspec/changes/archive
|
||||
```
|
||||
|
||||
Generate target name using current date: `YYYY-MM-DD-<change-name>`
|
||||
|
||||
**Check if target already exists:**
|
||||
- If yes: Fail with error, suggest renaming existing archive or using different date
|
||||
- If no: Move the change directory to archive
|
||||
|
||||
```bash
|
||||
mv openspec/changes/<name> openspec/changes/archive/YYYY-MM-DD-<name>
|
||||
```
|
||||
|
||||
6. **Display summary**
|
||||
|
||||
Show archive completion summary including:
|
||||
- Change name
|
||||
- Schema that was used
|
||||
- Archive location
|
||||
- Spec sync status (synced / sync skipped / no delta specs)
|
||||
- Note about any warnings (incomplete artifacts/tasks)
|
||||
|
||||
**Output On Success**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** ✓ Synced to main specs
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Output On Success (No Delta Specs)**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** No delta specs
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Output On Success With Warnings**
|
||||
|
||||
```
|
||||
## Archive Complete (with warnings)
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** Sync skipped (user chose to skip)
|
||||
|
||||
**Warnings:**
|
||||
- Archived with 2 incomplete artifacts
|
||||
- Archived with 3 incomplete tasks
|
||||
- Delta spec sync was skipped (user chose to skip)
|
||||
|
||||
Review the archive if this was not intentional.
|
||||
```
|
||||
|
||||
**Output On Error (Archive Exists)**
|
||||
|
||||
```
|
||||
## Archive Failed
|
||||
|
||||
**Change:** <change-name>
|
||||
**Target:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
|
||||
Target archive directory already exists.
|
||||
|
||||
**Options:**
|
||||
1. Rename the existing archive
|
||||
2. Delete the existing archive if it's a duplicate
|
||||
3. Wait until a different date to archive
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Always prompt for change selection if not provided
|
||||
- Use artifact graph (openspec status --json) for completion checking
|
||||
- Don't block archive on warnings - just inform and confirm
|
||||
- Preserve .openspec.yaml when moving to archive (it moves with the directory)
|
||||
- Show clear summary of what happened
|
||||
- If sync is requested, use the Skill tool to invoke `openspec-sync-specs` (agent-driven)
|
||||
- If delta specs exist, always run the sync assessment and show the combined summary before prompting
|
||||
170
.opencode/command/opsx-explore.md
Normal file
170
.opencode/command/opsx-explore.md
Normal file
@@ -0,0 +1,170 @@
|
||||
---
|
||||
description: Enter explore mode - think through ideas, investigate problems, clarify requirements
|
||||
---
|
||||
|
||||
Enter explore mode. Think deeply. Visualize freely. Follow the conversation wherever it goes.
|
||||
|
||||
**IMPORTANT: Explore mode is for thinking, not implementing.** You may read files, search code, and investigate the codebase, but you must NEVER write code or implement features. If the user asks you to implement something, remind them to exit explore mode first and create a change proposal. You MAY create OpenSpec artifacts (proposals, designs, specs) if the user asks—that's capturing thinking, not implementing.
|
||||
|
||||
**This is a stance, not a workflow.** There are no fixed steps, no required sequence, no mandatory outputs. You're a thinking partner helping the user explore.
|
||||
|
||||
**Input**: The argument after `/opsx-explore` is whatever the user wants to think about. Could be:
|
||||
- A vague idea: "real-time collaboration"
|
||||
- A specific problem: "the auth system is getting unwieldy"
|
||||
- A change name: "add-dark-mode" (to explore in context of that change)
|
||||
- A comparison: "postgres vs sqlite for this"
|
||||
- Nothing (just enter explore mode)
|
||||
|
||||
---
|
||||
|
||||
## The Stance
|
||||
|
||||
- **Curious, not prescriptive** - Ask questions that emerge naturally, don't follow a script
|
||||
- **Open threads, not interrogations** - Surface multiple interesting directions and let the user follow what resonates. Don't funnel them through a single path of questions.
|
||||
- **Visual** - Use ASCII diagrams liberally when they'd help clarify thinking
|
||||
- **Adaptive** - Follow interesting threads, pivot when new information emerges
|
||||
- **Patient** - Don't rush to conclusions, let the shape of the problem emerge
|
||||
- **Grounded** - Explore the actual codebase when relevant, don't just theorize
|
||||
|
||||
---
|
||||
|
||||
## What You Might Do
|
||||
|
||||
Depending on what the user brings, you might:
|
||||
|
||||
**Explore the problem space**
|
||||
- Ask clarifying questions that emerge from what they said
|
||||
- Challenge assumptions
|
||||
- Reframe the problem
|
||||
- Find analogies
|
||||
|
||||
**Investigate the codebase**
|
||||
- Map existing architecture relevant to the discussion
|
||||
- Find integration points
|
||||
- Identify patterns already in use
|
||||
- Surface hidden complexity
|
||||
|
||||
**Compare options**
|
||||
- Brainstorm multiple approaches
|
||||
- Build comparison tables
|
||||
- Sketch tradeoffs
|
||||
- Recommend a path (if asked)
|
||||
|
||||
**Visualize**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Use ASCII diagrams liberally │
|
||||
├─────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌────────┐ ┌────────┐ │
|
||||
│ │ State │────────▶│ State │ │
|
||||
│ │ A │ │ B │ │
|
||||
│ └────────┘ └────────┘ │
|
||||
│ │
|
||||
│ System diagrams, state machines, │
|
||||
│ data flows, architecture sketches, │
|
||||
│ dependency graphs, comparison tables │
|
||||
│ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Surface risks and unknowns**
|
||||
- Identify what could go wrong
|
||||
- Find gaps in understanding
|
||||
- Suggest spikes or investigations
|
||||
|
||||
---
|
||||
|
||||
## OpenSpec Awareness
|
||||
|
||||
You have full context of the OpenSpec system. Use it naturally, don't force it.
|
||||
|
||||
### Check for context
|
||||
|
||||
At the start, quickly check what exists:
|
||||
```bash
|
||||
openspec list --json
|
||||
```
|
||||
|
||||
This tells you:
|
||||
- If there are active changes
|
||||
- Their names, schemas, and status
|
||||
- What the user might be working on
|
||||
|
||||
If the user mentioned a specific change name, read its artifacts for context.
|
||||
|
||||
### When no change exists
|
||||
|
||||
Think freely. When insights crystallize, you might offer:
|
||||
|
||||
- "This feels solid enough to start a change. Want me to create a proposal?"
|
||||
- Or keep exploring - no pressure to formalize
|
||||
|
||||
### When a change exists
|
||||
|
||||
If the user mentions a change or you detect one is relevant:
|
||||
|
||||
1. **Read existing artifacts for context**
|
||||
- `openspec/changes/<name>/proposal.md`
|
||||
- `openspec/changes/<name>/design.md`
|
||||
- `openspec/changes/<name>/tasks.md`
|
||||
- etc.
|
||||
|
||||
2. **Reference them naturally in conversation**
|
||||
- "Your design mentions using Redis, but we just realized SQLite fits better..."
|
||||
- "The proposal scopes this to premium users, but we're now thinking everyone..."
|
||||
|
||||
3. **Offer to capture when decisions are made**
|
||||
|
||||
| Insight Type | Where to Capture |
|
||||
|--------------|------------------|
|
||||
| New requirement discovered | `specs/<capability>/spec.md` |
|
||||
| Requirement changed | `specs/<capability>/spec.md` |
|
||||
| Design decision made | `design.md` |
|
||||
| Scope changed | `proposal.md` |
|
||||
| New work identified | `tasks.md` |
|
||||
| Assumption invalidated | Relevant artifact |
|
||||
|
||||
Example offers:
|
||||
- "That's a design decision. Capture it in design.md?"
|
||||
- "This is a new requirement. Add it to specs?"
|
||||
- "This changes scope. Update the proposal?"
|
||||
|
||||
4. **The user decides** - Offer and move on. Don't pressure. Don't auto-capture.
|
||||
|
||||
---
|
||||
|
||||
## What You Don't Have To Do
|
||||
|
||||
- Follow a script
|
||||
- Ask the same questions every time
|
||||
- Produce a specific artifact
|
||||
- Reach a conclusion
|
||||
- Stay on topic if a tangent is valuable
|
||||
- Be brief (this is thinking time)
|
||||
|
||||
---
|
||||
|
||||
## Ending Exploration
|
||||
|
||||
There's no required ending. Exploration might:
|
||||
|
||||
- **Flow into a proposal**: "Ready to start? I can create a change proposal."
|
||||
- **Result in artifact updates**: "Updated design.md with these decisions"
|
||||
- **Just provide clarity**: User has what they need, moves on
|
||||
- **Continue later**: "We can pick this up anytime"
|
||||
|
||||
When things crystallize, you might offer a summary - but it's optional. Sometimes the thinking IS the value.
|
||||
|
||||
---
|
||||
|
||||
## Guardrails
|
||||
|
||||
- **Don't implement** - Never write code or implement features. Creating OpenSpec artifacts is fine, writing application code is not.
|
||||
- **Don't fake understanding** - If something is unclear, dig deeper
|
||||
- **Don't rush** - Exploration is thinking time, not task time
|
||||
- **Don't force structure** - Let patterns emerge naturally
|
||||
- **Don't auto-capture** - Offer to save insights, don't just do it
|
||||
- **Do visualize** - A good diagram is worth many paragraphs
|
||||
- **Do explore the codebase** - Ground discussions in reality
|
||||
- **Do question assumptions** - Including the user's and your own
|
||||
103
.opencode/command/opsx-propose.md
Normal file
103
.opencode/command/opsx-propose.md
Normal file
@@ -0,0 +1,103 @@
|
||||
---
|
||||
description: Propose a new change - create it and generate all artifacts in one step
|
||||
---
|
||||
|
||||
Propose a new change - create the change and generate all artifacts in one step.
|
||||
|
||||
I'll create a change with artifacts:
|
||||
- proposal.md (what & why)
|
||||
- design.md (how)
|
||||
- tasks.md (implementation steps)
|
||||
|
||||
When ready to implement, run /opsx-apply
|
||||
|
||||
---
|
||||
|
||||
**Input**: The argument after `/opsx-propose` is the change name (kebab-case), OR a description of what the user wants to build.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no input provided, ask what they want to build**
|
||||
|
||||
Use the **AskUserQuestion tool** (open-ended, no preset options) to ask:
|
||||
> "What change do you want to work on? Describe what you want to build or fix."
|
||||
|
||||
From their description, derive a kebab-case name (e.g., "add user authentication" → `add-user-auth`).
|
||||
|
||||
**IMPORTANT**: Do NOT proceed without understanding what the user wants to build.
|
||||
|
||||
2. **Create the change directory**
|
||||
```bash
|
||||
openspec new change "<name>"
|
||||
```
|
||||
This creates a scaffolded change at `openspec/changes/<name>/` with `.openspec.yaml`.
|
||||
|
||||
3. **Get the artifact build order**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to get:
|
||||
- `applyRequires`: array of artifact IDs needed before implementation (e.g., `["tasks"]`)
|
||||
- `artifacts`: list of all artifacts with their status and dependencies
|
||||
|
||||
4. **Create artifacts in sequence until apply-ready**
|
||||
|
||||
Use the **TodoWrite tool** to track progress through the artifacts.
|
||||
|
||||
Loop through artifacts in dependency order (artifacts with no pending dependencies first):
|
||||
|
||||
a. **For each artifact that is `ready` (dependencies satisfied)**:
|
||||
- Get instructions:
|
||||
```bash
|
||||
openspec instructions <artifact-id> --change "<name>" --json
|
||||
```
|
||||
- The instructions JSON includes:
|
||||
- `context`: Project background (constraints for you - do NOT include in output)
|
||||
- `rules`: Artifact-specific rules (constraints for you - do NOT include in output)
|
||||
- `template`: The structure to use for your output file
|
||||
- `instruction`: Schema-specific guidance for this artifact type
|
||||
- `outputPath`: Where to write the artifact
|
||||
- `dependencies`: Completed artifacts to read for context
|
||||
- Read any completed dependency files for context
|
||||
- Create the artifact file using `template` as the structure
|
||||
- Apply `context` and `rules` as constraints - but do NOT copy them into the file
|
||||
- Show brief progress: "Created <artifact-id>"
|
||||
|
||||
b. **Continue until all `applyRequires` artifacts are complete**
|
||||
- After creating each artifact, re-run `openspec status --change "<name>" --json`
|
||||
- Check if every artifact ID in `applyRequires` has `status: "done"` in the artifacts array
|
||||
- Stop when all `applyRequires` artifacts are done
|
||||
|
||||
c. **If an artifact requires user input** (unclear context):
|
||||
- Use **AskUserQuestion tool** to clarify
|
||||
- Then continue with creation
|
||||
|
||||
5. **Show final status**
|
||||
```bash
|
||||
openspec status --change "<name>"
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
After completing all artifacts, summarize:
|
||||
- Change name and location
|
||||
- List of artifacts created with brief descriptions
|
||||
- What's ready: "All artifacts created! Ready for implementation."
|
||||
- Prompt: "Run `/opsx-apply` to start implementing."
|
||||
|
||||
**Artifact Creation Guidelines**
|
||||
|
||||
- Follow the `instruction` field from `openspec instructions` for each artifact type
|
||||
- The schema defines what each artifact should contain - follow it
|
||||
- Read dependency artifacts for context before creating new ones
|
||||
- Use `template` as the structure for your output file - fill in its sections
|
||||
- **IMPORTANT**: `context` and `rules` are constraints for YOU, not content for the file
|
||||
- Do NOT copy `<context>`, `<rules>`, `<project_context>` blocks into the artifact
|
||||
- These guide what you write, but should never appear in the output
|
||||
|
||||
**Guardrails**
|
||||
- Create ALL artifacts needed for implementation (as defined by schema's `apply.requires`)
|
||||
- Always read dependency artifacts before creating a new one
|
||||
- If context is critically unclear, ask the user - but prefer making reasonable decisions to keep momentum
|
||||
- If a change with that name already exists, ask if user wants to continue it or create a new one
|
||||
- Verify each artifact file exists after writing before proceeding to next
|
||||
156
.opencode/skills/openspec-apply-change/SKILL.md
Normal file
156
.opencode/skills/openspec-apply-change/SKILL.md
Normal file
@@ -0,0 +1,156 @@
|
||||
---
|
||||
name: openspec-apply-change
|
||||
description: Implement tasks from an OpenSpec change. Use when the user wants to start implementing, continue implementation, or work through tasks.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Implement tasks from an OpenSpec change.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check if it can be inferred from conversation context. If vague or ambiguous you MUST prompt for available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **Select the change**
|
||||
|
||||
If a name is provided, use it. Otherwise:
|
||||
- Infer from conversation context if the user mentioned a change
|
||||
- Auto-select if only one active change exists
|
||||
- If ambiguous, run `openspec list --json` to get available changes and use the **AskUserQuestion tool** to let the user select
|
||||
|
||||
Always announce: "Using change: <name>" and how to override (e.g., `/opsx-apply <other>`).
|
||||
|
||||
2. **Check status to understand the schema**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used (e.g., "spec-driven")
|
||||
- Which artifact contains the tasks (typically "tasks" for spec-driven, check status for others)
|
||||
|
||||
3. **Get apply instructions**
|
||||
|
||||
```bash
|
||||
openspec instructions apply --change "<name>" --json
|
||||
```
|
||||
|
||||
This returns:
|
||||
- Context file paths (varies by schema - could be proposal/specs/design/tasks or spec/tests/implementation/docs)
|
||||
- Progress (total, complete, remaining)
|
||||
- Task list with status
|
||||
- Dynamic instruction based on current state
|
||||
|
||||
**Handle states:**
|
||||
- If `state: "blocked"` (missing artifacts): show message, suggest using openspec-continue-change
|
||||
- If `state: "all_done"`: congratulate, suggest archive
|
||||
- Otherwise: proceed to implementation
|
||||
|
||||
4. **Read context files**
|
||||
|
||||
Read the files listed in `contextFiles` from the apply instructions output.
|
||||
The files depend on the schema being used:
|
||||
- **spec-driven**: proposal, specs, design, tasks
|
||||
- Other schemas: follow the contextFiles from CLI output
|
||||
|
||||
5. **Show current progress**
|
||||
|
||||
Display:
|
||||
- Schema being used
|
||||
- Progress: "N/M tasks complete"
|
||||
- Remaining tasks overview
|
||||
- Dynamic instruction from CLI
|
||||
|
||||
6. **Implement tasks (loop until done or blocked)**
|
||||
|
||||
For each pending task:
|
||||
- Show which task is being worked on
|
||||
- Make the code changes required
|
||||
- Keep changes minimal and focused
|
||||
- Mark task complete in the tasks file: `- [ ]` → `- [x]`
|
||||
- Continue to next task
|
||||
|
||||
**Pause if:**
|
||||
- Task is unclear → ask for clarification
|
||||
- Implementation reveals a design issue → suggest updating artifacts
|
||||
- Error or blocker encountered → report and wait for guidance
|
||||
- User interrupts
|
||||
|
||||
7. **On completion or pause, show status**
|
||||
|
||||
Display:
|
||||
- Tasks completed this session
|
||||
- Overall progress: "N/M tasks complete"
|
||||
- If all done: suggest archive
|
||||
- If paused: explain why and wait for guidance
|
||||
|
||||
**Output During Implementation**
|
||||
|
||||
```
|
||||
## Implementing: <change-name> (schema: <schema-name>)
|
||||
|
||||
Working on task 3/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
|
||||
Working on task 4/7: <task description>
|
||||
[...implementation happening...]
|
||||
✓ Task complete
|
||||
```
|
||||
|
||||
**Output On Completion**
|
||||
|
||||
```
|
||||
## Implementation Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 7/7 tasks complete ✓
|
||||
|
||||
### Completed This Session
|
||||
- [x] Task 1
|
||||
- [x] Task 2
|
||||
...
|
||||
|
||||
All tasks complete! Ready to archive this change.
|
||||
```
|
||||
|
||||
**Output On Pause (Issue Encountered)**
|
||||
|
||||
```
|
||||
## Implementation Paused
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Progress:** 4/7 tasks complete
|
||||
|
||||
### Issue Encountered
|
||||
<description of the issue>
|
||||
|
||||
**Options:**
|
||||
1. <option 1>
|
||||
2. <option 2>
|
||||
3. Other approach
|
||||
|
||||
What would you like to do?
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Keep going through tasks until done or blocked
|
||||
- Always read context files before starting (from the apply instructions output)
|
||||
- If task is ambiguous, pause and ask before implementing
|
||||
- If implementation reveals issues, pause and suggest artifact updates
|
||||
- Keep code changes minimal and scoped to each task
|
||||
- Update task checkbox immediately after completing each task
|
||||
- Pause on errors, blockers, or unclear requirements - don't guess
|
||||
- Use contextFiles from CLI output, don't assume specific file names
|
||||
|
||||
**Fluid Workflow Integration**
|
||||
|
||||
This skill supports the "actions on a change" model:
|
||||
|
||||
- **Can be invoked anytime**: Before all artifacts are done (if tasks exist), after partial implementation, interleaved with other actions
|
||||
- **Allows artifact updates**: If implementation reveals design issues, suggest updating artifacts - not phase-locked, work fluidly
|
||||
114
.opencode/skills/openspec-archive-change/SKILL.md
Normal file
114
.opencode/skills/openspec-archive-change/SKILL.md
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
name: openspec-archive-change
|
||||
description: Archive a completed change in the experimental workflow. Use when the user wants to finalize and archive a change after implementation is complete.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Archive a completed change in the experimental workflow.
|
||||
|
||||
**Input**: Optionally specify a change name. If omitted, check whether it can be inferred from the conversation context. If the context is vague or ambiguous, you MUST prompt the user to select from the available changes.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no change name provided, prompt for selection**
|
||||
|
||||
Run `openspec list --json` to get available changes. Use the **AskUserQuestion tool** to let the user select.
|
||||
|
||||
Show only active changes (not already archived).
|
||||
Include the schema used for each change if available.
|
||||
|
||||
**IMPORTANT**: Do NOT guess or auto-select a change. Always let the user choose.
|
||||
|
||||
2. **Check artifact completion status**
|
||||
|
||||
Run `openspec status --change "<name>" --json` to check artifact completion.
|
||||
|
||||
Parse the JSON to understand:
|
||||
- `schemaName`: The workflow being used
|
||||
- `artifacts`: List of artifacts with their status (`done` or other)
|
||||
|
||||
**If any artifacts are not `done`:**
|
||||
- Display warning listing incomplete artifacts
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
3. **Check task completion status**
|
||||
|
||||
Read the tasks file (typically `tasks.md`) to check for incomplete tasks.
|
||||
|
||||
Count tasks marked with `- [ ]` (incomplete) vs `- [x]` (complete).
|
||||
|
||||
**If incomplete tasks found:**
|
||||
- Display warning showing count of incomplete tasks
|
||||
- Use **AskUserQuestion tool** to confirm user wants to proceed
|
||||
- Proceed if user confirms
|
||||
|
||||
**If no tasks file exists:** Proceed without task-related warning.
|
||||
|
||||
4. **Assess delta spec sync state**
|
||||
|
||||
Check for delta specs at `openspec/changes/<name>/specs/`. If none exist, proceed without sync prompt.
|
||||
|
||||
**If delta specs exist:**
|
||||
- Compare each delta spec with its corresponding main spec at `openspec/specs/<capability>/spec.md`
|
||||
- Determine what changes would be applied (adds, modifications, removals, renames)
|
||||
- Show a combined summary before prompting
|
||||
|
||||
**Prompt options:**
|
||||
- If changes needed: "Sync now (recommended)", "Archive without syncing"
|
||||
- If already synced: "Archive now", "Sync anyway", "Cancel"
|
||||
|
||||
If user chooses sync, use Task tool (subagent_type: "general-purpose", prompt: "Use Skill tool to invoke openspec-sync-specs for change '<name>'. Delta spec analysis: <include the analyzed delta spec summary>"). Proceed to archive regardless of choice.
|
||||
|
||||
5. **Perform the archive**
|
||||
|
||||
Create the archive directory if it doesn't exist:
|
||||
```bash
|
||||
mkdir -p openspec/changes/archive
|
||||
```
|
||||
|
||||
Generate target name using current date: `YYYY-MM-DD-<change-name>`
|
||||
|
||||
**Check if target already exists:**
|
||||
- If yes: Fail with error, suggest renaming existing archive or using different date
|
||||
- If no: Move the change directory to archive
|
||||
|
||||
```bash
|
||||
mv openspec/changes/<name> openspec/changes/archive/YYYY-MM-DD-<name>
|
||||
```
|
||||
|
||||
6. **Display summary**
|
||||
|
||||
Show archive completion summary including:
|
||||
- Change name
|
||||
- Schema that was used
|
||||
- Archive location
|
||||
- Whether specs were synced (if applicable)
|
||||
- Note about any warnings (incomplete artifacts/tasks)
|
||||
|
||||
**Output On Success**
|
||||
|
||||
```
|
||||
## Archive Complete
|
||||
|
||||
**Change:** <change-name>
|
||||
**Schema:** <schema-name>
|
||||
**Archived to:** openspec/changes/archive/YYYY-MM-DD-<name>/
|
||||
**Specs:** ✓ Synced to main specs (or "No delta specs" or "Sync skipped")
|
||||
|
||||
All artifacts complete. All tasks complete.
|
||||
```
|
||||
|
||||
**Guardrails**
|
||||
- Always prompt for change selection if not provided
|
||||
- Use artifact graph (openspec status --json) for completion checking
|
||||
- Don't block archive on warnings - just inform and confirm
|
||||
- Preserve .openspec.yaml when moving to archive (it moves with the directory)
|
||||
- Show clear summary of what happened
|
||||
- If sync is requested, use openspec-sync-specs approach (agent-driven)
|
||||
- If delta specs exist, always run the sync assessment and show the combined summary before prompting
|
||||
288
.opencode/skills/openspec-explore/SKILL.md
Normal file
288
.opencode/skills/openspec-explore/SKILL.md
Normal file
@@ -0,0 +1,288 @@
|
||||
---
|
||||
name: openspec-explore
|
||||
description: Enter explore mode - a thinking partner for exploring ideas, investigating problems, and clarifying requirements. Use when the user wants to think through something before or during a change.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Enter explore mode. Think deeply. Visualize freely. Follow the conversation wherever it goes.
|
||||
|
||||
**IMPORTANT: Explore mode is for thinking, not implementing.** You may read files, search code, and investigate the codebase, but you must NEVER write code or implement features. If the user asks you to implement something, remind them to exit explore mode first and create a change proposal. You MAY create OpenSpec artifacts (proposals, designs, specs) if the user asks—that's capturing thinking, not implementing.
|
||||
|
||||
**This is a stance, not a workflow.** There are no fixed steps, no required sequence, no mandatory outputs. You're a thinking partner helping the user explore.
|
||||
|
||||
---
|
||||
|
||||
## The Stance
|
||||
|
||||
- **Curious, not prescriptive** - Ask questions that emerge naturally, don't follow a script
|
||||
- **Open threads, not interrogations** - Surface multiple interesting directions and let the user follow what resonates. Don't funnel them through a single path of questions.
|
||||
- **Visual** - Use ASCII diagrams liberally when they'd help clarify thinking
|
||||
- **Adaptive** - Follow interesting threads, pivot when new information emerges
|
||||
- **Patient** - Don't rush to conclusions, let the shape of the problem emerge
|
||||
- **Grounded** - Explore the actual codebase when relevant, don't just theorize
|
||||
|
||||
---
|
||||
|
||||
## What You Might Do
|
||||
|
||||
Depending on what the user brings, you might:
|
||||
|
||||
**Explore the problem space**
|
||||
- Ask clarifying questions that emerge from what they said
|
||||
- Challenge assumptions
|
||||
- Reframe the problem
|
||||
- Find analogies
|
||||
|
||||
**Investigate the codebase**
|
||||
- Map existing architecture relevant to the discussion
|
||||
- Find integration points
|
||||
- Identify patterns already in use
|
||||
- Surface hidden complexity
|
||||
|
||||
**Compare options**
|
||||
- Brainstorm multiple approaches
|
||||
- Build comparison tables
|
||||
- Sketch tradeoffs
|
||||
- Recommend a path (if asked)
|
||||
|
||||
**Visualize**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Use ASCII diagrams liberally │
|
||||
├─────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌────────┐ ┌────────┐ │
|
||||
│ │ State │────────▶│ State │ │
|
||||
│ │ A │ │ B │ │
|
||||
│ └────────┘ └────────┘ │
|
||||
│ │
|
||||
│ System diagrams, state machines, │
|
||||
│ data flows, architecture sketches, │
|
||||
│ dependency graphs, comparison tables │
|
||||
│ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Surface risks and unknowns**
|
||||
- Identify what could go wrong
|
||||
- Find gaps in understanding
|
||||
- Suggest spikes or investigations
|
||||
|
||||
---
|
||||
|
||||
## OpenSpec Awareness
|
||||
|
||||
You have full context of the OpenSpec system. Use it naturally, don't force it.
|
||||
|
||||
### Check for context
|
||||
|
||||
At the start, quickly check what exists:
|
||||
```bash
|
||||
openspec list --json
|
||||
```
|
||||
|
||||
This tells you:
|
||||
- If there are active changes
|
||||
- Their names, schemas, and status
|
||||
- What the user might be working on
|
||||
|
||||
### When no change exists
|
||||
|
||||
Think freely. When insights crystallize, you might offer:
|
||||
|
||||
- "This feels solid enough to start a change. Want me to create a proposal?"
|
||||
- Or keep exploring - no pressure to formalize
|
||||
|
||||
### When a change exists
|
||||
|
||||
If the user mentions a change or you detect one is relevant:
|
||||
|
||||
1. **Read existing artifacts for context**
|
||||
- `openspec/changes/<name>/proposal.md`
|
||||
- `openspec/changes/<name>/design.md`
|
||||
- `openspec/changes/<name>/tasks.md`
|
||||
- etc.
|
||||
|
||||
2. **Reference them naturally in conversation**
|
||||
- "Your design mentions using Redis, but we just realized SQLite fits better..."
|
||||
- "The proposal scopes this to premium users, but we're now thinking everyone..."
|
||||
|
||||
3. **Offer to capture when decisions are made**
|
||||
|
||||
| Insight Type | Where to Capture |
|
||||
|--------------|------------------|
|
||||
| New requirement discovered | `specs/<capability>/spec.md` |
|
||||
| Requirement changed | `specs/<capability>/spec.md` |
|
||||
| Design decision made | `design.md` |
|
||||
| Scope changed | `proposal.md` |
|
||||
| New work identified | `tasks.md` |
|
||||
| Assumption invalidated | Relevant artifact |
|
||||
|
||||
Example offers:
|
||||
- "That's a design decision. Capture it in design.md?"
|
||||
- "This is a new requirement. Add it to specs?"
|
||||
- "This changes scope. Update the proposal?"
|
||||
|
||||
4. **The user decides** - Offer and move on. Don't pressure. Don't auto-capture.
|
||||
|
||||
---
|
||||
|
||||
## What You Don't Have To Do
|
||||
|
||||
- Follow a script
|
||||
- Ask the same questions every time
|
||||
- Produce a specific artifact
|
||||
- Reach a conclusion
|
||||
- Stay on topic if a tangent is valuable
|
||||
- Be brief (this is thinking time)
|
||||
|
||||
---
|
||||
|
||||
## Handling Different Entry Points
|
||||
|
||||
**User brings a vague idea:**
|
||||
```
|
||||
User: I'm thinking about adding real-time collaboration
|
||||
|
||||
You: Real-time collab is a big space. Let me think about this...
|
||||
|
||||
COLLABORATION SPECTRUM
|
||||
════════════════════════════════════════════
|
||||
|
||||
Awareness Coordination Sync
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌────────┐ ┌────────┐ ┌────────┐
|
||||
│Presence│ │Cursors │ │ CRDT │
|
||||
│ "3 │ │ Multi │ │Conflict│
|
||||
│online" │ │ select │ │ free │
|
||||
└────────┘ └────────┘ └────────┘
|
||||
│ │ │
|
||||
trivial moderate complex
|
||||
|
||||
Where's your head at?
|
||||
```
|
||||
|
||||
**User brings a specific problem:**
|
||||
```
|
||||
User: The auth system is a mess
|
||||
|
||||
You: [reads codebase]
|
||||
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ CURRENT AUTH FLOW │
|
||||
└─────────────────────────────────────────────┘
|
||||
│
|
||||
┌─────────────┼─────────────┐
|
||||
▼ ▼ ▼
|
||||
┌─────────┐ ┌─────────┐ ┌─────────┐
|
||||
│ Google │ │ GitHub │ │ Email │
|
||||
│ OAuth │ │ OAuth │ │ Magic │
|
||||
└────┬────┘ └────┬────┘ └────┬────┘
|
||||
│ │ │
|
||||
└─────────────┼─────────────┘
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Session │
|
||||
└─────┬─────┘
|
||||
│
|
||||
▼
|
||||
┌───────────┐
|
||||
│ Perms │
|
||||
└───────────┘
|
||||
|
||||
I see three tangles. Which one's burning?
|
||||
```
|
||||
|
||||
**User is stuck mid-implementation:**
|
||||
```
|
||||
User: /opsx-explore add-auth-system
|
||||
The OAuth integration is more complex than expected
|
||||
|
||||
You: [reads change artifacts]
|
||||
|
||||
You're on task 4: "Implement OAuth flow"
|
||||
|
||||
Let me trace what's involved...
|
||||
|
||||
[draws diagram, explores options, suggests paths]
|
||||
|
||||
Want to update the design to reflect this?
|
||||
Or add a spike task to investigate?
|
||||
```
|
||||
|
||||
**User wants to compare options:**
|
||||
```
|
||||
User: Should we use Postgres or SQLite?
|
||||
|
||||
You: Generic answer is boring. What's the context?
|
||||
|
||||
User: A CLI tool that tracks local dev environments
|
||||
|
||||
You: That changes everything.
|
||||
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ CLI TOOL DATA STORAGE │
|
||||
└─────────────────────────────────────────────────┘
|
||||
|
||||
Key constraints:
|
||||
• No daemon running
|
||||
• Must work offline
|
||||
• Single user
|
||||
|
||||
SQLite Postgres
|
||||
Deployment embedded ✓ needs server ✗
|
||||
Offline yes ✓ no ✗
|
||||
Single file yes ✓ no ✗
|
||||
|
||||
SQLite. Not even close.
|
||||
|
||||
Unless... is there a sync component?
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Ending Discovery
|
||||
|
||||
There's no required ending. Discovery might:
|
||||
|
||||
- **Flow into a proposal**: "Ready to start? I can create a change proposal."
|
||||
- **Result in artifact updates**: "Updated design.md with these decisions"
|
||||
- **Just provide clarity**: User has what they need, moves on
|
||||
- **Continue later**: "We can pick this up anytime"
|
||||
|
||||
When it feels like things are crystallizing, you might summarize:
|
||||
|
||||
```
|
||||
## What We Figured Out
|
||||
|
||||
**The problem**: [crystallized understanding]
|
||||
|
||||
**The approach**: [if one emerged]
|
||||
|
||||
**Open questions**: [if any remain]
|
||||
|
||||
**Next steps** (if ready):
|
||||
- Create a change proposal
|
||||
- Keep exploring: just keep talking
|
||||
```
|
||||
|
||||
But this summary is optional. Sometimes the thinking IS the value.
|
||||
|
||||
---
|
||||
|
||||
## Guardrails
|
||||
|
||||
- **Don't implement** - Never write code or implement features. Creating OpenSpec artifacts is fine, writing application code is not.
|
||||
- **Don't fake understanding** - If something is unclear, dig deeper
|
||||
- **Don't rush** - Discovery is thinking time, not task time
|
||||
- **Don't force structure** - Let patterns emerge naturally
|
||||
- **Don't auto-capture** - Offer to save insights, don't just do it
|
||||
- **Do visualize** - A good diagram is worth many paragraphs
|
||||
- **Do explore the codebase** - Ground discussions in reality
|
||||
- **Do question assumptions** - Including the user's and your own
|
||||
110
.opencode/skills/openspec-propose/SKILL.md
Normal file
110
.opencode/skills/openspec-propose/SKILL.md
Normal file
@@ -0,0 +1,110 @@
|
||||
---
|
||||
name: openspec-propose
|
||||
description: Propose a new change with all artifacts generated in one step. Use when the user wants to quickly describe what they want to build and get a complete proposal with design, specs, and tasks ready for implementation.
|
||||
license: MIT
|
||||
compatibility: Requires openspec CLI.
|
||||
metadata:
|
||||
author: openspec
|
||||
version: "1.0"
|
||||
generatedBy: "1.2.0"
|
||||
---
|
||||
|
||||
Propose a new change - create the change and generate all artifacts in one step.
|
||||
|
||||
I'll create a change with artifacts:
|
||||
- proposal.md (what & why)
|
||||
- design.md (how)
|
||||
- tasks.md (implementation steps)
|
||||
|
||||
When ready to implement, run /opsx-apply
|
||||
|
||||
---
|
||||
|
||||
**Input**: The user's request should include a change name (kebab-case) OR a description of what they want to build.
|
||||
|
||||
**Steps**
|
||||
|
||||
1. **If no clear input provided, ask what they want to build**
|
||||
|
||||
Use the **AskUserQuestion tool** (open-ended, no preset options) to ask:
|
||||
> "What change do you want to work on? Describe what you want to build or fix."
|
||||
|
||||
From their description, derive a kebab-case name (e.g., "add user authentication" → `add-user-auth`).
|
||||
|
||||
**IMPORTANT**: Do NOT proceed without understanding what the user wants to build.
|
||||
|
||||
2. **Create the change directory**
|
||||
```bash
|
||||
openspec new change "<name>"
|
||||
```
|
||||
This creates a scaffolded change at `openspec/changes/<name>/` with `.openspec.yaml`.
|
||||
|
||||
3. **Get the artifact build order**
|
||||
```bash
|
||||
openspec status --change "<name>" --json
|
||||
```
|
||||
Parse the JSON to get:
|
||||
- `applyRequires`: array of artifact IDs needed before implementation (e.g., `["tasks"]`)
|
||||
- `artifacts`: list of all artifacts with their status and dependencies
|
||||
|
||||
4. **Create artifacts in sequence until apply-ready**
|
||||
|
||||
Use the **TodoWrite tool** to track progress through the artifacts.
|
||||
|
||||
Loop through artifacts in dependency order (artifacts with no pending dependencies first):
|
||||
|
||||
a. **For each artifact that is `ready` (dependencies satisfied)**:
|
||||
- Get instructions:
|
||||
```bash
|
||||
openspec instructions <artifact-id> --change "<name>" --json
|
||||
```
|
||||
- The instructions JSON includes:
|
||||
- `context`: Project background (constraints for you - do NOT include in output)
|
||||
- `rules`: Artifact-specific rules (constraints for you - do NOT include in output)
|
||||
- `template`: The structure to use for your output file
|
||||
- `instruction`: Schema-specific guidance for this artifact type
|
||||
- `outputPath`: Where to write the artifact
|
||||
- `dependencies`: Completed artifacts to read for context
|
||||
- Read any completed dependency files for context
|
||||
- Create the artifact file using `template` as the structure
|
||||
- Apply `context` and `rules` as constraints - but do NOT copy them into the file
|
||||
- Show brief progress: "Created <artifact-id>"
|
||||
|
||||
b. **Continue until all `applyRequires` artifacts are complete**
|
||||
- After creating each artifact, re-run `openspec status --change "<name>" --json`
|
||||
- Check if every artifact ID in `applyRequires` has `status: "done"` in the artifacts array
|
||||
- Stop when all `applyRequires` artifacts are done
|
||||
|
||||
c. **If an artifact requires user input** (unclear context):
|
||||
- Use **AskUserQuestion tool** to clarify
|
||||
- Then continue with creation
|
||||
|
||||
5. **Show final status**
|
||||
```bash
|
||||
openspec status --change "<name>"
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
After completing all artifacts, summarize:
|
||||
- Change name and location
|
||||
- List of artifacts created with brief descriptions
|
||||
- What's ready: "All artifacts created! Ready for implementation."
|
||||
- Prompt: "Run `/opsx-apply` or ask me to implement to start working on the tasks."
|
||||
|
||||
**Artifact Creation Guidelines**
|
||||
|
||||
- Follow the `instruction` field from `openspec instructions` for each artifact type
|
||||
- The schema defines what each artifact should contain - follow it
|
||||
- Read dependency artifacts for context before creating new ones
|
||||
- Use `template` as the structure for your output file - fill in its sections
|
||||
- **IMPORTANT**: `context` and `rules` are constraints for YOU, not content for the file
|
||||
- Do NOT copy `<context>`, `<rules>`, `<project_context>` blocks into the artifact
|
||||
- These guide what you write, but should never appear in the output
|
||||
|
||||
**Guardrails**
|
||||
- Create ALL artifacts needed for implementation (as defined by schema's `apply.requires`)
|
||||
- Always read dependency artifacts before creating a new one
|
||||
- If context is critically unclear, ask the user - but prefer making reasonable decisions to keep momentum
|
||||
- If a change with that name already exists, ask if user wants to continue it or create a new one
|
||||
- Verify each artifact file exists after writing before proceeding to next
|
||||
46
AGENTS.md
46
AGENTS.md
@@ -73,12 +73,14 @@ sqlx migrate add -r migration_name
|
||||
|
||||
### Docker Development
|
||||
|
||||
`docker-compose.yml` est à la **racine** du projet (pas dans `infra/`).
|
||||
|
||||
```bash
|
||||
# Start infrastructure only
|
||||
cd infra && docker compose up -d postgres meilisearch
|
||||
docker compose up -d postgres
|
||||
|
||||
# Start full stack
|
||||
cd infra && docker compose up -d
|
||||
docker compose up -d
|
||||
|
||||
# View logs
|
||||
docker compose logs -f api
|
||||
@@ -226,24 +228,21 @@ pub struct BookItem {
|
||||
```
|
||||
stripstream-librarian/
|
||||
├── apps/
|
||||
│ ├── api/ # REST API (axum)
|
||||
│ │ └── src/
|
||||
│ │ ├── main.rs
|
||||
│ │ ├── books.rs
|
||||
│ │ ├── pages.rs
|
||||
│ │ └── ...
|
||||
│ ├── indexer/ # Background indexing service
|
||||
│ │ └── src/
|
||||
│ │ └── main.rs
|
||||
│ └── backoffice/ # Next.js admin UI
|
||||
│ ├── api/ # REST API (axum) — port 7080
|
||||
│ │ └── src/ # books.rs, pages.rs, thumbnails.rs, state.rs, auth.rs...
|
||||
│ ├── indexer/ # Background indexing service — port 7081
|
||||
│ │ └── src/ # worker.rs, scanner.rs, batch.rs, scheduler.rs, watcher.rs...
|
||||
│ └── backoffice/ # Next.js admin UI — port 7082
|
||||
├── crates/
|
||||
│ ├── core/ # Shared config
|
||||
│ ├── core/ # Shared config (env vars)
|
||||
│ │ └── src/config.rs
|
||||
│ └── parsers/ # Book parsing (CBZ, CBR, PDF)
|
||||
├── infra/
|
||||
│ ├── migrations/ # SQL migrations
|
||||
│ └── docker-compose.yml
|
||||
└── libraries/ # Book storage (mounted volume)
|
||||
│ └── migrations/ # SQL migrations (sqlx)
|
||||
├── data/
|
||||
│   └── thumbnails/        # Thumbnails générés par l'indexer (phase 2, analyzer.rs)
|
||||
├── libraries/ # Book storage (mounted volume)
|
||||
└── docker-compose.yml # À la racine (pas dans infra/)
|
||||
```
|
||||
|
||||
### Key Files
|
||||
@@ -251,8 +250,13 @@ stripstream-librarian/
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `apps/api/src/books.rs` | Book CRUD endpoints |
|
||||
| `apps/api/src/pages.rs` | Page rendering & caching |
|
||||
| `apps/indexer/src/main.rs` | Indexing logic, batch processing |
|
||||
| `apps/api/src/pages.rs` | Page rendering & caching (LRU + disk) |
|
||||
| `apps/api/src/thumbnails.rs` | Endpoints pour créer des jobs thumbnail (rebuild/regenerate) |
|
||||
| `apps/api/src/state.rs` | AppState, Semaphore concurrent_renders |
|
||||
| `apps/indexer/src/scanner.rs` | Phase 1 discovery : scan rapide sans I/O archive, skip dossiers inchangés |
|
||||
| `apps/indexer/src/analyzer.rs` | Phase 2 analysis : `analyze_book` + génération thumbnails WebP |
|
||||
| `apps/indexer/src/batch.rs` | Bulk DB ops via UNNEST |
|
||||
| `apps/indexer/src/worker.rs` | Job loop, watcher, scheduler orchestration |
|
||||
| `crates/parsers/src/lib.rs` | Format detection, metadata parsing |
|
||||
| `crates/core/src/config.rs` | Configuration from environment |
|
||||
| `infra/migrations/*.sql` | Database schema |
|
||||
@@ -269,7 +273,7 @@ impl IndexerConfig {
|
||||
pub fn from_env() -> Result<Self> {
|
||||
Ok(Self {
|
||||
listen_addr: std::env::var("INDEXER_LISTEN_ADDR")
|
||||
.unwrap_or_else(|_| "0.0.0.0:8081".to_string()),
|
||||
.unwrap_or_else(|_| "0.0.0.0:7081".to_string()),
|
||||
database_url: std::env::var("DATABASE_URL")
|
||||
.context("DATABASE_URL is required")?,
|
||||
// ...
|
||||
@@ -298,4 +302,6 @@ fn remap_libraries_path(path: &str) -> String {
|
||||
- **Workspace**: This is a Cargo workspace. Always specify the package when building specific apps.
|
||||
- **Dependencies**: External crates are defined in workspace `Cargo.toml`, not individual `Cargo.toml`.
|
||||
- **Database**: PostgreSQL is required. Run migrations before starting services.
|
||||
- **External Tools**: The indexer relies on `unar` (for CBR) and `pdftoppm` (for PDF) being installed on the system.
|
||||
- **External Tools**: 4 system tools required — `unrar` (CBR page count), `unar` (CBR extraction), `pdfinfo` (PDF page count), `pdftoppm` (PDF page render). Note: `unrar` and `unar` are distinct tools.
|
||||
- **Thumbnails**: generated by the **indexer** service (phase 2, `analyzer.rs`). The API only creates jobs in DB — it does not generate thumbnails directly.
|
||||
- **Sub-AGENTS.md**: module-specific guidelines in `apps/api/`, `apps/indexer/`, `apps/backoffice/`, `crates/parsers/`.
|
||||
|
||||
73
CLAUDE.md
Normal file
73
CLAUDE.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Stripstream Librarian
|
||||
|
||||
Gestionnaire de bibliothèque de bandes dessinées/ebooks. Workspace Cargo multi-crates avec backoffice Next.js.
|
||||
|
||||
## Architecture
|
||||
|
||||
| Service | Dossier | Port local |
|
||||
|---------|---------|------------|
|
||||
| API REST (axum) | `apps/api/` | 7080 |
|
||||
| Indexer (background) | `apps/indexer/` | 7081 |
|
||||
| Backoffice (Next.js) | `apps/backoffice/` | 7082 |
|
||||
| PostgreSQL | `docker-compose.yml` (racine) | 6432 |
|
||||
|
||||
Crates partagés : `crates/core` (config env), `crates/parsers` (CBZ/CBR/PDF).
|
||||
|
||||
## Commandes
|
||||
|
||||
```bash
|
||||
# Build
|
||||
cargo build # workspace entier
|
||||
cargo build -p api # crate spécifique
|
||||
cargo build --release # version optimisée
|
||||
|
||||
# Linting / format
|
||||
cargo clippy
|
||||
cargo fmt
|
||||
|
||||
# Tests
|
||||
cargo test
|
||||
cargo test -p parsers
|
||||
|
||||
# Infra (dépendances uniquement) — docker-compose.yml est à la racine
|
||||
docker compose up -d postgres
|
||||
|
||||
# Backoffice dev
|
||||
cd apps/backoffice && npm install && npm run dev # http://localhost:7082
|
||||
|
||||
# Migrations
|
||||
sqlx migrate run # DATABASE_URL doit être défini
|
||||
```
|
||||
|
||||
## Environnement
|
||||
|
||||
```bash
|
||||
cp .env.example .env # puis éditer les valeurs REQUIRED
|
||||
```
|
||||
|
||||
Variables **requises** au démarrage : `DATABASE_URL`, `API_BOOTSTRAP_TOKEN`.
|
||||
|
||||
## Gotchas
|
||||
|
||||
- **Dépendances système** : 4 outils requis — `unrar` (CBR listing), `unar` (CBR extraction), `pdfinfo` (PDF page count), `pdftoppm` (PDF rendu). `unrar` ≠ `unar`.
|
||||
- **Port backoffice** : `npm run dev` écoute sur **7082**, pas 3000.
|
||||
- **LIBRARIES_ROOT_PATH** : les chemins en DB commencent par `/libraries/` ; en dev local, définir cette variable pour remapper vers le dossier réel.
|
||||
- **Thumbnails** : stockés dans `THUMBNAIL_DIRECTORY` (défaut `/data/thumbnails`), générés par **l'indexer** (phase 2, `analyzer.rs`) — l'API se contente de créer des jobs thumbnail en DB ; un checkup peut être déclenché via `POST /index/jobs/:id/thumbnails/checkup`.
|
||||
- **Workspace Cargo** : les dépendances externes sont définies dans le `Cargo.toml` racine, pas dans les crates individuels.
|
||||
- **Migrations** : dossier `infra/migrations/`, géré par sqlx. Toujours migrer avant de démarrer les services.
|
||||
- **Recherche** : full-text via PostgreSQL (`ILIKE` + `pg_trgm`), pas de moteur de recherche externe.
|
||||
|
||||
## Fichiers clés
|
||||
|
||||
| Fichier | Rôle |
|
||||
|---------|------|
|
||||
| `crates/core/src/config.rs` | Config depuis env (API, Indexer, AdminUI) |
|
||||
| `crates/parsers/src/lib.rs` | Détection format, extraction métadonnées |
|
||||
| `apps/api/src/books.rs` | Endpoints CRUD livres |
|
||||
| `apps/api/src/search.rs` | Recherche full-text PostgreSQL |
|
||||
| `apps/api/src/pages.rs` | Rendu pages + cache LRU |
|
||||
| `apps/indexer/src/scanner.rs` | Scan filesystem |
|
||||
| `infra/migrations/*.sql` | Schéma DB |
|
||||
|
||||
> Voir `AGENTS.md` pour les conventions de code détaillées (error handling, patterns sqlx, async/tokio).
|
||||
> Des `AGENTS.md` spécifiques existent dans `apps/api/`, `apps/indexer/`, `apps/backoffice/`, `crates/parsers/`.
|
||||
808
Cargo.lock
generated
808
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -9,7 +9,7 @@ resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
edition = "2021"
|
||||
version = "0.1.0"
|
||||
version = "1.9.0"
|
||||
license = "MIT"
|
||||
|
||||
[workspace.dependencies]
|
||||
@@ -19,6 +19,7 @@ axum = "0.7"
|
||||
base64 = "0.22"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
image = { version = "0.25", default-features = false, features = ["jpeg", "png", "webp"] }
|
||||
jpeg-decoder = "0.3"
|
||||
lru = "0.12"
|
||||
rayon = "1.10"
|
||||
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
|
||||
@@ -32,6 +33,12 @@ tower = { version = "0.5", features = ["limit"] }
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] }
|
||||
uuid = { version = "1.12", features = ["serde", "v4"] }
|
||||
natord = "1.0"
|
||||
num_cpus = "1.16"
|
||||
pdfium-render = { version = "0.8", default-features = false, features = ["pdfium_latest", "image_latest", "thread_safe"] }
|
||||
unrar = "0.5"
|
||||
walkdir = "2.5"
|
||||
webp = "0.3"
|
||||
utoipa = "4.0"
|
||||
utoipa-swagger-ui = "6.0"
|
||||
scraper = "0.21"
|
||||
|
||||
10
PLAN.md
10
PLAN.md
@@ -12,7 +12,7 @@ Construire un serveur ultra performant pour indexer et servir des bibliotheques
|
||||
- Backend/API: Rust (`axum`)
|
||||
- Indexation: service Rust dedie (`indexer`)
|
||||
- DB: PostgreSQL
|
||||
- Recherche: Meilisearch
|
||||
- Recherche: PostgreSQL full-text (ILIKE + pg_trgm)
|
||||
- Deploiement: Docker Compose
|
||||
- Auth: token bootstrap env + tokens admin en DB (creables/revocables)
|
||||
- Expiration tokens admin: aucune par defaut (revocation manuelle)
|
||||
@@ -33,7 +33,7 @@ Construire un serveur ultra performant pour indexer et servir des bibliotheques
|
||||
**DoD:** Build des crates OK.
|
||||
|
||||
### T2 - Infra Docker Compose
|
||||
- [x] Definir services `postgres`, `meilisearch`, `api`, `indexer`
|
||||
- [x] Definir services `postgres`, `api`, `indexer`
|
||||
- [x] Volumes persistants
|
||||
- [x] Healthchecks
|
||||
|
||||
@@ -114,7 +114,7 @@ Construire un serveur ultra performant pour indexer et servir des bibliotheques
|
||||
**DoD:** Pagination/filtres fonctionnels.
|
||||
|
||||
### T13 - Recherche
|
||||
- [x] Projection vers Meilisearch
|
||||
- [x] Recherche full-text PostgreSQL
|
||||
- [x] `GET /search?q=...&library_id=...&type=...`
|
||||
- [x] Fuzzy + filtres
|
||||
|
||||
@@ -264,10 +264,10 @@ Construire un serveur ultra performant pour indexer et servir des bibliotheques
|
||||
- Bootstrap token = break-glass (peut etre desactive plus tard)
|
||||
|
||||
## Journal
|
||||
- 2026-03-05: `docker compose up -d --build` valide, stack complete en healthy (`postgres`, `meilisearch`, `api`, `indexer`, `admin-ui`).
|
||||
- 2026-03-05: `docker compose up -d --build` valide, stack complete en healthy (`postgres`, `api`, `indexer`, `admin-ui`).
|
||||
- 2026-03-05: ajustements infra appliques pour demarrage stable (`unrar` -> `unrar-free`, image `rust:1-bookworm`, healthchecks `127.0.0.1`).
|
||||
- 2026-03-05: ajout d'un service `migrate` dans Compose pour executer automatiquement `infra/migrations/0001_init.sql` au demarrage.
|
||||
- 2026-03-05: Lot 2 termine (jobs, scan incremental, parsers `cbz/cbr/pdf`, API livres, sync + recherche Meilisearch).
|
||||
- 2026-03-05: Lot 2 termine (jobs, scan incremental, parsers `cbz/cbr/pdf`, API livres, recherche PostgreSQL).
|
||||
- 2026-03-05: verification de bout en bout OK sur une librairie de test (`/libraries/demo`) avec indexation, listing `/books` et recherche `/search` (1 CBZ detecte).
|
||||
- 2026-03-05: Lot 3 avancee: endpoint pages (`/books/:id/pages/:n`) actif avec cache LRU, ETag/Cache-Control, limite concurrence rendu et timeouts.
|
||||
- 2026-03-05: hardening API: readiness expose sans auth via `route_layer`, metriques simples `/metrics`, rate limiting lecture (120 req/s).
|
||||
|
||||
157
README.md
157
README.md
@@ -9,7 +9,7 @@ The project consists of the following components:
|
||||
- **API** (`apps/api/`) - Rust-based REST API service
|
||||
- **Indexer** (`apps/indexer/`) - Rust-based background indexing service
|
||||
- **Backoffice** (`apps/backoffice/`) - Next.js web administration interface
|
||||
- **Infrastructure** (`infra/`) - Docker Compose setup with PostgreSQL and Meilisearch
|
||||
- **Infrastructure** (`infra/`) - Docker Compose setup with PostgreSQL
|
||||
|
||||
## Quick Start
|
||||
|
||||
@@ -27,28 +27,24 @@ The project consists of the following components:
|
||||
```
|
||||
|
||||
2. Edit `.env` and set secure values for:
|
||||
- `MEILI_MASTER_KEY` - Master key for Meilisearch
|
||||
- `API_BOOTSTRAP_TOKEN` - Bootstrap token for initial API authentication
|
||||
|
||||
### Running with Docker
|
||||
|
||||
```bash
|
||||
cd infra
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This will start:
|
||||
- PostgreSQL (port 5432)
|
||||
- Meilisearch (port 7700)
|
||||
- API service (port 8080)
|
||||
- Indexer service (port 8081)
|
||||
- Backoffice web UI (port 8082)
|
||||
- PostgreSQL (port 6432)
|
||||
- API service (port 7080)
|
||||
- Indexer service (port 7081)
|
||||
- Backoffice web UI (port 7082)
|
||||
|
||||
### Accessing the Application
|
||||
|
||||
- **Backoffice**: http://localhost:8082
|
||||
- **API**: http://localhost:8080
|
||||
- **Meilisearch**: http://localhost:7700
|
||||
- **Backoffice**: http://localhost:7082
|
||||
- **API**: http://localhost:7080
|
||||
|
||||
### Default Credentials
|
||||
|
||||
@@ -62,8 +58,7 @@ The default bootstrap token is configured in your `.env` file. Use this for init
|
||||
|
||||
```bash
|
||||
# Start dependencies
|
||||
cd infra
|
||||
docker compose up -d postgres meilisearch
|
||||
docker compose up -d postgres
|
||||
|
||||
# Run API
|
||||
cd apps/api
|
||||
@@ -82,7 +77,7 @@ npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
The backoffice will be available at http://localhost:3000
|
||||
The backoffice will be available at http://localhost:7082
|
||||
|
||||
## Features
|
||||
|
||||
@@ -96,7 +91,7 @@ The backoffice will be available at http://localhost:3000
|
||||
- Support for CBZ, CBR, and PDF formats
|
||||
- Automatic metadata extraction
|
||||
- Series and volume detection
|
||||
- Full-text search with Meilisearch
|
||||
- Full-text search powered by PostgreSQL
|
||||
|
||||
### Jobs Monitoring
|
||||
- Real-time job progress tracking
|
||||
@@ -111,24 +106,47 @@ The backoffice will be available at http://localhost:3000
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `API_LISTEN_ADDR` | API service bind address | `0.0.0.0:8080` |
|
||||
| `INDEXER_LISTEN_ADDR` | Indexer service bind address | `0.0.0.0:8081` |
|
||||
| `BACKOFFICE_PORT` | Backoffice web UI port | `8082` |
|
||||
| `DATABASE_URL` | PostgreSQL connection string | `postgres://stripstream:stripstream@postgres:5432/stripstream` |
|
||||
| `MEILI_URL` | Meilisearch connection URL | `http://meilisearch:7700` |
|
||||
| `MEILI_MASTER_KEY` | Meilisearch master key (required) | - |
|
||||
| `API_BOOTSTRAP_TOKEN` | Initial API admin token (required) | - |
|
||||
| `INDEXER_SCAN_INTERVAL_SECONDS` | Watcher scan interval | `5` |
|
||||
| `LIBRARIES_ROOT_PATH` | Path to libraries directory | `/libraries` |
|
||||
Variables marquées **required** doivent être définies. Les autres ont une valeur par défaut.
|
||||
|
||||
### Partagées (API + Indexer)
|
||||
|
||||
| Variable | Description | Défaut |
|
||||
|----------|-------------|--------|
|
||||
| `DATABASE_URL` | **required** — Connexion PostgreSQL | — |
|
||||
|
||||
### API
|
||||
|
||||
| Variable | Description | Défaut |
|
||||
|----------|-------------|--------|
|
||||
| `API_BOOTSTRAP_TOKEN` | **required** — Token admin initial | — |
|
||||
| `API_LISTEN_ADDR` | Adresse d'écoute | `0.0.0.0:7080` |
|
||||
|
||||
### Indexer
|
||||
|
||||
| Variable | Description | Défaut |
|
||||
|----------|-------------|--------|
|
||||
| `INDEXER_LISTEN_ADDR` | Adresse d'écoute | `0.0.0.0:7081` |
|
||||
| `INDEXER_SCAN_INTERVAL_SECONDS` | Intervalle de scan du watcher | `5` |
|
||||
| `THUMBNAIL_ENABLED` | Activer la génération de thumbnails | `true` |
|
||||
| `THUMBNAIL_DIRECTORY` | Dossier de stockage des thumbnails | `/data/thumbnails` |
|
||||
| `THUMBNAIL_WIDTH` | Largeur max des thumbnails (px) | `300` |
|
||||
| `THUMBNAIL_HEIGHT` | Hauteur max des thumbnails (px) | `400` |
|
||||
| `THUMBNAIL_QUALITY` | Qualité WebP (0–100) | `80` |
|
||||
| `THUMBNAIL_FORMAT` | Format de sortie | `webp` |
|
||||
|
||||
### Backoffice
|
||||
|
||||
| Variable | Description | Défaut |
|
||||
|----------|-------------|--------|
|
||||
| `API_BOOTSTRAP_TOKEN` | **required** — Token d'accès à l'API | — |
|
||||
| `API_BASE_URL` | URL interne de l'API (dans le réseau Docker) | `http://api:7080` |
|
||||
|
||||
## API Documentation
|
||||
|
||||
The API is documented with OpenAPI/Swagger. When running locally, access the docs at:
|
||||
|
||||
```
|
||||
http://localhost:8080/api-docs
|
||||
http://localhost:7080/swagger-ui
|
||||
```
|
||||
|
||||
## Project Structure
|
||||
@@ -140,12 +158,95 @@ stripstream-librarian/
|
||||
│ ├── indexer/ # Rust background indexer
|
||||
│ └── backoffice/ # Next.js web UI
|
||||
├── infra/
|
||||
│ ├── docker-compose.yml
|
||||
│ └── migrations/ # SQL database migrations
|
||||
├── libraries/ # Book storage (mounted volume)
|
||||
└── .env # Environment configuration
|
||||
```
|
||||
|
||||
## Docker Registry
|
||||
|
||||
Images are built and pushed to Docker Hub with the naming convention `docker.io/{owner}/stripstream-{service}`.
|
||||
|
||||
### Publishing Images (Maintainers)
|
||||
|
||||
To build and push all service images to the registry:
|
||||
|
||||
```bash
|
||||
# Login to Docker Hub first
|
||||
docker login -u julienfroidefond32
|
||||
|
||||
# Build and push all images
|
||||
./scripts/docker-push.sh
|
||||
```
|
||||
|
||||
This script will:
|
||||
- Build images for `api`, `indexer`, and `backoffice`
|
||||
- Tag them with the current version (from `Cargo.toml`) and `latest`
|
||||
- Push to the registry
|
||||
|
||||
### Using Published Images
|
||||
|
||||
To use the pre-built images in your own `docker-compose.yml`:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
environment:
|
||||
POSTGRES_DB: stripstream
|
||||
POSTGRES_USER: stripstream
|
||||
POSTGRES_PASSWORD: stripstream
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
|
||||
api:
|
||||
image: julienfroidefond32/stripstream-api:latest
|
||||
ports:
|
||||
- "7080:7080"
|
||||
volumes:
|
||||
- ./libraries:/libraries
|
||||
- ./data/thumbnails:/data/thumbnails
|
||||
environment:
|
||||
# --- Required ---
|
||||
DATABASE_URL: postgres://stripstream:stripstream@postgres:5432/stripstream
|
||||
API_BOOTSTRAP_TOKEN: your_bootstrap_token # required — change this
|
||||
# --- Optional (defaults shown) ---
|
||||
# API_LISTEN_ADDR: 0.0.0.0:7080
|
||||
|
||||
indexer:
|
||||
image: julienfroidefond32/stripstream-indexer:latest
|
||||
ports:
|
||||
- "7081:7081"
|
||||
volumes:
|
||||
- ./libraries:/libraries
|
||||
- ./data/thumbnails:/data/thumbnails
|
||||
environment:
|
||||
# --- Required ---
|
||||
DATABASE_URL: postgres://stripstream:stripstream@postgres:5432/stripstream
|
||||
# --- Optional (defaults shown) ---
|
||||
# INDEXER_LISTEN_ADDR: 0.0.0.0:7081
|
||||
# INDEXER_SCAN_INTERVAL_SECONDS: 5
|
||||
# THUMBNAIL_ENABLED: true
|
||||
# THUMBNAIL_DIRECTORY: /data/thumbnails
|
||||
# THUMBNAIL_WIDTH: 300
|
||||
# THUMBNAIL_HEIGHT: 400
|
||||
# THUMBNAIL_QUALITY: 80
|
||||
# THUMBNAIL_FORMAT: webp
|
||||
|
||||
backoffice:
|
||||
image: julienfroidefond32/stripstream-backoffice:latest
|
||||
ports:
|
||||
- "7082:7082"
|
||||
environment:
|
||||
# --- Required ---
|
||||
API_BOOTSTRAP_TOKEN: your_bootstrap_token # must match api above
|
||||
# --- Optional (defaults shown) ---
|
||||
# API_BASE_URL: http://api:7080
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
[Your License Here]
|
||||
|
||||
73
apps/api/AGENTS.md
Normal file
73
apps/api/AGENTS.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# apps/api — REST API (axum)
|
||||
|
||||
Service HTTP sur le port **7080**. Voir `AGENTS.md` racine pour les conventions globales.
|
||||
|
||||
## Structure des fichiers
|
||||
|
||||
| Fichier | Rôle |
|
||||
|---------|------|
|
||||
| `main.rs` | Routes, initialisation AppState, Semaphore concurrent_renders |
|
||||
| `state.rs` | `AppState` (pool, caches, métriques), `load_concurrent_renders` |
|
||||
| `auth.rs` | Middlewares `require_admin` / `require_read`, authentification tokens |
|
||||
| `error.rs` | `ApiError` avec constructeurs `bad_request`, `not_found`, `internal`, etc. |
|
||||
| `books.rs` | CRUD livres, thumbnails |
|
||||
| `pages.rs` | Rendu page + double cache (mémoire LRU + disque) |
|
||||
| `libraries.rs` | CRUD bibliothèques, déclenchement scans |
|
||||
| `index_jobs.rs` | Suivi jobs, SSE streaming progression |
|
||||
| `thumbnails.rs` | Rebuild/regénération thumbnails |
|
||||
| `tokens.rs` | Gestion tokens API (create/revoke) |
|
||||
| `settings.rs` | Paramètres applicatifs (stockés en DB, clé `limits`) |
|
||||
| `openapi.rs` | Doc OpenAPI via utoipa, accessible sur `/swagger-ui` |
|
||||
|
||||
## Patterns clés
|
||||
|
||||
### Handler type
|
||||
```rust
|
||||
async fn my_handler(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<Json<MyDto>, ApiError> {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
### Erreurs API
|
||||
```rust
|
||||
// Constructeurs disponibles dans error.rs
|
||||
ApiError::bad_request("message")
|
||||
ApiError::not_found("resource not found")
|
||||
ApiError::internal("unexpected error")
|
||||
ApiError::unauthorized("missing token")
|
||||
ApiError::forbidden("admin required")
|
||||
|
||||
// Conversion auto depuis sqlx::Error et std::io::Error
|
||||
```
|
||||
|
||||
### Authentification
|
||||
- **Bootstrap token** : comparaison directe (`API_BOOTSTRAP_TOKEN`), scope Admin
|
||||
- **Tokens DB** : format `stl_<prefix>_<secret>`, hash argon2 en DB, scope `admin` ou `read`
|
||||
- Middleware `require_admin` → routes admin ; `require_read` → routes lecture
|
||||
|
||||
### OpenAPI (utoipa)
|
||||
```rust
|
||||
#[utoipa::path(get, path = "/books/{id}", ...)]
|
||||
async fn get_book(...) { }
|
||||
// Ajouter le handler dans openapi.rs (ApiDoc)
|
||||
```
|
||||
|
||||
### Cache pages (`pages.rs`)
|
||||
- **Cache mémoire** : LRU 512 entrées (`AppState.page_cache`)
|
||||
- **Cache disque** : `IMAGE_CACHE_DIR` (défaut `/tmp/stripstream-image-cache`), clé SHA256
|
||||
- Concurrence limitée par `AppState.page_render_limit` (Semaphore, configurable en DB)
|
||||
- `spawn_blocking` pour le rendu image (CPU-bound)
|
||||
|
||||
### Paramètre concurrent_renders
|
||||
Stocké en DB : `SELECT value FROM app_settings WHERE key = 'limits'` → JSON `{"concurrent_renders": N}`.
|
||||
Chargé au démarrage dans `load_concurrent_renders`.
|
||||
|
||||
## Gotchas
|
||||
|
||||
- **LIBRARIES_ROOT_PATH** : les `abs_path` en DB commencent par `/libraries/`. Appeler `remap_libraries_path()` avant tout accès fichier.
|
||||
- **Rate limit lecture** : middleware `read_rate_limit` sur les routes read (100 req/5s par défaut).
|
||||
- **Métriques** : `/metrics` expose `requests_total`, `page_cache_hits`, `page_cache_misses` (atomics dans `AppState.metrics`).
|
||||
- **Swagger** : accessible sur `/swagger-ui`, spec JSON sur `/openapi.json`.
|
||||
@@ -13,10 +13,13 @@ async-stream = "0.3"
|
||||
chrono.workspace = true
|
||||
futures = "0.3"
|
||||
image.workspace = true
|
||||
jpeg-decoder.workspace = true
|
||||
lru.workspace = true
|
||||
stripstream-core = { path = "../../crates/core" }
|
||||
parsers = { path = "../../crates/parsers" }
|
||||
rand.workspace = true
|
||||
tokio-stream = "0.1"
|
||||
regex = "1"
|
||||
reqwest.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
@@ -28,8 +31,7 @@ tower-http = { version = "0.6", features = ["cors"] }
|
||||
tracing.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
uuid.workspace = true
|
||||
zip = { version = "2.2", default-features = false, features = ["deflate"] }
|
||||
utoipa.workspace = true
|
||||
utoipa-swagger-ui = { workspace = true, features = ["axum"] }
|
||||
webp = "0.3"
|
||||
walkdir = "2"
|
||||
webp.workspace = true
|
||||
scraper.workspace = true
|
||||
|
||||
@@ -1,30 +1,65 @@
|
||||
FROM rust:1-bookworm AS builder
|
||||
WORKDIR /app
|
||||
|
||||
# Install sccache for faster builds
|
||||
RUN cargo install sccache --locked
|
||||
ENV RUSTC_WRAPPER=sccache
|
||||
ENV SCCACHE_DIR=/sccache
|
||||
|
||||
# Copy workspace manifests and create dummy source files to cache dependency builds
|
||||
COPY Cargo.toml ./
|
||||
COPY apps/api/Cargo.toml apps/api/Cargo.toml
|
||||
COPY apps/indexer/Cargo.toml apps/indexer/Cargo.toml
|
||||
COPY crates/core/Cargo.toml crates/core/Cargo.toml
|
||||
COPY crates/parsers/Cargo.toml crates/parsers/Cargo.toml
|
||||
|
||||
RUN mkdir -p apps/api/src apps/indexer/src crates/core/src crates/parsers/src && \
|
||||
echo "fn main() {}" > apps/api/src/main.rs && \
|
||||
echo "fn main() {}" > apps/indexer/src/main.rs && \
|
||||
echo "" > apps/indexer/src/lib.rs && \
|
||||
echo "" > crates/core/src/lib.rs && \
|
||||
echo "" > crates/parsers/src/lib.rs
|
||||
|
||||
# Build dependencies only (cached as long as Cargo.toml files don't change)
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
--mount=type=cache,target=/app/target \
|
||||
cargo build --release -p api && \
|
||||
cargo install sqlx-cli --no-default-features --features postgres --locked
|
||||
|
||||
# Copy real source code and build
|
||||
COPY apps/api/src apps/api/src
|
||||
COPY apps/indexer/src apps/indexer/src
|
||||
COPY crates/core/src crates/core/src
|
||||
COPY crates/parsers/src crates/parsers/src
|
||||
|
||||
# Build with sccache (cache persisted between builds via Docker cache mount)
|
||||
RUN --mount=type=cache,target=/sccache \
|
||||
cargo build --release -p api
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
--mount=type=cache,target=/app/target \
|
||||
touch apps/api/src/main.rs crates/core/src/lib.rs crates/parsers/src/lib.rs && \
|
||||
cargo build --release -p api && \
|
||||
cp /app/target/release/api /usr/local/bin/api
|
||||
|
||||
FROM debian:bookworm-slim
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates wget unar poppler-utils locales && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates wget locales postgresql-client \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen
|
||||
ENV LANG=en_US.UTF-8
|
||||
ENV LC_ALL=en_US.UTF-8
|
||||
COPY --from=builder /app/target/release/api /usr/local/bin/api
|
||||
EXPOSE 8080
|
||||
CMD ["/usr/local/bin/api"]
|
||||
|
||||
# Download pdfium shared library (replaces pdftoppm subprocess)
|
||||
RUN ARCH=$(dpkg --print-architecture) && \
|
||||
case "$ARCH" in \
|
||||
amd64) PDFIUM_ARCH="linux-x64" ;; \
|
||||
arm64) PDFIUM_ARCH="linux-arm64" ;; \
|
||||
*) echo "Unsupported arch: $ARCH" && exit 1 ;; \
|
||||
esac && \
|
||||
wget -q "https://github.com/bblanchon/pdfium-binaries/releases/latest/download/pdfium-${PDFIUM_ARCH}.tgz" -O /tmp/pdfium.tgz && \
|
||||
tar -xzf /tmp/pdfium.tgz -C /tmp && \
|
||||
cp /tmp/lib/libpdfium.so /usr/local/lib/ && \
|
||||
rm -rf /tmp/pdfium.tgz /tmp/lib /tmp/include && \
|
||||
ldconfig
|
||||
COPY --from=builder /usr/local/bin/api /usr/local/bin/api
|
||||
COPY --from=builder /usr/local/cargo/bin/sqlx /usr/local/bin/sqlx
|
||||
COPY infra/migrations /app/migrations
|
||||
COPY apps/api/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
EXPOSE 7080
|
||||
CMD ["/usr/local/bin/entrypoint.sh"]
|
||||
|
||||
63
apps/api/entrypoint.sh
Normal file
63
apps/api/entrypoint.sh
Normal file
@@ -0,0 +1,63 @@
|
||||
#!/bin/sh
# Entrypoint for the API container:
#   1. If the database was created by the pre-sqlx migration system, baseline
#      it into sqlx's `_sqlx_migrations` tracking table (without re-running
#      the already-applied SQL files).
#   2. Run any pending migrations with `sqlx migrate run`.
#   3. exec the API binary.
set -e

# psql requires "postgresql://" but Rust/sqlx accepts both "postgres://" and "postgresql://"
PSQL_URL=$(echo "$DATABASE_URL" | sed 's|^postgres://|postgresql://|')

# Check 1: does the old schema exist (index_jobs table)?
HAS_OLD_TABLES=$(psql "$PSQL_URL" -tAc \
    "SELECT EXISTS(SELECT 1 FROM information_schema.tables WHERE table_name='index_jobs')::text" \
    2>/dev/null || echo "false")

# Check 2: is sqlx tracking present and non-empty?
HAS_SQLX_TABLE=$(psql "$PSQL_URL" -tAc \
    "SELECT EXISTS(SELECT 1 FROM information_schema.tables WHERE table_name='_sqlx_migrations')::text" \
    2>/dev/null || echo "false")

if [ "$HAS_SQLX_TABLE" = "true" ]; then
    HAS_SQLX_ROWS=$(psql "$PSQL_URL" -tAc \
        "SELECT EXISTS(SELECT 1 FROM _sqlx_migrations LIMIT 1)::text" \
        2>/dev/null || echo "false")
else
    HAS_SQLX_ROWS="false"
fi

echo "==> Migration check: old_tables=$HAS_OLD_TABLES sqlx_table=$HAS_SQLX_TABLE sqlx_rows=$HAS_SQLX_ROWS"

if [ "$HAS_OLD_TABLES" = "true" ] && [ "$HAS_SQLX_ROWS" = "false" ]; then
    echo "==> Upgrade from pre-sqlx migration system detected: creating baseline..."

    # IF NOT EXISTS keeps this idempotent if the entrypoint is re-run.
    psql "$PSQL_URL" -c "
        CREATE TABLE IF NOT EXISTS _sqlx_migrations (
            version BIGINT PRIMARY KEY,
            description TEXT NOT NULL,
            installed_on TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            success BOOLEAN NOT NULL,
            checksum BYTEA NOT NULL,
            execution_time BIGINT NOT NULL
        )
    "

    for f in /app/migrations/*.sql; do
        filename=$(basename "$f")
        # Take the numeric prefix, then strip leading zeros to get the integer
        # version (e.g. "0005_init.sql" -> "5"). Cutting at '_' BEFORE stripping
        # zeros keeps an all-zero prefix ("0000_*.sql") from collapsing to an
        # empty string (which would break the INSERT below); fall back to 0.
        version=$(echo "$filename" | cut -d'_' -f1 | sed 's/^0*//')
        version=${version:-0}
        description=$(echo "$filename" | sed 's/^[0-9]*_//' | sed 's/\.sql$//')
        # NOTE(review): presumably matches sqlx's own checksum algorithm
        # (SHA-384) so `sqlx migrate run` does not flag baselined files as
        # modified — the migrate step below would fail loudly on a mismatch.
        checksum=$(sha384sum "$f" | awk '{print $1}')

        psql "$PSQL_URL" -c "
            INSERT INTO _sqlx_migrations (version, description, installed_on, success, checksum, execution_time)
            VALUES ($version, '$description', NOW(), TRUE, decode('$checksum', 'hex'), 0)
            ON CONFLICT (version) DO NOTHING
        "
        echo " baselined: $filename"
    done

    echo "==> Baseline complete."
fi

echo "==> Running migrations..."
sqlx migrate run --source /app/migrations

echo "==> Starting API..."
exec /usr/local/bin/api
|
||||
51
apps/api/src/api_middleware.rs
Normal file
51
apps/api/src/api_middleware.rs
Normal file
@@ -0,0 +1,51 @@
|
||||
use axum::{
|
||||
extract::State,
|
||||
middleware::Next,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use std::time::Duration;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tracing::info;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
pub async fn request_counter(
|
||||
State(state): State<AppState>,
|
||||
req: axum::extract::Request,
|
||||
next: Next,
|
||||
) -> Response {
|
||||
state.metrics.requests_total.fetch_add(1, Ordering::Relaxed);
|
||||
let method = req.method().clone();
|
||||
let uri = req.uri().clone();
|
||||
let start = std::time::Instant::now();
|
||||
let response = next.run(req).await;
|
||||
let status = response.status().as_u16();
|
||||
let elapsed = start.elapsed();
|
||||
info!("{} {} {} {}ms", method, uri.path(), status, elapsed.as_millis());
|
||||
response
|
||||
}
|
||||
|
||||
pub async fn read_rate_limit(
|
||||
State(state): State<AppState>,
|
||||
req: axum::extract::Request,
|
||||
next: Next,
|
||||
) -> Response {
|
||||
let mut limiter = state.read_rate_limit.lock().await;
|
||||
if limiter.window_started_at.elapsed() >= Duration::from_secs(1) {
|
||||
limiter.window_started_at = std::time::Instant::now();
|
||||
limiter.requests_in_window = 0;
|
||||
}
|
||||
|
||||
let rate_limit = state.settings.read().await.rate_limit_per_second;
|
||||
if limiter.requests_in_window >= rate_limit {
|
||||
return (
|
||||
axum::http::StatusCode::TOO_MANY_REQUESTS,
|
||||
"rate limit exceeded",
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
|
||||
limiter.requests_in_window += 1;
|
||||
drop(limiter);
|
||||
next.run(req).await
|
||||
}
|
||||
@@ -8,7 +8,7 @@ use axum::{
|
||||
use chrono::Utc;
|
||||
use sqlx::Row;
|
||||
|
||||
use crate::{error::ApiError, AppState};
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum Scope {
|
||||
@@ -94,11 +94,15 @@ async fn authenticate(state: &AppState, token: &str) -> Result<Scope, ApiError>
|
||||
}
|
||||
|
||||
fn parse_prefix(token: &str) -> Option<&str> {
|
||||
let mut parts = token.split('_');
|
||||
let namespace = parts.next()?;
|
||||
let prefix = parts.next()?;
|
||||
let secret = parts.next()?;
|
||||
if namespace != "stl" || secret.is_empty() || prefix.len() < 6 {
|
||||
// Format: stl_{8-char prefix}_{secret}
|
||||
// Base64 URL_SAFE peut contenir '_', donc on ne peut pas splitter aveuglément
|
||||
let rest = token.strip_prefix("stl_")?;
|
||||
if rest.len() < 10 {
|
||||
// 8 (prefix) + 1 ('_') + 1 (secret min)
|
||||
return None;
|
||||
}
|
||||
let prefix = &rest[..8];
|
||||
if rest.as_bytes().get(8) != Some(&b'_') {
|
||||
return None;
|
||||
}
|
||||
Some(prefix)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -38,6 +38,13 @@ impl ApiError {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unprocessable_entity(message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
status: StatusCode::UNPROCESSABLE_ENTITY,
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn not_found(message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
status: StatusCode::NOT_FOUND,
|
||||
@@ -76,3 +83,9 @@ impl From<std::io::Error> for ApiError {
|
||||
Self::internal(format!("IO error: {err}"))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<reqwest::Error> for ApiError {
|
||||
fn from(err: reqwest::Error) -> Self {
|
||||
Self::internal(format!("HTTP client error: {err}"))
|
||||
}
|
||||
}
|
||||
|
||||
26
apps/api/src/handlers.rs
Normal file
26
apps/api/src/handlers.rs
Normal file
@@ -0,0 +1,26 @@
|
||||
use axum::{extract::State, Json};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
/// Liveness probe: always responds with the literal body `ok`.
pub async fn health() -> &'static str {
    "ok"
}
|
||||
|
||||
/// Redirects the docs entry point to the Swagger UI.
pub async fn docs_redirect() -> impl axum::response::IntoResponse {
    let swagger_ui = "/swagger-ui/";
    axum::response::Redirect::to(swagger_ui)
}
|
||||
|
||||
pub async fn ready(State(state): State<AppState>) -> Result<Json<serde_json::Value>, ApiError> {
|
||||
sqlx::query("SELECT 1").execute(&state.pool).await?;
|
||||
Ok(Json(serde_json::json!({"status": "ready"})))
|
||||
}
|
||||
|
||||
pub async fn metrics(State(state): State<AppState>) -> String {
|
||||
format!(
|
||||
"requests_total {}\npage_cache_hits {}\npage_cache_misses {}\n",
|
||||
state.metrics.requests_total.load(Ordering::Relaxed),
|
||||
state.metrics.page_cache_hits.load(Ordering::Relaxed),
|
||||
state.metrics.page_cache_misses.load(Ordering::Relaxed),
|
||||
)
|
||||
}
|
||||
@@ -8,7 +8,7 @@ use tokio_stream::Stream;
|
||||
use uuid::Uuid;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{error::ApiError, AppState};
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct RebuildRequest {
|
||||
@@ -24,6 +24,8 @@ pub struct IndexJobResponse {
|
||||
pub id: Uuid,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub library_id: Option<Uuid>,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub book_id: Option<Uuid>,
|
||||
pub r#type: String,
|
||||
pub status: String,
|
||||
#[schema(value_type = Option<String>)]
|
||||
@@ -53,12 +55,18 @@ pub struct IndexJobDetailResponse {
|
||||
pub id: Uuid,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub library_id: Option<Uuid>,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub book_id: Option<Uuid>,
|
||||
pub r#type: String,
|
||||
pub status: String,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub started_at: Option<DateTime<Utc>>,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub finished_at: Option<DateTime<Utc>>,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub phase2_started_at: Option<DateTime<Utc>>,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub generating_thumbnails_started_at: Option<DateTime<Utc>>,
|
||||
pub stats_json: Option<serde_json::Value>,
|
||||
pub error_opt: Option<String>,
|
||||
#[schema(value_type = String)]
|
||||
@@ -122,7 +130,7 @@ pub async fn enqueue_rebuild(
|
||||
.await?;
|
||||
|
||||
let row = sqlx::query(
|
||||
"SELECT id, library_id, type, status, started_at, finished_at, stats_json, error_opt, created_at FROM index_jobs WHERE id = $1",
|
||||
"SELECT id, library_id, book_id, type, status, started_at, finished_at, stats_json, error_opt, created_at FROM index_jobs WHERE id = $1",
|
||||
)
|
||||
.bind(id)
|
||||
.fetch_one(&state.pool)
|
||||
@@ -145,7 +153,7 @@ pub async fn enqueue_rebuild(
|
||||
)]
|
||||
pub async fn list_index_jobs(State(state): State<AppState>) -> Result<Json<Vec<IndexJobResponse>>, ApiError> {
|
||||
let rows = sqlx::query(
|
||||
"SELECT id, library_id, type, status, started_at, finished_at, stats_json, error_opt, created_at, progress_percent, processed_files, total_files FROM index_jobs ORDER BY created_at DESC LIMIT 100",
|
||||
"SELECT id, library_id, book_id, type, status, started_at, finished_at, stats_json, error_opt, created_at, progress_percent, processed_files, total_files FROM index_jobs ORDER BY created_at DESC LIMIT 100",
|
||||
)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
@@ -174,7 +182,7 @@ pub async fn cancel_job(
|
||||
id: axum::extract::Path<Uuid>,
|
||||
) -> Result<Json<IndexJobResponse>, ApiError> {
|
||||
let rows_affected = sqlx::query(
|
||||
"UPDATE index_jobs SET status = 'cancelled' WHERE id = $1 AND status IN ('pending', 'running', 'generating_thumbnails')",
|
||||
"UPDATE index_jobs SET status = 'cancelled' WHERE id = $1 AND status IN ('pending', 'running', 'extracting_pages', 'generating_thumbnails')",
|
||||
)
|
||||
.bind(id.0)
|
||||
.execute(&state.pool)
|
||||
@@ -185,7 +193,7 @@ pub async fn cancel_job(
|
||||
}
|
||||
|
||||
let row = sqlx::query(
|
||||
"SELECT id, library_id, type, status, started_at, finished_at, stats_json, error_opt, created_at, progress_percent, processed_files, total_files FROM index_jobs WHERE id = $1",
|
||||
"SELECT id, library_id, book_id, type, status, started_at, finished_at, stats_json, error_opt, created_at, progress_percent, processed_files, total_files FROM index_jobs WHERE id = $1",
|
||||
)
|
||||
.bind(id.0)
|
||||
.fetch_one(&state.pool)
|
||||
@@ -238,16 +246,16 @@ pub async fn list_folders(
|
||||
base_path.to_path_buf()
|
||||
};
|
||||
|
||||
// Ensure the path is within the libraries root
|
||||
let canonical_target = target_path.canonicalize().unwrap_or(target_path.clone());
|
||||
let canonical_base = base_path.canonicalize().unwrap_or(base_path.to_path_buf());
|
||||
// Ensure the path is within the libraries root (avoid canonicalize — burns fd on Docker mounts)
|
||||
let canonical_target = target_path.clone();
|
||||
let canonical_base = base_path.to_path_buf();
|
||||
|
||||
if !canonical_target.starts_with(&canonical_base) {
|
||||
return Err(ApiError::bad_request("Path is outside libraries root"));
|
||||
}
|
||||
|
||||
let mut folders = Vec::new();
|
||||
let depth = if params.get("path").is_some() {
|
||||
let depth = if params.contains_key("path") {
|
||||
canonical_target.strip_prefix(&canonical_base)
|
||||
.map(|p| p.components().count())
|
||||
.unwrap_or(0)
|
||||
@@ -255,19 +263,31 @@ pub async fn list_folders(
|
||||
0
|
||||
};
|
||||
|
||||
if let Ok(entries) = std::fs::read_dir(&canonical_target) {
|
||||
for entry in entries.flatten() {
|
||||
if entry.file_type().map(|ft| ft.is_dir()).unwrap_or(false) {
|
||||
let entries = std::fs::read_dir(&canonical_target)
|
||||
.map_err(|e| ApiError::internal(format!("cannot read directory {}: {}", canonical_target.display(), e)))?;
|
||||
|
||||
for entry in entries {
|
||||
let entry = match entry {
|
||||
Ok(e) => e,
|
||||
Err(e) => {
|
||||
tracing::warn!("[FOLDERS] entry error in {}: {}", canonical_target.display(), e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let is_dir = match entry.file_type() {
|
||||
Ok(ft) => ft.is_dir(),
|
||||
Err(e) => {
|
||||
tracing::warn!("[FOLDERS] cannot stat {}: {}", entry.path().display(), e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if is_dir {
|
||||
let name = entry.file_name().to_string_lossy().to_string();
|
||||
|
||||
// Check if this folder has children
|
||||
let has_children = if let Ok(sub_entries) = std::fs::read_dir(entry.path()) {
|
||||
sub_entries.flatten().any(|e| {
|
||||
e.file_type().map(|ft| ft.is_dir()).unwrap_or(false)
|
||||
})
|
||||
} else {
|
||||
false
|
||||
};
|
||||
// Check if this folder has children (best-effort, default to true on error)
|
||||
let has_children = std::fs::read_dir(entry.path())
|
||||
.map(|sub| sub.flatten().any(|e| e.file_type().map(|ft| ft.is_dir()).unwrap_or(false)))
|
||||
.unwrap_or(true);
|
||||
|
||||
// Calculate the full path relative to libraries root
|
||||
let full_path = if let Ok(relative) = entry.path().strip_prefix(&canonical_base) {
|
||||
@@ -284,7 +304,6 @@ pub async fn list_folders(
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
folders.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
Ok(Json(folders))
|
||||
@@ -294,6 +313,7 @@ pub fn map_row(row: sqlx::postgres::PgRow) -> IndexJobResponse {
|
||||
IndexJobResponse {
|
||||
id: row.get("id"),
|
||||
library_id: row.get("library_id"),
|
||||
book_id: row.try_get("book_id").ok().flatten(),
|
||||
r#type: row.get("type"),
|
||||
status: row.get("status"),
|
||||
started_at: row.get("started_at"),
|
||||
@@ -311,10 +331,13 @@ fn map_row_detail(row: sqlx::postgres::PgRow) -> IndexJobDetailResponse {
|
||||
IndexJobDetailResponse {
|
||||
id: row.get("id"),
|
||||
library_id: row.get("library_id"),
|
||||
book_id: row.try_get("book_id").ok().flatten(),
|
||||
r#type: row.get("type"),
|
||||
status: row.get("status"),
|
||||
started_at: row.get("started_at"),
|
||||
finished_at: row.get("finished_at"),
|
||||
phase2_started_at: row.try_get("phase2_started_at").ok().flatten(),
|
||||
generating_thumbnails_started_at: row.try_get("generating_thumbnails_started_at").ok().flatten(),
|
||||
stats_json: row.get("stats_json"),
|
||||
error_opt: row.get("error_opt"),
|
||||
created_at: row.get("created_at"),
|
||||
@@ -339,9 +362,9 @@ fn map_row_detail(row: sqlx::postgres::PgRow) -> IndexJobDetailResponse {
|
||||
)]
|
||||
pub async fn get_active_jobs(State(state): State<AppState>) -> Result<Json<Vec<IndexJobResponse>>, ApiError> {
|
||||
let rows = sqlx::query(
|
||||
"SELECT id, library_id, type, status, started_at, finished_at, stats_json, error_opt, created_at, progress_percent, processed_files, total_files
|
||||
"SELECT id, library_id, book_id, type, status, started_at, finished_at, stats_json, error_opt, created_at, progress_percent, processed_files, total_files
|
||||
FROM index_jobs
|
||||
WHERE status IN ('pending', 'running', 'generating_thumbnails')
|
||||
WHERE status IN ('pending', 'running', 'extracting_pages', 'generating_thumbnails')
|
||||
ORDER BY created_at ASC"
|
||||
)
|
||||
.fetch_all(&state.pool)
|
||||
@@ -371,8 +394,8 @@ pub async fn get_job_details(
|
||||
id: axum::extract::Path<Uuid>,
|
||||
) -> Result<Json<IndexJobDetailResponse>, ApiError> {
|
||||
let row = sqlx::query(
|
||||
"SELECT id, library_id, type, status, started_at, finished_at, stats_json, error_opt, created_at,
|
||||
current_file, progress_percent, total_files, processed_files
|
||||
"SELECT id, library_id, book_id, type, status, started_at, finished_at, phase2_started_at, generating_thumbnails_started_at,
|
||||
stats_json, error_opt, created_at, current_file, progress_percent, total_files, processed_files
|
||||
FROM index_jobs WHERE id = $1"
|
||||
)
|
||||
.bind(id.0)
|
||||
|
||||
398
apps/api/src/komga.rs
Normal file
398
apps/api/src/komga.rs
Normal file
@@ -0,0 +1,398 @@
|
||||
use axum::{extract::State, Json};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::Row;
|
||||
use std::collections::HashMap;
|
||||
use utoipa::ToSchema;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
// ─── Komga API types ─────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct KomgaBooksResponse {
|
||||
content: Vec<KomgaBook>,
|
||||
#[serde(rename = "totalPages")]
|
||||
total_pages: i32,
|
||||
number: i32,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct KomgaBook {
|
||||
name: String,
|
||||
#[serde(rename = "seriesTitle")]
|
||||
series_title: String,
|
||||
metadata: KomgaBookMetadata,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct KomgaBookMetadata {
|
||||
title: String,
|
||||
}
|
||||
|
||||
// ─── Request / Response ──────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct KomgaSyncRequest {
|
||||
pub url: String,
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct KomgaSyncResponse {
|
||||
#[schema(value_type = String)]
|
||||
pub id: Uuid,
|
||||
pub komga_url: String,
|
||||
pub total_komga_read: i64,
|
||||
pub matched: i64,
|
||||
pub already_read: i64,
|
||||
pub newly_marked: i64,
|
||||
pub matched_books: Vec<String>,
|
||||
pub newly_marked_books: Vec<String>,
|
||||
pub unmatched: Vec<String>,
|
||||
#[schema(value_type = String)]
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct KomgaSyncReportSummary {
|
||||
#[schema(value_type = String)]
|
||||
pub id: Uuid,
|
||||
pub komga_url: String,
|
||||
pub total_komga_read: i64,
|
||||
pub matched: i64,
|
||||
pub already_read: i64,
|
||||
pub newly_marked: i64,
|
||||
pub unmatched_count: i32,
|
||||
#[schema(value_type = String)]
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
// ─── Handlers ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Sync read books from a Komga server
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/komga/sync",
|
||||
tag = "komga",
|
||||
request_body = KomgaSyncRequest,
|
||||
responses(
|
||||
(status = 200, body = KomgaSyncResponse),
|
||||
(status = 400, description = "Bad request"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 500, description = "Komga connection or sync error"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn sync_komga_read_books(
|
||||
State(state): State<AppState>,
|
||||
Json(body): Json<KomgaSyncRequest>,
|
||||
) -> Result<Json<KomgaSyncResponse>, ApiError> {
|
||||
let url = body.url.trim_end_matches('/').to_string();
|
||||
if url.is_empty() {
|
||||
return Err(ApiError::bad_request("url is required"));
|
||||
}
|
||||
|
||||
// Build HTTP client with basic auth
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(30))
|
||||
.build()
|
||||
.map_err(|e| ApiError::internal(format!("failed to build HTTP client: {e}")))?;
|
||||
|
||||
// Paginate through all READ books from Komga
|
||||
let mut komga_books: Vec<(String, String)> = Vec::new(); // (series_title, title)
|
||||
let mut page = 0;
|
||||
let page_size = 100;
|
||||
let max_pages = 500;
|
||||
|
||||
loop {
|
||||
let resp = client
|
||||
.post(format!("{url}/api/v1/books/list?page={page}&size={page_size}"))
|
||||
.basic_auth(&body.username, Some(&body.password))
|
||||
.header("Content-Type", "application/json")
|
||||
.json(&serde_json::json!({ "condition": { "readStatus": { "operator": "is", "value": "READ" } } }))
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| ApiError::internal(format!("Komga request failed: {e}")))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
return Err(ApiError::internal(format!(
|
||||
"Komga returned {status}: {text}"
|
||||
)));
|
||||
}
|
||||
|
||||
let data: KomgaBooksResponse = resp
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| ApiError::internal(format!("Failed to parse Komga response: {e}")))?;
|
||||
|
||||
for book in &data.content {
|
||||
let title = if !book.metadata.title.is_empty() {
|
||||
&book.metadata.title
|
||||
} else {
|
||||
&book.name
|
||||
};
|
||||
komga_books.push((book.series_title.clone(), title.clone()));
|
||||
}
|
||||
|
||||
if data.number >= data.total_pages - 1 || page >= max_pages {
|
||||
break;
|
||||
}
|
||||
page += 1;
|
||||
}
|
||||
|
||||
let total_komga_read = komga_books.len() as i64;
|
||||
|
||||
// Build local lookup maps
|
||||
let rows = sqlx::query(
|
||||
"SELECT id, title, COALESCE(series, '') as series, LOWER(title) as title_lower, LOWER(COALESCE(series, '')) as series_lower FROM books",
|
||||
)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
|
||||
type BookEntry = (Uuid, String, String);
|
||||
// Primary: (series_lower, title_lower) -> Vec<(Uuid, title, series)>
|
||||
let mut primary_map: HashMap<(String, String), Vec<BookEntry>> = HashMap::new();
|
||||
// Secondary: title_lower -> Vec<(Uuid, title, series)>
|
||||
let mut secondary_map: HashMap<String, Vec<BookEntry>> = HashMap::new();
|
||||
|
||||
for row in &rows {
|
||||
let id: Uuid = row.get("id");
|
||||
let title: String = row.get("title");
|
||||
let series: String = row.get("series");
|
||||
let title_lower: String = row.get("title_lower");
|
||||
let series_lower: String = row.get("series_lower");
|
||||
let entry = (id, title, series);
|
||||
|
||||
primary_map
|
||||
.entry((series_lower, title_lower.clone()))
|
||||
.or_default()
|
||||
.push(entry.clone());
|
||||
secondary_map.entry(title_lower).or_default().push(entry);
|
||||
}
|
||||
|
||||
// Match Komga books to local books
|
||||
let mut matched_entries: Vec<(Uuid, String)> = Vec::new(); // (id, display_title)
|
||||
let mut unmatched: Vec<String> = Vec::new();
|
||||
|
||||
for (series_title, title) in &komga_books {
|
||||
let title_lower = title.to_lowercase();
|
||||
let series_lower = series_title.to_lowercase();
|
||||
|
||||
let found = if let Some(entries) = primary_map.get(&(series_lower.clone(), title_lower.clone())) {
|
||||
Some(entries)
|
||||
} else {
|
||||
secondary_map.get(&title_lower)
|
||||
};
|
||||
|
||||
if let Some(entries) = found {
|
||||
for (id, local_title, local_series) in entries {
|
||||
let display = if local_series.is_empty() {
|
||||
local_title.clone()
|
||||
} else {
|
||||
format!("{local_series} - {local_title}")
|
||||
};
|
||||
matched_entries.push((*id, display));
|
||||
}
|
||||
} else if series_title.is_empty() {
|
||||
unmatched.push(title.clone());
|
||||
} else {
|
||||
unmatched.push(format!("{series_title} - {title}"));
|
||||
}
|
||||
}
|
||||
|
||||
// Deduplicate by ID
|
||||
matched_entries.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
matched_entries.dedup_by(|a, b| a.0 == b.0);
|
||||
|
||||
let matched_ids: Vec<Uuid> = matched_entries.iter().map(|(id, _)| *id).collect();
|
||||
let matched = matched_ids.len() as i64;
|
||||
let mut already_read: i64 = 0;
|
||||
let mut already_read_ids: std::collections::HashSet<Uuid> = std::collections::HashSet::new();
|
||||
|
||||
if !matched_ids.is_empty() {
|
||||
// Get already-read book IDs
|
||||
let ar_rows = sqlx::query(
|
||||
"SELECT book_id FROM book_reading_progress WHERE book_id = ANY($1) AND status = 'read'",
|
||||
)
|
||||
.bind(&matched_ids)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
|
||||
for row in &ar_rows {
|
||||
already_read_ids.insert(row.get("book_id"));
|
||||
}
|
||||
already_read = already_read_ids.len() as i64;
|
||||
|
||||
// Bulk upsert all matched books as read
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO book_reading_progress (book_id, status, current_page, last_read_at, updated_at)
|
||||
SELECT unnest($1::uuid[]), 'read', NULL, NOW(), NOW()
|
||||
ON CONFLICT (book_id) DO UPDATE
|
||||
SET status = 'read',
|
||||
current_page = NULL,
|
||||
last_read_at = NOW(),
|
||||
updated_at = NOW()
|
||||
WHERE book_reading_progress.status != 'read'
|
||||
"#,
|
||||
)
|
||||
.bind(&matched_ids)
|
||||
.execute(&state.pool)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let newly_marked = matched - already_read;
|
||||
|
||||
// Build matched_books and newly_marked_books lists
|
||||
let mut newly_marked_books: Vec<String> = Vec::new();
|
||||
let mut matched_books: Vec<String> = Vec::new();
|
||||
for (id, title) in &matched_entries {
|
||||
if !already_read_ids.contains(id) {
|
||||
newly_marked_books.push(title.clone());
|
||||
}
|
||||
matched_books.push(title.clone());
|
||||
}
|
||||
// Sort: newly marked first, then alphabetical
|
||||
let newly_marked_set: std::collections::HashSet<&str> =
|
||||
newly_marked_books.iter().map(|s| s.as_str()).collect();
|
||||
matched_books.sort_by(|a, b| {
|
||||
let a_new = newly_marked_set.contains(a.as_str());
|
||||
let b_new = newly_marked_set.contains(b.as_str());
|
||||
b_new.cmp(&a_new).then(a.cmp(b))
|
||||
});
|
||||
newly_marked_books.sort();
|
||||
|
||||
// Save sync report
|
||||
let unmatched_json = serde_json::to_value(&unmatched).unwrap_or_default();
|
||||
let matched_books_json = serde_json::to_value(&matched_books).unwrap_or_default();
|
||||
let newly_marked_books_json = serde_json::to_value(&newly_marked_books).unwrap_or_default();
|
||||
let report_row = sqlx::query(
|
||||
r#"
|
||||
INSERT INTO komga_sync_reports (komga_url, total_komga_read, matched, already_read, newly_marked, matched_books, newly_marked_books, unmatched)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
RETURNING id, created_at
|
||||
"#,
|
||||
)
|
||||
.bind(&url)
|
||||
.bind(total_komga_read)
|
||||
.bind(matched)
|
||||
.bind(already_read)
|
||||
.bind(newly_marked)
|
||||
.bind(&matched_books_json)
|
||||
.bind(&newly_marked_books_json)
|
||||
.bind(&unmatched_json)
|
||||
.fetch_one(&state.pool)
|
||||
.await?;
|
||||
|
||||
Ok(Json(KomgaSyncResponse {
|
||||
id: report_row.get("id"),
|
||||
komga_url: url,
|
||||
total_komga_read,
|
||||
matched,
|
||||
already_read,
|
||||
newly_marked,
|
||||
matched_books,
|
||||
newly_marked_books,
|
||||
unmatched,
|
||||
created_at: report_row.get("created_at"),
|
||||
}))
|
||||
}
|
||||
|
||||
/// List Komga sync reports (most recent first)
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/komga/reports",
|
||||
tag = "komga",
|
||||
responses(
|
||||
(status = 200, body = Vec<KomgaSyncReportSummary>),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn list_sync_reports(
|
||||
State(state): State<AppState>,
|
||||
) -> Result<Json<Vec<KomgaSyncReportSummary>>, ApiError> {
|
||||
let rows = sqlx::query(
|
||||
r#"
|
||||
SELECT id, komga_url, total_komga_read, matched, already_read, newly_marked,
|
||||
jsonb_array_length(unmatched) as unmatched_count, created_at
|
||||
FROM komga_sync_reports
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 20
|
||||
"#,
|
||||
)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
|
||||
let reports: Vec<KomgaSyncReportSummary> = rows
|
||||
.iter()
|
||||
.map(|row| KomgaSyncReportSummary {
|
||||
id: row.get("id"),
|
||||
komga_url: row.get("komga_url"),
|
||||
total_komga_read: row.get("total_komga_read"),
|
||||
matched: row.get("matched"),
|
||||
already_read: row.get("already_read"),
|
||||
newly_marked: row.get("newly_marked"),
|
||||
unmatched_count: row.get("unmatched_count"),
|
||||
created_at: row.get("created_at"),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(Json(reports))
|
||||
}
|
||||
|
||||
/// Get a specific sync report with full unmatched list
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/komga/reports/{id}",
|
||||
tag = "komga",
|
||||
params(("id" = String, Path, description = "Report UUID")),
|
||||
responses(
|
||||
(status = 200, body = KomgaSyncResponse),
|
||||
(status = 404, description = "Report not found"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn get_sync_report(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(id): axum::extract::Path<Uuid>,
|
||||
) -> Result<Json<KomgaSyncResponse>, ApiError> {
|
||||
let row = sqlx::query(
|
||||
r#"
|
||||
SELECT id, komga_url, total_komga_read, matched, already_read, newly_marked, matched_books, newly_marked_books, unmatched, created_at
|
||||
FROM komga_sync_reports
|
||||
WHERE id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(id)
|
||||
.fetch_optional(&state.pool)
|
||||
.await?;
|
||||
|
||||
let row = row.ok_or_else(|| ApiError::not_found("report not found"))?;
|
||||
|
||||
let matched_books_json: serde_json::Value = row.try_get("matched_books").unwrap_or(serde_json::Value::Array(vec![]));
|
||||
let matched_books: Vec<String> = serde_json::from_value(matched_books_json).unwrap_or_default();
|
||||
let newly_marked_books_json: serde_json::Value = row.try_get("newly_marked_books").unwrap_or(serde_json::Value::Array(vec![]));
|
||||
let newly_marked_books: Vec<String> = serde_json::from_value(newly_marked_books_json).unwrap_or_default();
|
||||
let unmatched_json: serde_json::Value = row.get("unmatched");
|
||||
let unmatched: Vec<String> = serde_json::from_value(unmatched_json).unwrap_or_default();
|
||||
|
||||
Ok(Json(KomgaSyncResponse {
|
||||
id: row.get("id"),
|
||||
komga_url: row.get("komga_url"),
|
||||
total_komga_read: row.get("total_komga_read"),
|
||||
matched: row.get("matched"),
|
||||
already_read: row.get("already_read"),
|
||||
newly_marked: row.get("newly_marked"),
|
||||
matched_books,
|
||||
newly_marked_books,
|
||||
unmatched,
|
||||
created_at: row.get("created_at"),
|
||||
}))
|
||||
}
|
||||
@@ -6,7 +6,7 @@ use sqlx::Row;
|
||||
use uuid::Uuid;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{error::ApiError, AppState};
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct LibraryResponse {
|
||||
@@ -18,8 +18,11 @@ pub struct LibraryResponse {
|
||||
pub book_count: i64,
|
||||
pub monitor_enabled: bool,
|
||||
pub scan_mode: String,
|
||||
#[schema(value_type = Option<String>)]
|
||||
pub next_scan_at: Option<chrono::DateTime<chrono::Utc>>,
|
||||
pub watcher_enabled: bool,
|
||||
pub metadata_provider: Option<String>,
|
||||
pub fallback_metadata_provider: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
@@ -44,7 +47,7 @@ pub struct CreateLibraryRequest {
|
||||
)]
|
||||
pub async fn list_libraries(State(state): State<AppState>) -> Result<Json<Vec<LibraryResponse>>, ApiError> {
|
||||
let rows = sqlx::query(
|
||||
"SELECT l.id, l.name, l.root_path, l.enabled, l.monitor_enabled, l.scan_mode, l.next_scan_at, l.watcher_enabled,
|
||||
"SELECT l.id, l.name, l.root_path, l.enabled, l.monitor_enabled, l.scan_mode, l.next_scan_at, l.watcher_enabled, l.metadata_provider, l.fallback_metadata_provider,
|
||||
(SELECT COUNT(*) FROM books b WHERE b.library_id = l.id) as book_count
|
||||
FROM libraries l ORDER BY l.created_at DESC"
|
||||
)
|
||||
@@ -63,6 +66,8 @@ pub async fn list_libraries(State(state): State<AppState>) -> Result<Json<Vec<Li
|
||||
scan_mode: row.get("scan_mode"),
|
||||
next_scan_at: row.get("next_scan_at"),
|
||||
watcher_enabled: row.get("watcher_enabled"),
|
||||
metadata_provider: row.get("metadata_provider"),
|
||||
fallback_metadata_provider: row.get("fallback_metadata_provider"),
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -114,6 +119,8 @@ pub async fn create_library(
|
||||
scan_mode: "manual".to_string(),
|
||||
next_scan_at: None,
|
||||
watcher_enabled: false,
|
||||
metadata_provider: None,
|
||||
fallback_metadata_provider: None,
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -155,14 +162,19 @@ fn canonicalize_library_root(root_path: &str) -> Result<PathBuf, ApiError> {
|
||||
return Err(ApiError::bad_request("root_path must be absolute"));
|
||||
}
|
||||
|
||||
let canonical = std::fs::canonicalize(path)
|
||||
.map_err(|_| ApiError::bad_request("root_path does not exist or is inaccessible"))?;
|
||||
|
||||
if !canonical.is_dir() {
|
||||
// Avoid fs::canonicalize — it opens extra file descriptors to resolve symlinks
|
||||
// and can fail on Docker volume mounts (ro, cached) when fd limits are low.
|
||||
if !path.exists() {
|
||||
return Err(ApiError::bad_request(format!(
|
||||
"root_path does not exist: {}",
|
||||
root_path
|
||||
)));
|
||||
}
|
||||
if !path.is_dir() {
|
||||
return Err(ApiError::bad_request("root_path must point to a directory"));
|
||||
}
|
||||
|
||||
Ok(canonical)
|
||||
Ok(path.to_path_buf())
|
||||
}
|
||||
|
||||
use crate::index_jobs::{IndexJobResponse, RebuildRequest};
|
||||
@@ -275,7 +287,7 @@ pub async fn update_monitoring(
|
||||
let watcher_enabled = input.watcher_enabled.unwrap_or(false);
|
||||
|
||||
let result = sqlx::query(
|
||||
"UPDATE libraries SET monitor_enabled = $2, scan_mode = $3, next_scan_at = $4, watcher_enabled = $5 WHERE id = $1 RETURNING id, name, root_path, enabled, monitor_enabled, scan_mode, next_scan_at, watcher_enabled"
|
||||
"UPDATE libraries SET monitor_enabled = $2, scan_mode = $3, next_scan_at = $4, watcher_enabled = $5 WHERE id = $1 RETURNING id, name, root_path, enabled, monitor_enabled, scan_mode, next_scan_at, watcher_enabled, metadata_provider, fallback_metadata_provider"
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(input.monitor_enabled)
|
||||
@@ -304,5 +316,71 @@ pub async fn update_monitoring(
|
||||
scan_mode: row.get("scan_mode"),
|
||||
next_scan_at: row.get("next_scan_at"),
|
||||
watcher_enabled: row.get("watcher_enabled"),
|
||||
metadata_provider: row.get("metadata_provider"),
|
||||
fallback_metadata_provider: row.get("fallback_metadata_provider"),
|
||||
}))
|
||||
}
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct UpdateMetadataProviderRequest {
|
||||
pub metadata_provider: Option<String>,
|
||||
pub fallback_metadata_provider: Option<String>,
|
||||
}
|
||||
|
||||
/// Update the metadata provider for a library
|
||||
#[utoipa::path(
|
||||
patch,
|
||||
path = "/libraries/{id}/metadata-provider",
|
||||
tag = "libraries",
|
||||
params(
|
||||
("id" = String, Path, description = "Library UUID"),
|
||||
),
|
||||
request_body = UpdateMetadataProviderRequest,
|
||||
responses(
|
||||
(status = 200, body = LibraryResponse),
|
||||
(status = 404, description = "Library not found"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 403, description = "Forbidden - Admin scope required"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn update_metadata_provider(
|
||||
State(state): State<AppState>,
|
||||
AxumPath(library_id): AxumPath<Uuid>,
|
||||
Json(input): Json<UpdateMetadataProviderRequest>,
|
||||
) -> Result<Json<LibraryResponse>, ApiError> {
|
||||
let provider = input.metadata_provider.as_deref().filter(|s| !s.is_empty());
|
||||
let fallback = input.fallback_metadata_provider.as_deref().filter(|s| !s.is_empty());
|
||||
|
||||
let result = sqlx::query(
|
||||
"UPDATE libraries SET metadata_provider = $2, fallback_metadata_provider = $3 WHERE id = $1 RETURNING id, name, root_path, enabled, monitor_enabled, scan_mode, next_scan_at, watcher_enabled, metadata_provider, fallback_metadata_provider"
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(provider)
|
||||
.bind(fallback)
|
||||
.fetch_optional(&state.pool)
|
||||
.await?;
|
||||
|
||||
let Some(row) = result else {
|
||||
return Err(ApiError::not_found("library not found"));
|
||||
};
|
||||
|
||||
let book_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM books WHERE library_id = $1")
|
||||
.bind(library_id)
|
||||
.fetch_one(&state.pool)
|
||||
.await?;
|
||||
|
||||
Ok(Json(LibraryResponse {
|
||||
id: row.get("id"),
|
||||
name: row.get("name"),
|
||||
root_path: row.get("root_path"),
|
||||
enabled: row.get("enabled"),
|
||||
book_count,
|
||||
monitor_enabled: row.get("monitor_enabled"),
|
||||
scan_mode: row.get("scan_mode"),
|
||||
next_scan_at: row.get("next_scan_at"),
|
||||
watcher_enabled: row.get("watcher_enabled"),
|
||||
metadata_provider: row.get("metadata_provider"),
|
||||
fallback_metadata_provider: row.get("fallback_metadata_provider"),
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -1,70 +1,43 @@
|
||||
mod auth;
|
||||
mod books;
|
||||
mod error;
|
||||
mod handlers;
|
||||
mod index_jobs;
|
||||
mod komga;
|
||||
mod libraries;
|
||||
mod metadata;
|
||||
mod metadata_batch;
|
||||
mod metadata_refresh;
|
||||
mod metadata_providers;
|
||||
mod api_middleware;
|
||||
mod openapi;
|
||||
mod pages;
|
||||
mod reading_progress;
|
||||
mod search;
|
||||
mod settings;
|
||||
mod state;
|
||||
mod stats;
|
||||
mod thumbnails;
|
||||
mod tokens;
|
||||
|
||||
use std::{
|
||||
num::NonZeroUsize,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use axum::{
|
||||
middleware,
|
||||
response::IntoResponse,
|
||||
routing::{delete, get},
|
||||
Json, Router,
|
||||
Router,
|
||||
};
|
||||
use utoipa::OpenApi;
|
||||
use utoipa_swagger_ui::SwaggerUi;
|
||||
use lru::LruCache;
|
||||
use std::num::NonZeroUsize;
|
||||
use stripstream_core::config::ApiConfig;
|
||||
use sqlx::postgres::PgPoolOptions;
|
||||
use tokio::sync::{Mutex, Semaphore};
|
||||
use tokio::sync::{Mutex, RwLock, Semaphore};
|
||||
use tracing::info;
|
||||
|
||||
#[derive(Clone)]
|
||||
struct AppState {
|
||||
pool: sqlx::PgPool,
|
||||
bootstrap_token: Arc<str>,
|
||||
meili_url: Arc<str>,
|
||||
meili_master_key: Arc<str>,
|
||||
page_cache: Arc<Mutex<LruCache<String, Arc<Vec<u8>>>>>,
|
||||
page_render_limit: Arc<Semaphore>,
|
||||
metrics: Arc<Metrics>,
|
||||
read_rate_limit: Arc<Mutex<ReadRateLimit>>,
|
||||
}
|
||||
|
||||
struct Metrics {
|
||||
requests_total: AtomicU64,
|
||||
page_cache_hits: AtomicU64,
|
||||
page_cache_misses: AtomicU64,
|
||||
}
|
||||
|
||||
struct ReadRateLimit {
|
||||
window_started_at: Instant,
|
||||
requests_in_window: u32,
|
||||
}
|
||||
|
||||
impl Metrics {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
requests_total: AtomicU64::new(0),
|
||||
page_cache_hits: AtomicU64::new(0),
|
||||
page_cache_misses: AtomicU64::new(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
use crate::state::{load_concurrent_renders, load_dynamic_settings, AppState, Metrics, ReadRateLimit};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
@@ -80,18 +53,33 @@ async fn main() -> anyhow::Result<()> {
|
||||
.connect(&config.database_url)
|
||||
.await?;
|
||||
|
||||
// Load concurrent_renders from settings, default to 8
|
||||
let concurrent_renders = load_concurrent_renders(&pool).await;
|
||||
info!("Using concurrent_renders limit: {}", concurrent_renders);
|
||||
|
||||
let dynamic_settings = load_dynamic_settings(&pool).await;
|
||||
info!(
|
||||
"Dynamic settings: rate_limit={}, timeout={}s, format={}, quality={}, filter={}, max_width={}, cache_dir={}",
|
||||
dynamic_settings.rate_limit_per_second,
|
||||
dynamic_settings.timeout_seconds,
|
||||
dynamic_settings.image_format,
|
||||
dynamic_settings.image_quality,
|
||||
dynamic_settings.image_filter,
|
||||
dynamic_settings.image_max_width,
|
||||
dynamic_settings.cache_directory,
|
||||
);
|
||||
|
||||
let state = AppState {
|
||||
pool,
|
||||
bootstrap_token: Arc::from(config.api_bootstrap_token),
|
||||
meili_url: Arc::from(config.meili_url),
|
||||
meili_master_key: Arc::from(config.meili_master_key),
|
||||
page_cache: Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(512).expect("non-zero")))),
|
||||
page_render_limit: Arc::new(Semaphore::new(8)),
|
||||
page_render_limit: Arc::new(Semaphore::new(concurrent_renders)),
|
||||
metrics: Arc::new(Metrics::new()),
|
||||
read_rate_limit: Arc::new(Mutex::new(ReadRateLimit {
|
||||
window_started_at: Instant::now(),
|
||||
requests_in_window: 0,
|
||||
})),
|
||||
settings: Arc::new(RwLock::new(dynamic_settings)),
|
||||
};
|
||||
|
||||
let admin_routes = Router::new()
|
||||
@@ -99,6 +87,10 @@ async fn main() -> anyhow::Result<()> {
|
||||
.route("/libraries/:id", delete(libraries::delete_library))
|
||||
.route("/libraries/:id/scan", axum::routing::post(libraries::scan_library))
|
||||
.route("/libraries/:id/monitoring", axum::routing::patch(libraries::update_monitoring))
|
||||
.route("/libraries/:id/metadata-provider", axum::routing::patch(libraries::update_metadata_provider))
|
||||
.route("/books/:id", axum::routing::patch(books::update_book))
|
||||
.route("/books/:id/convert", axum::routing::post(books::convert_book))
|
||||
.route("/libraries/:library_id/series/:name", axum::routing::patch(books::update_series))
|
||||
.route("/index/rebuild", axum::routing::post(index_jobs::enqueue_rebuild))
|
||||
.route("/index/thumbnails/rebuild", axum::routing::post(thumbnails::start_thumbnails_rebuild))
|
||||
.route("/index/thumbnails/regenerate", axum::routing::post(thumbnails::start_thumbnails_regenerate))
|
||||
@@ -106,12 +98,27 @@ async fn main() -> anyhow::Result<()> {
|
||||
.route("/index/jobs/active", get(index_jobs::get_active_jobs))
|
||||
.route("/index/jobs/:id", get(index_jobs::get_job_details))
|
||||
.route("/index/jobs/:id/stream", get(index_jobs::stream_job_progress))
|
||||
.route("/index/jobs/:id/thumbnails/checkup", axum::routing::post(thumbnails::start_checkup))
|
||||
.route("/index/jobs/:id/errors", get(index_jobs::get_job_errors))
|
||||
.route("/index/cancel/:id", axum::routing::post(index_jobs::cancel_job))
|
||||
.route("/folders", get(index_jobs::list_folders))
|
||||
.route("/admin/tokens", get(tokens::list_tokens).post(tokens::create_token))
|
||||
.route("/admin/tokens/:id", delete(tokens::revoke_token))
|
||||
.route("/admin/tokens/:id/delete", axum::routing::post(tokens::delete_token))
|
||||
.route("/komga/sync", axum::routing::post(komga::sync_komga_read_books))
|
||||
.route("/komga/reports", get(komga::list_sync_reports))
|
||||
.route("/komga/reports/:id", get(komga::get_sync_report))
|
||||
.route("/metadata/search", axum::routing::post(metadata::search_metadata))
|
||||
.route("/metadata/match", axum::routing::post(metadata::create_metadata_match))
|
||||
.route("/metadata/approve/:id", axum::routing::post(metadata::approve_metadata))
|
||||
.route("/metadata/reject/:id", axum::routing::post(metadata::reject_metadata))
|
||||
.route("/metadata/links", get(metadata::get_metadata_links))
|
||||
.route("/metadata/missing/:id", get(metadata::get_missing_books))
|
||||
.route("/metadata/links/:id", delete(metadata::delete_metadata_link))
|
||||
.route("/metadata/batch", axum::routing::post(metadata_batch::start_batch))
|
||||
.route("/metadata/batch/:id/report", get(metadata_batch::get_batch_report))
|
||||
.route("/metadata/batch/:id/results", get(metadata_batch::get_batch_results))
|
||||
.route("/metadata/refresh", axum::routing::post(metadata_refresh::start_refresh))
|
||||
.route("/metadata/refresh/:id/report", get(metadata_refresh::get_refresh_report))
|
||||
.merge(settings::settings_routes())
|
||||
.route_layer(middleware::from_fn_with_state(
|
||||
state.clone(),
|
||||
@@ -120,26 +127,35 @@ async fn main() -> anyhow::Result<()> {
|
||||
|
||||
let read_routes = Router::new()
|
||||
.route("/books", get(books::list_books))
|
||||
.route("/books/ongoing", get(books::ongoing_books))
|
||||
.route("/books/:id", get(books::get_book))
|
||||
.route("/books/:id/thumbnail", get(books::get_thumbnail))
|
||||
.route("/books/:id/pages/:n", get(pages::get_page))
|
||||
.route("/books/:id/progress", get(reading_progress::get_reading_progress).patch(reading_progress::update_reading_progress))
|
||||
.route("/libraries/:library_id/series", get(books::list_series))
|
||||
.route("/libraries/:library_id/series/:name/metadata", get(books::get_series_metadata))
|
||||
.route("/series", get(books::list_all_series))
|
||||
.route("/series/ongoing", get(books::ongoing_series))
|
||||
.route("/series/statuses", get(books::series_statuses))
|
||||
.route("/series/provider-statuses", get(books::provider_statuses))
|
||||
.route("/series/mark-read", axum::routing::post(reading_progress::mark_series_read))
|
||||
.route("/stats", get(stats::get_stats))
|
||||
.route("/search", get(search::search_books))
|
||||
.route_layer(middleware::from_fn_with_state(state.clone(), read_rate_limit))
|
||||
.route_layer(middleware::from_fn_with_state(state.clone(), api_middleware::read_rate_limit))
|
||||
.route_layer(middleware::from_fn_with_state(
|
||||
state.clone(),
|
||||
auth::require_read,
|
||||
));
|
||||
|
||||
let app = Router::new()
|
||||
.route("/health", get(health))
|
||||
.route("/ready", get(ready))
|
||||
.route("/metrics", get(metrics))
|
||||
.route("/docs", get(docs_redirect))
|
||||
.route("/health", get(handlers::health))
|
||||
.route("/ready", get(handlers::ready))
|
||||
.route("/metrics", get(handlers::metrics))
|
||||
.route("/docs", get(handlers::docs_redirect))
|
||||
.merge(SwaggerUi::new("/swagger-ui").url("/openapi.json", openapi::ApiDoc::openapi()))
|
||||
.merge(admin_routes)
|
||||
.merge(read_routes)
|
||||
.layer(middleware::from_fn_with_state(state.clone(), request_counter))
|
||||
.layer(middleware::from_fn_with_state(state.clone(), api_middleware::request_counter))
|
||||
.with_state(state);
|
||||
|
||||
let listener = tokio::net::TcpListener::bind(&config.listen_addr).await?;
|
||||
@@ -148,57 +164,3 @@ async fn main() -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn health() -> &'static str {
|
||||
"ok"
|
||||
}
|
||||
|
||||
async fn docs_redirect() -> impl axum::response::IntoResponse {
|
||||
axum::response::Redirect::to("/swagger-ui/")
|
||||
}
|
||||
|
||||
async fn ready(axum::extract::State(state): axum::extract::State<AppState>) -> Result<Json<serde_json::Value>, error::ApiError> {
|
||||
sqlx::query("SELECT 1").execute(&state.pool).await?;
|
||||
Ok(Json(serde_json::json!({"status": "ready"})))
|
||||
}
|
||||
|
||||
async fn metrics(axum::extract::State(state): axum::extract::State<AppState>) -> String {
|
||||
format!(
|
||||
"requests_total {}\npage_cache_hits {}\npage_cache_misses {}\n",
|
||||
state.metrics.requests_total.load(Ordering::Relaxed),
|
||||
state.metrics.page_cache_hits.load(Ordering::Relaxed),
|
||||
state.metrics.page_cache_misses.load(Ordering::Relaxed),
|
||||
)
|
||||
}
|
||||
|
||||
async fn request_counter(
|
||||
axum::extract::State(state): axum::extract::State<AppState>,
|
||||
req: axum::extract::Request,
|
||||
next: axum::middleware::Next,
|
||||
) -> axum::response::Response {
|
||||
state.metrics.requests_total.fetch_add(1, Ordering::Relaxed);
|
||||
next.run(req).await
|
||||
}
|
||||
|
||||
/// Axum middleware: fixed-window rate limiter for read endpoints.
///
/// A single shared window of 1 second allows at most 120 requests; requests
/// beyond that budget receive 429 TOO_MANY_REQUESTS. The async mutex is
/// explicitly dropped before awaiting the downstream handler so the limiter
/// is never held across request processing.
///
/// NOTE(review): the budget is global, not per-client/IP — confirm that a
/// process-wide limit is the intended behavior.
async fn read_rate_limit(
    axum::extract::State(state): axum::extract::State<AppState>,
    req: axum::extract::Request,
    next: axum::middleware::Next,
) -> axum::response::Response {
    let mut limiter = state.read_rate_limit.lock().await;
    // Start a fresh window once the current one is at least one second old.
    if limiter.window_started_at.elapsed() >= Duration::from_secs(1) {
        limiter.window_started_at = Instant::now();
        limiter.requests_in_window = 0;
    }

    // Over budget for this window: reject without touching the counter.
    if limiter.requests_in_window >= 120 {
        return (
            axum::http::StatusCode::TOO_MANY_REQUESTS,
            "rate limit exceeded",
        )
            .into_response();
    }

    limiter.requests_in_window += 1;
    // Release the lock before the (potentially slow) downstream handler runs.
    drop(limiter);
    next.run(req).await
}
|
||||
|
||||
1077
apps/api/src/metadata.rs
Normal file
1077
apps/api/src/metadata.rs
Normal file
File diff suppressed because it is too large
Load Diff
1117
apps/api/src/metadata_batch.rs
Normal file
1117
apps/api/src/metadata_batch.rs
Normal file
File diff suppressed because it is too large
Load Diff
342
apps/api/src/metadata_providers/anilist.rs
Normal file
342
apps/api/src/metadata_providers/anilist.rs
Normal file
@@ -0,0 +1,342 @@
|
||||
use super::{BookCandidate, MetadataProvider, ProviderConfig, SeriesCandidate};
|
||||
|
||||
/// Metadata provider backed by the AniList GraphQL API (manga catalog).
pub struct AniListProvider;

impl MetadataProvider for AniListProvider {
    fn name(&self) -> &str {
        "anilist"
    }

    /// Boxed-future trait adapter: clones the borrowed arguments into owned
    /// values so the async block does not capture the caller's references,
    /// then delegates to the free function `search_series_impl`.
    fn search_series(
        &self,
        query: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<SeriesCandidate>, String>> + Send + '_>,
    > {
        let query = query.to_string();
        let config = config.clone();
        Box::pin(async move { search_series_impl(&query, &config).await })
    }

    /// Same adapter pattern for listing a series' volumes; delegates to
    /// `get_series_books_impl`.
    fn get_series_books(
        &self,
        external_id: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<BookCandidate>, String>> + Send + '_>,
    > {
        let external_id = external_id.to_string();
        let config = config.clone();
        Box::pin(async move { get_series_books_impl(&external_id, &config).await })
    }
}
|
||||
|
||||
// GraphQL query: full-text manga search, top 20 by SEARCH_MATCH relevance.
// Field selection matches DETAIL_QUERY below so both map through the same
// candidate-building code.
const SEARCH_QUERY: &str = r#"
query ($search: String) {
  Page(perPage: 20) {
    media(search: $search, type: MANGA, sort: SEARCH_MATCH) {
      id
      title { romaji english native }
      description(asHtml: false)
      coverImage { large medium }
      startDate { year }
      status
      volumes
      chapters
      staff { edges { node { name { full } } role } }
      siteUrl
      genres
    }
  }
}
"#;

// GraphQL query: fetch a single manga by AniList ID (same field selection).
const DETAIL_QUERY: &str = r#"
query ($id: Int) {
  Media(id: $id, type: MANGA) {
    id
    title { romaji english native }
    description(asHtml: false)
    coverImage { large medium }
    startDate { year }
    status
    volumes
    chapters
    staff { edges { node { name { full } } role } }
    siteUrl
    genres
  }
}
"#;
|
||||
|
||||
async fn graphql_request(
|
||||
client: &reqwest::Client,
|
||||
query: &str,
|
||||
variables: serde_json::Value,
|
||||
) -> Result<serde_json::Value, String> {
|
||||
let resp = client
|
||||
.post("https://graphql.anilist.co")
|
||||
.header("Content-Type", "application/json")
|
||||
.json(&serde_json::json!({
|
||||
"query": query,
|
||||
"variables": variables,
|
||||
}))
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| format!("AniList request failed: {e}"))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
return Err(format!("AniList returned {status}: {text}"));
|
||||
}
|
||||
|
||||
resp.json()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to parse AniList response: {e}"))
|
||||
}
|
||||
|
||||
/// AniList implementation of series search.
///
/// Sends `SEARCH_QUERY`, maps each returned media node to a
/// `SeriesCandidate`, scores each against the query, and returns the top 10
/// by confidence. Malformed/missing payload shapes yield an empty list, not
/// an error.
async fn search_series_impl(
    query: &str,
    _config: &ProviderConfig,
) -> Result<Vec<SeriesCandidate>, String> {
    // Fresh client per call; 15 s covers the whole round trip.
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(15))
        .build()
        .map_err(|e| format!("failed to build HTTP client: {e}"))?;

    let data = graphql_request(
        &client,
        SEARCH_QUERY,
        serde_json::json!({ "search": query }),
    )
    .await?;

    // Navigate data.Page.media; anything unexpected → empty result.
    let media = match data
        .get("data")
        .and_then(|d| d.get("Page"))
        .and_then(|p| p.get("media"))
        .and_then(|m| m.as_array())
    {
        Some(media) => media,
        None => return Ok(vec![]),
    };

    let query_lower = query.to_lowercase();

    let mut candidates: Vec<SeriesCandidate> = media
        .iter()
        .filter_map(|m| {
            // id and a usable title are mandatory; skip the node otherwise.
            let id = m.get("id").and_then(|id| id.as_i64())?;
            let title_obj = m.get("title")?;
            // Prefer the English title, falling back to romaji.
            let title = title_obj
                .get("english")
                .and_then(|t| t.as_str())
                .or_else(|| title_obj.get("romaji").and_then(|t| t.as_str()))?
                .to_string();

            // AniList escapes newlines as literal "\n"; empty → None.
            let description = m
                .get("description")
                .and_then(|d| d.as_str())
                .map(|d| d.replace("\\n", "\n").trim().to_string())
                .filter(|d| !d.is_empty());

            let cover_url = m
                .get("coverImage")
                .and_then(|ci| ci.get("large").or_else(|| ci.get("medium")))
                .and_then(|u| u.as_str())
                .map(String::from);

            let start_year = m
                .get("startDate")
                .and_then(|sd| sd.get("year"))
                .and_then(|y| y.as_i64())
                .map(|y| y as i32);

            let volumes = m
                .get("volumes")
                .and_then(|v| v.as_i64())
                .map(|v| v as i32);

            let chapters = m
                .get("chapters")
                .and_then(|v| v.as_i64())
                .map(|v| v as i32);

            let status = m
                .get("status")
                .and_then(|s| s.as_str())
                .unwrap_or("UNKNOWN")
                .to_string();

            let site_url = m
                .get("siteUrl")
                .and_then(|u| u.as_str())
                .map(String::from);

            let authors = extract_authors(m);

            let confidence = compute_confidence(&title, &query_lower);

            // Use volumes if known, otherwise fall back to chapters count
            let (total_volumes, volume_source) = match volumes {
                Some(v) => (Some(v), "volumes"),
                None => match chapters {
                    Some(c) => (Some(c), "chapters"),
                    None => (None, "unknown"),
                },
            };

            Some(SeriesCandidate {
                external_id: id.to_string(),
                title,
                authors,
                description,
                publishers: vec![],
                start_year,
                total_volumes,
                cover_url,
                external_url: site_url,
                confidence,
                // volume_source records whether total_volumes came from the
                // real volume count or the chapter-count fallback.
                metadata_json: serde_json::json!({
                    "status": status,
                    "chapters": chapters,
                    "volumes": volumes,
                    "volume_source": volume_source,
                }),
            })
        })
        .collect();

    // Best matches first; cap at 10 results.
    candidates.sort_by(|a, b| b.confidence.partial_cmp(&a.confidence).unwrap_or(std::cmp::Ordering::Equal));
    candidates.truncate(10);
    Ok(candidates)
}
|
||||
|
||||
/// AniList implementation of the per-series volume listing.
///
/// AniList has no per-volume records, so this synthesizes one
/// `BookCandidate` per volume — or per chapter when the volume count is
/// unknown — attaching the series description and cover to volume 1 only.
async fn get_series_books_impl(
    external_id: &str,
    _config: &ProviderConfig,
) -> Result<Vec<BookCandidate>, String> {
    let id: i64 = external_id
        .parse()
        .map_err(|_| "invalid AniList ID".to_string())?;

    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(15))
        .build()
        .map_err(|e| format!("failed to build HTTP client: {e}"))?;

    let data = graphql_request(
        &client,
        DETAIL_QUERY,
        serde_json::json!({ "id": id }),
    )
    .await?;

    // Unknown ID or malformed payload → empty list, not an error.
    let media = match data.get("data").and_then(|d| d.get("Media")) {
        Some(m) => m,
        None => return Ok(vec![]),
    };

    let title_obj = media.get("title").cloned().unwrap_or(serde_json::json!({}));
    let title = title_obj
        .get("english")
        .and_then(|t| t.as_str())
        .or_else(|| title_obj.get("romaji").and_then(|t| t.as_str()))
        .unwrap_or("")
        .to_string();

    let volumes = media
        .get("volumes")
        .and_then(|v| v.as_i64())
        .map(|v| v as i32);

    let chapters = media
        .get("chapters")
        .and_then(|v| v.as_i64())
        .map(|v| v as i32);

    // Use volumes if known, otherwise fall back to chapters count
    let total = volumes.or(chapters);

    let cover_url = media
        .get("coverImage")
        .and_then(|ci| ci.get("large").or_else(|| ci.get("medium")))
        .and_then(|u| u.as_str())
        .map(String::from);

    let description = media
        .get("description")
        .and_then(|d| d.as_str())
        .map(|d| d.replace("\\n", "\n").trim().to_string());

    let authors = extract_authors(media);

    // AniList doesn't have per-volume data — generate entries from volumes count (or chapters as fallback)
    let mut books = Vec::new();
    if let Some(total) = total {
        for vol in 1..=total {
            books.push(BookCandidate {
                external_book_id: format!("{}-vol-{}", external_id, vol),
                title: format!("{} Vol. {}", title, vol),
                volume_number: Some(vol),
                authors: authors.clone(),
                isbn: None,
                // Series-level description/cover only make sense once.
                summary: if vol == 1 { description.clone() } else { None },
                cover_url: if vol == 1 { cover_url.clone() } else { None },
                page_count: None,
                // NOTE(review): language is hard-coded "ja" — confirm this is
                // acceptable for non-Japanese manga/manhwa entries.
                language: Some("ja".to_string()),
                publish_date: None,
                metadata_json: serde_json::json!({}),
            });
        }
    }

    Ok(books)
}
|
||||
|
||||
fn extract_authors(media: &serde_json::Value) -> Vec<String> {
|
||||
let mut authors = Vec::new();
|
||||
if let Some(edges) = media
|
||||
.get("staff")
|
||||
.and_then(|s| s.get("edges"))
|
||||
.and_then(|e| e.as_array())
|
||||
{
|
||||
for edge in edges {
|
||||
let role = edge
|
||||
.get("role")
|
||||
.and_then(|r| r.as_str())
|
||||
.unwrap_or("");
|
||||
let role_lower = role.to_lowercase();
|
||||
if role_lower.contains("story") || role_lower.contains("art") || role_lower.contains("original") {
|
||||
if let Some(name) = edge
|
||||
.get("node")
|
||||
.and_then(|n| n.get("name"))
|
||||
.and_then(|n| n.get("full"))
|
||||
.and_then(|f| f.as_str())
|
||||
{
|
||||
if !authors.contains(&name.to_string()) {
|
||||
authors.push(name.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
authors
|
||||
}
|
||||
|
||||
/// Heuristic match score in [0, 1] between a candidate title and the
/// already-lowercased search query.
///
/// 1.0 for a case-insensitive exact match, 0.8 for a prefix relation in
/// either direction, 0.7 for a substring relation in either direction;
/// otherwise a character-overlap ratio clamped to [0.1, 0.6].
fn compute_confidence(title: &str, query: &str) -> f32 {
    let title_lower = title.to_lowercase();
    if title_lower == query {
        1.0
    } else if title_lower.starts_with(query) || query.starts_with(&title_lower) {
        0.8
    } else if title_lower.contains(query) || query.contains(&title_lower) {
        0.7
    } else {
        // Overlap ratio. Both quantities are in *characters*: the previous
        // code divided a char count by `str::len()` (bytes), which deflated
        // scores for non-ASCII (e.g. Japanese) titles and queries.
        let common: usize = query.chars().filter(|c| title_lower.contains(*c)).count();
        let max_len = query.chars().count().max(title_lower.chars().count()).max(1);
        (common as f32 / max_len as f32).clamp(0.1, 0.6)
    }
}
|
||||
671
apps/api/src/metadata_providers/bedetheque.rs
Normal file
671
apps/api/src/metadata_providers/bedetheque.rs
Normal file
@@ -0,0 +1,671 @@
|
||||
use scraper::{Html, Selector};
|
||||
|
||||
use super::{BookCandidate, MetadataProvider, ProviderConfig, SeriesCandidate};
|
||||
|
||||
/// Metadata provider that scrapes bedetheque.com (French BD catalog).
pub struct BedethequeProvider;

impl MetadataProvider for BedethequeProvider {
    fn name(&self) -> &str {
        "bedetheque"
    }

    /// Boxed-future trait adapter: clones the borrowed arguments into owned
    /// values, then delegates to the free function `search_series_impl`.
    fn search_series(
        &self,
        query: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<SeriesCandidate>, String>> + Send + '_>,
    > {
        let query = query.to_string();
        let config = config.clone();
        Box::pin(async move { search_series_impl(&query, &config).await })
    }

    /// Same adapter pattern; delegates to `get_series_books_impl`.
    fn get_series_books(
        &self,
        external_id: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<BookCandidate>, String>> + Send + '_>,
    > {
        let external_id = external_id.to_string();
        let config = config.clone();
        Box::pin(async move { get_series_books_impl(&external_id, &config).await })
    }
}
|
||||
|
||||
/// Builds the HTTP client used for all bedetheque requests.
///
/// Sends browser-like User-Agent, Accept, Accept-Language, and Referer
/// headers (the site blocks obvious bots) with a 20 s timeout. The header
/// values are constant, so `.parse().unwrap()` cannot fail at runtime.
fn build_client() -> Result<reqwest::Client, String> {
    let mut headers = reqwest::header::HeaderMap::new();
    headers.insert(
        reqwest::header::ACCEPT,
        "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
            .parse()
            .unwrap(),
    );
    headers.insert(
        reqwest::header::ACCEPT_LANGUAGE,
        "fr-FR,fr;q=0.9,en;q=0.5".parse().unwrap(),
    );
    headers.insert(reqwest::header::REFERER, "https://www.bedetheque.com/".parse().unwrap());

    reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(20))
        .user_agent("Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:108.0) Gecko/20100101 Firefox/108.0")
        .default_headers(headers)
        .build()
        .map_err(|e| format!("failed to build HTTP client: {e}"))
}
|
||||
|
||||
/// Remove diacritics for URL construction (bedetheque uses ASCII slugs)
|
||||
fn normalize_for_url(s: &str) -> String {
|
||||
s.chars()
|
||||
.map(|c| match c {
|
||||
'é' | 'è' | 'ê' | 'ë' | 'É' | 'È' | 'Ê' | 'Ë' => 'e',
|
||||
'à' | 'â' | 'ä' | 'À' | 'Â' | 'Ä' => 'a',
|
||||
'ù' | 'û' | 'ü' | 'Ù' | 'Û' | 'Ü' => 'u',
|
||||
'ô' | 'ö' | 'Ô' | 'Ö' => 'o',
|
||||
'î' | 'ï' | 'Î' | 'Ï' => 'i',
|
||||
'ç' | 'Ç' => 'c',
|
||||
'ñ' | 'Ñ' => 'n',
|
||||
_ => c,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Percent-encodes a string for use in a query parameter.
///
/// RFC 3986 unreserved bytes pass through, a space becomes `+`
/// (form-urlencoding convention), and every other byte becomes `%XX`.
fn urlencoded(s: &str) -> String {
    s.bytes().fold(String::new(), |mut out, b| {
        match b {
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                out.push(b as char);
            }
            b' ' => out.push('+'),
            other => out.push_str(&format!("%{:02X}", other)),
        }
        out
    })
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Search
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Bedetheque implementation of series search (HTML scraping).
///
/// Fetches the full-text search page, collects every `/serie-…` link as a
/// candidate (deduplicated by series id), ranks the top 10 by confidence,
/// then enriches the first 3 with a detail fetch of each series page.
async fn search_series_impl(
    query: &str,
    _config: &ProviderConfig,
) -> Result<Vec<SeriesCandidate>, String> {
    let client = build_client()?;

    // Use the full-text search page
    let url = format!(
        "https://www.bedetheque.com/search/tout?RechTexte={}&RechWhere=0",
        urlencoded(&normalize_for_url(query))
    );

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("Bedetheque request failed: {e}"))?;

    if !resp.status().is_success() {
        let status = resp.status();
        return Err(format!("Bedetheque returned {status}"));
    }

    let html = resp
        .text()
        .await
        .map_err(|e| format!("Failed to read Bedetheque response: {e}"))?;

    // Detect IP blacklist — blocked responses come back with an empty <title>.
    if html.contains("<title></title>") || html.contains("<title> </title>") {
        return Err("Bedetheque: IP may be rate-limited, please retry later".to_string());
    }

    // Parse HTML in a block so the non-Send Html type is dropped before any .await
    let candidates = {
        let document = Html::parse_document(&html);
        let link_sel =
            Selector::parse("a[href*='/serie-']").map_err(|e| format!("selector error: {e}"))?;

        let query_lower = query.to_lowercase();
        let mut seen = std::collections::HashSet::new();
        let mut candidates = Vec::new();

        for el in document.select(&link_sel) {
            let href = match el.value().attr("href") {
                Some(h) => h.to_string(),
                None => continue,
            };

            let (series_id, _slug) = match parse_serie_href(&href) {
                Some(v) => v,
                None => continue,
            };

            // One candidate per series id — search pages repeat links.
            if !seen.insert(series_id.clone()) {
                continue;
            }

            let title = el.text().collect::<String>().trim().to_string();
            if title.is_empty() {
                continue;
            }

            let confidence = compute_confidence(&title, &query_lower);
            // Series thumbnails follow a predictable cache-URL scheme.
            let cover_url = format!(
                "https://www.bedetheque.com/cache/thb_series/PlancheS_{}.jpg",
                series_id
            );

            candidates.push(SeriesCandidate {
                external_id: series_id.clone(),
                title: title.clone(),
                authors: vec![],
                description: None,
                publishers: vec![],
                start_year: None,
                total_volumes: None,
                cover_url: Some(cover_url),
                external_url: Some(href),
                confidence,
                metadata_json: serde_json::json!({}),
            });
        }

        // Best matches first; cap at 10 before enrichment.
        candidates.sort_by(|a, b| {
            b.confidence
                .partial_cmp(&a.confidence)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        candidates.truncate(10);
        candidates
    }; // document is dropped here — safe to .await below

    // For the top candidates, fetch series details to enrich metadata
    // (limit to top 3 to avoid hammering the site)
    let mut enriched = Vec::new();
    for mut c in candidates {
        if enriched.len() < 3 {
            // Enrichment failures are ignored — the bare candidate is kept.
            if let Ok(details) = fetch_series_details(&client, &c.external_id, c.external_url.as_deref()).await {
                if let Some(desc) = details.description {
                    c.description = Some(desc);
                }
                if !details.authors.is_empty() {
                    c.authors = details.authors;
                }
                if !details.publishers.is_empty() {
                    c.publishers = details.publishers;
                }
                if let Some(year) = details.start_year {
                    c.start_year = Some(year);
                }
                if let Some(count) = details.album_count {
                    c.total_volumes = Some(count);
                }
                c.metadata_json = serde_json::json!({
                    "description": c.description,
                    "authors": c.authors,
                    "publishers": c.publishers,
                    "start_year": c.start_year,
                    "genres": details.genres,
                    "status": details.status,
                    "origin": details.origin,
                    "language": details.language,
                });
            }
        }
        enriched.push(c);
    }

    Ok(enriched)
}
|
||||
|
||||
/// Parse serie URL to extract (id, slug)
|
||||
fn parse_serie_href(href: &str) -> Option<(String, String)> {
|
||||
// Patterns:
|
||||
// https://www.bedetheque.com/serie-3-BD-Blacksad.html
|
||||
// /serie-3-BD-Blacksad.html
|
||||
let re = regex::Regex::new(r"/serie-(\d+)-[A-Za-z]+-(.+?)(?:__\d+)?\.html").ok()?;
|
||||
let caps = re.captures(href)?;
|
||||
Some((caps[1].to_string(), caps[2].to_string()))
|
||||
}
|
||||
|
||||
/// Series-level facts scraped from a bedetheque series page by
/// `fetch_series_details`.
struct SeriesDetails {
    description: Option<String>, // cleaned <meta name="description"> text
    authors: Vec<String>,        // "First Last", deduplicated and sorted
    publishers: Vec<String>,     // deduplicated and sorted
    start_year: Option<i32>,     // year of the first datePublished meta
    album_count: Option<i32>,    // parsed from "Tomes : N" page text
    genres: Vec<String>,         // comma-split span.style-serie content
    status: Option<String>,      // span.parution-serie (e.g. "Série finie")
    origin: Option<String>,      // value after "Origine :" in page text
    language: Option<String>,    // value after "Langue :" in page text
}
|
||||
|
||||
/// Fetches and scrapes one bedetheque series page into `SeriesDetails`.
///
/// The `__10000` URL suffix asks the site for all albums on a single page.
/// Every extraction below is best-effort: a missing element simply leaves
/// the corresponding field at its default.
async fn fetch_series_details(
    client: &reqwest::Client,
    series_id: &str,
    series_url: Option<&str>,
) -> Result<SeriesDetails, String> {
    // Build URL — append __10000 to get all albums on one page
    let url = match series_url {
        Some(u) => {
            // Replace .html with __10000.html
            u.replace(".html", "__10000.html")
        }
        None => format!(
            "https://www.bedetheque.com/serie-{}-BD-Serie__10000.html",
            series_id
        ),
    };

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("Failed to fetch series page: {e}"))?;

    if !resp.status().is_success() {
        return Err(format!("Series page returned {}", resp.status()));
    }

    let html = resp
        .text()
        .await
        .map_err(|e| format!("Failed to read series page: {e}"))?;

    // No more awaits below, so holding the non-Send Html is safe here.
    let doc = Html::parse_document(&html);
    let mut details = SeriesDetails {
        description: None,
        authors: vec![],
        publishers: vec![],
        start_year: None,
        album_count: None,
        genres: vec![],
        status: None,
        origin: None,
        language: None,
    };

    // Description from <meta name="description"> — format: "Tout sur la série {name} : {description}"
    if let Ok(sel) = Selector::parse(r#"meta[name="description"]"#) {
        if let Some(el) = doc.select(&sel).next() {
            if let Some(content) = el.value().attr("content") {
                let desc = content.trim().to_string();
                // Strip the "Tout sur la série ... : " prefix
                let cleaned = if let Some(pos) = desc.find(" : ") {
                    desc[pos + 3..].trim().to_string()
                } else {
                    desc
                };
                if !cleaned.is_empty() {
                    details.description = Some(cleaned);
                }
            }
        }
    }

    // Extract authors from itemprop="author" and itemprop="illustrator" (deduplicated)
    {
        let mut authors_set = std::collections::HashSet::new();
        for attr in ["author", "illustrator"] {
            if let Ok(sel) = Selector::parse(&format!(r#"[itemprop="{attr}"]"#)) {
                for el in doc.select(&sel) {
                    let name = el.text().collect::<String>().trim().to_string();
                    // Names are "Last, First" — normalize to "First Last"
                    let normalized = if let Some((last, first)) = name.split_once(',') {
                        format!("{} {}", first.trim(), last.trim())
                    } else {
                        name
                    };
                    if !normalized.is_empty() && is_real_author(&normalized) {
                        authors_set.insert(normalized);
                    }
                }
            }
        }
        // HashSet iteration order is unstable — sort for determinism.
        details.authors = authors_set.into_iter().collect();
        details.authors.sort();
    }

    // Extract publishers from itemprop="publisher" (deduplicated)
    {
        let mut publishers_set = std::collections::HashSet::new();
        if let Ok(sel) = Selector::parse(r#"[itemprop="publisher"]"#) {
            for el in doc.select(&sel) {
                let name = el.text().collect::<String>().trim().to_string();
                if !name.is_empty() {
                    publishers_set.insert(name);
                }
            }
        }
        details.publishers = publishers_set.into_iter().collect();
        details.publishers.sort();
    }

    // Extract series-level info from <li><label>X :</label>value</li> blocks
    // Genre: <li><label>Genre :</label><span class="style-serie">Animalier, Aventure, Humour</span></li>
    if let Ok(sel) = Selector::parse("span.style-serie") {
        if let Some(el) = doc.select(&sel).next() {
            let text = el.text().collect::<String>();
            details.genres = text
                .split(',')
                .map(|s| s.trim().to_string())
                .filter(|s| !s.is_empty())
                .collect();
        }
    }

    // Parution: <li><label>Parution :</label><span class="parution-serie">Série finie</span></li>
    if let Ok(sel) = Selector::parse("span.parution-serie") {
        if let Some(el) = doc.select(&sel).next() {
            let text = el.text().collect::<String>().trim().to_string();
            if !text.is_empty() {
                details.status = Some(text);
            }
        }
    }

    // Origine and Langue from page text (no dedicated CSS class)
    let page_text = doc.root_element().text().collect::<String>();

    // Only the first line after the label is the value.
    if let Some(val) = extract_info_value(&page_text, "Origine") {
        let val = val.lines().next().unwrap_or(val).trim();
        if !val.is_empty() {
            details.origin = Some(val.to_string());
        }
    }

    if let Some(val) = extract_info_value(&page_text, "Langue") {
        let val = val.lines().next().unwrap_or(val).trim();
        if !val.is_empty() {
            details.language = Some(val.to_string());
        }
    }

    // Album count from serie-info text (e.g. "Tomes : 8")
    if let Ok(re) = regex::Regex::new(r"Tomes?\s*:\s*(\d+)") {
        if let Some(caps) = re.captures(&page_text) {
            if let Ok(n) = caps[1].parse::<i32>() {
                details.album_count = Some(n);
            }
        }
    }

    // Start year from first <meta itemprop="datePublished" content="YYYY-MM-DD">
    if let Ok(sel) = Selector::parse(r#"[itemprop="datePublished"]"#) {
        if let Some(el) = doc.select(&sel).next() {
            if let Some(content) = el.value().attr("content") {
                // content is "YYYY-MM-DD"
                if let Some(year_str) = content.split('-').next() {
                    if let Ok(year) = year_str.parse::<i32>() {
                        details.start_year = Some(year);
                    }
                }
            }
        }
    }

    Ok(details)
}
|
||||
|
||||
/// Extract value after a label like "Scénario : Jean-Claude" → "Jean-Claude".
///
/// Tries "Label :", "Label:", then the lowercase "label :" variant; returns
/// the trimmed remainder of `text` after the first matching label, or `None`
/// if no pattern matches (or the remainder is empty). Callers typically take
/// only the first line of the returned slice.
fn extract_info_value<'a>(text: &'a str, label: &str) -> Option<&'a str> {
    let lower = label.to_lowercase();
    // Handle both "Label :" and "Label:" spellings.
    let patterns = [
        format!("{label} :"),
        format!("{label}:"),
        format!("{lower} :"),
    ];
    patterns.iter().find_map(|pat| {
        let pos = text.find(pat.as_str())?;
        let val = text[pos + pat.len()..].trim();
        (!val.is_empty()).then_some(val)
    })
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Get series books
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Bedetheque implementation of the per-series album listing.
///
/// Fetches the series page (with the `__10000` suffix so all albums appear
/// on one page), keeps only numbered tomes (skipping hors-série, intégrales,
/// coffrets, …), and scrapes each album's metadata from its microdata
/// (`itemprop`) attributes. Result is sorted by volume number.
async fn get_series_books_impl(
    external_id: &str,
    _config: &ProviderConfig,
) -> Result<Vec<BookCandidate>, String> {
    let client = build_client()?;

    // We need to find the series URL — try a direct fetch
    // external_id is the numeric series ID
    // We try to fetch the series page to get the album list
    let url = format!(
        "https://www.bedetheque.com/serie-{}-BD-Serie__10000.html",
        external_id
    );

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("Failed to fetch series: {e}"))?;

    // If the generic slug fails, try without the slug part (bedetheque redirects)
    let html = if resp.status().is_success() {
        resp.text().await.map_err(|e| format!("Failed to read: {e}"))?
    } else {
        // Try alternative URL pattern
        let alt_url = format!(
            "https://www.bedetheque.com/serie-{}__10000.html",
            external_id
        );
        let resp2 = client
            .get(&alt_url)
            .send()
            .await
            .map_err(|e| format!("Failed to fetch series (alt): {e}"))?;
        if !resp2.status().is_success() {
            return Err(format!("Series page not found for id {external_id}"));
        }
        resp2.text().await.map_err(|e| format!("Failed to read: {e}"))?
    };

    // Blocked responses come back with an empty <title>.
    if html.contains("<title></title>") {
        return Err("Bedetheque: IP may be rate-limited".to_string());
    }

    let doc = Html::parse_document(&html);
    let mut books = Vec::new();

    // Each album block starts before a .album-main div.
    // The cover image (<img itemprop="image">) is OUTSIDE .album-main (sibling),
    // so we iterate over a broader parent. But the simplest approach: parse all
    // itemprop elements relative to each .album-main, plus pick covers separately.
    let album_sel = Selector::parse(".album-main").map_err(|e| format!("selector: {e}"))?;

    // Pre-collect cover images — they appear in <img itemprop="image"> before each .album-main
    // and link to an album URL containing the book ID
    let cover_sel = Selector::parse(r#"img[itemprop="image"]"#).map_err(|e| format!("selector: {e}"))?;
    let covers: Vec<String> = doc.select(&cover_sel)
        .filter_map(|el| el.value().attr("src").map(|s| {
            // Make relative src attributes absolute.
            if s.starts_with("http") { s.to_string() } else { format!("https://www.bedetheque.com{}", s) }
        }))
        .collect();

    // Compiled once per process; the patterns are constant literals.
    static RE_TOME: std::sync::LazyLock<regex::Regex> =
        std::sync::LazyLock::new(|| regex::Regex::new(r"(?i)-Tome-\d+-").unwrap());
    static RE_BOOK_ID: std::sync::LazyLock<regex::Regex> =
        std::sync::LazyLock::new(|| regex::Regex::new(r"-(\d+)\.html").unwrap());
    static RE_VOLUME: std::sync::LazyLock<regex::Regex> =
        std::sync::LazyLock::new(|| regex::Regex::new(r"(?i)Tome-(\d+)-").unwrap());

    for (idx, album_el) in doc.select(&album_sel).enumerate() {
        // Title from <a class="titre" title="..."> — the title attribute is clean
        let title_sel = Selector::parse("a.titre").ok();
        let title_el = title_sel.as_ref().and_then(|s| album_el.select(s).next());
        let title = title_el
            .and_then(|el| el.value().attr("title"))
            .unwrap_or("")
            .trim()
            .to_string();

        if title.is_empty() {
            continue;
        }

        // External book ID from album URL (e.g. "...-1063.html")
        let album_url = title_el.and_then(|el| el.value().attr("href")).unwrap_or("");

        // Only keep main tomes — their URLs contain "Tome-{N}-"
        // Skip hors-série (HS), intégrales (INT/INTFL), romans, coffrets, etc.
        if !RE_TOME.is_match(album_url) {
            continue;
        }

        let external_book_id = RE_BOOK_ID
            .captures(album_url)
            .map(|c| c[1].to_string())
            .unwrap_or_default();

        // Volume number from URL pattern "Tome-{N}-" or from itemprop name
        let volume_number = RE_VOLUME
            .captures(album_url)
            .and_then(|c| c[1].parse::<i32>().ok())
            .or_else(|| extract_volume_from_title(&title));

        // Authors from itemprop="author" and itemprop="illustrator"
        let mut authors = Vec::new();
        let author_sel = Selector::parse(r#"[itemprop="author"]"#).ok();
        let illustrator_sel = Selector::parse(r#"[itemprop="illustrator"]"#).ok();
        for sel in [&author_sel, &illustrator_sel].into_iter().flatten() {
            for el in album_el.select(sel) {
                let name = el.text().collect::<String>().trim().to_string();
                // Names are "Last, First" format — normalize to "First Last"
                let normalized = if let Some((last, first)) = name.split_once(',') {
                    format!("{} {}", first.trim(), last.trim())
                } else {
                    name
                };
                if !normalized.is_empty() && is_real_author(&normalized) && !authors.contains(&normalized) {
                    authors.push(normalized);
                }
            }
        }

        // ISBN from <span itemprop="isbn">
        let isbn = Selector::parse(r#"[itemprop="isbn"]"#)
            .ok()
            .and_then(|s| album_el.select(&s).next())
            .map(|el| el.text().collect::<String>().trim().to_string())
            .filter(|s| !s.is_empty());

        // Page count from <span itemprop="numberOfPages">
        let page_count = Selector::parse(r#"[itemprop="numberOfPages"]"#)
            .ok()
            .and_then(|s| album_el.select(&s).next())
            .and_then(|el| el.text().collect::<String>().trim().parse::<i32>().ok());

        // Publish date from <meta itemprop="datePublished" content="YYYY-MM-DD">
        let publish_date = Selector::parse(r#"[itemprop="datePublished"]"#)
            .ok()
            .and_then(|s| album_el.select(&s).next())
            .and_then(|el| el.value().attr("content").map(|c| c.trim().to_string()))
            .filter(|s| !s.is_empty());

        // Cover from pre-collected covers (same index)
        // NOTE(review): this assumes exactly one itemprop="image" per album
        // block, in document order — confirm alignment on pages where some
        // albums lack a cover.
        let cover_url = covers.get(idx).cloned();

        books.push(BookCandidate {
            external_book_id,
            title,
            volume_number,
            authors,
            isbn,
            summary: None,
            cover_url,
            page_count,
            language: Some("fr".to_string()),
            publish_date,
            metadata_json: serde_json::json!({}),
        });
    }

    // Unknown volume numbers sort last.
    books.sort_by_key(|b| b.volume_number.unwrap_or(999));
    Ok(books)
}
|
||||
|
||||
/// Filter out placeholder author names from Bédéthèque.
///
/// Rejects angle-bracketed placeholders (e.g. "<Indéterminé>") and the
/// generic "Collectif" credit; everything else is accepted.
fn is_real_author(name: &str) -> bool {
    if name == "Collectif" {
        return false;
    }
    !(name.starts_with('<') || name.ends_with('>'))
}
|
||||
|
||||
fn extract_volume_from_title(title: &str) -> Option<i32> {
|
||||
let patterns = [
|
||||
r"(?i)(?:tome|t\.)\s*(\d+)",
|
||||
r"(?i)(?:vol(?:ume)?\.?)\s*(\d+)",
|
||||
r"#\s*(\d+)",
|
||||
];
|
||||
for pattern in &patterns {
|
||||
if let Ok(re) = regex::Regex::new(pattern) {
|
||||
if let Some(caps) = re.captures(title) {
|
||||
if let Ok(n) = caps[1].parse::<i32>() {
|
||||
return Some(n);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Normalize a title by removing French articles (leading or in parentheses)
|
||||
/// and extra whitespace/punctuation, so that "Les Légendaires - Résistance"
|
||||
/// and "Légendaires (Les) - Résistance" produce the same canonical form.
|
||||
fn normalize_title(s: &str) -> String {
|
||||
let lower = s.to_lowercase();
|
||||
// Remove articles in parentheses: "(les)", "(la)", "(le)", "(l')", "(un)", "(une)", "(des)"
|
||||
let re_parens = regex::Regex::new(r"\s*\((?:les?|la|l'|une?|des|du|d')\)").unwrap();
|
||||
let cleaned = re_parens.replace_all(&lower, "");
|
||||
// Remove leading articles: "les ", "la ", "le ", "l'", "un ", "une ", "des ", "du ", "d'"
|
||||
let re_leading = regex::Regex::new(r"^(?:les?|la|l'|une?|des|du|d')\s+").unwrap();
|
||||
let cleaned = re_leading.replace(&cleaned, "");
|
||||
// Collapse multiple spaces/dashes into single
|
||||
let re_spaces = regex::Regex::new(r"\s+").unwrap();
|
||||
re_spaces.replace_all(cleaned.trim(), " ").to_string()
|
||||
}
|
||||
|
||||
fn compute_confidence(title: &str, query: &str) -> f32 {
|
||||
let title_lower = title.to_lowercase();
|
||||
let query_lower = query.to_lowercase();
|
||||
if title_lower == query_lower {
|
||||
return 1.0;
|
||||
}
|
||||
|
||||
// Try with normalized forms (handles Bedetheque's "Name (Article)" convention)
|
||||
let title_norm = normalize_title(title);
|
||||
let query_norm = normalize_title(query);
|
||||
if title_norm == query_norm {
|
||||
return 1.0;
|
||||
}
|
||||
|
||||
if title_lower.starts_with(&query_lower) || query_lower.starts_with(&title_lower)
|
||||
|| title_norm.starts_with(&query_norm) || query_norm.starts_with(&title_norm)
|
||||
{
|
||||
0.85
|
||||
} else if title_lower.contains(&query_lower) || query_lower.contains(&title_lower)
|
||||
|| title_norm.contains(&query_norm) || query_norm.contains(&title_norm)
|
||||
{
|
||||
0.7
|
||||
} else {
|
||||
let common: usize = query_lower
|
||||
.chars()
|
||||
.filter(|c| title_lower.contains(*c))
|
||||
.count();
|
||||
let max_len = query_lower.len().max(title_lower.len()).max(1);
|
||||
(common as f32 / max_len as f32).clamp(0.1, 0.6)
|
||||
}
|
||||
}
|
||||
267
apps/api/src/metadata_providers/comicvine.rs
Normal file
267
apps/api/src/metadata_providers/comicvine.rs
Normal file
@@ -0,0 +1,267 @@
|
||||
use super::{BookCandidate, MetadataProvider, ProviderConfig, SeriesCandidate};
|
||||
|
||||
/// Metadata provider backed by the ComicVine REST API
/// (comicvine.gamespot.com/api). An API key is required — see the error
/// surfaced by `search_series_impl` when it is missing.
pub struct ComicVineProvider;

impl MetadataProvider for ComicVineProvider {
    fn name(&self) -> &str {
        "comicvine"
    }

    // The trait exposes boxed futures (not `async fn`) so it stays object-safe;
    // each wrapper clones its borrowed arguments into owned values before
    // moving them into the async block.
    fn search_series(
        &self,
        query: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<SeriesCandidate>, String>> + Send + '_>,
    > {
        let query = query.to_string();
        let config = config.clone();
        Box::pin(async move { search_series_impl(&query, &config).await })
    }

    fn get_series_books(
        &self,
        external_id: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<BookCandidate>, String>> + Send + '_>,
    > {
        let external_id = external_id.to_string();
        let config = config.clone();
        Box::pin(async move { get_series_books_impl(&external_id, &config).await })
    }
}
|
||||
|
||||
/// Build the HTTP client used for ComicVine calls: 15-second request timeout
/// and an identifying User-Agent, with the builder error flattened to a
/// human-readable `String`.
fn build_client() -> Result<reqwest::Client, String> {
    let builder = reqwest::Client::builder()
        .user_agent("StripstreamLibrarian/1.0")
        .timeout(std::time::Duration::from_secs(15));
    match builder.build() {
        Ok(client) => Ok(client),
        Err(e) => Err(format!("failed to build HTTP client: {e}")),
    }
}
|
||||
|
||||
/// Search ComicVine "volumes" (series-level records) matching `query`.
///
/// Returns at most 10 candidates sorted by descending confidence. Errors are
/// human-readable strings: missing API key, transport failure, non-2xx status
/// (with response body), or unparsable JSON.
async fn search_series_impl(
    query: &str,
    config: &ProviderConfig,
) -> Result<Vec<SeriesCandidate>, String> {
    // ComicVine needs a key on every request; fail early with an actionable
    // message instead of letting the API return an auth error.
    let api_key = config
        .api_key
        .as_deref()
        .filter(|k| !k.is_empty())
        .ok_or_else(|| "ComicVine requires an API key. Configure it in Settings > Integrations.".to_string())?;

    let client = build_client()?;

    // resources=volume restricts the search to series-level records.
    let url = format!(
        "https://comicvine.gamespot.com/api/search/?api_key={}&format=json&resources=volume&query={}&limit=20",
        api_key,
        urlencoded(query)
    );

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("ComicVine request failed: {e}"))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let text = resp.text().await.unwrap_or_default();
        return Err(format!("ComicVine returned {status}: {text}"));
    }

    let data: serde_json::Value = resp
        .json()
        .await
        .map_err(|e| format!("Failed to parse ComicVine response: {e}"))?;

    // A missing or non-array "results" field is treated as "no matches",
    // not as an error.
    let results = match data.get("results").and_then(|r| r.as_array()) {
        Some(results) => results,
        None => return Ok(vec![]),
    };

    let query_lower = query.to_lowercase();

    // Map each volume object to a SeriesCandidate. Records lacking a name or
    // numeric id are dropped by the `?` inside filter_map.
    let mut candidates: Vec<SeriesCandidate> = results
        .iter()
        .filter_map(|vol| {
            let name = vol.get("name").and_then(|n| n.as_str())?.to_string();
            let id = vol.get("id").and_then(|id| id.as_i64())?;
            // Descriptions arrive as HTML; reduce to plain text.
            let description = vol
                .get("description")
                .and_then(|d| d.as_str())
                .map(strip_html);
            let publisher = vol
                .get("publisher")
                .and_then(|p| p.get("name"))
                .and_then(|n| n.as_str())
                .map(String::from);
            // start_year is a string in ComicVine payloads, hence the parse.
            let start_year = vol
                .get("start_year")
                .and_then(|y| y.as_str())
                .and_then(|y| y.parse::<i32>().ok());
            let count_of_issues = vol
                .get("count_of_issues")
                .and_then(|c| c.as_i64())
                .map(|c| c as i32);
            // Prefer the medium image, fall back to small.
            let cover_url = vol
                .get("image")
                .and_then(|img| img.get("medium_url").or_else(|| img.get("small_url")))
                .and_then(|u| u.as_str())
                .map(String::from);
            let site_url = vol
                .get("site_detail_url")
                .and_then(|u| u.as_str())
                .map(String::from);

            // compute_confidence expects the query pre-lowercased.
            let confidence = compute_confidence(&name, &query_lower);

            Some(SeriesCandidate {
                external_id: id.to_string(),
                title: name,
                // ComicVine's volume search payload carries no author data.
                authors: vec![],
                description,
                publishers: publisher.into_iter().collect(),
                start_year,
                total_volumes: count_of_issues,
                cover_url,
                external_url: site_url,
                confidence,
                metadata_json: serde_json::json!({}),
            })
        })
        .collect();

    // Best matches first; cap at 10 entries.
    candidates.sort_by(|a, b| b.confidence.partial_cmp(&a.confidence).unwrap_or(std::cmp::Ordering::Equal));
    candidates.truncate(10);
    Ok(candidates)
}
|
||||
|
||||
/// Fetch the issues of a ComicVine volume (`external_id`) as book candidates.
///
/// Requests come back pre-sorted by issue number, capped at 100; `field_list`
/// trims the payload to exactly the fields consumed below.
async fn get_series_books_impl(
    external_id: &str,
    config: &ProviderConfig,
) -> Result<Vec<BookCandidate>, String> {
    let api_key = config
        .api_key
        .as_deref()
        .filter(|k| !k.is_empty())
        .ok_or_else(|| "ComicVine requires an API key".to_string())?;

    let client = build_client()?;

    let url = format!(
        "https://comicvine.gamespot.com/api/issues/?api_key={}&format=json&filter=volume:{}&sort=issue_number:asc&limit=100&field_list=id,name,issue_number,description,image,cover_date,site_detail_url",
        api_key,
        external_id
    );

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("ComicVine request failed: {e}"))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let text = resp.text().await.unwrap_or_default();
        return Err(format!("ComicVine returned {status}: {text}"));
    }

    let data: serde_json::Value = resp
        .json()
        .await
        .map_err(|e| format!("Failed to parse ComicVine response: {e}"))?;

    // No "results" array means an empty series rather than a hard error.
    let results = match data.get("results").and_then(|r| r.as_array()) {
        Some(results) => results,
        None => return Ok(vec![]),
    };

    let books: Vec<BookCandidate> = results
        .iter()
        .filter_map(|issue| {
            // Only the numeric id is mandatory; other fields degrade gracefully.
            let id = issue.get("id").and_then(|id| id.as_i64())?;
            let name = issue
                .get("name")
                .and_then(|n| n.as_str())
                .unwrap_or("")
                .to_string();
            // issue_number is a string and may be fractional ("1.5"); parse
            // as f64 then truncate to i32.
            let issue_number = issue
                .get("issue_number")
                .and_then(|n| n.as_str())
                .and_then(|n| n.parse::<f64>().ok())
                .map(|n| n as i32);
            // HTML description reduced to plain text.
            let description = issue
                .get("description")
                .and_then(|d| d.as_str())
                .map(strip_html);
            let cover_url = issue
                .get("image")
                .and_then(|img| img.get("medium_url").or_else(|| img.get("small_url")))
                .and_then(|u| u.as_str())
                .map(String::from);
            let cover_date = issue
                .get("cover_date")
                .and_then(|d| d.as_str())
                .map(String::from);

            Some(BookCandidate {
                external_book_id: id.to_string(),
                title: name,
                volume_number: issue_number,
                // Per-issue credits are not in this payload's field_list.
                authors: vec![],
                isbn: None,
                summary: description,
                cover_url,
                page_count: None,
                language: None,
                publish_date: cover_date,
                metadata_json: serde_json::json!({}),
            })
        })
        .collect();

    Ok(books)
}
|
||||
|
||||
/// Remove HTML tags from `s`, returning the trimmed text content.
///
/// A crude single-pass scanner: everything between a '<' and the next '>'
/// is dropped. A stray '>' outside any tag is kept as literal text (it is
/// content, not markup). Limitations: HTML entities are NOT decoded, and an
/// unclosed '<' swallows the remainder of the string.
fn strip_html(s: &str) -> String {
    let mut result = String::new();
    let mut in_tag = false;
    for ch in s.chars() {
        match ch {
            '<' => in_tag = true,
            // Only a '>' that actually closes a tag is markup; a bare '>'
            // in running text falls through to the push arm below.
            '>' if in_tag => in_tag = false,
            _ if !in_tag => result.push(ch),
            _ => {}
        }
    }
    result.trim().to_string()
}
|
||||
|
||||
/// Score how well a volume `title` matches the search `query`, in [0.1, 1.0].
///
/// `query` is expected to be lowercased already by the caller. Exact match
/// scores 1.0, prefix 0.8, substring 0.7; otherwise a rough character-overlap
/// ratio clamped to [0.1, 0.6].
fn compute_confidence(title: &str, query: &str) -> f32 {
    let title_lower = title.to_lowercase();
    if title_lower == query {
        1.0
    } else if title_lower.starts_with(query) || query.starts_with(&title_lower) {
        0.8
    } else if title_lower.contains(query) || query.contains(&title_lower) {
        0.7
    } else {
        // Fraction of query characters appearing anywhere in the title.
        // Use char counts (not byte lengths) for the denominator so
        // multi-byte UTF-8 characters do not skew the ratio.
        let common: usize = query.chars().filter(|c| title_lower.contains(*c)).count();
        let max_len = query.chars().count().max(title_lower.chars().count()).max(1);
        (common as f32 / max_len as f32).clamp(0.1, 0.6)
    }
}
|
||||
|
||||
/// Percent-encode `s` for use as a URL query component.
///
/// RFC 3986 unreserved bytes (ALPHA / DIGIT / '-' / '_' / '.' / '~') pass
/// through unchanged; every other byte is emitted as %XX.
fn urlencoded(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for b in s.bytes() {
        if b.is_ascii_alphanumeric() || matches!(b, b'-' | b'_' | b'.' | b'~') {
            out.push(b as char);
        } else {
            out.push_str(&format!("%{:02X}", b));
        }
    }
    out
}
|
||||
472
apps/api/src/metadata_providers/google_books.rs
Normal file
472
apps/api/src/metadata_providers/google_books.rs
Normal file
@@ -0,0 +1,472 @@
|
||||
use super::{BookCandidate, MetadataProvider, ProviderConfig, SeriesCandidate};
|
||||
|
||||
/// Metadata provider backed by the Google Books volumes API. An API key is
/// optional here — when configured it is appended to each request URL.
pub struct GoogleBooksProvider;

impl MetadataProvider for GoogleBooksProvider {
    fn name(&self) -> &str {
        "google_books"
    }

    // The trait exposes boxed futures (not `async fn`) so it stays object-safe;
    // each wrapper clones its borrowed arguments into owned values before
    // moving them into the async block.
    fn search_series(
        &self,
        query: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<SeriesCandidate>, String>> + Send + '_>,
    > {
        let query = query.to_string();
        let config = config.clone();
        Box::pin(async move { search_series_impl(&query, &config).await })
    }

    fn get_series_books(
        &self,
        external_id: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<BookCandidate>, String>> + Send + '_>,
    > {
        let external_id = external_id.to_string();
        let config = config.clone();
        Box::pin(async move { get_series_books_impl(&external_id, &config).await })
    }
}
|
||||
|
||||
/// Search Google Books for `query` and group the returned volumes by inferred
/// series name, yielding up to 10 `SeriesCandidate`s sorted by confidence.
///
/// Google Books has no series search, so this aggregates individual volumes:
/// per group it merges authors/publishers, keeps the first description and
/// cover, tracks the earliest publication year, and counts grouped hits.
async fn search_series_impl(
    query: &str,
    config: &ProviderConfig,
) -> Result<Vec<SeriesCandidate>, String> {
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(15))
        .build()
        .map_err(|e| format!("failed to build HTTP client: {e}"))?;

    // "intitle:" restricts matching to the title field.
    let search_query = format!("intitle:{}", query);
    let mut url = format!(
        "https://www.googleapis.com/books/v1/volumes?q={}&maxResults=20&printType=books&langRestrict={}",
        urlencoded(&search_query),
        urlencoded(&config.language),
    );
    // API key is optional; appended only when configured.
    if let Some(ref key) = config.api_key {
        url.push_str(&format!("&key={}", key));
    }

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("Google Books request failed: {e}"))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let text = resp.text().await.unwrap_or_default();
        return Err(format!("Google Books returned {status}: {text}"));
    }

    let data: serde_json::Value = resp
        .json()
        .await
        .map_err(|e| format!("Failed to parse Google Books response: {e}"))?;

    // No "items" array means no matches — not an error.
    let items = match data.get("items").and_then(|i| i.as_array()) {
        Some(items) => items,
        None => return Ok(vec![]),
    };

    // Group volumes by series name to produce series candidates
    let query_lower = query.to_lowercase();
    let mut series_map: std::collections::HashMap<String, SeriesCandidateBuilder> =
        std::collections::HashMap::new();

    for item in items {
        let volume_info = match item.get("volumeInfo") {
            Some(vi) => vi,
            None => continue,
        };

        let title = volume_info
            .get("title")
            .and_then(|t| t.as_str())
            .unwrap_or("")
            .to_string();
        let authors: Vec<String> = volume_info
            .get("authors")
            .and_then(|a| a.as_array())
            .map(|arr| {
                arr.iter()
                    .filter_map(|v| v.as_str().map(String::from))
                    .collect()
            })
            .unwrap_or_default();
        let publisher = volume_info
            .get("publisher")
            .and_then(|p| p.as_str())
            .map(String::from);
        let published_date = volume_info
            .get("publishedDate")
            .and_then(|d| d.as_str())
            .map(String::from);
        let description = volume_info
            .get("description")
            .and_then(|d| d.as_str())
            .map(String::from);

        // Extract series info from seriesInfo when present, otherwise strip
        // volume markers from the title to recover a series name.
        let series_name = volume_info
            .get("seriesInfo")
            .and_then(|si| si.get("title"))
            .and_then(|t| t.as_str())
            .map(String::from)
            .unwrap_or_else(|| extract_series_name(&title));

        // Upgrade cover links to https (Google returns http URLs).
        let cover_url = volume_info
            .get("imageLinks")
            .and_then(|il| {
                il.get("thumbnail")
                    .or_else(|| il.get("smallThumbnail"))
            })
            .and_then(|u| u.as_str())
            .map(|s| s.replace("http://", "https://"));

        let google_id = item
            .get("id")
            .and_then(|id| id.as_str())
            .unwrap_or("")
            .to_string();

        // The first volume seen for a series seeds the builder entry (and
        // donates its Google id as the series' external_id).
        let entry = series_map
            .entry(series_name.clone())
            .or_insert_with(|| SeriesCandidateBuilder {
                title: series_name.clone(),
                authors: vec![],
                description: None,
                publishers: vec![],
                start_year: None,
                volume_count: 0,
                cover_url: None,
                external_id: google_id.clone(),
                external_url: None,
                metadata_json: serde_json::json!({}),
            });

        entry.volume_count += 1;

        // Merge authors (deduplicated, insertion order preserved)
        for a in &authors {
            if !entry.authors.contains(a) {
                entry.authors.push(a.clone());
            }
        }

        // Set description if not yet set
        if entry.description.is_none() {
            entry.description = description;
        }

        // Merge publisher (deduplicated)
        if let Some(ref pub_name) = publisher {
            if !entry.publishers.contains(pub_name) {
                entry.publishers.push(pub_name.clone());
            }
        }

        // Extract year — keep the earliest seen as the series start year
        if let Some(ref date) = published_date {
            if let Some(year) = extract_year(date) {
                if entry.start_year.is_none() || entry.start_year.unwrap() > year {
                    entry.start_year = Some(year);
                }
            }
        }

        if entry.cover_url.is_none() {
            entry.cover_url = cover_url;
        }

        // NOTE(review): overwritten on every merge, so this ends up pointing
        // at the *last* grouped volume, not the first.
        entry.external_url = Some(format!(
            "https://books.google.com/books?id={}",
            google_id
        ));
    }

    let mut candidates: Vec<SeriesCandidate> = series_map
        .into_values()
        .map(|b| {
            let confidence = compute_confidence(&b.title, &query_lower);
            SeriesCandidate {
                external_id: b.external_id,
                title: b.title,
                authors: b.authors,
                description: b.description,
                publishers: b.publishers,
                start_year: b.start_year,
                // A single hit doesn't establish a series length.
                total_volumes: if b.volume_count > 1 {
                    Some(b.volume_count)
                } else {
                    None
                },
                cover_url: b.cover_url,
                external_url: b.external_url,
                confidence,
                metadata_json: b.metadata_json,
            }
        })
        .collect();

    // Best matches first; cap at 10 entries.
    candidates.sort_by(|a, b| b.confidence.partial_cmp(&a.confidence).unwrap_or(std::cmp::Ordering::Equal));
    candidates.truncate(10);

    Ok(candidates)
}
|
||||
|
||||
/// Resolve the books of a Google Books "series".
///
/// There is no first-class series endpoint here: fetch the volume identified
/// by `external_id`, derive a series name from its title, then search for
/// sibling volumes with that name. If the follow-up search fails or returns
/// nothing, the single fetched volume is returned alone.
async fn get_series_books_impl(
    external_id: &str,
    config: &ProviderConfig,
) -> Result<Vec<BookCandidate>, String> {
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(15))
        .build()
        .map_err(|e| format!("failed to build HTTP client: {e}"))?;

    // First fetch the volume to get its series info
    let mut url = format!(
        "https://www.googleapis.com/books/v1/volumes/{}",
        external_id
    );
    if let Some(ref key) = config.api_key {
        url.push_str(&format!("?key={}", key));
    }

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("Google Books request failed: {e}"))?;

    // Failing to fetch the seed volume is fatal — there is nothing to fall
    // back on at this point.
    if !resp.status().is_success() {
        let status = resp.status();
        let text = resp.text().await.unwrap_or_default();
        return Err(format!("Google Books returned {status}: {text}"));
    }

    let volume: serde_json::Value = resp
        .json()
        .await
        .map_err(|e| format!("Failed to parse Google Books response: {e}"))?;

    let volume_info = volume.get("volumeInfo").cloned().unwrap_or(serde_json::json!({}));
    let title = volume_info
        .get("title")
        .and_then(|t| t.as_str())
        .unwrap_or("");

    // Search for more volumes in this series
    let series_name = extract_series_name(title);
    let search_query = format!("intitle:{}", series_name);
    let mut search_url = format!(
        "https://www.googleapis.com/books/v1/volumes?q={}&maxResults=40&printType=books&langRestrict={}",
        urlencoded(&search_query),
        urlencoded(&config.language),
    );
    if let Some(ref key) = config.api_key {
        search_url.push_str(&format!("&key={}", key));
    }

    let resp = client
        .get(&search_url)
        .send()
        .await
        .map_err(|e| format!("Google Books search failed: {e}"))?;

    if !resp.status().is_success() {
        // Return just the single volume as a book
        return Ok(vec![volume_to_book_candidate(&volume)]);
    }

    let data: serde_json::Value = resp
        .json()
        .await
        .map_err(|e| format!("Failed to parse search response: {e}"))?;

    let items = match data.get("items").and_then(|i| i.as_array()) {
        Some(items) => items,
        None => return Ok(vec![volume_to_book_candidate(&volume)]),
    };

    let mut books: Vec<BookCandidate> = items
        .iter()
        .map(volume_to_book_candidate)
        .collect();

    // Sort by volume number; books with no detected number sink to the end
    // via the 999 sentinel.
    books.sort_by_key(|b| b.volume_number.unwrap_or(999));

    Ok(books)
}
|
||||
|
||||
/// Convert a raw Google Books volume JSON object into a `BookCandidate`.
///
/// Missing fields degrade to empty strings / `None`; the volume number is
/// inferred from markers in the title since the API has no dedicated field.
fn volume_to_book_candidate(item: &serde_json::Value) -> BookCandidate {
    let volume_info = item.get("volumeInfo").cloned().unwrap_or(serde_json::json!({}));
    let title = volume_info
        .get("title")
        .and_then(|t| t.as_str())
        .unwrap_or("")
        .to_string();
    let authors: Vec<String> = volume_info
        .get("authors")
        .and_then(|a| a.as_array())
        .map(|arr| {
            arr.iter()
                .filter_map(|v| v.as_str().map(String::from))
                .collect()
        })
        .unwrap_or_default();
    // Take the first ISBN-13 or ISBN-10 identifier found (array order decides
    // which wins when both are present).
    let isbn = volume_info
        .get("industryIdentifiers")
        .and_then(|ids| ids.as_array())
        .and_then(|arr| {
            arr.iter()
                .find(|id| {
                    id.get("type")
                        .and_then(|t| t.as_str())
                        .map(|t| t == "ISBN_13" || t == "ISBN_10")
                        .unwrap_or(false)
                })
                .and_then(|id| id.get("identifier").and_then(|i| i.as_str()))
        })
        .map(String::from);
    let summary = volume_info
        .get("description")
        .and_then(|d| d.as_str())
        .map(String::from);
    // Prefer the larger thumbnail; upgrade links to https.
    let cover_url = volume_info
        .get("imageLinks")
        .and_then(|il| il.get("thumbnail").or_else(|| il.get("smallThumbnail")))
        .and_then(|u| u.as_str())
        .map(|s| s.replace("http://", "https://"));
    let page_count = volume_info
        .get("pageCount")
        .and_then(|p| p.as_i64())
        .map(|p| p as i32);
    let language = volume_info
        .get("language")
        .and_then(|l| l.as_str())
        .map(String::from);
    let publish_date = volume_info
        .get("publishedDate")
        .and_then(|d| d.as_str())
        .map(String::from);
    let google_id = item
        .get("id")
        .and_then(|id| id.as_str())
        .unwrap_or("")
        .to_string();
    let volume_number = extract_volume_number(&title);

    BookCandidate {
        external_book_id: google_id,
        title,
        volume_number,
        authors,
        isbn,
        summary,
        cover_url,
        page_count,
        language,
        publish_date,
        metadata_json: serde_json::json!({}),
    }
}
|
||||
|
||||
fn extract_series_name(title: &str) -> String {
|
||||
// Remove trailing volume indicators like "Vol. 1", "Tome 2", "#3", "- Volume 1"
|
||||
let re_patterns = [
|
||||
r"(?i)\s*[-–—]\s*(?:vol(?:ume)?\.?\s*|tome\s*|t\.\s*|#)\s*\d+.*$",
|
||||
r"(?i)\s*,?\s*(?:vol(?:ume)?\.?\s*|tome\s*|t\.\s*|#)\s*\d+.*$",
|
||||
r"\s*\(\d+\)\s*$",
|
||||
r"\s+\d+\s*$",
|
||||
];
|
||||
|
||||
let mut result = title.to_string();
|
||||
for pattern in &re_patterns {
|
||||
if let Ok(re) = regex::Regex::new(pattern) {
|
||||
let cleaned = re.replace(&result, "").to_string();
|
||||
if !cleaned.is_empty() {
|
||||
result = cleaned;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result.trim().to_string()
|
||||
}
|
||||
|
||||
fn extract_volume_number(title: &str) -> Option<i32> {
|
||||
let patterns = [
|
||||
r"(?i)(?:vol(?:ume)?\.?\s*|tome\s*|t\.\s*|#)\s*(\d+)",
|
||||
r"\((\d+)\)\s*$",
|
||||
r"\b(\d+)\s*$",
|
||||
];
|
||||
|
||||
for pattern in &patterns {
|
||||
if let Ok(re) = regex::Regex::new(pattern) {
|
||||
if let Some(caps) = re.captures(title) {
|
||||
if let Some(num) = caps.get(1).and_then(|m| m.as_str().parse::<i32>().ok()) {
|
||||
return Some(num);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Parse the first four characters of a date string ("YYYY-MM-DD", "YYYY")
/// as a year. Returns None for strings shorter than four bytes, prefixes
/// that are not numeric, or a split inside a multi-byte character.
fn extract_year(date: &str) -> Option<i32> {
    let prefix = date.get(..4)?;
    prefix.parse::<i32>().ok()
}
|
||||
|
||||
/// Score how well a grouped series `title` matches the search `query`,
/// in [0.1, 1.0]. `query` is expected to be lowercased already by the caller.
///
/// Exact match scores 1.0, prefix 0.8, substring 0.7; otherwise a simple
/// character-overlap ratio clamped to [0.1, 0.6].
fn compute_confidence(title: &str, query: &str) -> f32 {
    let title_lower = title.to_lowercase();
    if title_lower == query {
        1.0
    } else if title_lower.starts_with(query) || query.starts_with(&title_lower) {
        0.8
    } else if title_lower.contains(query) || query.contains(&title_lower) {
        0.7
    } else {
        // Simple character overlap ratio. Use char counts (not byte lengths)
        // for the denominator so multi-byte UTF-8 does not skew the score.
        let common: usize = query
            .chars()
            .filter(|c| title_lower.contains(*c))
            .count();
        let max_len = query.chars().count().max(title_lower.chars().count()).max(1);
        (common as f32 / max_len as f32).clamp(0.1, 0.6)
    }
}
|
||||
|
||||
/// Percent-encode `s` as a URL query component: RFC 3986 unreserved bytes
/// (ALPHA / DIGIT / '-' / '_' / '.' / '~') are copied through, everything
/// else is emitted as %XX.
fn urlencoded(s: &str) -> String {
    s.bytes().fold(String::with_capacity(s.len()), |mut acc, b| {
        match b {
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                acc.push(b as char);
            }
            _ => acc.push_str(&format!("%{:02X}", b)),
        }
        acc
    })
}
|
||||
|
||||
/// Mutable accumulator used by `search_series_impl` while grouping Google
/// Books search hits under one series name; flattened into a
/// `SeriesCandidate` once all hits are merged.
struct SeriesCandidateBuilder {
    title: String,
    // Union of all authors seen, deduplicated in insertion order.
    authors: Vec<String>,
    // First non-missing description encountered.
    description: Option<String>,
    // Union of all publishers seen, deduplicated in insertion order.
    publishers: Vec<String>,
    // Earliest publication year among the grouped volumes.
    start_year: Option<i32>,
    // Number of search hits grouped under this series name.
    volume_count: i32,
    // First cover image URL encountered.
    cover_url: Option<String>,
    // Google volume id of the first hit grouped into this series.
    external_id: String,
    // books.google.com link; overwritten per merge, so it reflects the last hit.
    external_url: Option<String>,
    metadata_json: serde_json::Value,
}
|
||||
295
apps/api/src/metadata_providers/mod.rs
Normal file
295
apps/api/src/metadata_providers/mod.rs
Normal file
@@ -0,0 +1,295 @@
|
||||
pub mod anilist;
|
||||
pub mod bedetheque;
|
||||
pub mod comicvine;
|
||||
pub mod google_books;
|
||||
pub mod open_library;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Configuration passed to providers (API keys, etc.)
#[derive(Debug, Clone, Default)]
pub struct ProviderConfig {
    /// Optional provider API key; required by some providers (e.g. ComicVine).
    pub api_key: Option<String>,
    /// Preferred language for metadata results (ISO 639-1: "en", "fr", "es"). Defaults to "en".
    /// NOTE(review): the derived `Default` actually yields an empty string, not
    /// "en" — callers should set this explicitly. Verify against call sites.
    pub language: String,
}
|
||||
|
||||
/// A candidate series returned by a provider search
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SeriesCandidate {
    /// Provider-specific identifier, usable with `get_series_books`.
    pub external_id: String,
    pub title: String,
    pub authors: Vec<String>,
    pub description: Option<String>,
    pub publishers: Vec<String>,
    pub start_year: Option<i32>,
    pub total_volumes: Option<i32>,
    pub cover_url: Option<String>,
    /// Link to the series page on the provider's site, when available.
    pub external_url: Option<String>,
    /// Match quality against the search query (providers visible in this
    /// module score within [0.1, 1.0]; higher is better).
    pub confidence: f32,
    /// Provider-specific extras that don't fit the common fields.
    pub metadata_json: serde_json::Value,
}
|
||||
|
||||
/// A candidate book within a series
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BookCandidate {
    /// Provider-specific identifier for this individual book/issue.
    pub external_book_id: String,
    pub title: String,
    /// Position within the series, when the provider exposes or infers one.
    pub volume_number: Option<i32>,
    pub authors: Vec<String>,
    pub isbn: Option<String>,
    pub summary: Option<String>,
    pub cover_url: Option<String>,
    pub page_count: Option<i32>,
    /// ISO 639-1 language code, when known.
    pub language: Option<String>,
    /// Publication date string as the provider supplies it (format varies).
    pub publish_date: Option<String>,
    /// Provider-specific extras that don't fit the common fields.
    pub metadata_json: serde_json::Value,
}
|
||||
|
||||
/// Trait that all metadata providers must implement
///
/// Methods return boxed futures (rather than `async fn`) so the trait stays
/// object-safe and implementations can be stored as `Box<dyn MetadataProvider>`.
pub trait MetadataProvider: Send + Sync {
    /// Stable provider identifier (e.g. "google_books"), matching the name
    /// accepted by `get_provider`.
    #[allow(dead_code)]
    fn name(&self) -> &str;

    /// Search for series matching the free-text `query`.
    fn search_series(
        &self,
        query: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<SeriesCandidate>, String>> + Send + '_>,
    >;

    /// List the books/volumes of a series previously returned by
    /// `search_series`, identified by its provider-specific `external_id`.
    fn get_series_books(
        &self,
        external_id: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<BookCandidate>, String>> + Send + '_>,
    >;
}
|
||||
|
||||
/// Factory function to get a provider by name
///
/// Returns `None` for unknown names so callers can surface an
/// "unsupported provider" error themselves.
pub fn get_provider(name: &str) -> Option<Box<dyn MetadataProvider>> {
    match name {
        "google_books" => Some(Box::new(google_books::GoogleBooksProvider)),
        "open_library" => Some(Box::new(open_library::OpenLibraryProvider)),
        "comicvine" => Some(Box::new(comicvine::ComicVineProvider)),
        "anilist" => Some(Box::new(anilist::AniListProvider)),
        "bedetheque" => Some(Box::new(bedetheque::BedethequeProvider)),
        _ => None,
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// End-to-end provider tests
|
||||
//
|
||||
// These tests hit real external APIs — run them explicitly with:
|
||||
// cargo test -p api providers_e2e -- --ignored --nocapture
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod providers_e2e {
|
||||
use super::*;
|
||||
|
||||
fn config_fr() -> ProviderConfig {
|
||||
ProviderConfig { api_key: None, language: "fr".to_string() }
|
||||
}
|
||||
|
||||
fn config_en() -> ProviderConfig {
|
||||
ProviderConfig { api_key: None, language: "en".to_string() }
|
||||
}
|
||||
|
||||
fn print_candidate(name: &str, c: &SeriesCandidate) {
|
||||
println!("\n=== {name} — best candidate ===");
|
||||
println!(" title: {:?}", c.title);
|
||||
println!(" external_id: {:?}", c.external_id);
|
||||
println!(" authors: {:?}", c.authors);
|
||||
println!(" description: {:?}", c.description.as_deref().map(|d| &d[..d.len().min(120)]));
|
||||
println!(" publishers: {:?}", c.publishers);
|
||||
println!(" start_year: {:?}", c.start_year);
|
||||
println!(" total_volumes: {:?}", c.total_volumes);
|
||||
println!(" cover_url: {}", c.cover_url.is_some());
|
||||
println!(" external_url: {}", c.external_url.is_some());
|
||||
println!(" confidence: {:.2}", c.confidence);
|
||||
println!(" metadata_json: {}", serde_json::to_string_pretty(&c.metadata_json).unwrap_or_default());
|
||||
}
|
||||
|
||||
    /// Print a sample (up to five) of the returned books, then per-field
    /// coverage tallies so a human can judge how complete a provider's
    /// data is.
    fn print_books(name: &str, books: &[BookCandidate]) {
        println!("\n=== {name} — {} books ===", books.len());
        for (i, b) in books.iter().take(5).enumerate() {
            println!(
                " [{}] vol={:?} title={:?} authors={} isbn={:?} pages={:?} lang={:?} date={:?} cover={}",
                i, b.volume_number, b.title, b.authors.len(), b.isbn, b.page_count, b.language, b.publish_date, b.cover_url.is_some()
            );
        }
        if books.len() > 5 { println!(" ... and {} more", books.len() - 5); }

        // Count how many books have each optional field populated.
        let with_vol = books.iter().filter(|b| b.volume_number.is_some()).count();
        let with_isbn = books.iter().filter(|b| b.isbn.is_some()).count();
        let with_authors = books.iter().filter(|b| !b.authors.is_empty()).count();
        let with_date = books.iter().filter(|b| b.publish_date.is_some()).count();
        let with_cover = books.iter().filter(|b| b.cover_url.is_some()).count();
        let with_pages = books.iter().filter(|b| b.page_count.is_some()).count();
        println!(" --- field coverage ---");
        println!(" volume_number: {with_vol}/{}", books.len());
        println!(" isbn: {with_isbn}/{}", books.len());
        println!(" authors: {with_authors}/{}", books.len());
        println!(" publish_date: {with_date}/{}", books.len());
        println!(" cover_url: {with_cover}/{}", books.len());
        println!(" page_count: {with_pages}/{}", books.len())
    }
|
||||
|
||||
// --- Google Books ---
|
||||
|
||||
    /// Live smoke test against the real Google Books API (hence `#[ignore]`):
    /// search "Blacksad", then list the books of the top candidate.
    #[tokio::test]
    #[ignore]
    async fn google_books_search_and_books() {
        let p = get_provider("google_books").unwrap();
        let cfg = config_en();

        let candidates = p.search_series("Blacksad", &cfg).await.unwrap();
        assert!(!candidates.is_empty(), "google_books: no results for Blacksad");
        print_candidate("google_books", &candidates[0]);

        let books = p.get_series_books(&candidates[0].external_id, &cfg).await.unwrap();
        print_books("google_books", &books);
        assert!(!books.is_empty(), "google_books: no books returned");
    }
|
||||
|
||||
// --- Open Library ---
|
||||
|
||||
/// Live smoke test against the Open Library API (network I/O, hence #[ignore]).
/// Mirrors the Google Books test: search a known series, then list its books.
#[tokio::test]
#[ignore]
async fn open_library_search_and_books() {
    let p = get_provider("open_library").unwrap();
    let cfg = config_en();

    let candidates = p.search_series("Sandman Neil Gaiman", &cfg).await.unwrap();
    assert!(!candidates.is_empty(), "open_library: no results for Sandman");
    print_candidate("open_library", &candidates[0]);

    let books = p.get_series_books(&candidates[0].external_id, &cfg).await.unwrap();
    print_books("open_library", &books);
    assert!(!books.is_empty(), "open_library: no books returned");
}
|
||||
|
||||
// --- AniList ---
|
||||
|
||||
/// Live AniList test for a FINISHED series (network I/O, hence #[ignore]).
/// For a completed manga the provider is expected to populate total_volumes,
/// description, authors, and a "FINISHED" status in metadata_json.
#[tokio::test]
#[ignore]
async fn anilist_search_finished() {
    let p = get_provider("anilist").unwrap();
    let cfg = config_fr();

    let candidates = p.search_series("Death Note", &cfg).await.unwrap();
    assert!(!candidates.is_empty(), "anilist: no results for Death Note");
    print_candidate("anilist (finished)", &candidates[0]);

    let best = &candidates[0];
    assert!(best.total_volumes.is_some(), "anilist: finished series should have total_volumes");
    assert!(best.description.is_some(), "anilist: should have description");
    assert!(!best.authors.is_empty(), "anilist: should have authors");

    // Provider-specific publication status travels in metadata_json.
    let status = best.metadata_json.get("status").and_then(|s| s.as_str());
    assert_eq!(status, Some("FINISHED"), "anilist: Death Note should be FINISHED");

    // Death Note has a known, fixed volume count — assert a lower bound.
    let books = p.get_series_books(&best.external_id, &cfg).await.unwrap();
    print_books("anilist (Death Note)", &books);
    assert!(books.len() >= 12, "anilist: Death Note should have ≥12 volumes, got {}", books.len());
}
|
||||
|
||||
/// Live AniList test for a RELEASING (ongoing) series (network I/O, #[ignore]).
/// Volume info may legitimately be absent for ongoing series, so it is only
/// printed for inspection, not asserted.
#[tokio::test]
#[ignore]
async fn anilist_search_ongoing() {
    let p = get_provider("anilist").unwrap();
    let cfg = config_fr();

    let candidates = p.search_series("One Piece", &cfg).await.unwrap();
    assert!(!candidates.is_empty(), "anilist: no results for One Piece");
    print_candidate("anilist (ongoing)", &candidates[0]);

    let best = &candidates[0];
    let status = best.metadata_json.get("status").and_then(|s| s.as_str());
    assert_eq!(status, Some("RELEASING"), "anilist: One Piece should be RELEASING");

    // Informational only: where the volume count came from, and its value.
    let volume_source = best.metadata_json.get("volume_source").and_then(|s| s.as_str());
    println!(" volume_source: {:?}", volume_source);
    println!(" total_volumes: {:?}", best.total_volumes);
}
|
||||
|
||||
// --- Bédéthèque ---
|
||||
|
||||
/// Live Bédéthèque test (network I/O, hence #[ignore]). A completed French BD
/// series should have all core fields populated plus enriched genres/status in
/// metadata_json, and a full volume list.
#[tokio::test]
#[ignore]
async fn bedetheque_search_and_books() {
    let p = get_provider("bedetheque").unwrap();
    let cfg = config_fr();

    let candidates = p.search_series("De Cape et de Crocs", &cfg).await.unwrap();
    assert!(!candidates.is_empty(), "bedetheque: no results");
    print_candidate("bedetheque", &candidates[0]);

    // Core series-level fields should all be present for this series.
    let best = &candidates[0];
    assert!(best.description.is_some(), "bedetheque: should have description");
    assert!(!best.authors.is_empty(), "bedetheque: should have authors");
    assert!(!best.publishers.is_empty(), "bedetheque: should have publishers");
    assert!(best.start_year.is_some(), "bedetheque: should have start_year");
    assert!(best.total_volumes.is_some(), "bedetheque: should have total_volumes");

    // Enriched metadata_json
    let mj = &best.metadata_json;
    assert!(mj.get("genres").and_then(|g| g.as_array()).map(|a| !a.is_empty()).unwrap_or(false), "bedetheque: should have genres");
    assert!(mj.get("status").and_then(|s| s.as_str()).is_some(), "bedetheque: should have status");

    let books = p.get_series_books(&best.external_id, &cfg).await.unwrap();
    print_books("bedetheque", &books);
    assert!(books.len() >= 12, "bedetheque: De Cape et de Crocs should have ≥12 volumes, got {}", books.len());
}
|
||||
|
||||
// --- ComicVine (needs API key) ---
|
||||
|
||||
/// ComicVine requires an API key; this test documents the observed behaviour
/// when none is configured (an error is expected) rather than asserting it,
/// so it never fails regardless of outcome. Network I/O, hence #[ignore].
#[tokio::test]
#[ignore]
async fn comicvine_no_key() {
    let p = get_provider("comicvine").unwrap();
    let cfg = config_en();

    let result = p.search_series("Batman", &cfg).await;
    println!("\n=== comicvine (no key) ===");
    match result {
        Ok(c) => println!(" returned {} candidates (unexpected without key)", c.len()),
        Err(e) => println!(" expected error: {e}"),
    }
}
|
||||
|
||||
// --- Cross-provider comparison ---
|
||||
|
||||
/// Side-by-side field-coverage comparison of all four providers for the same
/// query ("Blacksad"). Purely informational — prints one summary line per
/// provider and never fails. Network I/O, hence #[ignore].
#[tokio::test]
#[ignore]
async fn cross_provider_blacksad() {
    println!("\n{}", "=".repeat(60));
    println!(" Cross-provider comparison: Blacksad");
    println!("{}\n", "=".repeat(60));

    // Each provider is queried with the config language it works best in.
    let providers: Vec<(&str, ProviderConfig)> = vec![
        ("google_books", config_en()),
        ("open_library", config_en()),
        ("anilist", config_fr()),
        ("bedetheque", config_fr()),
    ];

    for (name, cfg) in &providers {
        let p = get_provider(name).unwrap();
        match p.search_series("Blacksad", cfg).await {
            Ok(candidates) if !candidates.is_empty() => {
                let b = &candidates[0];
                println!("[{name}] title={:?} authors={} desc={} pubs={} year={:?} vols={:?} cover={} url={} conf={:.2}",
                    b.title, b.authors.len(), b.description.is_some(), b.publishers.len(),
                    b.start_year, b.total_volumes, b.cover_url.is_some(), b.external_url.is_some(), b.confidence);
            }
            Ok(_) => println!("[{name}] no results"),
            Err(e) => println!("[{name}] error: {e}"),
        }
    }
}
|
||||
}
|
||||
351
apps/api/src/metadata_providers/open_library.rs
Normal file
351
apps/api/src/metadata_providers/open_library.rs
Normal file
@@ -0,0 +1,351 @@
|
||||
use super::{BookCandidate, MetadataProvider, ProviderConfig, SeriesCandidate};
|
||||
|
||||
/// Metadata provider backed by the public Open Library API.
pub struct OpenLibraryProvider;

impl MetadataProvider for OpenLibraryProvider {
    fn name(&self) -> &str {
        "open_library"
    }

    /// Boxed-future shim: clones the borrowed arguments so the async body
    /// borrows nothing but `&self`, then delegates to `search_series_impl`.
    fn search_series(
        &self,
        query: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<SeriesCandidate>, String>> + Send + '_>,
    > {
        let query = query.to_string();
        let config = config.clone();
        Box::pin(async move { search_series_impl(&query, &config).await })
    }

    /// Same shim pattern, delegating to `get_series_books_impl`.
    fn get_series_books(
        &self,
        external_id: &str,
        config: &ProviderConfig,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<Vec<BookCandidate>, String>> + Send + '_>,
    > {
        let external_id = external_id.to_string();
        let config = config.clone();
        Box::pin(async move { get_series_books_impl(&external_id, &config).await })
    }
}
|
||||
|
||||
/// Search Open Library for series candidates matching `query`.
///
/// Open Library's search endpoint has no first-class "series" entity, so
/// individual work docs are grouped by a heuristic series name derived from
/// each title (`extract_series_name`); each group is collapsed into one
/// `SeriesCandidate` whose volume estimate is the number of grouped docs.
/// Returns up to 10 candidates, highest confidence first.
async fn search_series_impl(
    query: &str,
    config: &ProviderConfig,
) -> Result<Vec<SeriesCandidate>, String> {
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(15))
        .build()
        .map_err(|e| format!("failed to build HTTP client: {e}"))?;

    // Open Library uses 3-letter language codes
    let ol_lang = match config.language.as_str() {
        "fr" => "fre",
        "es" => "spa",
        _ => "eng",
    };

    let url = format!(
        "https://openlibrary.org/search.json?title={}&limit=20&language={}",
        urlencoded(query),
        ol_lang,
    );

    let resp = client
        .get(&url)
        .send()
        .await
        .map_err(|e| format!("Open Library request failed: {e}"))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let text = resp.text().await.unwrap_or_default();
        return Err(format!("Open Library returned {status}: {text}"));
    }

    let data: serde_json::Value = resp
        .json()
        .await
        .map_err(|e| format!("Failed to parse Open Library response: {e}"))?;

    // No "docs" array means no results — not an error.
    let docs = match data.get("docs").and_then(|d| d.as_array()) {
        Some(docs) => docs,
        None => return Ok(vec![]),
    };

    let query_lower = query.to_lowercase();
    // Accumulates one builder per heuristic series name.
    let mut series_map: std::collections::HashMap<String, SeriesCandidateBuilder> =
        std::collections::HashMap::new();

    for doc in docs {
        let title = doc
            .get("title")
            .and_then(|t| t.as_str())
            .unwrap_or("")
            .to_string();
        let authors: Vec<String> = doc
            .get("author_name")
            .and_then(|a| a.as_array())
            .map(|arr| arr.iter().filter_map(|v| v.as_str().map(String::from)).collect())
            .unwrap_or_default();
        // Cap publishers at 3 per doc to keep the merged list short.
        let publishers: Vec<String> = doc
            .get("publisher")
            .and_then(|a| a.as_array())
            .map(|arr| {
                let mut pubs: Vec<String> = arr.iter().filter_map(|v| v.as_str().map(String::from)).collect();
                pubs.truncate(3);
                pubs
            })
            .unwrap_or_default();
        let first_publish_year = doc
            .get("first_publish_year")
            .and_then(|y| y.as_i64())
            .map(|y| y as i32);
        let cover_i = doc.get("cover_i").and_then(|c| c.as_i64());
        let cover_url = cover_i.map(|id| format!("https://covers.openlibrary.org/b/id/{}-M.jpg", id));
        let key = doc
            .get("key")
            .and_then(|k| k.as_str())
            .unwrap_or("")
            .to_string();

        let series_name = extract_series_name(&title);

        // First doc seen for a series name fixes its external id/URL.
        let entry = series_map
            .entry(series_name.clone())
            .or_insert_with(|| SeriesCandidateBuilder {
                title: series_name.clone(),
                authors: vec![],
                description: None,
                publishers: vec![],
                start_year: None,
                volume_count: 0,
                cover_url: None,
                external_id: key.clone(),
                external_url: if key.is_empty() {
                    None
                } else {
                    Some(format!("https://openlibrary.org{}", key))
                },
            });

        // Each grouped doc counts as one presumed volume.
        entry.volume_count += 1;

        // Merge authors/publishers without duplicates, preserving order.
        for a in &authors {
            if !entry.authors.contains(a) {
                entry.authors.push(a.clone());
            }
        }
        for p in &publishers {
            if !entry.publishers.contains(p) {
                entry.publishers.push(p.clone());
            }
        }
        // Keep the earliest first_publish_year seen across the group.
        if (entry.start_year.is_none() || first_publish_year.is_some_and(|y| entry.start_year.unwrap() > y))
            && first_publish_year.is_some()
        {
            entry.start_year = first_publish_year;
        }
        // First cover seen wins.
        if entry.cover_url.is_none() {
            entry.cover_url = cover_url;
        }
    }

    let mut candidates: Vec<SeriesCandidate> = series_map
        .into_values()
        .map(|b| {
            let confidence = compute_confidence(&b.title, &query_lower);
            SeriesCandidate {
                external_id: b.external_id,
                title: b.title,
                authors: b.authors,
                description: b.description,
                publishers: b.publishers,
                start_year: b.start_year,
                // Only groups with more than one doc look like a real series.
                total_volumes: if b.volume_count > 1 { Some(b.volume_count) } else { None },
                cover_url: b.cover_url,
                external_url: b.external_url,
                confidence,
                metadata_json: serde_json::json!({}),
            }
        })
        .collect();

    // Highest confidence first; return at most 10 candidates.
    candidates.sort_by(|a, b| b.confidence.partial_cmp(&a.confidence).unwrap_or(std::cmp::Ordering::Equal));
    candidates.truncate(10);
    Ok(candidates)
}
|
||||
|
||||
/// Fetch book candidates for an Open Library work (`external_id` is a work
/// key such as "/works/OL...W").
///
/// Open Library has no series endpoint, so this resolves the work's title,
/// strips its volume marker to get a series name, re-searches by that name,
/// and treats each returned doc as one volume.
async fn get_series_books_impl(
    external_id: &str,
    _config: &ProviderConfig,
) -> Result<Vec<BookCandidate>, String> {
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(15))
        .build()
        .map_err(|e| format!("failed to build HTTP client: {e}"))?;

    // Fetch the work to get its title for series search
    let url = format!("https://openlibrary.org{}.json", external_id);
    let resp = client.get(&url).send().await.map_err(|e| format!("Open Library request failed: {e}"))?;

    // A failed work lookup degrades to an empty object (and thus an empty
    // title) rather than aborting the whole operation.
    let work: serde_json::Value = if resp.status().is_success() {
        resp.json().await.map_err(|e| format!("Failed to parse response: {e}"))?
    } else {
        serde_json::json!({})
    };

    let title = work.get("title").and_then(|t| t.as_str()).unwrap_or("");
    let series_name = extract_series_name(title);

    // Search for editions of this series
    let search_url = format!(
        "https://openlibrary.org/search.json?title={}&limit=40",
        urlencoded(&series_name)
    );
    let resp = client.get(&search_url).send().await.map_err(|e| format!("Open Library search failed: {e}"))?;

    // A failed search yields no books rather than an error.
    if !resp.status().is_success() {
        return Ok(vec![]);
    }

    let data: serde_json::Value = resp.json().await.map_err(|e| format!("Failed to parse response: {e}"))?;
    let docs = match data.get("docs").and_then(|d| d.as_array()) {
        Some(docs) => docs,
        None => return Ok(vec![]),
    };

    let mut books: Vec<BookCandidate> = docs
        .iter()
        .map(|doc| {
            let title = doc.get("title").and_then(|t| t.as_str()).unwrap_or("").to_string();
            let authors: Vec<String> = doc
                .get("author_name")
                .and_then(|a| a.as_array())
                .map(|arr| arr.iter().filter_map(|v| v.as_str().map(String::from)).collect())
                .unwrap_or_default();
            // Only the first listed ISBN is kept.
            let isbn = doc
                .get("isbn")
                .and_then(|a| a.as_array())
                .and_then(|arr| arr.first())
                .and_then(|v| v.as_str())
                .map(String::from);
            let page_count = doc
                .get("number_of_pages_median")
                .and_then(|n| n.as_i64())
                .map(|n| n as i32);
            let cover_i = doc.get("cover_i").and_then(|c| c.as_i64());
            let cover_url = cover_i.map(|id| format!("https://covers.openlibrary.org/b/id/{}-M.jpg", id));
            // First listed language code only.
            let language = doc
                .get("language")
                .and_then(|a| a.as_array())
                .and_then(|arr| arr.first())
                .and_then(|v| v.as_str())
                .map(String::from);
            let publish_date = doc
                .get("first_publish_year")
                .and_then(|y| y.as_i64())
                .map(|y| y.to_string());
            let key = doc.get("key").and_then(|k| k.as_str()).unwrap_or("").to_string();
            let volume_number = extract_volume_number(&title);

            BookCandidate {
                external_book_id: key,
                title,
                volume_number,
                authors,
                isbn,
                summary: None,
                cover_url,
                page_count,
                language,
                publish_date,
                metadata_json: serde_json::json!({}),
            }
        })
        .collect();

    // Unknown volume numbers sort last (sentinel 999).
    books.sort_by_key(|b| b.volume_number.unwrap_or(999));
    Ok(books)
}
|
||||
|
||||
fn extract_series_name(title: &str) -> String {
|
||||
let re_patterns = [
|
||||
r"(?i)\s*[-–—]\s*(?:vol(?:ume)?\.?\s*|tome\s*|t\.\s*|#)\s*\d+.*$",
|
||||
r"(?i)\s*,?\s*(?:vol(?:ume)?\.?\s*|tome\s*|t\.\s*|#)\s*\d+.*$",
|
||||
r"\s*\(\d+\)\s*$",
|
||||
r"\s+\d+\s*$",
|
||||
];
|
||||
let mut result = title.to_string();
|
||||
for pattern in &re_patterns {
|
||||
if let Ok(re) = regex::Regex::new(pattern) {
|
||||
let cleaned = re.replace(&result, "").to_string();
|
||||
if !cleaned.is_empty() {
|
||||
result = cleaned;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
result.trim().to_string()
|
||||
}
|
||||
|
||||
fn extract_volume_number(title: &str) -> Option<i32> {
|
||||
let patterns = [
|
||||
r"(?i)(?:vol(?:ume)?\.?\s*|tome\s*|t\.\s*|#)\s*(\d+)",
|
||||
r"\((\d+)\)\s*$",
|
||||
r"\b(\d+)\s*$",
|
||||
];
|
||||
for pattern in &patterns {
|
||||
if let Ok(re) = regex::Regex::new(pattern) {
|
||||
if let Some(caps) = re.captures(title) {
|
||||
if let Some(num) = caps.get(1).and_then(|m| m.as_str().parse::<i32>().ok()) {
|
||||
return Some(num);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Crude title/query similarity score in [0.1, 1.0].
///
/// Exact match → 1.0, prefix relationship → 0.8, substring relationship →
/// 0.7; otherwise a character-overlap ratio clamped to [0.1, 0.6]. The
/// caller is expected to pass `query` already lowercased.
fn compute_confidence(title: &str, query: &str) -> f32 {
    let normalized = title.to_lowercase();
    if normalized == query {
        return 1.0;
    }
    if normalized.starts_with(query) || query.starts_with(&normalized) {
        return 0.8;
    }
    if normalized.contains(query) || query.contains(&normalized) {
        return 0.7;
    }
    // Fallback: fraction of query characters appearing anywhere in the title.
    let overlap = query.chars().filter(|c| normalized.contains(*c)).count();
    let denominator = query.len().max(normalized.len()).max(1);
    (overlap as f32 / denominator as f32).clamp(0.1, 0.6)
}
|
||||
|
||||
/// Percent-encode `s` for use in a URL query: RFC 3986 unreserved ASCII
/// bytes pass through untouched, every other byte becomes "%XX" (uppercase
/// hex), one escape per UTF-8 byte.
fn urlencoded(s: &str) -> String {
    s.bytes()
        .map(|b| match b {
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                (b as char).to_string()
            }
            other => format!("%{:02X}", other),
        })
        .collect()
}
|
||||
|
||||
/// Mutable accumulator used while grouping Open Library search docs into a
/// single series candidate (see `search_series_impl`).
struct SeriesCandidateBuilder {
    // Heuristic series name (title with volume markers stripped).
    title: String,
    // De-duplicated author names across the grouped docs.
    authors: Vec<String>,
    // Never assigned during grouping — the search endpoint feeds no field
    // into it; kept so the final SeriesCandidate mapping stays uniform.
    description: Option<String>,
    // De-duplicated publishers (at most 3 taken per doc).
    publishers: Vec<String>,
    // Earliest first_publish_year seen in the group.
    start_year: Option<i32>,
    // Number of docs grouped under this series name (volume estimate).
    volume_count: i32,
    // First cover URL seen in the group.
    cover_url: Option<String>,
    // Open Library key of the first grouped doc.
    external_id: String,
    // Web URL derived from the key, when the key is non-empty.
    external_url: Option<String>,
}
|
||||
796
apps/api/src/metadata_refresh.rs
Normal file
796
apps/api/src/metadata_refresh.rs
Normal file
@@ -0,0 +1,796 @@
|
||||
use axum::{
|
||||
extract::{Path as AxumPath, State},
|
||||
Json,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::{PgPool, Row};
|
||||
use uuid::Uuid;
|
||||
use utoipa::ToSchema;
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::{error::ApiError, metadata_providers, state::AppState};
|
||||
use crate::metadata_batch::{load_provider_config_from_pool, is_job_cancelled, update_progress};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DTOs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Request body for POST /metadata/refresh.
#[derive(Deserialize, ToSchema)]
pub struct MetadataRefreshRequest {
    // UUID (as a string) of the library whose approved links to refresh;
    // parsed and validated in `start_refresh`.
    pub library_id: String,
}
|
||||
|
||||
/// A single field change: old → new
#[derive(Serialize, Clone)]
struct FieldDiff {
    // Name of the changed field.
    field: String,
    // Previous value; omitted from the serialized JSON when None.
    #[serde(skip_serializing_if = "Option::is_none")]
    old: Option<serde_json::Value>,
    // New value; omitted from the serialized JSON when None.
    #[serde(skip_serializing_if = "Option::is_none")]
    new: Option<serde_json::Value>,
}
|
||||
|
||||
/// Per-book changes
#[derive(Serialize, Clone)]
struct BookDiff {
    // Local book UUID, stringified.
    book_id: String,
    // Local book title at refresh time.
    title: String,
    // Volume number from the external candidate, when known.
    volume: Option<i32>,
    // Individual field diffs applied to this book.
    changes: Vec<FieldDiff>,
}
|
||||
|
||||
/// Per-series change report
#[derive(Serialize, Clone)]
struct SeriesRefreshResult {
    // Local series name the refreshed link refers to.
    series_name: String,
    // Provider the data was re-fetched from.
    provider: String,
    status: String, // "updated", "unchanged", "error"
    // Field-level diffs applied to the series record.
    series_changes: Vec<FieldDiff>,
    // Per-book diffs for matched local books.
    book_changes: Vec<BookDiff>,
    // Populated only when status == "error"; omitted from JSON otherwise.
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
}
|
||||
|
||||
/// Response DTO for the report endpoint
#[derive(Serialize, ToSchema)]
pub struct MetadataRefreshReportDto {
    #[schema(value_type = String)]
    pub job_id: Uuid,
    // Job status string from the index_jobs row.
    pub status: String,
    // Total approved links considered (read back from index_jobs.total_files).
    pub total_links: i64,
    // Count of series whose metadata changed.
    pub refreshed: i64,
    // Count of series re-fetched without changes.
    pub unchanged: i64,
    // Count of series whose refresh failed.
    pub errors: i64,
    // Per-series diff entries as stored in stats_json ("unchanged" entries
    // are filtered out at write time — see process_metadata_refresh).
    pub changes: serde_json::Value,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// POST /metadata/refresh — Trigger a metadata refresh job
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Create a `metadata_refresh` job for a library and run it in the background.
///
/// Validates the library and requires at least one approved metadata link.
/// If a pending/running refresh job already exists for the library, its id is
/// returned with status "already_running" instead of creating a duplicate.
#[utoipa::path(
    post,
    path = "/metadata/refresh",
    tag = "metadata",
    request_body = MetadataRefreshRequest,
    responses(
        (status = 200, description = "Job created"),
        (status = 400, description = "Bad request"),
    ),
    security(("Bearer" = []))
)]
pub async fn start_refresh(
    State(state): State<AppState>,
    Json(body): Json<MetadataRefreshRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let library_id: Uuid = body
        .library_id
        .parse()
        .map_err(|_| ApiError::bad_request("invalid library_id"))?;

    // Verify library exists
    sqlx::query("SELECT 1 FROM libraries WHERE id = $1")
        .bind(library_id)
        .fetch_optional(&state.pool)
        .await?
        .ok_or_else(|| ApiError::not_found("library not found"))?;

    // Check no existing running metadata_refresh job for this library
    let existing: Option<Uuid> = sqlx::query_scalar(
        "SELECT id FROM index_jobs WHERE library_id = $1 AND type = 'metadata_refresh' AND status IN ('pending', 'running') LIMIT 1",
    )
    .bind(library_id)
    .fetch_optional(&state.pool)
    .await?;

    if let Some(existing_id) = existing {
        return Ok(Json(serde_json::json!({
            "id": existing_id.to_string(),
            "status": "already_running",
        })));
    }

    // Check there are approved links to refresh
    let link_count: i64 = sqlx::query_scalar(
        "SELECT COUNT(*) FROM external_metadata_links WHERE library_id = $1 AND status = 'approved'",
    )
    .bind(library_id)
    .fetch_one(&state.pool)
    .await?;

    if link_count == 0 {
        return Err(ApiError::bad_request("No approved metadata links to refresh for this library"));
    }

    let job_id = Uuid::new_v4();
    sqlx::query(
        "INSERT INTO index_jobs (id, library_id, type, status) VALUES ($1, $2, 'metadata_refresh', 'pending')",
    )
    .bind(job_id)
    .bind(library_id)
    .execute(&state.pool)
    .await?;

    // Spawn the background processing task
    let pool = state.pool.clone();
    tokio::spawn(async move {
        if let Err(e) = process_metadata_refresh(&pool, job_id, library_id).await {
            warn!("[METADATA_REFRESH] job {job_id} failed: {e}");
            // Best-effort: a failure to record the failed status is ignored
            // (the warn! above is the only trace).
            let _ = sqlx::query(
                "UPDATE index_jobs SET status = 'failed', error_opt = $2, finished_at = NOW() WHERE id = $1",
            )
            .bind(job_id)
            .bind(e.to_string())
            .execute(&pool)
            .await;
        }
    });

    // Respond immediately; progress is tracked on the index_jobs row.
    Ok(Json(serde_json::json!({
        "id": job_id.to_string(),
        "status": "pending",
    })))
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// GET /metadata/refresh/:id/report — Refresh report from stats_json
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Read the stored refresh report for a `metadata_refresh` job from its
/// stats_json. While the job has no stats yet, counters default to zero and
/// `changes` to an empty array.
#[utoipa::path(
    get,
    path = "/metadata/refresh/{id}/report",
    tag = "metadata",
    params(("id" = String, Path, description = "Job UUID")),
    responses(
        (status = 200, body = MetadataRefreshReportDto),
        (status = 404, description = "Job not found"),
    ),
    security(("Bearer" = []))
)]
pub async fn get_refresh_report(
    State(state): State<AppState>,
    AxumPath(job_id): AxumPath<Uuid>,
) -> Result<Json<MetadataRefreshReportDto>, ApiError> {
    // Only metadata_refresh jobs are visible through this endpoint.
    let row = sqlx::query(
        "SELECT status, stats_json, total_files FROM index_jobs WHERE id = $1 AND type = 'metadata_refresh'",
    )
    .bind(job_id)
    .fetch_optional(&state.pool)
    .await?
    .ok_or_else(|| ApiError::not_found("job not found"))?;

    let job_status: String = row.get("status");
    let stats: Option<serde_json::Value> = row.get("stats_json");
    let total_files: Option<i32> = row.get("total_files");

    // stats_json is written by process_metadata_refresh on completion; every
    // missing counter falls back to 0, missing changes to [].
    let (refreshed, unchanged, errors, changes) = if let Some(ref s) = stats {
        (
            s.get("refreshed").and_then(|v| v.as_i64()).unwrap_or(0),
            s.get("unchanged").and_then(|v| v.as_i64()).unwrap_or(0),
            s.get("errors").and_then(|v| v.as_i64()).unwrap_or(0),
            s.get("changes").cloned().unwrap_or(serde_json::json!([])),
        )
    } else {
        (0, 0, 0, serde_json::json!([]))
    };

    Ok(Json(MetadataRefreshReportDto {
        job_id,
        status: job_status,
        // total_files doubles as "total links" for this job type.
        total_links: total_files.unwrap_or(0) as i64,
        refreshed,
        unchanged,
        errors,
        changes,
    }))
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Background processing
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Background worker for a `metadata_refresh` job: re-fetches every approved
/// link of the library, records per-series diffs, and persists a summary in
/// the job's stats_json. Honors cooperative cancellation between links and
/// rate-limits provider calls to one per second.
async fn process_metadata_refresh(
    pool: &PgPool,
    job_id: Uuid,
    library_id: Uuid,
) -> Result<(), String> {
    // Set job to running
    sqlx::query("UPDATE index_jobs SET status = 'running', started_at = NOW() WHERE id = $1")
        .bind(job_id)
        .execute(pool)
        .await
        .map_err(|e| e.to_string())?;

    // Get all approved links for this library
    let links: Vec<(Uuid, String, String, String)> = sqlx::query_as(
        r#"
        SELECT id, series_name, provider, external_id
        FROM external_metadata_links
        WHERE library_id = $1 AND status = 'approved'
        ORDER BY series_name
        "#,
    )
    .bind(library_id)
    .fetch_all(pool)
    .await
    .map_err(|e| e.to_string())?;

    // total_files doubles as "total links" for this job type.
    let total = links.len() as i32;
    sqlx::query("UPDATE index_jobs SET total_files = $2 WHERE id = $1")
        .bind(job_id)
        .bind(total)
        .execute(pool)
        .await
        .map_err(|e| e.to_string())?;

    let mut processed = 0i32;
    let mut refreshed = 0i32;
    let mut unchanged = 0i32;
    let mut errors = 0i32;
    let mut all_results: Vec<SeriesRefreshResult> = Vec::new();

    for (link_id, series_name, provider_name, external_id) in &links {
        // Check cancellation
        if is_job_cancelled(pool, job_id).await {
            sqlx::query(
                "UPDATE index_jobs SET status = 'cancelled', finished_at = NOW() WHERE id = $1",
            )
            .bind(job_id)
            .execute(pool)
            .await
            .map_err(|e| e.to_string())?;
            return Ok(());
        }

        match refresh_link(pool, *link_id, library_id, series_name, provider_name, external_id).await {
            Ok(result) => {
                if result.status == "updated" {
                    refreshed += 1;
                    info!("[METADATA_REFRESH] job={job_id} updated series='{series_name}' via {provider_name}");
                } else {
                    unchanged += 1;
                }
                all_results.push(result);
            }
            Err(e) => {
                // A failing link becomes an error entry; the job keeps going.
                errors += 1;
                warn!("[METADATA_REFRESH] job={job_id} error on series='{series_name}': {e}");
                all_results.push(SeriesRefreshResult {
                    series_name: series_name.clone(),
                    provider: provider_name.clone(),
                    status: "error".to_string(),
                    series_changes: vec![],
                    book_changes: vec![],
                    error: Some(e),
                });
            }
        }

        processed += 1;
        update_progress(pool, job_id, processed, total, series_name).await;

        // Rate limit: 1s delay between provider calls
        tokio::time::sleep(std::time::Duration::from_millis(1000)).await;
    }

    // Only keep series that have changes or errors (filter out "unchanged")
    let changes_only: Vec<&SeriesRefreshResult> = all_results
        .iter()
        .filter(|r| r.status != "unchanged")
        .collect();

    // Build stats summary
    let stats = serde_json::json!({
        "total_links": total,
        "refreshed": refreshed,
        "unchanged": unchanged,
        "errors": errors,
        "changes": changes_only,
    });

    sqlx::query(
        "UPDATE index_jobs SET status = 'success', finished_at = NOW(), progress_percent = 100, stats_json = $2 WHERE id = $1",
    )
    .bind(job_id)
    .bind(stats)
    .execute(pool)
    .await
    .map_err(|e| e.to_string())?;

    info!("[METADATA_REFRESH] job={job_id} completed: {refreshed} updated, {unchanged} unchanged, {errors} errors");

    Ok(())
}
|
||||
|
||||
/// Refresh a single approved metadata link: re-fetch from provider, compare, sync, return diff
|
||||
async fn refresh_link(
|
||||
pool: &PgPool,
|
||||
link_id: Uuid,
|
||||
library_id: Uuid,
|
||||
series_name: &str,
|
||||
provider_name: &str,
|
||||
external_id: &str,
|
||||
) -> Result<SeriesRefreshResult, String> {
|
||||
let provider = metadata_providers::get_provider(provider_name)
|
||||
.ok_or_else(|| format!("Unknown provider: {provider_name}"))?;
|
||||
|
||||
let config = load_provider_config_from_pool(pool, provider_name).await;
|
||||
|
||||
let mut series_changes: Vec<FieldDiff> = Vec::new();
|
||||
let mut book_changes: Vec<BookDiff> = Vec::new();
|
||||
|
||||
// ── Series-level refresh ──────────────────────────────────────────────
|
||||
let candidates = provider
|
||||
.search_series(series_name, &config)
|
||||
.await
|
||||
.map_err(|e| format!("provider search error: {e}"))?;
|
||||
|
||||
let candidate = candidates
|
||||
.iter()
|
||||
.find(|c| c.external_id == external_id)
|
||||
.or_else(|| candidates.first());
|
||||
|
||||
if let Some(candidate) = candidate {
|
||||
// Update link metadata_json
|
||||
sqlx::query(
|
||||
r#"
|
||||
UPDATE external_metadata_links
|
||||
SET metadata_json = $2,
|
||||
total_volumes_external = $3,
|
||||
updated_at = NOW()
|
||||
WHERE id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(link_id)
|
||||
.bind(&candidate.metadata_json)
|
||||
.bind(candidate.total_volumes)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
// Diff + sync series metadata
|
||||
series_changes = sync_series_with_diff(pool, library_id, series_name, candidate).await?;
|
||||
}
|
||||
|
||||
// ── Book-level refresh ────────────────────────────────────────────────
|
||||
let books = provider
|
||||
.get_series_books(external_id, &config)
|
||||
.await
|
||||
.map_err(|e| format!("provider books error: {e}"))?;
|
||||
|
||||
// Delete existing external_book_metadata for this link
|
||||
sqlx::query("DELETE FROM external_book_metadata WHERE link_id = $1")
|
||||
.bind(link_id)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
// Pre-fetch local books
|
||||
let local_books: Vec<(Uuid, Option<i32>, String)> = sqlx::query_as(
|
||||
r#"
|
||||
SELECT id, volume, title FROM books
|
||||
WHERE library_id = $1
|
||||
AND COALESCE(NULLIF(series, ''), 'unclassified') = $2
|
||||
ORDER BY volume NULLS LAST,
|
||||
REGEXP_REPLACE(LOWER(title), '[0-9].*$', ''),
|
||||
COALESCE((REGEXP_MATCH(LOWER(title), '\d+'))[1]::int, 0),
|
||||
title ASC
|
||||
"#,
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(series_name)
|
||||
.fetch_all(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let local_books_with_pos: Vec<(Uuid, i32, String)> = local_books
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, (id, vol, title))| (*id, vol.unwrap_or((idx + 1) as i32), title.clone()))
|
||||
.collect();
|
||||
|
||||
let mut matched_local_ids = std::collections::HashSet::new();
|
||||
|
||||
for (ext_idx, book) in books.iter().enumerate() {
|
||||
let ext_vol = book.volume_number.unwrap_or((ext_idx + 1) as i32);
|
||||
|
||||
// Match by volume number
|
||||
let mut local_book_id: Option<Uuid> = local_books_with_pos
|
||||
.iter()
|
||||
.find(|(id, v, _)| *v == ext_vol && !matched_local_ids.contains(id))
|
||||
.map(|(id, _, _)| *id);
|
||||
|
||||
// Match by title containment
|
||||
if local_book_id.is_none() {
|
||||
let ext_title_lower = book.title.to_lowercase();
|
||||
local_book_id = local_books_with_pos
|
||||
.iter()
|
||||
.find(|(id, _, local_title)| {
|
||||
if matched_local_ids.contains(id) {
|
||||
return false;
|
||||
}
|
||||
let local_lower = local_title.to_lowercase();
|
||||
local_lower.contains(&ext_title_lower) || ext_title_lower.contains(&local_lower)
|
||||
})
|
||||
.map(|(id, _, _)| *id);
|
||||
}
|
||||
|
||||
if let Some(id) = local_book_id {
|
||||
matched_local_ids.insert(id);
|
||||
}
|
||||
|
||||
// Insert external_book_metadata
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO external_book_metadata
|
||||
(link_id, book_id, external_book_id, volume_number, title, authors, isbn, summary, cover_url, page_count, language, publish_date, metadata_json)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
|
||||
"#,
|
||||
)
|
||||
.bind(link_id)
|
||||
.bind(local_book_id)
|
||||
.bind(&book.external_book_id)
|
||||
.bind(book.volume_number)
|
||||
.bind(&book.title)
|
||||
.bind(&book.authors)
|
||||
.bind(&book.isbn)
|
||||
.bind(&book.summary)
|
||||
.bind(&book.cover_url)
|
||||
.bind(book.page_count)
|
||||
.bind(&book.language)
|
||||
.bind(&book.publish_date)
|
||||
.bind(&book.metadata_json)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
// Diff + push metadata to matched local book
|
||||
if let Some(book_id) = local_book_id {
|
||||
let diffs = sync_book_with_diff(pool, book_id, book).await?;
|
||||
if !diffs.is_empty() {
|
||||
let local_title = local_books_with_pos
|
||||
.iter()
|
||||
.find(|(id, _, _)| *id == book_id)
|
||||
.map(|(_, _, t)| t.clone())
|
||||
.unwrap_or_default();
|
||||
book_changes.push(BookDiff {
|
||||
book_id: book_id.to_string(),
|
||||
title: local_title,
|
||||
volume: book.volume_number,
|
||||
changes: diffs,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update synced_at on the link
|
||||
sqlx::query("UPDATE external_metadata_links SET synced_at = NOW(), updated_at = NOW() WHERE id = $1")
|
||||
.bind(link_id)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let has_changes = !series_changes.is_empty() || !book_changes.is_empty();
|
||||
|
||||
Ok(SeriesRefreshResult {
|
||||
series_name: series_name.to_string(),
|
||||
provider: provider_name.to_string(),
|
||||
status: if has_changes { "updated".to_string() } else { "unchanged".to_string() },
|
||||
series_changes,
|
||||
book_changes,
|
||||
error: None,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Diff helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Compare old/new for a nullable string field. Returns Some(FieldDiff) only if value actually changed.
|
||||
fn diff_opt_str(field: &str, old: Option<&str>, new: Option<&str>) -> Option<FieldDiff> {
|
||||
let new_val = new.filter(|s| !s.is_empty());
|
||||
// Only report a change if there is a new non-empty value AND it differs from old
|
||||
match (old, new_val) {
|
||||
(Some(o), Some(n)) if o != n => Some(FieldDiff {
|
||||
field: field.to_string(),
|
||||
old: Some(serde_json::Value::String(o.to_string())),
|
||||
new: Some(serde_json::Value::String(n.to_string())),
|
||||
}),
|
||||
(None, Some(n)) => Some(FieldDiff {
|
||||
field: field.to_string(),
|
||||
old: None,
|
||||
new: Some(serde_json::Value::String(n.to_string())),
|
||||
}),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn diff_opt_i32(field: &str, old: Option<i32>, new: Option<i32>) -> Option<FieldDiff> {
|
||||
match (old, new) {
|
||||
(Some(o), Some(n)) if o != n => Some(FieldDiff {
|
||||
field: field.to_string(),
|
||||
old: Some(serde_json::json!(o)),
|
||||
new: Some(serde_json::json!(n)),
|
||||
}),
|
||||
(None, Some(n)) => Some(FieldDiff {
|
||||
field: field.to_string(),
|
||||
old: None,
|
||||
new: Some(serde_json::json!(n)),
|
||||
}),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn diff_str_vec(field: &str, old: &[String], new: &[String]) -> Option<FieldDiff> {
|
||||
if new.is_empty() {
|
||||
return None;
|
||||
}
|
||||
if old != new {
|
||||
Some(FieldDiff {
|
||||
field: field.to_string(),
|
||||
old: Some(serde_json::json!(old)),
|
||||
new: Some(serde_json::json!(new)),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Series sync with diff tracking
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async fn sync_series_with_diff(
|
||||
pool: &PgPool,
|
||||
library_id: Uuid,
|
||||
series_name: &str,
|
||||
candidate: &metadata_providers::SeriesCandidate,
|
||||
) -> Result<Vec<FieldDiff>, String> {
|
||||
let new_description = candidate.metadata_json
|
||||
.get("description")
|
||||
.and_then(|d| d.as_str())
|
||||
.or(candidate.description.as_deref());
|
||||
let new_authors = &candidate.authors;
|
||||
let new_publishers = &candidate.publishers;
|
||||
let new_start_year = candidate.start_year;
|
||||
let new_total_volumes = candidate.total_volumes;
|
||||
let new_status = if let Some(raw) = candidate.metadata_json.get("status").and_then(|s| s.as_str()) {
|
||||
crate::metadata::normalize_series_status(pool, raw).await
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let new_status = new_status.as_deref();
|
||||
|
||||
// Fetch existing series metadata for diffing
|
||||
let existing = sqlx::query(
|
||||
r#"SELECT description, publishers, start_year, total_volumes, status, authors, locked_fields
|
||||
FROM series_metadata WHERE library_id = $1 AND name = $2"#,
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(series_name)
|
||||
.fetch_optional(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let locked = existing
|
||||
.as_ref()
|
||||
.map(|r| r.get::<serde_json::Value, _>("locked_fields"))
|
||||
.unwrap_or(serde_json::json!({}));
|
||||
let is_locked = |field: &str| -> bool {
|
||||
locked.get(field).and_then(|v| v.as_bool()).unwrap_or(false)
|
||||
};
|
||||
|
||||
// Build diffs (only for unlocked fields that actually change)
|
||||
let mut diffs: Vec<FieldDiff> = Vec::new();
|
||||
|
||||
if !is_locked("description") {
|
||||
let old_desc: Option<String> = existing.as_ref().and_then(|r| r.get("description"));
|
||||
if let Some(d) = diff_opt_str("description", old_desc.as_deref(), new_description) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("authors") {
|
||||
let old_authors: Vec<String> = existing.as_ref().map(|r| r.get("authors")).unwrap_or_default();
|
||||
if let Some(d) = diff_str_vec("authors", &old_authors, new_authors) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("publishers") {
|
||||
let old_publishers: Vec<String> = existing.as_ref().map(|r| r.get("publishers")).unwrap_or_default();
|
||||
if let Some(d) = diff_str_vec("publishers", &old_publishers, new_publishers) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("start_year") {
|
||||
let old_year: Option<i32> = existing.as_ref().and_then(|r| r.get("start_year"));
|
||||
if let Some(d) = diff_opt_i32("start_year", old_year, new_start_year) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("total_volumes") {
|
||||
let old_vols: Option<i32> = existing.as_ref().and_then(|r| r.get("total_volumes"));
|
||||
if let Some(d) = diff_opt_i32("total_volumes", old_vols, new_total_volumes) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("status") {
|
||||
let old_status: Option<String> = existing.as_ref().and_then(|r| r.get("status"));
|
||||
if let Some(d) = diff_opt_str("status", old_status.as_deref(), new_status) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
|
||||
// Now do the actual upsert
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO series_metadata (library_id, name, description, publishers, start_year, total_volumes, status, authors, created_at, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW())
|
||||
ON CONFLICT (library_id, name)
|
||||
DO UPDATE SET
|
||||
description = CASE
|
||||
WHEN (series_metadata.locked_fields->>'description')::boolean IS TRUE THEN series_metadata.description
|
||||
ELSE COALESCE(NULLIF(EXCLUDED.description, ''), series_metadata.description)
|
||||
END,
|
||||
publishers = CASE
|
||||
WHEN (series_metadata.locked_fields->>'publishers')::boolean IS TRUE THEN series_metadata.publishers
|
||||
WHEN array_length(EXCLUDED.publishers, 1) > 0 THEN EXCLUDED.publishers
|
||||
ELSE series_metadata.publishers
|
||||
END,
|
||||
start_year = CASE
|
||||
WHEN (series_metadata.locked_fields->>'start_year')::boolean IS TRUE THEN series_metadata.start_year
|
||||
ELSE COALESCE(EXCLUDED.start_year, series_metadata.start_year)
|
||||
END,
|
||||
total_volumes = CASE
|
||||
WHEN (series_metadata.locked_fields->>'total_volumes')::boolean IS TRUE THEN series_metadata.total_volumes
|
||||
ELSE COALESCE(EXCLUDED.total_volumes, series_metadata.total_volumes)
|
||||
END,
|
||||
status = CASE
|
||||
WHEN (series_metadata.locked_fields->>'status')::boolean IS TRUE THEN series_metadata.status
|
||||
ELSE COALESCE(EXCLUDED.status, series_metadata.status)
|
||||
END,
|
||||
authors = CASE
|
||||
WHEN (series_metadata.locked_fields->>'authors')::boolean IS TRUE THEN series_metadata.authors
|
||||
WHEN array_length(EXCLUDED.authors, 1) > 0 THEN EXCLUDED.authors
|
||||
ELSE series_metadata.authors
|
||||
END,
|
||||
updated_at = NOW()
|
||||
"#,
|
||||
)
|
||||
.bind(library_id)
|
||||
.bind(series_name)
|
||||
.bind(new_description)
|
||||
.bind(new_publishers)
|
||||
.bind(new_start_year)
|
||||
.bind(new_total_volumes)
|
||||
.bind(new_status)
|
||||
.bind(new_authors)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
Ok(diffs)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Book sync with diff tracking
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async fn sync_book_with_diff(
|
||||
pool: &PgPool,
|
||||
book_id: Uuid,
|
||||
ext_book: &metadata_providers::BookCandidate,
|
||||
) -> Result<Vec<FieldDiff>, String> {
|
||||
// Fetch current book state
|
||||
let current = sqlx::query(
|
||||
"SELECT summary, isbn, publish_date, language, authors, locked_fields FROM books WHERE id = $1",
|
||||
)
|
||||
.bind(book_id)
|
||||
.fetch_one(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let locked = current.get::<serde_json::Value, _>("locked_fields");
|
||||
let is_locked = |field: &str| -> bool {
|
||||
locked.get(field).and_then(|v| v.as_bool()).unwrap_or(false)
|
||||
};
|
||||
|
||||
// Build diffs
|
||||
let mut diffs: Vec<FieldDiff> = Vec::new();
|
||||
|
||||
if !is_locked("summary") {
|
||||
let old: Option<String> = current.get("summary");
|
||||
if let Some(d) = diff_opt_str("summary", old.as_deref(), ext_book.summary.as_deref()) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("isbn") {
|
||||
let old: Option<String> = current.get("isbn");
|
||||
if let Some(d) = diff_opt_str("isbn", old.as_deref(), ext_book.isbn.as_deref()) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("publish_date") {
|
||||
let old: Option<String> = current.get("publish_date");
|
||||
if let Some(d) = diff_opt_str("publish_date", old.as_deref(), ext_book.publish_date.as_deref()) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("language") {
|
||||
let old: Option<String> = current.get("language");
|
||||
if let Some(d) = diff_opt_str("language", old.as_deref(), ext_book.language.as_deref()) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
if !is_locked("authors") {
|
||||
let old: Vec<String> = current.get("authors");
|
||||
if let Some(d) = diff_str_vec("authors", &old, &ext_book.authors) {
|
||||
diffs.push(d);
|
||||
}
|
||||
}
|
||||
|
||||
// Do the actual update
|
||||
sqlx::query(
|
||||
r#"
|
||||
UPDATE books SET
|
||||
summary = CASE
|
||||
WHEN (locked_fields->>'summary')::boolean IS TRUE THEN summary
|
||||
ELSE COALESCE(NULLIF($2, ''), summary)
|
||||
END,
|
||||
isbn = CASE
|
||||
WHEN (locked_fields->>'isbn')::boolean IS TRUE THEN isbn
|
||||
ELSE COALESCE(NULLIF($3, ''), isbn)
|
||||
END,
|
||||
publish_date = CASE
|
||||
WHEN (locked_fields->>'publish_date')::boolean IS TRUE THEN publish_date
|
||||
ELSE COALESCE(NULLIF($4, ''), publish_date)
|
||||
END,
|
||||
language = CASE
|
||||
WHEN (locked_fields->>'language')::boolean IS TRUE THEN language
|
||||
ELSE COALESCE(NULLIF($5, ''), language)
|
||||
END,
|
||||
authors = CASE
|
||||
WHEN (locked_fields->>'authors')::boolean IS TRUE THEN authors
|
||||
WHEN CARDINALITY($6::text[]) > 0 THEN $6
|
||||
ELSE authors
|
||||
END,
|
||||
author = CASE
|
||||
WHEN (locked_fields->>'authors')::boolean IS TRUE THEN author
|
||||
WHEN CARDINALITY($6::text[]) > 0 THEN $6[1]
|
||||
ELSE author
|
||||
END,
|
||||
updated_at = NOW()
|
||||
WHERE id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(book_id)
|
||||
.bind(&ext_book.summary)
|
||||
.bind(&ext_book.isbn)
|
||||
.bind(&ext_book.publish_date)
|
||||
.bind(&ext_book.language)
|
||||
.bind(&ext_book.authors)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
Ok(diffs)
|
||||
}
|
||||
@@ -6,7 +6,18 @@ use utoipa::OpenApi;
|
||||
paths(
|
||||
crate::books::list_books,
|
||||
crate::books::get_book,
|
||||
crate::reading_progress::get_reading_progress,
|
||||
crate::reading_progress::update_reading_progress,
|
||||
crate::reading_progress::mark_series_read,
|
||||
crate::books::get_thumbnail,
|
||||
crate::books::list_series,
|
||||
crate::books::list_all_series,
|
||||
crate::books::ongoing_series,
|
||||
crate::books::ongoing_books,
|
||||
crate::books::convert_book,
|
||||
crate::books::update_book,
|
||||
crate::books::get_series_metadata,
|
||||
crate::books::update_series,
|
||||
crate::pages::get_page,
|
||||
crate::search::search_books,
|
||||
crate::index_jobs::enqueue_rebuild,
|
||||
@@ -27,6 +38,26 @@ use utoipa::OpenApi;
|
||||
crate::tokens::list_tokens,
|
||||
crate::tokens::create_token,
|
||||
crate::tokens::revoke_token,
|
||||
crate::tokens::delete_token,
|
||||
crate::stats::get_stats,
|
||||
crate::settings::get_settings,
|
||||
crate::settings::get_setting,
|
||||
crate::settings::update_setting,
|
||||
crate::settings::clear_cache,
|
||||
crate::settings::get_cache_stats,
|
||||
crate::settings::get_thumbnail_stats,
|
||||
crate::metadata::search_metadata,
|
||||
crate::metadata::create_metadata_match,
|
||||
crate::metadata::approve_metadata,
|
||||
crate::metadata::reject_metadata,
|
||||
crate::metadata::get_metadata_links,
|
||||
crate::metadata::get_missing_books,
|
||||
crate::metadata::delete_metadata_link,
|
||||
crate::books::series_statuses,
|
||||
crate::books::provider_statuses,
|
||||
crate::settings::list_status_mappings,
|
||||
crate::settings::upsert_status_mapping,
|
||||
crate::settings::delete_status_mapping,
|
||||
),
|
||||
components(
|
||||
schemas(
|
||||
@@ -34,10 +65,22 @@ use utoipa::OpenApi;
|
||||
crate::books::BookItem,
|
||||
crate::books::BooksPage,
|
||||
crate::books::BookDetails,
|
||||
crate::reading_progress::ReadingProgressResponse,
|
||||
crate::reading_progress::UpdateReadingProgressRequest,
|
||||
crate::reading_progress::MarkSeriesReadRequest,
|
||||
crate::reading_progress::MarkSeriesReadResponse,
|
||||
crate::books::SeriesItem,
|
||||
crate::books::SeriesPage,
|
||||
crate::books::ListAllSeriesQuery,
|
||||
crate::books::OngoingQuery,
|
||||
crate::books::UpdateBookRequest,
|
||||
crate::books::SeriesMetadata,
|
||||
crate::books::UpdateSeriesRequest,
|
||||
crate::books::UpdateSeriesResponse,
|
||||
crate::pages::PageQuery,
|
||||
crate::search::SearchQuery,
|
||||
crate::search::SearchResponse,
|
||||
crate::search::SeriesHit,
|
||||
crate::index_jobs::RebuildRequest,
|
||||
crate::thumbnails::ThumbnailsRebuildRequest,
|
||||
crate::index_jobs::IndexJobResponse,
|
||||
@@ -51,6 +94,34 @@ use utoipa::OpenApi;
|
||||
crate::tokens::CreateTokenRequest,
|
||||
crate::tokens::TokenResponse,
|
||||
crate::tokens::CreatedTokenResponse,
|
||||
crate::settings::UpdateSettingRequest,
|
||||
crate::settings::ClearCacheResponse,
|
||||
crate::settings::CacheStats,
|
||||
crate::settings::ThumbnailStats,
|
||||
crate::settings::StatusMappingDto,
|
||||
crate::settings::UpsertStatusMappingRequest,
|
||||
crate::stats::StatsResponse,
|
||||
crate::stats::StatsOverview,
|
||||
crate::stats::ReadingStatusStats,
|
||||
crate::stats::FormatCount,
|
||||
crate::stats::LanguageCount,
|
||||
crate::stats::LibraryStats,
|
||||
crate::stats::TopSeries,
|
||||
crate::stats::MonthlyAdditions,
|
||||
crate::stats::MetadataStats,
|
||||
crate::stats::ProviderCount,
|
||||
crate::metadata::ApproveRequest,
|
||||
crate::metadata::ApproveResponse,
|
||||
crate::metadata::SyncReport,
|
||||
crate::metadata::SeriesSyncReport,
|
||||
crate::metadata::BookSyncReport,
|
||||
crate::metadata::FieldChange,
|
||||
crate::metadata::MetadataSearchRequest,
|
||||
crate::metadata::SeriesCandidateDto,
|
||||
crate::metadata::MetadataMatchRequest,
|
||||
crate::metadata::ExternalMetadataLinkDto,
|
||||
crate::metadata::MissingBooksDto,
|
||||
crate::metadata::MissingBookItem,
|
||||
ErrorResponse,
|
||||
)
|
||||
),
|
||||
@@ -59,9 +130,11 @@ use utoipa::OpenApi;
|
||||
),
|
||||
tags(
|
||||
(name = "books", description = "Read-only endpoints for browsing and searching books"),
|
||||
(name = "reading-progress", description = "Reading progress tracking per book"),
|
||||
(name = "libraries", description = "Library management endpoints (Admin only)"),
|
||||
(name = "indexing", description = "Search index management and job control (Admin only)"),
|
||||
(name = "tokens", description = "API token management (Admin only)"),
|
||||
(name = "settings", description = "Application settings and cache management (Admin only)"),
|
||||
),
|
||||
modifiers(&SecurityAddon)
|
||||
)]
|
||||
@@ -106,15 +179,24 @@ mod tests {
|
||||
.to_pretty_json()
|
||||
.expect("Failed to serialize OpenAPI");
|
||||
|
||||
// Check that there are no references to non-existent schemas
|
||||
assert!(
|
||||
!json.contains("\"/components/schemas/Uuid\""),
|
||||
"Uuid schema should not be referenced"
|
||||
);
|
||||
assert!(
|
||||
!json.contains("\"/components/schemas/DateTime\""),
|
||||
"DateTime schema should not be referenced"
|
||||
);
|
||||
// Check that all $ref targets exist in components/schemas
|
||||
let doc: serde_json::Value =
|
||||
serde_json::from_str(&json).expect("OpenAPI JSON should be valid");
|
||||
let empty = serde_json::Map::new();
|
||||
let schemas = doc["components"]["schemas"]
|
||||
.as_object()
|
||||
.unwrap_or(&empty);
|
||||
let prefix = "#/components/schemas/";
|
||||
let mut broken: Vec<String> = Vec::new();
|
||||
for part in json.split(prefix).skip(1) {
|
||||
if let Some(name) = part.split('"').next() {
|
||||
if !schemas.contains_key(name) {
|
||||
broken.push(name.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
broken.dedup();
|
||||
assert!(broken.is_empty(), "Unresolved schema refs: {:?}", broken);
|
||||
|
||||
// Save to file for inspection
|
||||
std::fs::write("/tmp/openapi.json", &json).expect("Failed to write file");
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use std::{
|
||||
io::{Read, Write},
|
||||
io::Write,
|
||||
path::{Path, PathBuf},
|
||||
sync::{atomic::Ordering, Arc},
|
||||
time::Duration,
|
||||
@@ -16,11 +16,10 @@ use serde::Deserialize;
|
||||
use utoipa::ToSchema;
|
||||
use sha2::{Digest, Sha256};
|
||||
use sqlx::Row;
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
use tracing::{error, info, instrument, warn};
|
||||
use uuid::Uuid;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
use crate::{error::ApiError, AppState};
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
fn remap_libraries_path(path: &str) -> String {
|
||||
if let Ok(root) = std::env::var("LIBRARIES_ROOT_PATH") {
|
||||
@@ -31,10 +30,12 @@ fn remap_libraries_path(path: &str) -> String {
|
||||
path.to_string()
|
||||
}
|
||||
|
||||
fn get_image_cache_dir() -> PathBuf {
|
||||
std::env::var("IMAGE_CACHE_DIR")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| PathBuf::from("/tmp/stripstream-image-cache"))
|
||||
fn parse_filter(s: &str) -> image::imageops::FilterType {
|
||||
match s {
|
||||
"lanczos3" => image::imageops::FilterType::Lanczos3,
|
||||
"nearest" => image::imageops::FilterType::Nearest,
|
||||
_ => image::imageops::FilterType::Triangle, // Triangle (bilinear) is fast and good enough for comics
|
||||
}
|
||||
}
|
||||
|
||||
fn get_cache_key(abs_path: &str, page: u32, format: &str, quality: u8, width: u32) -> String {
|
||||
@@ -47,8 +48,7 @@ fn get_cache_key(abs_path: &str, page: u32, format: &str, quality: u8, width: u3
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
fn get_cache_path(cache_key: &str, format: &OutputFormat) -> PathBuf {
|
||||
let cache_dir = get_image_cache_dir();
|
||||
fn get_cache_path(cache_key: &str, format: &OutputFormat, cache_dir: &Path) -> PathBuf {
|
||||
let prefix = &cache_key[..2];
|
||||
let ext = format.extension();
|
||||
cache_dir.join(prefix).join(format!("{}.{}", cache_key, ext))
|
||||
@@ -64,7 +64,7 @@ fn write_to_disk_cache(cache_path: &Path, data: &[u8]) -> Result<(), std::io::Er
|
||||
}
|
||||
let mut file = std::fs::File::create(cache_path)?;
|
||||
file.write_all(data)?;
|
||||
file.sync_data()?;
|
||||
// No sync_data() — this is a cache, durability is not critical
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -80,6 +80,8 @@ pub struct PageQuery {
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
enum OutputFormat {
|
||||
/// Serve raw bytes from the archive — no decode, no re-encode.
|
||||
Original,
|
||||
Jpeg,
|
||||
Png,
|
||||
Webp,
|
||||
@@ -87,16 +89,19 @@ enum OutputFormat {
|
||||
|
||||
impl OutputFormat {
|
||||
fn parse(value: Option<&str>) -> Result<Self, ApiError> {
|
||||
match value.unwrap_or("webp") {
|
||||
"jpeg" | "jpg" => Ok(Self::Jpeg),
|
||||
"png" => Ok(Self::Png),
|
||||
"webp" => Ok(Self::Webp),
|
||||
_ => Err(ApiError::bad_request("format must be webp|jpeg|png")),
|
||||
match value {
|
||||
None => Ok(Self::Original),
|
||||
Some("original") => Ok(Self::Original),
|
||||
Some("jpeg") | Some("jpg") => Ok(Self::Jpeg),
|
||||
Some("png") => Ok(Self::Png),
|
||||
Some("webp") => Ok(Self::Webp),
|
||||
_ => Err(ApiError::bad_request("format must be original|webp|jpeg|png")),
|
||||
}
|
||||
}
|
||||
|
||||
fn content_type(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Original => "application/octet-stream", // will be overridden by detected type
|
||||
Self::Jpeg => "image/jpeg",
|
||||
Self::Png => "image/png",
|
||||
Self::Webp => "image/webp",
|
||||
@@ -105,6 +110,7 @@ impl OutputFormat {
|
||||
|
||||
fn extension(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Original => "orig",
|
||||
Self::Jpeg => "jpg",
|
||||
Self::Png => "png",
|
||||
Self::Webp => "webp",
|
||||
@@ -112,6 +118,17 @@ impl OutputFormat {
|
||||
}
|
||||
}
|
||||
|
||||
/// Detect content type from raw image bytes.
|
||||
fn detect_content_type(data: &[u8]) -> &'static str {
|
||||
match image::guess_format(data) {
|
||||
Ok(ImageFormat::Jpeg) => "image/jpeg",
|
||||
Ok(ImageFormat::Png) => "image/png",
|
||||
Ok(ImageFormat::WebP) => "image/webp",
|
||||
Ok(ImageFormat::Avif) => "image/avif",
|
||||
_ => "application/octet-stream",
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a specific page image from a book with optional format conversion
|
||||
#[utoipa::path(
|
||||
get,
|
||||
@@ -132,36 +149,38 @@ impl OutputFormat {
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
#[instrument(skip(state), fields(book_id = %book_id, page = n))]
|
||||
#[instrument(skip(state, headers), fields(book_id = %book_id, page = n))]
|
||||
pub async fn get_page(
|
||||
State(state): State<AppState>,
|
||||
AxumPath((book_id, n)): AxumPath<(Uuid, u32)>,
|
||||
Query(query): Query<PageQuery>,
|
||||
headers: HeaderMap,
|
||||
) -> Result<Response, ApiError> {
|
||||
info!("Processing image request");
|
||||
|
||||
if n == 0 {
|
||||
warn!("Invalid page number: 0");
|
||||
return Err(ApiError::bad_request("page index starts at 1"));
|
||||
}
|
||||
|
||||
let (default_quality, max_width, filter_str, timeout_secs, cache_dir) = {
|
||||
let s = state.settings.read().await;
|
||||
(s.image_quality, s.image_max_width, s.image_filter.clone(), s.timeout_seconds, s.cache_directory.clone())
|
||||
};
|
||||
|
||||
let format = OutputFormat::parse(query.format.as_deref())?;
|
||||
let quality = query.quality.unwrap_or(80).clamp(1, 100);
|
||||
let quality = query.quality.unwrap_or(default_quality).clamp(1, 100);
|
||||
let width = query.width.unwrap_or(0);
|
||||
if width > 2160 {
|
||||
warn!("Invalid width: {}", width);
|
||||
return Err(ApiError::bad_request("width must be <= 2160"));
|
||||
if width > max_width {
|
||||
return Err(ApiError::bad_request(format!("width must be <= {}", max_width)));
|
||||
}
|
||||
let filter = parse_filter(&filter_str);
|
||||
let cache_dir_path = std::path::PathBuf::from(&cache_dir);
|
||||
|
||||
let memory_cache_key = format!("{book_id}:{n}:{}:{quality}:{width}", format.extension());
|
||||
|
||||
if let Some(cached) = state.page_cache.lock().await.get(&memory_cache_key).cloned() {
|
||||
state.metrics.page_cache_hits.fetch_add(1, Ordering::Relaxed);
|
||||
debug!("Memory cache hit for key: {}", memory_cache_key);
|
||||
return Ok(image_response(cached, format.content_type(), None));
|
||||
return Ok(image_response(cached, format, None, &headers));
|
||||
}
|
||||
state.metrics.page_cache_misses.fetch_add(1, Ordering::Relaxed);
|
||||
debug!("Memory cache miss for key: {}", memory_cache_key);
|
||||
|
||||
let row = sqlx::query(
|
||||
r#"
|
||||
@@ -183,7 +202,6 @@ pub async fn get_page(
|
||||
let row = match row {
|
||||
Some(r) => r,
|
||||
None => {
|
||||
error!("Book file not found for book_id: {}", book_id);
|
||||
return Err(ApiError::not_found("book file not found"));
|
||||
}
|
||||
};
|
||||
@@ -192,18 +210,22 @@ pub async fn get_page(
|
||||
let abs_path = remap_libraries_path(&abs_path);
|
||||
let input_format: String = row.get("format");
|
||||
|
||||
info!("Processing book file: {} (format: {})", abs_path, input_format);
|
||||
|
||||
let disk_cache_key = get_cache_key(&abs_path, n, format.extension(), quality, width);
|
||||
let cache_path = get_cache_path(&disk_cache_key, &format);
|
||||
let cache_path = get_cache_path(&disk_cache_key, &format, &cache_dir_path);
|
||||
|
||||
// If-None-Match: return 304 if the client already has this version
|
||||
if let Some(if_none_match) = headers.get(header::IF_NONE_MATCH) {
|
||||
let expected_etag = format!("\"{}\"", disk_cache_key);
|
||||
if if_none_match.as_bytes() == expected_etag.as_bytes() {
|
||||
return Ok(StatusCode::NOT_MODIFIED.into_response());
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(cached_bytes) = read_from_disk_cache(&cache_path) {
|
||||
info!("Disk cache hit for: {}", cache_path.display());
|
||||
let bytes = Arc::new(cached_bytes);
|
||||
state.page_cache.lock().await.put(memory_cache_key, bytes.clone());
|
||||
return Ok(image_response(bytes, format.content_type(), Some(&disk_cache_key)));
|
||||
return Ok(image_response(bytes, format, Some(&disk_cache_key), &headers));
|
||||
}
|
||||
debug!("Disk cache miss for: {}", cache_path.display());
|
||||
|
||||
let _permit = state
|
||||
.page_render_limit
|
||||
@@ -215,15 +237,14 @@ pub async fn get_page(
|
||||
ApiError::internal("render limiter unavailable")
|
||||
})?;
|
||||
|
||||
info!("Rendering page {} from {}", n, abs_path);
|
||||
let abs_path_clone = abs_path.clone();
|
||||
let format_clone = format;
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
let bytes = tokio::time::timeout(
|
||||
Duration::from_secs(60),
|
||||
Duration::from_secs(timeout_secs),
|
||||
tokio::task::spawn_blocking(move || {
|
||||
render_page(&abs_path_clone, &input_format, n, &format_clone, quality, width)
|
||||
render_page(&abs_path_clone, &input_format, n, &format_clone, quality, width, filter)
|
||||
}),
|
||||
)
|
||||
.await
|
||||
@@ -240,18 +261,37 @@ pub async fn get_page(
|
||||
|
||||
match bytes {
|
||||
Ok(data) => {
|
||||
info!("Successfully rendered page {} in {:?}", n, duration);
|
||||
info!("Rendered page {} in {:?}", n, duration);
|
||||
|
||||
if let Err(e) = write_to_disk_cache(&cache_path, &data) {
|
||||
warn!("Failed to write to disk cache: {}", e);
|
||||
} else {
|
||||
info!("Cached rendered image to: {}", cache_path.display());
|
||||
}
|
||||
|
||||
let bytes = Arc::new(data);
|
||||
state.page_cache.lock().await.put(memory_cache_key, bytes.clone());
|
||||
state.page_cache.lock().await.put(memory_cache_key.clone(), bytes.clone());
|
||||
|
||||
Ok(image_response(bytes, format.content_type(), Some(&disk_cache_key)))
|
||||
// Prefetch next 2 pages in background (fire-and-forget)
|
||||
for next_page in [n + 1, n + 2] {
|
||||
let state2 = state.clone();
|
||||
let abs_path2 = abs_path.clone();
|
||||
let cache_dir2 = cache_dir_path.clone();
|
||||
let format2 = format;
|
||||
tokio::spawn(async move {
|
||||
prefetch_page(state2, &PrefetchParams {
|
||||
book_id,
|
||||
abs_path: &abs_path2,
|
||||
page: next_page,
|
||||
format: format2,
|
||||
quality,
|
||||
width,
|
||||
filter,
|
||||
timeout_secs,
|
||||
cache_dir: &cache_dir2,
|
||||
}).await;
|
||||
});
|
||||
}
|
||||
|
||||
Ok(image_response(bytes, format, Some(&disk_cache_key), &headers))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to render page {} from {}: {:?}", n, abs_path, e);
|
||||
@@ -260,11 +300,83 @@ pub async fn get_page(
|
||||
}
|
||||
}
|
||||
|
||||
fn image_response(bytes: Arc<Vec<u8>>, content_type: &str, etag_suffix: Option<&str>) -> Response {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(header::CONTENT_TYPE, HeaderValue::from_str(content_type).unwrap_or(HeaderValue::from_static("application/octet-stream")));
|
||||
headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("public, max-age=31536000, immutable"));
|
||||
struct PrefetchParams<'a> {
|
||||
book_id: Uuid,
|
||||
abs_path: &'a str,
|
||||
page: u32,
|
||||
format: OutputFormat,
|
||||
quality: u8,
|
||||
width: u32,
|
||||
filter: image::imageops::FilterType,
|
||||
timeout_secs: u64,
|
||||
cache_dir: &'a Path,
|
||||
}
|
||||
|
||||
/// Prefetch a single page into disk+memory cache (best-effort, ignores errors).
|
||||
async fn prefetch_page(state: AppState, params: &PrefetchParams<'_>) {
|
||||
let book_id = params.book_id;
|
||||
let page = params.page;
|
||||
let format = params.format;
|
||||
let quality = params.quality;
|
||||
let width = params.width;
|
||||
let filter = params.filter;
|
||||
let timeout_secs = params.timeout_secs;
|
||||
let abs_path = params.abs_path;
|
||||
let cache_dir = params.cache_dir;
|
||||
|
||||
let mem_key = format!("{book_id}:{page}:{}:{quality}:{width}", format.extension());
|
||||
// Already in memory cache?
|
||||
if state.page_cache.lock().await.contains(&mem_key) {
|
||||
return;
|
||||
}
|
||||
// Already on disk?
|
||||
let disk_key = get_cache_key(abs_path, page, format.extension(), quality, width);
|
||||
let cache_path = get_cache_path(&disk_key, &format, cache_dir);
|
||||
if cache_path.exists() {
|
||||
return;
|
||||
}
|
||||
// Acquire render permit (don't block too long — if busy, skip)
|
||||
let permit = tokio::time::timeout(
|
||||
Duration::from_millis(100),
|
||||
state.page_render_limit.clone().acquire_owned(),
|
||||
)
|
||||
.await;
|
||||
let _permit = match permit {
|
||||
Ok(Ok(p)) => p,
|
||||
_ => return,
|
||||
};
|
||||
|
||||
// Fetch the book format from the path extension as a shortcut
|
||||
let input_format = match abs_path.rsplit('.').next().map(|e| e.to_ascii_lowercase()) {
|
||||
Some(ref e) if e == "cbz" => "cbz",
|
||||
Some(ref e) if e == "cbr" => "cbr",
|
||||
Some(ref e) if e == "pdf" => "pdf",
|
||||
_ => return,
|
||||
}
|
||||
.to_string();
|
||||
|
||||
let abs_clone = abs_path.to_string();
|
||||
let fmt = format;
|
||||
let result = tokio::time::timeout(
|
||||
Duration::from_secs(timeout_secs),
|
||||
tokio::task::spawn_blocking(move || {
|
||||
render_page(&abs_clone, &input_format, page, &fmt, quality, width, filter)
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
if let Ok(Ok(Ok(data))) = result {
|
||||
let _ = write_to_disk_cache(&cache_path, &data);
|
||||
let bytes = Arc::new(data);
|
||||
state.page_cache.lock().await.put(mem_key, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
fn image_response(bytes: Arc<Vec<u8>>, format: OutputFormat, etag_suffix: Option<&str>, req_headers: &HeaderMap) -> Response {
|
||||
let content_type = match format {
|
||||
OutputFormat::Original => detect_content_type(&bytes),
|
||||
_ => format.content_type(),
|
||||
};
|
||||
let etag = if let Some(suffix) = etag_suffix {
|
||||
format!("\"{}\"", suffix)
|
||||
} else {
|
||||
@@ -273,19 +385,37 @@ fn image_response(bytes: Arc<Vec<u8>>, content_type: &str, etag_suffix: Option<&
|
||||
format!("\"{:x}\"", hasher.finalize())
|
||||
};
|
||||
|
||||
// Check If-None-Match for 304
|
||||
if let Some(if_none_match) = req_headers.get(header::IF_NONE_MATCH) {
|
||||
if if_none_match.as_bytes() == etag.as_bytes() {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("public, max-age=31536000, immutable"));
|
||||
if let Ok(v) = HeaderValue::from_str(&etag) {
|
||||
headers.insert(header::ETAG, v);
|
||||
}
|
||||
(StatusCode::OK, headers, Body::from((*bytes).clone())).into_response()
|
||||
return (StatusCode::NOT_MODIFIED, headers).into_response();
|
||||
}
|
||||
}
|
||||
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(header::CONTENT_TYPE, HeaderValue::from_str(content_type).unwrap_or(HeaderValue::from_static("application/octet-stream")));
|
||||
headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("public, max-age=31536000, immutable"));
|
||||
if let Ok(v) = HeaderValue::from_str(&etag) {
|
||||
headers.insert(header::ETAG, v);
|
||||
}
|
||||
// Use Bytes to avoid cloning the Vec — shares the Arc's allocation via zero-copy
|
||||
let body_bytes = axum::body::Bytes::from(Arc::unwrap_or_clone(bytes));
|
||||
(StatusCode::OK, headers, Body::from(body_bytes)).into_response()
|
||||
}
|
||||
|
||||
/// Render page 1 of a book (for thumbnail fallback or thumbnail checkup). Uses thumbnail dimensions by default.
|
||||
/// Render page 1 as a thumbnail fallback. Returns (bytes, content_type).
|
||||
pub async fn render_book_page_1(
|
||||
state: &AppState,
|
||||
book_id: Uuid,
|
||||
width: u32,
|
||||
quality: u8,
|
||||
) -> Result<Vec<u8>, ApiError> {
|
||||
) -> Result<(Vec<u8>, &'static str), ApiError> {
|
||||
let row = sqlx::query(
|
||||
r#"SELECT abs_path, format FROM book_files WHERE book_id = $1 ORDER BY updated_at DESC LIMIT 1"#,
|
||||
)
|
||||
@@ -306,17 +436,24 @@ pub async fn render_book_page_1(
|
||||
.await
|
||||
.map_err(|_| ApiError::internal("render limiter unavailable"))?;
|
||||
|
||||
let (timeout_secs, filter_str) = {
|
||||
let s = state.settings.read().await;
|
||||
(s.timeout_seconds, s.image_filter.clone())
|
||||
};
|
||||
let filter = parse_filter(&filter_str);
|
||||
|
||||
let abs_path_clone = abs_path.clone();
|
||||
let bytes = tokio::time::timeout(
|
||||
Duration::from_secs(60),
|
||||
Duration::from_secs(timeout_secs),
|
||||
tokio::task::spawn_blocking(move || {
|
||||
render_page(
|
||||
&abs_path_clone,
|
||||
&input_format,
|
||||
1,
|
||||
&OutputFormat::Webp,
|
||||
&OutputFormat::Original,
|
||||
quality,
|
||||
width,
|
||||
filter,
|
||||
)
|
||||
}),
|
||||
)
|
||||
@@ -324,7 +461,9 @@ pub async fn render_book_page_1(
|
||||
.map_err(|_| ApiError::internal("page rendering timeout"))?
|
||||
.map_err(|e| ApiError::internal(format!("render task failed: {e}")))?;
|
||||
|
||||
bytes
|
||||
let bytes = bytes?;
|
||||
let content_type = detect_content_type(&bytes);
|
||||
Ok((bytes, content_type))
|
||||
}
|
||||
|
||||
fn render_page(
|
||||
@@ -334,200 +473,114 @@ fn render_page(
|
||||
out_format: &OutputFormat,
|
||||
quality: u8,
|
||||
width: u32,
|
||||
filter: image::imageops::FilterType,
|
||||
) -> Result<Vec<u8>, ApiError> {
|
||||
let page_bytes = match input_format {
|
||||
"cbz" => extract_cbz_page(abs_path, page_number)?,
|
||||
"cbr" => extract_cbr_page(abs_path, page_number)?,
|
||||
"pdf" => render_pdf_page(abs_path, page_number, width)?,
|
||||
let format = match input_format {
|
||||
"cbz" => parsers::BookFormat::Cbz,
|
||||
"cbr" => parsers::BookFormat::Cbr,
|
||||
"pdf" => parsers::BookFormat::Pdf,
|
||||
_ => return Err(ApiError::bad_request("unsupported source format")),
|
||||
};
|
||||
|
||||
transcode_image(&page_bytes, out_format, quality, width)
|
||||
}
|
||||
|
||||
fn extract_cbz_page(abs_path: &str, page_number: u32) -> Result<Vec<u8>, ApiError> {
|
||||
debug!("Opening CBZ archive: {}", abs_path);
|
||||
let file = std::fs::File::open(abs_path).map_err(|e| {
|
||||
error!("Cannot open CBZ file {}: {}", abs_path, e);
|
||||
ApiError::internal(format!("cannot open cbz: {e}"))
|
||||
})?;
|
||||
|
||||
let mut archive = zip::ZipArchive::new(file).map_err(|e| {
|
||||
error!("Invalid CBZ archive {}: {}", abs_path, e);
|
||||
ApiError::internal(format!("invalid cbz: {e}"))
|
||||
})?;
|
||||
|
||||
let mut image_names: Vec<String> = Vec::new();
|
||||
for i in 0..archive.len() {
|
||||
let entry = archive.by_index(i).map_err(|e| {
|
||||
error!("Failed to read CBZ entry {} in {}: {}", i, abs_path, e);
|
||||
ApiError::internal(format!("cbz entry read failed: {e}"))
|
||||
})?;
|
||||
let name = entry.name().to_ascii_lowercase();
|
||||
if is_image_name(&name) {
|
||||
image_names.push(entry.name().to_string());
|
||||
}
|
||||
}
|
||||
image_names.sort();
|
||||
debug!("Found {} images in CBZ {}", image_names.len(), abs_path);
|
||||
|
||||
let index = page_number as usize - 1;
|
||||
let selected = image_names.get(index).ok_or_else(|| {
|
||||
error!("Page {} out of range in {} (total: {})", page_number, abs_path, image_names.len());
|
||||
ApiError::not_found("page out of range")
|
||||
})?;
|
||||
|
||||
debug!("Extracting page {} ({}) from {}", page_number, selected, abs_path);
|
||||
let mut entry = archive.by_name(selected).map_err(|e| {
|
||||
error!("Failed to read CBZ page {} from {}: {}", selected, abs_path, e);
|
||||
ApiError::internal(format!("cbz page read failed: {e}"))
|
||||
})?;
|
||||
let mut buf = Vec::new();
|
||||
entry.read_to_end(&mut buf).map_err(|e| {
|
||||
error!("Failed to load CBZ page {} from {}: {}", selected, abs_path, e);
|
||||
ApiError::internal(format!("cbz page load failed: {e}"))
|
||||
})?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
fn extract_cbr_page(abs_path: &str, page_number: u32) -> Result<Vec<u8>, ApiError> {
|
||||
info!("Opening CBR archive: {}", abs_path);
|
||||
|
||||
let index = page_number as usize - 1;
|
||||
let tmp_dir = std::env::temp_dir().join(format!("stripstream-cbr-{}", Uuid::new_v4()));
|
||||
debug!("Creating temp dir for CBR extraction: {}", tmp_dir.display());
|
||||
|
||||
std::fs::create_dir_all(&tmp_dir).map_err(|e| {
|
||||
error!("Cannot create temp dir: {}", e);
|
||||
ApiError::internal(format!("temp dir error: {}", e))
|
||||
})?;
|
||||
|
||||
// Extract directly - skip listing which fails on UTF-16 encoded filenames
|
||||
let extract_output = std::process::Command::new("env")
|
||||
.args(["LC_ALL=en_US.UTF-8", "LANG=en_US.UTF-8", "unar", "-o"])
|
||||
.arg(&tmp_dir)
|
||||
.arg(abs_path)
|
||||
.output()
|
||||
let pdf_render_width = if width > 0 { width } else { 1200 };
|
||||
let page_bytes = parsers::extract_page(
|
||||
std::path::Path::new(abs_path),
|
||||
format,
|
||||
page_number,
|
||||
pdf_render_width,
|
||||
)
|
||||
.map_err(|e| {
|
||||
let _ = std::fs::remove_dir_all(&tmp_dir);
|
||||
error!("unar extract failed: {}", e);
|
||||
ApiError::internal(format!("unar extract failed: {e}"))
|
||||
error!("Failed to extract page {} from {}: {}", page_number, abs_path, e);
|
||||
ApiError::internal(format!("page extraction failed: {e}"))
|
||||
})?;
|
||||
|
||||
if !extract_output.status.success() {
|
||||
let _ = std::fs::remove_dir_all(&tmp_dir);
|
||||
let stderr = String::from_utf8_lossy(&extract_output.stderr);
|
||||
error!("unar extract failed {}: {}", abs_path, stderr);
|
||||
return Err(ApiError::internal("unar extract failed"));
|
||||
// Original mode or source matches output with no resize → return raw bytes (zero transcoding)
|
||||
if matches!(out_format, OutputFormat::Original) && width == 0 {
|
||||
return Ok(page_bytes);
|
||||
}
|
||||
if width == 0 {
|
||||
if let Ok(source_fmt) = image::guess_format(&page_bytes) {
|
||||
if format_matches(&source_fmt, out_format) {
|
||||
return Ok(page_bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find and read the requested image (recursive search for CBR files with subdirectories)
|
||||
let mut image_files: Vec<_> = WalkDir::new(&tmp_dir)
|
||||
.into_iter()
|
||||
.filter_map(|e| e.ok())
|
||||
.filter(|e| {
|
||||
let name = e.file_name().to_string_lossy().to_lowercase();
|
||||
is_image_name(&name)
|
||||
})
|
||||
.collect();
|
||||
|
||||
image_files.sort_by_key(|e| e.path().to_string_lossy().to_lowercase());
|
||||
|
||||
let selected = image_files.get(index).ok_or_else(|| {
|
||||
let _ = std::fs::remove_dir_all(&tmp_dir);
|
||||
error!("Page {} not found (total: {})", page_number, image_files.len());
|
||||
ApiError::not_found("page out of range")
|
||||
})?;
|
||||
|
||||
let data = std::fs::read(selected.path()).map_err(|e| {
|
||||
let _ = std::fs::remove_dir_all(&tmp_dir);
|
||||
error!("read failed: {}", e);
|
||||
ApiError::internal(format!("read error: {}", e))
|
||||
})?;
|
||||
|
||||
let _ = std::fs::remove_dir_all(&tmp_dir);
|
||||
|
||||
info!("Successfully extracted CBR page {} ({} bytes)", page_number, data.len());
|
||||
Ok(data)
|
||||
transcode_image(&page_bytes, out_format, quality, width, filter)
|
||||
}
|
||||
|
||||
fn render_pdf_page(abs_path: &str, page_number: u32, width: u32) -> Result<Vec<u8>, ApiError> {
|
||||
let tmp_dir = std::env::temp_dir().join(format!("stripstream-pdf-{}", Uuid::new_v4()));
|
||||
debug!("Creating temp dir for PDF rendering: {}", tmp_dir.display());
|
||||
std::fs::create_dir_all(&tmp_dir).map_err(|e| {
|
||||
error!("Cannot create temp dir {}: {}", tmp_dir.display(), e);
|
||||
ApiError::internal(format!("cannot create temp dir: {e}"))
|
||||
})?;
|
||||
let output_prefix = tmp_dir.join("page");
|
||||
|
||||
let mut cmd = std::process::Command::new("pdftoppm");
|
||||
cmd.arg("-f")
|
||||
.arg(page_number.to_string())
|
||||
.arg("-singlefile")
|
||||
.arg("-png");
|
||||
if width > 0 {
|
||||
cmd.arg("-scale-to-x").arg(width.to_string()).arg("-scale-to-y").arg("-1");
|
||||
/// Fast JPEG decode with DCT scaling: decodes directly at reduced resolution.
///
/// Returns `None` when the input is not JPEG or uses a pixel format this
/// path does not handle, letting the caller fall back to a full decode.
fn fast_jpeg_decode(input: &[u8], target_w: u32, target_h: u32) -> Option<image::DynamicImage> {
    // Only JPEG supports DCT-domain scaling; bail out for everything else.
    if image::guess_format(input).ok()? != ImageFormat::Jpeg {
        return None;
    }
    let mut decoder = jpeg_decoder::Decoder::new(std::io::Cursor::new(input));
    decoder.read_info().ok()?;
    // Request decoding at (approximately) the target size.
    // NOTE(review): dims are truncated to u16 here — assumed fine for
    // realistic image sizes; confirm callers never pass widths > 65535.
    decoder.scale(target_w as u16, target_h as u16).ok()?;
    let pixels = decoder.decode().ok()?;
    let info = decoder.info()?;
    // The decoder reports the actual (scaled) output dimensions.
    let w = info.width as u32;
    let h = info.height as u32;
    match info.pixel_format {
        jpeg_decoder::PixelFormat::RGB24 => {
            let buf = image::RgbImage::from_raw(w, h, pixels)?;
            Some(image::DynamicImage::ImageRgb8(buf))
        }
        jpeg_decoder::PixelFormat::L8 => {
            let buf = image::GrayImage::from_raw(w, h, pixels)?;
            Some(image::DynamicImage::ImageLuma8(buf))
        }
        // Other pixel formats (e.g. CMYK) are not handled on the fast path.
        _ => None,
    }
}
|
||||
cmd.arg(abs_path).arg(&output_prefix);
|
||||
|
||||
debug!("Running pdftoppm for page {} of {} (width: {})", page_number, abs_path, width);
|
||||
let output = cmd
|
||||
.output()
|
||||
.map_err(|e| {
|
||||
error!("pdftoppm command failed for {} page {}: {}", abs_path, page_number, e);
|
||||
ApiError::internal(format!("pdf render failed: {e}"))
|
||||
})?;
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
let _ = std::fs::remove_dir_all(&tmp_dir);
|
||||
error!("pdftoppm failed for {} page {}: {}", abs_path, page_number, stderr);
|
||||
return Err(ApiError::internal("pdf render command failed"));
|
||||
}
|
||||
|
||||
let image_path = output_prefix.with_extension("png");
|
||||
debug!("Reading rendered PDF page from: {}", image_path.display());
|
||||
let bytes = std::fs::read(&image_path).map_err(|e| {
|
||||
error!("Failed to read rendered PDF output {}: {}", image_path.display(), e);
|
||||
ApiError::internal(format!("render output missing: {e}"))
|
||||
})?;
|
||||
let _ = std::fs::remove_dir_all(&tmp_dir);
|
||||
debug!("Successfully rendered PDF page {} to {} bytes", page_number, bytes.len());
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
fn transcode_image(input: &[u8], out_format: &OutputFormat, quality: u8, width: u32) -> Result<Vec<u8>, ApiError> {
|
||||
debug!("Transcoding image: {} bytes, format: {:?}, quality: {}, width: {}", input.len(), out_format, quality, width);
|
||||
fn transcode_image(input: &[u8], out_format: &OutputFormat, quality: u8, width: u32, filter: image::imageops::FilterType) -> Result<Vec<u8>, ApiError> {
|
||||
let source_format = image::guess_format(input).ok();
|
||||
debug!("Source format detected: {:?}", source_format);
|
||||
let needs_transcode = source_format.map(|f| !format_matches(&f, out_format)).unwrap_or(true);
|
||||
|
||||
// Resolve "Original" to the actual source format for encoding
|
||||
let effective_format = match out_format {
|
||||
OutputFormat::Original => match source_format {
|
||||
Some(ImageFormat::Png) => OutputFormat::Png,
|
||||
Some(ImageFormat::WebP) => OutputFormat::Webp,
|
||||
_ => OutputFormat::Jpeg, // default to JPEG for original resize
|
||||
},
|
||||
other => *other,
|
||||
};
|
||||
|
||||
let needs_transcode = source_format.map(|f| !format_matches(&f, &effective_format)).unwrap_or(true);
|
||||
|
||||
if width == 0 && !needs_transcode {
|
||||
debug!("No transcoding needed, returning original");
|
||||
return Ok(input.to_vec());
|
||||
}
|
||||
|
||||
debug!("Loading image from memory...");
|
||||
let mut image = image::load_from_memory(input).map_err(|e| {
|
||||
error!("Failed to load image from memory: {} (input size: {} bytes)", e, input.len());
|
||||
// For JPEG with resize: use DCT scaling to decode at ~target size (much faster)
|
||||
let mut image = if width > 0 {
|
||||
fast_jpeg_decode(input, width, u32::MAX)
|
||||
.unwrap_or_else(|| {
|
||||
image::load_from_memory(input).unwrap_or_default()
|
||||
})
|
||||
} else {
|
||||
image::load_from_memory(input).map_err(|e| {
|
||||
ApiError::internal(format!("invalid source image: {e}"))
|
||||
})?;
|
||||
})?
|
||||
};
|
||||
|
||||
if width > 0 {
|
||||
debug!("Resizing image to width: {}", width);
|
||||
image = image.resize(width, u32::MAX, image::imageops::FilterType::Lanczos3);
|
||||
image = image.resize(width, u32::MAX, filter);
|
||||
}
|
||||
|
||||
debug!("Converting to RGBA...");
|
||||
let rgba = image.to_rgba8();
|
||||
let (w, h) = rgba.dimensions();
|
||||
debug!("Image dimensions: {}x{}", w, h);
|
||||
|
||||
let mut out = Vec::new();
|
||||
match out_format {
|
||||
OutputFormat::Jpeg => {
|
||||
match effective_format {
|
||||
OutputFormat::Jpeg | OutputFormat::Original => {
|
||||
// JPEG doesn't support alpha — convert RGBA to RGB
|
||||
let rgb = image::DynamicImage::ImageRgba8(rgba.clone()).to_rgb8();
|
||||
let mut encoder = JpegEncoder::new_with_quality(&mut out, quality);
|
||||
encoder
|
||||
.encode(&rgba, w, h, ColorType::Rgba8.into())
|
||||
.encode(&rgb, w, h, ColorType::Rgb8.into())
|
||||
.map_err(|e| ApiError::internal(format!("jpeg encode failed: {e}")))?;
|
||||
}
|
||||
OutputFormat::Png => {
|
||||
@@ -542,7 +595,7 @@ fn transcode_image(input: &[u8], out_format: &OutputFormat, quality: u8, width:
|
||||
.flat_map(|p| [p[0], p[1], p[2]])
|
||||
.collect();
|
||||
let webp_data = webp::Encoder::new(&rgb_data, webp::PixelLayout::Rgb, w, h)
|
||||
.encode(f32::max(quality as f32, 85.0));
|
||||
.encode(quality as f32);
|
||||
out.extend_from_slice(&webp_data);
|
||||
}
|
||||
}
|
||||
@@ -550,28 +603,11 @@ fn transcode_image(input: &[u8], out_format: &OutputFormat, quality: u8, width:
|
||||
}
|
||||
|
||||
fn format_matches(source: &ImageFormat, target: &OutputFormat) -> bool {
|
||||
match (source, target) {
|
||||
(ImageFormat::Jpeg, OutputFormat::Jpeg) => true,
|
||||
(ImageFormat::Png, OutputFormat::Png) => true,
|
||||
(ImageFormat::WebP, OutputFormat::Webp) => true,
|
||||
_ => false,
|
||||
}
|
||||
matches!(
|
||||
(source, target),
|
||||
(ImageFormat::Jpeg, OutputFormat::Jpeg)
|
||||
| (ImageFormat::Png, OutputFormat::Png)
|
||||
| (ImageFormat::WebP, OutputFormat::Webp)
|
||||
)
|
||||
}
|
||||
|
||||
/// Case-insensitive check for common raster-image file extensions.
fn is_image_name(name: &str) -> bool {
    const IMAGE_EXTENSIONS: [&str; 9] = [
        ".jpg", ".jpeg", ".png", ".webp", ".avif", ".gif", ".tif", ".tiff", ".bmp",
    ];
    let lower = name.to_lowercase();
    IMAGE_EXTENSIONS.iter().any(|ext| lower.ends_with(ext))
}
|
||||
|
||||
/// Whether `value` parses as an absolute filesystem path.
#[allow(dead_code)]
fn _is_absolute_path(value: &str) -> bool {
    let candidate = std::path::Path::new(value);
    candidate.is_absolute()
}
|
||||
|
||||
247
apps/api/src/reading_progress.rs
Normal file
247
apps/api/src/reading_progress.rs
Normal file
@@ -0,0 +1,247 @@
|
||||
use axum::{extract::{Path, State}, Json};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::Row;
|
||||
use uuid::Uuid;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
/// Reading progress of a single book, as returned by the progress endpoints.
#[derive(Serialize, ToSchema)]
pub struct ReadingProgressResponse {
    /// Reading status: "unread", "reading", or "read"
    pub status: String,
    /// Current page (only set when status is "reading")
    pub current_page: Option<i32>,
    /// Timestamp of the most recent progress update; `None` when no
    /// progress row exists for the book.
    #[schema(value_type = Option<String>)]
    pub last_read_at: Option<DateTime<Utc>>,
}
|
||||
|
||||
/// Request body for `PATCH /books/{id}/progress`.
#[derive(Deserialize, ToSchema)]
pub struct UpdateReadingProgressRequest {
    /// Reading status: "unread", "reading", or "read"
    pub status: String,
    /// Required when status is "reading", must be > 0
    /// (ignored and stored as NULL for any other status).
    pub current_page: Option<i32>,
}
|
||||
|
||||
/// Get reading progress for a book
///
/// Returns a synthetic "unread" record when the book exists but has no
/// progress row, so clients never need to special-case a missing row.
#[utoipa::path(
    get,
    path = "/books/{id}/progress",
    tag = "reading-progress",
    params(
        ("id" = String, Path, description = "Book UUID"),
    ),
    responses(
        (status = 200, body = ReadingProgressResponse),
        (status = 404, description = "Book not found"),
        (status = 401, description = "Unauthorized"),
    ),
    security(("Bearer" = []))
)]
pub async fn get_reading_progress(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<ReadingProgressResponse>, ApiError> {
    // Verify book exists (404 applies to the book, not the progress row).
    let exists: bool = sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM books WHERE id = $1)")
        .bind(id)
        .fetch_one(&state.pool)
        .await?;

    if !exists {
        return Err(ApiError::not_found("book not found"));
    }

    let row = sqlx::query(
        "SELECT status, current_page, last_read_at FROM book_reading_progress WHERE book_id = $1",
    )
    .bind(id)
    .fetch_optional(&state.pool)
    .await?;

    // Missing row == never read; report "unread" rather than an error.
    let response = match row {
        Some(r) => ReadingProgressResponse {
            status: r.get("status"),
            current_page: r.get("current_page"),
            last_read_at: r.get("last_read_at"),
        },
        None => ReadingProgressResponse {
            status: "unread".to_string(),
            current_page: None,
            last_read_at: None,
        },
    };

    Ok(Json(response))
}
|
||||
|
||||
/// Update reading progress for a book
///
/// Upserts the progress row; `current_page` is persisted only for the
/// "reading" status and reset to NULL otherwise.
#[utoipa::path(
    patch,
    path = "/books/{id}/progress",
    tag = "reading-progress",
    params(
        ("id" = String, Path, description = "Book UUID"),
    ),
    request_body = UpdateReadingProgressRequest,
    responses(
        (status = 200, body = ReadingProgressResponse),
        (status = 404, description = "Book not found"),
        (status = 422, description = "Validation error (missing or invalid current_page for status 'reading')"),
        (status = 401, description = "Unauthorized"),
    ),
    security(("Bearer" = []))
)]
pub async fn update_reading_progress(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
    Json(body): Json<UpdateReadingProgressRequest>,
) -> Result<Json<ReadingProgressResponse>, ApiError> {
    // Validate status value
    // NOTE(review): this path returns 400 (bad_request) while the OpenAPI
    // annotation above only documents 422 for validation errors — confirm
    // which status code is intended.
    if !["unread", "reading", "read"].contains(&body.status.as_str()) {
        return Err(ApiError::bad_request(format!(
            "invalid status '{}': must be one of unread, reading, read",
            body.status
        )));
    }

    // Validate current_page for "reading" status
    if body.status == "reading" {
        match body.current_page {
            None => {
                return Err(ApiError::unprocessable_entity(
                    "current_page is required when status is 'reading'",
                ))
            }
            Some(p) if p <= 0 => {
                return Err(ApiError::unprocessable_entity(
                    "current_page must be greater than 0",
                ))
            }
            _ => {}
        }
    }

    // Verify book exists
    let exists: bool = sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM books WHERE id = $1)")
        .bind(id)
        .fetch_one(&state.pool)
        .await?;

    if !exists {
        return Err(ApiError::not_found("book not found"));
    }

    // current_page is only stored for "reading" status
    let current_page = if body.status == "reading" {
        body.current_page
    } else {
        None
    };

    // Upsert keyed on book_id; RETURNING gives back the persisted row so the
    // response reflects exactly what was stored (incl. server-side NOW()).
    let row = sqlx::query(
        r#"
        INSERT INTO book_reading_progress (book_id, status, current_page, last_read_at, updated_at)
        VALUES ($1, $2, $3, NOW(), NOW())
        ON CONFLICT (book_id) DO UPDATE
        SET status = EXCLUDED.status,
            current_page = EXCLUDED.current_page,
            last_read_at = NOW(),
            updated_at = NOW()
        RETURNING status, current_page, last_read_at
        "#,
    )
    .bind(id)
    .bind(&body.status)
    .bind(current_page)
    .fetch_one(&state.pool)
    .await?;

    Ok(Json(ReadingProgressResponse {
        status: row.get("status"),
        current_page: row.get("current_page"),
        last_read_at: row.get("last_read_at"),
    }))
}
|
||||
|
||||
/// Request body for `POST /series/mark-read`.
#[derive(Deserialize, ToSchema)]
pub struct MarkSeriesReadRequest {
    /// Series name (use "unclassified" for books without series)
    pub series: String,
    /// Status to set: "read" or "unread"
    pub status: String,
}
|
||||
|
||||
/// Result of a bulk series mark-read/unread operation.
#[derive(Serialize, ToSchema)]
pub struct MarkSeriesReadResponse {
    // Number of progress rows inserted, updated, or deleted.
    pub updated: i64,
}
|
||||
|
||||
/// Mark all books in a series as read or unread
///
/// "unread" deletes the progress rows (reset); "read" upserts a 'read' row
/// for every book in the series.
#[utoipa::path(
    post,
    path = "/series/mark-read",
    tag = "reading-progress",
    request_body = MarkSeriesReadRequest,
    responses(
        (status = 200, body = MarkSeriesReadResponse),
        (status = 422, description = "Invalid status"),
        (status = 401, description = "Unauthorized"),
    ),
    security(("Bearer" = []))
)]
pub async fn mark_series_read(
    State(state): State<AppState>,
    Json(body): Json<MarkSeriesReadRequest>,
) -> Result<Json<MarkSeriesReadResponse>, ApiError> {
    // NOTE(review): returns 400 (bad_request) while the annotation above
    // documents 422 for an invalid status — confirm which is intended.
    if !["read", "unread"].contains(&body.status.as_str()) {
        return Err(ApiError::bad_request(
            "status must be 'read' or 'unread'",
        ));
    }

    // Static SQL fragment chosen from two fixed literals — never user input,
    // so interpolating it via format!() below is injection-safe.
    // "unclassified" is the sentinel for books without a series.
    let series_filter = if body.series == "unclassified" {
        "(series IS NULL OR series = '')"
    } else {
        "series = $1"
    };

    let sql = if body.status == "unread" {
        // Delete progress records to reset to unread
        format!(
            r#"
            WITH target_books AS (
                SELECT id FROM books WHERE {series_filter}
            )
            DELETE FROM book_reading_progress
            WHERE book_id IN (SELECT id FROM target_books)
            "#
        )
    } else {
        // Upsert a 'read' row (current_page cleared) for every book.
        format!(
            r#"
            INSERT INTO book_reading_progress (book_id, status, current_page, last_read_at, updated_at)
            SELECT id, 'read', NULL, NOW(), NOW()
            FROM books
            WHERE {series_filter}
            ON CONFLICT (book_id) DO UPDATE
            SET status = 'read',
                current_page = NULL,
                last_read_at = NOW(),
                updated_at = NOW()
            "#
        )
    };

    // Bind $1 only when the chosen filter actually references it.
    let result = if body.series == "unclassified" {
        sqlx::query(&sql).execute(&state.pool).await?
    } else {
        sqlx::query(&sql).bind(&body.series).execute(&state.pool).await?
    };

    Ok(Json(MarkSeriesReadResponse {
        updated: result.rows_affected() as i64,
    }))
}
|
||||
@@ -1,8 +1,10 @@
|
||||
use axum::{extract::{Query, State}, Json};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::Row;
|
||||
use utoipa::ToSchema;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{error::ApiError, AppState};
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct SearchQuery {
|
||||
@@ -18,24 +20,36 @@ pub struct SearchQuery {
|
||||
pub limit: Option<usize>,
|
||||
}
|
||||
|
||||
/// A series matched by search, with aggregate read counts.
#[derive(Serialize, ToSchema)]
pub struct SeriesHit {
    /// Library that owns the series.
    #[schema(value_type = String)]
    pub library_id: Uuid,
    // Series name; "unclassified" groups books without a series.
    pub name: String,
    // Total number of books in the series.
    pub book_count: i64,
    // Books in the series whose reading status is 'read'.
    pub books_read_count: i64,
    /// First book in the series' sort order (row with rn = 1 in the query).
    #[schema(value_type = String)]
    pub first_book_id: Uuid,
}
|
||||
|
||||
/// Combined search results: book hits plus aggregated series hits.
#[derive(Serialize, ToSchema)]
pub struct SearchResponse {
    // Book hits as a JSON array of book objects.
    pub hits: serde_json::Value,
    // Series-level matches for the same query.
    pub series_hits: Vec<SeriesHit>,
    // Approximate total number of book hits, when known.
    pub estimated_total_hits: Option<u64>,
    // Server-side search duration in milliseconds, when measured.
    pub processing_time_ms: Option<u64>,
}
|
||||
|
||||
/// Search books across all libraries using Meilisearch
|
||||
/// Search books across all libraries
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/search",
|
||||
tag = "books",
|
||||
params(
|
||||
("q" = String, Query, description = "Search query"),
|
||||
("q" = String, Query, description = "Search query (books + series via PostgreSQL full-text)"),
|
||||
("library_id" = Option<String>, Query, description = "Filter by library ID"),
|
||||
("type" = Option<String>, Query, description = "Filter by type (cbz, cbr, pdf)"),
|
||||
("kind" = Option<String>, Query, description = "Filter by kind (alias for type)"),
|
||||
("limit" = Option<usize>, Query, description = "Max results (max 100)"),
|
||||
("limit" = Option<usize>, Query, description = "Max results per type (max 100)"),
|
||||
),
|
||||
responses(
|
||||
(status = 200, body = SearchResponse),
|
||||
@@ -51,51 +65,127 @@ pub async fn search_books(
|
||||
return Err(ApiError::bad_request("q is required"));
|
||||
}
|
||||
|
||||
let mut filters: Vec<String> = Vec::new();
|
||||
if let Some(library_id) = query.library_id.as_deref() {
|
||||
filters.push(format!("library_id = '{}'", library_id.replace('"', "")));
|
||||
}
|
||||
let kind_filter = query.r#type.as_deref().or(query.kind.as_deref());
|
||||
if let Some(kind) = kind_filter {
|
||||
filters.push(format!("kind = '{}'", kind.replace('"', "")));
|
||||
}
|
||||
let limit_val = query.limit.unwrap_or(20).clamp(1, 100) as i64;
|
||||
let q_pattern = format!("%{}%", query.q);
|
||||
let library_id_uuid: Option<Uuid> = query.library_id.as_deref()
|
||||
.and_then(|s| s.parse().ok());
|
||||
let kind_filter: Option<&str> = query.r#type.as_deref().or(query.kind.as_deref());
|
||||
|
||||
let body = serde_json::json!({
|
||||
"q": query.q,
|
||||
"limit": query.limit.unwrap_or(20).clamp(1, 100),
|
||||
"filter": if filters.is_empty() { serde_json::Value::Null } else { serde_json::Value::String(filters.join(" AND ")) }
|
||||
});
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
let client = reqwest::Client::new();
|
||||
let url = format!("{}/indexes/books/search", state.meili_url.trim_end_matches('/'));
|
||||
let response = client
|
||||
.post(url)
|
||||
.header("Authorization", format!("Bearer {}", state.meili_master_key))
|
||||
.json(&body)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| ApiError::internal(format!("meili request failed: {e}")))?;
|
||||
// Book search via PostgreSQL ILIKE on title, authors, series
|
||||
let books_sql = r#"
|
||||
SELECT b.id, b.library_id, b.kind, b.title,
|
||||
COALESCE(b.authors, CASE WHEN b.author IS NOT NULL AND b.author != '' THEN ARRAY[b.author] ELSE ARRAY[]::text[] END) as authors,
|
||||
b.series, b.volume, b.language
|
||||
FROM books b
|
||||
LEFT JOIN series_metadata sm
|
||||
ON sm.library_id = b.library_id
|
||||
AND sm.name = COALESCE(NULLIF(b.series, ''), 'unclassified')
|
||||
WHERE (
|
||||
b.title ILIKE $1
|
||||
OR b.series ILIKE $1
|
||||
OR EXISTS (SELECT 1 FROM unnest(
|
||||
COALESCE(b.authors, CASE WHEN b.author IS NOT NULL AND b.author != '' THEN ARRAY[b.author] ELSE ARRAY[]::text[] END)
|
||||
|| COALESCE(sm.authors, ARRAY[]::text[])
|
||||
) AS a WHERE a ILIKE $1)
|
||||
)
|
||||
AND ($2::uuid IS NULL OR b.library_id = $2)
|
||||
AND ($3::text IS NULL OR b.kind = $3)
|
||||
ORDER BY
|
||||
CASE WHEN b.title ILIKE $1 THEN 0 ELSE 1 END,
|
||||
b.title ASC
|
||||
LIMIT $4
|
||||
"#;
|
||||
|
||||
if !response.status().is_success() {
|
||||
let body = response.text().await.unwrap_or_else(|_| "unknown meili error".to_string());
|
||||
if body.contains("index_not_found") {
|
||||
return Ok(Json(SearchResponse {
|
||||
hits: serde_json::json!([]),
|
||||
estimated_total_hits: Some(0),
|
||||
processing_time_ms: Some(0),
|
||||
}));
|
||||
}
|
||||
return Err(ApiError::internal(format!("meili error: {body}")));
|
||||
}
|
||||
let series_sql = r#"
|
||||
WITH sorted_books AS (
|
||||
SELECT
|
||||
library_id,
|
||||
COALESCE(NULLIF(series, ''), 'unclassified') as name,
|
||||
id,
|
||||
ROW_NUMBER() OVER (
|
||||
PARTITION BY library_id, COALESCE(NULLIF(series, ''), 'unclassified')
|
||||
ORDER BY
|
||||
REGEXP_REPLACE(LOWER(title), '[0-9]+', '', 'g'),
|
||||
COALESCE((REGEXP_MATCH(LOWER(title), '\d+'))[1]::int, 0),
|
||||
title ASC
|
||||
) as rn
|
||||
FROM books
|
||||
WHERE ($2::uuid IS NULL OR library_id = $2)
|
||||
),
|
||||
series_counts AS (
|
||||
SELECT
|
||||
sb.library_id,
|
||||
sb.name,
|
||||
COUNT(*) as book_count,
|
||||
COUNT(brp.book_id) FILTER (WHERE brp.status = 'read') as books_read_count
|
||||
FROM sorted_books sb
|
||||
LEFT JOIN book_reading_progress brp ON brp.book_id = sb.id
|
||||
GROUP BY sb.library_id, sb.name
|
||||
)
|
||||
SELECT sc.library_id, sc.name, sc.book_count, sc.books_read_count, sb.id as first_book_id
|
||||
FROM series_counts sc
|
||||
JOIN sorted_books sb ON sb.library_id = sc.library_id AND sb.name = sc.name AND sb.rn = 1
|
||||
WHERE sc.name ILIKE $1
|
||||
ORDER BY sc.name ASC
|
||||
LIMIT $4
|
||||
"#;
|
||||
|
||||
let payload: serde_json::Value = response
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| ApiError::internal(format!("invalid meili response: {e}")))?;
|
||||
let (books_rows, series_rows) = tokio::join!(
|
||||
sqlx::query(books_sql)
|
||||
.bind(&q_pattern)
|
||||
.bind(library_id_uuid)
|
||||
.bind(kind_filter)
|
||||
.bind(limit_val)
|
||||
.fetch_all(&state.pool),
|
||||
sqlx::query(series_sql)
|
||||
.bind(&q_pattern)
|
||||
.bind(library_id_uuid)
|
||||
.bind(kind_filter) // unused in series query but keeps bind positions consistent
|
||||
.bind(limit_val)
|
||||
.fetch_all(&state.pool)
|
||||
);
|
||||
|
||||
let elapsed_ms = start.elapsed().as_millis() as u64;
|
||||
|
||||
// Build book hits as JSON array (same shape as before)
|
||||
let books_rows = books_rows.map_err(|e| ApiError::internal(format!("book search failed: {e}")))?;
|
||||
let hits: Vec<serde_json::Value> = books_rows
|
||||
.iter()
|
||||
.map(|row| {
|
||||
serde_json::json!({
|
||||
"id": row.get::<Uuid, _>("id").to_string(),
|
||||
"library_id": row.get::<Uuid, _>("library_id").to_string(),
|
||||
"kind": row.get::<String, _>("kind"),
|
||||
"title": row.get::<String, _>("title"),
|
||||
"authors": row.get::<Vec<String>, _>("authors"),
|
||||
"series": row.get::<Option<String>, _>("series"),
|
||||
"volume": row.get::<Option<i32>, _>("volume"),
|
||||
"language": row.get::<Option<String>, _>("language"),
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let estimated_total_hits = hits.len() as u64;
|
||||
|
||||
// Series hits
|
||||
let series_hits: Vec<SeriesHit> = series_rows
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.map(|row| SeriesHit {
|
||||
library_id: row.get("library_id"),
|
||||
name: row.get("name"),
|
||||
book_count: row.get("book_count"),
|
||||
books_read_count: row.get("books_read_count"),
|
||||
first_book_id: row.get("first_book_id"),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(Json(SearchResponse {
|
||||
hits: payload.get("hits").cloned().unwrap_or_else(|| serde_json::json!([])),
|
||||
estimated_total_hits: payload.get("estimatedTotalHits").and_then(|v| v.as_u64()),
|
||||
processing_time_ms: payload.get("processingTimeMs").and_then(|v| v.as_u64()),
|
||||
hits: serde_json::Value::Array(hits),
|
||||
series_hits,
|
||||
estimated_total_hits: Some(estimated_total_hits),
|
||||
processing_time_ms: Some(elapsed_ms),
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -1,33 +1,35 @@
|
||||
use axum::{
|
||||
extract::State,
|
||||
routing::{get, post},
|
||||
extract::{Path as AxumPath, State},
|
||||
routing::{delete, get, post},
|
||||
Json, Router,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use sqlx::Row;
|
||||
use uuid::Uuid;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{error::ApiError, AppState};
|
||||
use crate::{error::ApiError, state::{AppState, load_dynamic_settings}};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
pub struct UpdateSettingRequest {
|
||||
pub value: Value,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
pub struct ClearCacheResponse {
|
||||
pub success: bool,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
pub struct CacheStats {
|
||||
pub total_size_mb: f64,
|
||||
pub file_count: u64,
|
||||
pub directory: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
pub struct ThumbnailStats {
|
||||
pub total_size_mb: f64,
|
||||
pub file_count: u64,
|
||||
@@ -41,9 +43,28 @@ pub fn settings_routes() -> Router<AppState> {
|
||||
.route("/settings/cache/clear", post(clear_cache))
|
||||
.route("/settings/cache/stats", get(get_cache_stats))
|
||||
.route("/settings/thumbnail/stats", get(get_thumbnail_stats))
|
||||
.route(
|
||||
"/settings/status-mappings",
|
||||
get(list_status_mappings).post(upsert_status_mapping),
|
||||
)
|
||||
.route(
|
||||
"/settings/status-mappings/:id",
|
||||
delete(delete_status_mapping),
|
||||
)
|
||||
}
|
||||
|
||||
async fn get_settings(State(state): State<AppState>) -> Result<Json<Value>, ApiError> {
|
||||
/// List all settings
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/settings",
|
||||
tag = "settings",
|
||||
responses(
|
||||
(status = 200, description = "All settings as key/value object"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn get_settings(State(state): State<AppState>) -> Result<Json<Value>, ApiError> {
|
||||
let rows = sqlx::query(r#"SELECT key, value FROM app_settings"#)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
@@ -58,7 +79,20 @@ async fn get_settings(State(state): State<AppState>) -> Result<Json<Value>, ApiE
|
||||
Ok(Json(Value::Object(settings)))
|
||||
}
|
||||
|
||||
async fn get_setting(
|
||||
/// Get a single setting by key
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/settings/{key}",
|
||||
tag = "settings",
|
||||
params(("key" = String, Path, description = "Setting key")),
|
||||
responses(
|
||||
(status = 200, description = "Setting value"),
|
||||
(status = 404, description = "Setting not found"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn get_setting(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key): axum::extract::Path<String>,
|
||||
) -> Result<Json<Value>, ApiError> {
|
||||
@@ -76,7 +110,20 @@ async fn get_setting(
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_setting(
|
||||
/// Create or update a setting
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/settings/{key}",
|
||||
tag = "settings",
|
||||
params(("key" = String, Path, description = "Setting key")),
|
||||
request_body = UpdateSettingRequest,
|
||||
responses(
|
||||
(status = 200, description = "Updated setting value"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn update_setting(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key): axum::extract::Path<String>,
|
||||
Json(body): Json<UpdateSettingRequest>,
|
||||
@@ -96,12 +143,29 @@ async fn update_setting(
|
||||
.await?;
|
||||
|
||||
let value: Value = row.get("value");
|
||||
|
||||
// Rechargement des settings dynamiques si la clé affecte le comportement runtime
|
||||
if key == "limits" || key == "image_processing" || key == "cache" {
|
||||
let new_settings = load_dynamic_settings(&state.pool).await;
|
||||
*state.settings.write().await = new_settings;
|
||||
}
|
||||
|
||||
Ok(Json(value))
|
||||
}
|
||||
|
||||
async fn clear_cache(State(_state): State<AppState>) -> Result<Json<ClearCacheResponse>, ApiError> {
|
||||
let cache_dir = std::env::var("IMAGE_CACHE_DIR")
|
||||
.unwrap_or_else(|_| "/tmp/stripstream-image-cache".to_string());
|
||||
/// Clear the image page cache
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/settings/cache/clear",
|
||||
tag = "settings",
|
||||
responses(
|
||||
(status = 200, body = ClearCacheResponse),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn clear_cache(State(state): State<AppState>) -> Result<Json<ClearCacheResponse>, ApiError> {
|
||||
let cache_dir = state.settings.read().await.cache_directory.clone();
|
||||
|
||||
let result = tokio::task::spawn_blocking(move || {
|
||||
if std::path::Path::new(&cache_dir).exists() {
|
||||
@@ -128,9 +192,19 @@ async fn clear_cache(State(_state): State<AppState>) -> Result<Json<ClearCacheRe
|
||||
Ok(Json(result))
|
||||
}
|
||||
|
||||
async fn get_cache_stats(State(_state): State<AppState>) -> Result<Json<CacheStats>, ApiError> {
|
||||
let cache_dir = std::env::var("IMAGE_CACHE_DIR")
|
||||
.unwrap_or_else(|_| "/tmp/stripstream-image-cache".to_string());
|
||||
/// Get image page cache statistics
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/settings/cache/stats",
|
||||
tag = "settings",
|
||||
responses(
|
||||
(status = 200, body = CacheStats),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn get_cache_stats(State(state): State<AppState>) -> Result<Json<CacheStats>, ApiError> {
|
||||
let cache_dir = state.settings.read().await.cache_directory.clone();
|
||||
|
||||
let cache_dir_clone = cache_dir.clone();
|
||||
let stats = tokio::task::spawn_blocking(move || {
|
||||
@@ -208,7 +282,18 @@ fn compute_dir_stats(path: &std::path::Path) -> (u64, u64) {
|
||||
(total_size, file_count)
|
||||
}
|
||||
|
||||
async fn get_thumbnail_stats(State(_state): State<AppState>) -> Result<Json<ThumbnailStats>, ApiError> {
|
||||
/// Get thumbnail storage statistics
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/settings/thumbnail/stats",
|
||||
tag = "settings",
|
||||
responses(
|
||||
(status = 200, body = ThumbnailStats),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn get_thumbnail_stats(State(_state): State<AppState>) -> Result<Json<ThumbnailStats>, ApiError> {
|
||||
let settings = sqlx::query(r#"SELECT value FROM app_settings WHERE key = 'thumbnail'"#)
|
||||
.fetch_optional(&_state.pool)
|
||||
.await?;
|
||||
@@ -248,3 +333,120 @@ async fn get_thumbnail_stats(State(_state): State<AppState>) -> Result<Json<Thum
|
||||
|
||||
Ok(Json(stats))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Status Mappings
// ---------------------------------------------------------------------------

/// A persisted mapping from a metadata provider's status string to the
/// application's canonical reading status, as returned by the API.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct StatusMappingDto {
    // Mapping row UUID, serialized as a string.
    pub id: String,
    // Provider-side status (stored lowercased by the upsert handler).
    pub provider_status: String,
    // Canonical status it maps to.
    pub mapped_status: String,
}

/// Request body for creating or updating a status mapping.
#[derive(Debug, Clone, Deserialize, ToSchema)]
pub struct UpsertStatusMappingRequest {
    // Provider-side status; lowercased by the handler before storage.
    pub provider_status: String,
    // Canonical status to map it to.
    pub mapped_status: String,
}
|
||||
|
||||
/// List all status mappings
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/settings/status-mappings",
|
||||
tag = "settings",
|
||||
responses(
|
||||
(status = 200, body = Vec<StatusMappingDto>),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn list_status_mappings(
|
||||
State(state): State<AppState>,
|
||||
) -> Result<Json<Vec<StatusMappingDto>>, ApiError> {
|
||||
let rows = sqlx::query(
|
||||
"SELECT id, provider_status, mapped_status FROM status_mappings ORDER BY mapped_status, provider_status",
|
||||
)
|
||||
.fetch_all(&state.pool)
|
||||
.await?;
|
||||
|
||||
let mappings = rows
|
||||
.iter()
|
||||
.map(|row| StatusMappingDto {
|
||||
id: row.get::<Uuid, _>("id").to_string(),
|
||||
provider_status: row.get("provider_status"),
|
||||
mapped_status: row.get("mapped_status"),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(Json(mappings))
|
||||
}
|
||||
|
||||
/// Create or update a status mapping
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/settings/status-mappings",
|
||||
tag = "settings",
|
||||
request_body = UpsertStatusMappingRequest,
|
||||
responses(
|
||||
(status = 200, body = StatusMappingDto),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn upsert_status_mapping(
|
||||
State(state): State<AppState>,
|
||||
Json(body): Json<UpsertStatusMappingRequest>,
|
||||
) -> Result<Json<StatusMappingDto>, ApiError> {
|
||||
let provider_status = body.provider_status.to_lowercase();
|
||||
|
||||
let row = sqlx::query(
|
||||
r#"
|
||||
INSERT INTO status_mappings (provider_status, mapped_status)
|
||||
VALUES ($1, $2)
|
||||
ON CONFLICT (provider_status)
|
||||
DO UPDATE SET mapped_status = $2, updated_at = NOW()
|
||||
RETURNING id, provider_status, mapped_status
|
||||
"#,
|
||||
)
|
||||
.bind(&provider_status)
|
||||
.bind(&body.mapped_status)
|
||||
.fetch_one(&state.pool)
|
||||
.await?;
|
||||
|
||||
Ok(Json(StatusMappingDto {
|
||||
id: row.get::<Uuid, _>("id").to_string(),
|
||||
provider_status: row.get("provider_status"),
|
||||
mapped_status: row.get("mapped_status"),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Delete a status mapping
|
||||
#[utoipa::path(
|
||||
delete,
|
||||
path = "/settings/status-mappings/{id}",
|
||||
tag = "settings",
|
||||
params(("id" = String, Path, description = "Mapping UUID")),
|
||||
responses(
|
||||
(status = 204, description = "Deleted"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 404, description = "Not found"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn delete_status_mapping(
|
||||
State(state): State<AppState>,
|
||||
AxumPath(id): AxumPath<Uuid>,
|
||||
) -> Result<Json<Value>, ApiError> {
|
||||
let result = sqlx::query("DELETE FROM status_mappings WHERE id = $1")
|
||||
.bind(id)
|
||||
.execute(&state.pool)
|
||||
.await?;
|
||||
|
||||
if result.rows_affected() == 0 {
|
||||
return Err(ApiError::not_found("status mapping not found"));
|
||||
}
|
||||
|
||||
Ok(Json(serde_json::json!({"deleted": true})))
|
||||
}
|
||||
|
||||
134
apps/api/src/state.rs
Normal file
134
apps/api/src/state.rs
Normal file
@@ -0,0 +1,134 @@
|
||||
use std::sync::{
|
||||
atomic::AtomicU64,
|
||||
Arc,
|
||||
};
|
||||
use std::time::Instant;
|
||||
|
||||
use lru::LruCache;
|
||||
use sqlx::{Pool, Postgres, Row};
|
||||
use tokio::sync::{Mutex, RwLock, Semaphore};
|
||||
|
||||
/// Shared application state; cheaply cloneable (all fields are Arc-wrapped
/// or pooled) and handed to every axum handler via `State`.
#[derive(Clone)]
pub struct AppState {
    // Postgres connection pool.
    pub pool: sqlx::PgPool,
    // Static token — presumably authorizes first-time setup; confirm at call sites.
    pub bootstrap_token: Arc<str>,
    // LRU cache of rendered page bytes, keyed by a string cache key.
    pub page_cache: Arc<Mutex<LruCache<String, Arc<Vec<u8>>>>>,
    // Semaphore bounding concurrent page renders (sized via load_concurrent_renders).
    pub page_render_limit: Arc<Semaphore>,
    // Process-wide request/cache counters.
    pub metrics: Arc<Metrics>,
    // Fixed-window rate-limiter state (window start + request count).
    pub read_rate_limit: Arc<Mutex<ReadRateLimit>>,
    // Runtime-reloadable settings sourced from app_settings (see load_dynamic_settings).
    pub settings: Arc<RwLock<DynamicSettings>>,
}
|
||||
|
||||
/// Settings that can change at runtime without a restart; loaded from the
/// `app_settings` table by `load_dynamic_settings` and overlaid on defaults.
#[derive(Clone)]
pub struct DynamicSettings {
    // From the 'limits' settings row.
    pub rate_limit_per_second: u32,
    pub timeout_seconds: u64,
    // From the 'image_processing' settings row.
    pub image_format: String,
    // Clamped to 1..=100 when loaded from the database.
    pub image_quality: u8,
    pub image_filter: String,
    pub image_max_width: u32,
    // From the 'cache' settings row; default comes from IMAGE_CACHE_DIR.
    pub cache_directory: String,
}
|
||||
|
||||
impl Default for DynamicSettings {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
rate_limit_per_second: 120,
|
||||
timeout_seconds: 12,
|
||||
image_format: "webp".to_string(),
|
||||
image_quality: 85,
|
||||
image_filter: "triangle".to_string(),
|
||||
image_max_width: 2160,
|
||||
cache_directory: std::env::var("IMAGE_CACHE_DIR")
|
||||
.unwrap_or_else(|_| "/tmp/stripstream-image-cache".to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process-wide counters, incremented atomically from request handlers.
pub struct Metrics {
    // Total requests served (assumed from name — confirm at increment site).
    pub requests_total: AtomicU64,
    // Page-cache hit counter.
    pub page_cache_hits: AtomicU64,
    // Page-cache miss counter.
    pub page_cache_misses: AtomicU64,
}

/// Fixed-window rate-limiter state: a window start time plus the number of
/// requests observed since that instant.
pub struct ReadRateLimit {
    pub window_started_at: Instant,
    pub requests_in_window: u32,
}
|
||||
|
||||
impl Metrics {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
requests_total: AtomicU64::new(0),
|
||||
page_cache_hits: AtomicU64::new(0),
|
||||
page_cache_misses: AtomicU64::new(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn load_concurrent_renders(pool: &Pool<Postgres>) -> usize {
|
||||
let default_concurrency = 8;
|
||||
let row = sqlx::query(r#"SELECT value FROM app_settings WHERE key = 'limits'"#)
|
||||
.fetch_optional(pool)
|
||||
.await;
|
||||
|
||||
match row {
|
||||
Ok(Some(row)) => {
|
||||
let value: serde_json::Value = row.get("value");
|
||||
value
|
||||
.get("concurrent_renders")
|
||||
.and_then(|v: &serde_json::Value| v.as_u64())
|
||||
.map(|v| v as usize)
|
||||
.unwrap_or(default_concurrency)
|
||||
}
|
||||
_ => default_concurrency,
|
||||
}
|
||||
}
|
||||
|
||||
/// Load the runtime-tunable settings from the `app_settings` table.
///
/// Starts from `DynamicSettings::default()` and overlays whichever keys are
/// present. Best-effort by design: a missing row, query error, or malformed
/// field leaves the corresponding default untouched — this never fails.
pub async fn load_dynamic_settings(pool: &Pool<Postgres>) -> DynamicSettings {
    let mut s = DynamicSettings::default();

    // 'limits' row: request-throttling knobs.
    if let Ok(Some(row)) = sqlx::query(r#"SELECT value FROM app_settings WHERE key = 'limits'"#)
        .fetch_optional(pool)
        .await
    {
        let v: serde_json::Value = row.get("value");
        if let Some(n) = v.get("rate_limit_per_second").and_then(|x| x.as_u64()) {
            s.rate_limit_per_second = n as u32;
        }
        if let Some(n) = v.get("timeout_seconds").and_then(|x| x.as_u64()) {
            s.timeout_seconds = n;
        }
    }

    // 'image_processing' row: page-render encoding parameters.
    if let Ok(Some(row)) = sqlx::query(r#"SELECT value FROM app_settings WHERE key = 'image_processing'"#)
        .fetch_optional(pool)
        .await
    {
        let v: serde_json::Value = row.get("value");
        if let Some(s2) = v.get("format").and_then(|x| x.as_str()) {
            s.image_format = s2.to_string();
        }
        // Clamp to the valid quality range before narrowing to u8.
        if let Some(n) = v.get("quality").and_then(|x| x.as_u64()) {
            s.image_quality = n.clamp(1, 100) as u8;
        }
        if let Some(s2) = v.get("filter").and_then(|x| x.as_str()) {
            s.image_filter = s2.to_string();
        }
        if let Some(n) = v.get("max_width").and_then(|x| x.as_u64()) {
            s.image_max_width = n as u32;
        }
    }

    // 'cache' row: where rendered page images are stored on disk.
    if let Ok(Some(row)) = sqlx::query(r#"SELECT value FROM app_settings WHERE key = 'cache'"#)
        .fetch_optional(pool)
        .await
    {
        let v: serde_json::Value = row.get("value");
        if let Some(dir) = v.get("directory").and_then(|x| x.as_str()) {
            s.cache_directory = dir.to_string();
        }
    }

    s
}
|
||||
340
apps/api/src/stats.rs
Normal file
340
apps/api/src/stats.rs
Normal file
@@ -0,0 +1,340 @@
|
||||
use axum::{extract::State, Json};
|
||||
use serde::Serialize;
|
||||
use sqlx::Row;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
/// Headline totals for the whole collection.
#[derive(Serialize, ToSchema)]
pub struct StatsOverview {
    pub total_books: i64,
    // Distinct non-empty `series` values.
    pub total_series: i64,
    pub total_libraries: i64,
    // Sum of `page_count` across all books.
    pub total_pages: i64,
    // Sum of the most recent file size per book, in bytes.
    pub total_size_bytes: i64,
    // Distinct authors across the `authors` array and the legacy `author` column.
    pub total_authors: i64,
}

/// Book counts per reading status; books without a progress row count as unread.
#[derive(Serialize, ToSchema)]
pub struct ReadingStatusStats {
    pub unread: i64,
    pub reading: i64,
    pub read: i64,
}

/// Book count per file format (falls back to book `kind` when no file row).
#[derive(Serialize, ToSchema)]
pub struct FormatCount {
    pub format: String,
    pub count: i64,
}

/// Book count per language; `None` groups books with no language set.
#[derive(Serialize, ToSchema)]
pub struct LanguageCount {
    pub language: Option<String>,
    pub count: i64,
}

/// Per-library breakdown of size and reading progress.
#[derive(Serialize, ToSchema)]
pub struct LibraryStats {
    pub library_name: String,
    pub book_count: i64,
    pub size_bytes: i64,
    pub read_count: i64,
    pub reading_count: i64,
    pub unread_count: i64,
}

/// One of the ten largest series by book count.
#[derive(Serialize, ToSchema)]
pub struct TopSeries {
    pub series: String,
    pub book_count: i64,
    pub read_count: i64,
    pub total_pages: i64,
}

/// Books added in a given month (format "YYYY-MM"), over the last 12 months.
#[derive(Serialize, ToSchema)]
pub struct MonthlyAdditions {
    pub month: String,
    pub books_added: i64,
}

/// Metadata-coverage figures: how many series are linked to an external
/// provider, and how complete per-book metadata is.
#[derive(Serialize, ToSchema)]
pub struct MetadataStats {
    pub total_series: i64,
    // Series with an approved external_metadata_links row.
    pub series_linked: i64,
    // total_series - series_linked.
    pub series_unlinked: i64,
    pub books_with_summary: i64,
    pub books_with_isbn: i64,
    pub by_provider: Vec<ProviderCount>,
}

/// Approved series-link count per external metadata provider.
#[derive(Serialize, ToSchema)]
pub struct ProviderCount {
    pub provider: String,
    pub count: i64,
}

/// Full payload for the stats dashboard endpoint.
#[derive(Serialize, ToSchema)]
pub struct StatsResponse {
    pub overview: StatsOverview,
    pub reading_status: ReadingStatusStats,
    pub by_format: Vec<FormatCount>,
    pub by_language: Vec<LanguageCount>,
    pub by_library: Vec<LibraryStats>,
    pub top_series: Vec<TopSeries>,
    pub additions_over_time: Vec<MonthlyAdditions>,
    pub metadata: MetadataStats,
}
|
||||
|
||||
/// Get collection statistics for the dashboard
///
/// Aggregates seven read-only queries into one response: overview totals,
/// reading-status counts, format/language/library breakdowns, top series,
/// monthly additions, and metadata coverage.
#[utoipa::path(
    get,
    path = "/stats",
    tag = "books",
    responses(
        (status = 200, body = StatsResponse),
        (status = 401, description = "Unauthorized"),
    ),
    security(("Bearer" = []))
)]
pub async fn get_stats(
    State(state): State<AppState>,
) -> Result<Json<StatsResponse>, ApiError> {
    // Overview + reading status in one query
    // NOTE(review): the LEFT JOIN assumes at most one book_reading_progress
    // row per book; multiple rows (e.g. per-user progress) would inflate
    // every COUNT here — confirm against the schema.
    let overview_row = sqlx::query(
        r#"
        SELECT
            COUNT(*) AS total_books,
            COUNT(DISTINCT NULLIF(series, '')) AS total_series,
            COUNT(DISTINCT library_id) AS total_libraries,
            COALESCE(SUM(page_count), 0)::BIGINT AS total_pages,
            (SELECT COUNT(DISTINCT a) FROM (
                SELECT DISTINCT UNNEST(authors) AS a FROM books WHERE authors != '{}'
                UNION
                SELECT DISTINCT author FROM books WHERE author IS NOT NULL AND author != ''
            ) sub) AS total_authors,
            COUNT(*) FILTER (WHERE COALESCE(brp.status, 'unread') = 'unread') AS unread,
            COUNT(*) FILTER (WHERE brp.status = 'reading') AS reading,
            COUNT(*) FILTER (WHERE brp.status = 'read') AS read
        FROM books b
        LEFT JOIN book_reading_progress brp ON brp.book_id = b.id
        "#,
    )
    .fetch_one(&state.pool)
    .await?;

    // Total size from book_files
    // Only the most recently updated file per book is counted, so books with
    // multiple file versions are not double-counted.
    let size_row = sqlx::query(
        r#"
        SELECT COALESCE(SUM(bf.size_bytes), 0)::BIGINT AS total_size_bytes
        FROM (
            SELECT DISTINCT ON (book_id) size_bytes
            FROM book_files
            ORDER BY book_id, updated_at DESC
        ) bf
        "#,
    )
    .fetch_one(&state.pool)
    .await?;

    let overview = StatsOverview {
        total_books: overview_row.get("total_books"),
        total_series: overview_row.get("total_series"),
        total_libraries: overview_row.get("total_libraries"),
        total_pages: overview_row.get("total_pages"),
        total_size_bytes: size_row.get("total_size_bytes"),
        total_authors: overview_row.get("total_authors"),
    };

    let reading_status = ReadingStatusStats {
        unread: overview_row.get("unread"),
        reading: overview_row.get("reading"),
        read: overview_row.get("read"),
    };

    // By format: latest file's format per book, falling back to the book's kind.
    let format_rows = sqlx::query(
        r#"
        SELECT COALESCE(bf.format, b.kind) AS fmt, COUNT(*) AS count
        FROM books b
        LEFT JOIN LATERAL (
            SELECT format FROM book_files WHERE book_id = b.id ORDER BY updated_at DESC LIMIT 1
        ) bf ON TRUE
        GROUP BY fmt
        ORDER BY count DESC
        "#,
    )
    .fetch_all(&state.pool)
    .await?;

    let by_format: Vec<FormatCount> = format_rows
        .iter()
        .map(|r| FormatCount {
            // fmt can still be NULL if both format and kind are NULL.
            format: r.get::<Option<String>, _>("fmt").unwrap_or_else(|| "unknown".to_string()),
            count: r.get("count"),
        })
        .collect();

    // By language
    let lang_rows = sqlx::query(
        r#"
        SELECT language, COUNT(*) AS count
        FROM books
        GROUP BY language
        ORDER BY count DESC
        "#,
    )
    .fetch_all(&state.pool)
    .await?;

    let by_language: Vec<LanguageCount> = lang_rows
        .iter()
        .map(|r| LanguageCount {
            language: r.get("language"),
            count: r.get("count"),
        })
        .collect();

    // By library: per-library counts, size (latest file per book) and
    // reading-status breakdown. Empty libraries still appear (LEFT JOIN).
    let lib_rows = sqlx::query(
        r#"
        SELECT
            l.name AS library_name,
            COUNT(b.id) AS book_count,
            COALESCE(SUM(bf.size_bytes), 0)::BIGINT AS size_bytes,
            COUNT(*) FILTER (WHERE brp.status = 'read') AS read_count,
            COUNT(*) FILTER (WHERE brp.status = 'reading') AS reading_count,
            COUNT(*) FILTER (WHERE COALESCE(brp.status, 'unread') = 'unread') AS unread_count
        FROM libraries l
        LEFT JOIN books b ON b.library_id = l.id
        LEFT JOIN book_reading_progress brp ON brp.book_id = b.id
        LEFT JOIN LATERAL (
            SELECT size_bytes FROM book_files WHERE book_id = b.id ORDER BY updated_at DESC LIMIT 1
        ) bf ON TRUE
        GROUP BY l.id, l.name
        ORDER BY book_count DESC
        "#,
    )
    .fetch_all(&state.pool)
    .await?;

    let by_library: Vec<LibraryStats> = lib_rows
        .iter()
        .map(|r| LibraryStats {
            library_name: r.get("library_name"),
            book_count: r.get("book_count"),
            size_bytes: r.get("size_bytes"),
            read_count: r.get("read_count"),
            reading_count: r.get("reading_count"),
            unread_count: r.get("unread_count"),
        })
        .collect();

    // Top series (by book count)
    let series_rows = sqlx::query(
        r#"
        SELECT
            b.series,
            COUNT(*) AS book_count,
            COUNT(*) FILTER (WHERE brp.status = 'read') AS read_count,
            COALESCE(SUM(b.page_count), 0)::BIGINT AS total_pages
        FROM books b
        LEFT JOIN book_reading_progress brp ON brp.book_id = b.id
        WHERE b.series IS NOT NULL AND b.series != ''
        GROUP BY b.series
        ORDER BY book_count DESC
        LIMIT 10
        "#,
    )
    .fetch_all(&state.pool)
    .await?;

    let top_series: Vec<TopSeries> = series_rows
        .iter()
        .map(|r| TopSeries {
            series: r.get("series"),
            book_count: r.get("book_count"),
            read_count: r.get("read_count"),
            total_pages: r.get("total_pages"),
        })
        .collect();

    // Additions over time (last 12 months)
    // Months with zero additions are absent from the result, not zero-filled.
    let additions_rows = sqlx::query(
        r#"
        SELECT
            TO_CHAR(DATE_TRUNC('month', created_at), 'YYYY-MM') AS month,
            COUNT(*) AS books_added
        FROM books
        WHERE created_at >= DATE_TRUNC('month', NOW()) - INTERVAL '11 months'
        GROUP BY DATE_TRUNC('month', created_at)
        ORDER BY month ASC
        "#,
    )
    .fetch_all(&state.pool)
    .await?;

    let additions_over_time: Vec<MonthlyAdditions> = additions_rows
        .iter()
        .map(|r| MonthlyAdditions {
            month: r.get("month"),
            books_added: r.get("books_added"),
        })
        .collect();

    // Metadata stats
    let meta_row = sqlx::query(
        r#"
        SELECT
            (SELECT COUNT(DISTINCT NULLIF(series, '')) FROM books) AS total_series,
            (SELECT COUNT(DISTINCT series_name) FROM external_metadata_links WHERE status = 'approved') AS series_linked,
            (SELECT COUNT(*) FROM books WHERE summary IS NOT NULL AND summary != '') AS books_with_summary,
            (SELECT COUNT(*) FROM books WHERE isbn IS NOT NULL AND isbn != '') AS books_with_isbn
        "#,
    )
    .fetch_one(&state.pool)
    .await?;

    let meta_total_series: i64 = meta_row.get("total_series");
    let meta_series_linked: i64 = meta_row.get("series_linked");

    let provider_rows = sqlx::query(
        r#"
        SELECT provider, COUNT(DISTINCT series_name) AS count
        FROM external_metadata_links
        WHERE status = 'approved'
        GROUP BY provider
        ORDER BY count DESC
        "#,
    )
    .fetch_all(&state.pool)
    .await?;

    let by_provider: Vec<ProviderCount> = provider_rows
        .iter()
        .map(|r| ProviderCount {
            provider: r.get("provider"),
            count: r.get("count"),
        })
        .collect();

    let metadata = MetadataStats {
        total_series: meta_total_series,
        series_linked: meta_series_linked,
        // NOTE(review): can go negative if a link's series_name no longer
        // matches any book series — acceptable for a dashboard figure.
        series_unlinked: meta_total_series - meta_series_linked,
        books_with_summary: meta_row.get("books_with_summary"),
        books_with_isbn: meta_row.get("books_with_isbn"),
        by_provider,
    };

    Ok(Json(StatsResponse {
        overview,
        reading_status,
        by_format,
        by_language,
        by_library,
        top_series,
        additions_over_time,
        metadata,
    }))
}
|
||||
@@ -1,203 +1,12 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
extract::{Path as AxumPath, State},
|
||||
http::StatusCode,
|
||||
extract::State,
|
||||
Json,
|
||||
};
|
||||
use image::GenericImageView;
|
||||
use serde::Deserialize;
|
||||
use sqlx::Row;
|
||||
use tracing::{info, warn};
|
||||
use uuid::Uuid;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{error::ApiError, index_jobs, pages, AppState};
|
||||
|
||||
/// Thumbnail generation settings, read from the 'thumbnail' row of
/// `app_settings` (see `load_thumbnail_config` for the fallback values).
#[derive(Clone)]
struct ThumbnailConfig {
    // When false, checkup jobs finish immediately without generating thumbnails.
    enabled: bool,
    // Bounding-box width in pixels; aspect ratio is preserved on resize.
    width: u32,
    // Bounding-box height in pixels.
    height: u32,
    // WebP encoding quality.
    quality: u8,
    // Directory where `<book_id>.webp` files are written.
    directory: String,
}
|
||||
|
||||
async fn load_thumbnail_config(pool: &sqlx::PgPool) -> ThumbnailConfig {
|
||||
let fallback = ThumbnailConfig {
|
||||
enabled: true,
|
||||
width: 300,
|
||||
height: 400,
|
||||
quality: 80,
|
||||
directory: "/data/thumbnails".to_string(),
|
||||
};
|
||||
let row = sqlx::query(r#"SELECT value FROM app_settings WHERE key = 'thumbnail'"#)
|
||||
.fetch_optional(pool)
|
||||
.await;
|
||||
|
||||
match row {
|
||||
Ok(Some(row)) => {
|
||||
let value: serde_json::Value = row.get("value");
|
||||
ThumbnailConfig {
|
||||
enabled: value
|
||||
.get("enabled")
|
||||
.and_then(|v| v.as_bool())
|
||||
.unwrap_or(fallback.enabled),
|
||||
width: value
|
||||
.get("width")
|
||||
.and_then(|v| v.as_u64())
|
||||
.map(|v| v as u32)
|
||||
.unwrap_or(fallback.width),
|
||||
height: value
|
||||
.get("height")
|
||||
.and_then(|v| v.as_u64())
|
||||
.map(|v| v as u32)
|
||||
.unwrap_or(fallback.height),
|
||||
quality: value
|
||||
.get("quality")
|
||||
.and_then(|v| v.as_u64())
|
||||
.map(|v| v as u8)
|
||||
.unwrap_or(fallback.quality),
|
||||
directory: value
|
||||
.get("directory")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string())
|
||||
.unwrap_or_else(|| fallback.directory.clone()),
|
||||
}
|
||||
}
|
||||
_ => fallback,
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_thumbnail(image_bytes: &[u8], config: &ThumbnailConfig) -> anyhow::Result<Vec<u8>> {
|
||||
let img = image::load_from_memory(image_bytes).context("failed to load image")?;
|
||||
let (orig_w, orig_h) = img.dimensions();
|
||||
let ratio_w = config.width as f32 / orig_w as f32;
|
||||
let ratio_h = config.height as f32 / orig_h as f32;
|
||||
let ratio = ratio_w.min(ratio_h);
|
||||
let new_w = (orig_w as f32 * ratio) as u32;
|
||||
let new_h = (orig_h as f32 * ratio) as u32;
|
||||
let resized = img.resize(new_w, new_h, image::imageops::FilterType::Lanczos3);
|
||||
let rgba = resized.to_rgba8();
|
||||
let (w, h) = rgba.dimensions();
|
||||
let rgb_data: Vec<u8> = rgba.pixels().flat_map(|p| [p[0], p[1], p[2]]).collect();
|
||||
let quality = f32::max(config.quality as f32, 85.0);
|
||||
let webp_data =
|
||||
webp::Encoder::new(&rgb_data, webp::PixelLayout::Rgb, w, h).encode(quality);
|
||||
Ok(webp_data.to_vec())
|
||||
}
|
||||
|
||||
fn save_thumbnail(book_id: Uuid, thumbnail_bytes: &[u8], config: &ThumbnailConfig) -> anyhow::Result<String> {
|
||||
let dir = Path::new(&config.directory);
|
||||
std::fs::create_dir_all(dir)?;
|
||||
let filename = format!("{}.webp", book_id);
|
||||
let path = dir.join(&filename);
|
||||
std::fs::write(&path, thumbnail_bytes)?;
|
||||
Ok(path.to_string_lossy().to_string())
|
||||
}
|
||||
|
||||
/// Background worker for a thumbnail checkup/regenerate job.
///
/// Looks up the job row to learn its scope (optional library) and type,
/// optionally clears existing thumbnails (for `thumbnail_regenerate`
/// jobs), then renders page 1 of every in-scope book missing a
/// thumbnail, encodes it to WebP, saves it, and records progress on the
/// `index_jobs` row. Progress-update DB errors are deliberately ignored
/// (best-effort): failures on individual books are logged and skipped.
async fn run_checkup(state: AppState, job_id: Uuid) {
    let pool = &state.pool;
    // Fetch the job row; without it we cannot determine scope or type.
    let row = sqlx::query("SELECT library_id, type FROM index_jobs WHERE id = $1")
        .bind(job_id)
        .fetch_optional(pool)
        .await;

    let (library_id, job_type) = match row {
        Ok(Some(r)) => (
            r.get::<Option<Uuid>, _>("library_id"),
            r.get::<String, _>("type"),
        ),
        // Missing row or query error: nothing to do.
        _ => {
            warn!("thumbnails checkup: job {} not found", job_id);
            return;
        }
    };

    // Regenerate: clear existing thumbnails in scope so they get regenerated
    // ($1 NULL means "all libraries" in the WHERE clause below).
    if job_type == "thumbnail_regenerate" {
        let cleared = sqlx::query(
            r#"UPDATE books SET thumbnail_path = NULL WHERE (library_id = $1 OR $1 IS NULL)"#,
        )
        .bind(library_id)
        .execute(pool)
        .await;
        if let Ok(res) = cleared {
            info!("thumbnails regenerate: cleared {} books", res.rows_affected());
        }
    }

    // Books in scope that still need a thumbnail.
    let book_ids: Vec<Uuid> = sqlx::query_scalar(
        r#"SELECT id FROM books WHERE (library_id = $1 OR $1 IS NULL) AND thumbnail_path IS NULL"#,
    )
    .bind(library_id)
    .fetch_all(pool)
    .await
    .unwrap_or_default();

    let config = load_thumbnail_config(pool).await;
    // Nothing to do (feature disabled or no books): mark success and bail.
    if !config.enabled || book_ids.is_empty() {
        let _ = sqlx::query(
            "UPDATE index_jobs SET status = 'success', finished_at = NOW(), progress_percent = 100, current_file = NULL WHERE id = $1",
        )
        .bind(job_id)
        .execute(pool)
        .await;
        return;
    }

    // Initialize progress tracking before starting the loop.
    let total = book_ids.len() as i32;
    let _ = sqlx::query(
        "UPDATE index_jobs SET status = 'generating_thumbnails', total_files = $2, processed_files = 0, current_file = NULL WHERE id = $1",
    )
    .bind(job_id)
    .bind(total)
    .execute(pool)
    .await;

    // Sequential pipeline per book: render page 1 -> encode -> save ->
    // record path -> bump job progress. Any step failing skips the book.
    for (i, &book_id) in book_ids.iter().enumerate() {
        match pages::render_book_page_1(&state, book_id, config.width, config.quality).await {
            Ok(page_bytes) => {
                match generate_thumbnail(&page_bytes, &config) {
                    Ok(thumb_bytes) => {
                        if let Ok(path) = save_thumbnail(book_id, &thumb_bytes, &config) {
                            if sqlx::query("UPDATE books SET thumbnail_path = $1 WHERE id = $2")
                                .bind(&path)
                                .bind(book_id)
                                .execute(pool)
                                .await
                                .is_ok()
                            {
                                // Progress is only advanced after the book row
                                // was successfully updated.
                                let processed = (i + 1) as i32;
                                let percent = ((i + 1) as f64 / total as f64 * 100.0) as i32;
                                let _ = sqlx::query(
                                    "UPDATE index_jobs SET processed_files = $2, progress_percent = $3 WHERE id = $1",
                                )
                                .bind(job_id)
                                .bind(processed)
                                .bind(percent)
                                .execute(pool)
                                .await;
                            }
                        }
                    }
                    Err(e) => warn!("thumbnail generate failed for book {}: {:?}", book_id, e),
                }
            }
            Err(e) => warn!("render page 1 failed for book {}: {:?}", book_id, e),
        }
    }

    // Always close out the job as successful; per-book failures were logged.
    let _ = sqlx::query(
        "UPDATE index_jobs SET status = 'success', finished_at = NOW(), progress_percent = 100, current_file = NULL WHERE id = $1",
    )
    .bind(job_id)
    .execute(pool)
    .await;

    info!("thumbnails checkup finished for job {} ({} books)", job_id, total);
}
|
||||
use crate::{error::ApiError, index_jobs, state::AppState};
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct ThumbnailsRebuildRequest {
|
||||
@@ -205,14 +14,14 @@ pub struct ThumbnailsRebuildRequest {
|
||||
pub library_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
/// POST /index/thumbnails/rebuild — create a job and generate thumbnails for books that don't have one (optional library scope).
|
||||
/// POST /index/thumbnails/rebuild — create a job to generate thumbnails for books that don't have one.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/index/thumbnails/rebuild",
|
||||
tag = "indexing",
|
||||
request_body = Option<ThumbnailsRebuildRequest>,
|
||||
responses(
|
||||
(status = 200, body = index_jobs::IndexJobResponse),
|
||||
(status = 200, body = IndexJobResponse),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 403, description = "Forbidden - Admin scope required"),
|
||||
),
|
||||
@@ -239,14 +48,14 @@ pub async fn start_thumbnails_rebuild(
|
||||
Ok(Json(index_jobs::map_row(row)))
|
||||
}
|
||||
|
||||
/// POST /index/thumbnails/regenerate — create a job and regenerate all thumbnails in scope (clears then regenerates).
|
||||
/// POST /index/thumbnails/regenerate — create a job to regenerate all thumbnails (clears then regenerates).
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/index/thumbnails/regenerate",
|
||||
tag = "indexing",
|
||||
request_body = Option<ThumbnailsRebuildRequest>,
|
||||
responses(
|
||||
(status = 200, body = index_jobs::IndexJobResponse),
|
||||
(status = 200, body = IndexJobResponse),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 403, description = "Forbidden - Admin scope required"),
|
||||
),
|
||||
@@ -272,13 +81,3 @@ pub async fn start_thumbnails_regenerate(
|
||||
|
||||
Ok(Json(index_jobs::map_row(row)))
|
||||
}
|
||||
|
||||
/// POST /index/jobs/:id/thumbnails/checkup — start thumbnail generation for books missing thumbnails (called by indexer at end of build).
|
||||
pub async fn start_checkup(
|
||||
State(state): State<AppState>,
|
||||
AxumPath(job_id): AxumPath<Uuid>,
|
||||
) -> Result<StatusCode, ApiError> {
|
||||
let state = state.clone();
|
||||
tokio::spawn(async move { run_checkup(state, job_id).await });
|
||||
Ok(StatusCode::ACCEPTED)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ use sqlx::Row;
|
||||
use uuid::Uuid;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{error::ApiError, AppState};
|
||||
use crate::{error::ApiError, state::AppState};
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct CreateTokenRequest {
|
||||
@@ -170,3 +170,35 @@ pub async fn revoke_token(
|
||||
|
||||
Ok(Json(serde_json::json!({"revoked": true, "id": id})))
|
||||
}
|
||||
|
||||
/// Permanently delete a revoked API token
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/admin/tokens/{id}/delete",
|
||||
tag = "tokens",
|
||||
params(
|
||||
("id" = String, Path, description = "Token UUID"),
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Token permanently deleted"),
|
||||
(status = 404, description = "Token not found or not revoked"),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 403, description = "Forbidden - Admin scope required"),
|
||||
),
|
||||
security(("Bearer" = []))
|
||||
)]
|
||||
pub async fn delete_token(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<Json<serde_json::Value>, ApiError> {
|
||||
let result = sqlx::query("DELETE FROM api_tokens WHERE id = $1 AND revoked_at IS NOT NULL")
|
||||
.bind(id)
|
||||
.execute(&state.pool)
|
||||
.await?;
|
||||
|
||||
if result.rows_affected() == 0 {
|
||||
return Err(ApiError::not_found("token not found or not revoked"));
|
||||
}
|
||||
|
||||
Ok(Json(serde_json::json!({"deleted": true, "id": id})))
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
API_BASE_URL=http://localhost:8080
|
||||
API_BASE_URL=http://localhost:7080
|
||||
API_BOOTSTRAP_TOKEN=stripstream-dev-bootstrap-token
|
||||
NEXT_PUBLIC_API_BASE_URL=http://localhost:8080
|
||||
NEXT_PUBLIC_API_BASE_URL=http://localhost:7080
|
||||
NEXT_PUBLIC_API_BOOTSTRAP_TOKEN=stripstream-dev-bootstrap-token
|
||||
|
||||
66
apps/backoffice/AGENTS.md
Normal file
66
apps/backoffice/AGENTS.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# apps/backoffice — Interface d'administration (Next.js)
|
||||
|
||||
App Next.js 16 avec React 19, Tailwind CSS v4, TypeScript. Port de dev : **7082** (`npm run dev`).
|
||||
|
||||
## Structure
|
||||
|
||||
```
|
||||
app/
|
||||
├── layout.tsx # Layout global (nav sticky glassmorphism, ThemeProvider)
|
||||
├── page.tsx # Dashboard
|
||||
├── books/ # Liste et détail des livres
|
||||
├── libraries/ # Gestion bibliothèques
|
||||
├── jobs/ # Monitoring jobs
|
||||
├── tokens/ # Tokens API
|
||||
├── settings/ # Paramètres
|
||||
├── components/ # Composants métier
|
||||
│ ├── ui/ # Composants génériques (Button, Card, Badge, Icon, Input, ProgressBar, StatBox...)
|
||||
│ ├── BookCard.tsx
|
||||
│ ├── JobProgress.tsx
|
||||
│ ├── JobsList.tsx
|
||||
│ ├── LibraryForm.tsx
|
||||
│ ├── FolderBrowser.tsx / FolderPicker.tsx
|
||||
│ └── ...
|
||||
└── globals.css # Variables CSS, Tailwind base
|
||||
lib/
|
||||
└── api.ts # Client API : types DTO + fonctions fetch vers l'API Rust
|
||||
```
|
||||
|
||||
## Client API (lib/api.ts)
|
||||
|
||||
Tous les appels vers l'API Rust passent par `lib/api.ts`. Les types DTO sont définis là :
|
||||
- `LibraryDto`, `IndexJobDto`, `BookDto`, `TokenDto`, `FolderItem`
|
||||
|
||||
Ajouter les nouveaux endpoints et types dans ce fichier.
|
||||
|
||||
## Composants UI
|
||||
|
||||
Les composants génériques sont dans `app/components/ui/`. Utiliser ces composants plutôt que des éléments HTML bruts :
|
||||
|
||||
```tsx
|
||||
import { Button, Card, Badge, Icon, Input, ProgressBar, StatBox } from "@/app/components/ui";
|
||||
```
|
||||
|
||||
## Conventions
|
||||
|
||||
- **App Router** : toutes les pages sont des Server Components par défaut. Utiliser `"use client"` seulement pour l'interactivité.
|
||||
- **Tailwind v4** : config dans `postcss.config.js` + `tailwind.config.js`. Variables CSS dans `globals.css`.
|
||||
- **Thème** : `ThemeProvider` + `ThemeToggle` pour dark/light mode via `next-themes`.
|
||||
- **Icônes** : composant `<Icon name="..." size="sm|md|lg" />` dans `ui/Icon.tsx` — pas de librairie externe.
|
||||
- **Navigation** : routes typées dans `layout.tsx` (`"/" | "/books" | "/libraries" | "/jobs" | "/tokens" | "/settings"`).
|
||||
|
||||
## Commandes
|
||||
|
||||
```bash
|
||||
npm install
|
||||
npm run dev # http://localhost:7082
|
||||
npm run build
|
||||
npm run start # Production sur http://localhost:7082
|
||||
```
|
||||
|
||||
## Gotchas
|
||||
|
||||
- **Port 7082** : pas le port Next.js par défaut (3000). Défini dans `package.json` scripts (`-p 7082`).
|
||||
- **API_BASE_URL** : en prod, configuré via env. En dev local, l'API doit tourner sur `http://localhost:7080`.
|
||||
- **React 19 + Next.js 16** : utiliser les nouvelles APIs (actions serveur, `use()` hook) si disponibles.
|
||||
- **Pas de gestion d'état global** : fetch direct depuis les Server Components ou `useState`/`useEffect` dans les Client Components.
|
||||
@@ -12,11 +12,11 @@ RUN npm run build
|
||||
FROM node:22-alpine AS runner
|
||||
WORKDIR /app
|
||||
ENV NODE_ENV=production
|
||||
ENV PORT=8082
|
||||
ENV PORT=7082
|
||||
ENV HOST=0.0.0.0
|
||||
RUN apk add --no-cache wget
|
||||
COPY --from=builder /app/.next/standalone ./
|
||||
COPY --from=builder /app/.next/static ./.next/static
|
||||
COPY --from=builder /app/public ./public
|
||||
EXPOSE 8082
|
||||
EXPOSE 7082
|
||||
CMD ["node", "server.js"]
|
||||
|
||||
17
apps/backoffice/app/api/books/[bookId]/convert/route.ts
Normal file
17
apps/backoffice/app/api/books/[bookId]/convert/route.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { convertBook } from "@/lib/api";
|
||||
|
||||
export async function POST(
|
||||
_request: NextRequest,
|
||||
{ params }: { params: Promise<{ bookId: string }> }
|
||||
) {
|
||||
const { bookId } = await params;
|
||||
try {
|
||||
const data = await convertBook(bookId);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to start conversion";
|
||||
const status = message.includes("409") ? 409 : 500;
|
||||
return NextResponse.json({ error: message }, { status });
|
||||
}
|
||||
}
|
||||
@@ -1,35 +1,25 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { config } from "@/lib/api";
|
||||
|
||||
export async function GET(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ bookId: string; pageNum: string }> }
|
||||
) {
|
||||
const { bookId, pageNum } = await params;
|
||||
|
||||
// Récupérer les query params (format, width, quality)
|
||||
try {
|
||||
const { baseUrl, token } = config();
|
||||
const { searchParams } = new URL(request.url);
|
||||
const format = searchParams.get("format") || "webp";
|
||||
const width = searchParams.get("width") || "";
|
||||
const quality = searchParams.get("quality") || "";
|
||||
|
||||
// Construire l'URL vers l'API backend
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiUrl = new URL(`${apiBaseUrl}/books/${bookId}/pages/${pageNum}`);
|
||||
const apiUrl = new URL(`${baseUrl}/books/${bookId}/pages/${pageNum}`);
|
||||
apiUrl.searchParams.set("format", format);
|
||||
if (width) apiUrl.searchParams.set("width", width);
|
||||
if (quality) apiUrl.searchParams.set("quality", quality);
|
||||
|
||||
// Faire la requête à l'API
|
||||
const token = process.env.API_BOOTSTRAP_TOKEN;
|
||||
if (!token) {
|
||||
return new NextResponse("API token not configured", { status: 500 });
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(apiUrl.toString(), {
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
|
||||
17
apps/backoffice/app/api/books/[bookId]/progress/route.ts
Normal file
17
apps/backoffice/app/api/books/[bookId]/progress/route.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { updateReadingProgress } from "@/lib/api";
|
||||
|
||||
export async function PATCH(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ bookId: string }> }
|
||||
) {
|
||||
const { bookId } = await params;
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await updateReadingProgress(bookId, body.status, body.current_page ?? undefined);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to update reading progress";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
17
apps/backoffice/app/api/books/[bookId]/route.ts
Normal file
17
apps/backoffice/app/api/books/[bookId]/route.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { updateBook } from "@/lib/api";
|
||||
|
||||
export async function PATCH(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ bookId: string }> }
|
||||
) {
|
||||
const { bookId } = await params;
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await updateBook(bookId, body);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to update book";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,5 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { config } from "@/lib/api";
|
||||
|
||||
export async function GET(
|
||||
request: NextRequest,
|
||||
@@ -6,19 +7,10 @@ export async function GET(
|
||||
) {
|
||||
const { bookId } = await params;
|
||||
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiUrl = `${apiBaseUrl}/books/${bookId}/thumbnail`;
|
||||
|
||||
const token = process.env.API_BOOTSTRAP_TOKEN;
|
||||
if (!token) {
|
||||
return new NextResponse("API token not configured", { status: 500 });
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(apiUrl, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
const { baseUrl, token } = config();
|
||||
const response = await fetch(`${baseUrl}/books/${bookId}/thumbnail`, {
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
|
||||
@@ -1,39 +1,13 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { listFolders } from "@/lib/api";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiToken = process.env.API_BOOTSTRAP_TOKEN;
|
||||
|
||||
if (!apiToken) {
|
||||
return NextResponse.json({ error: "API token not configured" }, { status: 500 });
|
||||
}
|
||||
|
||||
try {
|
||||
const { searchParams } = new URL(request.url);
|
||||
const path = searchParams.get("path");
|
||||
|
||||
let apiUrl = `${apiBaseUrl}/folders`;
|
||||
if (path) {
|
||||
apiUrl += `?path=${encodeURIComponent(path)}`;
|
||||
}
|
||||
|
||||
const response = await fetch(apiUrl, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiToken}`,
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return NextResponse.json(
|
||||
{ error: `API error: ${response.status}` },
|
||||
{ status: response.status }
|
||||
);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const path = searchParams.get("path") || undefined;
|
||||
const data = await listFolders(path);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
console.error("Proxy error:", error);
|
||||
return NextResponse.json({ error: "Failed to fetch folders" }, { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,36 +1,15 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { cancelJob } from "@/lib/api";
|
||||
|
||||
export async function POST(
|
||||
request: NextRequest,
|
||||
_request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string }> }
|
||||
) {
|
||||
const { id } = await params;
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiToken = process.env.API_BOOTSTRAP_TOKEN;
|
||||
|
||||
if (!apiToken) {
|
||||
return NextResponse.json({ error: "API token not configured" }, { status: 500 });
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(`${apiBaseUrl}/index/cancel/${id}`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiToken}`,
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return NextResponse.json(
|
||||
{ error: `API error: ${response.status}` },
|
||||
{ status: response.status }
|
||||
);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const data = await cancelJob(id);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
console.error("Proxy error:", error);
|
||||
return NextResponse.json({ error: "Failed to cancel job" }, { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,35 +1,15 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch, IndexJobDto } from "@/lib/api";
|
||||
|
||||
export async function GET(
|
||||
request: NextRequest,
|
||||
_request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string }> }
|
||||
) {
|
||||
const { id } = await params;
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiToken = process.env.API_BOOTSTRAP_TOKEN;
|
||||
|
||||
if (!apiToken) {
|
||||
return NextResponse.json({ error: "API token not configured" }, { status: 500 });
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(`${apiBaseUrl}/index/jobs/${id}`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiToken}`,
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return NextResponse.json(
|
||||
{ error: `API error: ${response.status}` },
|
||||
{ status: response.status }
|
||||
);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const data = await apiFetch<IndexJobDto>(`/index/jobs/${id}`);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
console.error("Proxy error:", error);
|
||||
return NextResponse.json({ error: "Failed to fetch job" }, { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +1,12 @@
|
||||
import { NextRequest } from "next/server";
|
||||
import { config } from "@/lib/api";
|
||||
|
||||
export async function GET(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string }> }
|
||||
) {
|
||||
const { id } = await params;
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiToken = process.env.API_BOOTSTRAP_TOKEN;
|
||||
|
||||
if (!apiToken) {
|
||||
return new Response(
|
||||
`data: ${JSON.stringify({ error: "API token not configured" })}\n\n`,
|
||||
{ status: 500, headers: { "Content-Type": "text/event-stream" } }
|
||||
);
|
||||
}
|
||||
const { baseUrl, token } = config();
|
||||
|
||||
const stream = new ReadableStream({
|
||||
async start(controller) {
|
||||
@@ -22,18 +15,18 @@ export async function GET(
|
||||
|
||||
let lastData: string | null = null;
|
||||
let isActive = true;
|
||||
let consecutiveErrors = 0;
|
||||
|
||||
const fetchJob = async () => {
|
||||
if (!isActive) return;
|
||||
|
||||
try {
|
||||
const response = await fetch(`${apiBaseUrl}/index/jobs/${id}`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiToken}`,
|
||||
},
|
||||
const response = await fetch(`${baseUrl}/index/jobs/${id}`, {
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
|
||||
if (response.ok && isActive) {
|
||||
consecutiveErrors = 0;
|
||||
const data = await response.json();
|
||||
const dataStr = JSON.stringify(data);
|
||||
|
||||
@@ -63,7 +56,11 @@ export async function GET(
|
||||
}
|
||||
} catch (error) {
|
||||
if (isActive) {
|
||||
console.error("SSE fetch error:", error);
|
||||
consecutiveErrors++;
|
||||
// Only log first failure and every 60th to avoid spam
|
||||
if (consecutiveErrors === 1 || consecutiveErrors % 60 === 0) {
|
||||
console.warn(`SSE fetch error (${consecutiveErrors} consecutive):`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
11
apps/backoffice/app/api/jobs/active/route.ts
Normal file
11
apps/backoffice/app/api/jobs/active/route.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import { NextResponse } from "next/server";
|
||||
import { apiFetch, IndexJobDto } from "@/lib/api";
|
||||
|
||||
export async function GET() {
|
||||
try {
|
||||
const data = await apiFetch<IndexJobDto[]>("/index/jobs/active");
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
return NextResponse.json({ error: "Failed to fetch active jobs" }, { status: 500 });
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiToken = process.env.API_BOOTSTRAP_TOKEN;
|
||||
|
||||
if (!apiToken) {
|
||||
return NextResponse.json({ error: "API token not configured" }, { status: 500 });
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(`${apiBaseUrl}/index/status`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiToken}`,
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return NextResponse.json(
|
||||
{ error: `API error: ${response.status}` },
|
||||
{ status: response.status }
|
||||
);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
console.error("Proxy error:", error);
|
||||
return NextResponse.json({ error: "Failed to fetch jobs" }, { status: 500 });
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,8 @@
|
||||
import { NextRequest } from "next/server";
|
||||
import { config } from "@/lib/api";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
const apiBaseUrl = process.env.API_BASE_URL || "http://api:8080";
|
||||
const apiToken = process.env.API_BOOTSTRAP_TOKEN;
|
||||
|
||||
if (!apiToken) {
|
||||
return new Response(
|
||||
`data: ${JSON.stringify({ error: "API token not configured" })}\n\n`,
|
||||
{ status: 500, headers: { "Content-Type": "text/event-stream" } }
|
||||
);
|
||||
}
|
||||
const { baseUrl, token } = config();
|
||||
|
||||
const stream = new ReadableStream({
|
||||
async start(controller) {
|
||||
@@ -17,18 +10,18 @@ export async function GET(request: NextRequest) {
|
||||
|
||||
let lastData: string | null = null;
|
||||
let isActive = true;
|
||||
let consecutiveErrors = 0;
|
||||
|
||||
const fetchJobs = async () => {
|
||||
if (!isActive) return;
|
||||
|
||||
try {
|
||||
const response = await fetch(`${apiBaseUrl}/index/status`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiToken}`,
|
||||
},
|
||||
const response = await fetch(`${baseUrl}/index/status`, {
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
|
||||
if (response.ok && isActive) {
|
||||
consecutiveErrors = 0;
|
||||
const data = await response.json();
|
||||
const dataStr = JSON.stringify(data);
|
||||
|
||||
@@ -47,7 +40,11 @@ export async function GET(request: NextRequest) {
|
||||
}
|
||||
} catch (error) {
|
||||
if (isActive) {
|
||||
console.error("SSE fetch error:", error);
|
||||
consecutiveErrors++;
|
||||
// Only log first failure and every 30th to avoid spam
|
||||
if (consecutiveErrors === 1 || consecutiveErrors % 30 === 0) {
|
||||
console.warn(`SSE fetch error (${consecutiveErrors} consecutive):`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
16
apps/backoffice/app/api/komga/reports/[id]/route.ts
Normal file
16
apps/backoffice/app/api/komga/reports/[id]/route.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { NextResponse, NextRequest } from "next/server";
|
||||
import { getKomgaReport } from "@/lib/api";
|
||||
|
||||
export async function GET(
|
||||
_request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string }> },
|
||||
) {
|
||||
try {
|
||||
const { id } = await params;
|
||||
const data = await getKomgaReport(id);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to fetch report";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
12
apps/backoffice/app/api/komga/reports/route.ts
Normal file
12
apps/backoffice/app/api/komga/reports/route.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import { NextResponse } from "next/server";
|
||||
import { listKomgaReports } from "@/lib/api";
|
||||
|
||||
export async function GET() {
|
||||
try {
|
||||
const data = await listKomgaReports();
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to fetch reports";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
16
apps/backoffice/app/api/komga/sync/route.ts
Normal file
16
apps/backoffice/app/api/komga/sync/route.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { NextResponse, NextRequest } from "next/server";
|
||||
import { apiFetch } from "@/lib/api";
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await apiFetch("/komga/sync", {
|
||||
method: "POST",
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to sync with Komga";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch, LibraryDto } from "@/lib/api";
|
||||
|
||||
export async function PATCH(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string }> }
|
||||
) {
|
||||
const { id } = await params;
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await apiFetch<LibraryDto>(`/libraries/${id}/metadata-provider`, {
|
||||
method: "PATCH",
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to update metadata provider";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
18
apps/backoffice/app/api/libraries/[id]/monitoring/route.ts
Normal file
18
apps/backoffice/app/api/libraries/[id]/monitoring/route.ts
Normal file
@@ -0,0 +1,18 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { updateLibraryMonitoring } from "@/lib/api";
|
||||
|
||||
export async function PATCH(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string }> }
|
||||
) {
|
||||
const { id } = await params;
|
||||
try {
|
||||
const { monitor_enabled, scan_mode, watcher_enabled } = await request.json();
|
||||
const data = await updateLibraryMonitoring(id, monitor_enabled, scan_mode, watcher_enabled);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to update monitoring settings";
|
||||
console.error("[monitoring PATCH]", message);
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { fetchSeriesMetadata } from "@/lib/api";
|
||||
|
||||
export async function GET(
|
||||
_request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string; name: string }> }
|
||||
) {
|
||||
const { id, name } = await params;
|
||||
try {
|
||||
const data = await fetchSeriesMetadata(id, name);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to fetch series metadata";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,17 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { updateSeries } from "@/lib/api";
|
||||
|
||||
export async function PATCH(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ id: string; name: string }> }
|
||||
) {
|
||||
const { id, name } = await params;
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await updateSeries(id, name, body);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to update series";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
17
apps/backoffice/app/api/metadata/approve/route.ts
Normal file
17
apps/backoffice/app/api/metadata/approve/route.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch } from "@/lib/api";
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json();
|
||||
const { id, ...rest } = body;
|
||||
const data = await apiFetch<{ status: string; books_synced: number }>(`/metadata/approve/${id}`, {
|
||||
method: "POST",
|
||||
body: JSON.stringify(rest),
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to approve metadata";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
17
apps/backoffice/app/api/metadata/batch/report/route.ts
Normal file
17
apps/backoffice/app/api/metadata/batch/report/route.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch, MetadataBatchReportDto } from "@/lib/api";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url);
|
||||
const id = searchParams.get("id");
|
||||
if (!id) {
|
||||
return NextResponse.json({ error: "id is required" }, { status: 400 });
|
||||
}
|
||||
const data = await apiFetch<MetadataBatchReportDto>(`/metadata/batch/${id}/report`);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to fetch report";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
19
apps/backoffice/app/api/metadata/batch/results/route.ts
Normal file
19
apps/backoffice/app/api/metadata/batch/results/route.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch, MetadataBatchResultDto } from "@/lib/api";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url);
|
||||
const id = searchParams.get("id");
|
||||
if (!id) {
|
||||
return NextResponse.json({ error: "id is required" }, { status: 400 });
|
||||
}
|
||||
const status = searchParams.get("status") || "";
|
||||
const params = status ? `?status=${status}` : "";
|
||||
const data = await apiFetch<MetadataBatchResultDto[]>(`/metadata/batch/${id}/results${params}`);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to fetch results";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
16
apps/backoffice/app/api/metadata/batch/route.ts
Normal file
16
apps/backoffice/app/api/metadata/batch/route.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch } from "@/lib/api";
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await apiFetch<{ id: string; status: string }>("/metadata/batch", {
|
||||
method: "POST",
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to start batch";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
35
apps/backoffice/app/api/metadata/links/route.ts
Normal file
35
apps/backoffice/app/api/metadata/links/route.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch, ExternalMetadataLinkDto } from "@/lib/api";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url);
|
||||
const libraryId = searchParams.get("library_id") || "";
|
||||
const seriesName = searchParams.get("series_name") || "";
|
||||
const params = new URLSearchParams();
|
||||
if (libraryId) params.set("library_id", libraryId);
|
||||
if (seriesName) params.set("series_name", seriesName);
|
||||
const data = await apiFetch<ExternalMetadataLinkDto[]>(`/metadata/links?${params.toString()}`);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to fetch metadata links";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
export async function DELETE(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url);
|
||||
const id = searchParams.get("id");
|
||||
if (!id) {
|
||||
return NextResponse.json({ error: "id is required" }, { status: 400 });
|
||||
}
|
||||
const data = await apiFetch<{ deleted: boolean }>(`/metadata/links/${id}`, {
|
||||
method: "DELETE",
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to delete metadata link";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
16
apps/backoffice/app/api/metadata/match/route.ts
Normal file
16
apps/backoffice/app/api/metadata/match/route.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch, ExternalMetadataLinkDto } from "@/lib/api";
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await apiFetch<ExternalMetadataLinkDto>("/metadata/match", {
|
||||
method: "POST",
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to create metadata match";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
17
apps/backoffice/app/api/metadata/missing/route.ts
Normal file
17
apps/backoffice/app/api/metadata/missing/route.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch, MissingBooksDto } from "@/lib/api";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url);
|
||||
const id = searchParams.get("id");
|
||||
if (!id) {
|
||||
return NextResponse.json({ error: "id is required" }, { status: 400 });
|
||||
}
|
||||
const data = await apiFetch<MissingBooksDto>(`/metadata/missing/${id}`);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to fetch missing books";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
16
apps/backoffice/app/api/metadata/refresh/report/route.ts
Normal file
16
apps/backoffice/app/api/metadata/refresh/report/route.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch } from "@/lib/api";
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const jobId = request.nextUrl.searchParams.get("job_id");
|
||||
if (!jobId) {
|
||||
return NextResponse.json({ error: "job_id required" }, { status: 400 });
|
||||
}
|
||||
const data = await apiFetch(`/metadata/refresh/${jobId}/report`);
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to get report";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
16
apps/backoffice/app/api/metadata/refresh/route.ts
Normal file
16
apps/backoffice/app/api/metadata/refresh/route.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch } from "@/lib/api";
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await apiFetch<{ id: string; status: string }>("/metadata/refresh", {
|
||||
method: "POST",
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to start refresh";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
15
apps/backoffice/app/api/metadata/reject/route.ts
Normal file
15
apps/backoffice/app/api/metadata/reject/route.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { apiFetch } from "@/lib/api";
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json();
|
||||
const data = await apiFetch<{ status: string }>(`/metadata/reject/${body.id}`, {
|
||||
method: "POST",
|
||||
});
|
||||
return NextResponse.json(data);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Failed to reject metadata";
|
||||
return NextResponse.json({ error: message }, { status: 500 });
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user