Remove unused Link import from EvaluationEditor. Add a Confluence export option to ExportModal, backed by a new /api/export/confluence route, and extend export-utils with helpers for parsing suggested questions and rubrics and for generating Confluence-pasteable Markdown from an evaluation.

2026-02-23 13:24:36 +01:00
parent 9ff745489f
commit 87326b459e
4 changed files with 133 additions and 1 deletion


@@ -0,0 +1,39 @@
import { NextRequest, NextResponse } from "next/server";
import { prisma } from "@/lib/db";
import { evaluationToConfluenceMarkup } from "@/lib/export-utils";

// GET /api/export/confluence?id=<evaluationId>
// Returns the interview guide as Markdown, served as a downloadable attachment.
export async function GET(req: NextRequest) {
  try {
    const { searchParams } = new URL(req.url);
    const id = searchParams.get("id");
    if (!id) {
      return NextResponse.json({ error: "Evaluation id required" }, { status: 400 });
    }
    const evaluation = await prisma.evaluation.findUnique({
      where: { id },
      include: {
        template: true,
        dimensionScores: { include: { dimension: true } },
      },
    });
    if (!evaluation) {
      return NextResponse.json({ error: "Evaluation not found" }, { status: 404 });
    }
    // The include above produces the shape evaluationToConfluenceMarkup expects.
    const markup = evaluationToConfluenceMarkup(
      evaluation as Parameters<typeof evaluationToConfluenceMarkup>[0]
    );
    return new NextResponse(markup, {
      headers: {
        "Content-Type": "text/plain; charset=utf-8",
        "Content-Disposition": `attachment; filename="guide-entretien-${id}.md"`,
      },
    });
  } catch (e) {
    console.error(e);
    return NextResponse.json({ error: "Export failed" }, { status: 500 });
  }
}
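For reference, a minimal sketch of exercising the route, e.g. from a test or script (the evaluation id `ckx123` is hypothetical):

const res = await fetch("/api/export/confluence?id=ckx123");
if (res.ok) {
  // Plain-text Markdown body, ready to paste into a Confluence page
  const markdown = await res.text();
  // e.g. attachment; filename="guide-entretien-ckx123.md"
  console.log(res.headers.get("Content-Disposition"));
}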


@@ -1,7 +1,6 @@
"use client";
import { useState, useEffect, useCallback } from "react";
import Link from "next/link";
import { useRouter } from "next/navigation";
import { updateEvaluation, deleteEvaluation, fetchEvaluation } from "@/actions/evaluations";
import { CandidateForm } from "@/components/CandidateForm";


@@ -12,6 +12,7 @@ export function ExportModal({ isOpen, onClose, evaluationId }: ExportModalProps)
  const base = typeof window !== "undefined" ? window.location.origin : "";
  const csvUrl = `${base}/api/export/csv?id=${evaluationId}`;
  const pdfUrl = `${base}/api/export/pdf?id=${evaluationId}`;
  const confluenceUrl = `${base}/api/export/confluence?id=${evaluationId}`;
  return (
    <>
@@ -37,6 +38,14 @@ export function ExportModal({ isOpen, onClose, evaluationId }: ExportModalProps)
        >
          pdf
        </a>
        <a
          href={confluenceUrl}
          download
          className="rounded border border-zinc-300 dark:border-zinc-600 bg-zinc-100 dark:bg-zinc-700 px-4 py-2 text-center font-mono text-xs text-zinc-700 dark:text-zinc-300 hover:bg-zinc-200 dark:hover:bg-zinc-600"
          title="Guide d'entretien (questions + grille) en Markdown, à coller dans Confluence"
        >
          confluence
        </a>
      </div>
      <button
        type="button"


@@ -5,6 +5,28 @@ export interface EvaluationWithScores extends Evaluation {
  dimensionScores: (DimensionScore & { dimension: TemplateDimension })[];
}

/** Parse suggestedQuestions JSON array */
function parseQuestions(s: string | null | undefined): string[] {
  if (!s) return [];
  try {
    const arr = JSON.parse(s) as unknown;
    return Array.isArray(arr) ? arr.filter((x): x is string => typeof x === "string") : [];
  } catch {
    return [];
  }
}
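// Example (hypothetical input): parseQuestions('["Décrivez un incident récent","Comment priorisez-vous ?"]')
// returns both strings; null, malformed JSON, or a non-array value yields [].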

/** Parse rubric "1:X;2:Y;..." into labels */
function parseRubric(rubric: string): string[] {
  if (!rubric || rubric === "1-5") return ["1", "2", "3", "4", "5"];
  const labels: string[] = [];
  for (let i = 1; i <= 5; i++) {
    const m = rubric.match(new RegExp(`${i}:([^;]+)`));
    labels.push(m ? m[1].trim() : String(i));
  }
  return labels;
}
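// Example (hypothetical labels): parseRubric("1:Novice;2:Débutant;3:Confirmé;4:Avancé;5:Expert")
// -> ["Novice", "Débutant", "Confirmé", "Avancé", "Expert"];
// an empty rubric or the "1-5" shorthand falls back to ["1", "2", "3", "4", "5"].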

/** Compute average score across dimensions (1-5 scale) */
export function computeAverageScore(scores: { score: number | null }[]): number {
  const valid = scores.filter((s) => s.score != null && s.score >= 1 && s.score <= 5);
@@ -81,3 +103,66 @@ export function evaluationToCsvRows(evalData: EvaluationWithScores): string[][]
  }
  return rows;
}

const CONFIDENCE_LABELS: Record<string, string> = {
  low: "Faible",
  med: "Moyenne",
  high: "Haute",
};

/** Convert evaluation template (dimensions + questions + rubric) to Markdown for Confluence paste */
export function evaluationToConfluenceMarkup(evalData: EvaluationWithScores): string {
  const lines: string[] = [];
  lines.push(`# Guide d'entretien — ${evalData.template?.name ?? "Évaluation"}`);
  lines.push("");
  lines.push(`**Candidat:** ${evalData.candidateName} | **Rôle:** ${evalData.candidateRole}`);
  if (evalData.candidateTeam) lines.push(`**Équipe:** ${evalData.candidateTeam}`);
  lines.push(`**Évaluateur:** ${evalData.evaluatorName} | **Date:** ${evalData.evaluationDate.toISOString().split("T")[0]}`);
  lines.push("");
  lines.push("## Système de notation");
  lines.push("");
  lines.push("Chaque dimension est notée de **1** (faible) à **5** (expert). La grille ci-dessous détaille les critères par niveau.");
  lines.push("");
  for (const ds of evalData.dimensionScores) {
    // Dimension fields vary across template versions; narrow the shape once instead of casting per access.
    const dim = ds.dimension as {
      title?: string;
      name?: string;
      suggestedQuestions?: string | null;
      rubric?: string;
    };
    const title = dim.title ?? dim.name ?? "";
    lines.push(`## ${title}`);
    lines.push("");
    const questions = parseQuestions(dim.suggestedQuestions);
    if (questions.length > 0) {
      lines.push("### Questions suggérées");
      questions.forEach((q, i) => {
        lines.push(`${i + 1}. ${q}`);
      });
      lines.push("");
    }
    const rubricLabels = parseRubric(dim.rubric ?? "");
    if (rubricLabels.length > 0) {
      lines.push("### Grille");
      rubricLabels.forEach((label, i) => {
        lines.push(`- ${i + 1}: ${label}`);
      });
      lines.push("");
    }
    lines.push("### Notes évaluateur");
    if (ds.score != null) {
      lines.push(`- **Score:** ${ds.score}/5`);
      if (ds.confidence) {
        lines.push(`- **Confiance:** ${CONFIDENCE_LABELS[ds.confidence] ?? ds.confidence}`);
      }
    } else {
      lines.push(`- **Score:** _à compléter_`);
      lines.push(`- **Confiance:** _à compléter_`);
    }
    lines.push(`- **Notes candidat:** ${ds.candidateNotes ?? "_à compléter_"}`);
    lines.push(`- **Justification:** ${ds.justification ?? "_à compléter_"}`);
    lines.push(`- **Exemples:** ${ds.examplesObserved ?? "_à compléter_"}`);
    lines.push("");
  }
  return lines.join("\n");
}
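As a usage sketch (assumed data: a template named "Grille Backend", candidate "Jane Doe", no team set; not output from a real run), the generated document begins:

const markup = evaluationToConfluenceMarkup(evaluation);
// # Guide d'entretien — Grille Backend
//
// **Candidat:** Jane Doe | **Rôle:** Senior Backend
// **Évaluateur:** John Smith | **Date:** 2026-02-23
//
// ## Système de notation
// ...followed by one section per dimension: questions suggérées, grille, notes évaluateur.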