- Defensive default for count_in in TempoMapEditor prevents crash when AI omits field
- Fix hardcoded GitHub registry URL → git.avezzano.io/the_og/clicktrack-registry
- Add response_format json_schema to Ollama provider so count_in is always required

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
165 lines
5.3 KiB
TypeScript
165 lines
5.3 KiB
TypeScript
import 'server-only';
|
|
|
|
import type { CTPDocument } from "@/lib/ctp/schema";
|
|
import type { AnalysisInput, AnalysisProvider } from "@/lib/analysis/providers";
|
|
import { SYSTEM_PROMPT, CTP_SCHEMA } from "./anthropic";
|
|
|
|
function getBaseUrl(): string {
|
|
return process.env.OLLAMA_BASE_URL ?? "http://localhost:11434";
|
|
}
|
|
|
|
// ─── Model list ───────────────────────────────────────────────────────────────
|
|
|
|
// Shape of the JSON returned by Ollama's /api/tags endpoint.
// `models` is optional — callers default to an empty list when absent.
interface OllamaTagsResponse {
  models?: Array<{ name: string }>;
}
|
|
|
|
export async function getOllamaModels(): Promise<string[]> {
|
|
try {
|
|
const url = `${getBaseUrl()}/api/tags`;
|
|
const response = await fetch(url, { signal: AbortSignal.timeout(5000) });
|
|
if (!response.ok) return [];
|
|
const json = await response.json() as OllamaTagsResponse;
|
|
return (json.models ?? []).map((m) => m.name);
|
|
} catch {
|
|
return [];
|
|
}
|
|
}
|
|
|
|
// ─── Chat completions helper ──────────────────────────────────────────────────
|
|
|
|
async function callOllama(model: string, userMessage: string): Promise<string> {
|
|
const baseUrl = getBaseUrl();
|
|
const response = await fetch(`${baseUrl}/v1/chat/completions`, {
|
|
method: "POST",
|
|
headers: { "Content-Type": "application/json" },
|
|
body: JSON.stringify({
|
|
model,
|
|
messages: [
|
|
{ role: "system", content: SYSTEM_PROMPT },
|
|
{
|
|
role: "user",
|
|
content:
|
|
userMessage +
|
|
"\n\nRespond with valid JSON only. Do not add any explanation or markdown. Your entire response must be a single valid JSON object matching the schema.",
|
|
},
|
|
],
|
|
response_format: {
|
|
type: "json_schema",
|
|
json_schema: {
|
|
name: "CTPDocument",
|
|
schema: CTP_SCHEMA,
|
|
},
|
|
},
|
|
stream: false,
|
|
}),
|
|
signal: AbortSignal.timeout(120000), // 2-minute timeout for slow local models
|
|
});
|
|
|
|
if (!response.ok) {
|
|
const text = await response.text().catch(() => "");
|
|
throw new Error(`Ollama API error ${response.status}: ${text.slice(0, 200)}`);
|
|
}
|
|
|
|
const json = await response.json() as {
|
|
choices?: Array<{ message?: { content?: string } }>;
|
|
};
|
|
|
|
const content = json.choices?.[0]?.message?.content;
|
|
if (!content) {
|
|
throw new Error("Ollama did not return a message content");
|
|
}
|
|
|
|
return content;
|
|
}
|
|
|
|
// ─── Provider implementation ──────────────────────────────────────────────────
|
|
|
|
export const ollamaProvider: AnalysisProvider = {
|
|
id: "ollama",
|
|
label: "Ollama",
|
|
type: "local-ai",
|
|
|
|
async isAvailable() {
|
|
const url = getBaseUrl();
|
|
try {
|
|
const response = await fetch(`${url}/api/tags`, {
|
|
signal: AbortSignal.timeout(5000),
|
|
});
|
|
if (response.ok) {
|
|
return { available: true };
|
|
}
|
|
return { available: false, reason: `Ollama not reachable at ${url}` };
|
|
} catch {
|
|
return { available: false, reason: `Ollama not reachable at ${url}` };
|
|
}
|
|
},
|
|
|
|
async generateCTP(input: AnalysisInput): Promise<CTPDocument> {
|
|
const { ollamaModel, bpm, duration, title, artist, mbid, contributed_by } = input;
|
|
|
|
if (!ollamaModel) {
|
|
throw new Error("ollamaModel is required for Ollama provider");
|
|
}
|
|
|
|
const approxBars = Math.round((duration * bpm) / 60 / 4);
|
|
|
|
const userMessage = `\
|
|
Generate a CTP document for the following song:
|
|
|
|
Title: ${title ?? "Unknown Title"}
|
|
Artist: ${artist ?? "Unknown Artist"}
|
|
MusicBrainz ID: ${mbid ?? "unknown"}
|
|
Detected BPM: ${bpm}
|
|
Duration: ${duration.toFixed(1)} seconds (~${approxBars} bars at 4/4)
|
|
Contributed by: ${contributed_by}
|
|
|
|
Create a plausible section layout for this song. If this is a well-known song, use your knowledge of its actual arrangement. If not, use a sensible generic structure.`;
|
|
|
|
// Attempt parse with one retry on failure
|
|
let content: string;
|
|
try {
|
|
content = await callOllama(ollamaModel, userMessage);
|
|
} catch (err) {
|
|
throw new Error(
|
|
`Ollama request failed: ${err instanceof Error ? err.message : String(err)}`
|
|
);
|
|
}
|
|
|
|
const tryParse = (raw: string): CTPDocument | null => {
|
|
// Strip markdown code fences if present
|
|
const stripped = raw.replace(/^```(?:json)?\s*/i, "").replace(/\s*```\s*$/, "").trim();
|
|
try {
|
|
const doc = JSON.parse(stripped) as CTPDocument;
|
|
if (!doc.metadata.created_at || doc.metadata.created_at.includes("placeholder")) {
|
|
doc.metadata.created_at = new Date().toISOString();
|
|
}
|
|
return doc;
|
|
} catch {
|
|
return null;
|
|
}
|
|
};
|
|
|
|
const firstAttempt = tryParse(content);
|
|
if (firstAttempt) return firstAttempt;
|
|
|
|
// Retry once
|
|
let retryContent: string;
|
|
try {
|
|
retryContent = await callOllama(ollamaModel, userMessage);
|
|
} catch (err) {
|
|
throw new Error(
|
|
`Ollama retry request failed: ${err instanceof Error ? err.message : String(err)}`
|
|
);
|
|
}
|
|
|
|
const secondAttempt = tryParse(retryContent);
|
|
if (secondAttempt) return secondAttempt;
|
|
|
|
throw new Error(
|
|
`Ollama (${ollamaModel}) returned a response that could not be parsed as a valid CTP document. ` +
|
|
`Response preview: ${content.slice(0, 200)}`
|
|
);
|
|
},
|
|
};
|