pos momas

This commit is contained in:
aguitauwu
2026-02-11 17:36:28 -06:00
parent 0cc8e39446
commit 9e9cde648c

View File

@@ -1,14 +1,6 @@
import { NextRequest, NextResponse } from "next/server"; import { NextRequest, NextResponse } from "next/server";
<<<<<<< HEAD
/** Short model alias → fully-qualified Hugging Face repository ID. */
const HF_MODELS: Record<string, string> = Object.fromEntries([
  ["yuuki-v0.1", "YuuKi-OS/Yuuki-v0.1"],
  ["yuuki-3.7", "YuuKi-OS/Yuuki-3.7"],
  ["yuuki-best", "YuuKi-OS/Yuuki-best"],
]);
=======
const YUUKI_API_URL = "https://opceanai-yuuki-api.hf.space/generate"; const YUUKI_API_URL = "https://opceanai-yuuki-api.hf.space/generate";
>>>>>>> fe68380 (uwu)
const VALID_MODELS = ["yuuki-best", "yuuki-3.7", "yuuki-v0.1"]; const VALID_MODELS = ["yuuki-best", "yuuki-3.7", "yuuki-v0.1"];
@@ -21,14 +13,15 @@ async function callYuukiAPI(
model: string model: string
) { ) {
// Build a prompt from the message history // Build a prompt from the message history
const prompt = messages const prompt =
.map((m) => { messages
if (m.role === "system") return `System: ${m.content}`; .map((m) => {
if (m.role === "user") return `User: ${m.content}`; if (m.role === "system") return `System: ${m.content}`;
if (m.role === "assistant") return `Assistant: ${m.content}`; if (m.role === "user") return `User: ${m.content}`;
return m.content; if (m.role === "assistant") return `Assistant: ${m.content}`;
}) return m.content;
.join("\n") + "\nAssistant:"; })
.join("\n") + "\nAssistant:";
const response = await fetch(YUUKI_API_URL, { const response = await fetch(YUUKI_API_URL, {
method: "POST", method: "POST",
@@ -52,46 +45,7 @@ async function callYuukiAPI(
const data = await response.json(); const data = await response.json();
<<<<<<< HEAD // Handle various response formats
/**
* Calls HuggingFace Inference API via the new router.huggingface.co endpoint.
* Uses the OpenAI-compatible chat completions format.
*/
async function callHuggingFace(
token: string,
model: string,
messages: { role: string; content: string }[]
) {
const modelId = HF_MODELS[model] || HF_MODELS["yuuki-best"];
const url = `https://router.huggingface.co/hf-inference/models/${modelId}/v1/chat/completions`;
const response = await fetch(url, {
method: "POST",
headers: {
Authorization: `Bearer ${token}`,
"Content-Type": "application/json",
},
body: JSON.stringify({
model: modelId,
messages,
max_tokens: 1024,
temperature: 0.7,
top_p: 0.9,
}),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(
`HuggingFace error (${response.status}): ${errorText.slice(0, 200)}`
);
}
const data = await response.json();
const content =
data.choices?.[0]?.message?.content?.trim() || "No response generated.";
=======
// Handle various response formats from the HF Space
let generatedText = ""; let generatedText = "";
if (typeof data === "string") { if (typeof data === "string") {
@@ -108,17 +62,16 @@ async function callHuggingFace(
generatedText = JSON.stringify(data); generatedText = JSON.stringify(data);
} }
// Clean up conversational artifacts // Clean conversational artifacts
const cutoffs = ["User:", "System:", "\nUser", "\nSystem"]; const cutoffs = ["User:", "System:", "\nUser", "\nSystem"];
for (const cutoff of cutoffs) { for (const cutoff of cutoffs) {
const idx = generatedText.indexOf(cutoff); const idx = generatedText.indexOf(cutoff);
if (idx > 0) generatedText = generatedText.substring(0, idx).trim(); if (idx > 0) generatedText = generatedText.substring(0, idx).trim();
} }
>>>>>>> fe68380 (uwu)
return { return {
content, content: generatedText || "No response generated.",
id: data.id || `chatcmpl-${Date.now()}`, id: `chatcmpl-${Date.now()}`,
model, model,
}; };
} }
@@ -136,6 +89,7 @@ export async function POST(req: NextRequest) {
} }
const modelKey = model || "yuuki-best"; const modelKey = model || "yuuki-best";
if (!VALID_MODELS.includes(modelKey)) { if (!VALID_MODELS.includes(modelKey)) {
return NextResponse.json({ error: "Invalid model" }, { status: 400 }); return NextResponse.json({ error: "Invalid model" }, { status: 400 });
} }