Mirror of https://github.com/YuuKi-OS/Yuuki-chat.git, synced 2026-02-18 13:51:10 +00:00
pos momas
@@ -1,14 +1,6 @@
 import { NextRequest, NextResponse } from "next/server";
 
-<<<<<<< HEAD
-const HF_MODELS: Record<string, string> = {
-  "yuuki-v0.1": "YuuKi-OS/Yuuki-v0.1",
-  "yuuki-3.7": "YuuKi-OS/Yuuki-3.7",
-  "yuuki-best": "YuuKi-OS/Yuuki-best",
-};
-=======
 const YUUKI_API_URL = "https://opceanai-yuuki-api.hf.space/generate";
->>>>>>> fe68380 (uwu)
 
 const VALID_MODELS = ["yuuki-best", "yuuki-3.7", "yuuki-v0.1"];
 
@@ -21,14 +13,15 @@ async function callYuukiAPI(
   model: string
 ) {
   // Build a prompt from the message history
-  const prompt = messages
-    .map((m) => {
-      if (m.role === "system") return `System: ${m.content}`;
-      if (m.role === "user") return `User: ${m.content}`;
-      if (m.role === "assistant") return `Assistant: ${m.content}`;
-      return m.content;
-    })
-    .join("\n") + "\nAssistant:";
+  const prompt =
+    messages
+      .map((m) => {
+        if (m.role === "system") return `System: ${m.content}`;
+        if (m.role === "user") return `User: ${m.content}`;
+        if (m.role === "assistant") return `Assistant: ${m.content}`;
+        return m.content;
+      })
+      .join("\n") + "\nAssistant:";
 
   const response = await fetch(YUUKI_API_URL, {
     method: "POST",
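For reference, the reindented prompt builder in this hunk (behavior unchanged) flattens the chat history into a single completion-style string. A minimal sketch of what it produces, using a hypothetical two-message history; the mapping logic is copied from the diff:

// Sketch: output of the prompt builder for a sample history.
// The message array here is hypothetical; the mapping mirrors the diff.
const messages = [
  { role: "system", content: "You are Yuuki." },
  { role: "user", content: "Hello!" },
];

const prompt =
  messages
    .map((m) => {
      if (m.role === "system") return `System: ${m.content}`;
      if (m.role === "user") return `User: ${m.content}`;
      if (m.role === "assistant") return `Assistant: ${m.content}`;
      return m.content;
    })
    .join("\n") + "\nAssistant:";

console.log(prompt);
// System: You are Yuuki.
// User: Hello!
// Assistant: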
@@ -52,46 +45,7 @@ async function callYuukiAPI(
 
   const data = await response.json();
 
-<<<<<<< HEAD
-/**
- * Calls HuggingFace Inference API via the new router.huggingface.co endpoint.
- * Uses the OpenAI-compatible chat completions format.
- */
-async function callHuggingFace(
-  token: string,
-  model: string,
-  messages: { role: string; content: string }[]
-) {
-  const modelId = HF_MODELS[model] || HF_MODELS["yuuki-best"];
-  const url = `https://router.huggingface.co/hf-inference/models/${modelId}/v1/chat/completions`;
-
-  const response = await fetch(url, {
-    method: "POST",
-    headers: {
-      Authorization: `Bearer ${token}`,
-      "Content-Type": "application/json",
-    },
-    body: JSON.stringify({
-      model: modelId,
-      messages,
-      max_tokens: 1024,
-      temperature: 0.7,
-      top_p: 0.9,
-    }),
-  });
-
-  if (!response.ok) {
-    const errorText = await response.text();
-    throw new Error(
-      `HuggingFace error (${response.status}): ${errorText.slice(0, 200)}`
-    );
-  }
-
-  const data = await response.json();
-  const content =
-    data.choices?.[0]?.message?.content?.trim() || "No response generated.";
-=======
-  // Handle various response formats from the HF Space
+  // Handle various response formats
   let generatedText = "";
 
   if (typeof data === "string") {
@@ -108,17 +62,16 @@ async function callHuggingFace(
     generatedText = JSON.stringify(data);
   }
 
-  // Clean up conversational artifacts
+  // Clean conversational artifacts
   const cutoffs = ["User:", "System:", "\nUser", "\nSystem"];
   for (const cutoff of cutoffs) {
     const idx = generatedText.indexOf(cutoff);
     if (idx > 0) generatedText = generatedText.substring(0, idx).trim();
   }
->>>>>>> fe68380 (uwu)
 
   return {
-    content,
-    id: data.id || `chatcmpl-${Date.now()}`,
+    content: generatedText || "No response generated.",
+    id: `chatcmpl-${Date.now()}`,
     model,
   };
 }
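The middle of the response-handling chain (old lines 98 to 107) is unchanged and therefore elided between the two hunks above. A hedged sketch of the overall shape after this commit: the string case, the JSON.stringify fallback, and the cutoff cleanup are taken verbatim from the diff, while the object/array branches in the middle are assumptions, not the Space's confirmed response format:

// Sketch of the resolved response handling. Branches marked "assumed"
// are not shown in the diff and are illustrative only.
function extractGeneratedText(data: unknown): string {
  let generatedText = "";

  if (typeof data === "string") {
    generatedText = data; // from the diff
  } else if (data && typeof data === "object") {
    // Assumed shapes: the diff elides these branches.
    const d = data as { generated_text?: string; response?: string };
    generatedText = d.generated_text ?? d.response ?? JSON.stringify(data);
  } else {
    generatedText = JSON.stringify(data); // fallback, from the diff
  }

  // Clean conversational artifacts (verbatim logic from the diff).
  const cutoffs = ["User:", "System:", "\nUser", "\nSystem"];
  for (const cutoff of cutoffs) {
    const idx = generatedText.indexOf(cutoff);
    if (idx > 0) generatedText = generatedText.substring(0, idx).trim();
  }

  return generatedText;
}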
@@ -136,6 +89,7 @@ export async function POST(req: NextRequest) {
   }
 
   const modelKey = model || "yuuki-best";
+
   if (!VALID_MODELS.includes(modelKey)) {
     return NextResponse.json({ error: "Invalid model" }, { status: 400 });
   }
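After this commit the route accepts any of the three VALID_MODELS keys and falls back to "yuuki-best" when model is omitted, rejecting anything else with HTTP 400. A hedged usage sketch; the "/api/chat" path and the request body shape are assumptions based on the route's role, not shown in the diff:

// Sketch: exercising the model validation added above. Endpoint path and
// body shape are assumed, not confirmed by the diff.
const res = await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "yuuki-3.7", // any of VALID_MODELS; omit to default to "yuuki-best"
    messages: [{ role: "user", content: "Hello!" }],
  }),
});

// An unknown model key yields HTTP 400 with { error: "Invalid model" }.
if (!res.ok) console.error(await res.json());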