yuy chat init

This commit is contained in:
aguitauwu
2026-02-06 21:29:55 -06:00
parent eecf0b9ac2
commit 0392a6b96d
19 changed files with 2257 additions and 112 deletions

160
LICENSE

@@ -1,7 +1,9 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright 2026 OpceanAI, Yuuki
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
@@ -63,130 +65,64 @@
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor
hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
royalty-free, irrevocable copyright license to reproduce, prepare
Derivative Works of, publicly display, publicly perform, sublicense,
and distribute the Work and such Derivative Works in Source or Object
form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor
hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
royalty-free, irrevocable (except as stated in this section) patent
license to make, have made, use, offer to sell, sell, import, and
otherwise transfer the Work, where such license applies only to those
patent claims licensable by such Contributor that are necessarily
infringed by their Contribution(s) alone or by combination of their
Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity
alleging that the Work or a Contribution constitutes patent
infringement, then any patent licenses granted under this License
shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative
Works thereof in any medium, with or without modifications, provided
that You meet the following conditions:
(a) You must give recipients a copy of this License; and
(b) You must cause modified files to carry prominent notices stating
that You changed the files; and
(c) You must retain all copyright, patent, trademark, and attribution
notices; and
(d) Any NOTICE file must be included if present.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution submitted
shall be under the terms of this License.
6. Trademarks.
This License does not grant permission to use the trade names,
trademarks, or service marks of the Licensor.
7. Disclaimer of Warranty.
The Work is provided on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND.
8. Limitation of Liability.
In no event shall any Contributor be liable for damages arising from
the use of the Work.
9. Accepting Warranty or Additional Liability.
You may offer support or warranty only on Your own behalf.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
Copyright 2026 OpceanAI
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

9
yuy-chat-complete/.gitignore vendored Normal file

@@ -0,0 +1,9 @@
/target
Cargo.lock
*.swp
*.swo
*~
.DS_Store
.vscode/
.idea/
*.log

59
yuy-chat-complete/Cargo.toml Normal file

@@ -0,0 +1,59 @@
[package]
name = "yuy-chat"
version = "0.1.0"
edition = "2021"
authors = ["Yuuki Team"]
description = "Beautiful TUI chat interface for local AI models"
license = "MIT"
repository = "https://github.com/YuuKi-OS/yuy-chat"
[dependencies]
# TUI Framework
ratatui = "0.29"  # src/ui uses Frame::area() and Frame::set_cursor_position()
crossterm = "0.27"
# Async runtime
tokio = { version = "1.40", features = ["full"] }
# HTTP client for HuggingFace API
reqwest = { version = "0.12", features = ["json"] }
# File system operations
walkdir = "2.4"
dirs = "5.0"
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
toml = "0.8"
# Error handling
anyhow = "1.0"
thiserror = "1.0"
# Date/time for conversations
chrono = { version = "0.4", features = ["serde"] }
# Logging
tracing = "0.1"
tracing-subscriber = "0.3"
# Terminal colors
colored = "2.1"
# Async process management
tokio-util = { version = "0.7", features = ["codec"] }
futures = "0.3"
# Command detection
which = "6.0"
[profile.release]
opt-level = "z"
lto = true
codegen-units = 1
strip = true
[[bin]]
name = "yuy-chat"
path = "src/main.rs"

180
yuy-chat-complete/README.md Normal file

@@ -0,0 +1,180 @@
# yuy-chat
<div align="center">
```
$$\ $$\
\$$\ $$ |
\$$\ $$ /$$\ $$\ $$\ $$\
\$$$$ / $$ | $$ |$$ | $$ |
\$$ / $$ | $$ |$$ | $$ |
$$ | $$ | $$ |$$ | $$ |
$$ | \$$$$$$ |\$$$$$$$ |
\__| \______/ \____$$ |
$$\ $$ |
\$$$$$$ |
\______/
```
**Beautiful TUI chat interface for local AI models**
[![Rust](https://img.shields.io/badge/rust-1.70%2B-orange.svg)](https://www.rust-lang.org)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
</div>
---
## 🌟 Features
- **Beautiful TUI** - Gorgeous terminal interface powered by ratatui
- 🔍 **Auto-discovery** - Automatically finds `.gguf` and `.llamafile` models
- 🎨 **Presets** - Creative, Balanced, and Precise modes
- 💾 **Save conversations** - Keep your chat history
- 🌐 **HuggingFace API** - Use models from HuggingFace (optional)
- **Fast & Lightweight** - ~5MB binary, minimal dependencies
- 🚀 **Streaming responses** - See words appear as they're generated
- 🎯 **Zero configuration** - Just run and chat
## 📦 Installation
### From source:
```bash
git clone https://github.com/YuuKi-OS/yuy-chat
cd yuy-chat
cargo build --release
```
### Install globally:
```bash
cargo install --path .
```
## 🚀 Quick Start
```bash
# Run yuy-chat
yuy-chat
# It will auto-scan ~/.yuuki/models/ for .gguf and .llamafile files
# Select a model and start chatting!
```
## 📁 Supported Model Formats
- **GGUF** (`.gguf`) - Runs with llama.cpp
- **Llamafile** (`.llamafile`) - Self-contained executables
## 🎮 Controls
### Model Selector
- `↑/↓` or `j/k` - Navigate models
- `Enter` - Select model
- `R` - Refresh model list
- `Q` - Quit
### Chat
- `Type` - Write your message
- `Enter` - Send message
- `Shift+Enter` - New line
- `Ctrl+Enter` - Send (always)
- `Ctrl+C` - Open menu
- `Ctrl+L` - Clear chat
- `Ctrl+S` - Save conversation
- `↑/↓` - Scroll chat (when input is empty)
### Menu
- `1` - Change model
- `2` - Change preset
- `3` - Save conversation
- `4` - Load conversation
- `5` - Clear chat
- `6` - Settings
- `Q` - Back to chat
## ⚙️ Configuration
Config file location: `~/.config/yuy-chat/config.toml`
```toml
models_dir = "/home/user/.yuuki/models"
hf_token = "hf_xxxxxxxxxxxxx" # Optional
default_preset = "Balanced"
save_history = true
theme = "Dark"
```
## 🎯 Presets
- **Creative** (temp: 0.8, top_p: 0.9) - More random and creative
- **Balanced** (temp: 0.6, top_p: 0.7) - Good middle ground
- **Precise** (temp: 0.3, top_p: 0.5) - More focused and deterministic
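Under the hood these presets are just two sampling parameters. A minimal sketch of the mapping, mirroring the `Preset` implementation in `src/config.rs` from this repository (the printed values end up as llama.cpp's `--temp` and `--top-p` flags):
```rust
// Sketch of the preset-to-parameter mapping (see src/config.rs).
#[derive(Debug, Clone, Copy)]
enum Preset {
    Creative,
    Balanced,
    Precise,
}

impl Preset {
    fn temperature(self) -> f32 {
        match self {
            Preset::Creative => 0.8,
            Preset::Balanced => 0.6,
            Preset::Precise => 0.3,
        }
    }

    fn top_p(self) -> f32 {
        match self {
            Preset::Creative => 0.9,
            Preset::Balanced => 0.7,
            Preset::Precise => 0.5,
        }
    }
}

fn main() {
    for preset in [Preset::Creative, Preset::Balanced, Preset::Precise] {
        // Passed to llama.cpp as --temp and --top-p.
        println!("{:?}: temp={} top_p={}", preset, preset.temperature(), preset.top_p());
    }
}
```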
## 🌐 HuggingFace Integration
Add your HuggingFace token in settings to use models via API:
1. Press `Ctrl+C` → `6` (Settings)
2. Edit `HuggingFace Token`
3. Paste your token from https://huggingface.co/settings/tokens
4. Save and refresh models
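The token is simply sent as a `Bearer` header to the HF Inference API. A minimal standalone sketch of the request, mirroring the shape used by `src/models/hf_api.rs` (the model name, prompt, and `HF_TOKEN` environment variable are illustrative):
```rust
use reqwest::Client;
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Illustrative: read the token from an environment variable.
    let token = std::env::var("HF_TOKEN").unwrap_or_default();
    let url = "https://api-inference.huggingface.co/models/OpceanAI/Yuuki-best";

    // Same request shape as HuggingFaceAPI::generate in src/models/hf_api.rs.
    let body = json!({
        "inputs": "Hello!",
        "parameters": { "temperature": 0.6, "top_p": 0.7, "max_new_tokens": 512 }
    });

    let response = Client::new()
        .post(url)
        .header("Authorization", format!("Bearer {token}"))
        .json(&body)
        .send()
        .await?;

    println!("status: {}", response.status());
    println!("{}", response.text().await?);
    Ok(())
}
```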
## 📚 Directory Structure
```
~/.config/yuy-chat/
├── config.toml          # Configuration
└── conversations/       # Saved chats
    ├── conversation-20240206-143022.json
    └── conversation-20240206-150133.json
```
## 🔧 Requirements
- **Rust 1.70+** (for building)
- **llama.cpp** (for .gguf models) - Install with: `yuy runtime install llama-cpp`
- **chmod +x** (for .llamafile models)
## 🤝 Integration with yuy
yuy-chat is designed to work alongside [yuy](https://github.com/YuuKi-OS/yuy):
```bash
# Download models with yuy
yuy download Yuuki-best
# Chat with yuy-chat
yuy-chat
```
## 🐛 Troubleshooting
**No models found?**
- Make sure you have models in `~/.yuuki/models/`
- Or specify custom directory: `yuy-chat --models-dir /path/to/models`
**llama.cpp not found?**
- Install with: `yuy runtime install llama-cpp`
- Or: `brew install llama.cpp` (macOS)
- Or: `pkg install llama-cpp` (Termux)
**Streaming not working?**
- Ensure llama.cpp is installed and in PATH
- Check model file permissions
## 📝 License
MIT License - see [LICENSE](LICENSE) file
## 🌸 Credits
Made with love by the Yuuki team
- TUI Framework: [ratatui](https://github.com/ratatui-org/ratatui)
- Inference: [llama.cpp](https://github.com/ggerganov/llama.cpp)
---
**For model management, see [yuy](https://github.com/YuuKi-OS/yuy)**

495
yuy-chat-complete/USAGE.md Normal file

@@ -0,0 +1,495 @@
# yuy-chat - Complete Usage Guide
## 📖 Contents
1. [Installation](#installation)
2. [First Run](#first-run)
3. [Daily Use](#daily-use)
4. [Advanced Configuration](#advanced-configuration)
5. [HuggingFace Integration](#huggingface-integration)
6. [Tips and Tricks](#tips-and-tricks)
7. [Troubleshooting](#troubleshooting)
---
## 🔧 Installation
### Termux (Android)
```bash
# Install Rust
pkg install rust
# Clone and build
git clone https://github.com/YuuKi-OS/yuy-chat
cd yuy-chat
cargo build --release -j 1 # use a single job on Termux
# Install globally
cargo install --path .
```
### Linux/macOS
```bash
# Clone and build
git clone https://github.com/YuuKi-OS/yuy-chat
cd yuy-chat
cargo build --release
# Install
cargo install --path .
```
### Windows
```bash
# Same process as Linux/macOS
git clone https://github.com/YuuKi-OS/yuy-chat
cd yuy-chat
cargo build --release
cargo install --path .
```
---
## 🎬 First Run
### 1. Make sure you have models
By default, yuy-chat looks for models in `~/.yuuki/models/`.
**Option A: Use yuy**
```bash
yuy download Yuuki-best
```
**Option B: Copy them manually**
```bash
mkdir -p ~/.yuuki/models/
cp /path/to/your/model.gguf ~/.yuuki/models/
```
### 2. Install llama.cpp
**Termux:**
```bash
pkg install llama-cpp
```
**macOS:**
```bash
brew install llama.cpp
```
**Linux:**
```bash
# Download from the releases page
wget https://github.com/ggerganov/llama.cpp/releases/...
chmod +x llama-cli
sudo mv llama-cli /usr/local/bin/
```
### 3. Run yuy-chat
```bash
yuy-chat
```
You will see the model selector. Use `↑/↓` to navigate and `Enter` to select.
---
## 💬 Daily Use
### Basic Flow
```
1. Run: yuy-chat
2. Select a model with ↑/↓ and Enter
3. Type your message
4. Press Enter to send
5. Yuuki responds (streaming)
6. Continue the conversation
```
### Useful Keyboard Shortcuts
**In chat:**
- `Enter` - Send the message
- `Shift+Enter` - New line (for multi-line messages)
- `Ctrl+L` - Clear the chat
- `Ctrl+S` - Save the conversation
- `Ctrl+C` - Open the menu
**Writing code:**
```
You: Give me a Python code example
[Shift+Enter for a new line]
def hello():
    print("Hello")
[Shift+Enter]
hello()
[Ctrl+Enter to send]
```
### Changing the Preset
```
1. Ctrl+C (open the menu)
2. Press 2 (Change Preset)
Cycles through: Creative → Balanced → Precise
```
**When to use each preset:**
- **Creative**: Writing stories, brainstorming, ideas
- **Balanced**: General use, conversation
- **Precise**: Code, math, exact data
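The preset only changes the sampling flags passed to llama.cpp. A minimal sketch of the resulting invocation, mirroring `build_llama_cpp_command` in `src/models/runtime.rs` (the model path and prompt here are illustrative):
```rust
use std::process::Command;

fn main() {
    // Balanced preset values (see src/config.rs).
    let (temperature, top_p) = (0.6, 0.7);

    // Same flags as ModelRuntime::build_llama_cpp_command; path and prompt are illustrative.
    let mut cmd = Command::new("llama-cli");
    cmd.arg("-m").arg("/home/user/.yuuki/models/Yuuki-best.gguf")
        .arg("--temp").arg(temperature.to_string())
        .arg("--top-p").arg(top_p.to_string())
        .arg("-c").arg("4096")
        .arg("-p").arg("Hello!");

    println!("{:?}", cmd);
}
```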
---
## ⚙️ Advanced Configuration
### Changing the Models Directory
**Method 1: Settings**
```bash
yuy-chat
Ctrl+C → 6 (Settings)
Edit "Models Directory"
```
**Method 2: Config file**
```bash
nano ~/.config/yuy-chat/config.toml
```
```toml
models_dir = "/custom/path/to/models"
```
### Customizing Presets
Edit the code or pass llama.cpp parameters directly:
```rust
// In src/config.rs, adjust the Preset values:
pub fn temperature(&self) -> f32 {
    match self {
        Preset::Creative => 0.9, // more random
        // ...
    }
}
```
### Light Theme
```toml
theme = "Light"
```
---
## 🌐 HuggingFace Integration
### 1. Get a Token
1. Go to https://huggingface.co/settings/tokens
2. Click "Create new token"
3. Type: "Read"
4. Copy the token
### 2. Configure it in yuy-chat
**Method A: UI**
```
Ctrl+C → 6 (Settings)
Navigate to "HuggingFace Token"
Enter → Paste your token
```
**Method B: Config file**
```toml
hf_token = "hf_abcdefghijklmnopqrstuvwxyz1234567890"
```
### 3. Using HF Models
After configuring the token:
```
yuy-chat
[You will see local models + HF API models]
> Yuuki-best.gguf (Local)
Yuuki-3.7.gguf (Local)
Yuuki-best (HF API) <-- Uses the API
```
**Pros:**
- Takes no local disk space
- Always up to date
- Access to private models
**Cons:**
- Requires an internet connection
- Slower than local models
- Rate limits on the free plan
---
## 💡 Tips and Tricks
### Saving Important Conversations
```
Ctrl+S while chatting
→ Saved to ~/.config/yuy-chat/conversations/
```
### Loading a Previous Conversation
```
Ctrl+C → 4 (Load Conversation)
↑/↓ to navigate
Enter to load
```
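Saved conversations are plain JSON (see `src/conversation.rs`). A sketch of reading one back outside the TUI, assuming the project's serde / serde_json / anyhow dependencies; the filename is illustrative:
```rust
use serde::Deserialize;

// Field shapes mirror Message and Conversation in src/conversation.rs.
#[derive(Debug, Deserialize)]
struct Message {
    role: String,
    content: String,
    timestamp: String,
}

#[derive(Debug, Deserialize)]
struct Conversation {
    messages: Vec<Message>,
    created_at: String,
    updated_at: String,
}

fn main() -> anyhow::Result<()> {
    // Real files live under ~/.config/yuy-chat/conversations/; this name is illustrative.
    let json = std::fs::read_to_string("conversation-20240206-143022.json")?;
    let conv: Conversation = serde_json::from_str(&json)?;
    println!("started {} ({} messages)", conv.created_at, conv.messages.len());
    for msg in &conv.messages {
        println!("[{}] {}: {}", msg.timestamp, msg.role, msg.content);
    }
    Ok(())
}
```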
### Prompt Engineering
**For better answers, be specific:**
❌ Bad:
```
You: Explain Rust
```
✅ Good:
```
You: Explain Rust's ownership system with a simple borrowing example. I want to understand why it prevents memory leaks.
```
### Multi-step Conversations
```
You: Let's design a REST API
Yuuki: Sure, what kind of API?
You: For managing TODO-style tasks
Yuuki: Great, here are the endpoints...
```
### Using Presets Dynamically
- **Creative preset**: "Write a horror story"
- **Precise preset**: "What is the complexity of quicksort?"
- **Balanced preset**: "Explain how Git works"
---
## 🔧 Troubleshooting
### Error: "No models found"
**Solution:**
```bash
# Check that you actually have models
ls ~/.yuuki/models/
# If it is empty, download one
yuy download Yuuki-best
# Or point to a different directory
yuy-chat --models-dir /path/to/models
```
### Error: "llama.cpp binary not found"
**Solution:**
```bash
# Termux
pkg install llama-cpp
# macOS
brew install llama.cpp
# Linux - check that it is on your PATH
which llama-cli
# If not, install it or add it to PATH
export PATH=$PATH:/path/to/llama-cpp
```
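yuy-chat probes for a `llama-cli`, `llama`, or `main` binary on your PATH (see `find_llama_binary` in `src/models/runtime.rs`). A quick standalone check using the same `which` crate:
```rust
// Reproduces the lookup done by ModelRuntime::find_llama_binary.
fn main() {
    for binary in ["llama-cli", "llama", "main"] {
        match which::which(binary) {
            Ok(path) => println!("found {binary} at {}", path.display()),
            Err(_) => println!("{binary} is not on PATH"),
        }
    }
}
```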
### Error: "Permission denied" (llamafile)
**Solution:**
```bash
chmod +x ~/.yuuki/models/*.llamafile
```
### Chat not responding / freezing
**Diagnosis:**
1. Check that llama.cpp works on its own:
```bash
llama-cli -m ~/.yuuki/models/Yuuki-best.gguf -p "Hola"
```
2. Check the logs:
```bash
RUST_LOG=debug yuy-chat
```
3. Reduce the context size if you are running out of RAM
### Very Slow Responses
**Common causes:**
- Model too large for your RAM
- High-precision quantization (F32, Q8)
- No GPU acceleration
**Solution:**
```bash
# Download a smaller quantized version
yuy download Yuuki-best --quant q4_0
# Check available RAM
free -h # Linux
top # macOS/Linux
```
### I Can't Type Long Messages
The input box has a visual limit but **no content limit**:
- Use `Shift+Enter` for multi-line input
- Scrolling is automatic after 5 lines
- Or write in an external editor and paste
### HuggingFace API Not Working
**Check:**
```bash
# Manual test
curl https://api-inference.huggingface.co/models/OpceanAI/Yuuki-best \
-H "Authorization: Bearer YOUR_TOKEN" \
-d '{"inputs": "test"}'
```
**Common problems:**
- Expired token → Generate a new one
- Rate limit → Wait or upgrade your plan
- Private model → Check your permissions
---
## 📊 Performance Tips
### Termux/Mobile
```bash
# Use small models
yuy download Yuuki-best --quant q4_0
# Prefer the Balanced or Precise preset
# Creative is slower
```
### High-end Desktop
```bash
# Use Q8 or F32 for better quality
yuy download Yuuki-best --quant q8_0
# Enable GPU offloading in llama.cpp
llama-cli -m model.gguf -ngl 32 # 32 layers on the GPU
```
---
## 🎓 Use Cases
### 1. Coding Assistant
```
Preset: Precise
You: How do I implement an HTTP server in Rust?
You: Show an example with tokio
You: Add error handling
You: Now add logging
```
### 2. Creative Writing
```
Preset: Creative
You: Write the opening of a science fiction novel set on Mars in the year 2157
You: Continue by describing the protagonist
You: What conflict do they face?
```
### 3. Learning/Study
```
Preset: Balanced
You: Explain the difference between a mutex and a semaphore
You: Give me an example of when to use each one
You: What happens if I don't use synchronization?
```
---
## 🚀 Recommended Workflow
### Developer
```bash
# Morning: Coding
yuy-chat # Preset: Precise
> Help with bugs, architecture, code
# Afternoon: Docs
yuy-chat # Preset: Balanced
> Write documentation, READMEs
# Evening: Ideas
yuy-chat # Preset: Creative
> Brainstorm features
```
### Writer
```bash
yuy-chat # Preset: Creative
> Generate ideas
> Write drafts
> Get feedback on stories
```
### Student
```bash
yuy-chat # Preset: Balanced
> Explanations of concepts
> Resolve doubts
> Prepare for exams
```
---
**Questions? Open an issue on GitHub!**
🌸 Made with love by the Yuuki team

227
yuy-chat-complete/src/app.rs Normal file

@@ -0,0 +1,227 @@
use crate::config::{Config, Preset};
use crate::conversation::{Conversation, Message};
use crate::models::{Model, ModelScanner, ModelRuntime};
use anyhow::Result;
use std::sync::Arc;
use tokio::sync::Mutex;
#[derive(Debug, Clone, PartialEq)]
pub enum AppState {
ModelSelector,
Chat,
Menu,
Settings,
ConversationList,
}
pub struct App {
pub state: AppState,
pub config: Config,
// Models
pub models: Vec<Model>,
pub selected_model_idx: usize,
pub current_model: Option<Model>,
pub runtime: Option<Arc<Mutex<ModelRuntime>>>,
// Chat
pub conversation: Conversation,
pub input: String,
pub scroll_offset: usize,
pub is_streaming: bool,
// Conversations history
pub saved_conversations: Vec<String>,
pub selected_conversation_idx: usize,
// Settings
pub selected_setting_idx: usize,
// Preset
pub current_preset: Preset,
}
impl App {
pub async fn new() -> Result<Self> {
let config = Config::load()?;
let scanner = ModelScanner::new();
let models = scanner.scan_all(&config).await?;
let saved_conversations = Conversation::list_saved()?;
Ok(Self {
// Always start in the model selector, whether or not models were found.
state: AppState::ModelSelector,
config,
models,
selected_model_idx: 0,
current_model: None,
runtime: None,
conversation: Conversation::new(),
input: String::new(),
scroll_offset: 0,
is_streaming: false,
saved_conversations,
selected_conversation_idx: 0,
selected_setting_idx: 0,
current_preset: Preset::Balanced,
})
}
pub fn previous_model(&mut self) {
if self.selected_model_idx > 0 {
self.selected_model_idx -= 1;
}
}
pub fn next_model(&mut self) {
if self.selected_model_idx < self.models.len().saturating_sub(1) {
self.selected_model_idx += 1;
}
}
pub async fn refresh_models(&mut self) -> Result<()> {
let scanner = ModelScanner::new();
self.models = scanner.scan_all(&self.config).await?;
self.selected_model_idx = 0;
Ok(())
}
pub async fn load_selected_model(&mut self) -> Result<()> {
if let Some(model) = self.models.get(self.selected_model_idx).cloned() {
let runtime = ModelRuntime::new(model.clone(), self.current_preset.clone()).await?;
self.current_model = Some(model);
self.runtime = Some(Arc::new(Mutex::new(runtime)));
self.state = AppState::Chat;
}
Ok(())
}
pub async fn send_message(&mut self) -> Result<()> {
if self.input.trim().is_empty() {
return Ok(());
}
let user_message = self.input.clone();
self.conversation.add_message(Message::user(user_message.clone()));
self.input.clear();
if let Some(runtime) = &self.runtime {
self.is_streaming = true;
let runtime = runtime.clone();
let user_msg = user_message.clone();
tokio::spawn(async move {
let mut rt = runtime.lock().await;
if let Err(e) = rt.generate(&user_msg).await {
tracing::error!("Error generating response: {:?}", e);
}
});
}
Ok(())
}
pub async fn poll_response(&mut self) -> Result<Option<String>> {
if let Some(runtime) = &self.runtime {
let mut rt = runtime.lock().await;
Ok(rt.poll_chunk().await?)
} else {
Ok(None)
}
}
pub fn handle_response_chunk(&mut self, chunk: String) {
if chunk == "[DONE]" {
self.is_streaming = false;
return;
}
if let Some(last_msg) = self.conversation.messages.last_mut() {
if last_msg.role == "assistant" {
last_msg.content.push_str(&chunk);
} else {
self.conversation.add_message(Message::assistant(chunk));
}
} else {
self.conversation.add_message(Message::assistant(chunk));
}
}
pub fn clear_chat(&mut self) {
self.conversation = Conversation::new();
self.scroll_offset = 0;
}
pub fn scroll_up(&mut self) {
self.scroll_offset = self.scroll_offset.saturating_sub(1);
}
pub fn scroll_down(&mut self) {
self.scroll_offset = self.scroll_offset.saturating_add(1);
}
pub fn cycle_preset(&mut self) {
self.current_preset = match self.current_preset {
Preset::Creative => Preset::Balanced,
Preset::Balanced => Preset::Precise,
Preset::Precise => Preset::Creative,
};
}
pub fn save_conversation(&mut self) -> Result<()> {
let filename = self.conversation.save()?;
self.saved_conversations.push(filename);
Ok(())
}
pub fn previous_conversation(&mut self) {
if self.selected_conversation_idx > 0 {
self.selected_conversation_idx -= 1;
}
}
pub fn next_conversation(&mut self) {
if self.selected_conversation_idx < self.saved_conversations.len().saturating_sub(1) {
self.selected_conversation_idx += 1;
}
}
pub fn load_selected_conversation(&mut self) -> Result<()> {
if let Some(filename) = self.saved_conversations.get(self.selected_conversation_idx) {
self.conversation = Conversation::load(filename)?;
}
Ok(())
}
pub fn delete_selected_conversation(&mut self) -> Result<()> {
if let Some(filename) = self.saved_conversations.get(self.selected_conversation_idx) {
Conversation::delete(filename)?;
self.saved_conversations.remove(self.selected_conversation_idx);
if self.selected_conversation_idx > 0 {
self.selected_conversation_idx -= 1;
}
}
Ok(())
}
pub fn previous_setting(&mut self) {
if self.selected_setting_idx > 0 {
self.selected_setting_idx -= 1;
}
}
pub fn next_setting(&mut self) {
if self.selected_setting_idx < 5 {
self.selected_setting_idx += 1;
}
}
pub fn edit_setting(&mut self) {
// Placeholder for setting editing
// Would open input dialog
}
}

112
yuy-chat-complete/src/config.rs Normal file

@@ -0,0 +1,112 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub models_dir: PathBuf,
pub hf_token: Option<String>,
pub default_preset: Preset,
pub save_history: bool,
pub theme: Theme,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum Preset {
Creative,
Balanced,
Precise,
}
impl Preset {
pub fn temperature(&self) -> f32 {
match self {
Preset::Creative => 0.8,
Preset::Balanced => 0.6,
Preset::Precise => 0.3,
}
}
pub fn top_p(&self) -> f32 {
match self {
Preset::Creative => 0.9,
Preset::Balanced => 0.7,
Preset::Precise => 0.5,
}
}
pub fn as_str(&self) -> &str {
match self {
Preset::Creative => "Creative",
Preset::Balanced => "Balanced",
Preset::Precise => "Precise",
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Theme {
Dark,
Light,
}
impl Default for Config {
fn default() -> Self {
let home = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
Self {
models_dir: home.join(".yuuki").join("models"),
hf_token: None,
default_preset: Preset::Balanced,
save_history: true,
theme: Theme::Dark,
}
}
}
impl Config {
pub fn load() -> Result<Self> {
let config_path = Self::config_path()?;
if config_path.exists() {
let content = fs::read_to_string(&config_path)?;
let config: Config = toml::from_str(&content)?;
Ok(config)
} else {
let config = Config::default();
config.save()?;
Ok(config)
}
}
pub fn save(&self) -> Result<()> {
let config_path = Self::config_path()?;
if let Some(parent) = config_path.parent() {
fs::create_dir_all(parent)?;
}
let content = toml::to_string_pretty(self)?;
fs::write(&config_path, content)?;
Ok(())
}
fn config_path() -> Result<PathBuf> {
let config_dir = dirs::config_dir()
.context("Could not find config directory")?
.join("yuy-chat");
fs::create_dir_all(&config_dir)?;
Ok(config_dir.join("config.toml"))
}
pub fn conversations_dir() -> Result<PathBuf> {
let config_dir = dirs::config_dir()
.context("Could not find config directory")?
.join("yuy-chat")
.join("conversations");
fs::create_dir_all(&config_dir)?;
Ok(config_dir)
}
}

120
yuy-chat-complete/src/conversation.rs Normal file

@@ -0,0 +1,120 @@
use anyhow::Result;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::fs;
use crate::config::Config;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
pub role: String,
pub content: String,
pub timestamp: DateTime<Utc>,
}
impl Message {
pub fn user(content: String) -> Self {
Self {
role: "user".to_string(),
content,
timestamp: Utc::now(),
}
}
pub fn assistant(content: String) -> Self {
Self {
role: "assistant".to_string(),
content,
timestamp: Utc::now(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Conversation {
pub messages: Vec<Message>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
impl Conversation {
pub fn new() -> Self {
let now = Utc::now();
Self {
messages: Vec::new(),
created_at: now,
updated_at: now,
}
}
pub fn add_message(&mut self, message: Message) {
self.messages.push(message);
self.updated_at = Utc::now();
}
pub fn save(&self) -> Result<String> {
let conversations_dir = Config::conversations_dir()?;
let filename = format!("conversation-{}.json", self.created_at.format("%Y%m%d-%H%M%S"));
let path = conversations_dir.join(&filename);
let json = serde_json::to_string_pretty(self)?;
fs::write(&path, json)?;
Ok(filename)
}
pub fn load(filename: &str) -> Result<Self> {
let conversations_dir = Config::conversations_dir()?;
let path = conversations_dir.join(filename);
let content = fs::read_to_string(&path)?;
let conversation: Conversation = serde_json::from_str(&content)?;
Ok(conversation)
}
pub fn list_saved() -> Result<Vec<String>> {
let conversations_dir = Config::conversations_dir()?;
let mut conversations = Vec::new();
if conversations_dir.exists() {
for entry in fs::read_dir(&conversations_dir)? {
let entry = entry?;
if entry.path().extension().map_or(false, |e| e == "json") {
if let Some(filename) = entry.file_name().to_str() {
conversations.push(filename.to_string());
}
}
}
}
conversations.sort();
conversations.reverse(); // Most recent first
Ok(conversations)
}
pub fn delete(filename: &str) -> Result<()> {
let conversations_dir = Config::conversations_dir()?;
let path = conversations_dir.join(filename);
if path.exists() {
fs::remove_file(&path)?;
}
Ok(())
}
pub fn get_summary(&self) -> String {
if let Some(first_msg) = self.messages.first() {
let preview = first_msg.content.chars().take(50).collect::<String>();
if first_msg.content.len() > 50 {
format!("{}...", preview)
} else {
preview
}
} else {
"Empty conversation".to_string()
}
}
}

193
yuy-chat-complete/src/main.rs Normal file

@@ -0,0 +1,193 @@
mod app;
mod config;
mod conversation;
mod models;
mod ui;
use anyhow::Result;
use app::{App, AppState};
use crossterm::{
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyModifiers},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{
backend::CrosstermBackend,
Terminal,
};
use std::io;
use tracing_subscriber;
#[tokio::main]
async fn main() -> Result<()> {
// Initialize logging
tracing_subscriber::fmt()
.with_max_level(tracing::Level::INFO)
.init();
// Setup terminal
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
// Create app
let mut app = App::new().await?;
// Run app
let res = run_app(&mut terminal, &mut app).await;
// Restore terminal
disable_raw_mode()?;
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture
)?;
terminal.show_cursor()?;
if let Err(err) = res {
eprintln!("Error: {:?}", err);
}
Ok(())
}
async fn run_app<B: ratatui::backend::Backend>(
terminal: &mut Terminal<B>,
app: &mut App,
) -> Result<()> {
loop {
terminal.draw(|f| ui::render(f, app))?;
if event::poll(std::time::Duration::from_millis(100))? {
if let Event::Key(key) = event::read()? {
match app.state {
AppState::ModelSelector => {
match key.code {
KeyCode::Char('q') => return Ok(()),
KeyCode::Up | KeyCode::Char('k') => app.previous_model(),
KeyCode::Down | KeyCode::Char('j') => app.next_model(),
KeyCode::Enter => {
app.load_selected_model().await?;
}
KeyCode::Char('r') => {
app.refresh_models().await?;
}
_ => {}
}
}
AppState::Chat => {
match (key.modifiers, key.code) {
// Ctrl+C: Open menu
(KeyModifiers::CONTROL, KeyCode::Char('c')) => {
app.state = AppState::Menu;
}
// Ctrl+L: Clear chat
(KeyModifiers::CONTROL, KeyCode::Char('l')) => {
app.clear_chat();
}
// Ctrl+S: Save conversation
(KeyModifiers::CONTROL, KeyCode::Char('s')) => {
app.save_conversation()?;
}
// Enter: Send message
(_, KeyCode::Enter) if !key.modifiers.contains(KeyModifiers::SHIFT) => {
app.send_message().await?;
}
// Shift+Enter: New line
(KeyModifiers::SHIFT, KeyCode::Enter) => {
app.input.push('\n');
}
// Ctrl+Enter: Send (always)
(KeyModifiers::CONTROL, KeyCode::Enter) => {
app.send_message().await?;
}
// Backspace
(_, KeyCode::Backspace) => {
app.input.pop();
}
// Character input
(_, KeyCode::Char(c)) => {
app.input.push(c);
}
// Up arrow: Scroll chat up
(_, KeyCode::Up) if app.input.is_empty() => {
app.scroll_up();
}
// Down arrow: Scroll chat down
(_, KeyCode::Down) if app.input.is_empty() => {
app.scroll_down();
}
_ => {}
}
}
AppState::Menu => {
match key.code {
KeyCode::Char('q') | KeyCode::Esc => {
app.state = AppState::Chat;
}
KeyCode::Char('1') => {
app.state = AppState::ModelSelector;
}
KeyCode::Char('2') => {
app.cycle_preset();
}
KeyCode::Char('3') => {
app.save_conversation()?;
app.state = AppState::Chat;
}
KeyCode::Char('4') => {
app.state = AppState::ConversationList;
}
KeyCode::Char('5') => {
app.clear_chat();
app.state = AppState::Chat;
}
KeyCode::Char('6') => {
app.state = AppState::Settings;
}
_ => {}
}
}
AppState::Settings => {
match key.code {
KeyCode::Esc | KeyCode::Char('q') => {
app.state = AppState::Menu;
}
KeyCode::Up => app.previous_setting(),
KeyCode::Down => app.next_setting(),
KeyCode::Enter => app.edit_setting(),
_ => {}
}
}
AppState::ConversationList => {
match key.code {
KeyCode::Esc | KeyCode::Char('q') => {
app.state = AppState::Menu;
}
KeyCode::Up => app.previous_conversation(),
KeyCode::Down => app.next_conversation(),
KeyCode::Enter => {
app.load_selected_conversation()?;
app.state = AppState::Chat;
}
KeyCode::Char('d') => {
app.delete_selected_conversation()?;
}
_ => {}
}
}
}
}
}
// Handle streaming responses
if app.is_streaming {
if let Some(response) = app.poll_response().await? {
app.handle_response_chunk(response);
}
}
}
}

70
yuy-chat-complete/src/models/hf_api.rs Normal file

@@ -0,0 +1,70 @@
use anyhow::Result;
use reqwest::Client;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize)]
struct HFRequest {
inputs: String,
parameters: HFParameters,
}
#[derive(Debug, Serialize)]
struct HFParameters {
temperature: f32,
top_p: f32,
max_new_tokens: u32,
}
#[derive(Debug, Deserialize)]
struct HFResponse {
generated_text: String,
}
pub struct HuggingFaceAPI {
client: Client,
token: String,
model: String,
}
impl HuggingFaceAPI {
pub fn new(token: String, org: String, model: String) -> Self {
Self {
client: Client::new(),
token,
model: format!("{}/{}", org, model),
}
}
pub async fn generate(&self, prompt: &str, temperature: f32, top_p: f32) -> Result<String> {
let url = format!("https://api-inference.huggingface.co/models/{}", self.model);
let request = HFRequest {
inputs: prompt.to_string(),
parameters: HFParameters {
temperature,
top_p,
max_new_tokens: 512,
},
};
let response = self
.client
.post(&url)
.header("Authorization", format!("Bearer {}", self.token))
.json(&request)
.send()
.await?;
if !response.status().is_success() {
anyhow::bail!("HuggingFace API error: {}", response.status());
}
let hf_response: Vec<HFResponse> = response.json().await?;
if let Some(first) = hf_response.first() {
Ok(first.generated_text.clone())
} else {
Ok(String::new())
}
}
}

7
yuy-chat-complete/src/models/mod.rs Normal file

@@ -0,0 +1,7 @@
mod scanner;
mod runtime;
mod hf_api;
pub use scanner::{Model, ModelFormat, ModelScanner, ModelSource};
pub use runtime::ModelRuntime;
pub use hf_api::HuggingFaceAPI;

146
yuy-chat-complete/src/models/runtime.rs Normal file

@@ -0,0 +1,146 @@
use super::{Model, ModelFormat, ModelSource};
use crate::config::Preset;
use anyhow::{Context, Result};
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::{Child, Command};
use tokio::sync::mpsc;
pub struct ModelRuntime {
model: Model,
preset: Preset,
process: Option<Child>,
response_rx: Option<mpsc::Receiver<String>>,
}
impl ModelRuntime {
pub async fn new(model: Model, preset: Preset) -> Result<Self> {
Ok(Self {
model,
preset,
process: None,
response_rx: None,
})
}
pub async fn generate(&mut self, prompt: &str) -> Result<()> {
match &self.model.source {
ModelSource::Local(_) => self.generate_local(prompt).await,
ModelSource::HuggingFace { .. } => self.generate_hf(prompt).await,
}
}
async fn generate_local(&mut self, prompt: &str) -> Result<()> {
let mut command = match self.model.format {
ModelFormat::GGUF => self.build_llama_cpp_command(prompt)?,
ModelFormat::Llamafile => self.build_llamafile_command(prompt)?,
};
let (tx, rx) = mpsc::channel(100);
self.response_rx = Some(rx);
tokio::spawn(async move {
if let Ok(mut child) = command.spawn() {
if let Some(stdout) = child.stdout.take() {
let reader = BufReader::new(stdout);
let mut lines = reader.lines();
while let Ok(Some(line)) = lines.next_line().await {
if tx.send(line).await.is_err() {
break;
}
}
}
let _ = child.wait().await;
let _ = tx.send("[DONE]".to_string()).await;
}
});
Ok(())
}
fn build_llama_cpp_command(&self, prompt: &str) -> Result<Command> {
let llama_cmd = self.find_llama_binary()?;
let mut cmd = Command::new(llama_cmd);
cmd.arg("-m")
.arg(&self.model.path)
.arg("--temp")
.arg(self.preset.temperature().to_string())
.arg("--top-p")
.arg(self.preset.top_p().to_string())
.arg("-c")
.arg("4096")
.arg("-p")
.arg(prompt)
.stdout(Stdio::piped())
.stderr(Stdio::null());
Ok(cmd)
}
fn build_llamafile_command(&self, prompt: &str) -> Result<Command> {
let mut cmd = Command::new(&self.model.path);
cmd.arg("--temp")
.arg(self.preset.temperature().to_string())
.arg("--top-p")
.arg(self.preset.top_p().to_string())
.arg("-c")
.arg("4096")
.arg("-p")
.arg(prompt)
.stdout(Stdio::piped())
.stderr(Stdio::null());
Ok(cmd)
}
fn find_llama_binary(&self) -> Result<String> {
for binary in &["llama-cli", "llama", "main"] {
if which::which(binary).is_ok() {
return Ok(binary.to_string());
}
}
anyhow::bail!("llama.cpp binary not found. Install with: yuy runtime install llama-cpp")
}
async fn generate_hf(&mut self, prompt: &str) -> Result<()> {
// Placeholder for HuggingFace API call
let (tx, rx) = mpsc::channel(100);
self.response_rx = Some(rx);
let prompt_owned = prompt.to_string();
tokio::spawn(async move {
// Simulated streaming response
let response = format!("Response to: {}", prompt_owned);
for word in response.split_whitespace() {
let _ = tx.send(format!("{} ", word)).await;
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
}
let _ = tx.send("[DONE]".to_string()).await;
});
Ok(())
}
pub async fn poll_chunk(&mut self) -> Result<Option<String>> {
if let Some(rx) = &mut self.response_rx {
Ok(rx.recv().await)
} else {
Ok(None)
}
}
}
impl Drop for ModelRuntime {
fn drop(&mut self) {
if let Some(mut process) = self.process.take() {
let _ = process.start_kill();
}
}
}

148
yuy-chat-complete/src/models/scanner.rs Normal file

@@ -0,0 +1,148 @@
use crate::config::Config;
use anyhow::Result;
use std::path::PathBuf;
use walkdir::WalkDir;
#[derive(Debug, Clone)]
pub enum ModelSource {
Local(PathBuf),
HuggingFace { org: String, model: String },
}
#[derive(Debug, Clone)]
pub struct Model {
pub name: String,
pub path: PathBuf,
pub source: ModelSource,
pub format: ModelFormat,
pub size: u64,
}
#[derive(Debug, Clone, PartialEq)]
pub enum ModelFormat {
GGUF,
Llamafile,
}
impl Model {
pub fn display_name(&self) -> String {
format!("{} ({}) [{}]",
self.name,
format_size(self.size),
match &self.source {
ModelSource::Local(_) => "Local",
ModelSource::HuggingFace { .. } => "HuggingFace API",
}
)
}
pub fn format_name(&self) -> &str {
match self.format {
ModelFormat::GGUF => "GGUF",
ModelFormat::Llamafile => "Llamafile",
}
}
}
pub struct ModelScanner;
impl ModelScanner {
pub fn new() -> Self {
Self
}
pub async fn scan_all(&self, config: &Config) -> Result<Vec<Model>> {
let mut models = Vec::new();
// Scan local models
models.extend(self.scan_local(&config.models_dir)?);
// Scan HuggingFace if token is available
if let Some(token) = &config.hf_token {
if let Ok(hf_models) = self.scan_huggingface(token).await {
models.extend(hf_models);
}
}
Ok(models)
}
fn scan_local(&self, models_dir: &PathBuf) -> Result<Vec<Model>> {
let mut models = Vec::new();
if !models_dir.exists() {
return Ok(models);
}
for entry in WalkDir::new(models_dir)
.max_depth(3)
.into_iter()
.filter_map(|e| e.ok())
{
let path = entry.path();
if !path.is_file() {
continue;
}
let extension = path.extension().and_then(|s| s.to_str());
let format = match extension {
Some("gguf") => ModelFormat::GGUF,
Some("llamafile") => ModelFormat::Llamafile,
_ => continue,
};
let name = path
.file_stem()
.and_then(|s| s.to_str())
.unwrap_or("Unknown")
.to_string();
let size = path.metadata().map(|m| m.len()).unwrap_or(0);
models.push(Model {
name,
path: path.to_path_buf(),
source: ModelSource::Local(path.to_path_buf()),
format,
size,
});
}
Ok(models)
}
async fn scan_huggingface(&self, _token: &str) -> Result<Vec<Model>> {
// Placeholder for HuggingFace API integration
// Would query API for available Yuuki models
let hf_models = vec![
Model {
name: "Yuuki-best (HF API)".to_string(),
path: PathBuf::from(""),
source: ModelSource::HuggingFace {
org: "OpceanAI".to_string(),
model: "Yuuki-best".to_string(),
},
format: ModelFormat::GGUF,
size: 0,
},
];
Ok(hf_models)
}
}
fn format_size(bytes: u64) -> String {
const GB: u64 = 1024 * 1024 * 1024;
const MB: u64 = 1024 * 1024;
if bytes >= GB {
format!("{:.2} GB", bytes as f64 / GB as f64)
} else if bytes >= MB {
format!("{:.2} MB", bytes as f64 / MB as f64)
} else {
format!("{} B", bytes)
}
}

132
yuy-chat-complete/src/ui/chat.rs Normal file

@@ -0,0 +1,132 @@
use crate::app::App;
use ratatui::{
layout::{Alignment, Constraint, Direction, Layout},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, Paragraph, Wrap},
Frame,
};
pub fn render(f: &mut Frame, app: &App) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(3), // Header
Constraint::Min(0), // Messages
Constraint::Length(5), // Input
Constraint::Length(1), // Help
])
.split(f.area());
// Header
render_header(f, app, chunks[0]);
// Messages
render_messages(f, app, chunks[1]);
// Input box
render_input(f, app, chunks[2]);
// Help bar
render_help(f, chunks[3]);
}
fn render_header(f: &mut Frame, app: &App, area: ratatui::layout::Rect) {
let model_name = app
.current_model
.as_ref()
.map(|m| m.name.clone())
.unwrap_or_else(|| "No model".to_string());
let tokens = app.conversation.messages.iter()
.map(|m| m.content.len())
.sum::<usize>();
let header_text = format!(
"Model: {} | Preset: {} | Tokens: {}/4096 | Messages: {}",
model_name,
app.current_preset.as_str(),
tokens,
app.conversation.messages.len()
);
let header = Paragraph::new(header_text)
.style(Style::default().fg(Color::Cyan))
.alignment(Alignment::Left)
.block(
Block::default()
.borders(Borders::ALL)
.title("yuy-chat")
.style(Style::default().fg(Color::Magenta)),
);
f.render_widget(header, area);
}
fn render_messages(f: &mut Frame, app: &App, area: ratatui::layout::Rect) {
let mut lines = Vec::new();
for msg in &app.conversation.messages {
let (prefix, style) = if msg.role == "user" {
("You: ", Style::default().fg(Color::Blue).add_modifier(Modifier::BOLD))
} else {
("Yuuki: ", Style::default().fg(Color::Green).add_modifier(Modifier::BOLD))
};
lines.push(Line::from(vec![
Span::styled(prefix, style),
Span::raw(&msg.content),
]));
lines.push(Line::from(""));
}
// Add streaming indicator
if app.is_streaming {
lines.push(Line::from(vec![
Span::styled("Yuuki: ", Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)),
Span::styled("●●●", Style::default().fg(Color::Yellow)),
]));
}
let messages = Paragraph::new(lines)
.block(Block::default().borders(Borders::ALL))
.wrap(Wrap { trim: false })
.scroll((app.scroll_offset as u16, 0));
f.render_widget(messages, area);
}
fn render_input(f: &mut Frame, app: &App, area: ratatui::layout::Rect) {
let input_lines: Vec<Line> = app
.input
.split('\n')
.map(|line| Line::from(line.to_string()))
.collect();
let input_widget = Paragraph::new(input_lines)
.style(Style::default().fg(Color::White))
.block(
Block::default()
.borders(Borders::ALL)
.title("Message")
.style(Style::default().fg(Color::Yellow)),
)
.wrap(Wrap { trim: false });
f.render_widget(input_widget, area);
// Set cursor position
f.set_cursor_position((
area.x + 1 + (app.input.len() as u16 % (area.width - 2)),
area.y + 1 + (app.input.len() as u16 / (area.width - 2)),
));
}
fn render_help(f: &mut Frame, area: ratatui::layout::Rect) {
let help = Paragraph::new("Enter: Send | Shift+Enter: New line | Ctrl+C: Menu | Ctrl+L: Clear | Ctrl+S: Save")
.style(Style::default().fg(Color::Gray))
.alignment(Alignment::Center);
f.render_widget(help, area);
}

67
yuy-chat-complete/src/ui/conversations.rs Normal file

@@ -0,0 +1,67 @@
use crate::app::App;
use ratatui::{
layout::{Alignment, Constraint, Direction, Layout},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, Paragraph},
Frame,
};
pub fn render(f: &mut Frame, app: &App) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(3),
Constraint::Min(0),
Constraint::Length(3),
])
.split(f.area());
// Title
let title = Paragraph::new("Saved Conversations")
.style(Style::default().fg(Color::Magenta).add_modifier(Modifier::BOLD))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(title, chunks[0]);
// Conversation list
let items: Vec<ListItem> = if app.saved_conversations.is_empty() {
vec![ListItem::new(Line::from(Span::styled(
"No saved conversations",
Style::default().fg(Color::Gray),
)))]
} else {
app.saved_conversations
.iter()
.enumerate()
.map(|(i, filename)| {
let content = if i == app.selected_conversation_idx {
Line::from(vec![
Span::styled("> ", Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)),
Span::styled(filename, Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)),
])
} else {
Line::from(vec![
Span::raw(" "),
Span::raw(filename),
])
};
ListItem::new(content)
})
.collect()
};
let list = List::new(items)
.block(Block::default().borders(Borders::ALL));
f.render_widget(list, chunks[1]);
// Help
let help = Paragraph::new("↑/↓: Navigate | Enter: Load | D: Delete | Esc: Back")
.style(Style::default().fg(Color::Gray))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(help, chunks[2]);
}

73
yuy-chat-complete/src/ui/menu.rs Normal file

@@ -0,0 +1,73 @@
use crate::app::App;
use ratatui::{
layout::{Alignment, Constraint, Direction, Layout},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, Paragraph},
Frame,
};
pub fn render(f: &mut Frame, app: &App) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(3),
Constraint::Min(0),
Constraint::Length(3),
])
.split(f.area());
// Title
let title = Paragraph::new("Menu")
.style(Style::default().fg(Color::Magenta).add_modifier(Modifier::BOLD))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(title, chunks[0]);
// Menu items
let items = vec![
ListItem::new(Line::from(vec![
Span::styled("1. ", Style::default().fg(Color::Yellow)),
Span::raw("Change Model"),
])),
ListItem::new(Line::from(vec![
Span::styled("2. ", Style::default().fg(Color::Yellow)),
Span::raw(format!("Change Preset (Current: {})", app.current_preset.as_str())),
])),
ListItem::new(Line::from(vec![
Span::styled("3. ", Style::default().fg(Color::Yellow)),
Span::raw("Save Conversation"),
])),
ListItem::new(Line::from(vec![
Span::styled("4. ", Style::default().fg(Color::Yellow)),
Span::raw("Load Conversation"),
])),
ListItem::new(Line::from(vec![
Span::styled("5. ", Style::default().fg(Color::Yellow)),
Span::raw("Clear Chat"),
])),
ListItem::new(Line::from(vec![
Span::styled("6. ", Style::default().fg(Color::Yellow)),
Span::raw("Settings"),
])),
ListItem::new(""),
ListItem::new(Line::from(vec![
Span::styled("Q. ", Style::default().fg(Color::Red)),
Span::raw("Back to Chat"),
])),
];
let list = List::new(items)
.block(Block::default().borders(Borders::ALL).title("Options"));
f.render_widget(list, chunks[1]);
// Help
let help = Paragraph::new("Press number key or Q to go back")
.style(Style::default().fg(Color::Gray))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(help, chunks[2]);
}

22
yuy-chat-complete/src/ui/mod.rs Normal file

@@ -0,0 +1,22 @@
mod selector;
mod chat;
mod menu;
mod settings;
mod conversations;
use crate::app::{App, AppState};
use ratatui::Frame;
pub fn render(f: &mut Frame, app: &App) {
match app.state {
AppState::ModelSelector => selector::render(f, app),
AppState::Chat => chat::render(f, app),
AppState::Menu => menu::render(f, app),
AppState::Settings => settings::render(f, app),
AppState::ConversationList => conversations::render(f, app),
}
}

72
yuy-chat-complete/src/ui/selector.rs Normal file

@@ -0,0 +1,72 @@
use crate::app::App;
use ratatui::{
layout::{Alignment, Constraint, Direction, Layout},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, Paragraph},
Frame,
};
pub fn render(f: &mut Frame, app: &App) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(3),
Constraint::Min(0),
Constraint::Length(3),
])
.split(f.area());
// Title
let title = Paragraph::new("yuy-chat v0.1.0")
.style(Style::default().fg(Color::Magenta).add_modifier(Modifier::BOLD))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(title, chunks[0]);
// Model list
let items: Vec<ListItem> = app
.models
.iter()
.enumerate()
.map(|(i, model)| {
let content = if i == app.selected_model_idx {
Line::from(vec![
Span::styled("> ", Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)),
Span::styled(&model.name, Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)),
Span::raw(format!(" {} ", model.display_name())),
])
} else {
Line::from(vec![
Span::raw(" "),
Span::raw(&model.name),
Span::styled(format!(" {} ", model.display_name()), Style::default().fg(Color::Gray)),
])
};
ListItem::new(content)
})
.collect();
let list_widget = List::new(items)
.block(
Block::default()
.borders(Borders::ALL)
.title("📋 Select a Model")
.style(Style::default().fg(Color::Cyan)),
);
f.render_widget(list_widget, chunks[1]);
// Help
let help = if app.models.is_empty() {
Paragraph::new("⚠️ No models found | Download with: yuy download Yuuki-best | R: Refresh | Q: Quit")
} else {
Paragraph::new("↑/↓: Navigate | Enter: Select | R: Refresh | Q: Quit")
}
.style(Style::default().fg(Color::Gray))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(help, chunks[2]);
}

77
yuy-chat-complete/src/ui/settings.rs Normal file

@@ -0,0 +1,77 @@
use crate::app::App;
use ratatui::{
layout::{Alignment, Constraint, Direction, Layout},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, Paragraph},
Frame,
};
pub fn render(f: &mut Frame, app: &App) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(3),
Constraint::Min(0),
Constraint::Length(3),
])
.split(f.area());
// Title
let title = Paragraph::new("Settings")
.style(Style::default().fg(Color::Magenta).add_modifier(Modifier::BOLD))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(title, chunks[0]);
// Settings items
let items: Vec<ListItem> = vec![
create_setting_item(0, app, "Models Directory", &app.config.models_dir.display().to_string()),
create_setting_item(1, app, "HuggingFace Token",
if app.config.hf_token.is_some() { "hf_****..." } else { "Not set" }),
create_setting_item(2, app, "Default Preset", app.config.default_preset.as_str()),
create_setting_item(3, app, "Save History",
if app.config.save_history { "Enabled" } else { "Disabled" }),
create_setting_item(4, app, "Theme",
match app.config.theme {
crate::config::Theme::Dark => "Dark",
crate::config::Theme::Light => "Light",
}),
];
let list = List::new(items)
.block(Block::default().borders(Borders::ALL));
f.render_widget(list, chunks[1]);
// Help
let help = Paragraph::new("↑/↓: Navigate | Enter: Edit | Esc: Back")
.style(Style::default().fg(Color::Gray))
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
f.render_widget(help, chunks[2]);
}
// Owns its strings so the returned ListItem has a 'static lifetime and does not
// borrow from temporaries built at the call site.
fn create_setting_item(idx: usize, app: &App, label: &str, value: &str) -> ListItem<'static> {
let is_selected = idx == app.selected_setting_idx;
let line = if is_selected {
Line::from(vec![
Span::styled("> ", Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)),
Span::styled(label.to_string(), Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)),
Span::raw(": "),
Span::styled(value.to_string(), Style::default().fg(Color::Cyan)),
])
} else {
Line::from(vec![
Span::raw("  "),
Span::raw(label.to_string()),
Span::raw(": "),
Span::styled(value.to_string(), Style::default().fg(Color::Gray)),
])
};
ListItem::new(line)
}