Spaces:
Sleeping
Sleeping
Hearth Chat과 Lily LLM API 연동 가이드
🚀 연동 개요
Hugging Face Spaces에 배포된 Lily LLM API를 Railway에서 호스팅되는 Hearth Chat 서비스와 연동하는 방법을 설명합니다.
1. Hugging Face Spaces ๋ฐฐํฌ ์๋ฃ ํ์ธ
1.1 API ์๋ํฌ์ธํธ ํ์ธ
๋ฐฐํฌ๋ Lily LLM API URL:
https://YOUR_USERNAME-lily-llm-api.hf.space
1.2 ์ฃผ์ ์๋ํฌ์ธํธ ํ ์คํธ
# 헬스 체크
curl https://YOUR_USERNAME-lily-llm-api.hf.space/health
# 모델 목록 확인
curl https://YOUR_USERNAME-lily-llm-api.hf.space/models
# 텍스트 생성 테스트
curl -X POST https://YOUR_USERNAME-lily-llm-api.hf.space/generate \
  -F "prompt=안녕하세요! 테스트입니다."
2. Hearth Chat ์ค์ ์ ๋ฐ์ดํธ
2.1 AI ์ค์ ๋ชจ๋ฌ ์ ๋ฐ์ดํธ
hearth_chat_react/src/components/AISettingsModal.js
์์ Lily LLM ์ค์ ์ถ๊ฐ:
// Lily LLM API URL configuration — rendered only when the selected provider is 'lily'
{settings.aiProvider === 'lily' && (
<>
<div className="setting-group">
<label className="setting-label">Lily API URL:</label>
<input
type="url"
value={settings.lilyApiUrl}
onChange={(e) => handleInputChange('lilyApiUrl', e.target.value)}
placeholder="https://your-username-lily-llm-api.hf.space"
/>
</div>
<div className="setting-group">
<label className="setting-label">Lily ๋ชจ๋ธ:</label>
<select
value={settings.lilyModel}
onChange={(e) => handleInputChange('lilyModel', e.target.value)}
>
<option value="kanana-1.5-v-3b-instruct">Kanana 1.5 v3B Instruct</option>
</select>
</div>
{/* API connection status display */}
<div className="model-info">
<small style={{ color: '#4CAF50', fontWeight: 'bold' }}>
๐ Hugging Face Spaces์์ ํธ์คํ
</small>
<small style={{ color: '#666', display: 'block', marginTop: '4px' }}>
๋ฉํฐ๋ชจ๋ฌ AI ๋ชจ๋ธ (ํ
์คํธ + ์ด๋ฏธ์ง ์ฒ๋ฆฌ)
</small>
</div>
</>
)}
2.2 ์ฐ๊ฒฐ ํ ์คํธ ํจ์ ์ ๋ฐ์ดํธ
case 'lily':
testUrl = `${settings.lilyApiUrl}/health`;
testData = {
method: 'GET',
headers: {
'Accept': 'application/json'
}
};
// ์ถ๊ฐ ์์ฑ ํ
์คํธ
if (response.ok) {
const generateTestUrl = `${settings.lilyApiUrl}/generate`;
const generateResponse = await fetch(generateTestUrl, {
method: 'POST',
body: new FormData([
['prompt', '์ฐ๊ฒฐ ํ
์คํธ์
๋๋ค.']
])
});
if (generateResponse.ok) {
const result = await generateResponse.json();
console.log('Lily LLM ์์ฑ ํ
์คํธ ์ฑ๊ณต:', result);
}
}
break;
3. ๋ฐฑ์๋ ์ฐ๋ ์ ๋ฐ์ดํธ
3.1 Consumers.py ์์
hearth_chat_django/chat/consumers.py
์์ Lily LLM API ํธ์ถ ๋ถ๋ถ:
async def call_lily_api(user_message, user_emotion, image_urls=None, documents=None, consumer=None):
    """Call the Lily LLM API hosted on Hugging Face Spaces.

    Args:
        user_message: Chat message sent as the prompt body.
        user_emotion: Detected user emotion, folded into the prompt.
        image_urls: Optional list of image URLs; at most 4 are attached.
        documents: Optional documents (accepted but not used by this call).
        consumer: Optional Channels consumer exposing ``scope`` and
            ``get_user_ai_settings``. The original body referenced an
            undefined ``self`` at module level, which raised NameError;
            pass the consumer explicitly instead.

    Returns:
        dict with ``response``, ``provider``, ``ai_name``, ``ai_type``.

    Raises:
        Exception: when the API answers with a non-200 status.
    """
    import aiohttp  # local import keeps the module importable without aiohttp

    try:
        # Per-user settings, when an authenticated user is available.
        ai_settings = None
        if consumer is not None:
            user = getattr(consumer, 'scope', {}).get('user', None)
            if user and hasattr(user, 'is_authenticated') and user.is_authenticated:
                ai_settings = await consumer.get_user_ai_settings(user)
        ai_settings = ai_settings or {}

        # API URL / model (defaults point at the Hugging Face Space).
        lily_api_url = ai_settings.get('lilyApiUrl', 'https://gbrabbit-lily-math-rag.hf.space')
        lily_model = ai_settings.get('lilyModel', 'kanana-1.5-v-3b-instruct')
        generate_url = f"{lily_api_url}/generate"

        # The original used an undefined `emotion_prompt`; build it here.
        emotion_prompt = f"사용자 감정: {user_emotion}" if user_emotion else ""
        data = {
            'prompt': f"{emotion_prompt}\n\n사용자 메시지: {user_message}",
            'max_length': 200,
            'temperature': 0.7,
        }

        # Download up to 4 images and stage them as multipart attachments.
        files = {}
        if image_urls:
            print(f"🖼️ 이미지 처리: {len(image_urls)}개")
            async with aiohttp.ClientSession() as session:
                for i, image_url in enumerate(image_urls[:4]):  # at most 4 images
                    try:
                        async with session.get(image_url) as img_response:
                            if img_response.status == 200:
                                image_data = await img_response.read()
                                files[f'image{i+1}'] = ('image.jpg', image_data, 'image/jpeg')
                    except Exception as e:
                        # Best effort: a failed image download must not abort the call.
                        print(f"❌ 이미지 {i+1} 로드 실패: {e}")

        timeout = aiohttp.ClientTimeout(total=120)  # 2-minute overall timeout
        async with aiohttp.ClientSession(timeout=timeout) as session:
            if files:
                # Multipart request (text fields + image parts).
                payload = aiohttp.FormData()
                for key, value in data.items():
                    payload.add_field(key, str(value))
                for key, (filename, file_data, content_type) in files.items():
                    payload.add_field(key, file_data, filename=filename, content_type=content_type)
            else:
                # Plain form POST (text only).
                payload = data
            # Single response path replaces the two duplicated branches of
            # the original implementation.
            async with session.post(generate_url, data=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise Exception(f"Lily API 오류: {response.status} - {error_text}")
                result = await response.json()
                return {
                    "response": result.get('generated_text', '죄송합니다. 응답을 생성할 수 없습니다.'),
                    "provider": "lily",
                    "ai_name": "Lily LLM",
                    "ai_type": "huggingface",
                }
    except Exception as e:
        print(f"❌ Lily LLM API 호출 실패: {e}")
        raise
3.2 ํ๊ฒฝ ๋ณ์ ์ค์
Railway ํ๊ฒฝ์์ ๋ค์ ํ๊ฒฝ ๋ณ์ ์ถ๊ฐ:
# Lily LLM API ์ค์
LILY_LLM_API_URL=https://YOUR_USERNAME-lily-llm-api.hf.space
LILY_LLM_MODEL=kanana-1.5-v-3b-instruct
LILY_LLM_TIMEOUT=120
4. ํ ์คํธ ๋ฐ ๊ฒ์ฆ
4.1 ์ฐ๋ ํ ์คํธ ์คํฌ๋ฆฝํธ
# test_hearth_lily_integration.py
import requests
import json
def test_hearth_chat_lily_integration():
    """Integration test: Hearth Chat (Railway) routed to Lily LLM (HF Spaces).

    The original snippet had string literals broken across physical lines
    by encoding damage (a syntax error); the text is restored here.
    """
    # Hearth Chat API endpoint (Railway).
    hearth_chat_url = "https://your-hearth-chat.railway.app"

    # 1. Log in / acquire a session.
    session = requests.Session()

    # 2. Update the AI settings so Hearth Chat routes to Lily.
    ai_settings = {
        "aiProvider": "lily",
        "lilyApiUrl": "https://YOUR_USERNAME-lily-llm-api.hf.space",
        "lilyModel": "kanana-1.5-v-3b-instruct",
        "aiEnabled": True
    }
    settings_response = session.patch(
        f"{hearth_chat_url}/api/chat/user/settings/",
        json=ai_settings
    )
    print(f"설정 업데이트: {settings_response.status_code}")

    # 3. Chat tests.
    test_messages = [
        "안녕하세요! Lily LLM 테스트입니다.",
        "오늘 날씨가 어떤가요?",
        "간단한 수학 문제를 내주세요."
    ]
    for message in test_messages:
        print(f"\n🤖 테스트 메시지: {message}")
        # Send via WebSocket or the HTTP API
        # (adjust to the actual implementation).
        # Check the response.
        print("✅ 응답 받음")


if __name__ == "__main__":
    test_hearth_chat_lily_integration()
4.2 ์ด๋ฏธ์ง ์ฒ๋ฆฌ ํ ์คํธ
def test_image_processing():
    """Integration test: image upload through Hearth Chat to Lily LLM.

    The original snippet's Korean literals were split across physical
    lines by encoding damage (a syntax error); restored here.
    """
    # Upload a test image together with a question about it; the file
    # must stay open while requests streams the multipart body.
    with open("test_image.jpg", "rb") as f:
        files = {"image": f}
        data = {"message": "이미지에서 무엇을 볼 수 있나요?"}
        response = requests.post(
            "https://your-hearth-chat.railway.app/api/chat/send-message/",
            files=files,
            data=data
        )
    print(f"이미지 처리 테스트: {response.status_code}")
    print(f"응답: {response.json()}")
5. ๋ชจ๋ํฐ๋ง ๋ฐ ๋ก๊ทธ
5.1 Hugging Face Spaces ๋ก๊ทธ ๋ชจ๋ํฐ๋ง
# Spaces ๋์๋ณด๋์์ ์ค์๊ฐ ๋ก๊ทธ ํ์ธ
# API ํธ์ถ ๋น๋ ๋ฐ ์๋ต ์๊ฐ ๋ชจ๋ํฐ๋ง
5.2 Railway ๋ก๊ทธ ๋ชจ๋ํฐ๋ง
# Railway ๋์๋ณด๋์์ Hearth Chat ๋ก๊ทธ ํ์ธ
# Lily LLM API ํธ์ถ ์ฑ๊ณต/์คํจ ๋ชจ๋ํฐ๋ง
6. ์ฑ๋ฅ ์ต์ ํ
6.1 ์บ์ฑ ์ ๋ต
# Redis๋ฅผ ์ด์ฉํ ์๋ต ์บ์ฑ
import redis
redis_client = redis.Redis(host='localhost', port=6379, db=0)
def cached_lily_response(prompt_hash, response):
    """Serialize *response* and keep it in Redis for one hour."""
    cache_key = f"lily_cache:{prompt_hash}"
    ttl_seconds = 3600
    redis_client.setex(cache_key, ttl_seconds, json.dumps(response))
def get_cached_response(prompt_hash):
    """Return the cached response for *prompt_hash*, or None on a miss."""
    payload = redis_client.get(f"lily_cache:{prompt_hash}")
    if not payload:
        return None
    return json.loads(payload)
6.2 ๋ก๋ ๋ฐธ๋ฐ์ฑ
# ์ฌ๋ฌ Hugging Face Spaces ์ธ์คํด์ค ์ฌ์ฉ
# Multiple Hugging Face Spaces instances for simple failover.
LILY_API_ENDPOINTS = [
    "https://username1-lily-llm-api.hf.space",
    "https://username2-lily-llm-api.hf.space"
]


def get_available_endpoint():
    """Return the first endpoint whose /health check answers HTTP 200.

    Falls back to the first configured endpoint when none respond.
    """
    for endpoint in LILY_API_ENDPOINTS:
        try:
            response = requests.get(f"{endpoint}/health", timeout=5)
        except requests.RequestException:
            # Only swallow network errors — the original bare `except:`
            # also hid KeyboardInterrupt/SystemExit.
            continue
        if response.status_code == 200:
            return endpoint
    return LILY_API_ENDPOINTS[0]  # default fallback
7. ๋ณด์ ๊ณ ๋ ค์ฌํญ
7.1 API ํค ๊ด๋ฆฌ
# ํ๊ฒฝ ๋ณ์๋ก ๋ฏผ๊ฐํ ์ ๋ณด ๊ด๋ฆฌ
import os
LILY_API_KEY = os.getenv('LILY_API_KEY') # ํ์์
LILY_API_SECRET = os.getenv('LILY_API_SECRET') # ํ์์
7.2 ์์ฒญ ์ ํ
# ์ฌ์ฉ์๋ณ ์์ฒญ ์ ํ
from django.core.cache import cache
def check_rate_limit(user_id):
    """Return True while *user_id* is under the hourly quota (100 calls).

    The original read-then-write (``cache.get`` followed by ``cache.set``)
    let concurrent requests observe the same counter value and both pass;
    ``cache.add`` + ``cache.incr`` make the update atomic on backends that
    support atomic increments.
    """
    key = f"lily_api_rate_limit:{user_id}"
    # add() only creates the counter (and starts the 1-hour window) when
    # it does not exist yet; it is a no-op otherwise.
    cache.add(key, 0, 3600)
    try:
        current = cache.incr(key)
    except ValueError:
        # Counter expired between add() and incr(); restart the window.
        cache.set(key, 1, 3600)
        current = 1
    return current <= 100  # hourly limit of 100 requests
๐ ์ฐ๋ ์๋ฃ
๋ชจ๋ ์ค์ ์ด ์๋ฃ๋๋ฉด:
- Hugging Face Spaces: Lily LLM API ์๋ฒ ํธ์คํ
- Railway: Hearth Chat ์๋น์ค ํธ์คํ
- ์ฐ๋: ๋ ์๋น์ค ๊ฐ ์ํํ ํต์
์ฌ์ฉ์๋ Hearth Chat ์ธํฐํ์ด์ค๋ฅผ ํตํด Hugging Face์์ ํธ์คํ ๋๋ ๊ฐ๋ ฅํ Lily LLM AI๋ฅผ ์ฌ์ฉํ ์ ์๊ฒ ๋ฉ๋๋ค! ๐