Spaces:
Running
Running
QAway-to
committed on
Commit
·
0395151
1
Parent(s):
985f897
New structure v1.2
Browse files- config.py +7 -11
- requirements.txt +5 -4
- services/llm_client.py +28 -45
config.py
CHANGED
|
@@ -1,24 +1,20 @@
|
|
| 1 |
"""
|
| 2 |
🇬🇧 Module: config.py
|
| 3 |
-
Purpose: Central configuration for environment variables and constants
|
| 4 |
-
used across the entire TradeLink AI Analyzer project.
|
| 5 |
|
| 6 |
🇷🇺 Модуль: config.py
|
| 7 |
-
Назначение: централизованная конфигурация переменных окружения и констант
|
| 8 |
-
для всего проекта TradeLink AI Analyzer.
|
| 9 |
"""
|
| 10 |
|
| 11 |
import os
|
| 12 |
|
| 13 |
-
# ===
|
| 14 |
-
|
| 15 |
-
|
| 16 |
|
| 17 |
# === API Base URLs ===
|
| 18 |
TRADELINK_API_URL = "https://api.tradelink.pro"
|
| 19 |
|
| 20 |
-
# ===
|
| 21 |
-
REQUEST_TIMEOUT = 15
|
| 22 |
-
|
| 23 |
-
# === Debug Mode ===
|
| 24 |
DEBUG = os.getenv("DEBUG", "false").lower() == "true"
|
|
|
|
| 1 |
"""
|
| 2 |
🇬🇧 Module: config.py
|
| 3 |
+
Purpose: Central configuration for environment variables and constants.
|
|
|
|
| 4 |
|
| 5 |
🇷🇺 Модуль: config.py
|
| 6 |
+
Назначение: централизованная конфигурация переменных окружения и констант проекта.
|
|
|
|
| 7 |
"""
|
| 8 |
|
| 9 |
import os
|
| 10 |
|
| 11 |
+
# === Featherless.ai Configuration ===
|
| 12 |
+
FEATHERLESS_API_KEY = os.getenv("featherless")
|
| 13 |
+
FEATHERLESS_MODEL = "meta-llama/Meta-Llama-3.1-8B-Instruct"
|
| 14 |
|
| 15 |
# === API Base URLs ===
|
| 16 |
TRADELINK_API_URL = "https://api.tradelink.pro"
|
| 17 |
|
| 18 |
+
# === Other Settings ===
|
| 19 |
+
REQUEST_TIMEOUT = 15
|
|
|
|
|
|
|
| 20 |
DEBUG = os.getenv("DEBUG", "false").lower() == "true"
|
requirements.txt
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
gradio>=4.29.0
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
| 1 |
gradio>=4.29.0
|
| 2 |
+
openai>=1.30.1
|
| 3 |
+
requests
|
| 4 |
+
httpx
|
| 5 |
+
pandas
|
| 6 |
+
matplotlib
|
services/llm_client.py
CHANGED
|
@@ -1,56 +1,39 @@
|
|
| 1 |
"""
|
| 2 |
🇬🇧 Module: llm_client.py
|
| 3 |
-
Purpose: Adapter for
|
| 4 |
-
Provides a unified interface for text generation and future model extensions.
|
| 5 |
|
| 6 |
🇷🇺 Модуль: llm_client.py
|
| 7 |
-
Назначение: адаптер для LLM через
|
| 8 |
-
Обеспечивает единый интерфейс для генерации текста и расширения под другие модели в будущем.
|
| 9 |
"""
|
| 10 |
|
| 11 |
-
import
|
| 12 |
from typing import List, Dict, Generator
|
| 13 |
-
from
|
|
|
|
| 14 |
|
| 15 |
|
| 16 |
-
class
|
| 17 |
-
"""
|
| 18 |
|
| 19 |
-
def __init__(self, api_key: str):
|
| 20 |
if not api_key:
|
| 21 |
-
raise
|
| 22 |
-
self.
|
| 23 |
-
self.
|
| 24 |
-
|
| 25 |
-
def
|
| 26 |
-
"""
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
return result["generated_text"]
|
| 42 |
-
|
| 43 |
-
return str(result)
|
| 44 |
-
|
| 45 |
-
def stream_chat(self, *, messages: List[Dict], model: str) -> Generator[str, None, None]:
|
| 46 |
-
"""
|
| 47 |
-
Placeholder for streaming logic (not all Hugging Face endpoints support streaming).
|
| 48 |
-
For now, concatenates messages and sends as one prompt.
|
| 49 |
-
"""
|
| 50 |
-
prompt = "\n".join(m["content"] for m in messages)
|
| 51 |
-
text = self.complete(model, prompt)
|
| 52 |
-
yield text
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
# Global instance for reuse
|
| 56 |
-
llm_service = HuggingFaceLLMClient(api_key=HF_LLM_API_KEY)
|
|
|
|
| 1 |
"""
|
| 2 |
🇬🇧 Module: llm_client.py
|
| 3 |
+
Purpose: Adapter for Featherless.ai (OpenAI-compatible API).
|
|
|
|
| 4 |
|
| 5 |
🇷🇺 Модуль: llm_client.py
|
| 6 |
+
Назначение: адаптер для LLM-инференса через Featherless.ai (совместимо с OpenAI API).
|
|
|
|
| 7 |
"""
|
| 8 |
|
| 9 |
+
import os
|
| 10 |
from typing import List, Dict, Generator
|
| 11 |
+
from openai import OpenAI
|
| 12 |
+
from config import FEATHERLESS_API_KEY, FEATHERLESS_MODEL
|
| 13 |
|
| 14 |
|
| 15 |
+
class FeatherlessLLM:
|
| 16 |
+
"""Wrapper for Featherless.ai LLM inference."""
|
| 17 |
|
| 18 |
+
def __init__(self, api_key: str = FEATHERLESS_API_KEY, model: str = FEATHERLESS_MODEL):
|
| 19 |
if not api_key:
|
| 20 |
+
raise RuntimeError("❌ Environment variable 'featherless' (API key) is missing.")
|
| 21 |
+
self.client = OpenAI(base_url="https://api.featherless.ai/v1", api_key=api_key)
|
| 22 |
+
self.model = model
|
| 23 |
+
|
| 24 |
+
def stream_chat(self, *, messages: List[Dict], model: str = None) -> Generator[str, None, None]:
|
| 25 |
+
"""Stream chat completion using Featherless.ai."""
|
| 26 |
+
used_model = model or self.model
|
| 27 |
+
response = self.client.chat.completions.create(
|
| 28 |
+
model=used_model,
|
| 29 |
+
messages=messages,
|
| 30 |
+
stream=True,
|
| 31 |
+
)
|
| 32 |
+
for chunk in response:
|
| 33 |
+
delta = chunk.choices[0].delta.content
|
| 34 |
+
if delta:
|
| 35 |
+
yield delta
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# === Global singleton instance ===
|
| 39 |
+
llm_service = FeatherlessLLM()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|