QAway-to committed on
Commit
0395151
·
1 Parent(s): 985f897

New structure v1.2

Browse files
Files changed (3) hide show
  1. config.py +7 -11
  2. requirements.txt +5 -4
  3. services/llm_client.py +28 -45
config.py CHANGED
@@ -1,24 +1,20 @@
1
  """
2
  🇬🇧 Module: config.py
3
- Purpose: Central configuration for environment variables and constants
4
- used across the entire TradeLink AI Analyzer project.
5
 
6
  🇷🇺 Модуль: config.py
7
- Назначение: централизованная конфигурация переменных окружения и констант
8
- для всего проекта TradeLink AI Analyzer.
9
  """
10
 
11
  import os
12
 
13
- # === Hugging Face LLM Configuration ===
14
- HF_LLM_API_KEY = os.getenv("HF_LLM_integrate") # token name in Space settings
15
- HF_LLM_API_URL = "https://api-inference.huggingface.co/models"
16
 
17
  # === API Base URLs ===
18
  TRADELINK_API_URL = "https://api.tradelink.pro"
19
 
20
- # === Request/Connection Settings ===
21
- REQUEST_TIMEOUT = 15 # seconds
22
-
23
- # === Debug Mode ===
24
  DEBUG = os.getenv("DEBUG", "false").lower() == "true"
 
"""
🇬🇧 Module: config.py
Purpose: Central configuration for environment variables and constants.

🇷🇺 Модуль: config.py
Назначение: централизованная конфигурация переменных окружения и констант проекта.
"""

import os

# === Featherless.ai Configuration ===
# API key is read from the environment/Space secret named 'featherless'.
FEATHERLESS_API_KEY = os.getenv("featherless")
FEATHERLESS_MODEL = "meta-llama/Meta-Llama-3.1-8B-Instruct"

# === API Base URLs ===
TRADELINK_API_URL = "https://api.tradelink.pro"

# === Other Settings ===
REQUEST_TIMEOUT = 15  # seconds

# Debug mode: enabled when env var DEBUG is "true" (case-insensitive).
DEBUG = os.getenv("DEBUG", "false").lower() == "true"
requirements.txt CHANGED
@@ -1,5 +1,6 @@
1
  gradio>=4.29.0
2
- requests>=2.31.0
3
- httpx>=0.27.0
4
- pandas>=2.2.0
5
- matplotlib>=3.8.0
 
 
1
  gradio>=4.29.0
2
+ openai>=1.30.1
3
+ requests
4
+ httpx
5
+ pandas
6
+ matplotlib
services/llm_client.py CHANGED
@@ -1,56 +1,39 @@
1
  """
2
  🇬🇧 Module: llm_client.py
3
- Purpose: Adapter for Hugging Face Inference API (LLM).
4
- Provides a unified interface for text generation and future model extensions.
5
 
6
  🇷🇺 Модуль: llm_client.py
7
- Назначение: адаптер для LLM через Hugging Face Inference API.
8
- Обеспечивает единый интерфейс для генерации текста и расширения под другие модели в будущем.
9
  """
10
 
11
- import requests
12
  from typing import List, Dict, Generator
13
- from config import HF_LLM_API_KEY, HF_LLM_API_URL, DEBUG
 
14
 
15
 
16
- class HuggingFaceLLMClient:
17
- """Lightweight client for Hugging Face text generation models."""
18
 
19
- def __init__(self, api_key: str):
20
  if not api_key:
21
- raise ValueError("Missing Hugging Face API key (HF_LLM_integrate).")
22
- self.api_key = api_key
23
- self.headers = {"Authorization": f"Bearer {self.api_key}"}
24
-
25
- def complete(self, model: str, prompt: str) -> str:
26
- """Non-streaming completion request."""
27
- url = f"{HF_LLM_API_URL}/{model}"
28
- payload = {"inputs": prompt}
29
-
30
- if DEBUG:
31
- print(f"[DEBUG] Sending request to {url}")
32
-
33
- resp = requests.post(url, headers=self.headers, json=payload)
34
- resp.raise_for_status()
35
- result = resp.json()
36
-
37
- # Handle HF response structures
38
- if isinstance(result, list) and "generated_text" in result[0]:
39
- return result[0]["generated_text"]
40
- if isinstance(result, dict) and "generated_text" in result:
41
- return result["generated_text"]
42
-
43
- return str(result)
44
-
45
- def stream_chat(self, *, messages: List[Dict], model: str) -> Generator[str, None, None]:
46
- """
47
- Placeholder for streaming logic (not all Hugging Face endpoints support streaming).
48
- For now, concatenates messages and sends as one prompt.
49
- """
50
- prompt = "\n".join(m["content"] for m in messages)
51
- text = self.complete(model, prompt)
52
- yield text
53
-
54
-
55
- # Global instance for reuse
56
- llm_service = HuggingFaceLLMClient(api_key=HF_LLM_API_KEY)
 
1
  """
2
  🇬🇧 Module: llm_client.py
3
+ Purpose: Adapter for Featherless.ai (OpenAI-compatible API).
 
4
 
5
  🇷🇺 Модуль: llm_client.py
6
+ Назначение: адаптер для LLM-инференса через Featherless.ai (совместимо с OpenAI API).
 
7
  """
8
 
import os
from typing import Dict, Generator, List, Optional

from openai import OpenAI

from config import FEATHERLESS_API_KEY, FEATHERLESS_MODEL
13
 
14
 
15
class FeatherlessLLM:
    """Wrapper for Featherless.ai LLM inference (OpenAI-compatible API).

    Uses the OpenAI SDK pointed at the Featherless base URL; credentials and
    the default model come from config (FEATHERLESS_API_KEY / FEATHERLESS_MODEL).
    """

    def __init__(self, api_key: Optional[str] = None, model: Optional[str] = None):
        """Create a Featherless.ai client.

        Args:
            api_key: Featherless API key. Defaults to config.FEATHERLESS_API_KEY,
                resolved at call time (not at class-definition time) so the class
                can be defined independently of config import order.
            model: Default model id. Defaults to config.FEATHERLESS_MODEL.

        Raises:
            RuntimeError: if no API key is available.
        """
        api_key = api_key if api_key is not None else FEATHERLESS_API_KEY
        if not api_key:
            raise RuntimeError("Environment variable 'featherless' (API key) is missing.")
        self.client = OpenAI(base_url="https://api.featherless.ai/v1", api_key=api_key)
        self.model = model if model is not None else FEATHERLESS_MODEL

    def stream_chat(self, *, messages: List[Dict], model: Optional[str] = None) -> Generator[str, None, None]:
        """Stream a chat completion from Featherless.ai, yielding text deltas.

        Args:
            messages: OpenAI-style chat messages ({"role": ..., "content": ...}).
            model: Optional model override; falls back to the instance default.

        Yields:
            Non-empty text fragments as they arrive from the stream.
        """
        used_model = model or self.model
        response = self.client.chat.completions.create(
            model=used_model,
            messages=messages,
            stream=True,
        )
        for chunk in response:
            # OpenAI-compatible streams may emit keep-alive chunks with an
            # empty `choices` list; skip them instead of raising IndexError.
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta.content
            if delta:
                yield delta
36
+
37
+
38
# === Global singleton instance ===
# NOTE(review): constructed at import time, so importing this module raises
# RuntimeError when the 'featherless' API key is unset — confirm this
# fail-fast-on-import behavior is intended for the Space.
llm_service = FeatherlessLLM()