QAway-to committed on
Commit 4afef48 · 1 parent: b2d5b74

New structure v1.2

Files changed (5)
  1. .idea/vcs.xml +0 -1
  2. app.py +1 -1
  3. config.py +7 -8
  4. requirements.txt +4 -8
  5. services/llm_client.py +46 -38
.idea/vcs.xml CHANGED
@@ -2,6 +2,5 @@
 <project version="4">
   <component name="VcsDirectoryMappings">
     <mapping directory="" vcs="Git" />
-    <mapping directory="$PROJECT_DIR$/AIassistance" vcs="Git" />
   </component>
 </project>
app.py CHANGED
@@ -7,7 +7,7 @@ from core.visualization import build_alpha_chart
 from core.metrics import show_metrics_table
 
 # === Model setup ===
-MODEL_NAME = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct"
 
 # === Use case classes ===
 analyzer = PortfolioAnalyzer(llm_service, MODEL_NAME)
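
The only functional change here is the model swap: the Llama 3.1 ID (previously served through Featherless) is replaced with a Mixtral ID for the Hugging Face Inference API. Note that the Hub usually lists this model as mistralai/Mixtral-8x7B-Instruct-v0.1; if the un-versioned ID does not resolve, the Inference API will return a 404. A minimal sketch of how MODEL_NAME flows into the client added below in services/llm_client.py (the prompt string is a made-up example, not part of this commit):

from services.llm_client import llm_service

MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct"

# complete(model, prompt) is the new client's non-streaming entry point;
# the prompt here is hypothetical.
print(llm_service.complete(MODEL_NAME, "Summarize this portfolio's alpha in one sentence."))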
config.py CHANGED
@@ -1,21 +1,20 @@
 """
 🇬🇧 Module: config.py
-Purpose: Central configuration file storing environment variables, constants, and settings
-used across the entire application.
+Purpose: Central configuration for environment variables and constants
+used across the entire TradeLink AI Analyzer project.
 
 🇷🇺 Модуль: config.py
-Назначение: центральный конфигурационный файл, где хранятся переменные окружения,
-константы и настройки, используемые во всём приложении.
+Назначение: централизованная конфигурация переменных окружения и констант
+для всего проекта TradeLink AI Analyzer.
 """
 
 import os
 
-# === LLM Configuration ===
-LLM_PROVIDER = os.getenv("LLM_PROVIDER", "featherless")
-LLM_API_KEY = os.getenv("featherless") or os.getenv("OPENAI_API_KEY")
+# === Hugging Face LLM Configuration ===
+HF_LLM_API_KEY = os.getenv("HF_LLM_integrate")  # token name in Space settings
+HF_LLM_API_URL = "https://api-inference.huggingface.co/models"
 
 # === API Base URLs ===
-FEATHERLESS_BASE_URL = "https://api.featherless.ai/v1"
 TRADELINK_API_URL = "https://api.tradelink.pro"
 
 # === Request/Connection Settings ===
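
One wrinkle worth flagging: services/llm_client.py below imports DEBUG from config, yet this diff does not add it, so a DEBUG constant must already exist in the unchanged # === Request/Connection Settings === block for the import to succeed. A sketch of the resulting configuration surface, with a hypothetical DEBUG definition filled in:

import os

# As committed: the token is stored under the secret name HF_LLM_integrate
# in the Space settings, and the base URL targets the hosted Inference API.
HF_LLM_API_KEY = os.getenv("HF_LLM_integrate")
HF_LLM_API_URL = "https://api-inference.huggingface.co/models"

# Hypothetical: llm_client.py imports DEBUG from config, so something like
# this presumably lives in the unchanged settings block (default assumed).
DEBUG = os.getenv("DEBUG", "0") == "1"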
requirements.txt CHANGED
@@ -1,9 +1,5 @@
 gradio>=4.29.0
-openai>=1.30.1
-requests
-httpx
-pandas
-matplotlib
-fastapi
-uvicorn
-httpx
+requests>=2.31.0
+httpx>=0.27.0
+pandas>=2.2.0
+matplotlib>=3.8.0
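
The removals track the code: openai is no longer imported anywhere, fastapi and uvicorn are presumably redundant since Gradio runs its own server, and the duplicate httpx entry disappears; the survivors gain minimum version pins. A stdlib-only sanity check (sketch) that the pinned packages are installed:

from importlib.metadata import version

# Print the installed version of each pinned dependency; compare by eye
# against requirements.txt (gradio>=4.29.0, requests>=2.31.0, ...).
for pkg in ("gradio", "requests", "httpx", "pandas", "matplotlib"):
    print(f"{pkg}=={version(pkg)}")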
 
 
 
 
services/llm_client.py CHANGED
@@ -1,48 +1,56 @@
 """
 🇬🇧 Module: llm_client.py
-Purpose: Provides an adapter for different LLM providers (Featherless, OpenAI, etc.)
-to ensure unified interface for chat completions.
+Purpose: Adapter for Hugging Face Inference API (LLM).
+Provides a unified interface for text generation and future model extensions.
 
 🇷🇺 Модуль: llm_client.py
-Назначение: адаптер для различных LLM-провайдеров (Featherless, OpenAI и др.),
-создающий единый интерфейс для взаимодействия с моделями.
+Назначение: адаптер для LLM через Hugging Face Inference API.
+Обеспечивает единый интерфейс для генерации текста и расширения под другие модели в будущем.
 """
 
-from typing import Iterable, List, Dict
-from openai import OpenAI
-from config import LLM_API_KEY, LLM_PROVIDER, FEATHERLESS_BASE_URL
+import requests
+from typing import List, Dict, Generator
+from config import HF_LLM_API_KEY, HF_LLM_API_URL, DEBUG
 
 
-class LLMAdapter:
-    """Unified adapter for LLM providers."""
+class HuggingFaceLLMClient:
+    """Lightweight client for Hugging Face text generation models."""
 
-    def __init__(self, provider: str = LLM_PROVIDER, api_key: str | None = LLM_API_KEY):
+    def __init__(self, api_key: str):
         if not api_key:
-            raise ValueError("Missing LLM API key (env var 'featherless' or 'OPENAI_API_KEY').")
-        self.provider = provider
-        self.client = self._init_client(provider, api_key)
-
-    def _init_client(self, provider: str, api_key: str) -> OpenAI:
-        """Initialize OpenAI client depending on provider."""
-        if provider == "featherless":
-            return OpenAI(base_url=FEATHERLESS_BASE_URL, api_key=api_key)
-        if provider == "openai":
-            return OpenAI(api_key=api_key)
-        raise ValueError(f"Unsupported provider: {provider}")
-
-    def stream_chat(self, *, messages: List[Dict], model: str) -> Iterable[str]:
-        """Stream responses token by token."""
-        response = self.client.chat.completions.create(model=model, messages=messages, stream=True)
-        for chunk in response:
-            delta = chunk.choices[0].delta.content
-            if delta:
-                yield delta
-
-    def complete(self, *, messages: List[Dict], model: str) -> str:
-        """Return a single completion (non-streaming)."""
-        resp = self.client.chat.completions.create(model=model, messages=messages)
-        return resp.choices[0].message.content
-
-
-# Global ready-to-use instance
-llm_service = LLMAdapter()
+            raise ValueError("Missing Hugging Face API key (HF_LLM_integrate).")
+        self.api_key = api_key
+        self.headers = {"Authorization": f"Bearer {self.api_key}"}
+
+    def complete(self, model: str, prompt: str) -> str:
+        """Non-streaming completion request."""
+        url = f"{HF_LLM_API_URL}/{model}"
+        payload = {"inputs": prompt}
+
+        if DEBUG:
+            print(f"[DEBUG] Sending request to {url}")
+
+        resp = requests.post(url, headers=self.headers, json=payload)
+        resp.raise_for_status()
+        result = resp.json()
+
+        # Handle HF response structures
+        if isinstance(result, list) and "generated_text" in result[0]:
+            return result[0]["generated_text"]
+        if isinstance(result, dict) and "generated_text" in result:
+            return result["generated_text"]
+
+        return str(result)
+
+    def stream_chat(self, *, messages: List[Dict], model: str) -> Generator[str, None, None]:
+        """
+        Placeholder for streaming logic (not all Hugging Face endpoints support streaming).
+        For now, concatenates messages and sends as one prompt.
+        """
+        prompt = "\n".join(m["content"] for m in messages)
+        text = self.complete(model, prompt)
+        yield text
+
+
+# Global instance for reuse
+llm_service = HuggingFaceLLMClient(api_key=HF_LLM_API_KEY)
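
For callers migrating from the old LLMAdapter, the call shape changes: complete(model, prompt) now takes a plain prompt string positionally instead of keyword-only messages/model, and stream_chat keeps its keyword-only signature but yields the entire completion as a single chunk. One caveat in the response handling: the list branch assumes a non-empty list of dicts, so an empty list would raise IndexError before reaching the str(result) fallback. A usage sketch (prompt and messages are hypothetical):

from services.llm_client import llm_service

MODEL = "mistralai/Mixtral-8x7B-Instruct"  # matches app.py's MODEL_NAME

# Non-streaming call: returns generated text, or str(result) as a fallback
# when the response shape is unexpected (e.g. an {"error": ...} payload).
print(llm_service.complete(MODEL, "Explain portfolio alpha in one sentence."))

# "Streaming" call: currently a single yield of the full completion, since
# the messages are flattened into one prompt before a normal POST.
for chunk in llm_service.stream_chat(
    messages=[{"role": "user", "content": "Hello"}],
    model=MODEL,
):
    print(chunk, end="")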