QAway-to committed on
Commit 678027d · 1 Parent(s): 0395151

New structure v1.5

Files changed (4)
  1. core/analyzer.py +7 -8
  2. core/chat.py +11 -7
  3. core/comparer.py +11 -11
  4. test123 +0 -0
core/analyzer.py CHANGED
@@ -9,7 +9,7 @@ Purpose: Handles single-portfolio analysis using LLM. Fetches metrics, builds pr
 import asyncio
 from typing import Generator
 from services.tradelink_api import extract_portfolio_id, fetch_metrics_async
-from services.llm_client import llm_service  # Hugging Face client
+from services.llm_client import llm_service
 from prompts.system_prompts import ANALYSIS_SYSTEM_PROMPT
 from prompts.reference_templates import REFERENCE_PROMPT
 
@@ -17,8 +17,7 @@ from prompts.reference_templates import REFERENCE_PROMPT
 class PortfolioAnalyzer:
     """Main use-case class for analyzing a single portfolio."""
 
-    def __init__(self, llm=llm_service, model_name: str = "mistralai/Mixtral-8x7B-Instruct"):
-        # llm must expose .stream_chat(messages=list[dict], model=str) -> Iterable[str]
+    def __init__(self, llm=llm_service, model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"):
         self.llm = llm
         self.model_name = model_name
 
@@ -26,18 +25,18 @@ class PortfolioAnalyzer:
         """Stream analysis result step by step."""
         portfolio_id = extract_portfolio_id(text)
         if not portfolio_id:
-            yield "❗ Please provide a valid portfolio ID or link."
+            yield "❗ Пожалуйста, введите корректный portfolio ID или ссылку."
             return
 
-        yield "⏳ Fetching portfolio metrics..."
+        yield "⏳ Получаю метрики портфеля..."
         try:
             metrics = asyncio.run(fetch_metrics_async(portfolio_id))
         except Exception as e:
-            yield f"❌ Failed to fetch metrics: {e}"
+            yield f"❌ Ошибка при получении метрик: {e}"
             return
 
         if not metrics:
-            yield "❗ Unable to retrieve metrics from API."
+            yield "❗ Не удалось получить данные по метрикам."
             return
 
         # Build prompt text from metrics
@@ -56,4 +55,4 @@ class PortfolioAnalyzer:
             yield partial
 
         except Exception as e:
-            yield f"❌ Error during LLM analysis: {e}"
+            yield f"❌ Ошибка при работе LLM: {e}"
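
With this change, PortfolioAnalyzer can be constructed without arguments: it falls back to the shared llm_service and the Meta-Llama-3.1-8B-Instruct model, and its generator yields the accumulated text after each streamed chunk. A minimal usage sketch follows; the streaming method's name is not visible in this diff, so analyze_stream below is only a placeholder, and the removed inline comment is the sole source for the stream_chat(messages=list[dict], model=str) contract.

# Usage sketch under the new defaults. Assumption: the streaming entry point is
# named `analyze_stream` (the real name is outside the diff context); the injected
# client must expose .stream_chat(messages=list[dict], model=str) -> Iterable[str],
# as documented in the removed inline comment.
from core.analyzer import PortfolioAnalyzer

analyzer = PortfolioAnalyzer()  # defaults: llm_service + "meta-llama/Meta-Llama-3.1-8B-Instruct"

final_text = ""
for partial in analyzer.analyze_stream("<portfolio link or ID>"):  # placeholder input
    final_text = partial  # each yield is the full analysis generated so far
print(final_text)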
core/chat.py CHANGED
@@ -7,14 +7,14 @@ Purpose: General chat interface for user questions about TradeLink and its ecosy
 """
 
 from typing import Generator
-from services.llm_client import LLMAdapter
+from services.llm_client import llm_service
 from prompts.system_prompts import TRADELINK_CONTEXT
 
 
 class ChatAssistant:
-    """Handles generic user dialogue via LLM."""
+    """Handles general user dialogue via LLM."""
 
-    def __init__(self, llm: LLMAdapter, model_name: str):
+    def __init__(self, llm=llm_service, model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"):
         self.llm = llm
         self.model_name = model_name
 
@@ -25,7 +25,11 @@ class ChatAssistant:
             {"role": "user", "content": user_input},
         ]
 
-        partial = ""
-        for delta in self.llm.stream_chat(messages=messages, model=self.model_name):
-            partial += delta
-            yield partial
+        try:
+            partial = ""
+            for delta in self.llm.stream_chat(messages=messages, model=self.model_name):
+                partial += delta
+                yield partial
+
+        except Exception as e:
+            yield f"❌ Ошибка при генерации ответа: {e}"
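
The chat path now mirrors the analyzer: default dependencies and a try/except around the streaming loop, so a failing stream_chat call surfaces as an error message instead of an unhandled exception. A consumption sketch, assuming the method that builds messages is a generator named respond (its name is not shown in the diff):

# Sketch only: `respond` is an assumed name for the streaming chat method.
# Each yielded value is the whole reply accumulated so far, which suits UIs
# that re-render the full message on every update.
from core.chat import ChatAssistant

assistant = ChatAssistant()  # defaults: llm_service + "meta-llama/Meta-Llama-3.1-8B-Instruct"

reply = ""
for partial in assistant.respond("What is TradeLink?"):  # assumed method name
    reply = partial
print(reply)  # final reply, or the "❌ ..." message if stream_chat raised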
core/comparer.py CHANGED
@@ -3,13 +3,13 @@
 Purpose: Compares two portfolios using LLM. Fetches metrics for both and builds a unified comparison prompt.
 
 🇷🇺 Модуль: comparer.py
-Назначение: сравнение двух портфелей с помощью LLM. Получает метрики обоих портфелей, формирует промпт и выводит стриминговый результат.
+Назначение: сравнение двух портфелей с помощью LLM. Получает метрики обоих портфелей, формирует промпт и возвращает потоковый результат.
 """
 
 import asyncio
 from typing import Generator
 from services.tradelink_api import extract_portfolio_id, fetch_metrics_async
-from services.llm_client import LLMAdapter
+from services.llm_client import llm_service
 from prompts.system_prompts import COMPARISON_SYSTEM_PROMPT
 from prompts.reference_templates import REFERENCE_COMPARISON_PROMPT
 
@@ -17,7 +17,7 @@ from prompts.reference_templates import REFERENCE_COMPARISON_PROMPT
 class PortfolioComparer:
     """Main use-case class for comparing two portfolios."""
 
-    def __init__(self, llm: LLMAdapter, model_name: str):
+    def __init__(self, llm=llm_service, model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"):
         self.llm = llm
         self.model_name = model_name
 
@@ -27,26 +27,26 @@ class PortfolioComparer:
         id2 = extract_portfolio_id(text2)
 
         if text1 == text2:
-            yield "❗ Please provide two different portfolio IDs."
+            yield "❗ Пожалуйста, введите два разных portfolio ID."
             return
         if not id1 or not id2:
-            yield "❗ One or both portfolio IDs are invalid."
+            yield "❗ Один или оба portfolio ID некорректны."
             return
 
-        yield "⏳ Fetching metrics for both portfolios..."
+        yield "⏳ Получаю метрики для обоих портфелей..."
         try:
             m1 = asyncio.run(fetch_metrics_async(id1))
             m2 = asyncio.run(fetch_metrics_async(id2))
         except Exception as e:
-            yield f"❌ Failed to fetch data: {e}"
+            yield f"❌ Ошибка при получении данных: {e}"
             return
 
         if not m1 or not m2:
-            yield "❗ Metrics missing for one or both portfolios."
+            yield "❗ Не удалось получить метрики хотя бы одного портфеля."
             return
 
-        m1_text = ", ".join([f"{k}: {v}" for k, v in m1.items()])
-        m2_text = ", ".join([f"{k}: {v}" for k, v in m2.items()])
+        m1_text = ", ".join(f"{k}: {v}" for k, v in m1.items())
+        m2_text = ", ".join(f"{k}: {v}" for k, v in m2.items())
 
         prompt = (
             f"{REFERENCE_COMPARISON_PROMPT}\n"
@@ -67,4 +67,4 @@ class PortfolioComparer:
             yield partial
 
         except Exception as e:
-            yield f"❌ Error during LLM comparison: {e}"
+            yield f"❌ Ошибка при сравнении портфелей через LLM: {e}"
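
Beyond the localized messages, the only code change in the comparison body is stylistic: the metric dictionaries are flattened with a generator expression instead of a list comprehension, which produces the same string. A small illustration with made-up field names (not the real TradeLink API schema):

# Illustration of the metrics flattening; the keys and values are invented
# for the example and do not reflect actual fetch_metrics_async output.
m1 = {"profit": 12.4, "max_drawdown": 3.1, "win_rate": 0.62}

m1_text = ", ".join(f"{k}: {v}" for k, v in m1.items())
print(m1_text)  # -> profit: 12.4, max_drawdown: 3.1, win_rate: 0.62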
test123 DELETED
The diff for this file is too large to render.