Riy777 commited on
Commit
c881126
·
verified ·
1 Parent(s): 4213beb

Update LLM.py

Browse files
Files changed (1) hide show
  1. LLM.py +197 -422
LLM.py CHANGED
@@ -1,491 +1,266 @@
1
- # LLM.py (V19.5 - Remove Bias Scores from Prompt)
2
- import os, traceback, json, time, re
3
- import httpx
4
- from datetime import datetime
5
- from typing import List, Dict, Any, Optional
6
 
7
- # (استخدام مكتبة OpenAI الرسمية بدلاً من httpx)
 
 
8
  from openai import AsyncOpenAI, RateLimitError, APIError
9
 
10
- try:
11
- from r2 import R2Service
12
- from learning_hub.hub_manager import LearningHubManager # (استيراد العقل)
13
- except ImportError:
14
- print("❌ [LLMService] فشل استيراد R2Service أو LearningHubManager")
15
- R2Service = None
16
- LearningHubManager = None
17
-
18
- # (V8.1) استيراد NewsFetcher
19
- try:
20
- from sentiment_news import NewsFetcher
21
- except ImportError:
22
- NewsFetcher = None
23
-
24
- # (استيراد VADER هنا أيضاً للـ type hinting)
25
- try:
26
- from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
27
- except ImportError:
28
- SentimentIntensityAnalyzer = None
29
-
30
-
31
- # (تحديث الإعدادات الافتراضية لتطابق NVIDIA)
32
  LLM_API_URL = os.getenv("LLM_API_URL", "https://integrate.api.nvidia.com/v1")
33
- LLM_API_KEY = os.getenv("LLM_API_KEY") # (هذا هو المفتاح الذي سيتم استخدامه)
34
  LLM_MODEL = os.getenv("LLM_MODEL", "nvidia/llama-3.1-nemotron-ultra-253b-v1")
35
 
36
- # (البارامترات المحددة من طرفك)
37
  LLM_TEMPERATURE = 0.2
38
  LLM_TOP_P = 0.7
39
  LLM_MAX_TOKENS = 16384
40
  LLM_FREQUENCY_PENALTY = 0.8
41
  LLM_PRESENCE_PENALTY = 0.5
42
-
43
- # إعدادات العميل
44
  CLIENT_TIMEOUT = 300.0
45
 
46
  class LLMService:
47
  def __init__(self):
48
  if not LLM_API_KEY:
49
- raise ValueError("❌ [LLMService] متغير بيئة LLM_API_KEY غير موجود!")
50
 
51
- try:
52
- self.client = AsyncOpenAI(
53
- base_url=LLM_API_URL,
54
- api_key=LLM_API_KEY,
55
- timeout=CLIENT_TIMEOUT
56
- )
57
- # 🔴 --- START OF CHANGE (V19.5) --- 🔴
58
- print(f"✅ [LLMService V19.5] مهيأ. النموذج: {LLM_MODEL}")
59
- # 🔴 --- END OF CHANGE --- 🔴
60
- print(f" -> Endpoint: {LLM_API_URL}")
61
- except Exception as e:
62
- # 🔴 --- START OF CHANGE (V19.5) --- 🔴
63
- print(f"❌ [LLMService V19.5] فشل تهيئة AsyncOpenAI: {e}")
64
- # 🔴 --- END OF CHANGE --- 🔴
65
- traceback.print_exc()
66
- raise
67
 
68
- # --- (الربط بالخدمات الأخرى) ---
69
- self.r2_service: Optional[R2Service] = None
70
- self.learning_hub: Optional[LearningHubManager] = None
71
- self.news_fetcher: Optional[NewsFetcher] = None
72
- self.vader_analyzer: Optional[SentimentIntensityAnalyzer] = None
73
 
74
  async def _call_llm(self, prompt: str) -> Optional[str]:
75
- """
76
- (محدث V19.2)
77
- إجراء استدعاء API للنموذج الضخم (يستخدم الآن "detailed thinking on" كـ system prompt).
78
- """
79
-
80
  system_prompt = "detailed thinking on"
81
 
82
- payload = {
83
- "model": LLM_MODEL,
84
- "messages": [
85
- {"role": "system", "content": system_prompt},
86
- {"role": "user", "content": prompt} # (prompt يحتوي الآن على تعليمات JSON)
87
- ],
88
- "temperature": LLM_TEMPERATURE,
89
- "top_p": LLM_TOP_P,
90
- "max_tokens": LLM_MAX_TOKENS,
91
- "frequency_penalty": LLM_FREQUENCY_PENALTY,
92
- "presence_penalty": LLM_PRESENCE_PENALTY,
93
- "stream": False, # (يجب أن تكون False للحصول على JSON)
94
- "response_format": {"type": "json_object"}
95
- }
96
-
97
  try:
98
- response = await self.client.chat.completions.create(**payload)
99
-
100
- if response.choices and len(response.choices) > 0:
101
- content = response.choices[0].message.content
102
- if content:
103
- return content.strip()
104
-
105
- print(f"❌ [LLMService] استجابة API غير متوقعة: {response.model_dump_json()}")
106
- return None
107
-
108
- except RateLimitError as e:
109
- print(f"❌ [LLMService] خطأ Rate Limit من NVIDIA API: {e}")
110
- except APIError as e:
111
- print(f"❌ [LLMService] خطأ API من NVIDIA API: {e}")
112
- except json.JSONDecodeError:
113
- print(f"❌ [LLMService] فشل في تحليل استجابة JSON.")
114
  except Exception as e:
115
- print(f"❌ [LLMService] خطأ غير متوقع في _call_llm: {e}")
116
- traceback.print_exc()
117
-
118
- return None
119
-
120
- def _parse_llm_response_enhanced(self,
121
- response_text: str,
122
- fallback_strategy: str = "decision",
123
- symbol: str = "N/A") -> Optional[Dict[str, Any]]:
124
- """
125
- (محدث V8) محلل JSON ذكي ومتسامح مع الأخطاء.
126
- """
127
- if not response_text:
128
- print(f" ⚠️ [LLMParser] الاستجابة فارغة لـ {symbol}.")
129
- return self._get_fallback_response(fallback_strategy, "Empty response")
130
-
131
- # 1. محاولة تحليل JSON مباشرة (لأننا طلبنا response_format=json_object)
132
- try:
133
- return json.loads(response_text)
134
- except json.JSONDecodeError:
135
- print(f" ⚠️ [LLMParser] فشل تحليل JSON المباشر لـ {symbol}. محاولة استخراج JSON...")
136
- pass # (الانتقال إلى المحاولة 2)
137
 
138
- # 2. محاولة استخراج JSON من داخل نص (Fallback 1)
 
139
  try:
140
- # (البحث عن أول { وآخر })
141
- json_match = re.search(r'\{.*\}', response_text, re.DOTALL)
142
- if json_match:
143
- json_string = json_match.group(0)
144
- return json.loads(json_string)
145
- else:
146
- print(f" ⚠️ [LLMParser] لم يتم العثور على JSON مطابق لـ {symbol}.")
147
- raise json.JSONDecodeError("No JSON object found in text", response_text, 0)
148
- except json.JSONDecodeError as e:
149
- print(f" ❌ [LLMParser] فشل الاستخراج النهائي لـ {symbol}. نص الاستجابة: {response_text[:200]}...")
150
- return self._get_fallback_response(fallback_strategy, f"Final JSON parse fail: {e}")
151
- except Exception as e:
152
- print(f" ❌ [LLMParser] خطأ غير متوقع في المحلل لـ {symbol}: {e}")
153
- return self._get_fallback_response(fallback_strategy, f"Unexpected parser error: {e}")
154
-
155
- def _get_fallback_response(self, strategy: str, reason: str) -> Optional[Dict[str, Any]]:
156
- """
157
- (محدث V8) إرجاع استجابة آمنة عند فشل النموذج الضخم.
158
- """
159
- print(f" 🚨 [LLMService] تفعيل الاستجابة الاحتياطية (Fallback) لاستراتيجية '{strategy}' (السبب: {reason})")
160
-
161
- if strategy == "decision":
162
- # (القرار الآمن: لا تتداول)
163
- return {
164
- "action": "NO_DECISION",
165
- "strategy_to_watch": "GENERIC",
166
- "confidence_level": 0,
167
- "reasoning": f"LLM analysis failed: {reason}",
168
- "exit_profile": "Standard"
169
- }
170
- elif strategy == "reanalysis":
171
- # (القرار الآمن: استمر في الصفقة الحالية)
172
- return {
173
- "action": "HOLD",
174
- "strategy": "MAINTAIN_CURRENT",
175
- "reasoning": f"LLM re-analysis failed: {reason}. Maintaining current trade strategy."
176
- }
177
- elif strategy == "reflection":
178
- # (القرار الآمن: لا تقم بإنشاء قاعدة تعلم)
179
- return None # (سيمنع Reflector من إنشاء دلتا)
180
-
181
- elif strategy == "distillation":
182
- # (القرار الآمن: لا تقم بإنشاء قواعد مقطرة)
183
- return None # (سيمنع Curator من المتابعة)
184
-
185
- return None # (Fallback عام)
186
 
 
 
 
187
  async def get_trading_decision(self, candidate_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
188
- """
189
- (محدث V8.1)
190
- يستدعي النموذج الضخم لاتخاذ قرار "WATCH" استراتيجي (Explorer Brain).
191
- """
192
  symbol = candidate_data.get('symbol', 'UNKNOWN')
193
  try:
194
- # 1. (العقل) جلب القواعد (Deltas) من محور التعلم
195
- learning_context_prompt = "Playbook: No learning context available."
196
  if self.learning_hub:
197
- learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
198
- domain="general",
199
- query=f"{symbol} strategy decision"
200
- )
201
-
202
- # 2. إنشاء الـ Prompt (باللغة الإنجليزية)
203
- prompt = self._create_trading_prompt(candidate_data, learning_context_prompt)
204
-
205
- if self.r2_service:
206
- await self.r2_service.save_llm_prompts_async(symbol, "trading_decision", prompt, candidate_data)
207
 
208
- # 3. استدعاء النموذج الضخم (LLM)
 
 
 
209
  response_text = await self._call_llm(prompt)
 
210
 
211
- # 4. تحليل الاستجابة (باستخدام المحلل الذكي)
212
- decision_json = self._parse_llm_response_enhanced(
213
- response_text,
214
- fallback_strategy="decision",
215
- symbol=symbol
216
- )
217
-
218
- return decision_json
219
 
 
220
  except Exception as e:
221
- print(f"❌ [LLMService] فشل فادح في get_trading_decision لـ {symbol}: {e}")
222
  traceback.print_exc()
223
- return self._get_fallback_response("decision", str(e)) # (إرجاع قرار آمن)
224
 
 
 
 
225
  async def re_analyze_trade_async(self, trade_data: Dict[str, Any], current_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
226
- """
227
- (محدث V19.3)
228
- يستدعي النموذج الضخم لإعادة تحليل صفقة مفتوحة (Reflector Brain).
229
- """
230
  symbol = trade_data.get('symbol', 'UNKNOWN')
231
  try:
232
- # 1. (العقل) جلب القواعد (Deltas) من محور التعلم
233
- learning_context_prompt = "Playbook: No learning context available."
234
  if self.learning_hub:
235
- learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
236
- domain="strategy",
237
- query=f"{symbol} re-analysis {trade_data.get('strategy', 'GENERIC')}"
238
- )
239
-
240
- # 2. (V8.1) جلب أحدث الأخبار (باستخدام NewsFetcher المخصص)
241
- latest_news_text = "News data unavailable for re-analysis."
242
- latest_news_score = 0.0
243
-
244
- # (استخدام self.vader_analyzer الذي تم حقنه)
245
- if self.news_fetcher:
246
- latest_news_text = await self.news_fetcher.get_news_for_symbol(symbol)
247
- if self.vader_analyzer and latest_news_text: # (التحقق من المحلل المُمرر)
248
- vader_scores = self.vader_analyzer.polarity_scores(latest_news_text)
249
- latest_news_score = vader_scores.get('compound', 0.0)
250
-
251
- current_data['latest_news_text'] = latest_news_text
252
- current_data['latest_news_score'] = latest_news_score
253
 
254
- # 3. إنشاء الـ Prompt (باللغة الإنجليزية)
255
- prompt = await self._create_reanalysis_prompt(trade_data, current_data, learning_context_prompt)
256
 
257
- if self.r2_service:
258
- await self.r2_service.save_llm_prompts_async(symbol, "trade_reanalysis", prompt, current_data)
259
-
260
- # 4. استدعاء النموذج الضخم (LLM)
261
  response_text = await self._call_llm(prompt)
 
262
 
263
- # 5. تحليل الاستجابة (باستخدام المحلل الذكي)
264
- decision_json = self._parse_llm_response_enhanced(
265
- response_text,
266
- fallback_strategy="reanalysis",
267
- symbol=symbol
268
- )
269
-
270
- return decision_json
271
 
 
272
  except Exception as e:
273
- print(f"❌ [LLMService] فشل فادح في re_analyze_trade_async لـ {symbol}: {e}")
274
- traceback.print_exc()
275
- return self._get_fallback_response("reanalysis", str(e)) # (إرجاع قرار آمن)
276
-
277
- # --- (دوال إنشاء الـ Prompts) ---
278
- # (ملاحظة: هذه الدوال يجب أن تكون دائماً باللغة الإنجليزية)
279
 
280
- def _create_trading_prompt(self,
281
- candidate_data: Dict[str, Any],
282
- learning_context: str) -> str:
 
283
  """
284
- (معدل V19.5)
285
- إنشاء الـ Prompt (باللغة الإنجليزية) لاتخاذ قرار التداول الأولي (Explorer).
286
- (تمت إزالة الدرجات المسبقة لتقليل الانحياز)
287
  """
 
 
288
 
289
- symbol = candidate_data.get('symbol', 'N/A')
290
-
291
- # --- 1. استخراج بيانات ML (الطبقة 1) ---
292
- # (تمت إزالة l1_score و l1_reasons عمداً)
293
- pattern_data = candidate_data.get('pattern_analysis', {})
294
- mc_data = candidate_data.get('monte_carlo_distribution', {})
295
-
296
- # --- 2. استخراج بيانات المشاعر والأخبار (الطبقة 1) ---
297
- news_text = candidate_data.get('news_text', 'No news text provided.')
298
- news_score_raw = candidate_data.get('news_score_raw', 0.0)
299
- statistical_news_pnl = candidate_data.get('statistical_news_pnl', 0.0)
300
 
301
- # --- 3. استخراج بيانات الحيتان (الطبقة 1) ---
302
- whale_data = candidate_data.get('whale_data', {})
303
- whale_summary = whale_data.get('llm_friendly_summary', {})
304
- exchange_flows = whale_data.get('exchange_flows', {})
305
 
306
- whale_signal = whale_summary.get('recommended_action', 'HOLD')
307
- whale_confidence = whale_summary.get('confidence', 0.3)
308
- whale_reason = whale_summary.get('whale_activity_summary', 'No whale data.')
309
- net_flow_usd = exchange_flows.get('net_flow_usd', 0.0)
310
-
311
- # (البيانات طويلة المدى - من تحليل 24 ساعة الجديد)
312
- accumulation_data_24h = whale_data.get('accumulation_analysis_24h', {})
313
- net_flow_24h_usd = accumulation_data_24h.get('net_flow_usd', 0.0)
314
- total_inflow_24h_usd = accumulation_data_24h.get('to_exchanges_usd', 0.0)
315
- total_outflow_24h_usd = accumulation_data_24h.get('from_exchanges_usd', 0.0)
316
- relative_net_flow_24h_percent = accumulation_data_24h.get('relative_net_flow_percent', 0.0)
317
-
318
- # --- 4. استخراج بيانات السوق (الطبقة 0) ---
319
- market_context = candidate_data.get('sentiment_data', {})
320
- market_trend = market_context.get('market_trend', 'UNKNOWN')
321
- btc_sentiment = market_context.get('btc_sentiment', 'UNKNOWN')
322
 
323
- # --- 5. بناء أقسام الـ Prompt (الإنجليزية) ---
324
-
325
- playbook_prompt = f"""
326
- --- START OF LEARNING PLAYBOOK ---
327
- {learning_context}
328
- --- END OF PLAYBOOK ---
329
- """
330
-
331
- # 🔴 --- START OF CHANGE (V19.5) --- 🔴
332
- # (تمت إزالة درجة l1_score و l1_reasons من هنا)
333
- technical_summary_prompt = f"""
334
- 1. **Technical Analysis:**
335
- * Chart Pattern: {pattern_data.get('pattern_detected', 'None')} (Conf: {pattern_data.get('pattern_confidence', 0):.2f})
336
- * Monte Carlo (1h): {mc_data.get('probability_of_gain', 0) * 100:.1f}% chance of profit (Expected: {mc_data.get('expected_return_pct', 0):.2f}%)
337
- """
338
- # 🔴 --- END OF CHANGE --- 🔴
339
-
340
- news_prompt = f"""
341
- 2. **News & Sentiment Analysis:**
342
- * Market Trend: {market_trend} (BTC: {btc_sentiment})
343
- * VADER (Raw): {news_score_raw:.3f}
344
- * Statistical PnL (Learned): {statistical_news_pnl:+.2f}%
345
- * News Text: {news_text[:300]}...
346
  """
347
- whale_activity_prompt = f"""
348
- 3. **Whale Activity (Real-time Flow - Optimized Window):**
349
- * Signal: {whale_signal} (Confidence: {whale_confidence:.2f})
350
- * Reason: {whale_reason}
351
- * Net Flow (to/from Exchanges): ${net_flow_usd:,.2f}
352
-
353
- 4. **Whale Activity (24h Accumulation):**
354
- * 24h Net Flow (Accumulation): ${net_flow_24h_usd:,.2f}
355
- * 24h Total Deposits: ${total_inflow_24h_usd:,.2f}
356
- * 24h Total Withdrawals: ${total_outflow_24h_usd:,.2f}
357
- * Relative 24h Net Flow (vs Daily Volume): {relative_net_flow_24h_percent:+.2f}%
358
- """
359
-
360
- # 🔴 --- START OF CHANGE (V19.5) --- 🔴
361
- # (تم تحديث التعليمات ليعكس تحليل البيانات "الخام" بدلاً من الدرجات)
362
- task_prompt = f"""
363
- CONTEXT:
364
- You are an expert AI trading analyst (Explorer Brain).
365
- Analyze the following raw technical, news, and whale data for {symbol}. You must make a decision based *only* on the data provided, without any pre-calculated scores.
366
- Decide if this combination of signals presents a high-potential opportunity to 'WATCH'.
367
- {playbook_prompt}
368
-
369
- --- START OF CANDIDATE DATA ---
370
- {technical_summary_prompt}
371
- {news_prompt}
372
- {whale_activity_prompt}
373
- --- END OF CANDIDATE DATA ---
374
-
375
- TASK:
376
- 1. **Internal Thinking (Private):** Perform a step-by-step analysis (as triggered by the system prompt).
377
- * Synthesize all data points (Chart Pattern, Monte Carlo, News, Whale Flow, 24h Accumulation).
378
- * Are the signals aligned? (e.g., Bullish Pattern + High MC Probability + Whale Accumulation = Strong).
379
- * Are there conflicts? (e.g., Bullish Pattern but high 24h Deposits = Risky).
380
- * Consult the "Playbook" for learned rules.
381
- 2. **Final Decision:** Based on your internal thinking, decide the final action.
382
- 3. **Output Constraint:** Provide your final answer ONLY in the requested JSON object format, with no introductory text, markdown formatting, or explanations.
383
-
384
- OUTPUT (JSON Object ONLY):
 
 
 
 
 
 
 
 
 
 
 
385
  {{
386
- "action": "WATCH" or "NO_DECISION",
387
- "strategy_to_watch": "STRATEGY_NAME",
388
- "confidence_level": 0.0_to_1.0,
389
- "reasoning": "Brief justification (max 40 words) synthesizing all data points.",
390
- "exit_profile": "Aggressive" or "Standard" or "Patient"
391
  }}
392
  """
393
- # 🔴 --- END OF CHANGE --- 🔴
394
-
395
- # (نرسل فقط task_prompt لأنه يحتوي الآن على كل شيء)
396
- return task_prompt
397
-
398
 
399
- async def _create_reanalysis_prompt(self,
400
- trade_data: Dict[str, Any],
401
- current_data: Dict[str, Any],
402
- learning_context: str) -> str:
403
  """
404
- (معدل V19.4)
405
- إنشاء الـ Prompt (باللغة الإنجليزية) لإعادة تحليل صفقة مفتوحة (Reflector Brain).
406
- (تم إصلاح تنسيق مونت كارلو)
407
  """
 
 
 
 
 
 
 
 
408
 
409
- symbol = trade_data.get('symbol', 'N/A')
410
-
411
- # --- 1. بيانات الصفقة الأصلية (القديمة) ---
412
- original_strategy = trade_data.get('strategy', 'N/A')
413
- original_reasoning = trade_data.get('decision_data', {}).get('reasoning', 'N/A')
414
- entry_price = trade_data.get('entry_price', 0)
415
- current_pnl = trade_data.get('pnl_percent', 0)
416
- current_sl = trade_data.get('stop_loss', 0)
417
- current_tp = trade_data.get('take_profit', 0)
418
-
419
- # --- 2. البيانات الفنية المحدثة (الحالية) ---
420
- current_price = current_data.get('current_price', 0)
421
- mc_data = current_data.get('monte_carlo_distribution', {})
422
- mc_prob = mc_data.get('probability_of_gain', 0)
423
- mc_expected_return = mc_data.get('expected_return_pct', 0)
424
 
425
- # --- 3. (V8.1) بيانات الأخبار المحدثة (الحالية) ---
426
- latest_news_text = current_data.get('latest_news_text', 'No news.')
427
- latest_news_score = current_data.get('latest_news_score', 0.0)
428
-
429
- # --- 4. (العقل) بيانات التعلم الإحصائي ---
430
- statistical_feedback = ""
431
- if self.learning_hub:
432
- statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(original_strategy)
433
-
434
- # --- 5. بناء أقسام الـ Prompt (الإنجليزية) ---
435
-
436
- playbook_prompt = f"""
437
- --- START OF LEARNING PLAYBOOK ---
 
 
 
 
 
 
 
 
 
438
  {learning_context}
439
- {statistical_feedback}
440
- --- END OF PLAYBOOK ---
441
- """
442
-
443
- trade_status_prompt = f"""
444
- 1. **Open Trade Status ({symbol}):**
445
- * Current PnL: {current_pnl:+.2f}%
446
- * Original Strategy: {original_strategy}
447
- * Original Reasoning: {original_reasoning}
448
- * Entry Price: {entry_price}
449
- * Current Price: {current_price}
450
- * Current StopLoss: {current_sl}
451
- * Current TakeProfit: {current_tp}
452
- """
453
 
454
- current_analysis_prompt = f"""
455
- 2. **Current Real-time Analysis:**
456
- * Monte Carlo (1h): {mc_prob * 100:.1f}% chance of profit (Expected: {mc_expected_return:.2f}%)
457
- * Latest News (VADER: {latest_news_score:.3f}): {latest_news_text[:300]}...
458
- """
459
-
460
- # (دمج جميع التعليمات في رسالة الـ user)
461
- task_prompt = f"""
462
- CONTEXT:
463
- You are an expert AI trading analyst (Reflector Brain).
464
- An open trade for {symbol} has triggered a mandatory re-analysis. Analyze the new data and decide the next action.
465
- {playbook_prompt}
466
-
467
- --- START OF TRADE DATA ---
468
- {trade_status_prompt}
469
- {current_analysis_prompt}
470
- --- END OF TRADE DATA ---
471
-
472
- TASK:
473
- 1. **Internal Thinking (Private):** Perform a step-by-step analysis (as triggered by the system prompt).
474
- * Compare the "Open Trade Status" with the "Current Real-time Analysis".
475
- * Has the situation improved or deteriorated? (e.g., PnL is good, but new Monte Carlo is negative).
476
- * Are there new critical news?
477
- * Consult the "Playbook" for learned rules and statistical feedback.
478
- 2. **Final Decision:** Based on your internal thinking, decide the best course of action (HOLD, UPDATE_TRADE, CLOSE_TRADE).
479
- 3. **Output Constraint:** Provide your final answer ONLY in the requested JSON object format, with no introductory text, markdown formatting, or explanations.
480
-
481
- OUTPUT (JSON Object ONLY):
482
  {{
483
- "action": "HOLD" or "UPDATE_TRADE" or "CLOSE_TRADE",
484
- "strategy": "MAINTAIN_CURRENT" or "ADAPTIVE_EXIT" or "IMMEDIATE_EXIT",
485
- "reasoning": "Brief justification (max 40 words) for the decision.",
486
- "new_stop_loss": (float or null, required if action is 'UPDATE_TRADE'),
487
- "new_take_profit": (float or null, required if action is 'UPDATE_TRADE')
488
  }}
489
  """
490
-
491
- return task_prompt
 
1
+ # LLM.py (V13.4 - The "Heavyweight" Omniscient Brain)
2
+ # استعادة كامل التفاصيل في البرومبتات لضمان عدم وجود أي اختصارات.
 
 
 
3
 
4
+ import os, traceback, json, re, time
5
+ from datetime import datetime
6
+ from typing import Dict, Any, Optional
7
  from openai import AsyncOpenAI, RateLimitError, APIError
8
 
9
+ # ==============================================================================
10
+ # 🔌 إعدادات الاتصال (مطابقة للأصل تماماً)
11
+ # ==============================================================================
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  LLM_API_URL = os.getenv("LLM_API_URL", "https://integrate.api.nvidia.com/v1")
13
+ LLM_API_KEY = os.getenv("LLM_API_KEY")
14
  LLM_MODEL = os.getenv("LLM_MODEL", "nvidia/llama-3.1-nemotron-ultra-253b-v1")
15
 
 
16
  LLM_TEMPERATURE = 0.2
17
  LLM_TOP_P = 0.7
18
  LLM_MAX_TOKENS = 16384
19
  LLM_FREQUENCY_PENALTY = 0.8
20
  LLM_PRESENCE_PENALTY = 0.5
 
 
21
  CLIENT_TIMEOUT = 300.0
22
 
23
  class LLMService:
24
  def __init__(self):
25
  if not LLM_API_KEY:
26
+ raise ValueError("❌ [LLM] LLM_API_KEY is missing!")
27
 
28
+ self.client = AsyncOpenAI(
29
+ base_url=LLM_API_URL,
30
+ api_key=LLM_API_KEY,
31
+ timeout=CLIENT_TIMEOUT
32
+ )
33
+ self.r2_service = None
34
+ self.learning_hub = None
 
 
 
 
 
 
 
 
 
35
 
36
+ print(f"🧠 [LLM V13.4] Heavyweight Brain Initialized: {LLM_MODEL}")
 
 
 
 
37
 
38
  async def _call_llm(self, prompt: str) -> Optional[str]:
39
+ """إرسال الطلب مع تفعيل وضع التفكير العميق بدقة"""
40
+ # ⚠️ هام: هذا الإعداد دقيق جداً لتفعيل قدرات Nemotron الخاصة
 
 
 
41
  system_prompt = "detailed thinking on"
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  try:
44
+ response = await self.client.chat.completions.create(
45
+ model=LLM_MODEL,
46
+ messages=[
47
+ {"role": "system", "content": system_prompt},
48
+ {"role": "user", "content": prompt}
49
+ ],
50
+ temperature=LLM_TEMPERATURE,
51
+ top_p=LLM_TOP_P,
52
+ max_tokens=LLM_MAX_TOKENS,
53
+ frequency_penalty=LLM_FREQUENCY_PENALTY,
54
+ presence_penalty=LLM_PRESENCE_PENALTY,
55
+ stream=False,
56
+ response_format={"type": "json_object"}
57
+ )
58
+ return response.choices[0].message.content
 
59
  except Exception as e:
60
+ print(f"❌ [LLM Call Error] {e}")
61
+ return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
+ def _parse_json_secure(self, text: str) -> Optional[Dict]:
64
+ """محلل JSON قوي يستخرج البيانات من أي نص"""
65
  try:
66
+ match = re.search(r'\{.*\}', text, re.DOTALL)
67
+ if match: return json.loads(match.group(0))
68
+ except: pass
69
+ return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
+ # ==================================================================
72
+ # 🧠 الوظيفة 1: قرار الدخول الاستراتيجي (تحليل شامل ومفصل)
73
+ # ==================================================================
74
  async def get_trading_decision(self, candidate_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
 
 
 
 
75
  symbol = candidate_data.get('symbol', 'UNKNOWN')
76
  try:
77
+ # جلب سياق التعلم السابق إن وجد
78
+ learning_context = "Playbook: No specific prior learning for this scenario."
79
  if self.learning_hub:
80
+ learning_context = await self.learning_hub.get_active_context_for_llm("general", f"{symbol} entry analysis")
 
 
 
 
 
 
 
 
 
81
 
82
+ # بناء البرومبت المفصل جداً
83
+ prompt = self._create_heavyweight_entry_prompt(candidate_data, learning_context)
84
+
85
+ # استدعاء النموذج
86
  response_text = await self._call_llm(prompt)
87
+ decision = self._parse_json_secure(response_text)
88
 
89
+ # حفظ نسخة طبق الأصل من الطلب والرد للتدقيق
90
+ if self.r2_service and response_text:
91
+ await self.r2_service.save_llm_prompt_async(symbol, "entry_decision_full", prompt, response_text)
 
 
 
 
 
92
 
93
+ return decision
94
  except Exception as e:
95
+ print(f"❌ [LLM Entry Error] {symbol}: {e}")
96
  traceback.print_exc()
97
+ return None
98
 
99
+ # ==================================================================
100
+ # 🔄 الوظيفة 2: إعادة التحليل الدوري (مراجعة شاملة للوضع)
101
+ # ==================================================================
102
  async def re_analyze_trade_async(self, trade_data: Dict[str, Any], current_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
 
 
 
 
103
  symbol = trade_data.get('symbol', 'UNKNOWN')
104
  try:
105
+ strategy = trade_data.get('entry_reason', 'GENERIC')
106
+ learning_context = "Playbook: Maintain original strategy unless validated invalidation occurs."
107
  if self.learning_hub:
108
+ learning_context = await self.learning_hub.get_active_context_for_llm("strategy", f"{symbol} re-eval {strategy}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
+ prompt = self._create_heavyweight_reanalysis_prompt(trade_data, current_data, learning_context)
 
111
 
 
 
 
 
112
  response_text = await self._call_llm(prompt)
113
+ decision = self._parse_json_secure(response_text)
114
 
115
+ if self.r2_service and response_text:
116
+ await self.r2_service.save_llm_prompt_async(symbol, "re_analysis_full", prompt, response_text)
 
 
 
 
 
 
117
 
118
+ return decision
119
  except Exception as e:
120
+ print(f"❌ [LLM Re-Eval Error] {symbol}: {e}")
121
+ return None
 
 
 
 
122
 
123
+ # ==================================================================
124
+ # 📝 قسم هندسة البرومبتات (تفاصيل كاملة بدون اختصارات)
125
+ # ==================================================================
126
+ def _create_heavyweight_entry_prompt(self, data: Dict[str, Any], learning_context: str) -> str:
127
  """
128
+ إنشاء برومبت ضخم يحتوي على كل شاردة وواردة من البيانات المتاحة.
 
 
129
  """
130
+ symbol = data.get('symbol')
131
+ current_price = data.get('current_price')
132
 
133
+ # 1. تفاصيل الطبقات السابقة (التحليل الفني والكمي)
134
+ titan_score = data.get('titan_details', {}).get('score', 0)
135
+ titan_trend = "STRONG_UP" if titan_score > 0.7 else "UP" if titan_score > 0.5 else "WEAK"
 
 
 
 
 
 
 
 
136
 
137
+ pat_details = data.get('pattern_details', {})
138
+ pat_name = pat_details.get('pattern_detected', 'None')
139
+ pat_conf = pat_details.get('pattern_confidence', 0)
 
140
 
141
+ mc_score = data.get('components', {}).get('mc_score', 0)
142
+ l1_total = data.get('enhanced_final_score', 0)
143
+ l2_total = data.get('layer2_score', 0)
144
+
145
+ # 2. تفاصيل بيانات الحيتان (كاملة)
146
+ whale = data.get('whale_data', {})
147
+ whale_1h = whale.get('exchange_flows', {})
148
+ whale_24h = whale.get('accumulation_analysis_24h', {})
 
 
 
 
 
 
 
 
149
 
150
+ whale_section = f"""
151
+ - 1H Net Flow to Exchanges: ${whale_1h.get('net_flow_usd', 0):,.2f}
152
+ - 1H Deposits: {whale_1h.get('deposit_count', 0)} | Withdrawals: {whale_1h.get('withdrawal_count', 0)}
153
+ - 24H Accumulation Flow: ${whale_24h.get('net_flow_usd', 0):,.2f}
154
+ - 24H Whale Transaction Count: {whale_24h.get('whale_transfers_count', 0)}
155
+ - Relative Flow Impact (24H): {whale_24h.get('relative_net_flow_percent', 0):.4f}%
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  """
157
+
158
+ # 3. تفاصيل الأخبار (النص الخام الكامل)
159
+ news_text = data.get('news_text', 'No specific news available for this asset currently.')
160
+
161
+ # 4. لقطة السوق (Price Action Snapshot)
162
+ ohlcv = data.get('ohlcv_sample', {})
163
+ price_section = ""
164
+ for tf, candle in ohlcv.items():
165
+ if candle:
166
+ # [Timestamp, Open, High, Low, Close, Volume]
167
+ price_section += f" - {tf.upper()}: Open={candle[1]}, High={candle[2]}, Low={candle[3]}, Close={candle[4]}, Vol={candle[5]}\n"
168
+
169
+ return f"""
170
+ YOU ARE THE OMNISCIENT BRAIN. A skeptical, master-level crypto trading AI.
171
+ Your goal is to validate the findings of your sub-systems and make the FINAL GO/NO-GO decision for {symbol}.
172
+ Current Price: {current_price}
173
+
174
+ ========== 🧠 PART 1: SUB-SYSTEM REPORTS (PRELIMINARY ANALYSIS) ==========
175
+ Your subordinate systems have flagged this asset with the following scores:
176
+ * Layer 1 Technical Score: {l1_total:.4f} / 1.0
177
+ - Titan ML Trend Model: {titan_score:.4f} ({titan_trend})
178
+ - Chart Pattern Recognition: {pat_name} (Confidence: {pat_conf:.2f})
179
+ - Monte Carlo Probability (1H): {mc_score:.4f}
180
+ * Layer 2 Enhanced Score: {l2_total:.4f} / 1.0 (After initial whale/news weighting)
181
+
182
+ ========== 🔍 PART 2: RAW EVIDENCE FOR VERIFICATION (THE TRUTH) ==========
183
+ Do not trust the scores above blindly. Verify them against this raw data:
184
+
185
+ [A] RAW PRICE ACTION SNAPSHOT (OHLCV Last Closed Candles):
186
+ {price_section}
187
+ -> TASK: Does this price action confirm the 'Titan Trend' reported above?
188
+
189
+ [B] RAW WHALE ON-CHAIN ACTIVITY:
190
+ {whale_section}
191
+ -> TASK: Is there hidden distribution (selling) despite the technical uptrend?
192
+
193
+ [C] RAW NEWSWIRE FEED (Latest Headlines & Summaries):
194
+ \"\"\"{news_text}\"\"\"
195
+ -> TASK: Are there any immediate red flags, FUD, or regulatory risks in this text?
196
+
197
+ ========== 📖 PART 3: INSTITUTIONAL MEMORY (LEARNING PLAYBOOK) ==========
198
+ {learning_context}
199
+
200
+ ========== 🛑 FINAL DECISION TASK ==========
201
+ Perform a deep, step-by-step internal analysis (triggered by your system mode).
202
+ Compare PART 1 (Opinions) vs PART 2 (Facts).
203
+ If FACTS contradict OPINIONS, you MUST reject the trade.
204
+
205
+ REQUIRED OUTPUT (Strict JSON format ONLY):
206
  {{
207
+ "action": "WATCH" or "IGNORE",
208
+ "confidence_level": 0.00 to 1.00,
209
+ "reasoning": "A rigorous, professional justification citing specific raw evidence (e.g., 'Whale 1H inflows of $5M contradict Titan trend').",
210
+ "strategy_directive": "MOMENTUM_BREAKOUT" or "DIP_ACCUMULATION" or "SCALP_REVERSAL",
211
+ "key_risk_factor": "Identify the single biggest risk based on raw evidence."
212
  }}
213
  """
 
 
 
 
 
214
 
215
+ def _create_heavyweight_reanalysis_prompt(self, trade: Dict, current: Dict, learning_context: str) -> str:
 
 
 
216
  """
217
+ إنشاء برومبت مفصل لإعادة تقييم صفقة مفتوحة بناءً على تغير الظروف.
 
 
218
  """
219
+ symbol = trade.get('symbol')
220
+ entry_price = trade.get('entry_price')
221
+ current_price = current.get('current_price')
222
+ pnl_pct = ((current_price - entry_price) / entry_price) * 100
223
+ duration_min = (datetime.now() - datetime.fromisoformat(trade.get('entry_time').replace('Z', ''))).total_seconds() / 60
224
+
225
+ # البيانات الحالية المقارنة
226
+ titan_now = current.get('titan_score', 0)
227
 
228
+ whale_now = current.get('whale_data', {})
229
+ whale_1h_net = whale_now.get('exchange_flows', {}).get('net_flow_usd', 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
230
 
231
+ news_now = current.get('news_text', 'No new significant news.')
232
+
233
+ return f"""
234
+ ROLE: Omniscient Brain (Trade Guardian Mode).
235
+ EVENT: Mandatory periodic re-evaluation of OPEN POSITION.
236
+ ASSET: {symbol}
237
+ TIME IN TRADE: {duration_min:.1f} minutes
238
+
239
+ ========== 📉 POSITION STATUS ==========
240
+ * Entry Price: {entry_price}
241
+ * Current Price: {current_price}
242
+ * Unrealized PnL: {pnl_pct:+.2f}%
243
+ * Original Entry Reason: "{trade.get('entry_reason')}"
244
+
245
+ ========== 🆕 CHANGED MARKET CONDITIONS (RAW DATA) ==========
246
+ 1. ML Trend Update (Titan): Currently {titan_now:.4f}
247
+ 2. Fresh Whale Activity (Last 1H): Net Flow ${whale_1h_net:,.0f}
248
+ (Positive = potential selling pressure, Negative = accumulation)
249
+ 3. Latest News Update:
250
+ \"\"\"{news_now[:1000]}\"\"\"
251
+
252
+ ========== 📖 PLAYBOOK GUIDELINES ==========
253
  {learning_context}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
 
255
+ ========== 🛡️ GUARDIAN DECISION ==========
256
+ Analyze if the original investment thesis is still valid based on the NEW raw data.
257
+ Output Strict JSON ONLY:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
258
  {{
259
+ "action": "HOLD" or "EMERGENCY_EXIT" or "UPDATE_TARGETS",
260
+ "suggested_new_tp": null or float value,
261
+ "suggested_new_sl": null or float value,
262
+ "reasoning": "Professional assessment of current risk vs original thesis."
 
263
  }}
264
  """
265
+
266
+ print("✅ LLM Service V13.4 (Heavyweight Omniscient Brain) Loaded - NO SHORTCUTS")