#!/usr/bin/env python3
"""
LLM Client - 大模型调用客户端
支持多种 LLM Provider (OpenAI, Claude, 通义千问等)
"""

import os
import json
import logging
from typing import Dict, Any, Optional
import requests
from datetime import datetime

logger = logging.getLogger(__name__)

# LLM 配置
LLM_PROVIDER = os.getenv('LLM_PROVIDER', 'openai')  # openai, claude, qianwen, deepseek, ollama
LLM_API_KEY = os.getenv('LLM_API_KEY', '')
LLM_API_URL = os.getenv('LLM_API_URL', '')
LLM_MODEL = os.getenv('LLM_MODEL', 'gpt-4o-mini')
LLM_TIMEOUT = int(os.getenv('LLM_TIMEOUT', '30'))
LLM_MAX_RETRIES = int(os.getenv('LLM_MAX_RETRIES', '2'))


class LLMClient:
    """Large-language-model client.

    Dispatches chat-completion-style requests to the provider selected via
    the module-level LLM_* environment configuration (OpenAI, Claude,
    Qianwen/DashScope, DeepSeek, or a local Ollama instance) and normalizes
    the response into a behavior-analysis dict.
    """

    def __init__(self):
        self.provider = LLM_PROVIDER
        self.api_key = LLM_API_KEY
        # Fall back to the provider's well-known endpoint when none is configured.
        self.api_url = LLM_API_URL or self._get_default_url()
        self.model = LLM_MODEL
        self.timeout = LLM_TIMEOUT
        self.max_retries = LLM_MAX_RETRIES

        if not self.api_key:
            logger.warning("⚠️  LLM_API_KEY 未配置，将使用规则引擎")

    def _get_default_url(self) -> str:
        """Return the default API URL for the configured provider ('' if unknown)."""
        defaults = {
            'openai': 'https://api.openai.com/v1/chat/completions',
            'claude': 'https://api.anthropic.com/v1/messages',
            'qianwen': 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation',
            'deepseek': 'https://api.deepseek.com/v1/chat/completions',
            'ollama': 'http://localhost:11434/v1/chat/completions',
        }
        return defaults.get(self.provider, '')

    def is_available(self) -> bool:
        """Check whether the client is configured well enough to be called."""
        # A local Ollama server does not require a real API key.
        if self.provider == 'ollama':
            return bool(self.api_url)
        return bool(self.api_key and self.api_url)

    def analyze_event(self, event_data: Dict[str, Any], prompt_template: str) -> Optional[Dict[str, Any]]:
        """
        Analyze an event with the LLM.

        Args:
            event_data: Raw event payload (see _build_prompt for the fields read).
            prompt_template: str.format template filled with the extracted fields.

        Returns:
            Analysis dict containing at least 'behavior_type' and
            'behavior_description', or None when the client is not
            configured or the call/parsing fails.
        """
        if not self.is_available():
            return None

        try:
            # Build the full prompt, call the provider, normalize the result.
            prompt = self._build_prompt(event_data, prompt_template)
            response = self._call_llm(prompt)
            return self._parse_response(response)

        except Exception as e:
            # Callers treat None as "fall back to the rule engine".
            logger.error(f"❌ LLM 分析失败: {e}")
            return None

    def _build_prompt(self, event_data: Dict[str, Any], template: str) -> str:
        """Build the prompt (kept consistent with flink-agents-processor)."""
        # Top-level event fields.
        event_type = event_data.get('eventType', 'unknown')
        timestamp = event_data.get('timestamp', 0)
        url = event_data.get('url', '')
        title = event_data.get('title', event_data.get('tabTitle', ''))
        context_id = event_data.get('contextId', '')

        # Layer 1: semantic tree / global page structure.
        global_structure = event_data.get('globalStructure', {})
        semantic_tree = global_structure.get('semanticTree', '')
        viewport = global_structure.get('viewport', {})
        viewport_str = f"{viewport.get('width', 0)}x{viewport.get('height', 0)}"

        # Layer 2: interaction target.
        interaction_context = event_data.get('interactionContext', {})
        target_node_id = interaction_context.get('targetNodeId', '')
        selector = interaction_context.get('selector', '')
        bounding_rect = interaction_context.get('boundingRect', {})
        position = f"({bounding_rect.get('x', 0)}, {bounding_rect.get('y', 0)})"
        size = f"{bounding_rect.get('width', 0)}x{bounding_rect.get('height', 0)}"

        # Layer 3: DOM mutations triggered by the interaction.
        mutation_context = event_data.get('mutationContext', {})
        mutations = mutation_context.get('mutations', [])
        mutations_count = len(mutations)

        mutations_str = ""
        if mutations:
            for mut in mutations[:5]:  # cap at 5 mutations to bound prompt size
                mutations_str += f"- {mut.get('type', 'unknown')}: {mut.get('nodeId', '')} ({mut.get('relationToTarget', 'unknown')})\n"

        # Raw event payload, pretty-printed.
        event_data_json = json.dumps(event_data.get('eventData', {}), indent=2, ensure_ascii=False)

        # Fill the template.
        prompt = template.format(
            action=event_type,
            time=timestamp,
            url=url,
            title=title,
            context_id=context_id,
            viewport=viewport_str,
            semantic_tree=semantic_tree[:2000] if semantic_tree else "(empty)",  # bound length
            target_node_id=target_node_id,
            selector=selector,
            position=position,
            size=size,
            mutations_count=mutations_count,
            mutations=mutations_str if mutations_str else "(no mutations)",
            event_data=event_data_json
        )

        return prompt

    def _call_llm(self, prompt: str) -> str:
        """Call the LLM API for the configured provider, with retries.

        Raises:
            ValueError: if the provider is unsupported (not retried — a
                misconfiguration can never succeed on retry).
            Exception: the last transport/API error when all attempts fail.
        """
        dispatch = {
            'openai': self._call_openai_compatible,
            'deepseek': self._call_openai_compatible,
            'ollama': self._call_openai_compatible,
            'claude': self._call_claude,
            'qianwen': self._call_qianwen,
        }
        call = dispatch.get(self.provider)
        if call is None:
            raise ValueError(f"Unsupported provider: {self.provider}")

        # Bug fix: with LLM_MAX_RETRIES <= 0 the original loop body never ran
        # and this method implicitly returned None, which then crashed in
        # _parse_response. Always make at least one attempt.
        attempts = max(1, self.max_retries)
        for attempt in range(attempts):
            try:
                return call(prompt)
            except Exception as e:
                if attempt < attempts - 1:
                    logger.warning(f"⚠️  LLM 调用失败 (尝试 {attempt + 1}/{self.max_retries}): {e}")
                else:
                    raise

    def _call_openai_compatible(self, prompt: str) -> str:
        """Call an OpenAI-compatible chat-completions API (OpenAI, DeepSeek, Ollama)."""
        headers = {
            'Content-Type': 'application/json'
        }

        # A local Ollama server does not need an Authorization header.
        if self.api_key and self.provider != 'ollama':
            headers['Authorization'] = f'Bearer {self.api_key}'

        payload = {
            'model': self.model,
            'messages': [
                {'role': 'user', 'content': prompt}
            ],
            'temperature': 0.3,
            'max_tokens': 500
        }

        response = requests.post(
            self.api_url,
            headers=headers,
            json=payload,
            timeout=self.timeout
        )
        response.raise_for_status()

        result = response.json()
        return result['choices'][0]['message']['content']

    def _call_claude(self, prompt: str) -> str:
        """Call the Anthropic Claude Messages API."""
        headers = {
            'x-api-key': self.api_key,
            'anthropic-version': '2023-06-01',
            'Content-Type': 'application/json'
        }

        payload = {
            'model': self.model,
            'messages': [
                {'role': 'user', 'content': prompt}
            ],
            'max_tokens': 500,
            'temperature': 0.3
        }

        response = requests.post(
            self.api_url,
            headers=headers,
            json=payload,
            timeout=self.timeout
        )
        response.raise_for_status()

        result = response.json()
        return result['content'][0]['text']

    def _call_qianwen(self, prompt: str) -> str:
        """Call the Qianwen (DashScope) text-generation API."""
        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }

        payload = {
            'model': self.model,
            'input': {
                'messages': [
                    {'role': 'user', 'content': prompt}
                ]
            },
            'parameters': {
                'temperature': 0.3,
                'max_tokens': 500
            }
        }

        response = requests.post(
            self.api_url,
            headers=headers,
            json=payload,
            timeout=self.timeout
        )
        response.raise_for_status()

        result = response.json()
        return result['output']['text']

    def _parse_response(self, response: str) -> Dict[str, Any]:
        """Parse the LLM response (expected JSON, possibly fenced in markdown)."""
        try:
            response = response.strip()

            # Strip markdown code fences the model may wrap the JSON in.
            if '```json' in response:
                response = response.split('```json')[1].split('```')[0].strip()
            elif '```' in response:
                response = response.split('```')[1].split('```')[0].strip()

            result = json.loads(response)

            # Both fields are required by downstream consumers.
            if 'behavior_type' not in result or 'behavior_description' not in result:
                raise ValueError("Missing required fields in LLM response")

            # Tag which backend produced the analysis.
            result['analysis_method'] = f'llm_{self.provider}'

            # Default confidence for a well-formed response.
            if 'confidence' not in result:
                result['confidence'] = 0.9

            return result

        except (json.JSONDecodeError, ValueError, KeyError) as e:
            logger.warning(f"⚠️  Failed to parse LLM JSON response, using fallback: {e}")

            # Degraded path: use the raw text as the description and infer
            # a coarse behavior type from keywords.
            description = response.strip()
            lowered = description.lower()

            behavior_type = 'interaction'  # default
            if any(kw in lowered for kw in ['star', 'follow', 'like', '点赞', '关注', '收藏']):
                behavior_type = 'social_action'
            elif any(kw in lowered for kw in ['submit', 'post', 'create', '提交', '创建', '发布']):
                behavior_type = 'content_creation'
            elif any(kw in lowered for kw in ['search', 'query', '搜索', '查询']):
                behavior_type = 'search'
            elif any(kw in lowered for kw in ['scroll', 'browse', '滚动', '浏览']):
                behavior_type = 'navigation'

            return {
                'behavior_type': behavior_type,
                'behavior_description': description,
                # Bug fix: this fallback previously claimed confidence 0.95 —
                # higher than the 0.9 default for a successfully parsed
                # response. A keyword guess deserves low confidence.
                'confidence': 0.5,
                'analysis_method': f'llm_{self.provider}'
            }
