#!/usr/bin/env python3
"""
LLM Handler - 处理大模型调用逻辑
"""
import os
import json
import logging
import requests
import re
from typing import Dict, Any, Optional

logger = logging.getLogger(__name__)


def load_prompt_template() -> str:
    """Load the prompt template text from disk.

    Prefers ``prompt_template_v2.txt`` (the partner template with JSON output
    format) in the parent directory of this module, falling back to the
    original ``prompt_template.txt``. If neither file can be read, a minimal
    built-in prompt is returned so callers always receive a usable template.

    Returns:
        The template file contents, or the built-in fallback prompt string.
    """
    try:
        base_dir = os.path.dirname(os.path.dirname(__file__))
        # Prefer the v2 template; fall back to the original file name.
        candidate = os.path.join(base_dir, 'prompt_template_v2.txt')
        if not os.path.exists(candidate):
            candidate = os.path.join(base_dir, 'prompt_template.txt')

        with open(candidate, 'r', encoding='utf-8') as fh:
            return fh.read()
    except Exception as e:
        # Best-effort: never propagate; log and hand back a minimal prompt.
        logger.error(f"❌ 加载 Prompt 模板失败: {e}")
        return "你是一个用户行为分析专家。根据浏览器事件数据，分析用户的真实意图。"


def _build_user_prompt(event_data: Dict[str, Any], prompt_template: str) -> str:
    """Render the user prompt by filling the template with event fields.

    Every field is read defensively with a default so the template can always
    be formatted even for sparse event payloads.

    Args:
        event_data: Raw event payload from the browser extension.
        prompt_template: Template containing the named ``{...}`` placeholders.

    Returns:
        The fully formatted user prompt string.
    """
    # Basic event fields
    event_type = event_data.get('eventType', 'unknown')
    timestamp = event_data.get('timestamp', 0)
    url = event_data.get('url', '')
    title = event_data.get('title', event_data.get('tabTitle', ''))
    context_id = event_data.get('contextId', '')

    # Global page structure
    global_structure = event_data.get('globalStructure', {})
    semantic_tree = global_structure.get('semanticTree', '')
    viewport = global_structure.get('viewport', {})
    viewport_str = f"{viewport.get('width', 0)}x{viewport.get('height', 0)}"

    # Interaction context (the element the user acted on)
    interaction_context = event_data.get('interactionContext', {})
    target_node_id = interaction_context.get('targetNodeId', '')
    selector = interaction_context.get('selector', '')
    bounding_rect = interaction_context.get('boundingRect', {})
    position = f"({bounding_rect.get('x', 0)}, {bounding_rect.get('y', 0)})"
    size = f"{bounding_rect.get('width', 0)}x{bounding_rect.get('height', 0)}"

    # DOM mutation context; cap at 5 entries to bound the prompt size
    mutation_context = event_data.get('mutationContext', {})
    mutations = mutation_context.get('mutations', [])
    mutations_str = "".join(
        f"- {mut.get('type', 'unknown')}: {mut.get('nodeId', '')} "
        f"({mut.get('relationToTarget', 'unknown')})\n"
        for mut in mutations[:5]
    )

    event_data_json = json.dumps(
        event_data.get('eventData', {}), indent=2, ensure_ascii=False
    )

    # The template already contains the output-format requirements.
    return prompt_template.format(
        action=event_type,
        time=timestamp,
        url=url,
        title=title,
        context_id=context_id,
        viewport=viewport_str,
        semantic_tree=semantic_tree[:2000] if semantic_tree else "(empty)",  # cap length
        target_node_id=target_node_id,
        selector=selector,
        position=position,
        size=size,
        mutations_count=len(mutations),
        mutations=mutations_str if mutations_str else "(no mutations)",
        event_data=event_data_json
    )


def _parse_llm_json(content: str) -> Optional[Dict[str, Any]]:
    """Extract the first ``{...}`` span from the model reply and parse it.

    Args:
        content: Raw text content returned by the model.

    Returns:
        The parsed JSON object, or None when the reply contains no valid JSON.
    """
    json_match = re.search(r'\{[\s\S]*\}', content)
    if json_match:
        try:
            return json.loads(json_match.group())
        except json.JSONDecodeError:
            pass
    # BUG FIX: the original only warned when json.loads raised; a reply with
    # no braces at all fell through and returned None silently. Both failure
    # modes now log the same warning.
    logger.warning(f"⚠️  LLM 返回非 JSON: {content[:100]}")
    return None


def call_llm_api(event_data: Dict[str, Any], prompt_template: str) -> Optional[Dict[str, Any]]:
    """Call the LLM API to analyse a user-behaviour event.

    Args:
        event_data: Event payload; see ``_build_user_prompt`` for the fields
            that are read (all optional).
        prompt_template: Prompt template used both as the system message and,
            after placeholder substitution, as the user message.

    Returns:
        Parsed analysis dict (expected keys: behavior_type,
        behavior_description, confidence), or None on any failure
        (missing API key, HTTP error, timeout, unparseable reply).
    """
    try:
        user_prompt = _build_user_prompt(event_data, prompt_template)

        # LLM configuration from environment (OpenAI-compatible endpoint).
        # NOTE: the original also read LLM_PROVIDER but never used it; removed.
        llm_url = os.getenv('LLM_API_URL', 'https://api.deepseek.com/v1/chat/completions')
        llm_model = os.getenv('LLM_MODEL', 'deepseek-chat')
        llm_api_key = os.getenv('LLM_API_KEY', '')

        if not llm_api_key:
            logger.warning("⚠️  LLM_API_KEY 未配置")
            return None

        response = requests.post(
            llm_url,
            headers={
                'Authorization': f'Bearer {llm_api_key}',
                'Content-Type': 'application/json'
            },
            json={
                'model': llm_model,
                'messages': [
                    # NOTE(review): the raw (unfilled) template is sent as the
                    # system message and the filled one as the user message —
                    # preserved from the original; confirm this duplication is
                    # intentional.
                    {'role': 'system', 'content': prompt_template},
                    {'role': 'user', 'content': user_prompt}
                ],
                'temperature': 0.7
            },
            timeout=10
        )

        if response.status_code != 200:
            logger.error(f"❌ LLM API 调用失败: {response.status_code}")
            return None

        content = response.json()['choices'][0]['message']['content']
        return _parse_llm_json(content)

    except Exception as e:
        # Catch-all boundary: network errors, timeouts, malformed response
        # bodies (KeyError/IndexError) all degrade to None.
        logger.error(f"❌ LLM 调用异常: {e}")
        return None

