"""
Ollama本地LLM服务
提供基于Ollama的本地模型调用功能
"""

import httpx
import json
import asyncio
from typing import Dict, List, Any, Optional, AsyncGenerator
from datetime import datetime
import logging
import sys
import os

# Make the project root importable so that `config.ollama_config`
# resolves regardless of the working directory this module runs from.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))

try:
    from config.ollama_config import ollama_config
except ImportError:
    # Config module not present: fall back to built-in defaults so the
    # service still works out of the box.
    class DefaultOllamaConfig:
        """Minimal stand-in for `config.ollama_config.ollama_config`."""
        BASE_URL = "http://localhost:11434"  # default local Ollama endpoint
        DEFAULT_MODEL = "qwen2.5:0.5b"
        TIMEOUT = 60  # request timeout, seconds
        DEFAULT_TEMPERATURE = 0.7
        DEFAULT_MAX_TOKENS = 2000
        JH_SYSTEM_PROMPT = "你是一个专业的AI求职助手。"
        
        def get_jh_config(self):
            """Return the JH-subsystem config dict (system prompt only)."""
            return {"system_prompt": self.JH_SYSTEM_PROMPT}
    
    ollama_config = DefaultOllamaConfig()

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

class OllamaService:
    """Async client for a local Ollama LLM server.

    Wraps the Ollama HTTP API (``/api/tags`` and ``/api/chat``) with
    availability/model checks plus non-streaming and streaming chat calls.
    """

    def __init__(
        self,
        base_url: Optional[str] = None,
        model_name: Optional[str] = None,
        timeout: Optional[int] = None
    ):
        """Create the client.

        Args:
            base_url: Ollama server URL; defaults to ``ollama_config.BASE_URL``.
            model_name: model tag to use; defaults to ``ollama_config.DEFAULT_MODEL``.
            timeout: request timeout in seconds; defaults to ``ollama_config.TIMEOUT``.
        """
        # Fall back to the shared config for any argument left as None.
        self.base_url = (base_url or ollama_config.BASE_URL).rstrip('/')
        self.model_name = model_name or ollama_config.DEFAULT_MODEL
        self.timeout = timeout or ollama_config.TIMEOUT
        self.client = httpx.AsyncClient(timeout=self.timeout)

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Release the underlying HTTP connection pool.
        await self.client.aclose()

    async def is_available(self) -> bool:
        """Return True if the Ollama server answers ``/api/tags`` with HTTP 200."""
        try:
            response = await self.client.get(f"{self.base_url}/api/tags")
            return response.status_code == 200
        except Exception as e:
            logger.error(f"Ollama服务连接失败: {e}")
            return False

    async def list_models(self) -> List[Dict[str, Any]]:
        """Return the list of installed models, or ``[]`` on any error."""
        try:
            response = await self.client.get(f"{self.base_url}/api/tags")
            response.raise_for_status()
            data = response.json()
            return data.get("models", [])
        except Exception as e:
            logger.error(f"获取模型列表失败: {e}")
            return []

    async def check_model_exists(self, model_name: Optional[str] = None) -> bool:
        """Return True if the given model (default: ``self.model_name``) is installed.

        NOTE: ``/api/tags`` reports full names including the tag
        (e.g. ``"qwen2.5:0.5b"``), so the comparison is exact-match.
        """
        model = model_name or self.model_name
        models = await self.list_models()
        return model in (m.get("name", "") for m in models)

    @staticmethod
    def _format_messages(
        messages: List[Dict[str, str]],
        system_prompt: Optional[str] = None
    ) -> List[Dict[str, str]]:
        """Build the ``/api/chat`` message array.

        The optional system prompt goes first; each history entry is
        normalized with role defaulting to ``"user"`` and content to ``""``.
        """
        formatted: List[Dict[str, str]] = []
        if system_prompt:
            formatted.append({"role": "system", "content": system_prompt})
        for msg in messages:
            formatted.append({
                "role": msg.get("role", "user"),
                "content": msg.get("content", "")
            })
        return formatted

    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        stream: bool = False,
        temperature: float = 0.7,
        max_tokens: int = 2000,
        system_prompt: Optional[str] = None
    ) -> Dict[str, Any]:
        """Call ``/api/chat`` and return the normalized response dict.

        Args:
            messages: chat history as ``{"role": ..., "content": ...}`` dicts.
            stream: if True, return the raw httpx response under ``"response"``.
            temperature: sampling temperature.
            max_tokens: generation cap (Ollama's ``num_predict`` option).
            system_prompt: optional system message prepended to the history.

        Returns:
            For ``stream=False``: dict with ``message``, ``done`` and timing/
            token counters. For ``stream=True``: ``{"response": <httpx.Response>}``.

        Raises:
            Re-raises any HTTP or parsing error after logging it.
        """
        try:
            payload = {
                "model": self.model_name,
                "messages": self._format_messages(messages, system_prompt),
                "stream": stream,
                "options": {
                    "temperature": temperature,
                    # Ollama's name for the max-new-tokens limit.
                    "num_predict": max_tokens
                }
            }

            response = await self.client.post(
                f"{self.base_url}/api/chat",
                json=payload,
                headers={"Content-Type": "application/json"}
            )
            response.raise_for_status()

            if stream:
                # NOTE(review): client.post() has already buffered the whole
                # NDJSON body at this point, so this branch is NOT truly
                # incremental. Kept for interface compatibility; prefer
                # generate_stream() for real streaming.
                return {"response": response}
            else:
                data = response.json()
                # /api/chat returns the reply under a "message" object.
                return {
                    "message": data.get("message", {"content": ""}),
                    "done": data.get("done", True),
                    "total_duration": data.get("total_duration", 0),
                    "load_duration": data.get("load_duration", 0),
                    "prompt_eval_count": data.get("prompt_eval_count", 0),
                    "eval_count": data.get("eval_count", 0)
                }

        except Exception as e:
            logger.error(f"聊天补全请求失败: {e}")
            raise

    async def generate_stream(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: int = 2000
    ) -> AsyncGenerator[str, None]:
        """Yield reply text chunks from ``/api/chat`` as they arrive.

        Malformed NDJSON lines are skipped; iteration stops at the first
        chunk with ``"done": true``. Any HTTP error is logged and re-raised.
        """
        try:
            payload = {
                "model": self.model_name,
                "messages": self._format_messages(messages, system_prompt),
                "stream": True,
                "options": {
                    "temperature": temperature,
                    "num_predict": max_tokens
                }
            }

            async with self.client.stream(
                "POST",
                f"{self.base_url}/api/chat",
                json=payload,
                headers={"Content-Type": "application/json"}
            ) as response:
                response.raise_for_status()

                async for line in response.aiter_lines():
                    if not line.strip():
                        continue
                    try:
                        data = json.loads(line)
                    except json.JSONDecodeError:
                        # Skip malformed lines rather than aborting the stream.
                        continue
                    # /api/chat streams chunks under message.content.
                    msg = data.get("message")
                    if isinstance(msg, dict):
                        content = msg.get("content")
                        if content:
                            yield content
                    if data.get("done", False):
                        break

        except Exception as e:
            logger.error(f"流式生成失败: {e}")
            raise

class JHChatService:
    """Chat service for the JH subsystem.

    Routes user questions to an :class:`OllamaService`, enriches them with
    chart/filter/job context, and keeps a separate conversation history for
    each chart.
    """

    def __init__(self, ollama_service: "OllamaService"):
        self.ollama = ollama_service
        # Per-chart conversation histories: {chart_id: [message dicts]}.
        self.chart_conversations: Dict[str, List[Dict[str, str]]] = {}
        # Chart whose history is used when chat() is called without chart_id.
        self.current_chart_id: Optional[str] = None

    def _get_system_prompt(self) -> str:
        """Return the JH-subsystem system prompt.

        BUGFIX: the original unconditionally imported
        ``config.ollama_config.get_jh_config``, which raised ImportError
        exactly when the module-level fallback config was in use. Fall back
        to the module-level ``ollama_config`` object instead.
        """
        try:
            from config.ollama_config import get_jh_config
            jh_config = get_jh_config()
        except ImportError:
            jh_config = ollama_config.get_jh_config()
        return jh_config.get("system_prompt", "你是一个专业的AI求职助手。")

    async def chat(
        self,
        user_message: str,
        context_data: Optional[Dict[str, Any]] = None,
        stream: bool = False,
        chart_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """Send a user message to the AI assistant and return its reply.

        Args:
            user_message: the raw question from the user.
            context_data: optional chart/filter/job context merged into the
                message sent to the model (the stored history keeps the raw text).
            stream: if True, consume the streaming API and return the full text.
            chart_id: switch the active per-chart history before answering.

        Returns:
            Dict with ``response``, ``timestamp`` and either ``token_usage``
            (non-stream) or ``stream: True``; on failure a dict with an
            apology ``response`` plus ``error`` — never raises.
        """
        try:
            if chart_id:
                self.current_chart_id = chart_id

            conversation_history = self._get_chart_conversation_history(self.current_chart_id)

            # Store the raw user message (without context decoration).
            conversation_history.append({
                "role": "user",
                "content": user_message
            })

            enhanced_message = self._enhance_message_with_context(user_message, context_data)

            # Keep the last 10 turns (20 messages). Copy the dicts so the
            # context injection below does not mutate the stored history —
            # the original mutated the shared dict, permanently overwriting
            # the saved user message.
            messages = [dict(m) for m in conversation_history[-20:]]
            if enhanced_message != user_message:
                messages[-1]["content"] = enhanced_message

            if stream:
                return await self._handle_stream_response(messages)

            response = await self.ollama.chat_completion(
                messages=messages,
                system_prompt=self._get_system_prompt(),
                temperature=0.7,
                max_tokens=2000
            )

            # Extract the assistant reply, with a user-facing fallback text.
            ai_message = response.get("message", {}).get("content", "抱歉，我暂时无法回答您的问题。请稍后再试。")

            conversation_history.append({
                "role": "assistant",
                "content": ai_message
            })

            return {
                "response": ai_message,
                "timestamp": datetime.now().isoformat(),
                "token_usage": {
                    "prompt_tokens": response.get("prompt_eval_count", 0),
                    "completion_tokens": response.get("eval_count", 0),
                    "total_duration": response.get("total_duration", 0)
                }
            }

        except Exception as e:
            logger.error(f"聊天服务错误: {e}")
            return {
                "response": "抱歉，服务暂时不可用，请稍后再试。",
                "error": str(e),
                "timestamp": datetime.now().isoformat()
            }

    async def _handle_stream_response(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Consume the streaming reply fully and record it in the history.

        BUGFIX: the original appended to ``self.conversation_history``, an
        attribute that is never created (``__init__`` only defines
        ``chart_conversations``), so every streamed reply crashed with
        AttributeError. Append to the current chart's history instead.
        """
        full_response = ""
        async for chunk in self.ollama.generate_stream(
            messages=messages,
            system_prompt=self._get_system_prompt()
        ):
            full_response += chunk

        self._get_chart_conversation_history(self.current_chart_id).append({
            "role": "assistant",
            "content": full_response
        })

        return {
            "response": full_response,
            "timestamp": datetime.now().isoformat(),
            "stream": True
        }

    def _enhance_message_with_context(self, message: str, context_data: Optional[Dict[str, Any]] = None) -> str:
        """Prepend available context (chart, filters, skills, job, market)
        to the user message; return the message unchanged when no context
        applies."""
        if not context_data:
            return message

        context_parts = []

        # Chart context: only when both the data and its type are present.
        if "chart_data" in context_data and "chart_type" in context_data:
            chart_info = []
            chart_info.append(f"当前图表类型：{context_data.get('chart_type', '未知')}")

            if "dimension" in context_data:
                chart_info.append(f"数据维度：{context_data['dimension']}")

            if "chart_title" in context_data:
                chart_info.append(f"图表标题：{context_data['chart_title']}")

            # Summarize the first dataset: up to 5 labels/values, elided.
            chart_data = context_data["chart_data"]
            if chart_data.get("labels") and chart_data.get("datasets"):
                labels = chart_data["labels"]
                datasets = chart_data["datasets"]

                if len(labels) > 0 and len(datasets) > 0:
                    main_dataset = datasets[0]
                    data_values = main_dataset.get("data", [])

                    chart_info.append(f"数据分类：{', '.join(map(str, labels[:5]))}{'...' if len(labels) > 5 else ''}")
                    chart_info.append(f"数据值：{', '.join(map(str, data_values[:5]))}{'...' if len(data_values) > 5 else ''}")

            # Aggregate statistics, when the caller supplied them.
            if "data_summary" in context_data:
                summary = context_data["data_summary"]
                chart_info.append(f"数据统计：总数{summary.get('total', 0)}，最大值{summary.get('max', 0)}，最小值{summary.get('min', 0)}，项目数{summary.get('count', 0)}")

            context_parts.append("当前图表信息：" + "；".join(chart_info))

        # Active filters (falsy values are skipped).
        if "current_filters" in context_data and context_data["current_filters"]:
            filters = context_data["current_filters"]
            filter_info = []
            for key, value in filters.items():
                if value:
                    filter_info.append(f"{key}={value}")
            if filter_info:
                context_parts.append(f"当前筛选条件：{', '.join(filter_info)}")

        # User skill background.
        if "user_skills" in context_data:
            skills = context_data["user_skills"]
            if skills:
                context_parts.append(f"我的技能背景：{', '.join(skills)}")

        # Job the user is currently viewing.
        if "current_job" in context_data:
            job = context_data["current_job"]
            context_parts.append(f"当前关注的职位：{job.get('title', '')} - {job.get('company', '')}")

        # Free-form market summary.
        if "market_summary" in context_data:
            summary = context_data["market_summary"]
            context_parts.append(f"相关市场信息：{summary}")

        if context_parts:
            enhanced_message = f"[当前图表和数据背景]\n{chr(10).join(context_parts)}\n\n[用户问题]\n{message}"
            return enhanced_message

        return message

    def _get_chart_conversation_history(self, chart_id: Optional[str] = None) -> List[Dict[str, str]]:
        """Return (creating if needed) the history list for ``chart_id``;
        ``None`` maps to the shared ``"default"`` history."""
        if chart_id is None:
            chart_id = "default"

        if chart_id not in self.chart_conversations:
            self.chart_conversations[chart_id] = []

        return self.chart_conversations[chart_id]

    def set_current_chart(self, chart_id: str):
        """Set the chart whose history subsequent calls operate on."""
        self.current_chart_id = chart_id

    def clear_history(self, chart_id: Optional[str] = None):
        """Clear the history of ``chart_id``, or of the current chart when omitted."""
        if chart_id:
            if chart_id in self.chart_conversations:
                self.chart_conversations[chart_id] = []
        else:
            if self.current_chart_id and self.current_chart_id in self.chart_conversations:
                self.chart_conversations[self.current_chart_id] = []

    def get_conversation_summary(self, chart_id: Optional[str] = None) -> Dict[str, Any]:
        """Return message counts and a 100-char preview of the last message
        for ``chart_id`` (default: the current chart)."""
        conversation_history = self._get_chart_conversation_history(chart_id or self.current_chart_id)
        return {
            "chart_id": chart_id or self.current_chart_id,
            "total_messages": len(conversation_history),
            "user_messages": len([m for m in conversation_history if m["role"] == "user"]),
            "assistant_messages": len([m for m in conversation_history if m["role"] == "assistant"]),
            "last_interaction": (conversation_history[-1]["content"][:100] + "...") if conversation_history else None
        }