import os
import json
import time
import re
from typing import Dict, Any, List, Optional, Tuple
import logging
import requests

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ModelService:
    """
    Service for calling a local AI model to generate chat replies and to
    produce dialogue summaries and keywords.

    All requests go to an OpenAI-compatible chat-completions endpoint that is
    expected to be running at http://localhost:5000.
    """

    # Maps the public model identifiers accepted by callers to the model
    # names understood by the local inference server.  The "default" entry
    # is the fallback for unrecognized identifiers.
    MODEL_MAPPING = {
        "model_1": "qwen",
        "model_2": "baichuan2",
        "model_3": "chatglm3",
        "default": "qwen",
    }

    def __init__(self):
        """
        Initialize the service and probe the local model's health endpoint.

        A failed probe only logs a warning; construction never raises, so the
        application can still start when the model server is down.
        """
        self.local_model_url = "http://localhost:5000/v1/chat/completions"

        # Best-effort connectivity check against the local model service.
        try:
            response = requests.get("http://localhost:5000/health", timeout=2)
            if response.status_code != 200:
                logger.warning("无法连接本地模型服务，请确保服务已启动")
        except Exception as e:
            logger.warning(f"测试连接本地模型服务失败: {str(e)}")

    @staticmethod
    def _resolve_model_name(model_name: str) -> str:
        """
        Map a public model identifier to the local server's model name.

        Args:
            model_name: Public identifier ("model_1", "model_2", "model_3").

        Returns:
            The local model name; unknown identifiers fall back to the default.
        """
        return ModelService.MODEL_MAPPING.get(
            model_name, ModelService.MODEL_MAPPING["default"]
        )

    def generate_chat_response(self,
                              user_input: str,
                              model_name: str = "model_1") -> str:
        """
        Generate a chat reply for the given user input.

        Args:
            user_input: The user's message.
            model_name: Model identifier, one of "model_1", "model_2", "model_3".

        Returns:
            The generated reply, or a human-readable error string on failure.
        """
        return self._local_generate_chat_response(user_input, model_name)

    def _local_generate_chat_response(self,
                                   user_input: str,
                                   model_name: str) -> str:
        """
        Call the local model API to generate a chat reply.

        Args:
            user_input: The user's message.
            model_name: Public model identifier.

        Returns:
            The model's reply on success; a human-readable error string on
            HTTP failure or any exception (this method never raises).
        """
        try:
            local_model = self._resolve_model_name(model_name)

            # Build the chat context for the completions endpoint.
            payload = {
                "model": local_model,
                "messages": [
                    {"role": "system", "content": "你是一个友好、专业的AI助手，能够用中文回答各种问题。请尽量简洁地提供有用的信息。"},
                    {"role": "user", "content": user_input}
                ],
                "temperature": 0.7,
                "max_tokens": 500
            }

            # Call the local model API.
            response = requests.post(self.local_model_url, json=payload, timeout=30)

            if response.status_code == 200:
                result = response.json()
                return result["choices"][0]["message"]["content"]

            logger.error(f"调用本地模型API失败, 状态码: {response.status_code}, 响应: {response.text}")
            return f"调用本地模型失败，请确保您的本地模型服务正在运行。错误状态码: {response.status_code}"

        except Exception as e:
            logger.error(f"调用本地模型API生成聊天回复失败: {str(e)}")
            return f"调用本地模型发生错误: {str(e)}，请检查模型服务是否正常运行。"

    def generate_summary_and_keywords(self,
                                     dialogue: str,
                                     model_name: str = "model_1") -> Tuple[str, str, str, str]:
        """
        Generate a summary and keywords for a dialogue.

        Args:
            dialogue: The dialogue text to analyze.
            model_name: Model identifier, one of "model_1", "model_2", "model_3".

        Returns:
            Tuple of (summary, keywords, coreference_id, coreference_text).
        """
        return self._local_model_generate(dialogue, model_name)

    def _local_model_generate(self, dialogue: str, model_name: str) -> Tuple[str, str, str, str]:
        """
        Call the local model to generate a summary and keywords.

        Args:
            dialogue: The dialogue text to analyze.
            model_name: Public model identifier.

        Returns:
            Tuple of (summary, keywords, coreference_id, coreference_text);
            on failure, the first element carries an error message and the
            remaining elements are empty strings (this method never raises).
        """
        try:
            local_model = self._resolve_model_name(model_name)

            # System prompt instructing the model to reply with a fixed JSON
            # schema so the answer can be parsed mechanically.
            prompt = """请分析以下对话内容，并按照如下格式输出:
1. 生成一个简洁但完整的摘要
2. 提取5-8个关键词，使用逗号分隔
3. 提取2-4个主要引用或参考，使用分号分隔

输出格式为JSON:
{
  "summary": "对话摘要",
  "keywords": "关键词1,关键词2,关键词3",
  "coreference_id": "引用ID",
  "coreference_text": "引用1;引用2;引用3"
}

请确保输出是有效的JSON格式。
"""

            payload = {
                "model": local_model,
                "messages": [
                    {"role": "system", "content": prompt},
                    {"role": "user", "content": dialogue}
                ],
                "temperature": 0.5,
                "max_tokens": 1000
            }

            # Call the local model API (longer timeout: summarization is slower).
            response = requests.post(self.local_model_url, json=payload, timeout=60)

            if response.status_code == 200:
                result = response.json()
                result_text = result["choices"][0]["message"]["content"].strip()
                return self._parse_model_output(result_text)

            logger.error(f"调用本地模型API失败, 状态码: {response.status_code}, 响应: {response.text}")
            return "调用本地模型失败", "", "", ""

        except Exception as e:
            logger.error(f"调用本地模型失败: {str(e)}")
            return f"调用本地模型发生错误: {str(e)}", "", "", ""

    @staticmethod
    def _parse_model_output(result_text: str) -> Tuple[str, str, str, str]:
        """
        Extract (summary, keywords, coreference_id, coreference_text) from the
        model's raw reply text.

        First tries to parse the whole reply as JSON; if that fails, searches
        for an embedded JSON object within the text.  Returns a placeholder
        message with empty fields when nothing can be parsed.

        Args:
            result_text: Raw reply text from the model.

        Returns:
            Tuple of (summary, keywords, coreference_id, coreference_text).
        """
        try:
            result_json = json.loads(result_text)
            return (
                result_json.get("summary", ""),
                result_json.get("keywords", ""),
                result_json.get("coreference_id", ""),
                result_json.get("coreference_text", ""),
            )
        except (json.JSONDecodeError, AttributeError):
            logger.error(f"解析模型返回内容失败: {result_text}")

        # Reply was not pure JSON; try to extract an embedded JSON object
        # (greedy: spans from the first '{' to the last '}').
        json_match = re.search(r'({[\s\S]*})', result_text)
        if json_match:
            try:
                parsed_json = json.loads(json_match.group(1))
                return (
                    parsed_json.get("summary", ""),
                    parsed_json.get("keywords", ""),
                    parsed_json.get("coreference_id", ""),
                    parsed_json.get("coreference_text", ""),
                )
            except Exception as e:
                logger.error(f"解析提取的JSON失败: {str(e)}")

        # Nothing parseable; return a placeholder result.
        return "无法解析模型返回的内容", "", "", ""

# Module-level singleton instance of the model service shared by the application.
model_service = ModelService() 