"""
合同分析器
使用AI模型分析合同内容，识别风险和问题
"""

import json
import logging
from typing import Dict, List, Any, Optional
import openai
from datetime import datetime

from config import Config

# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ContractAnalyzer:
    """Contract analyzer.

    Sends contract text to an AI model (OpenAI, Qwen/DashScope, or a local
    placeholder) and aggregates the per-review-type results into an overall
    risk assessment.
    """

    def __init__(self, model_type: str = "OpenAI GPT-4", strictness: int = 3):
        """
        Initialize the contract analyzer.

        Args:
            model_type: AI model label. "OpenAI ..." / "通义千问..." / "Qwen..."
                select an API backend; anything else falls back to the local
                model stub used by _call_local_model.
            strictness: Review strictness level, 1 (lenient) to 5 (strict).

        Raises:
            ValueError: If the API key required by the chosen backend is unset.
        """
        self.config = Config()
        self.model_type = model_type
        self.strictness = strictness

        # Configure the AI client for the selected backend.
        if model_type.startswith("OpenAI"):
            if not self.config.OPENAI_API_KEY:
                raise ValueError("请设置OpenAI API密钥")
            self.client = openai.OpenAI(api_key=self.config.OPENAI_API_KEY)

            # Choose the concrete model name.
            if "GPT-4" in model_type:
                self.model_name = self.config.OPENAI_MODEL_GPT4
            else:
                self.model_name = self.config.OPENAI_MODEL_GPT35

        elif model_type.startswith("通义千问") or model_type.startswith("Qwen"):
            if not self.config.DASHSCOPE_API_KEY:
                raise ValueError("请设置DASHSCOPE API密钥")
            # DashScope exposes an OpenAI-compatible endpoint.
            self.client = openai.OpenAI(
                api_key=self.config.DASHSCOPE_API_KEY,
                base_url=self.config.DASHSCOPE_BASE_URL
            )
            self.model_name = self.config.QWEN_MODEL
        else:
            # Fix: these attributes were previously left unset for local/other
            # model types, risking AttributeError on later access. Local models
            # are served by _call_local_model and need no API client.
            self.client = None
            self.model_name = None

    def analyze_contract(self, content: str, review_types: List[str]) -> Dict[str, Any]:
        """
        Analyze contract content for each requested review type.

        Args:
            content: Raw contract text.
            review_types: Review type names (keys of Config.REVIEW_TYPES).

        Returns:
            Dict with overall_score, per-type details, deduplicated risks and
            recommendations, analysis timestamp, model label and strictness.
        """
        logger.info(f"开始分析合同，审核类型: {review_types}")

        analysis_results = {}
        overall_score = 100  # the worst (lowest) per-type score wins
        all_risks = []
        all_recommendations = []

        # Run every requested review type; one failure must not abort the rest.
        for review_type in review_types:
            try:
                result = self._analyze_by_type(content, review_type)
                analysis_results[review_type] = result

                # Collect risks, recommendations, and the per-type score.
                all_risks.extend(result.get('risks', []))
                all_recommendations.extend(result.get('recommendations', []))
                if 'score' in result:
                    overall_score = min(overall_score, result['score'])

            except Exception as e:
                logger.error(f"分析类型 {review_type} 时出错: {e}")
                fallback_score = 50  # default: medium risk
                analysis_results[review_type] = {
                    "error": str(e),
                    "score": fallback_score
                }
                # Fix: the fallback score previously never reached
                # overall_score, so a failed review type could not lower
                # the overall result despite being "medium risk".
                overall_score = min(overall_score, fallback_score)

        # Adjust the score for the configured strictness level.
        overall_score = self._adjust_score_by_strictness(overall_score)

        return {
            "overall_score": overall_score,
            "details": analysis_results,
            # Fix: dict.fromkeys dedupes while preserving first-seen order;
            # list(set(...)) returned a nondeterministic ordering.
            "risks": list(dict.fromkeys(all_risks)),
            "recommendations": list(dict.fromkeys(all_recommendations)),
            "analysis_time": datetime.now().isoformat(),
            "model_used": self.model_type,
            "strictness_level": self.strictness
        }

    def _analyze_by_type(self, content: str, review_type: str) -> Dict[str, Any]:
        """
        Analyze the contract for a single review type.

        Args:
            content: Contract text (truncated if too long for the model).
            review_type: Review type name.

        Returns:
            Parsed analysis result.

        Raises:
            ValueError: If the review type has no prompt template configured.
        """
        # Look up the prompt template for this review type.
        prompt_key = self.config.REVIEW_TYPES.get(review_type)
        if not prompt_key or prompt_key not in self.config.ANALYSIS_PROMPTS:
            raise ValueError(f"不支持的审核类型: {review_type}")

        prompt_template = self.config.ANALYSIS_PROMPTS[prompt_key]

        # Truncate overly long content to stay within model limits.
        max_content_length = 8000  # tune to the model's context window
        if len(content) > max_content_length:
            content = content[:max_content_length] + "...[内容已截断]"

        # Build the full prompt.
        prompt = prompt_template.format(contract_content=content)

        # Dispatch to the matching backend.
        if self.model_type.startswith("OpenAI") or self.model_type.startswith("通义千问") or self.model_type.startswith("Qwen"):
            return self._call_openai_compatible_api(prompt, review_type)
        else:
            # Local or other model types.
            return self._call_local_model(prompt, review_type)

    def _call_openai_compatible_api(self, prompt: str, review_type: str) -> Dict[str, Any]:
        """
        Call an OpenAI-compatible chat API (OpenAI or Qwen via DashScope).

        Args:
            prompt: Full prompt text.
            review_type: Review type name (used for fallback text parsing).

        Returns:
            Parsed result dict; on API failure, an error dict with a
            medium-risk score so the overall analysis can continue.
        """
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[
                    {
                        "role": "system",
                        "content": "你是一个专业的法律合同审核专家，请仔细分析合同内容并提供专业意见。"
                    },
                    {"role": "user", "content": prompt}
                ],
                temperature=self.config.DEFAULT_TEMPERATURE,
                max_tokens=self.config.MAX_TOKENS
            )

            result_text = response.choices[0].message.content

            # Prefer structured JSON; fall back to heuristic text parsing.
            try:
                result = json.loads(result_text)
            except json.JSONDecodeError:
                result = self._parse_text_response(result_text, review_type)

            return result

        except Exception as e:
            model_name = "通义千问" if self.model_type.startswith("通义千问") or self.model_type.startswith("Qwen") else "OpenAI"
            logger.error(f"{model_name} API调用失败: {e}")
            return {
                "error": f"API调用失败: {str(e)}",
                "score": 50,
                "risks": ["API调用失败，无法完成分析"],
                "recommendations": ["请检查网络连接和API密钥"]
            }

    def _call_local_model(self, prompt: str, review_type: str) -> Dict[str, Any]:
        """
        Call a local model (placeholder implementation).

        Args:
            prompt: Full prompt text (currently unused by the stub).
            review_type: Review type name.

        Returns:
            A simulated analysis result; integrate a real local model here.
        """
        logger.warning("本地模型功能尚未实现，返回模拟结果")

        return {
            "score": 75,
            "risks": [f"模拟风险项目 - {review_type}"],
            "recommendations": [f"模拟建议 - {review_type}"],
            "details": f"本地模型分析结果 - {review_type}",
            "note": "这是模拟结果，请配置真实的AI模型"
        }

    def _parse_text_response(self, text: str, review_type: str) -> Dict[str, Any]:
        """
        Parse a free-text model response into the standard result format.

        Args:
            text: Raw text response from the model.
            review_type: Review type name (unused; kept for interface parity).

        Returns:
            Standard-format result dict with at most 5 risks and 5
            recommendations; the raw text is kept under "details".
        """
        import re  # Fix: hoisted out of the loop (was re-imported per line)

        risks = []
        recommendations = []
        score = 75  # default score when none is found in the text

        # Simple keyword-based line classification (Chinese keywords match
        # the Chinese-language model output).
        for line in text.split('\n'):
            line = line.strip()
            if '风险' in line or '问题' in line:
                risks.append(line)
            elif '建议' in line or '改进' in line:
                recommendations.append(line)
            elif '评分' in line or '分数' in line:
                # Use the first number on the line as the score.
                numbers = re.findall(r'\d+', line)
                if numbers:
                    score = int(numbers[0])

        return {
            "score": score,
            "risks": risks[:5],  # cap at 5 risks
            "recommendations": recommendations[:5],  # cap at 5 recommendations
            "details": text,
            "parsed_from_text": True
        }

    def _adjust_score_by_strictness(self, base_score: int) -> int:
        """
        Adjust a score according to the configured strictness level.

        Args:
            base_score: Unadjusted score (0-100).

        Returns:
            Adjusted score, clamped to the 0-100 range.
        """
        # Multipliers per strictness level.
        strictness_factors = {
            1: 1.1,   # lenient: raise the score
            2: 1.05,
            3: 1.0,   # standard: no change
            4: 0.95,
            5: 0.9    # strict: lower the score
        }

        factor = strictness_factors.get(self.strictness, 1.0)
        adjusted_score = int(base_score * factor)

        # Clamp to the 0-100 range.
        return max(0, min(100, adjusted_score))

    def get_risk_level(self, score: int) -> str:
        """
        Map a score to the configured risk level name.

        Args:
            score: Overall score (0-100).

        Returns:
            Matching risk level name, or "未知" if no configured range matches.
        """
        for level, level_config in self.config.RISK_LEVELS.items():
            min_score, max_score = level_config["score_range"]
            if min_score <= score <= max_score:
                return level
        return "未知"
