"""
需求分析服务
通过大模型分析需求文档，提取测试点和功能模块，然后基于分析结果生成测试用例
"""

import httpx
import json
import logging
import asyncio
import os
from typing import List, Dict, Any, Optional
from dotenv import load_dotenv
from app.models.schemas import TestCaseRequest, TestCaseResponse
from app.services.model_adapters import ModelFactory, BaseModelAdapter
from app.services.cache_service import get_cache_service

# 加载环境变量
load_dotenv()

logger = logging.getLogger(__name__)


class RequirementAnalysisService:
    """Requirement analysis service.

    Sends a requirement document to an LLM (through a provider adapter, with
    a raw HTTP call as a fallback), extracts functional modules, test points,
    business rules and constraints, and caches parsed results so identical
    documents skip the LLM round-trip entirely.
    """

    def __init__(self):
        # Cache layer: an identical requirement text reuses a prior analysis.
        self.cache_service = get_cache_service()

        # Preferred path: build a provider-specific model adapter from the
        # application settings. On any failure, fall back to calling the LLM
        # HTTP API directly with credentials taken from environment variables.
        try:
            from app.config.settings import get_settings
            settings = get_settings()
            model_config = settings.get_model_config()

            self.model_adapter = ModelFactory.create_adapter(
                provider=model_config["provider"],
                api_key=model_config["api_key"],
                api_url=model_config["api_url"],
                model=model_config["model"]
            )

            if not self.model_adapter.validate_config():
                raise ValueError("模型配置验证失败")

            logger.info(
                f"需求分析服务使用模型: {self.model_adapter.get_provider_name()}")

        except Exception as e:
            logger.error(f"初始化模型适配器失败: {str(e)}")
            # Legacy fallback: plain HTTP calls configured via env vars.
            self.model_adapter = None
            self.api_key = os.getenv("LLM_API_KEY")
            self.api_url = os.getenv("LLM_API_URL")
            self.model = os.getenv("LLM_MODEL", "deepseek-chat")
            logger.warning("需求分析服务使用传统API调用方式作为回退")

        # Maximum number of characters per chunk when splitting long documents.
        self.max_chunk_size = 3000

    async def analyze_requirement(self, requirement: str) -> Dict[str, Any]:
        """Analyze a requirement document, extracting modules and test points.

        Returns a dict with keys ``modules``, ``test_points``,
        ``business_rules`` and ``constraints``. Served from cache when a
        previous analysis of the same text exists.
        """
        logger.info(f"开始分析需求，长度: {len(requirement)} 字符")

        # Cache first: skip the LLM call entirely on a hit.
        cached_result = self.cache_service.get_requirement_analysis(
            requirement)
        if cached_result:
            logger.info("✅ 使用缓存的需求分析结果，跳过大模型调用")
            return cached_result

        logger.info("缓存未命中，开始调用大模型分析需求")

        # Long documents are analyzed chunk by chunk to stay within the
        # model's practical prompt size.
        if len(requirement) > self.max_chunk_size:
            logger.info("需求内容较长，采用分块分析策略")
            result = await self._analyze_requirement_in_chunks(requirement)
        else:
            logger.info("需求内容适中，采用单次分析策略")
            result = await self._analyze_requirement_single(requirement)

        # Only results that contain modules are cached; a completely empty
        # result is retried on the next call.
        if result and result.get('modules'):
            self.cache_service.set_requirement_analysis(requirement, result)
            logger.info("✅ 需求分析结果已缓存")

        return result

    async def _analyze_requirement_single(self, requirement: str) -> Dict[str, Any]:
        """Run one LLM call over the (whole or chunked) requirement text.

        Uses the model adapter when available, otherwise falls back to a
        direct HTTP call. On any failure a generic fallback analysis is
        returned instead of raising.
        """
        prompt = self._create_analysis_prompt(requirement)

        try:
            if self.model_adapter:
                # Preferred path: provider-agnostic adapter.
                messages = [
                    {
                        "role": "system",
                        "content": "你是专业的需求分析师，擅长分析软件需求文档，提取功能模块和测试点。"
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ]

                content = await self.model_adapter.generate_completion(
                    messages=messages,
                    temperature=0.1,
                    max_tokens=4000,
                    timeout=300.0
                )
            else:
                # Legacy fallback: OpenAI-compatible HTTP endpoint.
                async with httpx.AsyncClient(timeout=300.0) as client:
                    response = await client.post(
                        self.api_url,
                        headers={
                            "Authorization": f"Bearer {self.api_key}",
                            "Content-Type": "application/json"
                        },
                        json={
                            "model": self.model,
                            "messages": [
                                {
                                    "role": "system",
                                    "content": "你是专业的需求分析师，擅长分析软件需求文档，提取功能模块和测试点。"
                                },
                                {
                                    "role": "user",
                                    "content": prompt
                                }
                            ],
                            "temperature": 0.1,
                            "max_tokens": 4000
                        }
                    )

                    if response.status_code != 200:
                        raise Exception(f"API调用失败: {response.status_code}")

                    result = response.json()
                    content = result["choices"][0]["message"]["content"]

            return self._parse_analysis_result(content)

        except Exception as e:
            logger.error(f"需求分析失败: {str(e)}")
            return self._create_fallback_analysis(requirement)

    async def _analyze_requirement_in_chunks(self, requirement: str) -> Dict[str, Any]:
        """Analyze a long requirement chunk by chunk and merge the results."""
        chunks = self._split_requirement_into_chunks(requirement)
        logger.info(f"需求拆分为 {len(chunks)} 个分块")

        all_modules = []
        all_test_points = []
        all_business_rules = []
        all_constraints = []

        for i, chunk in enumerate(chunks):
            logger.info(f"分析第 {i+1}/{len(chunks)} 个分块")

            try:
                chunk_analysis = await self._analyze_requirement_single(chunk)

                all_modules.extend(chunk_analysis.get("modules") or [])
                all_test_points.extend(chunk_analysis.get("test_points") or [])
                # FIX: business rules and constraints from each chunk were
                # previously discarded, so the merged result lost them.
                all_business_rules.extend(
                    chunk_analysis.get("business_rules") or [])
                all_constraints.extend(chunk_analysis.get("constraints") or [])

                # Throttle between chunks to avoid API rate limiting.
                await asyncio.sleep(1)

            except Exception as e:
                # A failed chunk is skipped; remaining chunks still count.
                logger.error(f"分析第 {i+1} 个分块失败: {str(e)}")
                continue

        return self._merge_analysis_results(
            all_modules, all_test_points, requirement,
            all_business_rules, all_constraints)

    def _split_requirement_into_chunks(self, requirement: str) -> List[str]:
        """Split the requirement into chunks of at most ``max_chunk_size`` chars.

        Blank lines are dropped and splits happen only at line boundaries, so
        a single line longer than the limit still becomes its own oversized
        chunk.
        """
        chunks = []
        current_chunk = []
        current_length = 0

        for line in requirement.split('\n'):
            line = line.strip()
            if not line:
                continue

            # FIX: account for the newline that '\n'.join() inserts between
            # lines; without the +1 assembled chunks could exceed the limit.
            joiner = 1 if current_chunk else 0
            if current_length + joiner + len(line) > self.max_chunk_size and current_chunk:
                chunks.append('\n'.join(current_chunk))
                current_chunk = []
                current_length = 0
                joiner = 0

            current_chunk.append(line)
            current_length += len(line) + joiner

        # Flush the trailing chunk, if any.
        if current_chunk:
            chunks.append('\n'.join(current_chunk))

        return chunks

    def _create_analysis_prompt(self, requirement: str) -> str:
        """Build the analysis prompt embedding the requirement document."""
        return f"""
请分析以下需求文档，提取功能模块和测试点。

需求文档：
{requirement}

请按照以下JSON格式返回分析结果：

{{
  "modules": [
    {{
      "name": "功能模块名称",
      "description": "模块功能描述",
      "priority": "P1/P2/P3",
      "test_points": [
        "测试点1：具体的测试场景描述",
        "测试点2：具体的测试场景描述"
      ]
    }}
  ],
  "test_points": [
    {{
      "category": "功能分类",
      "point": "测试点描述",
      "scenarios": ["正常场景", "异常场景", "边界场景"],
      "priority": "P1/P2/P3"
    }}
  ],
  "business_rules": [
    "业务规则1",
    "业务规则2"
  ],
  "constraints": [
    "约束条件1", 
    "约束条件2"
  ]
}}

分析要求：
1. 识别所有功能模块，按重要性分级
2. 为每个模块提取具体的测试点
3. 识别业务规则和约束条件
4. 考虑正常、异常、边界等多种测试场景
5. 只返回JSON格式，不要其他说明
"""

    def _parse_analysis_result(self, content: str) -> Dict[str, Any]:
        """Parse the model's JSON reply into an analysis dict.

        Strips Markdown code fences (with or without a language tag) and, if
        direct parsing fails, retries on the first ``{`` … last ``}`` span in
        case the model wrapped the JSON in extra prose. Returns a generic
        fallback analysis when no JSON can be recovered.
        """
        content = content.strip()
        # Remove a surrounding Markdown fence if present.
        if content.startswith('```json'):
            content = content[7:]
        elif content.startswith('```'):
            # FIX: also handle a fence without a language tag.
            content = content[3:]
        if content.endswith('```'):
            content = content[:-3]
        content = content.strip()

        try:
            result = json.loads(content)
        except json.JSONDecodeError:
            # Retry on the widest brace-delimited span.
            start = content.find('{')
            end = content.rfind('}')
            if start == -1 or end <= start:
                logger.error("解析分析结果失败: 未找到JSON内容")
                return self._create_fallback_analysis("")
            try:
                result = json.loads(content[start:end + 1])
            except json.JSONDecodeError as e:
                logger.error(f"解析分析结果失败: {str(e)}")
                return self._create_fallback_analysis("")

        logger.info(f"成功解析分析结果，包含 {len(result.get('modules', []))} 个模块")
        return result

    def _create_fallback_analysis(self, requirement: str) -> Dict[str, Any]:
        """Return a generic analysis used when the LLM call or parsing fails.

        ``requirement`` is accepted for interface symmetry but not inspected.
        """
        return {
            "modules": [
                {
                    "name": "核心功能模块",
                    "description": "基于需求文档的核心功能",
                    "priority": "P1",
                    "test_points": [
                        "功能正常流程测试",
                        "异常输入处理测试",
                        "边界值测试"
                    ]
                }
            ],
            "test_points": [
                {
                    "category": "功能测试",
                    "point": "基本功能验证",
                    "scenarios": ["正常操作", "异常输入", "边界条件"],
                    "priority": "P1"
                }
            ],
            "business_rules": ["基本业务规则验证"],
            "constraints": ["系统约束条件验证"]
        }

    def _merge_analysis_results(self, all_modules: List[Dict], all_test_points: List[Dict],
                                requirement: str,
                                all_business_rules: Optional[List[str]] = None,
                                all_constraints: Optional[List[str]] = None) -> Dict[str, Any]:
        """Merge per-chunk analysis results, de-duplicating each category.

        Modules are de-duplicated by lower-cased name and test points by
        lower-cased description, preserving first-occurrence order. The new
        optional ``all_business_rules`` / ``all_constraints`` parameters
        default to empty for backward compatibility with older callers.
        """
        # De-duplicate modules by case-insensitive name.
        unique_modules = []
        seen_module_names = set()
        for module in all_modules:
            name = module.get("name", "").lower()
            if name not in seen_module_names:
                unique_modules.append(module)
                seen_module_names.add(name)

        # De-duplicate test points by case-insensitive description.
        unique_test_points = []
        seen_test_points = set()
        for point in all_test_points:
            point_desc = point.get("point", "").lower()
            if point_desc not in seen_test_points:
                unique_test_points.append(point)
                seen_test_points.add(point_desc)

        def dedupe_texts(items: Optional[List[str]]) -> List[str]:
            # De-duplicate plain-text entries case-insensitively, in order.
            seen = set()
            result = []
            for item in items or []:
                key = str(item).strip().lower()
                if key and key not in seen:
                    seen.add(key)
                    result.append(item)
            return result

        return {
            "modules": unique_modules,
            "test_points": unique_test_points,
            # FIX: these were previously hard-coded to [] even when chunk
            # analyses produced business rules and constraints.
            "business_rules": dedupe_texts(all_business_rules),
            "constraints": dedupe_texts(all_constraints),
        }
