"""
字段提取服务
"""
import asyncio
import logging
from typing import List, Dict, Any, Optional
import httpx

from app.core.config import settings
from app.core.constants import EXTRACTION_RULES
from app.utils.helpers import extract_json_from_markdown

logger = logging.getLogger(__name__)


class FieldExtractionService:
    """Field-extraction service.

    Workflow: parse the uploaded files ONCE via the file-parsing agent, then
    concurrently extract each field from the parsed content via the
    field-extraction agent. A semaphore bounds the number of in-flight
    extraction requests.
    """

    def __init__(self):
        self.chat_url = settings.CHAT_URL
        self.parse_agent_id = settings.PARSE_AGENT_ID  # file-parsing agent
        self.extract_agent_id = settings.EXTRACT_AGENT_ID  # field-extraction agent
        self.user_chat_input = settings.USER_CHAT_INPUT
        self.headers = {
            "Authorization": settings.AUTH_TOKEN,
            "Content-Type": "application/json"
        }
        self.timeout = httpx.Timeout(settings.REQUEST_TIMEOUT)
        # Semaphore limiting concurrent extraction requests.
        # NOTE(review): on Python < 3.10 asyncio.Semaphore binds to the event
        # loop active at construction time — confirm this service is created
        # inside the running loop, or that Python >= 3.10 is used.
        self.semaphore = asyncio.Semaphore(settings.MAX_CONCURRENT_REQUESTS)

    async def _post_chat(self, request_body: Dict[str, Any]) -> Dict[str, Any]:
        """POST *request_body* to the chat endpoint and return the parsed JSON.

        Shared transport for both agents (parsing and extraction).

        Raises:
            httpx.HTTPStatusError: on a non-2xx response.
        """
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.post(
                self.chat_url,
                headers=self.headers,
                json=request_body,
            )
            response.raise_for_status()
            return response.json()

    async def parse_files(self, file_ids: List[Dict[str, str]]) -> Optional[str]:
        """Call the file-parsing agent to parse file content (runs only once).

        Args:
            file_ids: list of file-ID dicts, e.g. [{"fileId": "xxx"}, ...]

        Returns:
            The parsed file content string, or None on any failure.
        """
        try:
            logger.info(f"开始解析文件，文件数: {len(file_ids)}")

            request_body = {
                "agentId": self.parse_agent_id,
                "userChatInput": self.user_chat_input,
                "files": file_ids
            }

            result = await self._post_chat(request_body)

            # The first choice carries the parsed content.
            if "choices" in result and len(result["choices"]) > 0:
                file_content = result["choices"][0].get("content", "")
                logger.info(f"文件解析成功，内容长度: {len(file_content)} 字符")
                return file_content

            raise Exception("返回数据中没有choices")

        except httpx.HTTPStatusError as e:
            logger.error(f"文件解析HTTP错误: {e.response.status_code}")
            return None
        except Exception as e:
            logger.error(f"文件解析失败: {str(e)}")
            return None

    async def extract_single_field(
        self,
        file_content: str,
        rule: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract one field from already-parsed file content.

        Args:
            file_content: parsed content produced by parse_files().
            rule: extraction rule; its "field" key names the field.

        Returns:
            On success: dict with "field", "success"=True, the raw agent
            output ("raw_content"), the JSON pulled out of it
            ("extracted_data"), and request/chat/conversation IDs.
            On failure: dict with "success"=False and an "error" message.
        """
        field_name = rule.get("field", "未知字段")

        async with self.semaphore:  # bound concurrency
            try:
                logger.info(f"开始提取字段: {field_name}")

                # The extraction agent receives the parsed content via
                # state.fileContent instead of raw file IDs.
                request_body = {
                    "agentId": self.extract_agent_id,
                    "userChatInput": self.user_chat_input,
                    "state": {
                        "fileContent": file_content,
                        "rule": rule
                    }
                }

                result = await self._post_chat(request_body)

                # Debug aid: log the raw response structure.
                logger.debug(f"字段 {field_name} 返回数据: {result}")

                if "choices" in result and len(result["choices"]) > 0:
                    content = result["choices"][0].get("content", "")

                    # Pull the JSON payload out of the (markdown) output.
                    extracted_json = extract_json_from_markdown(content)

                    logger.info(f"字段 {field_name} 提取成功")

                    return {
                        "field": field_name,
                        "success": True,
                        "raw_content": content,  # full agent output
                        "extracted_data": extracted_json,  # extracted JSON
                        "request_id": result.get("requestId"),
                        "chat_id": result.get("chatId"),
                        "conversation_id": result.get("conversationId")
                    }

                # Log the actual structure so the mismatch can be diagnosed.
                logger.error(f"字段 {field_name} 返回数据异常，实际返回: {result}")
                raise Exception(f"返回数据中没有choices，实际返回: {list(result.keys())}")

            except httpx.HTTPStatusError as e:
                logger.error(f"字段 {field_name} 提取HTTP错误: {e.response.status_code}")
                return {
                    "field": field_name,
                    "success": False,
                    "error": f"HTTP错误: {e.response.status_code}",
                    "raw_content": None,
                    "extracted_data": None
                }
            except Exception as e:
                logger.error(f"字段 {field_name} 提取失败: {str(e)}")
                return {
                    "field": field_name,
                    "success": False,
                    "error": str(e),
                    "raw_content": None,
                    "extracted_data": None
                }

    async def extract_all_fields(
        self,
        file_ids: List[Dict[str, str]],
        rules: Optional[List[Dict[str, Any]]] = None
    ) -> List[Dict[str, Any]]:
        """Parse the files once, then concurrently extract every field.

        Args:
            file_ids: list of file-ID dicts.
            rules: extraction rules; defaults to EXTRACTION_RULES.

        Returns:
            One result dict per rule (see extract_single_field). If parsing
            fails, every rule gets a failure result; an empty file_ids list
            yields [].
        """
        if rules is None:
            rules = EXTRACTION_RULES

        if not file_ids:
            logger.error("没有可用的文件ID")
            return []

        logger.info(f"开始提取流程，文件数: {len(file_ids)}，字段数: {len(rules)}，最大并发: {settings.MAX_CONCURRENT_REQUESTS}")

        # Step 1: parse the file content (exactly once).
        file_content = await self.parse_files(file_ids)

        if not file_content:
            # Without parsed content every extraction would fail; report
            # a uniform failure per rule instead of issuing requests.
            logger.error("文件解析失败，无法继续提取字段")
            return [
                {
                    "field": rule.get("field", "未知字段"),
                    "success": False,
                    "error": "文件解析失败",
                    "raw_content": None,
                    "extracted_data": None
                }
                for rule in rules
            ]

        logger.info(f"步骤2: 开始并发提取 {len(rules)} 个字段")

        # Step 2: one task per rule, all reusing the parsed content.
        tasks = [
            self.extract_single_field(file_content, rule)
            for rule in rules
        ]

        # The shared semaphore inside extract_single_field caps concurrency.
        results = await asyncio.gather(*tasks)

        success_count = sum(1 for r in results if r.get("success"))
        logger.info(f"字段提取完成: 成功 {success_count}/{len(rules)}")

        return results

    def format_extraction_results(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Aggregate per-field results into one summary dict.

        Args:
            results: result dicts from extract_all_fields().

        Returns:
            Dict with total/success/failure counts plus per-field maps:
            "fields_data" (extracted JSON), "raw_contents" (raw output) and
            "errors" (failure messages). Empty/None payloads are omitted
            from the per-field maps.
        """
        formatted = {
            "total_fields": len(results),
            "successful_fields": sum(1 for r in results if r.get("success")),
            "failed_fields": sum(1 for r in results if not r.get("success")),
            "fields_data": {},
            "raw_contents": {},
            "errors": {}
        }

        for result in results:
            field_name = result.get("field", "未知字段")

            if result.get("success"):
                # Keep only truthy payloads, mirroring the success path.
                if result.get("extracted_data"):
                    formatted["fields_data"][field_name] = result["extracted_data"]

                if result.get("raw_content"):
                    formatted["raw_contents"][field_name] = result["raw_content"]
            else:
                formatted["errors"][field_name] = result.get("error", "未知错误")

        return formatted

