"""
大模型分析服务API接口
提供基于LLM的智能分析功能
"""
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from pydantic import BaseModel, Field
from typing import Dict, Any, List, Optional, Union
import asyncio

from core.database import get_db
from core.logging_config import get_logger, log_api_request
from core.exceptions import AIServiceError, ValidationError
from core.auth import get_current_user, require_permission
from core.config import settings
from models.user import User
from services.llm_service import llm_service_manager, LLMMessage

# API router for all LLM-analysis endpoints (tag shown in OpenAPI docs)
llm_router = APIRouter(tags=["大模型分析"])

# Module-level logger for this API
logger = get_logger("llm_api")

# NOTE(review): these imports belong in the top-of-file import block (PEP 8);
# left in place here to avoid reordering module side effects.
import time
import json
from datetime import datetime


class LLMAnalysisRequest(BaseModel):
    """Request body for a single LLM text analysis.

    ``analysis_type`` selects a prompt template (see ``_get_analysis_prompt``;
    unknown values fall back to the "general" prompt). ``provider`` and
    ``model`` are optional overrides; service defaults apply when omitted.
    """
    text: str = Field(..., min_length=1, max_length=10000, description="待分析文本")
    analysis_type: str = Field(..., description="分析类型")
    provider: Optional[str] = Field(None, description="LLM提供商")
    model: Optional[str] = Field(None, description="模型名称")
    temperature: Optional[float] = Field(0.7, ge=0.0, le=2.0, description="温度参数")
    max_tokens: Optional[int] = Field(2000, ge=100, le=4000, description="最大token数")


class LLMAnalysisResponse(BaseModel):
    """Response body for a single LLM text analysis.

    Errors are reported in-band: on failure ``success`` is ``False`` and
    ``error_message`` carries the reason, with the other fields emptied.
    """
    success: bool
    analysis: Dict[str, Any]        # parsed (ideally JSON) analysis result
    model: str                      # model actually used by the backend
    provider: str                   # provider name the request was routed to
    usage: Dict[str, int]           # token usage as reported by the backend
    processing_time: float          # seconds, as reported by the LLM service
    error_message: str = ""


class PetitionAnalysisRequest(BaseModel):
    """Request body for multi-type analysis of one petition text.

    Each entry in ``analysis_types`` triggers one concurrent analysis run.
    """
    petition_text: str = Field(..., min_length=1, max_length=10000, description="信访文本")
    analysis_types: List[str] = Field(..., description="分析类型列表")
    provider: Optional[str] = Field(None, description="LLM提供商")


class PetitionAnalysisResponse(BaseModel):
    """Response body for multi-type petition analysis.

    ``analyses`` maps each requested analysis type to its per-type result
    (which carries its own ``success`` flag); ``summary`` is the combined
    LLM-generated overview.
    """
    success: bool
    petition_id: Optional[str] = None   # not populated by current endpoints
    analyses: Dict[str, Dict[str, Any]]
    summary: Dict[str, Any]
    processing_time: float
    error_message: str = ""


class BatchAnalysisRequest(BaseModel):
    """Request body for batch analysis of up to 50 texts.

    ``concurrent_limit`` caps how many LLM calls run at the same time.
    """
    # NOTE(review): min_items/max_items is Pydantic v1 syntax (v2 uses
    # min_length/max_length) — confirm against the project's Pydantic version.
    texts: List[str] = Field(..., min_items=1, max_items=50, description="待分析文本列表")
    analysis_type: str = Field(..., description="分析类型")
    provider: Optional[str] = Field(None, description="LLM提供商")
    concurrent_limit: int = Field(5, ge=1, le=10, description="并发限制")


class StreamingAnalysisRequest(BaseModel):
    """Request body for streaming analysis.

    NOTE(review): no endpoint in this module currently consumes this model —
    presumably used by a streaming route elsewhere; verify before removing.
    """
    text: str = Field(..., min_length=1, max_length=5000, description="待分析文本")
    analysis_type: str = Field(..., description="分析类型")
    provider: Optional[str] = Field(None, description="LLM提供商")


@llm_router.post("/analyze", response_model=LLMAnalysisResponse)
@log_api_request("llm_analyze")
@require_permission("analysis_task:execute")
async def analyze_text(
    request: LLMAnalysisRequest,
    current_user: User = Depends(get_current_user)
):
    """大模型文本分析接口"""
    logger.info(f"大模型分析请求: {request.analysis_type}")
    
    try:
        # 构建消息
        system_prompt = _get_analysis_prompt(request.analysis_type)
        messages = [
            LLMMessage(role="system", content=system_prompt),
            LLMMessage(role="user", content=request.text)
        ]
        
        # 调用大模型服务
        response = await llm_service_manager.chat_completion(
            messages=messages,
            provider=_get_provider_enum(request.provider),
            temperature=request.temperature,
            max_tokens=request.max_tokens
        )
        
        # 解析响应
        content = response.choices[0]["message"]["content"]
        structured_data = _parse_llm_response(content, request.analysis_type)
        
        return LLMAnalysisResponse(
            success=True,
            analysis=structured_data,
            model=response.model,
            provider=_get_provider_enum(request.provider).value if request.provider else settings.llm_provider,
            usage=response.usage,
            processing_time=response.processing_time
        )
        
    except Exception as e:
        logger.error(f"大模型分析失败: {str(e)}")
        return LLMAnalysisResponse(
            success=False,
            analysis={},
            model="",
            provider=request.provider or settings.llm_provider,
            usage={},
            processing_time=0.0,
            error_message=str(e)
        )


@llm_router.post("/analyze-petition", response_model=PetitionAnalysisResponse)
@log_api_request("llm_analyze_petition")
@require_permission("analysis_task:execute")
async def analyze_petition(
    request: PetitionAnalysisRequest,
    current_user: User = Depends(get_current_user)
):
    """信访文本综合分析接口"""
    logger.info(f"信访综合分析请求: {len(request.analysis_types)} 种分析类型")
    
    start_time = time.time()
    
    try:
        analyses = {}
        
        # 并发执行多种分析
        tasks = []
        for analysis_type in request.analysis_types:
            task = _perform_single_analysis(
                text=request.petition_text,
                analysis_type=analysis_type,
                provider=request.provider
            )
            tasks.append(task)
        
        # 执行并发分析
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # 处理结果
        for i, result in enumerate(results):
            analysis_type = request.analysis_types[i]
            if isinstance(result, Exception):
                analyses[analysis_type] = {
                    "success": False,
                    "error": str(result)
                }
            else:
                analyses[analysis_type] = result
        
        # 生成综合摘要
        summary = await _generate_analysis_summary(request.petition_text, analyses)
        
        return PetitionAnalysisResponse(
            success=True,
            analyses=analyses,
            summary=summary,
            processing_time=time.time() - start_time
        )
        
    except Exception as e:
        logger.error(f"信访综合分析失败: {str(e)}")
        return PetitionAnalysisResponse(
            success=False,
            analyses={},
            summary={},
            processing_time=time.time() - start_time,
            error_message=str(e)
        )


@llm_router.post("/batch-analyze")
@log_api_request("llm_batch_analyze")
@require_permission("analysis_task:execute")
async def batch_analyze(
    request: BatchAnalysisRequest,
    current_user: User = Depends(get_current_user)
):
    """批量文本分析接口"""
    logger.info(f"批量分析请求: {len(request.texts)} 个文本")
    
    try:
        if len(request.texts) > 50:
            raise HTTPException(status_code=400, detail="批量分析最多支持50个文本")
        
        # 构建分析任务
        tasks = []
        for i, text in enumerate(request.texts):
            task = _perform_single_analysis(
                text=text,
                analysis_type=request.analysis_type,
                provider=request.provider,
                task_id=i
            )
            tasks.append(task)
        
        # 控制并发数量
        semaphore = asyncio.Semaphore(request.concurrent_limit)
        
        async def analyze_with_semaphore(task):
            async with semaphore:
                return await task
        
        # 执行批量分析
        results = await asyncio.gather(*[analyze_with_semaphore(task) for task in tasks], return_exceptions=True)
        
        # 处理结果
        processed_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                processed_results.append({
                    "text_index": i,
                    "success": False,
                    "error": str(result)
                })
            else:
                processed_results.append({
                    "text_index": i,
                    "success": True,
                    "analysis": result
                })
        
        return {
            "total_texts": len(request.texts),
            "successful_analyses": sum(1 for r in processed_results if r["success"]),
            "failed_analyses": sum(1 for r in processed_results if not r["success"]),
            "results": processed_results,
            "analysis_type": request.analysis_type
        }
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"批量分析失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@llm_router.get("/providers")
@log_api_request("llm_list_providers")
@require_permission("analysis_task:view")
async def list_providers(
    current_user: User = Depends(get_current_user)
):
    """获取支持的LLM提供商列表"""
    try:
        providers = [
            {
                "name": "openai",
                "display_name": "OpenAI",
                "models": ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"],
                "description": "OpenAI GPT系列模型"
            },
            {
                "name": "azure",
                "display_name": "Azure OpenAI",
                "models": ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"],
                "description": "微软Azure OpenAI服务"
            },
            {
                "name": "qwen",
                "display_name": "通义千问",
                "models": ["qwen-turbo", "qwen-plus", "qwen-max"],
                "description": "阿里云通义千问大模型"
            }
        ]
        
        return {"providers": providers}
        
    except Exception as e:
        logger.error(f"获取提供商列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@llm_router.get("/analysis-types")
@log_api_request("llm_list_analysis_types")
@require_permission("analysis_task:view")
async def list_analysis_types(
    current_user: User = Depends(get_current_user)
):
    """获取支持的分析类型列表"""
    try:
        analysis_types = [
            {
                "type": "general",
                "name": "综合分析",
                "description": "对信访文本进行综合分析"
            },
            {
                "type": "sentiment",
                "name": "情感分析",
                "description": "分析文本的情感倾向和情绪"
            },
            {
                "type": "extract",
                "name": "信息抽取",
                "description": "提取关键实体和结构化信息"
            },
            {
                "type": "summarize",
                "name": "内容摘要",
                "description": "生成文本摘要和关键要点"
            },
            {
                "type": "classify",
                "name": "问题分类",
                "description": "对问题进行分类和标记"
            },
            {
                "type": "solution",
                "name": "解决方案",
                "description": "提供解决方案建议"
            },
            {
                "type": "risk",
                "name": "风险评估",
                "description": "评估问题的风险等级"
            }
        ]
        
        return {"analysis_types": analysis_types}
        
    except Exception as e:
        logger.error(f"获取分析类型列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@llm_router.get("/health")
@log_api_request("llm_health_check")
@require_permission("analysis_task:view")
async def health_check(
    provider: Optional[str] = None,
    current_user: User = Depends(get_current_user)
):
    """大模型服务健康检查"""
    try:
        from services.llm_service import LLMProvider
        
        provider_enum = None
        if provider:
            try:
                provider_enum = LLMProvider(provider)
            except ValueError:
                raise HTTPException(status_code=400, detail=f"不支持的提供商: {provider}")
        
        health_result = await llm_service_manager.health_check(provider_enum)
        
        return {
            "health": health_result,
            "timestamp": datetime.now().isoformat()
        }
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"健康检查失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@llm_router.get("/models")
@log_api_request("llm_list_models")
@require_permission("analysis_task:view")
async def list_models(
    provider: Optional[str] = None,
    current_user: User = Depends(get_current_user)
):
    """获取可用模型列表"""
    try:
        models = []
        
        if provider is None or provider == "openai":
            models.extend([
                {"provider": "openai", "name": "gpt-3.5-turbo", "description": "GPT-3.5 Turbo"},
                {"provider": "openai", "name": "gpt-4", "description": "GPT-4"},
                {"provider": "openai", "name": "gpt-4-turbo", "description": "GPT-4 Turbo"}
            ])
        
        if provider is None or provider == "qwen":
            models.extend([
                {"provider": "qwen", "name": "qwen-turbo", "description": "通义千问 Turbo"},
                {"provider": "qwen", "name": "qwen-plus", "description": "通义千问 Plus"},
                {"provider": "qwen", "name": "qwen-max", "description": "通义千问 Max"}
            ])
        
        return {"models": models}
        
    except Exception as e:
        logger.error(f"获取模型列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# 辅助函数
def _get_provider_enum(provider: Optional[str]) -> Optional[str]:
    """获取提供商枚举值"""
    if not provider:
        return None
    
    provider_mapping = {
        "openai": "openai",
        "azure": "azure", 
        "anthropic": "anthropic",
        "qwen": "qwen"
    }
    
    return provider_mapping.get(provider.lower())


def _get_analysis_prompt(analysis_type: str) -> str:
    """Return the Chinese system prompt for the given analysis type.

    Unknown types fall back to the "general" prompt; every prompt instructs
    the model to reply in JSON (parsed later by ``_parse_llm_response``).
    """
    # Prompt templates are runtime strings sent to the LLM — do not edit
    # casually; each ends with an instruction to answer in JSON.
    prompts = {
        "general": """你是一个专业的信访分析专家。请分析以下信访文本，提供全面的分析结果。

请包含以下方面的分析：
1. 信访内容概述
2. 主要问题识别
3. 涉及的相关方
4. 问题的严重程度
5. 建议的处理方式

请以JSON格式返回分析结果。""",
        
        "sentiment": """你是一个情感分析专家。请分析以下信访文本的情感特征。

请分析：
1. 整体情感倾向（正面/负面/中性）
2. 情感强度（1-10分）
3. 主要情感类型
4. 情绪变化趋势

请以JSON格式返回结果。""",
        
        "extract": """你是一个信息抽取专家。请从以下信访文本中提取关键信息。

请提取：
1. 投诉人信息
2. 被投诉方信息
3. 事件发生的时间和地点
4. 问题描述
5. 问题类型

请以JSON格式返回结果。""",
        
        "summarize": """你是一个文本摘要专家。请对以下信访文本进行摘要。

请提供：
1. 一句话摘要
2. 简要摘要（100字以内）
3. 关键信息点
4. 核心问题

请以JSON格式返回结果。""",
        
        "classify": """你是一个问题分类专家。请对以下信访文本中的问题进行分类。

请分类：
1. 问题大类（环境污染、社会民生等）
2. 问题子类
3. 紧急程度
4. 影响范围
5. 责任部门类型

请以JSON格式返回结果。""",
        
        "solution": """你是一个问题解决专家。请针对以下信访文本中的问题提供解决方案建议。

请提供：
1. 短期解决方案
2. 长期解决方案
3. 责任部门建议
4. 执行步骤
5. 预期效果

请以JSON格式返回结果。""",
        
        "risk": """你是一个风险评估专家。请对以下信访文本中的问题进行风险评估。

请评估：
1. 风险等级
2. 风险类型
3. 影响程度
4. 紧急程度
5. 扩散风险
6. 处理难度

请以JSON格式返回结果。"""
    }
    
    # Fallback keeps the endpoint usable for unexpected analysis types.
    return prompts.get(analysis_type, prompts["general"])


def _parse_llm_response(content: str, analysis_type: str) -> Dict[str, Any]:
    """解析大模型响应"""
    try:
        return json.loads(content)
    except json.JSONDecodeError:
        return {
            "raw_content": content,
            "analysis_type": analysis_type,
            "structured": False
        }


async def _perform_single_analysis(
    text: str,
    analysis_type: str,
    provider: Optional[str] = None,
    task_id: Optional[int] = None
) -> Dict[str, Any]:
    """Run one analysis task and return a result dict; never raises.

    Used by the petition and batch endpoints, which aggregate per-task
    results; failures are therefore reported as ``{"success": False, ...}``.
    ``task_id`` is echoed back so batch callers can correlate results.
    """
    try:
        system_prompt = _get_analysis_prompt(analysis_type)
        messages = [
            LLMMessage(role="system", content=system_prompt),
            LLMMessage(role="user", content=text)
        ]

        # Low temperature: aggregated analyses favor deterministic output.
        response = await llm_service_manager.chat_completion(
            messages=messages,
            provider=_get_provider_enum(provider),
            temperature=0.3,
            max_tokens=2000
        )

        content = response.choices[0]["message"]["content"]
        structured_data = _parse_llm_response(content, analysis_type)

        # BUG FIX: _get_provider_enum returns a plain string (or None); the
        # former ``.value`` access raised AttributeError for every explicitly
        # requested provider. getattr() is safe for strings and enum members.
        resolved = _get_provider_enum(provider)
        provider_name = getattr(resolved, "value", resolved) or settings.llm_provider

        return {
            "success": True,
            "analysis": structured_data,
            "model": response.model,
            "provider": provider_name,
            "usage": response.usage,
            "processing_time": response.processing_time,
            "task_id": task_id
        }

    except Exception as e:
        # Swallow and report: callers aggregate per-task failures.
        return {
            "success": False,
            "error": str(e),
            "task_id": task_id
        }


async def _generate_analysis_summary(petition_text: str, analyses: Dict[str, Any]) -> Dict[str, Any]:
    """Ask the LLM for a combined summary of all per-type analysis results.

    Returns the parsed JSON summary when possible, a ``{"summary": text}``
    wrapper when the reply is not valid JSON, and an error placeholder when
    the LLM call itself fails (this helper never raises).
    """
    try:
        # Render the collected analyses as readable JSON for the prompt.
        rendered_analyses = json.dumps(analyses, ensure_ascii=False, indent=2)
        summary_prompt = f"""基于以下信访文本和多种分析结果，请生成一个综合摘要：

信访文本：
{petition_text}

分析结果：
{rendered_analyses}

请提供一个包含以下内容的综合摘要：
1. 问题概述
2. 主要发现
3. 建议措施
4. 优先级评估

请以JSON格式返回。"""

        response = await llm_service_manager.chat_completion(
            messages=[
                LLMMessage(role="system", content="你是一个专业的信访分析专家。"),
                LLMMessage(role="user", content=summary_prompt)
            ],
            temperature=0.5,
            max_tokens=1000
        )

        reply = response.choices[0]["message"]["content"]

        try:
            return json.loads(reply)
        except json.JSONDecodeError:
            # Non-JSON reply: hand back the raw text under a fixed key.
            return {"summary": reply}

    except Exception as e:
        logger.error(f"生成分析摘要失败: {str(e)}")
        return {
            "summary": "无法生成综合摘要",
            "error": str(e)
        }


