"""
重复分析API路由
提供重复分析相关的API接口
"""
from fastapi import APIRouter, Depends, HTTPException, Query, BackgroundTasks
from typing import Optional, Dict, Any, List
from pydantic import BaseModel, Field
from datetime import datetime
from sqlalchemy.orm import Session

from core import get_logger
from core.database import get_db
from core.auth import get_current_active_user
from models.user import User
from services.analysis.duplicate_analyzer import DuplicateAnalyzer, DuplicateAnalysisFilters
from services.analysis.base_analyzer import get_analysis_service
from services.visits_services.duplicate_detection_service import DuplicateDetectionService

# Module-level logger shared by all handlers in this router.
logger = get_logger("duplicate_analysis")

# Router instance; mounted by the application (prefix configured by the caller).
duplicate_analysis_router = APIRouter()

# 请求模型
# Request model for the POST /analyze endpoint.
class DuplicateDetectionRequest(BaseModel):
    """Options for a duplicate-complaint detection run.

    ``task_id`` scopes detection to one analysis task (``None`` = all data);
    ``save_results`` controls local persistence; ``output_path`` overrides the
    service's default save location.
    """
    task_id: Optional[int] = Field(None, description="分析任务ID，如果为None则分析所有数据")
    save_results: bool = Field(True, description="是否保存结果到本地")
    output_path: Optional[str] = Field(None, description="自定义输出路径")

# Response model for the POST /analyze endpoint.
class DuplicateDetectionResponse(BaseModel):
    """Outcome of a duplicate-complaint detection run.

    ``data`` carries per-run details (counts, output file path); ``summary``
    carries the service's statistics dict. Both are ``None`` on failure.
    """
    success: bool = Field(..., description="是否成功")
    message: str = Field(..., description="响应消息")
    data: Optional[Dict[str, Any]] = Field(None, description="响应数据")
    summary: Optional[Dict[str, Any]] = Field(None, description="统计摘要")

# Request model for the POST /similarity-analysis endpoint.
class SimilarityAnalysisRequest(BaseModel):
    """Parameters for analysing a specific similarity threshold.

    ``similarity_threshold`` must lie in [0.0, 1.0]; the optional task and
    date fields narrow the analysed data set.
    """
    similarity_threshold: float = Field(..., ge=0.0, le=1.0, description="相似度阈值")
    task_id: Optional[int] = Field(None, description="分析任务ID")
    start_date: Optional[datetime] = Field(None, description="开始时间")
    end_date: Optional[datetime] = Field(None, description="结束时间")

# Request model for the POST /cluster-analysis endpoint.
class ClusterAnalysisRequest(BaseModel):
    """Parameters for duplicate-cluster analysis.

    ``min_cluster_size`` defaults to 2 (the smallest possible cluster); the
    optional task and date fields narrow the analysed data set.
    """
    min_cluster_size: Optional[int] = Field(2, ge=2, description="最小簇大小")
    task_id: Optional[int] = Field(None, description="分析任务ID")
    start_date: Optional[datetime] = Field(None, description="开始时间")
    end_date: Optional[datetime] = Field(None, description="结束时间")


@duplicate_analysis_router.post("/analyze", response_model=DuplicateDetectionResponse)
async def analyze_duplicates(
        request: DuplicateDetectionRequest,
        background_tasks: BackgroundTasks,
        current_user: User = Depends(get_current_active_user),
        db: Session = Depends(get_db)
):
    """Run duplicate-complaint detection and optionally persist the results.

    Args:
        request: Detection options (task scope, save flag, output path).
        background_tasks: FastAPI background-task queue. Currently unused;
            kept so the endpoint signature stays stable for callers.
        current_user: Authenticated user issuing the request.
        db: Database session dependency (not used directly here; the
            detection service manages its own data access).

    Returns:
        DuplicateDetectionResponse with run statistics and, when results
        were saved, the output file path.

    Raises:
        HTTPException: 500 when detection fails unexpectedly.
    """
    try:
        logger.info(f"用户 {current_user.username} 开始执行重复投诉检测，task_id={request.task_id}")

        service = DuplicateDetectionService()

        # detect_duplicates returns a (possibly empty) pandas DataFrame.
        result_data = service.detect_duplicates(request.task_id)

        if result_data.empty:
            return DuplicateDetectionResponse(
                success=False,
                message="未找到数据或检测失败，请先运行区域分析任务",
                data=None,
                summary=None
            )

        summary = service.get_summary_statistics(result_data)

        # Persist only when requested; pass the custom path only when given
        # so the service's default output location still applies otherwise.
        output_file = None
        if request.save_results:
            if request.output_path:
                output_file = service.save_results(result_data, request.output_path)
            else:
                output_file = service.save_results(result_data)

        response_data = {
            "total_records": len(result_data),
            "duplicates_found": summary.get("duplicates_found", 0),
            "duplicate_groups": summary.get("duplicate_groups", 0),
            "duplicate_rate": summary.get("duplicate_rate", 0),
            "output_file": output_file,
            "detection_time": summary.get("detection_time")
        }

        logger.info(f"重复投诉检测完成，检测到 {summary.get('duplicate_groups', 0)} 个重复组")

        return DuplicateDetectionResponse(
            success=True,
            message="重复投诉检测完成",
            data=response_data,
            summary=summary
        )

    except HTTPException:
        # Let deliberate HTTP errors keep their original status code instead
        # of being remapped to 500 by the generic handler below.
        raise
    except Exception as e:
        # logger.exception records the traceback (logger.error did not).
        logger.exception(f"重复投诉检测失败: {e}")
        raise HTTPException(status_code=500, detail=f"重复投诉检测失败: {str(e)}")


@duplicate_analysis_router.get("/similarity-distribution")
async def get_similarity_distribution(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    min_similarity: Optional[float] = Query(None, ge=0.0, le=1.0, description="最小相似度"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return similarity-score distribution statistics.

    Optional query parameters narrow the data to one task and/or a time
    window; ``min_similarity`` drops records below the given score.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters from the individual query parameters.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date
        filters.min_similarity = min_similarity

        analyzer = DuplicateAnalyzer(db)
        query = analyzer.build_base_query(filters)
        distribution = analyzer.get_similarity_distribution(query, filters)

        return {
            "code": 200,
            "message": "获取相似度分布成功",
            "data": distribution,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors (e.g. raised by dependencies).
        raise
    except Exception as e:
        # Previously the error was discarded unlogged; keep the traceback.
        logger.exception(f"获取相似度分布失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取相似度分布失败: {str(e)}")


@duplicate_analysis_router.get("/cluster-analysis")
async def get_cluster_analysis(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    min_cluster_size: Optional[int] = Query(2, ge=2, description="最小簇大小"),
    limit: Optional[int] = Query(20, ge=1, le=100, description="返回簇数量限制"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return duplicate-cluster analysis, capped at ``limit`` clusters.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters; the query param min_cluster_size maps
        # onto the filters' cluster_size attribute.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date
        filters.cluster_size = min_cluster_size

        analyzer = DuplicateAnalyzer(db)
        query = analyzer.build_base_query(filters)
        cluster_analysis = analyzer.get_cluster_analysis(query, filters, limit)

        return {
            "code": 200,
            "message": "获取簇分析成功",
            "data": cluster_analysis,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"获取簇分析失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取簇分析失败: {str(e)}")


@duplicate_analysis_router.get("/popular-duplicates")
async def get_popular_duplicates(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    min_similarity: Optional[float] = Query(0.8, ge=0.0, le=1.0, description="最小相似度"),
    limit: Optional[int] = Query(10, ge=1, le=50, description="返回记录数量限制"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return the most frequent duplicate contents, capped at ``limit``.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters from the query parameters.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date
        filters.min_similarity = min_similarity

        analyzer = DuplicateAnalyzer(db)
        query = analyzer.build_base_query(filters)
        popular_duplicates = analyzer.get_popular_duplicates(query, filters, limit)

        return {
            "code": 200,
            "message": "获取热门重复内容成功",
            "data": popular_duplicates,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"获取热门重复内容失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取热门重复内容失败: {str(e)}")


@duplicate_analysis_router.get("/duplicate-trends")
async def get_duplicate_trends(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    min_similarity: Optional[float] = Query(0.8, ge=0.0, le=1.0, description="最小相似度"),
    granularity: Optional[str] = Query("day", description="时间粒度: day, week, month"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return duplicate-trend analysis bucketed by ``granularity``.

    NOTE(review): ``granularity`` is not validated here; presumably the
    analyzer rejects values outside day/week/month — confirm.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters from the query parameters.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date
        filters.min_similarity = min_similarity

        analyzer = DuplicateAnalyzer(db)
        query = analyzer.build_base_query(filters)
        trends = analyzer.get_duplicate_trends(query, filters, granularity)

        return {
            "code": 200,
            "message": "获取重复趋势成功",
            "data": trends,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"获取重复趋势失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取重复趋势失败: {str(e)}")


@duplicate_analysis_router.get("/duplicate-details")
async def get_duplicate_details(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    min_similarity: Optional[float] = Query(0.8, ge=0.0, le=1.0, description="最小相似度"),
    limit: Optional[int] = Query(20, ge=1, le=100, description="返回记录数量限制"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return detailed duplicate records, capped at ``limit``.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters from the query parameters.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date
        filters.min_similarity = min_similarity

        analyzer = DuplicateAnalyzer(db)
        query = analyzer.build_base_query(filters)
        duplicate_details = analyzer.get_duplicate_details(query, filters, limit)

        return {
            "code": 200,
            "message": "获取重复详情成功",
            "data": duplicate_details,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"获取重复详情失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取重复详情失败: {str(e)}")


@duplicate_analysis_router.post("/similarity-analysis")
async def analyze_similarity_threshold(
    request: SimilarityAnalysisRequest,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Analyse duplicates at the specific similarity threshold in ``request``.

    The threshold is applied both as the filters' minimum similarity and as
    the analyzer's explicit threshold argument.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters from the request body.
        filters = DuplicateAnalysisFilters()
        filters.task_id = request.task_id
        filters.start_date = request.start_date
        filters.end_date = request.end_date
        filters.min_similarity = request.similarity_threshold

        analyzer = DuplicateAnalyzer(db)
        query = analyzer.build_base_query(filters)
        analysis = analyzer.analyze_similarity_threshold(query, filters, request.similarity_threshold)

        return {
            "code": 200,
            "message": "相似度阈值分析完成",
            "data": analysis,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"相似度阈值分析失败: {e}")
        raise HTTPException(status_code=500, detail=f"相似度阈值分析失败: {str(e)}")


@duplicate_analysis_router.post("/cluster-analysis")
async def analyze_clusters(
    request: ClusterAnalysisRequest,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Analyse duplicate clusters per the options in ``request``.

    Unlike the GET variant, no result limit is passed; the analyzer's
    default applies.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters; the request's min_cluster_size maps
        # onto the filters' cluster_size attribute.
        filters = DuplicateAnalysisFilters()
        filters.task_id = request.task_id
        filters.start_date = request.start_date
        filters.end_date = request.end_date
        filters.cluster_size = request.min_cluster_size

        analyzer = DuplicateAnalyzer(db)
        query = analyzer.build_base_query(filters)
        cluster_analysis = analyzer.get_cluster_analysis(query, filters)

        return {
            "code": 200,
            "message": "簇分析完成",
            "data": cluster_analysis,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"簇分析失败: {e}")
        raise HTTPException(status_code=500, detail=f"簇分析失败: {str(e)}")


@duplicate_analysis_router.get("/time-series")
async def get_duplicate_time_series(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    min_similarity: Optional[float] = Query(0.8, ge=0.0, le=1.0, description="最小相似度"),
    granularity: Optional[str] = Query("day", description="时间粒度: day, week, month"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return duplicate time-series data bucketed by ``granularity``.

    This endpoint calls ``get_time_series_data`` directly with the filters
    (no base query is built, unlike the other handlers).

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters from the query parameters.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date
        filters.min_similarity = min_similarity

        analyzer = DuplicateAnalyzer(db)
        time_series = analyzer.get_time_series_data(filters, granularity)

        return {
            "code": 200,
            "message": "获取时间序列成功",
            "data": time_series,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"获取时间序列失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取时间序列失败: {str(e)}")


@duplicate_analysis_router.get("/summary")
async def get_duplicate_summary(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return a compact duplicate-analysis summary.

    Combines the analyzer's basic statistics with headline figures pulled
    from the similarity distribution.

    Raises:
        HTTPException: 500 when the analyzer fails.
    """
    try:
        # Assemble analyzer filters from the query parameters.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date

        analyzer = DuplicateAnalyzer(db)
        basic_stats = analyzer.get_basic_statistics(filters)

        # A brief similarity-distribution overview supplements the basics.
        query = analyzer.build_base_query(filters)
        distribution = analyzer.get_similarity_distribution(query, filters)

        summary = {
            "basic_statistics": basic_stats,
            "similarity_overview": {
                "average_similarity": distribution.get("average_similarity", 0),
                "max_similarity": distribution.get("max_similarity", 0),
                "min_similarity": distribution.get("min_similarity", 0),
                "high_similarity_count": distribution.get("high_similarity_count", 0)
            },
            "duplicate_rate": distribution.get("duplicate_rate", 0),
            "potential_duplicates": distribution.get("potential_duplicates", 0)
        }

        return {
            "code": 200,
            "message": "获取重复分析摘要成功",
            "data": summary,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Preserve deliberate HTTP errors' status codes.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"获取重复分析摘要失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取重复分析摘要失败: {str(e)}")


@duplicate_analysis_router.get("/export")
async def export_duplicate_data(
    task_id: Optional[int] = Query(None, description="分析任务ID"),
    start_date: Optional[datetime] = Query(None, description="开始时间"),
    end_date: Optional[datetime] = Query(None, description="结束时间"),
    min_similarity: Optional[float] = Query(0.8, ge=0.0, le=1.0, description="最小相似度"),
    format: str = Query("json", description="导出格式: json, csv"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Export duplicate-analysis data in the requested format.

    Only ``json`` is implemented; ``csv`` and unknown formats return 400.

    Raises:
        HTTPException: 400 for failed analysis / unsupported formats,
            500 for unexpected errors.
    """
    try:
        # Assemble analyzer filters from the query parameters.
        filters = DuplicateAnalysisFilters()
        filters.task_id = task_id
        filters.start_date = start_date
        filters.end_date = end_date
        filters.min_similarity = min_similarity

        analyzer = DuplicateAnalyzer(db)
        result = analyzer.analyze_data(filters)

        if not result.success:
            raise HTTPException(status_code=400, detail=result.message)

        if format == "json":
            return {
                "code": 200,
                "message": "数据导出成功",
                "data": result.data,
                "format": "json",
                "timestamp": datetime.now().isoformat()
            }
        elif format == "csv":
            # CSV export is not yet implemented.
            raise HTTPException(status_code=400, detail="CSV导出功能待实现")
        else:
            raise HTTPException(status_code=400, detail="不支持的导出格式")

    except HTTPException:
        # BUG FIX: the 400 errors raised above were previously swallowed by
        # the broad except below and re-raised as a misleading 500; re-raise
        # them unchanged so clients see the intended status and detail.
        raise
    except Exception as e:
        # Log with traceback before mapping to a generic 500.
        logger.exception(f"导出数据失败: {e}")
        raise HTTPException(status_code=500, detail=f"导出数据失败: {str(e)}")