"""
分析结果管理API接口
"""
from collections import Counter
from datetime import datetime, date, timedelta
from typing import Dict, Any, List, Optional

from fastapi import APIRouter, Depends, HTTPException, Query
from pydantic import BaseModel, Field
from sqlalchemy import Float, func, desc, and_, or_
from sqlalchemy.orm import Session

from core.database import get_db
from core.logging_config import get_logger, log_api_request
from core.auth import get_current_active_user, require_permission
from models.petition_record import (
    RegionalAnalysisResult,
    ComplaintTypeAnalysisResult,
    SentimentAnalysisResult,
    ComplianceAnalysisResult,
    DuplicateAnalysisResult
)
from models.analysis_task import AnalysisTask
from models.user import User

# 路由器
analysis_results_router = APIRouter(tags=["分析结果管理"])

# 日志记录器
logger = get_logger("analysis_results_api")


class AnalysisResultFilter(BaseModel):
    """Shared filter parameters for analysis-result queries.

    NOTE(review): not referenced by any endpoint visible in this file —
    presumably consumed elsewhere or reserved for future use; confirm
    before removing. All fields are optional; ``None`` means "do not
    filter on this field".
    """
    task_id: Optional[int] = Field(None, description="任务ID")
    petition_record_id: Optional[str] = Field(None, description="信访记录ID")
    start_date: Optional[date] = Field(None, description="开始日期")
    end_date: Optional[date] = Field(None, description="结束日期")
    region: Optional[str] = Field(None, description="区域")
    complaint_type: Optional[str] = Field(None, description="投诉类型")
    sentiment_type: Optional[str] = Field(None, description="情感类型")
    compliance_status: Optional[str] = Field(None, description="合规状态")


# Regional analysis result API
@analysis_results_router.get("/regional/")
@log_api_request("list_regional_analysis_results")
@require_permission("analysis_result:view")
async def list_regional_analysis_results(
    task_id: Optional[int] = Query(None, description="任务ID"),
    petition_record_id: Optional[str] = Query(None, description="信访记录ID"),
    region: Optional[str] = Query(None, description="区域"),
    has_location: Optional[bool] = Query(None, description="是否有位置信息"),
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """List regional analysis results with optional filters and pagination."""
    logger.info("获取区域分析结果列表请求")

    try:
        model = RegionalAnalysisResult
        stmt = db.query(model).filter(model.is_deleted == False)

        if task_id:
            stmt = stmt.filter(model.task_id == task_id)
        if petition_record_id:
            stmt = stmt.filter(model.petition_record_id == petition_record_id)
        if region:
            # Match the region string against either the district or street field.
            stmt = stmt.filter(
                or_(model.dzxx_xzq.contains(region), model.dzxx_jd.contains(region))
            )
        if has_location is not None:
            # "Has location" means both coordinates are present and non-empty.
            if has_location:
                stmt = stmt.filter(
                    and_(
                        model.dzxx_lng.isnot(None),
                        model.dzxx_lat.isnot(None),
                        model.dzxx_lng != '',
                        model.dzxx_lat != '',
                    )
                )
            else:
                stmt = stmt.filter(
                    or_(
                        model.dzxx_lng.is_(None),
                        model.dzxx_lat.is_(None),
                        model.dzxx_lng == '',
                        model.dzxx_lat == '',
                    )
                )

        total = stmt.count()

        rows = (
            stmt.order_by(desc(model.created_at))
            .offset((page - 1) * page_size)
            .limit(page_size)
            .all()
        )

        # Standard response envelope used across this module.
        return {
            "code": 200,
            "message": "success",
            "data": {
                "total": total,
                "page": page,
                "page_size": page_size,
                "items": [row.to_dict() for row in rows],
                "total_pages": (total + page_size - 1) // page_size,
            },
            "timestamp": datetime.now().isoformat(),
        }

    except Exception as e:
        logger.error(f"获取区域分析结果列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/regional/{result_id}/")
@log_api_request("get_regional_analysis_result_detail")
@require_permission("analysis_result:view")
async def get_regional_analysis_result_detail(
    result_id: int,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return one regional analysis result, enriched with task info and coordinates."""
    logger.info(f"获取区域分析结果详情请求 - ID: {result_id}")

    try:
        record = (
            db.query(RegionalAnalysisResult)
            .filter(
                RegionalAnalysisResult.id == result_id,
                RegionalAnalysisResult.is_deleted == False,
            )
            .first()
        )
        if record is None:
            raise HTTPException(status_code=404, detail="区域分析结果不存在")

        payload = record.to_dict()

        # Attach summary info about the owning task, when one is linked.
        if record.task_id:
            task = (
                db.query(AnalysisTask)
                .filter(AnalysisTask.id == record.task_id, AnalysisTask.is_deleted == False)
                .first()
            )
            if task:
                payload["task_info"] = {
                    "id": task.id,
                    "name": task.name,
                    "status": task.status,
                    "created_at": task.created_at.isoformat() if task.created_at else None,
                }

        # Coordinates are optional; a parse failure simply yields None.
        try:
            lng, lat = record.get_location_coordinates()
            payload["coordinates"] = {"lng": lng, "lat": lat} if lng and lat else None
        except ValueError:
            payload["coordinates"] = None

        return {
            "code": 200,
            "message": "success",
            "data": payload,
            "timestamp": datetime.now().isoformat(),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取区域分析结果详情失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/regional/map-data")
@log_api_request("get_regional_map_data")
@require_permission("analysis_result:view")
async def get_regional_map_data(
    task_id: Optional[int] = Query(None, description="任务ID"),
    region: Optional[str] = Query(None, description="区域"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return map points for regional analysis results that carry coordinates.

    Rows whose coordinates fail to parse are skipped. NOTE: this endpoint
    returns a raw payload rather than the {code, message, data} envelope
    used elsewhere in this module — kept as-is for caller compatibility.
    """
    logger.info("获取区域分析地图数据请求")

    try:
        # Restrict to rows that actually have coordinate values stored.
        query = db.query(RegionalAnalysisResult).filter(
            RegionalAnalysisResult.is_deleted == False,
            RegionalAnalysisResult.dzxx_lng.isnot(None),
            RegionalAnalysisResult.dzxx_lat.isnot(None),
            RegionalAnalysisResult.dzxx_lng != '',
            RegionalAnalysisResult.dzxx_lat != ''
        )

        if task_id:
            query = query.filter(RegionalAnalysisResult.task_id == task_id)
        if region:
            query = query.filter(
                or_(
                    RegionalAnalysisResult.dzxx_xzq.contains(region),
                    RegionalAnalysisResult.dzxx_jd.contains(region)
                )
            )

        map_data = []
        for result in query.all():
            try:
                lng, lat = result.get_location_coordinates()
            except ValueError:
                continue  # unparseable coordinates: skip this point
            if not (lng and lat):
                continue
            # Fix: xfnr may be NULL; len(None) previously raised TypeError here.
            content = result.xfnr or ""
            map_data.append({
                "id": result.id,
                "petition_record_id": result.petition_record_id,
                "name": result.tsr or "未知",
                "address": result.get_full_address(),
                "coordinates": [lng, lat],
                "region": result.dzxx_xzq,
                "district": result.dzxx_jd,
                "confidence": result.get_confidence_score(),
                "description": content[:200] + "..." if len(content) > 200 else content
            })

        return {
            "total_points": len(map_data),
            "map_data": map_data
        }

    except Exception as e:
        logger.error(f"获取区域分析地图数据失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# Complaint type analysis result API
@analysis_results_router.get("/complaint-types/")
@log_api_request("list_complaint_type_analysis_results")
@require_permission("analysis_result:view")
async def list_complaint_type_analysis_results(
    task_id: Optional[int] = Query(None, description="任务ID"),
    petition_record_id: Optional[str] = Query(None, description="信访记录ID"),
    level1_type: Optional[str] = Query(None, description="一级类型"),
    level2_type: Optional[str] = Query(None, description="二级类型"),
    min_confidence: Optional[float] = Query(None, ge=0, le=1, description="最小置信度"),
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """List complaint-type analysis results with filtering and pagination.

    Fix: the confidence threshold used to be applied *after* SQL pagination,
    so a page could silently shrink while `total`/`total_pages` still counted
    the unfiltered rows. The threshold is now applied before pagination.
    """
    logger.info("获取投诉类型分析结果列表请求")

    try:
        query = db.query(ComplaintTypeAnalysisResult).filter(
            ComplaintTypeAnalysisResult.is_deleted == False
        )

        if task_id:
            query = query.filter(ComplaintTypeAnalysisResult.task_id == task_id)
        if petition_record_id:
            query = query.filter(
                ComplaintTypeAnalysisResult.petition_record_id == petition_record_id
            )
        if level1_type:
            query = query.filter(ComplaintTypeAnalysisResult.tslx_yj == level1_type)
        if level2_type:
            query = query.filter(ComplaintTypeAnalysisResult.tslx_rj == level2_type)

        ordered = query.order_by(desc(ComplaintTypeAnalysisResult.created_at))
        offset = (page - 1) * page_size

        if min_confidence is None:
            total = query.count()
            results = ordered.offset(offset).limit(page_size).all()
        else:
            # Confidence values are derived per-row by model helpers in Python,
            # so the threshold cannot be pushed into SQL: load the ordered set,
            # filter, then paginate in memory so total and items stay consistent.
            matching = [
                result
                for result in ordered.all()
                if result.get_level2_confidence() >= min_confidence
                or result.get_level3_confidence() >= min_confidence
            ]
            total = len(matching)
            results = matching[offset:offset + page_size]

        result_list = [result.to_dict() for result in results]

        return {
            "code": 200,
            "message": "success",
            "data": {
                "total": total,
                "page": page,
                "page_size": page_size,
                "items": result_list,
                "total_pages": (total + page_size - 1) // page_size
            },
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        logger.error(f"获取投诉类型分析结果列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/complaint-types/{result_id}/")
@log_api_request("get_complaint_type_analysis_result_detail")
@require_permission("analysis_result:view")
async def get_complaint_type_analysis_result_detail(
    result_id: int,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return one complaint-type analysis result with task info and confidence scores."""
    logger.info(f"获取投诉类型分析结果详情请求 - ID: {result_id}")

    try:
        record = (
            db.query(ComplaintTypeAnalysisResult)
            .filter(
                ComplaintTypeAnalysisResult.id == result_id,
                ComplaintTypeAnalysisResult.is_deleted == False,
            )
            .first()
        )
        if record is None:
            raise HTTPException(status_code=404, detail="投诉类型分析结果不存在")

        payload = record.to_dict()

        # Attach summary info about the owning task, when one is linked.
        if record.task_id:
            task = (
                db.query(AnalysisTask)
                .filter(AnalysisTask.id == record.task_id, AnalysisTask.is_deleted == False)
                .first()
            )
            if task:
                payload["task_info"] = {
                    "id": task.id,
                    "name": task.name,
                    "status": task.status,
                    "created_at": task.created_at.isoformat() if task.created_at else None,
                }

        # Derived confidence scores from the model helpers.
        level2_score = record.get_level2_confidence()
        level3_score = record.get_level3_confidence()
        payload["confidence_scores"] = {
            "level2_confidence": level2_score,
            "level3_confidence": level3_score,
            "max_confidence": max(level2_score, level3_score),
        }

        return {
            "code": 200,
            "message": "success",
            "data": payload,
            "timestamp": datetime.now().isoformat(),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取投诉类型分析结果详情失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/complaint-types/statistics")
@log_api_request("get_complaint_type_statistics")
@require_permission("analysis_result:view")
async def get_complaint_type_statistics(
    task_id: Optional[int] = Query(None, description="任务ID"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Aggregate complaint-type distributions (level 1 and level 2).

    Fixes: `Float` was referenced without being imported (NameError at
    runtime, now imported at module level); the task filter previously
    passed a bare Python `True` to filter() (`expr if task_id else True`),
    a deprecated SQLAlchemy coercion — it is now applied conditionally;
    an unused base query was removed.
    """
    logger.info("获取投诉类型统计请求")

    try:
        # Level-1 type distribution, aggregated in SQL.
        level1_query = db.query(
            ComplaintTypeAnalysisResult.tslx_yj,
            func.count(ComplaintTypeAnalysisResult.id).label('count')
        ).filter(
            ComplaintTypeAnalysisResult.is_deleted == False,
            ComplaintTypeAnalysisResult.tslx_yj.isnot(None)
        )
        if task_id:
            level1_query = level1_query.filter(ComplaintTypeAnalysisResult.task_id == task_id)
        level1_stats = level1_query.group_by(ComplaintTypeAnalysisResult.tslx_yj).all()

        # Level-2 distribution with average confidence. The stored confidence
        # is a percentage string (e.g. '85%'): strip '%', cast to Float, /100.
        level2_query = db.query(
            ComplaintTypeAnalysisResult.tslx_yj,
            ComplaintTypeAnalysisResult.tslx_rj,
            func.count(ComplaintTypeAnalysisResult.id).label('count'),
            func.avg(
                func.cast(
                    func.replace(ComplaintTypeAnalysisResult.tslx_rj_zxd, '%', ''),
                    Float
                ) / 100
            ).label('avg_confidence')
        ).filter(
            ComplaintTypeAnalysisResult.is_deleted == False,
            ComplaintTypeAnalysisResult.tslx_rj.isnot(None)
        )
        if task_id:
            level2_query = level2_query.filter(ComplaintTypeAnalysisResult.task_id == task_id)
        level2_stats = level2_query.group_by(
            ComplaintTypeAnalysisResult.tslx_yj,
            ComplaintTypeAnalysisResult.tslx_rj
        ).all()

        return {
            "level1_distribution": [
                {"type": stat.tslx_yj or "未分类", "count": stat.count}
                for stat in level1_stats
            ],
            "level2_distribution": [
                {
                    "level1": stat.tslx_yj or "未分类",
                    "level2": stat.tslx_rj or "未分类",
                    "count": stat.count,
                    "avg_confidence": round(stat.avg_confidence, 3) if stat.avg_confidence else 0
                }
                for stat in level2_stats
            ]
        }

    except Exception as e:
        logger.error(f"获取投诉类型统计失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# Sentiment analysis result API
@analysis_results_router.get("/sentiment/")
@log_api_request("list_sentiment_analysis_results")
@require_permission("analysis_result:view")
async def list_sentiment_analysis_results(
    task_id: Optional[int] = Query(None, description="任务ID"),
    petition_record_id: Optional[str] = Query(None, description="信访记录ID"),
    sentiment_type: Optional[str] = Query(None, description="情感类型"),
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """List sentiment analysis results with optional filters and pagination."""
    logger.info("获取情感分析结果列表请求")

    try:
        # Collect filter criteria up front, then apply in one filter() call.
        criteria = [SentimentAnalysisResult.is_deleted == False]
        if task_id:
            criteria.append(SentimentAnalysisResult.task_id == task_id)
        if petition_record_id:
            criteria.append(SentimentAnalysisResult.petition_record_id == petition_record_id)
        if sentiment_type:
            criteria.append(SentimentAnalysisResult.qgfx_lx == sentiment_type)

        base = db.query(SentimentAnalysisResult).filter(*criteria)
        total = base.count()

        rows = (
            base.order_by(desc(SentimentAnalysisResult.created_at))
            .offset((page - 1) * page_size)
            .limit(page_size)
            .all()
        )

        return {
            "code": 200,
            "message": "success",
            "data": {
                "total": total,
                "page": page,
                "page_size": page_size,
                "items": [row.to_dict() for row in rows],
                "total_pages": (total + page_size - 1) // page_size,
            },
            "timestamp": datetime.now().isoformat(),
        }

    except Exception as e:
        logger.error(f"获取情感分析结果列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/sentiment/{result_id}/")
@log_api_request("get_sentiment_analysis_result_detail")
@require_permission("analysis_result:view")
async def get_sentiment_analysis_result_detail(
    result_id: int,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return one sentiment analysis result with task info and sentiment summary."""
    logger.info(f"获取情感分析结果详情请求 - ID: {result_id}")

    try:
        record = (
            db.query(SentimentAnalysisResult)
            .filter(
                SentimentAnalysisResult.id == result_id,
                SentimentAnalysisResult.is_deleted == False,
            )
            .first()
        )
        if record is None:
            raise HTTPException(status_code=404, detail="情感分析结果不存在")

        payload = record.to_dict()

        # Attach summary info about the owning task, when one is linked.
        if record.task_id:
            task = (
                db.query(AnalysisTask)
                .filter(AnalysisTask.id == record.task_id, AnalysisTask.is_deleted == False)
                .first()
            )
            if task:
                payload["task_info"] = {
                    "id": task.id,
                    "name": task.name,
                    "status": task.status,
                    "created_at": task.created_at.isoformat() if task.created_at else None,
                }

        # Sentiment summary derived from the model helpers.
        keywords = record.get_keywords_list()
        payload["sentiment_analysis"] = {
            "sentiment_type": record.qgfx_lx,
            "sentiment_label": record.get_sentiment_label(),
            "keywords": keywords,
            "keyword_count": len(keywords),
            "summary_length": len(record.qgfx_zy or ""),
            "intensity_score": record.get_sentiment_intensity(),
        }

        return {
            "code": 200,
            "message": "success",
            "data": payload,
            "timestamp": datetime.now().isoformat(),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取情感分析结果详情失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/sentiment/statistics")
@log_api_request("get_sentiment_statistics")
@require_permission("analysis_result:view")
async def get_sentiment_statistics(
    task_id: Optional[int] = Query(None, description="任务ID"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Aggregate sentiment type distribution and the top-20 keyword frequencies.

    Improvements: the hand-rolled frequency dict + sort-and-slice is replaced
    with `collections.Counter.most_common`; the task filter no longer passes
    a bare Python `True` to filter() (deprecated SQLAlchemy coercion).
    """
    logger.info("获取情感分析统计请求")

    try:
        # Base query for keyword extraction; keywords must be parsed per-row
        # by the model helper, so this part cannot be aggregated in SQL.
        query = db.query(SentimentAnalysisResult).filter(SentimentAnalysisResult.is_deleted == False)
        if task_id:
            query = query.filter(SentimentAnalysisResult.task_id == task_id)

        # Sentiment type distribution, aggregated in SQL.
        sentiment_query = db.query(
            SentimentAnalysisResult.qgfx_lx,
            func.count(SentimentAnalysisResult.id).label('count')
        ).filter(
            SentimentAnalysisResult.is_deleted == False,
            SentimentAnalysisResult.qgfx_lx.isnot(None)
        )
        if task_id:
            sentiment_query = sentiment_query.filter(SentimentAnalysisResult.task_id == task_id)
        sentiment_stats = sentiment_query.group_by(SentimentAnalysisResult.qgfx_lx).all()

        # Keyword frequencies across all matching rows; keep the top 20.
        keyword_counter = Counter()
        for result in query.all():
            keyword_counter.update(result.get_keywords_list())
        top_keywords = keyword_counter.most_common(20)

        return {
            "sentiment_distribution": [
                {"type": stat.qgfx_lx or "未知", "count": stat.count}
                for stat in sentiment_stats
            ],
            "top_keywords": [
                {"keyword": keyword, "count": count}
                for keyword, count in top_keywords
            ]
        }

    except Exception as e:
        logger.error(f"获取情感分析统计失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# Compliance analysis result API
@analysis_results_router.get("/compliance/")
@log_api_request("list_compliance_analysis_results")
@require_permission("analysis_result:view")
async def list_compliance_analysis_results(
    task_id: Optional[int] = Query(None, description="任务ID"),
    petition_record_id: Optional[str] = Query(None, description="信访记录ID"),
    is_compliant: Optional[bool] = Query(None, description="是否合规"),
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """List compliance analysis results with filtering and pagination.

    Fix: the `is_compliant` filter relied on operator precedence that made
    `filter(X == "1" if is_compliant else "0")` evaluate to the bare string
    "0" when `is_compliant` was False, so the False branch never actually
    filtered by status.
    """
    logger.info("获取规范性分析结果列表请求")

    try:
        query = db.query(ComplianceAnalysisResult).filter(
            ComplianceAnalysisResult.is_deleted == False
        )

        if task_id:
            query = query.filter(ComplianceAnalysisResult.task_id == task_id)
        if petition_record_id:
            query = query.filter(ComplianceAnalysisResult.petition_record_id == petition_record_id)
        if is_compliant is not None:
            # "1" marks a compliant record, "0" a non-compliant one.
            expected_status = "1" if is_compliant else "0"
            query = query.filter(ComplianceAnalysisResult.gffx_zt == expected_status)

        total = query.count()

        results = query.order_by(desc(ComplianceAnalysisResult.created_at)).offset(
            (page - 1) * page_size
        ).limit(page_size).all()

        result_list = [result.to_dict() for result in results]

        return {
            "code": 200,
            "message": "success",
            "data": {
                "total": total,
                "page": page,
                "page_size": page_size,
                "items": result_list,
                "total_pages": (total + page_size - 1) // page_size
            },
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        logger.error(f"获取规范性分析结果列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/compliance/{result_id}/")
@log_api_request("get_compliance_analysis_result_detail")
@require_permission("analysis_result:view")
async def get_compliance_analysis_result_detail(
    result_id: int,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return one compliance analysis result with task info and compliance summary."""
    logger.info(f"获取规范性分析结果详情请求 - ID: {result_id}")

    try:
        record = (
            db.query(ComplianceAnalysisResult)
            .filter(
                ComplianceAnalysisResult.id == result_id,
                ComplianceAnalysisResult.is_deleted == False,
            )
            .first()
        )
        if record is None:
            raise HTTPException(status_code=404, detail="规范性分析结果不存在")

        payload = record.to_dict()

        # Attach summary info about the owning task, when one is linked.
        if record.task_id:
            task = (
                db.query(AnalysisTask)
                .filter(AnalysisTask.id == record.task_id, AnalysisTask.is_deleted == False)
                .first()
            )
            if task:
                payload["task_info"] = {
                    "id": task.id,
                    "name": task.name,
                    "status": task.status,
                    "created_at": task.created_at.isoformat() if task.created_at else None,
                }

        # Compliance summary derived from the model helpers.
        payload["compliance_analysis"] = {
            "is_compliant": record.is_compliant(),
            "compliance_status": record.get_compliance_status(),
            "confidence_score": record.get_confidence_score(),
            "has_reasoning": bool(record.gffx_pdly),
            "reasoning_length": len(record.gffx_pdly or ""),
            "response_content_length": len(record.gffx_dfnr or ""),
        }

        return {
            "code": 200,
            "message": "success",
            "data": payload,
            "timestamp": datetime.now().isoformat(),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取规范性分析结果详情失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/compliance/statistics")
@log_api_request("get_compliance_statistics")
@require_permission("analysis_result:view")
async def get_compliance_statistics(
    task_id: Optional[int] = Query(None, description="任务ID"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Aggregate compliance counts and rates over compliance analysis results."""
    logger.info("获取规范性分析统计请求")

    try:
        base = db.query(ComplianceAnalysisResult).filter(
            ComplianceAnalysisResult.is_deleted == False
        )
        if task_id:
            base = base.filter(ComplianceAnalysisResult.task_id == task_id)

        # Status column: "1" = compliant, "0" = non-compliant; NULL or any
        # other value falls into the "unknown" bucket.
        compliant_count = base.filter(ComplianceAnalysisResult.gffx_zt == "1").count()
        non_compliant_count = base.filter(ComplianceAnalysisResult.gffx_zt == "0").count()
        unknown_count = base.filter(
            or_(
                ComplianceAnalysisResult.gffx_zt.is_(None),
                and_(
                    ComplianceAnalysisResult.gffx_zt != "1",
                    ComplianceAnalysisResult.gffx_zt != "0",
                ),
            )
        ).count()

        total = compliant_count + non_compliant_count + unknown_count

        return {
            "total": total,
            "compliant": compliant_count,
            "non_compliant": non_compliant_count,
            "unknown": unknown_count,
            "compliance_rate": round(compliant_count / total * 100, 2) if total > 0 else 0,
            "non_compliance_rate": round(non_compliant_count / total * 100, 2) if total > 0 else 0,
        }

    except Exception as e:
        logger.error(f"获取规范性分析统计失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# Duplicate analysis result API
@analysis_results_router.get("/duplicates/")
@log_api_request("list_duplicate_analysis_results")
@require_permission("analysis_result:view")
async def list_duplicate_analysis_results(
    task_id: Optional[int] = Query(None, description="任务ID"),
    cfbh: Optional[str] = Query(None, description="重复编号"),
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """List duplicate analysis results with optional filters and pagination."""
    logger.info("获取重复分析结果列表请求")

    try:
        # Collect filter criteria up front, then apply in one filter() call.
        criteria = [DuplicateAnalysisResult.is_deleted == False]
        if task_id:
            criteria.append(DuplicateAnalysisResult.task_id == task_id)
        if cfbh:
            criteria.append(DuplicateAnalysisResult.cfbh == cfbh)

        base = db.query(DuplicateAnalysisResult).filter(*criteria)
        total = base.count()

        rows = (
            base.order_by(desc(DuplicateAnalysisResult.created_at))
            .offset((page - 1) * page_size)
            .limit(page_size)
            .all()
        )

        return {
            "code": 200,
            "message": "success",
            "data": {
                "total": total,
                "page": page,
                "page_size": page_size,
                "items": [row.to_dict() for row in rows],
                "total_pages": (total + page_size - 1) // page_size,
            },
            "timestamp": datetime.now().isoformat(),
        }

    except Exception as e:
        logger.error(f"获取重复分析结果列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/duplicates/{result_id}/")
@log_api_request("get_duplicate_analysis_result_detail")
@require_permission("analysis_result:view")
async def get_duplicate_analysis_result_detail(
    result_id: int,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """Return one duplicate analysis result plus its duplicate group, if any."""
    logger.info(f"获取重复分析结果详情请求 - ID: {result_id}")

    try:
        record = (
            db.query(DuplicateAnalysisResult)
            .filter(
                DuplicateAnalysisResult.id == result_id,
                DuplicateAnalysisResult.is_deleted == False,
            )
            .first()
        )
        if record is None:
            raise HTTPException(status_code=404, detail="重复分析结果不存在")

        payload = record.to_dict()

        # Attach summary info about the owning task, when one is linked.
        if record.task_id:
            task = (
                db.query(AnalysisTask)
                .filter(AnalysisTask.id == record.task_id, AnalysisTask.is_deleted == False)
                .first()
            )
            if task:
                payload["task_info"] = {
                    "id": task.id,
                    "name": task.name,
                    "status": task.status,
                    "created_at": task.created_at.isoformat() if task.created_at else None,
                }

        # Duplicate-specific metrics derived from the model helpers.
        payload["duplicate_analysis"] = {
            "has_duplicate_id": bool(record.cfbh),
            "duplicate_group": record.cfbh,
            "confidence_score": record.get_confidence_score(),
            "similarity_score": record.get_similarity_score(),
            "content_length": len(record.xfnr or ""),
            "reasoning_length": len(record.cfxqyy or ""),
        }

        # When the record belongs to a duplicate group, list its sibling members.
        if record.cfbh:
            siblings = (
                db.query(DuplicateAnalysisResult)
                .filter(
                    DuplicateAnalysisResult.cfbh == record.cfbh,
                    DuplicateAnalysisResult.id != result_id,
                    DuplicateAnalysisResult.is_deleted == False,
                )
                .all()
            )
            payload["group_members"] = [
                {
                    "id": member.id,
                    "petition_record_id": member.xfxh,
                    "created_at": member.created_at.isoformat() if member.created_at else None,
                }
                for member in siblings
            ]
            payload["group_size"] = len(siblings) + 1  # +1 for this record itself
        else:
            payload["group_members"] = []
            payload["group_size"] = 1

        return {
            "code": 200,
            "message": "success",
            "data": payload,
            "timestamp": datetime.now().isoformat(),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取重复分析结果详情失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/duplicates/statistics")
@log_api_request("get_duplicate_statistics")
@require_permission("analysis_result:view")
async def get_duplicate_statistics(
    task_id: Optional[int] = Query(None, description="任务ID"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """获取重复分析统计

    Returns aggregate duplicate-analysis metrics — total duplicate rows,
    number of distinct duplicate groups (cfbh), max and average group size —
    optionally scoped to a single analysis task.
    """
    logger.info("获取重复分析统计请求")

    try:
        # Base query over non-deleted duplicate-analysis rows.
        query = db.query(DuplicateAnalysisResult).filter(DuplicateAnalysisResult.is_deleted == False)
        # `is not None` so a legitimate task_id of 0 is not silently ignored.
        if task_id is not None:
            query = query.filter(DuplicateAnalysisResult.task_id == task_id)

        # Per-group row counts keyed by duplicate-group id (cfbh). The task
        # filter is applied conditionally on the query object: the original
        # passed a bare Python `True` to .filter() when task_id was absent,
        # which modern SQLAlchemy rejects as a non-SQL expression.
        groups_query = db.query(
            DuplicateAnalysisResult.cfbh,
            func.count(DuplicateAnalysisResult.id).label('count')
        ).filter(
            DuplicateAnalysisResult.is_deleted == False,
            DuplicateAnalysisResult.cfbh.isnot(None)
        )
        if task_id is not None:
            groups_query = groups_query.filter(DuplicateAnalysisResult.task_id == task_id)
        duplicate_groups = groups_query.group_by(DuplicateAnalysisResult.cfbh).all()

        # Derived statistics.
        total_duplicates = query.count()
        unique_groups = len(duplicate_groups)
        group_sizes = [group.count for group in duplicate_groups]
        max_group_size = max(group_sizes) if group_sizes else 0
        avg_group_size = sum(group_sizes) / unique_groups if unique_groups > 0 else 0

        return {
            "total_duplicates": total_duplicates,
            "unique_groups": unique_groups,
            "max_group_size": max_group_size,
            "avg_group_size": round(avg_group_size, 2),
            "duplicate_groups": [
                {"group_id": group.cfbh, "count": group.count}
                for group in duplicate_groups
            ]
        }

    except Exception as e:
        logger.error(f"获取重复分析统计失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@analysis_results_router.get("/summary")
@log_api_request("get_analysis_results_summary")
@require_permission("analysis_result:view")
async def get_analysis_results_summary(
    task_id: Optional[int] = Query(None, description="任务ID"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """获取分析结果总览

    Returns per-analysis-type row counts (regional / complaint-type /
    sentiment / compliance / duplicate) plus a few sub-metrics, optionally
    scoped to a single analysis task.
    """
    logger.info("获取分析结果总览请求")

    try:
        def count_for(model, *criteria):
            # Count non-deleted rows of *model* matching the extra criteria,
            # scoped to task_id when provided. The original code spread
            # **{"task_id": ...} into Query.filter(), but filter() accepts
            # only positional SQL expressions, so it raised TypeError
            # whenever a task_id was supplied.
            q = db.query(model).filter(model.is_deleted == False, *criteria)
            if task_id is not None:
                q = q.filter(model.task_id == task_id)
            return q.count()

        # Distinct duplicate-group count, with the same conditional scoping.
        unique_groups_query = db.query(
            func.count(func.distinct(DuplicateAnalysisResult.cfbh))
        ).filter(
            DuplicateAnalysisResult.is_deleted == False,
            DuplicateAnalysisResult.cfbh.isnot(None)
        )
        if task_id is not None:
            unique_groups_query = unique_groups_query.filter(
                DuplicateAnalysisResult.task_id == task_id
            )

        return {
            "regional_analysis": {
                "total": count_for(RegionalAnalysisResult),
                "with_location": count_for(
                    RegionalAnalysisResult,
                    RegionalAnalysisResult.dzxx_lng.isnot(None),
                    RegionalAnalysisResult.dzxx_lat.isnot(None)
                )
            },
            "complaint_type_analysis": {
                "total": count_for(ComplaintTypeAnalysisResult)
            },
            "sentiment_analysis": {
                "total": count_for(SentimentAnalysisResult)
            },
            "compliance_analysis": {
                "total": count_for(ComplianceAnalysisResult),
                "compliant": count_for(
                    ComplianceAnalysisResult,
                    # "1" encodes compliant — see the compliance filters
                    # elsewhere in this module.
                    ComplianceAnalysisResult.gffx_zt == "1"
                )
            },
            "duplicate_analysis": {
                "total": count_for(DuplicateAnalysisResult),
                "unique_groups": unique_groups_query.scalar()
            }
        }

    except Exception as e:
        logger.error(f"获取分析结果总览失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# 新的统一查询接口
@analysis_results_router.get("/unified/")
@log_api_request("get_unified_analysis_results")
@require_permission("analysis_result:view")
async def get_unified_analysis_results(
    analysis_type: str = Query(..., description="分析类型: regional, complaint-type, sentiment, compliance, duplicate"),
    task_id: Optional[int] = Query(None, description="任务ID"),
    region: Optional[str] = Query(None, description="区域"),
    district: Optional[str] = Query(None, description="区县"),
    street: Optional[str] = Query(None, description="街道"),
    type_filter: Optional[str] = Query(None, description="类型过滤器"),
    start_date: Optional[date] = Query(None, description="开始日期"),
    end_date: Optional[date] = Query(None, description="结束日期"),
    search: Optional[str] = Query(None, description="搜索关键词"),
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """统一分析结果查询接口

    Dispatches to the per-type helper for the requested analysis type and
    wraps its paginated result in the standard response envelope.
    """
    logger.info(f"获取统一分析结果请求 - 类型: {analysis_type}")

    try:
        # Dispatch table: analysis type -> zero-arg coroutine factory.
        dispatch = {
            "regional": lambda: _get_regional_results_unified(
                db, task_id, region, district, street, search, start_date, end_date, page, page_size
            ),
            "complaint-type": lambda: _get_complaint_type_results_unified(
                db, task_id, type_filter, search, start_date, end_date, page, page_size
            ),
            "sentiment": lambda: _get_sentiment_results_unified(
                db, task_id, type_filter, search, start_date, end_date, page, page_size
            ),
            "compliance": lambda: _get_compliance_results_unified(
                db, task_id, type_filter, search, start_date, end_date, page, page_size
            ),
            "duplicate": lambda: _get_duplicate_results_unified(
                db, task_id, search, start_date, end_date, page, page_size
            ),
        }

        handler = dispatch.get(analysis_type)
        if handler is None:
            raise HTTPException(status_code=400, detail=f"不支持的分析类型: {analysis_type}")

        result = await handler()

        # Standard response envelope shared by the result endpoints.
        return {
            "code": 200,
            "message": "success",
            "data": result,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取统一分析结果失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


async def _get_regional_results_unified(
    db: Session, task_id: Optional[int], region: Optional[str], district: Optional[str],
    street: Optional[str], search: Optional[str], start_date: Optional[date],
    end_date: Optional[date], page: int, page_size: int
):
    """获取区域分析结果（统一接口）

    Builds a filtered, paginated query over RegionalAnalysisResult and
    returns one page of items plus geolocation-coverage statistics.
    """
    query = db.query(RegionalAnalysisResult).filter(RegionalAnalysisResult.is_deleted == False)

    # Optional filters. `is not None` (rather than truthiness) keeps a
    # legitimate task_id of 0 from being silently ignored.
    if task_id is not None:
        query = query.filter(RegionalAnalysisResult.task_id == task_id)
    if region:
        # Fuzzy match against either the district or the street column.
        query = query.filter(
            or_(
                RegionalAnalysisResult.dzxx_xzq.contains(region),
                RegionalAnalysisResult.dzxx_jd.contains(region)
            )
        )
    if district:
        query = query.filter(RegionalAnalysisResult.dzxx_xzq == district)
    if street:
        query = query.filter(RegionalAnalysisResult.dzxx_jd == street)
    if search:
        # Keyword search over petition content and complainant.
        query = query.filter(
            or_(
                RegionalAnalysisResult.xfnr.contains(search),
                RegionalAnalysisResult.tsr.contains(search)
            )
        )
    if start_date:
        query = query.filter(RegionalAnalysisResult.created_at >= start_date)
    if end_date:
        # Inclusive end date: created_at is a datetime, so the original
        # `created_at <= end_date` compared against midnight and dropped
        # every record created later that day.
        query = query.filter(RegionalAnalysisResult.created_at < end_date + timedelta(days=1))

    total = query.count()

    # One page of results, newest first.
    results = query.order_by(desc(RegionalAnalysisResult.created_at)).offset(
        (page - 1) * page_size
    ).limit(page_size).all()
    items = [result.to_dict() for result in results]

    # Geolocation coverage over the whole filtered set (not just this page).
    with_location = query.filter(
        and_(
            RegionalAnalysisResult.dzxx_lng.isnot(None),
            RegionalAnalysisResult.dzxx_lat.isnot(None),
            RegionalAnalysisResult.dzxx_lng != '',
            RegionalAnalysisResult.dzxx_lat != ''
        )
    ).count()

    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "items": items,
        "total_pages": (total + page_size - 1) // page_size,
        "statistics": {
            "with_location": with_location,
            "location_rate": round(with_location / total * 100, 2) if total > 0 else 0
        }
    }


async def _get_complaint_type_results_unified(
    db: Session, task_id: Optional[int], type_filter: Optional[str],
    search: Optional[str], start_date: Optional[date], end_date: Optional[date],
    page: int, page_size: int
):
    """获取投诉类型分析结果（统一接口）

    Filtered, paginated query over ComplaintTypeAnalysisResult; the type
    filter matches any of the three classification levels.
    """
    query = db.query(ComplaintTypeAnalysisResult).filter(ComplaintTypeAnalysisResult.is_deleted == False)

    # `is not None` keeps a legitimate task_id of 0 from being ignored.
    if task_id is not None:
        query = query.filter(ComplaintTypeAnalysisResult.task_id == task_id)
    if type_filter:
        # Match the filter against level-1/2/3 complaint types.
        query = query.filter(
            or_(
                ComplaintTypeAnalysisResult.tslx_yj == type_filter,
                ComplaintTypeAnalysisResult.tslx_rj == type_filter,
                ComplaintTypeAnalysisResult.tslx_sj == type_filter
            )
        )
    if search:
        query = query.filter(ComplaintTypeAnalysisResult.xfnr.contains(search))
    if start_date:
        query = query.filter(ComplaintTypeAnalysisResult.created_at >= start_date)
    if end_date:
        # Inclusive end date: created_at is a datetime, so comparing with
        # `<= end_date` would stop at midnight and drop that day's records.
        query = query.filter(ComplaintTypeAnalysisResult.created_at < end_date + timedelta(days=1))

    total = query.count()

    # One page of results, newest first.
    results = query.order_by(desc(ComplaintTypeAnalysisResult.created_at)).offset(
        (page - 1) * page_size
    ).limit(page_size).all()

    items = [result.to_dict() for result in results]

    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "items": items,
        "total_pages": (total + page_size - 1) // page_size,
        "statistics": {
            "total": total
        }
    }


async def _get_sentiment_results_unified(
    db: Session, task_id: Optional[int], type_filter: Optional[str],
    search: Optional[str], start_date: Optional[date], end_date: Optional[date],
    page: int, page_size: int
):
    """获取情感分析结果（统一接口）

    Filtered, paginated query over SentimentAnalysisResult; the type filter
    matches the sentiment label exactly, search matches content or keywords.
    """
    query = db.query(SentimentAnalysisResult).filter(SentimentAnalysisResult.is_deleted == False)

    # `is not None` keeps a legitimate task_id of 0 from being ignored.
    if task_id is not None:
        query = query.filter(SentimentAnalysisResult.task_id == task_id)
    if type_filter:
        query = query.filter(SentimentAnalysisResult.qgfx_lx == type_filter)
    if search:
        # Keyword search over petition content and extracted keywords.
        query = query.filter(
            or_(
                SentimentAnalysisResult.xfnr.contains(search),
                SentimentAnalysisResult.qgfx_gjc.contains(search)
            )
        )
    if start_date:
        query = query.filter(SentimentAnalysisResult.created_at >= start_date)
    if end_date:
        # Inclusive end date: created_at is a datetime, so comparing with
        # `<= end_date` would stop at midnight and drop that day's records.
        query = query.filter(SentimentAnalysisResult.created_at < end_date + timedelta(days=1))

    total = query.count()

    # One page of results, newest first.
    results = query.order_by(desc(SentimentAnalysisResult.created_at)).offset(
        (page - 1) * page_size
    ).limit(page_size).all()

    items = [result.to_dict() for result in results]

    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "items": items,
        "total_pages": (total + page_size - 1) // page_size,
        "statistics": {
            "total": total
        }
    }


async def _get_compliance_results_unified(
    db: Session, task_id: Optional[int], type_filter: Optional[str],
    search: Optional[str], start_date: Optional[date], end_date: Optional[date],
    page: int, page_size: int
):
    """获取规范性分析结果（统一接口）

    Filtered, paginated query over ComplianceAnalysisResult; type_filter
    "compliant" maps to status "1", anything else to "0".
    """
    query = db.query(ComplianceAnalysisResult).filter(ComplianceAnalysisResult.is_deleted == False)

    # `is not None` keeps a legitimate task_id of 0 from being ignored.
    if task_id is not None:
        query = query.filter(ComplianceAnalysisResult.task_id == task_id)
    if type_filter:
        # gffx_zt stores "1" for compliant, "0" otherwise.
        query = query.filter(ComplianceAnalysisResult.gffx_zt == ("1" if type_filter == "compliant" else "0"))
    if search:
        # Keyword search over petition content and reply content.
        query = query.filter(
            or_(
                ComplianceAnalysisResult.xfnr.contains(search),
                ComplianceAnalysisResult.gffx_dfnr.contains(search)
            )
        )
    if start_date:
        query = query.filter(ComplianceAnalysisResult.created_at >= start_date)
    if end_date:
        # Inclusive end date: created_at is a datetime, so comparing with
        # `<= end_date` would stop at midnight and drop that day's records.
        query = query.filter(ComplianceAnalysisResult.created_at < end_date + timedelta(days=1))

    total = query.count()

    # One page of results, newest first.
    results = query.order_by(desc(ComplianceAnalysisResult.created_at)).offset(
        (page - 1) * page_size
    ).limit(page_size).all()

    items = [result.to_dict() for result in results]

    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "items": items,
        "total_pages": (total + page_size - 1) // page_size,
        "statistics": {
            "total": total
        }
    }


async def _get_duplicate_results_unified(
    db: Session, task_id: Optional[int], search: Optional[str],
    start_date: Optional[date], end_date: Optional[date], page: int, page_size: int
):
    """获取重复分析结果（统一接口）

    Filtered, paginated query over DuplicateAnalysisResult; search matches
    the duplicate-group id (cfbh).
    """
    query = db.query(DuplicateAnalysisResult).filter(DuplicateAnalysisResult.is_deleted == False)

    # `is not None` keeps a legitimate task_id of 0 from being ignored.
    if task_id is not None:
        query = query.filter(DuplicateAnalysisResult.task_id == task_id)
    if search:
        query = query.filter(DuplicateAnalysisResult.cfbh.contains(search))
    if start_date:
        query = query.filter(DuplicateAnalysisResult.created_at >= start_date)
    if end_date:
        # Inclusive end date: created_at is a datetime, so comparing with
        # `<= end_date` would stop at midnight and drop that day's records.
        query = query.filter(DuplicateAnalysisResult.created_at < end_date + timedelta(days=1))

    total = query.count()

    # One page of results, newest first.
    results = query.order_by(desc(DuplicateAnalysisResult.created_at)).offset(
        (page - 1) * page_size
    ).limit(page_size).all()

    items = [result.to_dict() for result in results]

    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "items": items,
        "total_pages": (total + page_size - 1) // page_size,
        "statistics": {
            "total": total
        }
    }


# 动态选项获取接口
@analysis_results_router.get("/options/")
@log_api_request("get_analysis_results_options")
@require_permission("analysis_result:view")
async def get_analysis_results_options(
    analysis_type: str = Query(..., description="分析类型"),
    option_type: str = Query(..., description="选项类型: tasks, regions, types"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """获取动态选项

    Returns dropdown options for the result pages:
    - tasks:   all non-deleted analysis tasks (newest first)
    - regions: district/street values observed in the stored results
    - types:   classification values available for the given analysis type
    """
    logger.info(f"获取分析结果选项请求 - 类型: {analysis_type}, 选项: {option_type}")

    try:
        if option_type == "tasks":
            # All non-deleted analysis tasks. AnalysisTask is imported at
            # module level, so the redundant local import was dropped.
            tasks = db.query(AnalysisTask).filter(
                AnalysisTask.is_deleted == False
            ).order_by(desc(AnalysisTask.created_at)).all()

            return [
                {
                    "value": task.id,
                    "label": task.name or f"任务 {task.id}",
                    "type": task.task_type,
                    "status": task.status
                }
                for task in tasks
            ]

        elif option_type == "regions":
            regions = []

            if analysis_type == "regional":
                # Regional results carry district/street columns directly.
                district_results = db.query(
                    RegionalAnalysisResult.dzxx_xzq
                ).filter(
                    RegionalAnalysisResult.is_deleted == False,
                    RegionalAnalysisResult.dzxx_xzq.isnot(None)
                ).distinct().all()

                street_results = db.query(
                    RegionalAnalysisResult.dzxx_xzq,
                    RegionalAnalysisResult.dzxx_jd
                ).filter(
                    RegionalAnalysisResult.is_deleted == False,
                    RegionalAnalysisResult.dzxx_xzq.isnot(None),
                    RegionalAnalysisResult.dzxx_jd.isnot(None)
                ).distinct().all()

                districts = list(set([r[0] for r in district_results if r[0]]))
                streets = {}
                for district, street in street_results:
                    if district and street:
                        if district not in streets:
                            streets[district] = []
                        streets[district].append(street)

                regions = {
                    "districts": sorted(districts),
                    "streets": streets
                }
            else:
                # Other result tables have no region columns; join back to
                # the originating PetitionRecord to recover them.
                from models.petition_record import PetitionRecord

                if analysis_type == "complaint-type":
                    model = ComplaintTypeAnalysisResult
                elif analysis_type == "sentiment":
                    model = SentimentAnalysisResult
                elif analysis_type == "compliance":
                    model = ComplianceAnalysisResult
                elif analysis_type == "duplicate":
                    model = DuplicateAnalysisResult
                else:
                    return {"districts": [], "streets": {}}

                if analysis_type == "duplicate":
                    # Duplicate results link to the record via xfxh.
                    results = db.query(
                        PetitionRecord.ts_ds,
                        PetitionRecord.ts_qxs,
                        PetitionRecord.ts_jd
                    ).join(
                        model, model.xfxh == PetitionRecord.xh
                    ).filter(
                        model.is_deleted == False,
                        PetitionRecord.ts_ds.isnot(None)
                    ).distinct().all()
                else:
                    # Other results link via petition_record_id.
                    results = db.query(
                        PetitionRecord.ts_ds,
                        PetitionRecord.ts_qxs,
                        PetitionRecord.ts_jd
                    ).join(
                        model, model.petition_record_id == PetitionRecord.xh
                    ).filter(
                        model.is_deleted == False,
                        PetitionRecord.ts_ds.isnot(None)
                    ).distinct().all()

                districts = list(set([r[0] for r in results if r[0]]))
                streets = {}
                # Both the county (qxs) and street (jd) values are exposed
                # under the district key, de-duplicated per district.
                for ds, qxs, jd in results:
                    if ds and qxs:
                        if ds not in streets:
                            streets[ds] = []
                        if qxs not in streets[ds]:
                            streets[ds].append(qxs)
                    if ds and jd:
                        if ds not in streets:
                            streets[ds] = []
                        if jd not in streets[ds]:
                            streets[ds].append(jd)

                regions = {
                    "districts": sorted(districts),
                    "streets": streets
                }

            return regions

        elif option_type == "types":
            types = []

            if analysis_type == "complaint-type":
                # Level-1 complaint types plus their level-2 children.
                level1_results = db.query(
                    ComplaintTypeAnalysisResult.tslx_yj
                ).filter(
                    ComplaintTypeAnalysisResult.is_deleted == False,
                    ComplaintTypeAnalysisResult.tslx_yj.isnot(None)
                ).distinct().all()

                level2_results = db.query(
                    ComplaintTypeAnalysisResult.tslx_yj,
                    ComplaintTypeAnalysisResult.tslx_rj
                ).filter(
                    ComplaintTypeAnalysisResult.is_deleted == False,
                    ComplaintTypeAnalysisResult.tslx_rj.isnot(None)
                ).distinct().all()

                level1_types = list(set([r[0] for r in level1_results if r[0]]))
                level2_types = {}
                for level1, level2 in level2_results:
                    if level1 and level2:
                        if level1 not in level2_types:
                            level2_types[level1] = []
                        level2_types[level1].append(level2)

                types = {
                    "level1": sorted(level1_types),
                    "level2": level2_types
                }

            elif analysis_type == "sentiment":
                # Distinct sentiment labels present in the data.
                sentiment_results = db.query(
                    SentimentAnalysisResult.qgfx_lx
                ).filter(
                    SentimentAnalysisResult.is_deleted == False,
                    SentimentAnalysisResult.qgfx_lx.isnot(None)
                ).distinct().all()

                types = [r[0] for r in sentiment_results if r[0]]

            elif analysis_type == "compliance":
                # Fixed compliant / non-compliant choices.
                types = [
                    {"value": "compliant", "label": "合规"},
                    {"value": "non_compliant", "label": "不合规"}
                ]

            return types

        else:
            raise HTTPException(status_code=400, detail=f"不支持的选项类型: {option_type}")

    except HTTPException:
        # Re-raise deliberate HTTP errors (e.g. the 400 above); the original
        # generic handler converted them into misleading 500 responses.
        raise
    except Exception as e:
        logger.error(f"获取分析结果选项失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# 统计数据接口
@analysis_results_router.get("/dashboard-stats/")
@log_api_request("get_analysis_results_dashboard_stats")
@require_permission("analysis_result:view")
async def get_analysis_results_dashboard_stats(
    analysis_type: str = Query(..., description="分析类型"),
    task_id: Optional[int] = Query(None, description="任务ID"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """获取仪表板统计数据"""
    logger.info(f"获取分析结果仪表板统计请求 - 类型: {analysis_type}")
    
    try:
        today = date.today()
        yesterday = today - timedelta(days=1)
        
        stats = {
            "total": 0,
            "today": 0,
            "avg_confidence": 0,
            "coverage_rate": 0
        }
        
        if analysis_type == "regional":
            # 区域分析统计 - 专业化指标
            total_query = db.query(RegionalAnalysisResult).filter(
                RegionalAnalysisResult.is_deleted == False
            )
            today_query = db.query(RegionalAnalysisResult).filter(
                RegionalAnalysisResult.is_deleted == False,
                func.date(RegionalAnalysisResult.created_at) == today
            )
            
            if task_id:
                total_query = total_query.filter(RegionalAnalysisResult.task_id == task_id)
                today_query = today_query.filter(RegionalAnalysisResult.task_id == task_id)
            
            total_count = total_query.count()
            today_count = today_query.count()
            
            # 基础统计数据
            stats["total"] = total_count
            stats["today"] = today_count
            
            # 计算平均置信度
            confidence_results = total_query.all()
            if confidence_results:
                confidence_sum = sum([r.get_confidence_score() for r in confidence_results])
                stats["avg_confidence"] = round(confidence_sum / len(confidence_results) * 100, 2)
            
            # 定位成功率（有位置信息的比例）
            with_location = total_query.filter(
                and_(
                    RegionalAnalysisResult.dzxx_lng.isnot(None),
                    RegionalAnalysisResult.dzxx_lat.isnot(None),
                    RegionalAnalysisResult.dzxx_lng != '',
                    RegionalAnalysisResult.dzxx_lat != ''
                )
            ).count()
            location_rate = round(with_location / total_count * 100, 2) if total_count > 0 else 0
            stats["location_rate"] = location_rate
            
            # 区域覆盖率（不同行政区的覆盖比例）
            if total_count > 0:
                distinct_regions = total_query.filter(
                    RegionalAnalysisResult.dzxx_xzq.isnot(None),
                    RegionalAnalysisResult.dzxx_xzq != ''
                ).distinct(RegionalAnalysisResult.dzxx_xzq).count()
                region_coverage = round(distinct_regions / total_count * 100, 2)
                stats["region_coverage"] = region_coverage
            else:
                stats["region_coverage"] = 0
            
            # 保持兼容性的字段
            stats["coverage_rate"] = location_rate
        
        elif analysis_type == "complaint-type":
            # 投诉类型分析统计 - 专业化指标
            total_query = db.query(ComplaintTypeAnalysisResult).filter(
                ComplaintTypeAnalysisResult.is_deleted == False
            )
            today_query = db.query(ComplaintTypeAnalysisResult).filter(
                ComplaintTypeAnalysisResult.is_deleted == False,
                func.date(ComplaintTypeAnalysisResult.created_at) == today
            )
            
            if task_id:
                total_query = total_query.filter(ComplaintTypeAnalysisResult.task_id == task_id)
                today_query = today_query.filter(ComplaintTypeAnalysisResult.task_id == task_id)
            
            total_count = total_query.count()
            today_count = today_query.count()
            results = total_query.all()
            
            # 基础统计数据
            stats["total"] = total_count
            stats["today"] = today_count
            
            # 计算平均置信度
            if results:
                confidence_sum = sum([max(r.get_level2_confidence(), r.get_level3_confidence()) for r in results])
                stats["avg_confidence"] = round(confidence_sum / len(results) * 100, 2)
            
            # 类型覆盖率（成功分类的比例）
            if total_count > 0:
                with_type = total_query.filter(
                    ComplaintTypeAnalysisResult.tslx_yj.isnot(None),
                    ComplaintTypeAnalysisResult.tslx_yj != ''
                ).count()
                type_coverage = round(with_type / total_count * 100, 2)
                stats["type_coverage"] = type_coverage
            else:
                stats["type_coverage"] = 0
            
            # 主要类型占比（最主要类型的占比）
            if total_count > 0:
                type_counts = db.query(
                    ComplaintTypeAnalysisResult.tslx_yj,
                    func.count(ComplaintTypeAnalysisResult.id).label('count')
                ).filter(
                    ComplaintTypeAnalysisResult.is_deleted == False,
                    ComplaintTypeAnalysisResult.tslx_yj.isnot(None),
                    ComplaintTypeAnalysisResult.tslx_yj != ''
                )
                if task_id:
                    type_counts = type_counts.filter(ComplaintTypeAnalysisResult.task_id == task_id)
                
                type_counts = type_counts.group_by(ComplaintTypeAnalysisResult.tslx_yj).all()
                
                if type_counts:
                    main_type_count = max(type_counts, key=lambda x: x.count).count
                    main_type_ratio = round(main_type_count / total_count * 100, 2)
                    stats["main_type_ratio"] = main_type_ratio
                else:
                    stats["main_type_ratio"] = 0
            else:
                stats["main_type_ratio"] = 0
            
            # 保持兼容性的字段
            stats["coverage_rate"] = stats.get("type_coverage", 100)
        
        elif analysis_type == "sentiment":
            # 情感分析统计 - 专业化指标
            total_query = db.query(SentimentAnalysisResult).filter(
                SentimentAnalysisResult.is_deleted == False
            )
            today_query = db.query(SentimentAnalysisResult).filter(
                SentimentAnalysisResult.is_deleted == False,
                func.date(SentimentAnalysisResult.created_at) == today
            )
            
            if task_id:
                total_query = total_query.filter(SentimentAnalysisResult.task_id == task_id)
                today_query = today_query.filter(SentimentAnalysisResult.task_id == task_id)
            
            total_count = total_query.count()
            today_count = today_query.count()
            # NOTE(review): loads every matching row into memory to aggregate in
            # Python; fine for small tables, consider a SQL GROUP BY for large ones.
            results = total_query.all()
            
            # Base statistics
            stats["total"] = total_count
            stats["today"] = today_count
            stats["avg_confidence"] = 80  # sentiment analysis uses a fixed confidence value
            
            # Sentiment distribution (share of the dominant sentiment type)
            if total_count > 0:
                sentiment_counts = {}
                for result in results:
                    sentiment = result.qgfx_lx or 'unknown'
                    sentiment_counts[sentiment] = sentiment_counts.get(sentiment, 0) + 1
                
                # Distribution ratio = percentage held by the most frequent sentiment type
                if sentiment_counts:
                    max_sentiment_count = max(sentiment_counts.values())
                    sentiment_distribution = round(max_sentiment_count / total_count * 100, 2)
                    stats["sentiment_distribution"] = sentiment_distribution
                else:
                    stats["sentiment_distribution"] = 0
                
                # Negative-sentiment share (anger-related categories only)
                negative_sentiments = ['severe_anger', 'moderate_anger', 'mild_anger']
                negative_count = sum(sentiment_counts.get(s, 0) for s in negative_sentiments)
                negative_sentiment_rate = round(negative_count / total_count * 100, 2)
                stats["negative_sentiment_rate"] = negative_sentiment_rate
            else:
                stats["sentiment_distribution"] = 0
                stats["negative_sentiment_rate"] = 0
            
            # Average sentiment intensity (heuristic: keyword count plus sentiment type)
            if results:
                total_intensity = 0
                for result in results:
                    # Base intensity, raised by 0.2 per non-empty comma-separated keyword
                    intensity = 1  # base intensity
                    if result.qgfx_gjc:
                        keyword_count = len([k for k in result.qgfx_gjc.split(',') if k.strip()])
                        intensity += keyword_count * 0.2
                    
                    # Scale intensity up for stronger anger categories
                    if result.qgfx_lx in ['severe_anger', 'moderate_anger']:
                        intensity *= 1.5
                    elif result.qgfx_lx == 'mild_anger':
                        intensity *= 1.2
                    
                    total_intensity += intensity
                
                avg_sentiment_intensity = round(total_intensity / len(results), 1)
                stats["avg_sentiment_intensity"] = avg_sentiment_intensity
            else:
                stats["avg_sentiment_intensity"] = 0
            
            # Field kept for backward compatibility
            stats["coverage_rate"] = 100
        
        elif analysis_type == "compliance":
            # Compliance analysis statistics - specialized metrics
            total_query = db.query(ComplianceAnalysisResult).filter(
                ComplianceAnalysisResult.is_deleted == False
            )
            # Rows created today (compared on the DATE part of created_at)
            today_query = db.query(ComplianceAnalysisResult).filter(
                ComplianceAnalysisResult.is_deleted == False,
                func.date(ComplianceAnalysisResult.created_at) == today
            )
            
            if task_id:
                total_query = total_query.filter(ComplianceAnalysisResult.task_id == task_id)
                today_query = today_query.filter(ComplianceAnalysisResult.task_id == task_id)
            
            total_count = total_query.count()
            today_count = today_query.count()
            results = total_query.all()
            
            # Base statistics
            stats["total"] = total_count
            stats["today"] = today_count
            stats["avg_confidence"] = 90  # compliance analysis uses a fixed confidence value
            
            # Overall compliance rate
            if total_count > 0:
                compliant_count = sum(1 for r in results if r.is_compliant())
                compliance_rate = round(compliant_count / total_count * 100, 2)
                stats["compliance_rate"] = compliance_rate
                
                # Per-department compliance (rows with a non-empty destination unit, ts_qxdw)
                dept_query = total_query.filter(
                    ComplianceAnalysisResult.ts_qxdw.isnot(None),
                    ComplianceAnalysisResult.ts_qxdw != ''
                )
                dept_results = dept_query.all()
                
                if dept_results:
                    dept_compliance = {}
                    for result in dept_results:
                        dept = result.ts_qxdw
                        if dept not in dept_compliance:
                            dept_compliance[dept] = {'total': 0, 'compliant': 0}
                        dept_compliance[dept]['total'] += 1
                        if result.is_compliant():
                            dept_compliance[dept]['compliant'] += 1
                    
                    # Unweighted average of each department's compliance rate
                    avg_dept_compliance = 0
                    if dept_compliance:
                        dept_rates = []
                        for dept, data in dept_compliance.items():
                            if data['total'] > 0:
                                rate = data['compliant'] / data['total'] * 100
                                dept_rates.append(rate)
                        if dept_rates:
                            avg_dept_compliance = round(sum(dept_rates) / len(dept_rates), 2)
                    
                    stats["department_compliance"] = avg_dept_compliance
                else:
                    stats["department_compliance"] = 0
                
                # Number of violation types (distinct judgment-reason prefixes)
                violation_reasons = set()
                for result in results:
                    if not result.is_compliant() and result.gffx_pdly:
                        violation_reasons.add(result.gffx_pdly[:50])  # first 50 chars used as a type key
                
                stats["violation_types"] = len(violation_reasons)
            else:
                stats["compliance_rate"] = 0
                stats["department_compliance"] = 0
                stats["violation_types"] = 0
            
            # Field kept for backward compatibility
            stats["coverage_rate"] = 100
        
        elif analysis_type == "duplicate":
            # Duplicate analysis statistics - specialized metrics
            total_query = db.query(DuplicateAnalysisResult).filter(
                DuplicateAnalysisResult.is_deleted == False
            )
            # Rows created today (compared on the DATE part of created_at)
            today_query = db.query(DuplicateAnalysisResult).filter(
                DuplicateAnalysisResult.is_deleted == False,
                func.date(DuplicateAnalysisResult.created_at) == today
            )
            
            if task_id:
                total_query = total_query.filter(DuplicateAnalysisResult.task_id == task_id)
                today_query = today_query.filter(DuplicateAnalysisResult.task_id == task_id)
            
            total_count = total_query.count()
            today_count = today_query.count()
            results = total_query.all()
            
            # Base statistics
            stats["total"] = total_count
            stats["today"] = today_count
            stats["avg_confidence"] = 85  # duplicate analysis uses a fixed confidence value
            
            # Duplicate rate (share of rows carrying a duplicate-group number, cfbh)
            if total_count > 0:
                with_duplicate = total_query.filter(
                    DuplicateAnalysisResult.cfbh.isnot(None),
                    DuplicateAnalysisResult.cfbh != ''
                ).count()
                duplicate_rate = round(with_duplicate / total_count * 100, 2)
                stats["duplicate_rate"] = duplicate_rate
                
                # Number of distinct duplicate groups.
                # NOTE(review): Query.distinct(col) emits DISTINCT ON (PostgreSQL-only
                # semantics) and .count() wraps it in a subquery; confirm this really
                # counts distinct cfbh values on the target backend — the portable form
                # is db.query(func.count(func.distinct(DuplicateAnalysisResult.cfbh))).
                duplicate_groups = total_query.filter(
                    DuplicateAnalysisResult.cfbh.isnot(None),
                    DuplicateAnalysisResult.cfbh != ''
                ).distinct(DuplicateAnalysisResult.cfbh).count()
                stats["duplicate_groups"] = duplicate_groups
                
                # High-frequency duplicates (groups that occur >= 3 times)
                duplicate_counts = db.query(
                    DuplicateAnalysisResult.cfbh,
                    func.count(DuplicateAnalysisResult.id).label('count')
                ).filter(
                    DuplicateAnalysisResult.is_deleted == False,
                    DuplicateAnalysisResult.cfbh.isnot(None),
                    DuplicateAnalysisResult.cfbh != ''
                )
                if task_id:
                    duplicate_counts = duplicate_counts.filter(DuplicateAnalysisResult.task_id == task_id)
                
                duplicate_counts = duplicate_counts.group_by(DuplicateAnalysisResult.cfbh).all()
                
                high_freq_duplicates = sum(1 for item in duplicate_counts if item.count >= 3)
                stats["high_freq_duplicates"] = high_freq_duplicates
            else:
                stats["duplicate_rate"] = 0
                stats["duplicate_groups"] = 0
                stats["high_freq_duplicates"] = 0
            
            # Field kept for backward compatibility
            stats["coverage_rate"] = 100
        
        return stats
        
    except Exception as e:
        # NOTE(review): this broad handler would also catch any HTTPException
        # raised inside the try body and re-wrap it as a 500, and str(e) may leak
        # internal details to API clients — consider re-raising HTTPException
        # unchanged and returning a generic message for everything else. Verify
        # against the (unseen) try body above.
        logger.error(f"获取分析结果仪表板统计失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))