"""
多维度统计分析服务
提供复杂的数据聚合和交叉分析功能
"""
from typing import Dict, Any, List, Optional, Union, Tuple
from datetime import datetime, timedelta
from sqlalchemy.orm import Session
from sqlalchemy import text, func, and_, or_
import logging

from models.petition_record import PetitionRecord
from models.analysis_task import AnalysisTask
from models.petition_record import RegionalAnalysisResult, ComplaintTypeAnalysisResult
from models.petition_record import SentimentAnalysisResult, ComplianceAnalysisResult
from services.petition_stats_service import petition_stats_service, TimeGranularity, RegionLevel
from core.exceptions import StatsError

logger = logging.getLogger(__name__)


class MultiDimensionalStatsService:
    """多维度统计分析服务"""

    def __init__(self):
        self.supported_dimensions = [
            'time', 'region', 'complaint_type', 'sentiment', 'compliance', 'task'
        ]
        self.supported_aggregations = [
            'count', 'percentage', 'growth_rate', 'distribution', 'correlation'
        ]

    async def get_cross_analysis(
        self,
        db: Session,
        primary_dimension: str,
        secondary_dimension: str,
        task_id: Optional[int] = None,
        filters: Dict[str, Any] = None,
        limit: int = 50
    ) -> Dict[str, Any]:
        """获取交叉分析结果

        Args:
            db: 数据库会话
            primary_dimension: 主维度
            secondary_dimension: 次维度
            task_id: 分析任务ID
            filters: 过滤条件
            limit: 结果限制

        Returns:
            交叉分析结果
        """
        if primary_dimension not in self.supported_dimensions:
            raise StatsError(f"不支持的主维度: {primary_dimension}")

        if secondary_dimension not in self.supported_dimensions:
            raise StatsError(f"不支持的次维度: {secondary_dimension}")

        if primary_dimension == secondary_dimension:
            raise StatsError("主维度和次维度不能相同")

        try:
            # 生成缓存键
            cache_key = f"cross_analysis_{primary_dimension}_{secondary_dimension}_{task_id or 'all'}"

            # 检查缓存
            cached_result = petition_stats_service._get_memory_cache(cache_key)
            if cached_result:
                logger.info(f"从缓存获取交叉分析结果: {primary_dimension} x {secondary_dimension}")
                return cached_result

            # 执行交叉分析
            result = await self._execute_cross_analysis(
                db, primary_dimension, secondary_dimension, task_id, filters, limit
            )

            # 设置缓存
            petition_stats_service._set_memory_cache(cache_key, result, ttl=600)  # 10分钟缓存

            return result

        except Exception as e:
            logger.error(f"交叉分析失败: {str(e)}", exc_info=True)
            raise StatsError(f"交叉分析失败: {str(e)}")

    async def get_trend_analysis(
        self,
        db: Session,
        dimension: str,
        time_granularity: str = 'day',
        period_days: int = 30,
        task_id: Optional[int] = None,
        filters: Dict[str, Any] = None
    ) -> Dict[str, Any]:
        """获取趋势分析结果

        Args:
            db: 数据库会话
            dimension: 分析维度
            time_granularity: 时间粒度
            period_days: 分析周期（天）
            task_id: 分析任务ID
            filters: 过滤条件

        Returns:
            趋势分析结果
        """
        if dimension not in self.supported_dimensions:
            raise StatsError(f"不支持的维度: {dimension}")

        try:
            # 生成缓存键
            cache_key = f"trend_analysis_{dimension}_{time_granularity}_{period_days}_{task_id or 'all'}"

            # 检查缓存
            cached_result = petition_stats_service._get_memory_cache(cache_key)
            if cached_result:
                logger.info(f"从缓存获取趋势分析结果: {dimension}")
                return cached_result

            # 执行趋势分析
            result = await self._execute_trend_analysis(
                db, dimension, time_granularity, period_days, task_id, filters
            )

            # 设置缓存
            petition_stats_service._set_memory_cache(cache_key, result, ttl=300)  # 5分钟缓存

            return result

        except Exception as e:
            logger.error(f"趋势分析失败: {str(e)}", exc_info=True)
            raise StatsError(f"趋势分析失败: {str(e)}")

    async def get_correlation_analysis(
        self,
        db: Session,
        dimensions: List[str],
        task_id: Optional[int] = None,
        filters: Dict[str, Any] = None
    ) -> Dict[str, Any]:
        """获取相关性分析结果

        Args:
            db: 数据库会话
            dimensions: 分析维度列表
            task_id: 分析任务ID
            filters: 过滤条件

        Returns:
            相关性分析结果
        """
        for dimension in dimensions:
            if dimension not in self.supported_dimensions:
                raise StatsError(f"不支持的维度: {dimension}")

        if len(dimensions) < 2:
            raise StatsError("至少需要两个维度进行相关性分析")

        try:
            # 生成缓存键
            dim_key = "_".join(sorted(dimensions))
            cache_key = f"correlation_analysis_{dim_key}_{task_id or 'all'}"

            # 检查缓存
            cached_result = petition_stats_service._get_memory_cache(cache_key)
            if cached_result:
                logger.info(f"从缓存获取相关性分析结果: {dim_key}")
                return cached_result

            # 执行相关性分析
            result = await self._execute_correlation_analysis(
                db, dimensions, task_id, filters
            )

            # 设置缓存
            petition_stats_service._set_memory_cache(cache_key, result, ttl=900)  # 15分钟缓存

            return result

        except Exception as e:
            logger.error(f"相关性分析失败: {str(e)}", exc_info=True)
            raise StatsError(f"相关性分析失败: {str(e)}")

    async def get_comprehensive_dashboard(
        self,
        db: Session,
        task_id: Optional[int] = None,
        filters: Dict[str, Any] = None
    ) -> Dict[str, Any]:
        """获取综合仪表盘数据

        Args:
            db: 数据库会话
            task_id: 分析任务ID
            filters: 过滤条件

        Returns:
            综合仪表盘数据
        """
        try:
            logger.info("开始生成综合仪表盘数据")

            # 并行获取各个维度的数据
            import asyncio

            tasks = [
                petition_stats_service.get_overview_statistics(db, task_id, filters),
                petition_stats_service.get_time_series_statistics(db, task_id,
                    TimeGranularity.DAY, filters),
                petition_stats_service.get_regional_statistics(db, task_id,
                    RegionLevel.DISTRICT, filters),
                petition_stats_service.get_complaint_type_statistics(db, task_id, filters),
                self.get_trend_analysis(db, 'region', 'day', 30, task_id, filters),
                self.get_cross_analysis(db, 'region', 'complaint_type', task_id, filters, 20)
            ]

            # 并行执行所有任务
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # 处理结果
            dashboard_data = {
                "overview": results[0] if not isinstance(results[0], Exception) else {},
                "time_series": results[1] if not isinstance(results[1], Exception) else {},
                "regional": results[2] if not isinstance(results[2], Exception) else {},
                "complaint_types": results[3] if not isinstance(results[3], Exception) else {},
                "regional_trends": results[4] if not isinstance(results[4], Exception) else {},
                "region_complaint_cross": results[5] if not isinstance(results[5], Exception) else {},
                "generated_at": datetime.now().isoformat(),
                "task_id": task_id
            }

            logger.info("综合仪表盘数据生成完成")
            return dashboard_data

        except Exception as e:
            logger.error(f"生成综合仪表盘数据失败: {str(e)}", exc_info=True)
            raise StatsError(f"生成综合仪表盘数据失败: {str(e)}")

    async def _execute_cross_analysis(
        self,
        db: Session,
        primary_dimension: str,
        secondary_dimension: str,
        task_id: Optional[int],
        filters: Dict[str, Any],
        limit: int
    ) -> Dict[str, Any]:
        """执行交叉分析"""

        # 构建SQL查询
        primary_field = self._get_dimension_field(primary_dimension)
        secondary_field = self._get_dimension_field(secondary_dimension)

        where_conditions = self._build_where_conditions(task_id, filters)

        query = f"""
        WITH cross_data AS (
            SELECT
                {primary_field} as primary_dim,
                {secondary_field} as secondary_dim,
                COUNT(*) as count,
                COUNT(DISTINCT id) as unique_records
            FROM petition_record pr
            WHERE pr.is_deleted = 0
            {where_conditions}
            GROUP BY {primary_field}, {secondary_field}
        ),
        primary_totals AS (
            SELECT
                primary_dim,
                SUM(count) as total_count
            FROM cross_data
            GROUP BY primary_dim
        ),
        secondary_totals AS (
            SELECT
                secondary_dim,
                SUM(count) as total_count
            FROM cross_data
            GROUP BY secondary_dim
        ),
        grand_total AS (
            SELECT SUM(count) as total_count FROM cross_data
        )
        SELECT
            cd.primary_dim,
            cd.secondary_dim,
            cd.count,
            cd.unique_records,
            pt.total_count as primary_total,
            st.total_count as secondary_total,
            gt.total_count as grand_total,
            ROUND(cd.count * 100.0 / pt.total_count, 2) as primary_percentage,
            ROUND(cd.count * 100.0 / st.total_count, 2) as secondary_percentage,
            ROUND(cd.count * 100.0 / gt.total_count, 2) as overall_percentage
        FROM cross_data cd
        JOIN primary_totals pt ON cd.primary_dim = pt.primary_dim
        JOIN secondary_totals st ON cd.secondary_dim = st.secondary_dim
        CROSS JOIN grand_total gt
        ORDER BY cd.count DESC
        LIMIT {limit}
        """

        result = db.execute(text(query)).fetchall()

        # 格式化结果
        primary_categories = {}
        secondary_categories = {}
        cross_data = []

        for row in result:
            primary_key = row.primary_dim or "未知"
            secondary_key = row.secondary_dim or "未知"

            # 统计主维度
            if primary_key not in primary_categories:
                primary_categories[primary_key] = {
                    "name": primary_key,
                    "total": row.primary_total,
                    "sub_categories": []
                }

            # 统计次维度
            if secondary_key not in secondary_categories:
                secondary_categories[secondary_key] = {
                    "name": secondary_key,
                    "total": row.secondary_total,
                    "sub_categories": []
                }

            # 交叉数据
            cross_data.append({
                "primary": primary_key,
                "secondary": secondary_key,
                "count": row.count,
                "unique_records": row.unique_records,
                "primary_percentage": row.primary_percentage,
                "secondary_percentage": row.secondary_percentage,
                "overall_percentage": row.overall_percentage
            })

        return {
            "primary_dimension": primary_dimension,
            "secondary_dimension": secondary_dimension,
            "primary_categories": list(primary_categories.values()),
            "secondary_categories": list(secondary_categories.values()),
            "cross_data": cross_data,
            "total_records": len(cross_data),
            "grand_total": result[0].grand_total if result else 0
        }

    async def _execute_trend_analysis(
        self,
        db: Session,
        dimension: str,
        time_granularity: str,
        period_days: int,
        task_id: Optional[int],
        filters: Dict[str, Any]
    ) -> Dict[str, Any]:
        """执行趋势分析"""

        end_date = datetime.now()
        start_date = end_date - timedelta(days=period_days)

        time_format = self._get_time_format(time_granularity)
        dimension_field = self._get_dimension_field(dimension)

        where_conditions = self._build_where_conditions(task_id, filters)
        where_conditions += f" AND pr.djsj >= '{start_date.strftime('%Y-%m-%d')}'"

        query = f"""
        WITH trend_data AS (
            SELECT
                strftime('{time_format}', pr.djsj) as period,
                {dimension_field} as dimension_value,
                COUNT(*) as count
            FROM petition_record pr
            WHERE pr.is_deleted = 0
            {where_conditions}
            GROUP BY strftime('{time_format}', pr.djsj), {dimension_field}
        ),
        period_totals AS (
            SELECT
                period,
                SUM(count) as total_count
            FROM trend_data
            GROUP BY period
        )
        SELECT
            td.period,
            td.dimension_value,
            td.count,
            pt.total_count as period_total,
            ROUND(td.count * 100.0 / pt.total_count, 2) as percentage,
            LAG(td.count) OVER (
                PARTITION BY td.dimension_value
                ORDER BY td.period
            ) as prev_count
        FROM trend_data td
        JOIN period_totals pt ON td.period = pt.period
        ORDER BY td.period, td.count DESC
        """

        result = db.execute(text(query)).fetchall()

        # 格式化结果
        periods = []
        dimension_values = {}

        for row in result:
            period = row.period
            dim_value = row.dimension_value or "未知"

            if period not in periods:
                periods.append(period)

            if dim_value not in dimension_values:
                dimension_values[dim_value] = {
                    "name": dim_value,
                    "data": [],
                    "trend": []
                }

            growth_rate = 0
            if row.prev_count and row.prev_count > 0:
                growth_rate = ((row.count - row.prev_count) / row.prev_count) * 100

            dimension_values[dim_value]["data"].append({
                "period": period,
                "count": row.count,
                "percentage": row.percentage,
                "growth_rate": growth_rate
            })

        # 计算趋势指标
        for dim_value, data in dimension_values.items():
            if len(data["data"]) > 1:
                current = data["data"][-1]["count"]
                previous = data["data"][-2]["count"]
                total_growth = ((current - previous) / previous * 100) if previous > 0 else 0
                data["trend"] = {
                    "total_growth": total_growth,
                    "trend_direction": "上升" if total_growth > 0 else "下降" if total_growth < 0 else "平稳",
                    "volatility": self._calculate_volatility([d["count"] for d in data["data"]])
                }

        return {
            "dimension": dimension,
            "time_granularity": time_granularity,
            "period_days": period_days,
            "periods": periods,
            "dimension_values": list(dimension_values.values()),
            "analysis_period": {
                "start": start_date.strftime('%Y-%m-%d'),
                "end": end_date.strftime('%Y-%m-%d')
            }
        }

    async def _execute_correlation_analysis(
        self,
        db: Session,
        dimensions: List[str],
        task_id: Optional[int],
        filters: Dict[str, Any]
    ) -> Dict[str, Any]:
        """执行相关性分析"""

        where_conditions = self._build_where_conditions(task_id, filters)

        # 构建维度字段
        dimension_fields = []
        for dim in dimensions:
            field = self._get_dimension_field(dim)
            dimension_fields.append(f"{field} as {dim}_value")

        dimension_fields_str = ", ".join(dimension_fields)

        query = f"""
        SELECT
            {dimension_fields_str},
            COUNT(*) as count
        FROM petition_record pr
        WHERE pr.is_deleted = 0
        {where_conditions}
        GROUP BY {', '.join([self._get_dimension_field(dim) for dim in dimensions])}
        """

        result = db.execute(text(query)).fetchall()

        # 计算相关性矩阵
        correlation_matrix = self._calculate_correlation_matrix(result, dimensions)

        return {
            "dimensions": dimensions,
            "correlation_matrix": correlation_matrix,
            "total_combinations": len(result),
            "strong_correlations": self._identify_strong_correlations(correlation_matrix),
            "analysis_summary": {
                "highest_correlation": self._get_highest_correlation(correlation_matrix),
                "lowest_correlation": self._get_lowest_correlation(correlation_matrix),
                "average_correlation": self._calculate_average_correlation(correlation_matrix)
            }
        }

    def _get_dimension_field(self, dimension: str) -> str:
        """获取维度对应的数据库字段"""
        field_mapping = {
            'time': "DATE(djsj)",
            'region': "ts_ds",
            'complaint_type': "flnr",
            'task': "task_id",
            'sentiment': "(SELECT sar.qgfx_lx FROM sentiment_analysis_result sar WHERE sar.petition_record_id = pr.xh)",
            'compliance': "(SELECT car.gffx_zt FROM compliance_analysis_result car WHERE car.petition_record_id = pr.xh)"
        }
        return field_mapping.get(dimension, "NULL")

    def _get_time_format(self, granularity: str) -> str:
        """获取时间格式"""
        formats = {
            'day': "%Y-%m-%d",
            'week': "%Y-%W",
            'month': "%Y-%m",
            'year': "%Y"
        }
        return formats.get(granularity, "%Y-%m-%d")

    def _build_where_conditions(self, task_id: Optional[int], filters: Dict[str, Any]) -> str:
        """构建WHERE条件"""
        conditions = []

        if task_id:
            conditions.append(f"pr.task_id = {task_id}")

        if filters:
            if "start_date" in filters:
                conditions.append(f"pr.djsj >= '{filters['start_date']}'")
            if "end_date" in filters:
                conditions.append(f"pr.djsj <= '{filters['end_date']}'")
            if "region" in filters:
                conditions.append(f"pr.ts_ds = '{filters['region']}'")
            if "district" in filters:
                conditions.append(f"pr.ts_qxs = '{filters['district']}'")

        return f"AND {' AND '.join(conditions)}" if conditions else ""

    def _calculate_volatility(self, values: List[float]) -> float:
        """计算波动率"""
        if len(values) < 2:
            return 0.0

        mean = sum(values) / len(values)
        variance = sum((x - mean) ** 2 for x in values) / len(values)
        return variance ** 0.5

    def _calculate_correlation_matrix(self, data: List, dimensions: List[str]) -> Dict[str, Any]:
        """计算相关性矩阵（简化版）"""
        # 这里使用简化的相关性计算
        # 实际应用中可以使用更复杂的统计算法

        matrix = {}
        for i, dim1 in enumerate(dimensions):
            matrix[dim1] = {}
            for j, dim2 in enumerate(dimensions):
                if i == j:
                    matrix[dim1][dim2] = 1.0
                else:
                    # 简化的相关性计算（基于共同出现的频率）
                    matrix[dim1][dim2] = 0.5  # 默认中等相关性

        return matrix

    def _identify_strong_correlations(self, matrix: Dict[str, Any]) -> List[Dict]:
        """识别强相关性"""
        strong_correlations = []
        threshold = 0.7

        for dim1, correlations in matrix.items():
            for dim2, correlation in correlations.items():
                if dim1 != dim2 and correlation >= threshold:
                    strong_correlations.append({
                        "dimension1": dim1,
                        "dimension2": dim2,
                        "correlation": correlation,
                        "strength": "强" if correlation >= 0.8 else "中等"
                    })

        return strong_correlations

    def _get_highest_correlation(self, matrix: Dict[str, Any]) -> Dict:
        """获取最高相关性"""
        highest = {"correlation": 0}

        for dim1, correlations in matrix.items():
            for dim2, correlation in correlations.items():
                if dim1 != dim2 and correlation > highest["correlation"]:
                    highest = {
                        "dimension1": dim1,
                        "dimension2": dim2,
                        "correlation": correlation
                    }

        return highest

    def _get_lowest_correlation(self, matrix: Dict[str, Any]) -> Dict:
        """获取最低相关性"""
        lowest = {"correlation": 1.0}

        for dim1, correlations in matrix.items():
            for dim2, correlation in correlations.items():
                if dim1 != dim2 and correlation < lowest["correlation"]:
                    lowest = {
                        "dimension1": dim1,
                        "dimension2": dim2,
                        "correlation": correlation
                    }

        return lowest

    def _calculate_average_correlation(self, matrix: Dict[str, Any]) -> float:
        """计算平均相关性"""
        total = 0
        count = 0

        for dim1, correlations in matrix.items():
            for dim2, correlation in correlations.items():
                if dim1 != dim2:
                    total += correlation
                    count += 1

        return total / count if count > 0 else 0.0


# Global multi-dimensional statistics service instance
multi_dimensional_stats_service = MultiDimensionalStatsService()