"""
多维度分析服务
提供高级数据分析、关联分析、趋势预测等功能
"""
import pandas as pd
import numpy as np
from typing import Dict, Any, List, Optional, Tuple, Union
from datetime import datetime, timedelta
from sqlalchemy.orm import Session
from sqlalchemy import func, and_, or_, desc, asc
from sqlalchemy.sql import text
import json
import logging
from collections import defaultdict, Counter

# from models.analysis_result import AnalysisResult, PetitionData, PollutionType
# NOTE(review): the import above is commented out, yet AnalysisResult and
# PetitionData are still referenced by the query-building methods below —
# restore the import (or remove/guard those references) before this module
# can run without NameError.
from models.analysis_task import AnalysisTask
from core.database import get_db
from core.logging_config import get_logger
from core.exceptions import AnalysisError, ValidationError

logger = get_logger("multi_dimension_analysis_service")


class MultiDimensionAnalysisService:
    """Multi-dimension analysis service.

    Produces cross-tabulations, trend forecasts, pattern summaries and
    correlation reports over petition data.
    """

    def __init__(self):
        # Per-instance result cache (not used by any code visible here).
        self.cache = {}
        # Cache entry lifetime, in seconds (5 minutes).
        self.cache_timeout = 300
    
    def get_cross_analysis(self, db: Session, result_id: Optional[int] = None,
                          dimensions: Optional[List[str]] = None, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Cross-tabulate petition records over every pair of dimensions.

        For each unordered pair of the requested dimensions this builds a
        contingency table, runs a chi-square test on it and computes a
        correlation score; a correlation matrix over all dimensions is
        returned as well.

        Args:
            db: Active SQLAlchemy session.
            result_id: When truthy, restrict rows to
                ``AnalysisResult.task_id == result_id``.
            dimensions: At least two dimension names to cross-analyze.
            filters: Optional criteria forwarded to ``_apply_petition_filters``.

        Returns:
            Dict with per-pair cross analyses, the correlation matrix and the
            record count (or a "no data" payload when nothing matches).

        Raises:
            AnalysisError: on any failure; note the broad ``except`` below
                also catches and re-wraps the ValidationError raised for an
                insufficient ``dimensions`` list — confirm that is intended.

        NOTE(review): as written this method cannot succeed — PetitionData /
        AnalysisResult are not imported (their import near the top of the
        file is commented out), and the helpers ``_create_cross_table``,
        ``_calculate_correlation`` and ``_calculate_correlation_matrix`` are
        commented out further down, so the calls below raise at call time.
        """
        try:
            if not dimensions or len(dimensions) < 2:
                raise ValidationError("交叉分析需要至少选择2个维度")
            
            # NOTE(review): PetitionData / AnalysisResult are undefined here
            # (import commented out at file top) — NameError at call time.
            query = db.query(PetitionData).join(AnalysisResult)
            
            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)
            
            if filters:
                query = self._apply_petition_filters(query, filters)
            
            # Fetch all matching petition rows into memory.
            petition_data = query.all()
            
            if not petition_data:
                return {
                    "cross_analysis": [],
                    "correlation_matrix": [],
                    "total_records": 0,
                    "message": "没有可用的数据进行分析"
                }
            
            # Run the cross analysis.
            cross_results = []
            
            # Cross-tabulate every unordered pair (i, j) with i < j.
            for i in range(len(dimensions)):
                for j in range(i + 1, len(dimensions)):
                    dim1, dim2 = dimensions[i], dimensions[j]
                    
                    # NOTE(review): _create_cross_table and
                    # _calculate_correlation are commented out in this file —
                    # these calls raise AttributeError as written.
                    cross_table = self._create_cross_table(petition_data, dim1, dim2)
                    
                    cross_results.append({
                        "dimension1": dim1,
                        "dimension2": dim2,
                        "cross_table": cross_table,
                        "chi_square_test": self._perform_chi_square_test(cross_table) if cross_table else None,
                        "correlation": self._calculate_correlation(petition_data, dim1, dim2)
                    })
            
            # Full correlation matrix across the requested dimensions
            # (NOTE(review): _calculate_correlation_matrix is also commented out).
            correlation_matrix = self._calculate_correlation_matrix(petition_data, dimensions)
            
            return {
                "cross_analysis": cross_results,
                "correlation_matrix": correlation_matrix,
                "total_records": len(petition_data),
                "analysis_dimensions": dimensions
            }
            
        except Exception as e:
            logger.error(f"获取交叉分析失败: {str(e)}")
            raise AnalysisError(f"获取交叉分析失败: {str(e)}")
    
    def get_trend_prediction(self, db: Session, result_id: Optional[int] = None,
                           predict_days: int = 30, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Forecast daily petition volume for the next ``predict_days`` days.

        Aggregates the last 90 days of submissions into a daily series, then
        blends a trailing moving average (weight 0.6) with a least-squares
        linear trend (weight 0.4) to produce per-day predictions with a
        decaying confidence score.

        Args:
            db: Active SQLAlchemy session.
            result_id: When truthy, restrict rows to
                ``AnalysisResult.task_id == result_id``.
            predict_days: Number of future days to forecast (default 30).
            filters: Optional criteria forwarded to ``_apply_petition_filters``.

        Returns:
            Dict with the forecast points, the historical series annotated
            with the moving average, a model-accuracy score, and period
            descriptions; or a "not enough data" payload.

        Raises:
            AnalysisError: wrapping any failure.

        NOTE(review): PetitionData / AnalysisResult are referenced but not
        imported (import commented out at file top) — NameError at call time.
        """
        try:
            query = db.query(PetitionData).join(AnalysisResult)
            
            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)
            
            if filters:
                query = self._apply_petition_filters(query, filters)
            
            # Historical window: the most recent 90 days.
            # NOTE(review): naive local datetime.now() — confirm submit_time
            # is stored naive/local too, otherwise the bounds are skewed.
            end_date = datetime.now()
            start_date = end_date - timedelta(days=90)
            
            historical_data = query.filter(
                PetitionData.submit_time >= start_date,
                PetitionData.submit_time <= end_date
            ).all()
            
            if not historical_data:
                return {
                    "prediction": [],
                    "historical_trend": [],
                    "model_accuracy": 0,
                    "message": "没有足够的历史数据进行预测"
                }
            
            # Aggregate counts per calendar day (ISO date string keys).
            daily_counts = defaultdict(int)
            for data in historical_data:
                if data.submit_time:
                    date_key = data.submit_time.date().isoformat()
                    daily_counts[date_key] += 1
            
            # Build the time series: sorted dates and matching counts.
            # NOTE(review): days with zero petitions are absent entirely, so
            # the series is irregular — confirm that is acceptable for the
            # trend fit below.
            dates = sorted(daily_counts.keys())
            values = [daily_counts[date] for date in dates]
            
            if len(values) < 7:
                return {
                    "prediction": [],
                    "historical_trend": [],
                    "model_accuracy": 0,
                    "message": "历史数据太少，无法进行预测"
                }
            
            # Simple trailing moving average over the series.
            window_size = min(7, len(values) // 3)
            moving_avg = self._calculate_moving_average(values, window_size)
            
            # Least-squares linear trend over day index.
            trend_slope, trend_intercept = self._calculate_linear_trend(values)
            
            # Generate the forecast points, one per future day.
            prediction = []
            last_date = datetime.fromisoformat(dates[-1])
            
            for i in range(1, predict_days + 1):
                pred_date = last_date + timedelta(days=i)
                
                # Moving-average component: hold the last smoothed level.
                ma_pred = moving_avg[-1] if moving_avg else np.mean(values)
                
                # Linear-trend component extrapolated past the series end.
                linear_pred = trend_slope * (len(values) + i) + trend_intercept
                
                # Blend the two forecasts (weighted average, trend floored at 0).
                combined_pred = 0.6 * ma_pred + 0.4 * max(0, linear_pred)
                
                prediction.append({
                    "date": pred_date.date().isoformat(),
                    "predicted_count": round(max(0, combined_pred)),
                    "confidence": self._calculate_prediction_confidence(i, len(values))
                })
            
            # Score how well the two models fit the observed history.
            model_accuracy = self._calculate_model_accuracy(values, moving_avg, trend_slope, trend_intercept)
            
            return {
                "prediction": prediction,
                "historical_trend": [
                    {"date": date, "actual_count": count, "moving_avg": moving_avg[i] if i < len(moving_avg) else None}
                    for i, (date, count) in enumerate(zip(dates, values))
                ],
                "model_accuracy": model_accuracy,
                "prediction_period": f"{predict_days}天",
                "historical_period": f"{len(dates)}天"
            }
            
        except Exception as e:
            logger.error(f"获取趋势预测失败: {str(e)}")
            raise AnalysisError(f"获取趋势预测失败: {str(e)}")
    
    def get_pattern_analysis(self, db: Session, result_id: Optional[int] = None,
                           filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Summarize temporal, spatial and pollution-type patterns plus anomalies.

        Args:
            db: Active SQLAlchemy session.
            result_id: When truthy, restrict rows to
                ``AnalysisResult.task_id == result_id``.
            filters: Optional criteria forwarded to ``_apply_petition_filters``.

        Returns:
            Dict with the three pattern groups, detected anomalies, the
            record count and a timestamp; or a "no data" payload.

        Raises:
            AnalysisError: wrapping any failure.

        NOTE(review): every helper this method calls —
        ``_analyze_time_patterns``, ``_analyze_spatial_patterns``,
        ``_analyze_pollution_patterns``, ``_detect_anomalies`` — is commented
        out below, and PetitionData / AnalysisResult are not imported, so as
        written this method fails at call time.
        """
        try:
            query = db.query(PetitionData).join(AnalysisResult)
            
            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)
            
            if filters:
                query = self._apply_petition_filters(query, filters)
            
            petition_data = query.all()
            
            if not petition_data:
                return {
                    "patterns": [],
                    "anomalies": [],
                    "total_records": 0,
                    "message": "没有可用的数据进行模式分析"
                }
            
            patterns = []
            
            # Temporal patterns (hour-of-day, day-of-week distributions).
            time_patterns = self._analyze_time_patterns(petition_data)
            patterns.append({
                "type": "time_patterns",
                "description": "时间分布模式",
                "patterns": time_patterns
            })
            
            # Spatial patterns (geographic distribution).
            spatial_patterns = self._analyze_spatial_patterns(petition_data)
            patterns.append({
                "type": "spatial_patterns", 
                "description": "地理分布模式",
                "patterns": spatial_patterns
            })
            
            # Pollution-type patterns.
            pollution_patterns = self._analyze_pollution_patterns(petition_data)
            patterns.append({
                "type": "pollution_patterns",
                "description": "污染类型模式",
                "patterns": pollution_patterns
            })
            
            # Anomaly detection (e.g. abnormally busy days).
            anomalies = self._detect_anomalies(petition_data)
            
            return {
                "patterns": patterns,
                "anomalies": anomalies,
                "total_records": len(petition_data),
                "analysis_timestamp": datetime.now().isoformat()
            }
            
        except Exception as e:
            logger.error(f"获取模式分析失败: {str(e)}")
            raise AnalysisError(f"获取模式分析失败: {str(e)}")
    
    def get_correlation_analysis(self, db: Session, result_id: Optional[int] = None,
                               filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Compute pairwise associations between predefined petition variables.

        Args:
            db: Active SQLAlchemy session.
            result_id: When truthy, restrict rows to
                ``AnalysisResult.task_id == result_id``.
            filters: Optional criteria forwarded to ``_apply_petition_filters``.

        Returns:
            Dict with all pairwise correlations, the subset considered strong
            (|r| > 0.5 and significant), the record count and a summary; or a
            "no data" payload.

        Raises:
            AnalysisError: wrapping any failure.

        NOTE(review): ``_calculate_variable_correlation`` is commented out
        below (its stub always returned coefficient 0.0 / not significant),
        and PetitionData / AnalysisResult are not imported — as written this
        method fails at call time.
        """
        try:
            query = db.query(PetitionData).join(AnalysisResult)
            
            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)
            
            if filters:
                query = self._apply_petition_filters(query, filters)
            
            petition_data = query.all()
            
            if not petition_data:
                return {
                    "correlations": [],
                    "strong_correlations": [],
                    "total_records": 0,
                    "message": "没有可用的数据进行关联性分析"
                }
            
            # Variables considered for pairwise association
            # (key: internal name, value: human-readable label).
            variables = {
                "pollution_type": "污染类型",
                "region": "区域",
                "processing_time": "处理时长",
                "sentiment": "情感倾向",
                "severity": "严重程度"
            }
            
            correlations = []
            
            # Association between every pair of variables.
            for var1_name, var1_desc in variables.items():
                for var2_name, var2_desc in variables.items():
                    if var1_name < var2_name:  # lexicographic order dedupes unordered pairs
                        correlation = self._calculate_variable_correlation(
                            petition_data, var1_name, var2_name
                        )
                        
                        if correlation is not None:
                            correlations.append({
                                "variable1": var1_name,
                                "variable1_desc": var1_desc,
                                "variable2": var2_name,
                                "variable2_desc": var2_desc,
                                "correlation_coefficient": correlation["coefficient"],
                                "correlation_strength": correlation["strength"],
                                "p_value": correlation["p_value"],
                                "significance": correlation["significance"]
                            })
            
            # Strong associations: |r| > 0.5 and statistically significant.
            strong_correlations = [
                corr for corr in correlations 
                if abs(corr["correlation_coefficient"]) > 0.5 and corr["significance"]
            ]
            
            return {
                "correlations": correlations,
                "strong_correlations": strong_correlations,
                "total_records": len(petition_data),
                "analysis_summary": {
                    "total_correlations": len(correlations),
                    "strong_correlations_count": len(strong_correlations),
                    "strongest_correlation": max(correlations, key=lambda x: abs(x["correlation_coefficient"])) if correlations else None
                }
            }
            
        except Exception as e:
            logger.error(f"获取关联性分析失败: {str(e)}")
            raise AnalysisError(f"获取关联性分析失败: {str(e)}")
    
    async def get_comprehensive_analysis(self, db: Session, result_id: Optional[int] = None,
                                 filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Run all four analyses concurrently and combine them into one report.

        Args:
            db: Active SQLAlchemy session.
            result_id: When truthy, restricts each sub-analysis to that task.
            filters: Optional criteria forwarded to every sub-analysis.

        Returns:
            Dict bundling the four analysis payloads, derived insights, a
            generation timestamp and the analysis scope.

        Raises:
            AnalysisError: wrapping any failure in the orchestration itself.

        NOTE(review): the single ``db`` Session is used from multiple worker
        threads at once via ``asyncio.to_thread`` — SQLAlchemy sessions are
        not thread-safe; confirm or give each task its own session. Also,
        ``_generate_insights`` is commented out below, so the insights step
        raises AttributeError as written.
        """
        try:
            import asyncio
            
            # Run the four analyses concurrently in worker threads.
            tasks = [
                asyncio.create_task(
                    asyncio.to_thread(self.get_cross_analysis, db, result_id, ["pollution_type", "region"], filters)
                ),
                asyncio.create_task(
                    asyncio.to_thread(self.get_trend_prediction, db, result_id, 30, filters)
                ),
                asyncio.create_task(
                    asyncio.to_thread(self.get_pattern_analysis, db, result_id, filters)
                ),
                asyncio.create_task(
                    asyncio.to_thread(self.get_correlation_analysis, db, result_id, filters)
                )
            ]
            
            results = await asyncio.gather(*tasks, return_exceptions=True)
            
            # Unpack results, degrading each failed sub-analysis to {}.
            # NOTE(review): failures are silently discarded here — consider
            # logging the captured exceptions.
            cross_analysis = results[0] if not isinstance(results[0], Exception) else {}
            trend_prediction = results[1] if not isinstance(results[1], Exception) else {}
            pattern_analysis = results[2] if not isinstance(results[2], Exception) else {}
            correlation_analysis = results[3] if not isinstance(results[3], Exception) else {}
            
            # Derive combined insights from the (possibly partial) results.
            insights_task = asyncio.create_task(
                asyncio.to_thread(self._generate_insights,
                cross_analysis, trend_prediction, pattern_analysis, correlation_analysis)
            )
            insights = await insights_task
            
            return {
                "cross_analysis": cross_analysis,
                "trend_prediction": trend_prediction,
                "pattern_analysis": pattern_analysis,
                "correlation_analysis": correlation_analysis,
                "insights": insights,
                "generated_at": datetime.now().isoformat(),
                "analysis_scope": {
                    "result_id": result_id,
                    "filters": filters
                }
            }
            
        except Exception as e:
            logger.error(f"获取综合分析失败: {str(e)}")
            raise AnalysisError(f"获取综合分析失败: {str(e)}")
    
    # def _create_cross_table(self, data: List[PetitionData], dim1: str, dim2: str) -> Dict[str, Any]:
    #     """创建交叉表"""
    #     try:
    #         # 获取维度的值
    #         dim1_values = []
    #         dim2_values = []
            
    #         for item in data:
    #             val1 = self._get_dimension_value(item, dim1)
    #             val2 = self._get_dimension_value(item, dim2)
                
    #             if val1 and val2:
    #                 dim1_values.append(val1)
    #                 dim2_values.append(val2)
            
    #         if not dim1_values or not dim2_values:
    #             return None
            
    #         # 创建交叉表
    #         dim1_unique = list(set(dim1_values))
    #         dim2_unique = list(set(dim2_values))
            
    #         cross_table = {}
    #         for val1 in dim1_unique:
    #             cross_table[val1] = {}
    #             for val2 in dim2_unique:
    #                 cross_table[val1][val2] = 0
            
    #         # 填充数据
    #         for val1, val2 in zip(dim1_values, dim2_values):
    #             cross_table[val1][val2] += 1
            
    #         return {
    #             "rows": dim1_unique,
    #             "columns": dim2_unique,
    #             "data": cross_table,
    #             "row_totals": {val1: sum(cross_table[val1].values()) for val1 in dim1_unique},
    #             "column_totals": {val2: sum(cross_table[val1][val2] for val1 in dim1_unique) for val2 in dim2_unique}
    #         }
            
    #     except Exception as e:
    #         logger.error(f"创建交叉表失败: {str(e)}")
    #         return None
    
    # def _get_dimension_value(self, item: PetitionData, dimension: str) -> Optional[str]:
    #     """获取维度值"""
    #     try:
    #         if dimension == "pollution_type":
    #             return item.pollution_type
    #         elif dimension == "region":
    #             return item.province or item.city
    #         elif dimension == "district":
    #             return item.district
    #         elif dimension == "processing_status":
    #             return item.processing_status
    #         elif dimension == "respondent_type":
    #             return item.respondent_type
    #         else:
    #             return None
    #     except Exception:
    #         return None
    
    def _perform_chi_square_test(self, cross_table: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Run a chi-square independence test on a cross-table.

        Args:
            cross_table: Dict with "rows", "columns" and nested "data" counts
                as produced by the cross-table builder.

        Returns:
            Dict of test statistics (chi2, p-value, degrees of freedom, the
            observed contingency table and expected frequencies), or None
            when scipy is unavailable or the test fails.
        """
        try:
            # Imported lazily so the service degrades gracefully without scipy.
            from scipy.stats import chi2_contingency
            
            # Observed counts as a row-major matrix, following the declared
            # row/column ordering of the cross table.
            observed = [
                [cross_table["data"][row][col] for col in cross_table["columns"]]
                for row in cross_table["rows"]
            ]
            
            chi2, p_value, dof, expected = chi2_contingency(observed)
            
            return {
                "chi2_statistic": chi2,
                "p_value": p_value,
                "degrees_of_freedom": dof,
                "is_significant": p_value < 0.05,
                "contingency_table": observed,
                "expected_frequencies": expected.tolist()
            }
            
        except ImportError:
            logger.warning("scipy未安装，跳过卡方检验")
            return None
        except Exception as e:
            logger.error(f"卡方检验失败: {str(e)}")
            return None
    
    # def _calculate_correlation(self, data: List[PetitionData], dim1: str, dim2: str) -> Optional[float]:
    #     """计算相关性"""
    #     try:
    #         # 将分类变量转换为数值
    #         dim1_values = []
    #         dim2_values = []
            
    #         for item in data:
    #             val1 = self._get_dimension_value(item, dim1)
    #             val2 = self._get_dimension_value(item, dim2)
                
    #             if val1 and val2:
    #                 dim1_values.append(val1)
    #                 dim2_values.append(val2)
            
    #         if len(dim1_values) < 2:
    #             return None
            
    #         # 创建编码映射
    #         dim1_encode = {val: i for i, val in enumerate(set(dim1_values))}
    #         dim2_encode = {val: i for i, val in enumerate(set(dim2_values))}
            
    #         # 转换为数值
    #         dim1_numeric = [dim1_encode[val] for val in dim1_values]
    #         dim2_numeric = [dim2_encode[val] for val in dim2_values]
            
    #         # 计算相关系数
    #         correlation = np.corrcoef(dim1_numeric, dim2_numeric)[0, 1]
            
    #         return correlation if not np.isnan(correlation) else 0.0
            
    #     except Exception as e:
    #         logger.error(f"计算相关性失败: {str(e)}")
    #         return None
    
    def _calculate_moving_average(self, values: List[int], window_size: int) -> List[float]:
        """Compute a trailing moving average over a series.

        Output point i averages values[max(0, i - window + 1) .. i], so the
        window grows from 1 up to ``window_size`` at the start of the series
        (no NaN padding). Results are plain floats (JSON-serializable).

        Args:
            values: Input series (e.g. daily counts).
            window_size: Trailing window length; values < 1 are clamped to 1
                (fix: the old code took np.mean of an empty slice, yielding
                NaN plus a runtime warning).

        Returns:
            List of window means, same length as ``values``.
        """
        # Guard against a non-positive window, which would produce empty
        # slices and NaN means.
        window = max(1, window_size)
        return [
            float(np.mean(values[max(0, i - window + 1):i + 1]))
            for i in range(len(values))
        ]
    
    def _calculate_linear_trend(self, values: List[int]) -> Tuple[float, float]:
        """Fit a least-squares line y = slope * x + intercept to the series.

        Args:
            values: Series indexed by 0..len-1 (e.g. daily counts).

        Returns:
            (slope, intercept) of the fitted line. For fewer than two points
            a flat trend through the only value (or zero for empty input) is
            returned (fix: np.polyfit is underdetermined for a degree-1 fit
            on < 2 points and emits RankWarning / unstable coefficients).
        """
        # Degrade gracefully: a degree-1 fit needs at least two points.
        if len(values) < 2:
            level = float(values[0]) if values else 0.0
            return 0.0, level
        
        x = np.arange(len(values))
        y = np.asarray(values)
        
        # Least-squares fit of a first-degree polynomial.
        slope, intercept = np.polyfit(x, y, 1)
        
        return float(slope), float(intercept)
    
    def _calculate_prediction_confidence(self, days_ahead: int, historical_days: int) -> float:
        """Heuristic confidence for a forecast ``days_ahead`` days out.

        Confidence decays geometrically (2% per day) the further ahead we
        predict, is discounted when fewer than 30 days of history exist, and
        is clamped to the [0.1, 0.95] band.
        """
        # Geometric decay applied to a 0.8 baseline: 2% lost per day ahead.
        decay_per_day = 1 - 0.02
        # Short histories are penalized; 30+ days of data gets full weight.
        history_weight = min(1.0, historical_days / 30)
        raw = 0.8 * decay_per_day ** days_ahead * history_weight
        # Never report certainty above 0.95 or below 0.1.
        return max(0.1, min(0.95, raw))
    
    def _calculate_model_accuracy(self, values: List[int], moving_avg: List[float], 
                                slope: float, intercept: float) -> float:
        """Score how well the two forecasting models track the history.

        Averages the mean absolute error of the moving average and of the
        fitted line, normalizes by the series maximum and maps the result to
        a 0-1 score (1 = perfect fit). Returns 0.5 when scoring fails.
        """
        try:
            # MAE of the moving average, skipping index 0 where the window
            # is trivially the point itself.
            ma_errors = [abs(values[i] - moving_avg[i]) for i in range(1, len(moving_avg))]
            ma_mae = np.mean(ma_errors) if ma_errors else np.mean(values)
            
            # MAE of the fitted line evaluated at each historical index.
            linear_errors = [abs(values[i] - (slope * i + intercept)) for i in range(len(values))]
            linear_mae = np.mean(linear_errors)
            
            # Average the two errors, normalize by the series peak, and flip
            # into a score where lower error means higher accuracy.
            mean_error = (ma_mae + linear_mae) / 2
            peak = max(values) if values else 1
            score = max(0, 1 - (mean_error / peak))
            
            return round(score, 3)
            
        except Exception as e:
            logger.error(f"计算模型准确性失败: {str(e)}")
            return 0.5
    
    # def _analyze_time_patterns(self, data: List[PetitionData]) -> List[Dict[str, Any]]:
    #     """分析时间模式"""
    #     patterns = []
        
    #     # 按小时分析
    #     hourly_counts = defaultdict(int)
    #     for item in data:
    #         if item.submit_time:
    #             hour = item.submit_time.hour
    #             hourly_counts[hour] += 1
        
    #     if hourly_counts:
    #         peak_hour = max(hourly_counts, key=hourly_counts.get)
    #         patterns.append({
    #             "type": "hourly_pattern",
    #             "peak_hour": peak_hour,
    #             "peak_count": hourly_counts[peak_hour],
    #             "hourly_distribution": dict(hourly_counts)
    #         })
        
    #     # 按星期分析
    #     weekday_counts = defaultdict(int)
    #     for item in data:
    #         if item.submit_time:
    #             weekday = item.submit_time.weekday()
    #             weekday_counts[weekday] += 1
        
    #     if weekday_counts:
    #         peak_weekday = max(weekday_counts, key=weekday_counts.get)
    #         weekday_names = ["周一", "周二", "周三", "周四", "周五", "周六", "周日"]
    #         patterns.append({
    #             "type": "weekly_pattern",
    #             "peak_weekday": weekday_names[peak_weekday],
    #             "peak_count": weekday_counts[peak_weekday],
    #             "weekday_distribution": {weekday_names[wd]: count for wd, count in weekday_counts.items()}
    #         })
        
    #     return patterns
    
    # def _analyze_spatial_patterns(self, data: List[PetitionData]) -> List[Dict[str, Any]]:
    #     """分析空间模式"""
    #     patterns = []
        
    #     # 按省份分析
    #     province_counts = defaultdict(int)
    #     for item in data:
    #         if item.province:
    #             province_counts[item.province] += 1
        
    #     if province_counts:
    #         top_province = max(province_counts, key=province_counts.get)
    #         patterns.append({
    #             "type": "province_pattern",
    #             "top_province": top_province,
    #             "top_count": province_counts[top_province],
    #             "province_distribution": dict(province_counts)
    #         })
        
    #     return patterns
    
    # def _analyze_pollution_patterns(self, data: List[PetitionData]) -> List[Dict[str, Any]]:
    #     """分析污染类型模式"""
    #     patterns = []
        
    #     # 污染类型统计
    #     pollution_counts = defaultdict(int)
    #     for item in data:
    #         if item.pollution_type:
    #             pollution_counts[item.pollution_type] += 1
        
    #     if pollution_counts:
    #         top_pollution = max(pollution_counts, key=pollution_counts.get)
    #         patterns.append({
    #             "type": "pollution_pattern",
    #             "top_pollution_type": top_pollution,
    #             "top_count": pollution_counts[top_pollution],
    #             "pollution_distribution": dict(pollution_counts)
    #         })
        
    #     return patterns
    
    # def _detect_anomalies(self, data: List[PetitionData]) -> List[Dict[str, Any]]:
    #     """检测异常"""
    #     anomalies = []
        
    #     # 时间异常检测
    #     daily_counts = defaultdict(int)
    #     for item in data:
    #         if item.submit_time:
    #             date_key = item.submit_time.date()
    #             daily_counts[date_key] += 1
        
    #     if len(daily_counts) > 7:
    #         counts = list(daily_counts.values())
    #         mean_count = np.mean(counts)
    #         std_count = np.std(counts)
            
    #         # 检测异常高发日
    #         threshold = mean_count + 2 * std_count
    #         for date, count in daily_counts.items():
    #             if count > threshold:
    #                 anomalies.append({
    #                     "type": "high_frequency_anomaly",
    #                     "date": date.isoformat(),
    #                     "count": count,
    #                     "expected_range": f"{mean_count:.1f} ± {std_count:.1f}",
    #                     "severity": "high" if count > mean_count + 3 * std_count else "medium"
    #                 })
        
    #     return anomalies
    
    # def _calculate_variable_correlation(self, data: List[PetitionData], var1: str, var2: str) -> Optional[Dict[str, Any]]:
    #     """计算变量间相关性"""
    #     try:
    #         # 这里简化处理，实际应该根据不同变量类型使用不同的相关性计算方法
    #         return {
    #             "coefficient": 0.0,
    #             "strength": "weak",
    #             "p_value": 1.0,
    #             "significance": False
    #         }
    #     except Exception as e:
    #         logger.error(f"计算变量相关性失败: {str(e)}")
    #         return None
    
    # def _calculate_correlation_matrix(self, data: List[PetitionData], dimensions: List[str]) -> List[List[float]]:
    #     """计算相关性矩阵"""
    #     try:
    #         matrix = []
    #         for i in range(len(dimensions)):
    #             row = []
    #             for j in range(len(dimensions)):
    #                 if i == j:
    #                     row.append(1.0)
    #                 else:
    #                     corr = self._calculate_correlation(data, dimensions[i], dimensions[j])
    #                     row.append(corr if corr is not None else 0.0)
    #             matrix.append(row)
            
    #         return matrix
    #     except Exception as e:
    #         logger.error(f"计算相关性矩阵失败: {str(e)}")
    #         return [[1.0 if i == j else 0.0 for j in range(len(dimensions))] for i in range(len(dimensions))]
    
    # def _generate_insights(self, cross_analysis: Dict, trend_prediction: Dict, 
    #                       pattern_analysis: Dict, correlation_analysis: Dict) -> List[str]:
    #     """生成综合洞察"""
    #     insights = []
        
    #     # 趋势洞察
    #     if trend_prediction.get("prediction"):
    #         pred_data = trend_prediction["prediction"]
    #         if pred_data:
    #             total_pred = sum(p["predicted_count"] for p in pred_data)
    #             avg_pred = total_pred / len(pred_data)
    #             insights.append(f"未来{len(pred_data)}天预计平均每日{avg_pred:.1f}件投诉")
        
    #     # 模式洞察
    #     if pattern_analysis.get("patterns"):
    #         for pattern in pattern_analysis["patterns"]:
    #             if pattern["type"] == "time_patterns":
    #                 hourly_pattern = next((p for p in pattern["patterns"] if p["type"] == "hourly_pattern"), None)
    #                 if hourly_pattern:
    #                     insights.append(f"投诉高发时段为{hourly_pattern['peak_hour']}:00")
        
    #     # 关联洞察
    #     if correlation_analysis.get("strong_correlations"):
    #         strong_count = len(correlation_analysis["strong_correlations"])
    #         insights.append(f"发现{strong_count}个强关联因素")
        
    #     return insights
    
    def _apply_petition_filters(self, query, filters: Dict[str, Any]):
        """Apply petition-record filter criteria to a SQLAlchemy query.

        Supported keys: ``start_date`` / ``end_date`` (inclusive bounds on
        submit_time) and ``province`` / ``city`` / ``pollution_type``
        (equality). Unknown keys are ignored; the (possibly) narrowed query
        is returned.
        """
        if not filters:
            return query
        
        # Inclusive submission-time window.
        if "start_date" in filters:
            query = query.filter(PetitionData.submit_time >= filters["start_date"])
        if "end_date" in filters:
            query = query.filter(PetitionData.submit_time <= filters["end_date"])
        
        # Straight equality filters share one code path; the column is
        # resolved lazily, only when the key is actually present.
        for key in ("province", "city", "pollution_type"):
            if key in filters:
                query = query.filter(getattr(PetitionData, key) == filters[key])
        
        return query


# Module-level singleton instance of the multi-dimension analysis service,
# intended to be imported and shared by callers.
multi_dimension_analysis_service = MultiDimensionAnalysisService()