"""
地理分析服务
提供地理分布分析、热力图、区域统计等功能
"""
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Tuple, Union

import numpy as np
import pandas as pd
from sqlalchemy import func, and_, or_, desc, asc
from sqlalchemy.orm import Session
from sqlalchemy.sql import text

from core.database import get_db
from core.exceptions import GeoAnalysisError, ValidationError
from core.logging_config import get_logger
from models.analysis_result import AnalysisResult, PetitionData
from models.analysis_task import AnalysisTask

logger = get_logger("geo_analysis_service")


class GeoAnalysisService:
    """Geographic analysis service.

    Provides geographic distribution statistics, heatmap data, regional
    comparison, DBSCAN clustering and grid-density analysis over petition
    records (``PetitionData`` joined with ``AnalysisResult``).
    """

    def __init__(self):
        # Placeholder in-memory cache; no method below uses it yet.
        self.cache: Dict[str, Any] = {}
        self.cache_timeout = 300  # cache TTL in seconds (5 minutes)

    def get_geographic_distribution(self, db: Session, result_id: Optional[int] = None,
                                    filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Return province/city/district distribution statistics.

        Args:
            db: Active SQLAlchemy session.
            result_id: Optional analysis task id; narrows the filtered query.
            filters: Optional filter dict (see ``_apply_petition_filters``).

        Returns:
            Dict with the three distributions, the count of records that
            carry coordinates, the filtered total and the coordinate
            coverage percentage.

        Raises:
            GeoAnalysisError: If any underlying query fails.
        """
        try:
            query = db.query(PetitionData).join(AnalysisResult)

            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)

            if filters:
                query = self._apply_petition_filters(query, filters)

            # NOTE(review): the three GROUP BY queries below run against the
            # whole table and ignore result_id/filters — confirm that is
            # intended before relying on them for filtered views.

            # Province-level distribution.
            province_stats = db.query(
                PetitionData.province,
                func.count(PetitionData.id).label('count')
            ).filter(PetitionData.province.isnot(None)).group_by(
                PetitionData.province
            ).order_by(desc('count')).all()

            # City-level distribution (top 20).
            city_stats = db.query(
                PetitionData.city,
                func.count(PetitionData.id).label('count')
            ).filter(PetitionData.city.isnot(None)).group_by(
                PetitionData.city
            ).order_by(desc('count')).limit(20).all()

            # District-level distribution (top 30).
            district_stats = db.query(
                PetitionData.district,
                func.count(PetitionData.id).label('count')
            ).filter(PetitionData.district.isnot(None)).group_by(
                PetitionData.district
            ).order_by(desc('count')).limit(30).all()

            # Count coordinate-bearing rows in SQL instead of materializing
            # every row just to take len() of the list.
            located_count = query.filter(
                PetitionData.latitude.isnot(None),
                PetitionData.longitude.isnot(None)
            ).count()

            # Hoisted: the original issued COUNT(*) up to three times.
            total_count = query.count()

            return {
                "province_distribution": [
                    {"name": stat.province, "value": stat.count}
                    for stat in province_stats
                ],
                "city_distribution": [
                    {"name": stat.city, "value": stat.count}
                    for stat in city_stats
                ],
                "district_distribution": [
                    {"name": stat.district, "value": stat.count}
                    for stat in district_stats
                ],
                "location_data_count": located_count,
                "total_count": total_count,
                "location_coverage": round(located_count / total_count * 100, 2) if total_count > 0 else 0
            }

        except Exception as e:
            logger.error(f"获取地理分布统计失败: {str(e)}")
            raise GeoAnalysisError(f"获取地理分布统计失败: {str(e)}") from e

    def get_heatmap_data(self, db: Session, result_id: Optional[int] = None,
                         filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Return heatmap points with intensity, centre, bounds and stats.

        Args:
            db: Active SQLAlchemy session.
            result_id: Optional analysis task id filter.
            filters: Optional filter dict (see ``_apply_petition_filters``).

        Raises:
            GeoAnalysisError: If any underlying query fails.
        """
        try:
            query = db.query(PetitionData).join(AnalysisResult).filter(
                PetitionData.latitude.isnot(None),
                PetitionData.longitude.isnot(None)
            )

            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)

            if filters:
                query = self._apply_petition_filters(query, filters)

            location_data = query.all()

            # Build heatmap points; rows whose coordinates are falsy (0 or
            # empty) are skipped in addition to the SQL NULL filter above.
            heatmap_points = []
            for data in location_data:
                if data.latitude and data.longitude:
                    heatmap_points.append({
                        "lat": float(data.latitude),
                        "lng": float(data.longitude),
                        "intensity": self._calculate_intensity(data),
                        "petition_id": data.petition_id,
                        "title": data.title,
                        "pollution_type": data.pollution_type
                    })

            # Heat centre: mean of all point coordinates (0, 0 when empty).
            if heatmap_points:
                center_lat = np.mean([p["lat"] for p in heatmap_points])
                center_lng = np.mean([p["lng"] for p in heatmap_points])
            else:
                center_lat, center_lng = 0, 0

            # Hoisted: build the intensity list once instead of three times.
            intensities = [p["intensity"] for p in heatmap_points]

            return {
                "heatmap_points": heatmap_points,
                "center": {"lat": center_lat, "lng": center_lng},
                "bounds": self._calculate_bounds(heatmap_points),
                "total_points": len(heatmap_points),
                "intensity_stats": {
                    "max_intensity": max(intensities) if intensities else 0,
                    "min_intensity": min(intensities) if intensities else 0,
                    "avg_intensity": np.mean(intensities) if intensities else 0
                }
            }

        except Exception as e:
            logger.error(f"获取热力图数据失败: {str(e)}")
            raise GeoAnalysisError(f"获取热力图数据失败: {str(e)}") from e

    def get_regional_comparison(self, db: Session, result_id: Optional[int] = None,
                                regions: Optional[List[str]] = None,
                                filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Compare petition statistics across provinces.

        Args:
            db: Active SQLAlchemy session.
            result_id: Optional analysis task id filter.
            regions: Province names to compare; defaults to the top 5 by count.
            filters: Optional filter dict (see ``_apply_petition_filters``).

        Raises:
            GeoAnalysisError: If any underlying query fails.
        """
        try:
            query = db.query(PetitionData).join(AnalysisResult)

            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)

            if filters:
                query = self._apply_petition_filters(query, filters)

            # No explicit regions: default to the 5 provinces with the most records.
            if not regions:
                top_provinces = db.query(
                    PetitionData.province,
                    func.count(PetitionData.id).label('count')
                ).filter(PetitionData.province.isnot(None)).group_by(
                    PetitionData.province
                ).order_by(desc('count')).limit(5).all()
                regions = [stat.province for stat in top_provinces]

            # Hoisted out of the loop: the original ran COUNT(*) twice per region.
            grand_total = query.count()

            # Detailed statistics per region.
            regional_stats = []
            for region in regions:
                region_query = query.filter(PetitionData.province == region)

                total_count = region_query.count()

                # Pollution-type breakdown within the region.
                pollution_stats = region_query.with_entities(
                    PetitionData.pollution_type,
                    func.count(PetitionData.id).label('count')
                ).filter(PetitionData.pollution_type.isnot(None)).group_by(
                    PetitionData.pollution_type
                ).all()

                # Recent trend: submissions within the last 30 days.
                thirty_days_ago = datetime.now() - timedelta(days=30)
                recent_trend = region_query.filter(
                    PetitionData.submit_time >= thirty_days_ago
                ).count()

                regional_stats.append({
                    "region": region,
                    "total_count": total_count,
                    "recent_trend": recent_trend,
                    "pollution_distribution": [
                        {"type": stat.pollution_type, "count": stat.count}
                        for stat in pollution_stats
                    ],
                    "percentage": round(total_count / grand_total * 100, 2) if grand_total > 0 else 0
                })

            # Largest regions first.
            regional_stats.sort(key=lambda x: x["total_count"], reverse=True)

            return {
                "comparison_regions": regions,
                "regional_statistics": regional_stats,
                "total_compared": sum(stat["total_count"] for stat in regional_stats),
                "analysis_period": "最近30天"
            }

        except Exception as e:
            logger.error(f"获取区域对比分析失败: {str(e)}")
            raise GeoAnalysisError(f"获取区域对比分析失败: {str(e)}") from e

    def get_clustering_analysis(self, db: Session, result_id: Optional[int] = None,
                                filters: Optional[Dict[str, Any]] = None,
                                radius: float = 0.01) -> Dict[str, Any]:
        """Run DBSCAN clustering over coordinate-bearing petition records.

        Args:
            db: Active SQLAlchemy session.
            result_id: Optional analysis task id filter.
            filters: Optional filter dict (see ``_apply_petition_filters``).
            radius: DBSCAN ``eps`` in degrees of latitude/longitude.

        Returns:
            Dict with cluster descriptors, noise points and run metadata.
            Returns a degenerate result (no clusters) when fewer than 3
            points are available or scikit-learn is not installed.

        Raises:
            GeoAnalysisError: If any underlying query or clustering step fails.
        """
        try:
            # Optional dependency: imported lazily so the rest of the service
            # works without scikit-learn (see the ImportError handler below).
            from sklearn.cluster import DBSCAN

            # Only rows with coordinates can be clustered.
            query = db.query(PetitionData).join(AnalysisResult).filter(
                PetitionData.latitude.isnot(None),
                PetitionData.longitude.isnot(None)
            )

            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)

            if filters:
                query = self._apply_petition_filters(query, filters)

            location_data = query.all()

            if len(location_data) < 3:
                # FIX: the original returned "noise_points" as an int here,
                # while every other return path uses a list plus "noise_count".
                return {
                    "clusters": [],
                    "total_clusters": 0,
                    "noise_points": [],
                    "noise_count": len(location_data),
                    "message": "数据点太少，无法进行聚类分析"
                }

            # Coordinate matrix: one (lat, lng) row per record.
            coords = np.array([[float(data.latitude), float(data.longitude)] for data in location_data])

            # DBSCAN: points within `radius` of at least 3 neighbours cluster;
            # label -1 marks noise.
            clustering = DBSCAN(eps=radius, min_samples=3).fit(coords)
            labels = clustering.labels_

            n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
            n_noise = list(labels).count(-1)

            # Describe each cluster: centroid, members, dominant pollution types.
            clusters = []
            for i in range(n_clusters):
                cluster_indices = np.where(labels == i)[0]
                cluster_points = coords[cluster_indices]

                center = cluster_points.mean(axis=0)

                cluster_petitions = [location_data[idx] for idx in cluster_indices]

                clusters.append({
                    "cluster_id": i,
                    "center": {"lat": float(center[0]), "lng": float(center[1])},
                    "point_count": len(cluster_points),
                    "radius": float(radius),
                    "petitions": [
                        {
                            "id": p.petition_id,
                            "title": p.title,
                            "pollution_type": p.pollution_type,
                            "submit_time": p.submit_time.isoformat() if p.submit_time else None
                        }
                        for p in cluster_petitions
                    ],
                    "main_pollution_types": self._get_main_pollution_types(cluster_petitions)
                })

            # Noise points (label -1) reported individually.
            noise_points = []
            if n_noise > 0:
                for idx in np.where(labels == -1)[0]:
                    data = location_data[idx]
                    noise_points.append({
                        "lat": float(data.latitude),
                        "lng": float(data.longitude),
                        "petition_id": data.petition_id,
                        "title": data.title
                    })

            return {
                "clusters": clusters,
                "total_clusters": n_clusters,
                "noise_points": noise_points,
                "noise_count": n_noise,
                "total_points": len(location_data),
                "clustering_radius": radius,
                "clustering_algorithm": "DBSCAN"
            }

        except ImportError:
            logger.warning("scikit-learn未安装，跳过聚类分析")
            return {
                "clusters": [],
                "total_clusters": 0,
                "noise_points": [],
                "noise_count": 0,
                "message": "聚类分析需要安装scikit-learn库"
            }
        except Exception as e:
            logger.error(f"获取聚类分析失败: {str(e)}")
            raise GeoAnalysisError(f"获取聚类分析失败: {str(e)}") from e

    def get_location_density_analysis(self, db: Session, result_id: Optional[int] = None,
                                      grid_size: float = 0.01,
                                      filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Bucket coordinate-bearing records into a lat/lng grid and find hotspots.

        Args:
            db: Active SQLAlchemy session.
            result_id: Optional analysis task id filter.
            grid_size: Cell edge length in degrees.
            filters: Optional filter dict (see ``_apply_petition_filters``).

        Raises:
            GeoAnalysisError: If any underlying query fails.
        """
        try:
            query = db.query(PetitionData).join(AnalysisResult).filter(
                PetitionData.latitude.isnot(None),
                PetitionData.longitude.isnot(None)
            )

            if result_id:
                query = query.filter(AnalysisResult.task_id == result_id)

            if filters:
                query = self._apply_petition_filters(query, filters)

            location_data = query.all()

            if not location_data:
                return {
                    "grid_density": [],
                    "hotspots": [],
                    "total_points": 0,
                    "message": "没有可用的位置数据"
                }

            # Bounding box of all points.
            lats = [float(data.latitude) for data in location_data]
            lngs = [float(data.longitude) for data in location_data]

            min_lat, max_lat = min(lats), max(lats)
            min_lng, max_lng = min(lngs), max(lngs)

            # Bucket points into grid cells keyed by integer cell coordinates.
            grid_cells: Dict[str, Dict[str, Any]] = {}
            for data in location_data:
                lat, lng = float(data.latitude), float(data.longitude)

                # Cell indices: x along longitude, y along latitude.
                grid_x = int((lng - min_lng) / grid_size)
                grid_y = int((lat - min_lat) / grid_size)
                grid_key = f"{grid_x}_{grid_y}"

                if grid_key not in grid_cells:
                    grid_cells[grid_key] = {
                        "grid_x": grid_x,
                        "grid_y": grid_y,
                        # BUG FIX: original swapped grid_x/grid_y here, putting
                        # the longitude index into the latitude centre (and
                        # vice versa). Latitude derives from grid_y, longitude
                        # from grid_x.
                        "center_lat": min_lat + (grid_y + 0.5) * grid_size,
                        "center_lng": min_lng + (grid_x + 0.5) * grid_size,
                        "count": 0,
                        "petitions": []
                    }

                grid_cells[grid_key]["count"] += 1
                grid_cells[grid_key]["petitions"].append({
                    "id": data.petition_id,
                    "title": data.title,
                    "pollution_type": data.pollution_type
                })

            # Densest cells first.
            grid_density = list(grid_cells.values())
            grid_density.sort(key=lambda x: x["count"], reverse=True)

            # Hotspots: cells denser than twice the mean cell density.
            avg_density = np.mean([cell["count"] for cell in grid_density])
            hotspots = [cell for cell in grid_density if cell["count"] > avg_density * 2]

            return {
                "grid_density": grid_density[:50],  # cap payload size
                "hotspots": hotspots,
                "total_points": len(location_data),
                "grid_size": grid_size,
                "bounds": {
                    "min_lat": min_lat,
                    "max_lat": max_lat,
                    "min_lng": min_lng,
                    "max_lng": max_lng
                },
                "avg_density": avg_density,
                "max_density": max([cell["count"] for cell in grid_density]) if grid_density else 0
            }

        except Exception as e:
            logger.error(f"获取位置密度分析失败: {str(e)}")
            raise GeoAnalysisError(f"获取位置密度分析失败: {str(e)}") from e

    def _calculate_intensity(self, petition_data: "PetitionData") -> float:
        """Score a petition's heatmap intensity in [1.0, 3.0].

        Restored: this helper was commented out although
        ``get_heatmap_data`` calls it, which raised AttributeError at runtime.
        """
        intensity = 1.0

        # Severe pollution categories weigh more.
        if petition_data.pollution_type:
            high_pollution_types = ["大气污染", "水污染", "土壤污染"]
            if petition_data.pollution_type in high_pollution_types:
                intensity += 0.5

        # Unresolved cases weigh more than in-progress ones.
        if petition_data.processing_status == "pending":
            intensity += 0.3
        elif petition_data.processing_status == "processing":
            intensity += 0.1

        return min(intensity, 3.0)  # cap at 3.0

    def _calculate_bounds(self, points: List[Dict]) -> Dict[str, float]:
        """Return the bounding box of point dicts with 'lat'/'lng' keys.

        Empty input yields an all-zero box.
        """
        if not points:
            return {"min_lat": 0, "max_lat": 0, "min_lng": 0, "max_lng": 0}

        lats = [p["lat"] for p in points]
        lngs = [p["lng"] for p in points]

        return {
            "min_lat": min(lats),
            "max_lat": max(lats),
            "min_lng": min(lngs),
            "max_lng": max(lngs)
        }

    def _get_main_pollution_types(self, petitions: List["PetitionData"]) -> List[Dict]:
        """Return the top-3 pollution types among `petitions` by frequency.

        Restored: this helper was commented out although
        ``get_clustering_analysis`` calls it, which raised AttributeError at
        runtime. Records with a falsy pollution_type are ignored.
        """
        pollution_counts: Dict[str, int] = {}
        for petition in petitions:
            if petition.pollution_type:
                pollution_counts[petition.pollution_type] = pollution_counts.get(petition.pollution_type, 0) + 1

        return [
            {"type": ptype, "count": count}
            for ptype, count in sorted(pollution_counts.items(), key=lambda x: x[1], reverse=True)
        ][:3]  # top 3 only

    def _apply_petition_filters(self, query, filters: Dict[str, Any]):
        """Apply optional petition filters to a PetitionData query.

        Supported keys: start_date/end_date (submit_time range), province,
        city, district, pollution_type, processing_status. Unknown keys are
        ignored. Returns the (possibly) narrowed query.
        """
        if not filters:
            return query

        # Submission-time range.
        if "start_date" in filters:
            query = query.filter(PetitionData.submit_time >= filters["start_date"])
        if "end_date" in filters:
            query = query.filter(PetitionData.submit_time <= filters["end_date"])

        # Administrative region.
        if "province" in filters:
            query = query.filter(PetitionData.province == filters["province"])
        if "city" in filters:
            query = query.filter(PetitionData.city == filters["city"])
        if "district" in filters:
            query = query.filter(PetitionData.district == filters["district"])

        # Pollution type.
        if "pollution_type" in filters:
            query = query.filter(PetitionData.pollution_type == filters["pollution_type"])

        # Processing status.
        if "processing_status" in filters:
            query = query.filter(PetitionData.processing_status == filters["processing_status"])

        return query


# Module-level singleton instance of the geo analysis service.
geo_analysis_service = GeoAnalysisService()