# =========================================================================
# === gpu_evaluator.py (最终版 v5.0 - API修正) ==========================
# =========================================================================
# 描述:
#   1. [核心修正] 彻底修正了对 cugraph.sssp 返回结果的错误理解。
#      现在，在 run_batch_sssp 中为每次sssp的结果手动添加了'source'列。
#   2. 保留了所有之前有效的修复（人口增强、inf值处理等）。
#   3. 这是针对所有已知问题的最终、正确的版本。
# =========================================================================

import cudf
import cugraph
import cupy as cp
import pandas as pd
import networkx as nx
from scipy.spatial import KDTree
import time
import logging
import numpy as np

# Module-level logger, named after this module (standard per-module pattern).
logger = logging.getLogger(__name__)

class GPUEvaluator:
    """GPU-accelerated evaluator for candidate store-site layouts.

    At construction time the road network is uploaded to the GPU as a cuGraph
    graph, and POI / population points are uploaded as cuDF frames; each POI
    also gets a precomputed population-density factor. Fitness for a set of
    candidate sites is then computed by running one SSSP per site and
    aggregating shortest-path distances to POIs and to the other sites.
    """

    def __init__(self, road_network_nx: nx.Graph, population_data: list, poi_data: list):
        """Build the GPU-side graph and point tables.

        Args:
            road_network_nx: undirected road graph. Node keys are assumed to
                be (x, y) coordinate tuples — required for the KDTree built
                below to make sense; TODO confirm with the graph builder.
            population_data: list of ((x, y), weight) population points.
            poi_data: list of ((x, y), category, weight) POI records.
        """
        logger.info("--- [GPU] Initializing GPUEvaluator with Population Density Enhancement ---")
        start_time = time.time()
        # CPU-side node bookkeeping: contiguous integer ids for cuGraph, plus a
        # KDTree over the node coordinates for nearest-node snapping.
        self.node_list_cpu = list(road_network_nx.nodes())
        self.node_to_int_cpu = {node: i for i, node in enumerate(self.node_list_cpu)}
        self.node_kdtree_cpu = KDTree(self.node_list_cpu)
        # Convert the edge list into an integer-id edge frame and upload it.
        edge_list = road_network_nx.edges(data=True)
        pdf = pd.DataFrame(edge_list, columns=['source_coord', 'target_coord', 'attrs'])
        pdf['source'] = pdf['source_coord'].map(self.node_to_int_cpu)
        pdf['target'] = pdf['target_coord'].map(self.node_to_int_cpu)
        # Edges without an explicit 'weight' attribute default to 1.0.
        pdf['weight'] = pdf['attrs'].apply(lambda x: x.get('weight', 1.0))
        pdf.dropna(subset=['source', 'target', 'weight'], inplace=True)
        self.road_gdf_gpu = cudf.from_pandas(pdf[['source', 'target', 'weight']])
        self.road_gdf_gpu['weight'] = self.road_gdf_gpu['weight'].astype('float32')
        self.G_gpu = cugraph.Graph(directed=False)
        self.G_gpu.from_cudf_edgelist(
            self.road_gdf_gpu, source='source', destination='target', edge_attr='weight'
        )
        logger.info(f"✅ [GPU] cuGraph graph created on GPU.")
        # POI table: coordinates, category, weight, and the integer id of the
        # nearest road-network node (snapped via the KDTree above).
        poi_coords_cpu = [p[0] for p in poi_data]
        poi_categories = [p[1] for p in poi_data]
        poi_weights = [p[2] for p in poi_data]
        self.poi_gdf_gpu = cudf.DataFrame({
            'x': [c[0] for c in poi_coords_cpu],
            'y': [c[1] for c in poi_coords_cpu],
            'category': poi_categories,
            'weight': poi_weights,
            'node_id': cudf.Series([self._find_nearest_node_int_id(c) for c in poi_coords_cpu], dtype='int32'),
            'poi_id': cudf.Series(range(len(poi_data)))
        })
        # Population table (coordinates + weight) uploaded to the GPU.
        pop_coords_cpu = [p[0] for p in population_data]
        pop_values = [p[1] for p in population_data]
        pop_gdf_gpu = cudf.DataFrame({
            'x': [c[0] for c in pop_coords_cpu],
            'y': [c[1] for c in pop_coords_cpu],
            'weight': pop_values
        })
        logger.info(f"✅ [GPU] {len(pop_gdf_gpu)} population points loaded to GPU.")
        logger.info("  - Pre-calculating population density factor for each POI...")
        # For each POI, sum the population weight within a 500-unit Euclidean
        # radius (CPU KDTree query; the data round-trips through pandas).
        # NOTE(review): the radius 500 is hard-coded here while other radii come
        # from `config` — presumably the same distance unit; confirm.
        pop_kdtree = KDTree(pop_gdf_gpu[['x', 'y']].to_pandas())
        nearby_pop_indices = pop_kdtree.query_ball_point(self.poi_gdf_gpu[['x', 'y']].to_pandas(), r=500)
        poi_surrounding_pop = [pop_gdf_gpu['weight'].iloc[indices].sum() for indices in nearby_pop_indices]
        poi_surrounding_pop_series = cudf.Series(poi_surrounding_pop)
        max_pop = poi_surrounding_pop_series.max()
        # Normalize surrounding population into a multiplicative factor in
        # [1.0, 2.0]; falls back to 1.0 when no population is present.
        if max_pop > 0:
             self.poi_gdf_gpu['pop_density_factor'] = 1.0 + (poi_surrounding_pop_series / max_pop)
        else:
             self.poi_gdf_gpu['pop_density_factor'] = 1.0
        self.poi_gdf_gpu['pop_density_factor'] = self.poi_gdf_gpu['pop_density_factor'].fillna(1.0)
        logger.info(f"✅ [GPU] {len(self.poi_gdf_gpu)} POI points loaded and enhanced with population data.")
        end_time = time.time()
        logger.info(f"--- [GPU] GPUEvaluator initialization finished in {end_time - start_time:.2f}s ---")

    def _find_nearest_node_int_id(self, coord: tuple) -> int:
        """Return the integer id of the road-network node nearest to `coord`.

        The KDTree is built over `self.node_list_cpu` in enumeration order, so
        the returned KDTree index doubles as the cuGraph vertex id.
        """
        _, idx = self.node_kdtree_cpu.query(coord)
        return idx

    # Fix (v5.0): cugraph.sssp returns per-vertex 'distance'/'vertex'
    # (and 'predecessor') but NO source column, so each per-source result is
    # tagged manually below to make the later groupby('source') possible.
    def run_batch_sssp(self, source_node_ids: list) -> cudf.DataFrame:
        """Run one SSSP per source node id and concatenate the results.

        Args:
            source_node_ids: integer vertex ids to use as SSSP sources.

        Returns:
            A cuDF DataFrame with cugraph.sssp's per-vertex columns plus a
            'source' column identifying which source each row belongs to;
            empty DataFrame when no sources are given.
        """
        if not source_node_ids:
            return cudf.DataFrame()
        
        all_results = []
        for sid in source_node_ids:
            # 1. Run SSSP; the result contains 'vertex' and 'distance' columns.
            single_sssp_df = cugraph.sssp(self.G_gpu, source=sid)
            # 2. Tag every row with the current source id (see note above).
            single_sssp_df['source'] = sid
            all_results.append(single_sssp_df)
            
        return cudf.concat(all_results) if all_results else cudf.DataFrame()

    # With the 'source' column in place, every groupby('source') below works.
    def calculate_luckin_fitness_final(self, site_coords: list, config: dict) -> dict:
        """Score a candidate site layout.

        Combines three terms: a pickup score (population-weighted POI coverage),
        a clustering bonus (sites near core POIs), and a cannibalization
        penalty (sites too close to each other).

        Args:
            site_coords: list of (x, y) candidate site coordinates.
            config: tunable parameters (radii, weights, bonus/penalty factors);
                every key is read with a default via .get().

        Returns:
            Dict with 'total_fitness' and, when sites are given and reachable,
            'pickup_score', 'cluster_score' and 'penalty'.
        """
        if not site_coords:
            return {'total_fitness': 0}

        num_sites = len(site_coords)
        # Snap each candidate site to its nearest road-network node.
        site_node_ids = [self._find_nearest_node_int_id(sc) for sc in site_coords]
        
        sssp_results_raw = self.run_batch_sssp(site_node_ids)
        if sssp_results_raw.empty:
            return {'total_fitness': 0}

        # Replace unreachable-vertex distances with a large finite penalty.
        # NOTE(review): this assumes the installed cuGraph marks unreachable
        # vertices with inf — some releases use a float-max sentinel instead,
        # in which case this replace() is a no-op; confirm for this version.
        penalty_distance = config.get('pickup_radius', 300) * 10.0
        sssp_results = sssp_results_raw.replace(cp.inf, penalty_distance)
        sssp_results = sssp_results.rename(columns={'vertex': 'node_id'})

        # Join POIs onto the SSSP rows via their snapped node id; one row per
        # (POI, source site) pair.
        merged_poi = self.poi_gdf_gpu.merge(sssp_results, on='node_id', how='inner').reset_index(drop=True)

        # --- Pickup Score ---
        # For each POI, count how many candidate sites lie within the pickup
        # radius, then reward weight * density * count^2 (the square favors
        # POIs covered by multiple sites — presumably intentional; confirm).
        pickup_radius = config.get('pickup_radius', 300)
        merged_poi['is_pickup'] = (merged_poi['distance'] <= pickup_radius).astype('int32')
        pickup_counts = merged_poi.groupby('poi_id')['is_pickup'].sum().to_frame().reset_index()
        # Left join back so POIs absent from the inner merge count as 0.
        poi_scores = self.poi_gdf_gpu.merge(pickup_counts, on='poi_id', how='left').fillna(0)
        poi_scores['contribution'] = (poi_scores['weight'] * 
                                      poi_scores['pop_density_factor'] * 
                                      (poi_scores['is_pickup'] ** 2))
        # NOTE(review): .item() assumes the reduction returns a numpy/cupy
        # scalar (not a plain Python number) — verify against the cuDF version.
        pickup_score = poi_scores['contribution'].sum().item()

        # --- Clustering Score ---
        # Bonus for each site whose nearest core POI (e.g. offices, malls) is
        # within the cluster radius.
        core_poi_mask = merged_poi['category'].isin(config.get('core_poi_categories', ['OFFICE', 'MALL']))
        site_to_core_poi_paths = merged_poi[core_poi_mask]
        cluster_score = 0
        if not site_to_core_poi_paths.empty:
            # Safe now that run_batch_sssp adds the 'source' column.
            site_to_closest_core_poi_dist = site_to_core_poi_paths.groupby('source')['distance'].min()
            cluster_radius = config.get('cluster_radius', 500)
            num_clustered_sites = (site_to_closest_core_poi_dist < cluster_radius).sum()
            cluster_score = (num_clustered_sites * config.get('cluster_bonus_factor', 50)).item()

        # --- Cannibalization Penalty ---
        # Penalize the closest pair of sites that fall under the safe distance.
        # NOTE(review): the `distance > 0` filter also excludes two different
        # sites snapped to the SAME node (distance 0), so perfectly coincident
        # sites incur no penalty — confirm this is intended.
        site_to_site_dist = sssp_results[sssp_results['node_id'].isin(site_node_ids)]
        penalty = 0.0
        if not site_to_site_dist.empty and num_sites > 1:
            min_safe_dist = config.get('min_safe_distance', 150)
            violations = site_to_site_dist[(site_to_site_dist['distance'] < min_safe_dist) & (site_to_site_dist['distance'] > 0)]
            if not violations.empty:
                closest_dist = violations['distance'].min()
                # Linear penalty in (0, 1]: 1 at distance 0, 0 at the safe distance.
                penalty = (1 - (closest_dist / min_safe_dist)).item()

        # --- Final Fitness Score ---
        # Weighted sum of the two scores, scaled down by the penalty factor.
        w_pickup = config.get('w_pickup', 0.8)
        w_cluster = config.get('w_cluster', 0.2)
        total_fitness = (w_pickup * pickup_score + w_cluster * cluster_score) * (1 - penalty)

        return {
            'total_fitness': total_fitness,
            'pickup_score': pickup_score,
            'cluster_score': cluster_score,
            'penalty': penalty
        }