import numpy as np
import networkx as nx
import random
from shapely.geometry import Point, LineString, MultiLineString, Polygon
from scipy.spatial import KDTree
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
import time
import matplotlib.pyplot as plt
from functools import lru_cache
import logging
from collections import defaultdict
import pickle
import os
import geopandas as gpd
from typing import List, Tuple, Optional, Dict, Any
import pandas as pd
from shapely.ops import unary_union
import gc
import folium
from folium.plugins import HeatMap
from gpu_evaluator import GPUEvaluator


# =========================================================================
# === 2. 初始化与配置 =====================================================
# =========================================================================
# Module-wide logging configuration: timestamped INFO-level messages shared
# by every class below via this module-level logger.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# =========================================================================
# === 3. 辅助函数 =========================================================
# =========================================================================
def euclidean_distance_numba(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2).

    NOTE(review): the name suggests a numba-jitted kernel, but no @njit
    decorator is present in this module — confirm whether one was dropped.
    """
    dx = x2 - x1
    dy = y2 - y1
    return np.sqrt(dx ** 2 + dy ** 2)

# =========================================================================
# === 4. 核心类定义 =======================================================
# =========================================================================

class FastGridIndex:
    """Uniform-grid spatial index for fast fixed-radius point queries.

    Points are bucketed into square cells of side `grid_size`; a radius
    query scans only the cells overlapping the query disc and then applies
    an exact Euclidean distance check.
    """
    def __init__(self, bounds, grid_size=500):
        # bounds = (minx, miny, maxx, maxy); cells are grid_size x grid_size.
        self.bounds = bounds
        self.grid_size = grid_size
        self.nx = max(1, int((bounds[2] - bounds[0]) / grid_size))
        self.ny = max(1, int((bounds[3] - bounds[1]) / grid_size))
        # (col, row) -> list of (point_id, x, y) entries, created on demand.
        self.grid = defaultdict(list)

    def _get_grid_cell(self, x, y):
        """Map a coordinate to its (col, row) cell, clamped to the grid extent."""
        col = int((x - self.bounds[0]) / self.grid_size)
        row = int((y - self.bounds[1]) / self.grid_size)
        col = min(max(col, 0), self.nx - 1)
        row = min(max(row, 0), self.ny - 1)
        return col, row

    def insert(self, point_id, x, y):
        """Register a point under its containing cell."""
        cell = self._get_grid_cell(x, y)
        self.grid[cell].append((point_id, x, y))

    def query_radius(self, x, y, radius):
        """Return every stored (point_id, px, py) within `radius` of (x, y)."""
        span = int(np.ceil(radius / self.grid_size))
        ci, cj = self._get_grid_cell(x, y)
        hits = []
        for col in range(max(0, ci - span), min(self.nx, ci + span + 1)):
            for row in range(max(0, cj - span), min(self.ny, cj + span + 1)):
                # .get avoids materialising empty cells in the defaultdict.
                for point_id, px, py in self.grid.get((col, row), []):
                    # Exact Euclidean test on the coarse candidate set.
                    if np.sqrt((px - x) ** 2 + (py - y) ** 2) <= radius:
                        hits.append((point_id, px, py))
        return hits

class HierarchicalRoadNetwork:
    """
    Hierarchical road-network model (final integrated version).

    Builds an undirected graph whose nodes are (x, y) coordinate tuples,
    then precomputes "hub" nodes (top-degree intersections), hub-to-hub
    shortcut distances, and every node's access distance to its nearest
    hub, so that network-distance queries can be answered without running
    a full shortest-path search per query.
    """
    def __init__(self, road_gdf, cache_file=None):
        # road_gdf: GeoDataFrame of road geometries, or None to force loading
        #           a previously pickled network from `cache_file` only.
        # cache_file: optional pickle path used for both loading and saving.
        self.cache_file = cache_file
        if road_gdf is None:
            # No geometry supplied: the cache is the only possible source.
            if cache_file and os.path.exists(cache_file):
                logger.info(f"强制从缓存 '{cache_file}' 加载路网...")
                self._load_cache()
            else:
                raise ValueError("未提供路网数据，且找不到有效的缓存文件。")
            return

        if 'geometry' not in road_gdf.columns:
            raise ValueError("道路数据必须包含'geometry'列")

        if cache_file and os.path.exists(cache_file):
            logger.info("加载路网缓存...")
            try:
                self._load_cache()
                return
            except Exception as e:
                # Corrupt or incompatible cache: fall through and rebuild.
                logger.warning(f"加载缓存失败: {e}。将重新构建路网。")
        
        logger.info("构建分层路网...")
        self._build_hierarchical_network(road_gdf)
        if cache_file:
            self._save_cache()

    def _build_hierarchical_network(self, road_gdf):
        """Build the full graph, the spatial indexes, and the hub hierarchy from scratch."""
        start_time = time.time()
        self.full_graph = nx.Graph()
        logger.info("正在将路网几何体添加到图中...")
        total_lines = 0
        added_edges = 0
        if not isinstance(road_gdf, gpd.GeoDataFrame) or road_gdf.empty:
            raise ValueError("传入的路网数据不是有效的GeoDataFrame或为空。")
        for geom in tqdm(road_gdf.geometry, desc="处理路网几何体"):
            total_lines += 1
            if geom is None or geom.is_empty: continue
            if isinstance(geom, LineString):
                added_edges += self._add_line_to_graph(geom, self.full_graph)
            elif isinstance(geom, MultiLineString):
                for line in geom.geoms:
                    added_edges += self._add_line_to_graph(line, self.full_graph)
        logger.info(f"几何体处理完毕。总共处理了 {total_lines} 条线。")
        logger.info(f"成功向图中添加了 {added_edges} 条边。")
        # Graph nodes are coordinate tuples, so the node list doubles as the
        # coordinate array fed to KDTree and the grid index.
        self.node_coords = list(self.full_graph.nodes())
        if not self.node_coords: 
            raise ValueError("路网中没有有效节点。可能的原因是路网数据中的线段长度过短或坐标精度问题。")
        logger.info("构建空间索引 (KDTree 和 Grid)...")
        self.kdtree = KDTree(self.node_coords)
        bounds = self._get_bounds()
        self.grid_index = FastGridIndex(bounds, grid_size=500)
        for i, (x, y) in enumerate(self.node_coords): self.grid_index.insert(i, x, y)
        self._build_multilevel_index()
        build_time = time.time() - start_time
        logger.info(f"分层路网构建完成: {len(self.full_graph.nodes)} 节点, "
                   f"{len(self.full_graph.edges)} 边, 耗时: {build_time:.1f}s")

    def _add_line_to_graph(self, line, graph) -> int:
        """Add each consecutive segment of `line` as a weighted edge.

        Returns the number of edges added. Segments shorter than 1e-6
        (degenerate/duplicate vertices) are skipped.
        """
        coords = list(line.coords)
        if len(coords) < 2: return 0
        edges_added_count = 0
        for i in range(len(coords) - 1):
            p1_raw = coords[i]; p2_raw = coords[i+1]
            # Force plain 2-tuples (drops any Z coordinate) so nodes hash consistently.
            p1 = (p1_raw[0], p1_raw[1]); p2 = (p2_raw[0], p2_raw[1])
            dist = euclidean_distance_numba(p1[0], p1[1], p2[0], p2[1])
            if dist > 1e-6:
                graph.add_edge(p1, p2, weight=dist)
                edges_added_count += 1
        return edges_added_count

    def _get_bounds(self):
        """Return (minx, miny, maxx, maxy) over all graph nodes."""
        coords = np.array(self.node_coords)
        return (coords[:, 0].min(), coords[:, 1].min(), coords[:, 0].max(), coords[:, 1].max())

    def _build_multilevel_index(self):
        """Pick hub nodes, precompute hub-hub shortcuts, and node-to-hub access distances."""
        logger.info("构建多层次索引...")
        node_degrees = dict(self.full_graph.degree())
        # Select more hubs for better accuracy, e.g., top 10% (90th percentile)
        degree_threshold = np.percentile(list(node_degrees.values()), 90)
        self.important_nodes = {n for n, d in node_degrees.items() if d >= degree_threshold}
        logger.info(f"识别出 {len(self.important_nodes)} 个Hub节点。")
        
        important_node_list = list(self.important_nodes)
        if not important_node_list:
            # No hubs: queries will fall back to plain A* in _get_road_distance.
            logger.warning("未能识别出任何Hub节点。路网查询将回退到标准A*算法。")
            self.node_to_hub_dist = {}
            self.shortcut_graph = nx.Graph()
            return
            
        self.shortcut_graph = nx.Graph()
        logger.info("预计算Hub节点间的捷径...")
        for node1 in tqdm(important_node_list, desc="计算捷径"):
            try:
                # Bounded Dijkstra (5000-unit cutoff) keeps the shortcut graph sparse.
                distances = nx.single_source_dijkstra_path_length(
                    self.full_graph, node1, weight='weight', cutoff=5000)
                for node2 in important_node_list:
                    if node1 != node2 and node2 in distances:
                        self.shortcut_graph.add_edge(node1, node2, weight=distances[node2])
            except Exception as e:
                logger.warning(f"计算Hub节点 {node1} 路径时出错: {e}")

        logger.info("预计算所有节点的Hub接入距离 (这可能需要几分钟)...")
        self.node_to_hub_dist = {}
        hub_kdtree = KDTree(important_node_list)

        for node in tqdm(self.full_graph.nodes(), desc="计算接入距离"):
            if node in self.important_nodes:
                # A hub is its own access point at zero cost.
                self.node_to_hub_dist[node] = (node, 0.0)
                continue
            
            try:
                # Query more neighbors for better accuracy
                _, indices = hub_kdtree.query(node, k=min(10, len(important_node_list)))
                indices = indices if isinstance(indices, np.ndarray) else [indices]
                candidate_hubs = [important_node_list[i] for i in indices]
                
                # Among the Euclidean-nearest hubs, keep the one with the
                # shortest *network* distance.
                min_dist, best_hub = float('inf'), None
                for hub in candidate_hubs:
                    try:
                        dist = nx.shortest_path_length(self.full_graph, source=node, target=hub, weight='weight')
                        if dist < min_dist:
                            min_dist, best_hub = dist, hub
                    except nx.NetworkXNoPath: continue
                if best_hub:
                    self.node_to_hub_dist[node] = (best_hub, min_dist)
            except Exception as e:
                 logger.warning(f"为节点 {node} 计算接入距离时出错: {e}")
        logger.info("Hub接入距离预计算完成。")

    def find_nearest_node_fast(self, point: Tuple[float, float]) -> Tuple[float, float]:
        """Return the graph node nearest to `point` (grid lookup first, KDTree fallback)."""
        try:
            # 5-decimal rounding matches the coordinate precision used by callers.
            point = (round(point[0], 5), round(point[1], 5))
            candidates = self.grid_index.query_radius(point[0], point[1], radius=500)
            if not candidates:
                # Nothing within 500 units: exact nearest-neighbour via KDTree.
                _, indices = self.kdtree.query([point], k=1)
                idx = indices[0] if isinstance(indices, np.ndarray) else indices
                return self.node_coords[idx]
            
            min_distance, nearest_node = float('inf'), None
            for _, x, y in candidates:
                dist = euclidean_distance_numba(point[0], point[1], x, y)
                if dist < min_distance:
                    min_distance, nearest_node = dist, (x, y)
            return nearest_node
        except Exception:
            # NOTE(review): silently falls back to the first node on any error,
            # which can return a far-away match — consider logging here.
            return self.node_coords[0] if self.node_coords else (0.0, 0.0)

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps the
    # instance alive for the cache's lifetime (ruff B019); tolerable here only
    # because a single network object normally lives for the whole run.
    @lru_cache(maxsize=500000)
    def get_distance_fast(self, origin: tuple, destination: tuple) -> float:
        """Approximate network distance from `origin` to `destination`.

        Sum of: straight-line access to the nearest graph node, road distance
        between the two snap nodes, and straight-line egress at the far end.
        """
        if origin == destination: return 0.0
        orig_node = self.find_nearest_node_fast(origin)
        dest_node = self.find_nearest_node_fast(destination)
        dist_access = euclidean_distance_numba(origin[0], origin[1], orig_node[0], orig_node[1])
        dist_egress = euclidean_distance_numba(destination[0], destination[1], dest_node[0], dest_node[1])
        if orig_node == dest_node: return dist_access + dist_egress
        road_dist = self._get_road_distance(orig_node, dest_node)
        return dist_access + road_dist + dist_egress

    def _get_road_distance(self, orig_node: tuple, dest_node: tuple) -> float:
        """Road distance between two graph nodes via the hub hierarchy.

        Fast path: access-to-hub + hub shortcut + hub-to-destination. Falls
        back to exact A* on the full graph when hub data is missing or the
        shortcut graph has no path; returns inf when truly disconnected.
        """
        try:
            orig_hub, dist_to_orig_hub = self.node_to_hub_dist[orig_node]
            dest_hub, dist_to_dest_hub = self.node_to_hub_dist[dest_node]
            if orig_hub == dest_hub:
                # Same hub: routing through it would overestimate, so use A* directly.
                return nx.astar_path_length(self.full_graph, orig_node, dest_node, heuristic=lambda u, v: euclidean_distance_numba(u[0], u[1], v[0], v[1]), weight='weight')
            hub_dist = nx.shortest_path_length(self.shortcut_graph, orig_hub, dest_hub, weight='weight')
            return dist_to_orig_hub + hub_dist + dist_to_dest_hub
        except (KeyError, nx.NetworkXNoPath, nx.NodeNotFound):
            try:
                return nx.astar_path_length(self.full_graph, orig_node, dest_node, heuristic=lambda u, v: euclidean_distance_numba(u[0], u[1], v[0], v[1]), weight='weight')
            except nx.NetworkXNoPath:
                return float('inf')

    def _save_cache(self):
        """Pickle all precomputed structures to self.cache_file (best-effort)."""
        try:
            with open(self.cache_file, 'wb') as f:
                pickle.dump({
                    'full_graph': self.full_graph, 'node_coords': self.node_coords,
                    'important_nodes': self.important_nodes, 'shortcut_graph': self.shortcut_graph,
                    'node_to_hub_dist': self.node_to_hub_dist
                }, f)
            logger.info(f"路网缓存已保存: {self.cache_file}")
        except Exception as e:
            logger.error(f"保存缓存失败: {e}")

    def _load_cache(self):
        """Restore pickled structures and rebuild the (non-pickled) spatial indexes.

        NOTE(review): pickle.load on an untrusted cache file can execute
        arbitrary code — only load caches written by this process/user.
        """
        with open(self.cache_file, 'rb') as f:
            cache_data = pickle.load(f)
        self.full_graph = cache_data['full_graph']
        self.node_coords = cache_data['node_coords']
        self.important_nodes = cache_data['important_nodes']
        self.shortcut_graph = cache_data['shortcut_graph']
        self.node_to_hub_dist = cache_data['node_to_hub_dist']
        self.kdtree = KDTree(self.node_coords)
        bounds = self._get_bounds()
        self.grid_index = FastGridIndex(bounds, grid_size=500)
        for i, (x, y) in enumerate(self.node_coords): self.grid_index.insert(i, x, y)
        logger.info("路网缓存加载完成")

class AdaptiveParameterScheduler:
    """
    Feedback-driven scheduler for the PSO hyper-parameters (w, c1, c2).

    The inertia weight `w` decays linearly with iteration progress, while
    the cognitive/social factors are nudged up or down depending on how
    quickly the global best score has been improving.
    """
    def __init__(self, initial_w=0.9, initial_c1=2.0, initial_c2=2.0):
        # Baseline coefficients supplied by the caller.
        self.initial_w = initial_w
        self.initial_c1 = initial_c1
        self.initial_c2 = initial_c2
        # Linear decay range for the inertia weight.
        self.min_w = 0.2
        self.max_w = 0.9

    def update_parameters(self, iteration: int, max_iter: int, improvement_rate: float) -> Dict[str, float]:
        """Return the {'w', 'c1', 'c2'} values to use for this iteration."""
        frac_done = iteration / max_iter
        inertia = self.max_w - (self.max_w - self.min_w) * frac_done
        improving_fast = improvement_rate > 0.005
        stagnating = improvement_rate < 0.0005 and frac_done > 0.15
        if improving_fast:
            # Still improving quickly: favour personal (cognitive) exploration.
            cognitive = self.initial_c1 * 1.3
            social = self.initial_c2 * 0.7
        elif stagnating:
            # Progress stalled past the warm-up phase: pull towards the swarm best.
            cognitive = self.initial_c1 * 0.7
            social = self.initial_c2 * 1.3
        else:
            cognitive = self.initial_c1
            social = self.initial_c2
        return {'w': inertia, 'c1': cognitive, 'c2': social}

# class AdaptiveParameterScheduler:
#     """自适应调整PSO核心参数(w, c1, c2)的调度器"""
#     def __init__(self, w_range=[0.9, 0.4], c1_range=[1.5, 2.5], c2_range=[2.5, 1.5]):
#         """
#         :param w_range: 惯性权重的 [起始值, 结束值]
#         :param c1_range: 个人学习因子的 [起始值, 结束值]
#         :param c2_range: 群体学习因子的 [起始值, 结束值]
#         """
#         self.w_start, self.w_end = w_range
#         self.c1_start, self.c1_end = c1_range
#         self.c2_start, self.c2_end = c2_range

#     def _linear_interpolate(self, start_val, end_val, progress):
#         """线性插值"""
#         return start_val + (end_val - start_val) * progress

#     def get_params(self, iteration: int, max_iter: int) -> Dict[str, float]:
#         """根据迭代进度返回更新后的w, c1, c2"""
#         progress = iteration / max_iter
#         w = self._linear_interpolate(self.w_start, self.w_end, progress)
#         c1 = self._linear_interpolate(self.c1_start, self.c1_end, progress)
#         c2 = self._linear_interpolate(self.c2_start, self.c2_end, progress)
#         return {'w': w, 'c1': c1, 'c2': c2}


class OptimizedMultiSitePSO:
    """
    Particle-swarm optimizer that places `num_sites` facilities inside a
    boundary polygon, scoring candidate layouts with a GPUEvaluator and
    (optionally) analyzing the final solution with a CPU road network.
    """
    def __init__(self, 
                 num_sites: int, 
                 boundary_polygon, 
                 gpu_evaluator: GPUEvaluator,
                 road_network: Optional[HierarchicalRoadNetwork] = None,
                 **kwargs):
        # kwargs (with defaults): population_points, poi_coords, radius=2500,
        # max_search_radius=2500, num_particles=30, max_iter=100, w=0.9,
        # c1=2.0, c2=2.0, use_adaptive_params=True, early_stop_patience=30,
        # poi_weight=0.3, pop_weight=0.5, spacing_weight=0.2,
        # overlap_penalty_factor=0.15, far_dist_penalty_factor=0.3.
        logger.info("Initializing OptimizedMultiSitePSO with GPU support...")
        self.num_sites = num_sites
        self.boundary_polygon = boundary_polygon
        self.gpu_evaluator = gpu_evaluator
        self.road_network = road_network
        self.population_points = kwargs.get('population_points', [])
        self.poi_coords = kwargs.get('poi_coords', [])
        self.radius, self.max_search_radius = kwargs.get('radius', 2500), kwargs.get('max_search_radius', 2500)
        self.num_particles, self.max_iter = kwargs.get('num_particles', 30), kwargs.get('max_iter', 100)
        self.w, self.c1, self.c2 = kwargs.get('w', 0.9), kwargs.get('c1', 2.0), kwargs.get('c2', 2.0)
        self.use_adaptive_params = kwargs.get('use_adaptive_params', True)
        self.early_stop_patience = kwargs.get('early_stop_patience', 30)
        self.poi_weight, self.pop_weight, self.spacing_weight = \
            kwargs.get('poi_weight', 0.3), kwargs.get('pop_weight', 0.5), kwargs.get('spacing_weight', 0.2)
        self.overlap_penalty_factor = kwargs.get('overlap_penalty_factor', 0.15)
        self.far_dist_penalty_factor = kwargs.get('far_dist_penalty_factor', 0.3)
        
        # NOTE: population_coords/values only exist when population_points is
        # non-empty; downstream code guards with hasattr accordingly.
        if self.population_points:
            self.population_coords = np.array([p[0] for p in self.population_points])
            self.population_values = np.array([p[1] for p in self.population_points])

        # Each particle is a flat vector [x1, y1, x2, y2, ...] of length dim.
        self.dim = self.num_sites * 2
        self.global_best_position, self.global_best_score = None, float('inf')
        self.g_best_scores = []
        self.bounds = self.boundary_polygon.bounds
        if self.use_adaptive_params:
            self.scheduler = AdaptiveParameterScheduler(initial_w=self.w, initial_c1=self.c1, initial_c2=self.c2)
        self.valid_points_cache = self._generate_valid_points_cache()
        self.eval_config = {
            'radius': self.radius,
            'max_search_radius': self.max_search_radius,
            'overlap_penalty': self.overlap_penalty_factor,
            'dist_penalty': self.far_dist_penalty_factor,
            'min_req_dist': self._calculate_min_req_dist(self.num_sites)
        }
        logger.info(f"PSO优化器初始化完成 - 站点数: {self.num_sites}, 粒子数: {self.num_particles}")

    def _calculate_min_req_dist(self, num_sites):
        """Heuristic minimum inter-site spacing from area-per-site, clamped to radius limits."""
        area = getattr(self.boundary_polygon, 'area', 1e6)
        if num_sites == 0: return 300
        # Radius of a disc holding 1/num_sites of the area, inflated by 20%.
        ideal = np.sqrt(area / num_sites / np.pi) * 1.2
        return min(max(self.radius * 0.6, 300), min(ideal, self.radius * 1.8, self.max_search_radius * 0.8))

    def _evaluate_single_particle_precise(self, position: np.ndarray) -> tuple:
        """Score one particle on the GPU evaluator.

        Returns (total_cost, pickup_score, cluster_score, penalty), where
        total_cost is the negated fitness (PSO minimizes). On any exception
        the particle gets (inf, 0, 0, 1) so it can never become a best.
        """
        try:
            coords = [(round(position[i], 5), round(position[i + 1], 5)) for i in range(0, len(position), 2)]
            
            # Delegates to the final fitness function, which includes population data.
            fitness_scores = self.gpu_evaluator.calculate_luckin_fitness_final(coords, self.eval_config)
            
            # Negate: the evaluator returns "higher is better", PSO minimizes cost.
            total_cost = -fitness_scores.get('total_fitness', 0)
            
            pickup_score = fitness_scores.get('pickup_score', 0)
            cluster_score = fitness_scores.get('cluster_score', 0)
            penalty = fitness_scores.get('penalty', 0)

            return (total_cost, pickup_score, cluster_score, penalty)
        
        except Exception as e:
            logger.error(f"评估粒子时发生异常(luckin_fitness_final): {e}", exc_info=True)
            return (float('inf'), 0, 0, 1)
            
    def _generate_valid_points_cache(self, cache_size: int = 2000):
        """Rejection-sample up to `cache_size` random points inside the boundary polygon.

        Raises RuntimeError when fewer than 100 points could be generated,
        which almost always indicates a broken boundary shapefile.
        """
        logger.info("正在生成有效点缓存...")
        points = []
        attempts = 0
        max_attempts = cache_size * 50 # generous attempt budget for awkward polygons
        minx, miny, maxx, maxy = self.bounds
        
        # Make sure the boundary polygon is valid before sampling against it.
        if not self.boundary_polygon.is_valid:
            logger.warning("边界多边形(boundary_polygon)无效，正在尝试修复 (buffer(0))...")
            # buffer(0) is the standard shapely trick to heal self-intersections.
            self.boundary_polygon = self.boundary_polygon.buffer(0)
            if not self.boundary_polygon.is_valid:
                 raise ValueError("无法修复边界多边形，请检查shapefile文件。")

        while len(points) < cache_size and attempts < max_attempts:
            p = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
            if self.boundary_polygon.contains(p):
                points.append((p.x, p.y))
            attempts += 1
        
        # Diagnostic guard: fewer than 100 points means sampling effectively failed.
        if len(points) < 100:
            logger.error(f"致命错误：未能生成足够数量的有效点！只生成了 {len(points)} 个。")
            logger.error("这通常意味着边界文件(.shp)存在问题，或者其几何形状非常奇特。")
            # Fail fast rather than continue optimizing with bad data.
            raise RuntimeError("有效点缓存生成失败，优化无法继续。")
        
        logger.info(f"有效点缓存生成完毕: {len(points)} 个点")
        # Debug aid: print a sample so distinct points are visibly confirmed.
        if len(points) > 5:
            print("缓存点样本:", points[:5])
        return points

    def _init_particles(self, initial_population: Optional[List[Tuple[float, float]]] = None):
        """Create the initial swarm; optionally seed it by jittering a provided solution."""
        particles = []
        if initial_population is not None:
            logger.info("从提供的初始解生成粒子群...")
            base_particle = np.array(initial_population).flatten()
            particles.append(base_particle)
            # Gaussian jitter of 5% of the x-extent around the seed solution.
            noise_level = (self.bounds[2] - self.bounds[0]) * 0.05
            for _ in range(self.num_particles - 1):
                noise = np.random.normal(0, noise_level, self.dim)
                new_particle = base_particle + noise
                self._repair_particle_boundaries(new_particle)
                particles.append(new_particle)
        else:
            logger.info("随机生成初始粒子群...")
            particles = [np.array(random.sample(self.valid_points_cache, self.num_sites)).flatten() for _ in range(self.num_particles)]
        velocities = [np.random.uniform(-0.01, 0.01, self.dim) for _ in range(self.num_particles)]
        return particles, velocities
    
    def _repair_particle_boundaries(self, particle: np.ndarray):
        """Replace any site that fell outside the polygon with a cached valid point (in place)."""
        for j in range(0, self.dim, 2):
            if not self.boundary_polygon.contains(Point(particle[j], particle[j + 1])):
                particle[j], particle[j + 1] = random.choice(self.valid_points_cache)
    
    def _calculate_improvement_rate(self, history_scores: list, window_size: int = 10) -> float:
        """Average per-iteration improvement rate over the last `window_size` scores."""
        if len(history_scores) < window_size:
            # Early iterations: report a healthy rate to encourage exploration.
            return 0.01 

        recent_scores = history_scores[-window_size:]
        initial_score = recent_scores[0]
        final_score = recent_scores[-1]
        
        # Guard against division by zero (and the no-change case).
        if initial_score == 0 or initial_score == final_score: 
            return 0.0
        
        # Improvement = (initial - final) / |initial|; positive when cost dropped.
        improvement = (initial_score - final_score) / abs(initial_score)
        
        # Spread over the window to get a per-iteration rate.
        return improvement / window_size
    # --- Core execution methods ---

    def set_cpu_road_network_for_analysis(self, road_network: HierarchicalRoadNetwork):
        """Attach a CPU road network used only by analyze_solution()."""
        logger.info("CPU road network has been set for final analysis.")
        self.road_network = road_network

    def fit(self, progress_callback: Optional[callable] = None, 
            evaluation_mode: str = 'precise', 
            initial_population: Optional[List[Tuple[float, float]]] = None) -> Optional[List[Tuple[float, float]]]:
        """
        Run the PSO optimization and return the best site coordinates (or None on failure).

        - Particles are evaluated serially in a plain loop.
        - Integrates the feedback-based adaptive parameter scheduler.
        - progress_callback(iteration, max_iter, best_score) is invoked each iteration.
        """
        start_time = time.time()
        logger.info(f"=== 开始PSO优化 (评估模式: {evaluation_mode}) ===")
        
        if evaluation_mode != 'precise' and self.gpu_evaluator:
            logger.warning(f"GPU评估器已提供，强制使用 'precise' 评估模式。")
            evaluation_mode = 'precise'
        # NOTE(review): _evaluate_single_particle_hybrid is referenced here but is
        # not defined in this module view — a non-'precise' mode without a GPU
        # evaluator would raise AttributeError. Confirm the hybrid evaluator exists.
        eval_func = self._evaluate_single_particle_precise if evaluation_mode == 'precise' else self._evaluate_single_particle_hybrid

        try:
            # --- Phase 1: initialize the swarm ---
            logger.info("正在初始化粒子群...")
            particles, velocities = self._init_particles(initial_population)
            p_best_pos = [p.copy() for p in particles]
            
            # --- Phase 2: initial evaluation ---
            logger.info("正在执行初始评估...")
            p_best_scores = [float('inf')] * self.num_particles

            for i in tqdm(range(self.num_particles), desc="初始评估"):
                try:
                    p_best_scores[i] = eval_func(particles[i])[0]
                except Exception as exc:
                    logger.error(f"粒子 {i} 在初始评估中失败: {exc}", exc_info=True)
                    # NOTE(review): failed particles get a sentinel score of 1.0
                    # rather than inf — presumably to keep them comparable; confirm.
                    p_best_scores[i] = 1.0

            best_idx = np.argmin(p_best_scores)
            self.global_best_position = p_best_pos[best_idx].copy()
            self.global_best_score = p_best_scores[best_idx]
            
            self.g_best_scores.append(self.global_best_score)
            logger.info(f"初始化完成 - 初始最优值: {self.global_best_score:.4f}")

            # --- Phase 3: main optimization loop ---
            logger.info("正在开始主优化循环...")
            last_improve_iter = 0
            
            # History of global-best scores used to compute the improvement rate.
            history_scores_for_rate = [self.global_best_score]
            
            # Start from the static PSO parameters; overwritten when adaptive.
            current_pso_params = {'w': self.w, 'c1': self.c1, 'c2': self.c2}

            for iteration in range(self.max_iter):
                # Update dynamic PSO parameters from performance feedback.
                if self.use_adaptive_params:
                    # 1. Compute the recent improvement rate.
                    improvement_rate = self._calculate_improvement_rate(history_scores_for_rate)
                    # 2. Ask the scheduler for this iteration's (w, c1, c2).
                    current_pso_params = self.scheduler.update_parameters(
                        iteration, self.max_iter, improvement_rate
                    )

                current_scores = [float('inf')] * self.num_particles
                for i in tqdm(range(self.num_particles), desc=f"迭代 {iteration + 1}/{self.max_iter}"):
                    try:
                        current_scores[i] = eval_func(particles[i])[0]
                    except Exception as exc:
                        logger.error(f"粒子 {i} 在迭代 {iteration + 1} 中失败: {exc}", exc_info=True)
                        current_scores[i] = 1.0
                
                # Update personal bests and the global best.
                for i in range(self.num_particles):
                    if current_scores[i] < p_best_scores[i]:
                        p_best_scores[i], p_best_pos[i] = current_scores[i], particles[i].copy()
                        if current_scores[i] < self.global_best_score:
                            self.global_best_score = current_scores[i]
                            self.global_best_position = particles[i].copy()
                            last_improve_iter = iteration
                
                # Record convergence history.
                self.g_best_scores.append(self.global_best_score)
                # Also feed the improvement-rate window.
                history_scores_for_rate.append(self.global_best_score)
                
                logger.info(f"Iteration {iteration + 1}/{self.max_iter} | Best Score: {self.global_best_score:.4f} | w: {current_pso_params['w']:.2f}, c1: {current_pso_params['c1']:.2f}, c2: {current_pso_params['c2']:.2f}")

                if progress_callback:
                    progress_callback(iteration + 1, self.max_iter, self.global_best_score)
                
                if iteration - last_improve_iter > self.early_stop_patience:
                    logger.info(f"连续 {self.early_stop_patience} 次迭代无改进，提前停止。")
                    break
                
                # Update velocities and positions using the (possibly adaptive) parameters.
                for i in range(self.num_particles):
                    r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim)
                    velocities[i] = (current_pso_params['w'] * velocities[i] + 
                                     current_pso_params['c1'] * r1 * (p_best_pos[i] - particles[i]) + 
                                     current_pso_params['c2'] * r2 * (self.global_best_position - particles[i]))
                    # Clamp velocity to 5% of the x-extent to keep moves stable.
                    v_limit = 0.05 * (self.bounds[2] - self.bounds[0])
                    velocities[i] = np.clip(velocities[i], -v_limit, v_limit)
                    particles[i] += velocities[i]
                    self._repair_particle_boundaries(particles[i])

            # --- Optimization finished ---
            total_time = time.time() - start_time
            logger.info(f"PSO优化完成。总耗时: {total_time:.2f}s，最终最优值: {self.global_best_score:.4f}")
            if self.global_best_position is None: 
                logger.warning("优化完成，但未能找到全局最优位置。")
                return None
            return [(self.global_best_position[i], self.global_best_position[i+1]) for i in range(0, self.dim, 2)]

        except Exception as e:
            logger.error(f"PSO优化过程中发生严重错误: {e}", exc_info=True)
            return None
    
    def analyze_solution(self, position: np.ndarray) -> dict:
        """
        Produce a detailed post-optimization report for a solution vector.

        Always recomputes the model score; the spacing / coverage / POI
        sections require the CPU road network (self.road_network) and are
        skipped with a warning when it is absent.
        """
        if not isinstance(position, np.ndarray):
            # Accept lists of (x, y) pairs by flattening into the vector form.
            position = np.array(position).flatten()

        results = {}
        
        # --- Step 1: model score ---
        # Always uses the high-precision evaluator for the final report.
        # NOTE(review): _evaluate_single_particle_precise returns
        # (total_cost, pickup_score, cluster_score, penalty); the key names
        # below ('poi_weighted', 'population_weighted', 'spacing_weighted')
        # look stale relative to that tuple — verify downstream consumers.
        scores = self._evaluate_single_particle_precise(position)
        results['model_score'] = {
            'total': scores[0], 
            'poi_weighted': scores[1], 
            'population_weighted': scores[2], 
            'spacing_weighted': scores[3]
        }
        
        coords = [(position[i], position[i+1]) for i in range(0, len(position), 2)]
        num_sites = len(coords)
        results['site_count'] = num_sites

        # --- Step 2: bail out early if no CPU road network is available ---
        if self.road_network is None:
            logger.warning("无法进行详细的解决方案分析（如覆盖率、间距），因为没有提供CPU路网模型。")
            # Return only the model scores computed above.
            return results

        # --- Step 3: road-network-based detail analysis ---
        logger.info("使用CPU路网模型进行详细的解决方案分析...")
        
        # a. Spacing statistics over all site pairs.
        if num_sites > 1:
            try:
                # Pairwise network distances via the CPU road network.
                dists = [self.road_network.get_distance_fast(coords[i], coords[j]) for i in range(num_sites) for j in range(i + 1, num_sites)]
                if dists:
                    results['spacing_stats'] = {
                        'average_distance_m': np.mean(dists), 
                        'min_distance_m': min(dists), 
                        'max_distance_m': max(dists)
                    }
            except Exception as e:
                logger.error(f"计算间距统计时出错: {e}")
                results['spacing_stats'] = {}

        # b. Population coverage.
        # population_values only exists when population_points was supplied.
        if hasattr(self, 'population_values') and self.population_values.any():
            try:
                total_pop = np.sum(self.population_values)
                covered_mask = np.zeros(len(self.population_values), dtype=bool)
                site_kdtree = KDTree(np.array(coords))
                
                for i, pop_coord in enumerate(self.population_coords):
                    # KDTree gives the Euclidean-nearest site cheaply...
                    dist, idx = site_kdtree.query(pop_coord)
                    # ...then a 1.5x-radius prune gates the expensive network check.
                    if dist < self.radius * 1.5 and self.road_network.get_distance_fast(coords[idx], tuple(pop_coord)) <= self.radius:
                        covered_mask[i] = True
                
                covered_pop = np.sum(self.population_values[covered_mask])
                results['population_coverage'] = {
                    'total_population': total_pop, 
                    'actual_covered_population': covered_pop, 
                    'coverage_ratio': covered_pop / total_pop if total_pop > 0 else 0
                }
            except Exception as e:
                logger.error(f"计算人口覆盖率时出错: {e}")
                results['population_coverage'] = {}

        # c. POI service rate (same prune-then-verify pattern as coverage).
        if hasattr(self, 'poi_coords') and self.poi_coords:
            try:
                total_poi = len(self.poi_coords)
                serviced_poi = 0
                site_kdtree = KDTree(np.array(coords))
                
                for poi in self.poi_coords:
                    dist, idx = site_kdtree.query(poi)
                    if dist < self.max_search_radius * 1.5 and self.road_network.get_distance_fast(coords[idx], poi) <= self.max_search_radius:
                        serviced_poi += 1
                
                results['poi_service'] = {
                    'total_poi_count': total_poi, 
                    'serviced_poi_count': serviced_poi, 
                    'service_ratio': serviced_poi / total_poi if total_poi > 0 else 0
                }
            except Exception as e:
                logger.error(f"计算POI服务率时出错: {e}")
                results['poi_service'] = {}
            
        return results

    def export_results(self, coords: List[Tuple[float, float]], file_path: str, analysis_data: Optional[dict] = None):
        """Write the run configuration, site coordinates, convergence history and
        optional analysis metrics to `file_path` as UTF-8 JSON (best-effort)."""
        try:
            import json
            results = {
                'optimization_info': {
                    'num_sites': self.num_sites, 'num_particles': self.num_particles,
                    'max_iter': self.max_iter, 'final_score': self.global_best_score
                },
                'coordinates': [{'site_id': i+1, 'longitude': lon, 'latitude': lat} for i, (lon, lat) in enumerate(coords)],
                'convergence_history': self.g_best_scores
            }
            if analysis_data: results['quality_metrics'] = analysis_data
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(results, f, indent=2, ensure_ascii=False)
            logger.info(f"结果已导出到: {file_path}")
        except Exception as e:
            logger.error(f"导出结果时出错: {e}")
        
        

# =========================================================================
# === 5. 地图生成函数 (最终版) ============================================
# =========================================================================
def create_scenario_map(scenario_name: str, 
                        optimal_locations: list, 
                        boundary_polygon_3857,
                        poi_coords_3857: list,
                        population_points_3857: list,
                        road_network_gdf_3857=None,
                        actual_locations_gdf=None,
                        output_folder: str = "scenario_maps"):
    """Render an interactive folium map for one optimization scenario and save it as HTML.

    All geometry inputs are expected in Web Mercator (EPSG:3857) and are reprojected
    to WGS84 (EPSG:4326) for display in folium.

    Args:
        scenario_name: Label used for the map layer title and the output file name.
        optimal_locations: Optimized site coordinates as (x, y) pairs in EPSG:3857.
        boundary_polygon_3857: Shapely polygon of the study-area boundary (EPSG:3857).
        poi_coords_3857: POI coordinates as (x, y) pairs in EPSG:3857 (may be empty).
        population_points_3857: Iterable of ((x, y), weight) pairs in EPSG:3857
            used to draw the population heat map (may be empty).
        road_network_gdf_3857: Optional GeoDataFrame of road geometries (EPSG:3857).
        actual_locations_gdf: Optional GeoDataFrame of real-world reference sites
            (reprojected to WGS84 if its CRS differs).
        output_folder: Directory where the HTML file is written (created if missing).
    """
    logger.info(f"-> 正在为情景 '{scenario_name}' 生成地图...")
    
    # Reproject the optimized sites and the boundary to WGS84 for folium.
    optimal_gdf_wgs84 = gpd.GeoDataFrame(geometry=[Point(loc) for loc in optimal_locations], crs="EPSG:3857").to_crs("EPSG:4326")
    boundary_gdf_wgs84 = gpd.GeoDataFrame(geometry=[boundary_polygon_3857], crs="EPSG:3857").to_crs("EPSG:4326")
    
    poi_gdf_wgs84 = None
    if poi_coords_3857:
        poi_gdf_wgs84 = gpd.GeoDataFrame(geometry=[Point(p) for p in poi_coords_3857], crs="EPSG:3857").to_crs("EPSG:4326")
    
    # HeatMap expects [lat, lon, weight] triples in WGS84.
    pop_data_wgs84 = []
    if population_points_3857:
        pop_coords = [Point(p[0]) for p in population_points_3857]
        pop_values = [p[1] for p in population_points_3857]
        pop_gdf_wgs84 = gpd.GeoDataFrame(geometry=pop_coords, crs="EPSG:3857").to_crs("EPSG:4326")
        pop_data_wgs84 = [[point.y, point.x, value] for point, value in zip(pop_gdf_wgs84.geometry, pop_values)]

    # Dissolve the boundary once and reuse it: union_all() is an expensive
    # geometric operation and was previously computed twice for the centroid.
    boundary_union = boundary_gdf_wgs84.union_all()
    map_center = [boundary_union.centroid.y, boundary_union.centroid.x]
    m = folium.Map(location=map_center, zoom_start=12, tiles="cartodbpositron")

    if pop_data_wgs84: HeatMap(pop_data_wgs84, name="人口热力图", radius=15).add_to(m)
    folium.GeoJson(boundary_gdf_wgs84, name="研究区域边界", style_function=lambda x: {'color': 'black', 'weight': 2, 'fillOpacity': 0.1}).add_to(m)
    
    # POI layer is hidden by default (show=False) to keep the initial view clean.
    if poi_gdf_wgs84 is not None:
        poi_group = folium.FeatureGroup(name="POI 位置", show=False)
        for point in poi_gdf_wgs84.geometry:
            folium.CircleMarker(location=[point.y, point.x], radius=3, color='blue', fill=True, fill_color='blue', fill_opacity=0.6).add_to(poi_group)
        poi_group.add_to(m)

    if road_network_gdf_3857 is not None:
        road_group = folium.FeatureGroup(name="路网信息", show=False)
        road_network_wgs84 = road_network_gdf_3857.to_crs("EPSG:4326")
        folium.GeoJson(road_network_wgs84, style_function=lambda x: {'color': 'gray', 'weight': 1}).add_to(road_group)
        road_group.add_to(m)

    # Reference markers for existing real-world locations (green coffee icons).
    if actual_locations_gdf is not None:
        actual_group = folium.FeatureGroup(name="实际位置 (例如, 瑞幸)")
        processed_actual_gdf = actual_locations_gdf.copy()
        if processed_actual_gdf.crs and processed_actual_gdf.crs.to_epsg() != 4326:
            processed_actual_gdf = processed_actual_gdf.to_crs("EPSG:4326")
        for _, row in processed_actual_gdf.iterrows():
            folium.Marker(location=[row.geometry.y, row.geometry.x], icon=folium.Icon(color='green', icon='coffee', prefix='fa')).add_to(actual_group)
        actual_group.add_to(m)

    # Optimized sites (red stars) — the main result layer, always visible.
    optimized_group = folium.FeatureGroup(name=f"最优选址点 ({scenario_name})")
    for idx, point in enumerate(optimal_gdf_wgs84.geometry):
        folium.Marker(location=[point.y, point.x], popup=f"最优站点 {idx+1}", icon=folium.Icon(color='red', icon='star', prefix='fa')).add_to(optimized_group)
    optimized_group.add_to(m)
    
    # Zoom the map to the study-area extent; total_bounds is (minx, miny, maxx, maxy).
    bounds = boundary_gdf_wgs84.total_bounds
    m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]]) 
    folium.LayerControl().add_to(m)
    
    os.makedirs(output_folder, exist_ok=True)
    map_path = os.path.join(output_folder, f"{scenario_name}_map.html")
    m.save(map_path)
    logger.info(f"-> 地图已保存至: {map_path}")