import numpy as np
import networkx as nx
import random
from shapely.geometry import Point, LineString, MultiLineString, Polygon
from scipy.spatial import KDTree
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
import time
import matplotlib.pyplot as plt
from functools import lru_cache
import logging
from collections import defaultdict
import pickle
import os
import geopandas as gpd
from typing import List, Tuple, Optional, Dict, Any
import pandas as pd
from shapely.ops import unary_union
import gc
import folium
from folium.plugins import HeatMap

# =========================================================================
# === 2. Initialization & configuration ===================================
# =========================================================================
# Module-wide logging: timestamped INFO-level output; `logger` is shared by
# every class below.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# =========================================================================
# === 3. Helper functions =================================================
# =========================================================================
def euclidean_distance_numba(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return np.sqrt(dx * dx + dy * dy)

# =========================================================================
# === 4. Core class definitions ===========================================
# =========================================================================

class FastGridIndex:
    """High-performance uniform-grid spatial index for 2-D points.

    Points are bucketed into square cells of ``grid_size`` map units; a radius
    query only inspects the cells overlapping the query disc instead of
    scanning every indexed point.
    """
    def __init__(self, bounds, grid_size=500):
        """
        :param bounds: (minx, miny, maxx, maxy) extent of the indexed area.
        :param grid_size: cell edge length, in the same units as ``bounds``.
        """
        self.bounds = bounds
        self.grid_size = grid_size
        # Fix: use ceil so a partial trailing cell gets its own bucket instead
        # of being merged (via clamping) into the previous, oversized one.
        self.nx = max(1, int(np.ceil((bounds[2] - bounds[0]) / grid_size)))
        self.ny = max(1, int(np.ceil((bounds[3] - bounds[1]) / grid_size)))
        self.grid = defaultdict(list)

    def _get_grid_cell(self, x, y):
        """Map a coordinate to its (i, j) cell index, clamped to the grid."""
        i = int((x - self.bounds[0]) / self.grid_size)
        j = int((y - self.bounds[1]) / self.grid_size)
        return max(0, min(i, self.nx - 1)), max(0, min(j, self.ny - 1))

    def insert(self, point_id, x, y):
        """Register point (x, y) in its cell under ``point_id``."""
        self.grid[self._get_grid_cell(x, y)].append((point_id, x, y))

    def query_radius(self, x, y, radius):
        """Return every (point_id, px, py) within ``radius`` of (x, y)."""
        if radius < 0:
            return []
        grid_radius = int(np.ceil(radius / self.grid_size))
        center_i, center_j = self._get_grid_cell(x, y)
        # Compare squared distances: avoids a sqrt per candidate.
        radius_sq = radius * radius
        results = []
        for i in range(max(0, center_i - grid_radius), min(self.nx, center_i + grid_radius + 1)):
            for j in range(max(0, center_j - grid_radius), min(self.ny, center_j + grid_radius + 1)):
                for point_id, px, py in self.grid.get((i, j), []):
                    if (px - x) ** 2 + (py - y) ** 2 <= radius_sq:
                        results.append((point_id, px, py))
        return results

class HierarchicalRoadNetwork:
    """
    [Final integrated version] Hierarchical road-network structure.

    Builds an undirected graph from road geometries, selects high-degree
    "hub" nodes, and precomputes (a) hub-to-hub shortcut distances and
    (b) every node's access distance to its best hub, so that point-to-point
    network distances can be answered very quickly via hub lookup with an
    A* fallback for same-hub / missing-data cases.
    """
    def __init__(self, road_gdf, cache_file=None):
        """
        :param road_gdf: GeoDataFrame with a 'geometry' column of
            LineString / MultiLineString roads. May be None to force loading
            from ``cache_file``.
        :param cache_file: optional pickle path used to cache / restore the
            built network.
        :raises ValueError: if no road data is provided and no usable cache
            exists, or if 'geometry' is missing from ``road_gdf``.
        """
        self.cache_file = cache_file
        if road_gdf is None:
            # No road data supplied: the cache is the only possible source.
            if cache_file and os.path.exists(cache_file):
                logger.info(f"强制从缓存 '{cache_file}' 加载路网...")
                self._load_cache()
            else:
                raise ValueError("未提供路网数据，且找不到有效的缓存文件。")
            return

        if 'geometry' not in road_gdf.columns:
            raise ValueError("道路数据必须包含'geometry'列")

        if cache_file and os.path.exists(cache_file):
            logger.info("加载路网缓存...")
            try:
                self._load_cache()
                return
            except Exception as e:
                # Corrupt or stale cache: fall through and rebuild from scratch.
                logger.warning(f"加载缓存失败: {e}。将重新构建路网。")
        
        logger.info("构建分层路网...")
        self._build_hierarchical_network(road_gdf)
        if cache_file:
            self._save_cache()

    def _build_hierarchical_network(self, road_gdf):
        """Build the full graph, spatial indexes and multilevel hub index."""
        start_time = time.time()
        self.full_graph = nx.Graph()
        logger.info("正在将路网几何体添加到图中...")
        for geom in tqdm(road_gdf.geometry, desc="处理路网几何体"):
            if geom is None or geom.is_empty: continue
            # Segments of length <= 1 are dropped as noise (assumes a projected
            # CRS where lengths are in metres — TODO confirm).
            if isinstance(geom, LineString):
                if geom.length > 1: self._add_line_to_graph(geom, self.full_graph)
            elif isinstance(geom, MultiLineString):
                for line in geom.geoms:
                    if line.length > 1: self._add_line_to_graph(line, self.full_graph)
        
        self.node_coords = list(self.full_graph.nodes())
        if not self.node_coords: raise ValueError("路网中没有有效节点")
        
        logger.info("构建空间索引 (KDTree 和 Grid)...")
        self.kdtree = KDTree(self.node_coords)
        bounds = self._get_bounds()
        self.grid_index = FastGridIndex(bounds, grid_size=500)
        for i, (x, y) in enumerate(self.node_coords): self.grid_index.insert(i, x, y)
        
        self._build_multilevel_index()
        
        build_time = time.time() - start_time
        logger.info(f"分层路网构建完成: {len(self.full_graph.nodes)} 节点, "
                   f"{len(self.full_graph.edges)} 边, 耗时: {build_time:.1f}s")

    def _add_line_to_graph(self, line, graph):
        """Add each consecutive segment of ``line`` as a weighted graph edge.

        Coordinates are rounded to 5 decimals so nearly-identical vertices
        from adjacent geometries snap to the same graph node.
        """
        coords = list(line.coords)
        if len(coords) < 2: return
        for i in range(len(coords) - 1):
            p1 = (round(coords[i][0], 5), round(coords[i][1], 5))
            p2 = (round(coords[i+1][0], 5), round(coords[i+1][1], 5))
            if p1 != p2:
                dist = euclidean_distance_numba(p1[0], p1[1], p2[0], p2[1])
                graph.add_edge(p1, p2, weight=dist)

    def _get_bounds(self):
        """Return (minx, miny, maxx, maxy) over all graph node coordinates."""
        coords = np.array(self.node_coords)
        return (coords[:, 0].min(), coords[:, 1].min(), coords[:, 0].max(), coords[:, 1].max())

    def _build_multilevel_index(self):
        """Select hub nodes and precompute shortcut / access distances.

        Hubs are the top ~10% of nodes by degree. ``shortcut_graph`` stores
        hub-to-hub network distances (Dijkstra, cutoff 5000 — presumably
        metres, verify CRS); ``node_to_hub_dist`` maps every node to its
        cheapest (hub, distance) pair.
        """
        logger.info("构建多层次索引...")
        node_degrees = dict(self.full_graph.degree())
        # Select more hubs for better accuracy, e.g., top 10% (90th percentile)
        degree_threshold = np.percentile(list(node_degrees.values()), 90)
        self.important_nodes = {n for n, d in node_degrees.items() if d >= degree_threshold}
        logger.info(f"识别出 {len(self.important_nodes)} 个Hub节点。")
        
        important_node_list = list(self.important_nodes)
        if not important_node_list:
            # Degenerate case: no hubs — queries fall back to plain A*.
            logger.warning("未能识别出任何Hub节点。路网查询将回退到标准A*算法。")
            self.node_to_hub_dist = {}
            self.shortcut_graph = nx.Graph()
            return
            
        self.shortcut_graph = nx.Graph()
        logger.info("预计算Hub节点间的捷径...")
        for node1 in tqdm(important_node_list, desc="计算捷径"):
            try:
                distances = nx.single_source_dijkstra_path_length(
                    self.full_graph, node1, weight='weight', cutoff=5000)
                for node2 in important_node_list:
                    if node1 != node2 and node2 in distances:
                        self.shortcut_graph.add_edge(node1, node2, weight=distances[node2])
            except Exception as e:
                logger.warning(f"计算Hub节点 {node1} 路径时出错: {e}")

        logger.info("预计算所有节点的Hub接入距离 (这可能需要几分钟)...")
        self.node_to_hub_dist = {}
        hub_kdtree = KDTree(important_node_list)

        for node in tqdm(self.full_graph.nodes(), desc="计算接入距离"):
            if node in self.important_nodes:
                # A hub is its own access point at zero cost.
                self.node_to_hub_dist[node] = (node, 0.0)
                continue
            
            try:
                # Query more neighbors for better accuracy
                _, indices = hub_kdtree.query(node, k=min(10, len(important_node_list)))
                # k=1 returns a scalar index; normalise to an iterable.
                indices = indices if isinstance(indices, np.ndarray) else [indices]
                candidate_hubs = [important_node_list[i] for i in indices]
                
                min_dist, best_hub = float('inf'), None
                for hub in candidate_hubs:
                    try:
                        dist = nx.shortest_path_length(self.full_graph, source=node, target=hub, weight='weight')
                        if dist < min_dist:
                            min_dist, best_hub = dist, hub
                    except nx.NetworkXNoPath: continue
                if best_hub:
                    self.node_to_hub_dist[node] = (best_hub, min_dist)
            except Exception as e:
                 logger.warning(f"为节点 {node} 计算接入距离时出错: {e}")
        logger.info("Hub接入距离预计算完成。")

    def find_nearest_node_fast(self, point: Tuple[float, float]) -> Tuple[float, float]:
        """Return the graph node nearest to ``point``.

        Tries the grid index within a 500-unit radius first, then falls back
        to the KDTree; on any unexpected error returns an arbitrary node.
        """
        try:
            point = (round(point[0], 5), round(point[1], 5))
            candidates = self.grid_index.query_radius(point[0], point[1], radius=500)
            if not candidates:
                _, indices = self.kdtree.query([point], k=1)
                idx = indices[0] if isinstance(indices, np.ndarray) else indices
                return self.node_coords[idx]
            
            min_distance, nearest_node = float('inf'), None
            for _, x, y in candidates:
                dist = euclidean_distance_numba(point[0], point[1], x, y)
                if dist < min_distance:
                    min_distance, nearest_node = dist, (x, y)
            return nearest_node
        except Exception:
            # Best-effort fallback — callers always get *some* node.
            return self.node_coords[0] if self.node_coords else (0.0, 0.0)

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # the instance alive for the cache's lifetime (ruff B019) — consider a
    # per-instance dict cache instead. Left as-is here.
    @lru_cache(maxsize=500000)
    def get_distance_fast(self, origin: tuple, destination: tuple) -> float:
        """Approximate network distance between two arbitrary coordinates.

        Straight-line access/egress to the nearest graph nodes plus the
        hub-based road distance between those nodes.
        """
        if origin == destination: return 0.0
        orig_node = self.find_nearest_node_fast(origin)
        dest_node = self.find_nearest_node_fast(destination)
        dist_access = euclidean_distance_numba(origin[0], origin[1], orig_node[0], orig_node[1])
        dist_egress = euclidean_distance_numba(destination[0], destination[1], dest_node[0], dest_node[1])
        if orig_node == dest_node: return dist_access + dist_egress
        road_dist = self._get_road_distance(orig_node, dest_node)
        return dist_access + road_dist + dist_egress

    def _get_road_distance(self, orig_node: tuple, dest_node: tuple) -> float:
        """Network distance between two graph nodes via the hub index.

        Falls back to full-graph A* when the nodes share a hub or any hub
        data is missing; returns inf when the nodes are disconnected.
        """
        try:
            orig_hub, dist_to_orig_hub = self.node_to_hub_dist[orig_node]
            dest_hub, dist_to_dest_hub = self.node_to_hub_dist[dest_node]
            if orig_hub == dest_hub:
                return nx.astar_path_length(self.full_graph, orig_node, dest_node, heuristic=lambda u, v: euclidean_distance_numba(u[0], u[1], v[0], v[1]), weight='weight')
            hub_dist = nx.shortest_path_length(self.shortcut_graph, orig_hub, dest_hub, weight='weight')
            return dist_to_orig_hub + hub_dist + dist_to_dest_hub
        except (KeyError, nx.NetworkXNoPath, nx.NodeNotFound):
            try:
                return nx.astar_path_length(self.full_graph, orig_node, dest_node, heuristic=lambda u, v: euclidean_distance_numba(u[0], u[1], v[0], v[1]), weight='weight')
            except nx.NetworkXNoPath:
                return float('inf')

    def _save_cache(self):
        """Pickle the built network (graphs + precomputed tables) to disk."""
        try:
            with open(self.cache_file, 'wb') as f:
                pickle.dump({
                    'full_graph': self.full_graph, 'node_coords': self.node_coords,
                    'important_nodes': self.important_nodes, 'shortcut_graph': self.shortcut_graph,
                    'node_to_hub_dist': self.node_to_hub_dist
                }, f)
            logger.info(f"路网缓存已保存: {self.cache_file}")
        except Exception as e:
            logger.error(f"保存缓存失败: {e}")

    def _load_cache(self):
        """Restore the network from the pickle cache and rebuild the indexes.

        NOTE(review): pickle.load executes arbitrary code — only load cache
        files this process itself produced.
        """
        with open(self.cache_file, 'rb') as f:
            cache_data = pickle.load(f)
        self.full_graph = cache_data['full_graph']
        self.node_coords = cache_data['node_coords']
        self.important_nodes = cache_data['important_nodes']
        self.shortcut_graph = cache_data['shortcut_graph']
        self.node_to_hub_dist = cache_data['node_to_hub_dist']
        # KDTree / grid index are cheap to rebuild, so they are not cached.
        self.kdtree = KDTree(self.node_coords)
        bounds = self._get_bounds()
        self.grid_index = FastGridIndex(bounds, grid_size=500)
        for i, (x, y) in enumerate(self.node_coords): self.grid_index.insert(i, x, y)
        logger.info("路网缓存加载完成")

class AdaptiveParameterScheduler:
    # NOTE(review): this class is shadowed by the re-definition of
    # AdaptiveParameterScheduler further down this file, so this version is
    # dead code — confirm no external imports rely on it, then remove.
    """Improvement-rate-driven scheduler for the PSO parameters (w, c1, c2)."""
    def __init__(self, initial_w=0.9, initial_c1=2.0, initial_c2=2.0):
        # Baseline factors; w is annealed between min_w and max_w below.
        self.initial_w, self.initial_c1, self.initial_c2 = initial_w, initial_c1, initial_c2
        self.min_w, self.max_w = 0.1, 0.9
        
    def update_parameters(self, iteration: int, max_iter: int, improvement_rate: float) -> Dict[str, float]:
        """Return {'w', 'c1', 'c2'}: w decays linearly with progress; c1 is
        boosted (and c2 damped) while improvement > 1%, reversed below 0.1%."""
        progress = iteration / max_iter
        w = self.max_w - (self.max_w - self.min_w) * progress
        if improvement_rate > 0.01: c1, c2 = self.initial_c1 * 1.2, self.initial_c2 * 0.8
        elif improvement_rate < 0.001: c1, c2 = self.initial_c1 * 0.8, self.initial_c2 * 1.2
        else: c1, c2 = self.initial_c1, self.initial_c2
        return {'w': w, 'c1': c1, 'c2': c2}

class AdaptiveParameterScheduler:
    """Scheduler that linearly anneals the core PSO parameters (w, c1, c2).

    Each parameter is interpolated from its start value to its end value as
    the iteration count progresses from 0 to ``max_iter``.
    """
    def __init__(self, w_range=(0.9, 0.4), c1_range=(1.5, 2.5), c2_range=(2.5, 1.5)):
        """
        :param w_range: [start, end] of the inertia weight.
        :param c1_range: [start, end] of the cognitive (personal-best) factor.
        :param c2_range: [start, end] of the social (global-best) factor.

        Defaults are tuples (not lists) to avoid the shared mutable
        default-argument pitfall; callers may still pass lists.
        """
        self.w_start, self.w_end = w_range
        self.c1_start, self.c1_end = c1_range
        self.c2_start, self.c2_end = c2_range

    @staticmethod
    def _linear_interpolate(start_val, end_val, progress):
        """Linear interpolation between start_val and end_val, progress in [0, 1]."""
        return start_val + (end_val - start_val) * progress

    def get_params(self, iteration: int, max_iter: int) -> Dict[str, float]:
        """Return the annealed {'w', 'c1', 'c2'} for the given iteration."""
        progress = iteration / max_iter
        return {
            'w': self._linear_interpolate(self.w_start, self.w_end, progress),
            'c1': self._linear_interpolate(self.c1_start, self.c1_end, progress),
            'c2': self._linear_interpolate(self.c2_start, self.c2_end, progress),
        }


class OptimizedMultiSitePSO:
   
    def __init__(self, num_sites: int, boundary_polygon, road_network,
                 poi_coords: List[Tuple[float, float]] = None,
                 population_points: List[Tuple[Tuple[float, float], float]] = None, 
                 radius: float = 500, max_search_radius: float = 2000.0,
                 num_particles: int = 30, max_iter: int = 100,
                 
                 # Fixed PSO parameters, used as the starting point when
                 # adaptive scheduling is enabled
                 w: float = 0.7, c1: float = 1.5, c2: float = 1.5,
                 
                 # Switch controlling dynamic (annealed) PSO parameters
                 use_adaptive_params: bool = True,
                 
                 early_stop_patience: int = 15,
                 poi_weight=0.3, pop_weight=0.5, spacing_weight=0.2,
                 dist_pruning_factor: float = 1.5, overlap_penalty_factor: float = 0.15,
                 far_dist_penalty_factor: float = 0.3):
        """Multi-site PSO optimizer over a road network.

        :param num_sites: number of sites to place.
        :param boundary_polygon: shapely polygon constraining site positions.
        :param road_network: HierarchicalRoadNetwork used for distance queries.
        :param poi_coords: optional POI coordinates (accessibility objective).
        :param population_points: optional ((x, y), weight) pairs (coverage
            objective).
        :param radius: service radius for population coverage.
        :param max_search_radius: cap used when normalising POI costs.
        :param poi_weight / pop_weight / spacing_weight: fixed objective
            weights in the scalarised cost.
        :param dist_pruning_factor: euclidean pre-filter multiplier before
            computing expensive road distances.
        :param overlap_penalty_factor / far_dist_penalty_factor: coverage
            effectiveness penalties (multi-coverage overlap, distance decay).
        """
        self.num_sites, self.boundary_polygon, self.road_network = num_sites, boundary_polygon, road_network
        self.poi_coords, self.population_points = poi_coords or [], population_points or []
        self.radius, self.max_search_radius = radius, max_search_radius
        
        # Objective weights are fixed (not annealed during the run)
        self.poi_weight, self.pop_weight, self.spacing_weight = poi_weight, pop_weight, spacing_weight
        
        self.dist_pruning_factor = dist_pruning_factor
        self.overlap_penalty_factor, self.far_dist_penalty_factor = overlap_penalty_factor, far_dist_penalty_factor

        if self.population_points:
            # Split coordinates / weights into arrays and index them spatially.
            self.population_coords = np.array([p[0] for p in self.population_points])
            self.population_values = np.array([p[1] for p in self.population_points])
            self.population_kdtree = KDTree(self.population_coords) 
        else:
            self.population_coords, self.population_values, self.population_kdtree = np.array([]), np.array([]), None

        self.num_particles, self.max_iter = num_particles, max_iter
        
        # Keep the fixed w, c1, c2 (used when adaptive params are disabled)
        self.w, self.c1, self.c2 = w, c1, c2
        self.use_adaptive_params = use_adaptive_params
        self.early_stop_patience = early_stop_patience
        
        self.dim = num_sites * 2
        self.global_best_position, self.global_best_score = None, float('inf')
        self.g_best_scores = []
        self.bounds = self.boundary_polygon.bounds
        
        # If enabled, create the scheduler instance
        if self.use_adaptive_params:
            # These ranges define the dynamic-adjustment strategy
            self.scheduler = AdaptiveParameterScheduler(w_range=[0.9, 0.4], c1_range=[1.5, 2.5], c2_range=[2.5, 1.5])
        
        self.valid_points_cache = self._generate_valid_points_cache()

        logger.info(f"PSO优化器初始化完成 - 站点数: {num_sites}, 粒子数: {num_particles}, 最大迭代: {max_iter}")
    # --- Evaluation functions ---

    def _evaluate_single_particle_hybrid(self, position: np.ndarray) -> tuple:
        """[Fast hybrid-precision] evaluation of one particle.

        ``position`` is a flat [x0, y0, x1, y1, ...] vector. Returns
        (total_cost, norm_poi, norm_pop, norm_space), each normalised to
        [0, 1]. Population coverage uses a fast network-ball approximation;
        POI and spacing costs use full road-network distances. On any error
        the worst score (1.0, 1.0, 1.0, 1.0) is returned.
        """
        try:
            coords = [(round(position[i], 5), round(position[i + 1], 5)) for i in range(0, len(position), 2)]
            if not coords: return (1.0, 1.0, 1.0, 1.0)
            site_kdtree = KDTree(np.array(coords))

            # Population coverage cost (fast approximation): a population
            # point counts as covered if its nearest road node lies inside any
            # site's Dijkstra ball of radius self.radius.
            raw_coverage_cost = 1000.0
            if self.population_points:
                site_coverage_sets = []
                for site_coord in coords:
                    start_node = self.road_network.find_nearest_node_fast(site_coord)
                    try:
                        distances = nx.single_source_dijkstra_path_length(self.road_network.full_graph, start_node, cutoff=self.radius, weight='weight')
                        site_coverage_sets.append(set(distances.keys()))
                    except nx.NodeNotFound:
                        site_coverage_sets.append(set())
                
                covered_population = sum(
                    self.population_values[pop_idx]
                    for pop_idx, pop_coord in enumerate(self.population_coords)
                    if self.population_values[pop_idx] > 0 and any(
                        self.road_network.find_nearest_node_fast(tuple(pop_coord)) in site_set
                        for site_set in site_coverage_sets
                    )
                )
                total_population = np.sum(self.population_values)
                coverage_ratio = covered_population / total_population if total_population > 0 else 0.0
                raw_coverage_cost = (1.0 - coverage_ratio) * 1000.0

            # POI cost (high precision): road distance from each POI to its
            # nearest site, pre-filtered to the K euclidean-nearest sites.
            raw_avg_poi_cost = 0.0
            if self.poi_coords:
                poi_cost_sum = 0.0
                K_NEAREST_SITES = 5
                for poi in self.poi_coords:
                    
                    # 1. Query the k nearest candidate sites (euclidean)
                    _, nearest_site_indices = site_kdtree.query(poi, k=min(K_NEAREST_SITES, len(coords)))
                    
                    # 2. k=1 returns a scalar; normalise to an iterable
                    indices = nearest_site_indices if isinstance(nearest_site_indices, np.ndarray) else [nearest_site_indices]
                    
                    # 3. Shortest road-network distance among the candidates
                    min_road_distance = min(self.road_network.get_distance_fast(coords[i], tuple(poi)) for i in indices)
                    
                    poi_cost_sum += min(min_road_distance, self.max_search_radius * 1.5)
                raw_avg_poi_cost = poi_cost_sum / len(self.poi_coords)
            # Spacing cost (high precision): exponential penalty when site
            # pairs fall below the minimum required separation.
            raw_spacing_cost = 0.0
            if len(coords) > 1:
                area = getattr(self.boundary_polygon, 'area', 1e6)
                # Ideal spacing from an equal-area-per-site disc, scaled 1.2x
                ideal = np.sqrt(area / len(coords) / np.pi) * 1.2
                min_req = min(max(self.radius * 0.6, 300), min(ideal, self.radius * 1.8, self.max_search_radius * 0.8))
                violations, total_ratio = 0, 0.0
                for i in range(len(coords)):
                    for j in range(i + 1, len(coords)):
                        eu_dist = euclidean_distance_numba(coords[i][0], coords[i][1], coords[j][0], coords[j][1])
                        # Euclidean pre-filter: skip pairs clearly far enough apart
                        if eu_dist > min_req * self.dist_pruning_factor: continue
                        road_dist = self.road_network.get_distance_fast(coords[i], coords[j])
                        if road_dist < min_req:
                            total_ratio += (min_req - road_dist) / min_req
                            violations += 1
                if violations > 0:
                    raw_spacing_cost = 50 * (np.exp(3 * (total_ratio / violations)) - 1)

            # Normalisation and fixed-weight scalarisation
            norm_poi = np.clip(raw_avg_poi_cost / (self.max_search_radius * 1.5), 0, 1)
            norm_pop = np.clip(raw_coverage_cost / 1000.0, 0, 1)
            norm_space = np.clip(raw_spacing_cost / 500.0, 0, 1)
            total_cost = (norm_poi * self.poi_weight + norm_pop * self.pop_weight + norm_space * self.spacing_weight)
            return (total_cost, norm_poi, norm_pop, norm_space)
        except Exception as e:
            logger.error(f"评估粒子时发生异常(hybrid): {e}", exc_info=True)
            return (1.0, 1.0, 1.0, 1.0)

    def _evaluate_single_particle_precise(self, position: np.ndarray) -> tuple:
        """[High-precision] evaluation of one particle.

        Same contract as the hybrid evaluator — returns (total_cost,
        norm_poi, norm_pop, norm_space) in [0, 1] — but population coverage
        uses exact per-point road distances, with overlap and far-distance
        effectiveness penalties. NOTE(review): the POI and spacing sections
        duplicate the hybrid evaluator verbatim — candidates for extraction
        into shared helpers.
        """
        try:
            coords = [(round(position[i], 5), round(position[i + 1], 5)) for i in range(0, len(position), 2)]
            if not coords: return (1.0, 1.0, 1.0, 1.0)
            site_kdtree = KDTree(np.array(coords))

            # Population coverage cost (high precision): for each population
            # point, collect road distances to every site within the service
            # radius, then weight coverage by overlap / distance penalties.
            raw_coverage_cost = 1000.0
            if self.population_points:
                pop_info = defaultdict(list)
                for pop_idx, pop_coord_tuple in enumerate(self.population_coords):
                    if self.population_values[pop_idx] > 0:
                        pop_coord = tuple(pop_coord_tuple)
                        # Euclidean pre-filter before the expensive road query
                        indices = site_kdtree.query_ball_point(pop_coord, r=self.radius * self.dist_pruning_factor)
                        for site_idx in indices:
                            dist = self.road_network.get_distance_fast(coords[site_idx], pop_coord)
                            if dist <= self.radius:
                                pop_info[pop_idx].append(dist)
                total_pop = np.sum(self.population_values)
                weighted_pop = 0.0
                for pop_idx, distances in pop_info.items():
                    pop_val = self.population_values[pop_idx]
                    n_sites, avg_dist = len(distances), np.mean(distances)
                    # Diminishing returns when several sites cover the same point
                    eff = 1.0 / (1.0 + self.overlap_penalty_factor * max(0, n_sites - 1))
                    # Coverage effectiveness decays with distance, floored at 0.5
                    dist_factor = max(0.5, 1.0 - (avg_dist / self.radius) * self.far_dist_penalty_factor)
                    weighted_pop += pop_val * eff * dist_factor
                raw_coverage_cost = (1.0 - (weighted_pop / total_pop if total_pop > 0 else 0)) * 1000.0

            # POI cost (high precision)
            raw_avg_poi_cost = 0.0
            if self.poi_coords:
                poi_cost_sum = 0.0
                K_NEAREST_SITES = 5
                for poi in self.poi_coords:
                    # Key change: the one-liner was split into clear steps
                    
                    # 1. Query the k nearest candidate sites (euclidean)
                    _, nearest_site_indices = site_kdtree.query(poi, k=min(K_NEAREST_SITES, len(coords)))
                    
                    # 2. k=1 returns a scalar; normalise to an iterable
                    indices = nearest_site_indices if isinstance(nearest_site_indices, np.ndarray) else [nearest_site_indices]
                    
                    # 3. Shortest road-network distance among the candidates
                    min_road_distance = min(self.road_network.get_distance_fast(coords[i], tuple(poi)) for i in indices)
                    
                    poi_cost_sum += min(min_road_distance, self.max_search_radius * 1.5)
                raw_avg_poi_cost = poi_cost_sum / len(self.poi_coords)            
            # Spacing cost (high precision)
            raw_spacing_cost = 0.0
            if len(coords) > 1:
                area = getattr(self.boundary_polygon, 'area', 1e6)
                ideal = np.sqrt(area / len(coords) / np.pi) * 1.2
                min_req = min(max(self.radius * 0.6, 300), min(ideal, self.radius * 1.8, self.max_search_radius * 0.8))
                violations, total_ratio = 0, 0.0
                for i in range(len(coords)):
                    for j in range(i + 1, len(coords)):
                        eu_dist = euclidean_distance_numba(coords[i][0], coords[i][1], coords[j][0], coords[j][1])
                        if eu_dist > min_req * self.dist_pruning_factor: continue
                        road_dist = self.road_network.get_distance_fast(coords[i], coords[j])
                        if road_dist < min_req:
                            total_ratio += (min_req - road_dist) / min_req
                            violations += 1
                if violations > 0:
                    raw_spacing_cost = 50 * (np.exp(3 * (total_ratio / violations)) - 1)

            # Normalisation and fixed-weight scalarisation
            norm_poi = np.clip(raw_avg_poi_cost / (self.max_search_radius * 1.5), 0, 1)
            norm_pop = np.clip(raw_coverage_cost / 1000.0, 0, 1)
            norm_space = np.clip(raw_spacing_cost / 500.0, 0, 1)
            total_cost = (norm_poi * self.poi_weight + norm_pop * self.pop_weight + norm_space * self.spacing_weight)
            return (total_cost, norm_poi, norm_pop, norm_space)
        except Exception as e:
            logger.error(f"评估粒子时发生异常(precise): {e}", exc_info=True)
            return (1.0, 1.0, 1.0, 1.0)

    # --- Helper methods ---

    def _generate_valid_points_cache(self, cache_size: int = 2000):
        """Rejection-sample up to ``cache_size`` random points inside the
        boundary polygon.

        Gives up after cache_size * 10 draws; guarantees at least one point
        by falling back to the polygon centroid.
        """
        minx, miny, maxx, maxy = self.bounds
        points = []
        attempts = 0
        max_attempts = cache_size * 10
        while len(points) < cache_size and attempts < max_attempts:
            candidate = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
            if self.boundary_polygon.contains(candidate):
                points.append((candidate.x, candidate.y))
            attempts += 1
        logger.info(f"生成有效点缓存: {len(points)} 个点")
        if not points:
            points.append((self.boundary_polygon.centroid.x, self.boundary_polygon.centroid.y))
        return points

    def _init_particles(self, initial_population: Optional[List[Tuple[float, float]]] = None):
        """Create the initial swarm (positions and velocities).

        When an initial solution is given, the swarm is seeded with it plus
        boundary-repaired noisy copies; otherwise each particle is drawn from
        the valid-point cache.
        """
        if initial_population is not None:
            logger.info("从提供的初始解生成粒子群...")
            base_particle = np.array(initial_population).flatten()
            particles = [base_particle]
            # Noise scale: 5% of the boundary's x-extent
            noise_level = (self.bounds[2] - self.bounds[0]) * 0.05
            while len(particles) < self.num_particles:
                perturbed = base_particle + np.random.normal(0, noise_level, self.dim)
                self._repair_particle_boundaries(perturbed)
                particles.append(perturbed)
        else:
            logger.info("随机生成初始粒子群...")
            particles = []
            for _ in range(self.num_particles):
                sites = random.sample(self.valid_points_cache, self.num_sites)
                particles.append(np.array(sites).flatten())
        velocities = [np.random.uniform(-0.01, 0.01, self.dim) for _ in range(self.num_particles)]
        return particles, velocities
    
    def _repair_particle_boundaries(self, particle: np.ndarray):
        """Re-sample (in place) any site of ``particle`` that lies outside the
        boundary polygon, using the precomputed valid-point cache."""
        for idx in range(0, self.dim, 2):
            site = Point(particle[idx], particle[idx + 1])
            if self.boundary_polygon.contains(site):
                continue
            particle[idx], particle[idx + 1] = random.choice(self.valid_points_cache)
    
    # --- Core execution methods ---

    def fit(self, progress_callback: Optional[callable] = None, 
            evaluation_mode: str = 'precise', 
            initial_population: Optional[List[Tuple[float, float]]] = None) -> Optional[List[Tuple[float, float]]]:
        """Run the PSO main loop and return the best site layout found.

        :param progress_callback: optional fn(iteration, max_iter, best_score)
            invoked after every iteration.
        :param evaluation_mode: only 'precise' is supported; any other value
            is coerced to 'precise' with a warning.
        :param initial_population: optional seed solution for the swarm.
        :return: list of (x, y) site coordinates, or None on failure.
        """
        start_time = time.time()
        logger.info(f"=== 开始PSO优化 (评估模式: {evaluation_mode}) ===")
        # Fix: max_workers and eval_func were previously computed twice (the
        # first, mode-dependent assignment was dead code). Coerce the mode
        # first, then compute each exactly once.
        if evaluation_mode != 'precise':
            logger.warning(f"警告：当前配置为单一高精度优化，但传入的模式为 '{evaluation_mode}'。将强制使用 'precise' 模式。")
            evaluation_mode = 'precise'

        max_workers = min(32, (os.cpu_count() or 1))
        # In this configuration eval_func is always the high-precision evaluator.
        eval_func = self._evaluate_single_particle_precise
        try:
            logger.info("第一阶段：初始化粒子群...")
            particles, velocities = self._init_particles(initial_population)
            p_best_pos = [p.copy() for p in particles]

            logger.info("执行初始评估...")
            # Parallel initial evaluation; the evaluator takes no dynamic weights.
            p_best_scores = [float('inf')] * self.num_particles
            with ProcessPoolExecutor(max_workers=max_workers) as executor:
                futures = {executor.submit(eval_func, particles[i]): i for i in range(self.num_particles)}
                for future in tqdm(as_completed(futures), total=len(particles), desc="初始评估"):
                    particle_idx = futures[future]
                    try:
                        p_best_scores[particle_idx] = future.result()[0]
                    except Exception as exc:
                        logger.error(f"粒子 {particle_idx} 在初始评估中失败: {exc}")
                        p_best_scores[particle_idx] = 1.0  # worst normalised cost

            best_idx = np.argmin(p_best_scores)
            self.global_best_position = p_best_pos[best_idx].copy()
            self.global_best_score = p_best_scores[best_idx]
            self.g_best_scores.append(self.global_best_score)
            logger.info(f"初始化完成 - 初始最优值: {self.global_best_score:.4f}")

            logger.info("第二阶段：开始主优化循环...")
            last_improve_iter = 0

            # Start from the fixed PSO parameters
            current_pso_params = {'w': self.w, 'c1': self.c1, 'c2': self.c2}

            for iteration in range(self.max_iter):
                # When adaptive mode is on, refresh w/c1/c2 each iteration
                if self.use_adaptive_params:
                    current_pso_params = self.scheduler.get_params(iteration, self.max_iter)

                # Parallel evaluation of the current swarm
                with ProcessPoolExecutor(max_workers=max_workers) as executor:
                    futures = {executor.submit(eval_func, particles[i]): i for i in range(self.num_particles)}
                    current_scores = [float('inf')] * self.num_particles
                    pbar = tqdm(total=self.num_particles, desc=f"迭代 {iteration + 1}/{self.max_iter}")
                    for future in as_completed(futures):
                        particle_idx = futures[future]
                        try:
                            current_scores[particle_idx] = future.result()[0]
                        except Exception as exc:
                            logger.error(f"粒子 {particle_idx} 在迭代 {iteration + 1} 中失败: {exc}")
                            current_scores[particle_idx] = 1.0
                        pbar.update(1)
                    pbar.close()

                # Update personal bests and the global best
                for i in range(self.num_particles):
                    if current_scores[i] < p_best_scores[i]:
                        p_best_scores[i], p_best_pos[i] = current_scores[i], particles[i].copy()
                        if current_scores[i] < self.global_best_score:
                            self.global_best_score, self.global_best_position = current_scores[i], particles[i].copy()
                            last_improve_iter = iteration

                self.g_best_scores.append(self.global_best_score)
                logger.info(f"Iteration {iteration + 1}/{self.max_iter} | Best Score: {self.global_best_score:.4f} | w: {current_pso_params['w']:.2f}, c1: {current_pso_params['c1']:.2f}, c2: {current_pso_params['c2']:.2f}")

                if progress_callback:
                    progress_callback(iteration + 1, self.max_iter, self.global_best_score)
                if iteration - last_improve_iter > self.early_stop_patience:
                    logger.info(f"连续 {self.early_stop_patience} 次迭代无改进，提前停止。")
                    break

                # Standard PSO velocity/position update with the (possibly
                # annealed) parameters; velocities are clamped to 5% of the
                # boundary's x-extent and particles repaired into the polygon.
                for i in range(self.num_particles):
                    r1, r2 = np.random.rand(self.dim), np.random.rand(self.dim)
                    velocities[i] = (current_pso_params['w'] * velocities[i] + 
                                     current_pso_params['c1'] * r1 * (p_best_pos[i] - particles[i]) + 
                                     current_pso_params['c2'] * r2 * (self.global_best_position - particles[i]))
                    v_limit = 0.05 * (self.bounds[2] - self.bounds[0])
                    velocities[i] = np.clip(velocities[i], -v_limit, v_limit)
                    particles[i] += velocities[i]
                    self._repair_particle_boundaries(particles[i])

            total_time = time.time() - start_time
            logger.info(f"PSO优化完成。总耗时: {total_time:.2f}s，最终最优值: {self.global_best_score:.4f}")
            if self.global_best_position is None: 
                logger.warning("优化完成，但未能找到全局最优位置。")
                return None
            return [(self.global_best_position[i], self.global_best_position[i+1]) for i in range(0, self.dim, 2)]

        except Exception as e:
            logger.error(f"PSO优化过程中发生严重错误: {e}", exc_info=True)
            return None
    
    def analyze_solution(self, position: np.ndarray) -> dict:
        """Analyze a candidate solution and return a dict of quality metrics.

        Always evaluates with the high-precision model so the reported
        scores reflect the most faithful objective values (not a fast
        surrogate used during optimization).

        Args:
            position: Flat array of site coordinates [x1, y1, x2, y2, ...].

        Returns:
            dict with keys 'model_score' and 'site_count', plus
            'spacing_stats' (when >1 site), 'population_coverage' (when
            population data exists) and 'poi_service' (when POI data exists).
        """
        if not isinstance(position, np.ndarray):
            position = np.array(position)
        results = {}
        # NOTE: analysis always uses the high-precision evaluator to obtain
        # the most realistic result.
        scores = self._evaluate_single_particle_precise(position)
        results['model_score'] = {
            'total': scores[0],
            'poi_weighted': scores[1],
            'population_weighted': scores[2],
            'spacing_weighted': scores[3],
        }

        coords = [(position[i], position[i + 1]) for i in range(0, len(position), 2)]
        num_sites = len(coords)
        results['site_count'] = num_sites

        if num_sites > 1:
            # Pairwise road-network distances between all candidate sites.
            dists = [self.road_network.get_distance_fast(coords[i], coords[j])
                     for i in range(num_sites) for j in range(i + 1, num_sites)]
            results['spacing_stats'] = {
                'average_distance_m': np.mean(dists),
                'min_distance_m': min(dists),
                'max_distance_m': max(dists),
            }

        # Build the site KD-tree ONCE and reuse it for both the population
        # and the POI coverage checks (it was previously built twice over
        # the same coordinates). Also avoids a crash on an empty position.
        site_kdtree = KDTree(np.array(coords)) if num_sites > 0 else None

        if site_kdtree is not None and self.population_values.any():
            total_pop = np.sum(self.population_values)
            covered_mask = np.zeros(len(self.population_values), dtype=bool)
            for i, pop_coord in enumerate(self.population_coords):
                # Cheap Euclidean pre-filter (1.5x radius) before the more
                # expensive road-network distance query.
                dist, idx = site_kdtree.query(pop_coord)
                if dist < self.radius * 1.5 and self.road_network.get_distance_fast(coords[idx], tuple(pop_coord)) <= self.radius:
                    covered_mask[i] = True
            covered_pop = np.sum(self.population_values[covered_mask])
            results['population_coverage'] = {
                'total_population': total_pop,
                'actual_covered_population': covered_pop,
                'coverage_ratio': covered_pop / total_pop if total_pop > 0 else 0,
            }

        if site_kdtree is not None and self.poi_coords:
            total_poi = len(self.poi_coords)
            serviced_poi = 0
            for poi in self.poi_coords:
                # Same pre-filter idea, with the POI search radius.
                dist, idx = site_kdtree.query(poi)
                if dist < self.max_search_radius * 1.5 and self.road_network.get_distance_fast(coords[idx], poi) <= self.max_search_radius:
                    serviced_poi += 1
            results['poi_service'] = {
                'total_poi_count': total_poi,
                'serviced_poi_count': serviced_poi,
                'service_ratio': serviced_poi / total_poi if total_poi > 0 else 0,
            }

        return results

    def export_results(self, coords: List[Tuple[float, float]], file_path: str, analysis_data: Optional[dict] = None):
        """Serialize the optimization outcome to a JSON file.

        Writes run metadata, the site coordinates, the convergence history
        and (optionally) the quality metrics produced by analyze_solution.
        Failures are logged rather than raised (best-effort export).
        """
        try:
            import json
            payload: Dict[str, Any] = {
                'optimization_info': {
                    'num_sites': self.num_sites,
                    'num_particles': self.num_particles,
                    'max_iter': self.max_iter,
                    'final_score': self.global_best_score,
                },
                'coordinates': [
                    {'site_id': idx + 1, 'longitude': x, 'latitude': y}
                    for idx, (x, y) in enumerate(coords)
                ],
                'convergence_history': self.g_best_scores,
            }
            if analysis_data:
                payload['quality_metrics'] = analysis_data
            with open(file_path, 'w', encoding='utf-8') as fh:
                json.dump(payload, fh, indent=2, ensure_ascii=False)
            logger.info(f"结果已导出到: {file_path}")
        except Exception as e:
            logger.error(f"导出结果时出错: {e}")

# =========================================================================
# === 5. 地图生成函数 (最终版) ============================================
# =========================================================================
def create_scenario_map(scenario_name: str, 
                        optimal_locations: list, 
                        boundary_polygon_3857,
                        poi_coords_3857: list,
                        population_points_3857: list,
                        road_network_gdf_3857=None,
                        actual_locations_gdf=None,
                        output_folder: str = "scenario_maps"):
    """Render an interactive folium map for one scenario and save it as HTML.

    All inputs are in EPSG:3857 and are reprojected to WGS84 for display.
    Layers: population heat map, study-area boundary, optional POI markers,
    optional road network, optional ground-truth locations, and the
    optimized site markers. The file is written to
    ``<output_folder>/<scenario_name>_map.html``.

    Args:
        scenario_name: Label used in layer names and the output filename.
        optimal_locations: List of (x, y) EPSG:3857 coordinates of chosen sites.
        boundary_polygon_3857: Shapely polygon of the study area (EPSG:3857).
        poi_coords_3857: List of (x, y) POI coordinates, may be empty.
        population_points_3857: List of (coord, value) pairs for the heat map.
        road_network_gdf_3857: Optional GeoDataFrame of road geometries.
        actual_locations_gdf: Optional GeoDataFrame of real-world locations.
        output_folder: Directory for the HTML output (created if missing).
    """
    logger.info(f"-> 正在为情景 '{scenario_name}' 生成地图...")
    
    optimal_gdf_wgs84 = gpd.GeoDataFrame(geometry=[Point(loc) for loc in optimal_locations], crs="EPSG:3857").to_crs("EPSG:4326")
    boundary_gdf_wgs84 = gpd.GeoDataFrame(geometry=[boundary_polygon_3857], crs="EPSG:3857").to_crs("EPSG:4326")
    
    poi_gdf_wgs84 = None
    if poi_coords_3857:
        poi_gdf_wgs84 = gpd.GeoDataFrame(geometry=[Point(p) for p in poi_coords_3857], crs="EPSG:3857").to_crs("EPSG:4326")
    
    pop_data_wgs84 = []
    if population_points_3857:
        # Each entry is (coordinate, population value) — TODO confirm with caller.
        pop_coords = [Point(p[0]) for p in population_points_3857]
        pop_values = [p[1] for p in population_points_3857]
        pop_gdf_wgs84 = gpd.GeoDataFrame(geometry=pop_coords, crs="EPSG:3857").to_crs("EPSG:4326")
        pop_data_wgs84 = [[point.y, point.x, value] for point, value in zip(pop_gdf_wgs84.geometry, pop_values)]

    # Compute the boundary union once — it was previously evaluated twice
    # (an expensive geometric operation) just to read one centroid.
    boundary_centroid = boundary_gdf_wgs84.union_all().centroid
    map_center = [boundary_centroid.y, boundary_centroid.x]
    m = folium.Map(location=map_center, zoom_start=12, tiles="cartodbpositron")

    if pop_data_wgs84: HeatMap(pop_data_wgs84, name="人口热力图", radius=15).add_to(m)
    folium.GeoJson(boundary_gdf_wgs84, name="研究区域边界", style_function=lambda x: {'color': 'black', 'weight': 2, 'fillOpacity': 0.1}).add_to(m)
    
    if poi_gdf_wgs84 is not None:
        # POI layer is hidden by default (show=False) to keep the map readable.
        poi_group = folium.FeatureGroup(name="POI 位置", show=False)
        for point in poi_gdf_wgs84.geometry:
            folium.CircleMarker(location=[point.y, point.x], radius=3, color='blue', fill=True, fill_color='blue', fill_opacity=0.6).add_to(poi_group)
        poi_group.add_to(m)

    if road_network_gdf_3857 is not None:
        road_group = folium.FeatureGroup(name="路网信息", show=False)
        road_network_wgs84 = road_network_gdf_3857.to_crs("EPSG:4326")
        folium.GeoJson(road_network_wgs84, style_function=lambda x: {'color': 'gray', 'weight': 1}).add_to(road_group)
        road_group.add_to(m)

    if actual_locations_gdf is not None:
        actual_group = folium.FeatureGroup(name="实际位置 (例如, 瑞幸)")
        # Reproject a copy only when needed; never mutate the caller's frame.
        processed_actual_gdf = actual_locations_gdf.copy()
        if processed_actual_gdf.crs and processed_actual_gdf.crs.to_epsg() != 4326:
            processed_actual_gdf = processed_actual_gdf.to_crs("EPSG:4326")
        for _, row in processed_actual_gdf.iterrows():
            folium.Marker(location=[row.geometry.y, row.geometry.x], icon=folium.Icon(color='green', icon='coffee', prefix='fa')).add_to(actual_group)
        actual_group.add_to(m)

    optimized_group = folium.FeatureGroup(name=f"最优选址点 ({scenario_name})")
    for idx, point in enumerate(optimal_gdf_wgs84.geometry):
        folium.Marker(location=[point.y, point.x], popup=f"最优站点 {idx+1}", icon=folium.Icon(color='red', icon='star', prefix='fa')).add_to(optimized_group)
    optimized_group.add_to(m)
    
    # Fit the viewport to the study area: total_bounds is (minx, miny, maxx, maxy);
    # folium expects [[south, west], [north, east]].
    bounds = boundary_gdf_wgs84.total_bounds
    m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]]) 
    folium.LayerControl().add_to(m)
    
    os.makedirs(output_folder, exist_ok=True)
    map_path = os.path.join(output_folder, f"{scenario_name}_map.html")
    m.save(map_path)
    logger.info(f"-> 地图已保存至: {map_path}")