import pickle
import time
import gc
import numpy as np
import multiprocessing as mp
import logging
from pso_core_gpu import HierarchicalRoadNetwork, OptimizedMultiSitePSO 
from gpu_evaluator import GPUEvaluator
import pandas as pd

# (setup_progress_logger and progress_callback are unchanged from the original version)
def setup_progress_logger():
    """Create (or reset) the dedicated PSO progress logger.

    Returns a logger named 'pso_progress' that writes plain messages to
    'pso_progress.log', truncating any previous run's file (mode='w').

    Fixes over the original:
    - explicit encoding='utf-8' so non-ASCII text cannot raise
      UnicodeEncodeError under a platform-dependent default encoding;
    - propagate=False so progress lines go only to the file and are not
      duplicated through the root logger's handlers.
    """
    progress_logger = logging.getLogger('pso_progress')
    progress_logger.setLevel(logging.INFO)
    # Drop handlers left over from a previous call (e.g. re-running in a
    # notebook) so we never attach duplicates.
    if progress_logger.hasHandlers():
        progress_logger.handlers.clear()
    fh = logging.FileHandler('pso_progress.log', mode='w', encoding='utf-8')
    # Bare-message format: the file is meant to be tailed / parsed as-is.
    fh.setFormatter(logging.Formatter('%(message)s'))
    progress_logger.addHandler(fh)
    # Keep progress output out of the root logger.
    progress_logger.propagate = False
    return progress_logger

def progress_callback(iteration, max_iter, current_best_score):
    """Record one PSO iteration's progress to the 'pso_progress' logger.

    Flushes every handler after logging so an external watcher (e.g.
    ``tail -f pso_progress.log``) sees the line immediately.
    """
    logger = logging.getLogger('pso_progress')
    message = f"Iteration {iteration}/{max_iter} | Best Score: {current_best_score:.4f}"
    logger.info(message)
    # Force the line to disk right away instead of waiting for buffering.
    for h in logger.handlers:
        h.flush()

if __name__ == "__main__":
    # 在Windows和macOS上，'spawn'是更安全的多进程启动方法
    # 'force=True' 确保即使在Linux上也能使用，避免一些fork带来的问题
    mp.set_start_method('spawn', force=True) 
    setup_progress_logger()

    print("\n--- GPU加速优化脚本已启动 ---")
    
    # --- 1. 加载并完整解包输入数据 ---
    print("正在加载输入数据...")
    try:
        with open("pso_input_data.pkl", "rb") as f:
            pso_input_data = pickle.load(f)
        
        boundary_polygon_3857 = pso_input_data["boundary_polygon_3857"]
        poi_data_3857 = pso_input_data["poi_data_3857"]
        population_points_3857 = pso_input_data["population_points_3857"]
        road_network_cache = pso_input_data["road_network_cache"]
        print("✅ 输入数据加载并解包成功。")

    except FileNotFoundError:
        print("❌ 致命错误：找不到输入数据文件 'pso_input_data.pkl'。请先运行数据准备单元格。")
        exit()
    except KeyError as e:
        print(f"❌ 致命错误：输入数据文件 'pso_input_data.pkl' 中缺少关键键: {e}")
        exit()

    # --- 2. 初始化GPU和CPU模块 ---
    # a. 加载CPU路网模型。它有两个用途：
    #    1. 为GPUEvaluator提供底层的NetworkX图对象。
    #    2. 在优化完成后，用于最终的、详细的解决方案分析。
    road_network_for_analysis = HierarchicalRoadNetwork(road_gdf=None, cache_file=road_network_cache)
    
    # b. 初始化GPU评估器，将所有重计算所需的数据传输到GPU
    gpu_eval = GPUEvaluator(
        road_network_for_analysis.full_graph, 
        population_points_3857, 
        poi_data_3857
    )
    
    # =========================================================================
    # === 3. 配置并执行PSO优化 ================================================
    # =========================================================================
    print("\n" + "="*20 + " 开始执行GPU加速的高精度优化 " + "="*20)
    
    # 将所有PSO参数集中在此处，方便修改和实验
    pso_kwargs = {
        'num_particles': 20,
        'max_iter': 400,
        
        'pickup_radius': 500,
        'cluster_radius': 8000,
        'min_safe_distance': 200,
        'core_poi_categories': ['OFFICE', 'MALL', 'UNIVERSITY'],
        
        'w_pickup': 0.8,
        'w_cluster': 0.2,
        
        'cluster_bonus_factor': 50,
        
        'use_adaptive_params': True,
        'early_stop_patience': 50
    }

    
    # 实例化PSO优化器
    pso_optimizer = OptimizedMultiSitePSO(
        num_sites=50, 
        boundary_polygon=boundary_polygon_3857,
        gpu_evaluator=gpu_eval,
        # 传入CPU路网，供最后的analyze_solution使用
        road_network=road_network_for_analysis, 
        **pso_kwargs
    )
    
    # 运行优化
    final_locations = pso_optimizer.fit(
        progress_callback=progress_callback,
        evaluation_mode='precise' 
    )

    # =========================================================================
    # === 4. 结果分析与保存 ====================================================
    # =========================================================================
    if final_locations:
        print("\n优化成功！正在进行最终方案分析...")
        final_position_3857 = np.array(final_locations).flatten()
        
        # 调用analyze_solution进行详细的CPU端分析
        analysis_metrics = pso_optimizer.analyze_solution(final_position_3857)
        
        convergence_history = pso_optimizer.g_best_scores

        results_to_save = {
            "optimal_locations_3857": final_locations,
            "analysis_metrics": analysis_metrics,
            "convergence_history": convergence_history,
        }

        with open("pso_results.pkl", "wb") as f:
            pickle.dump(results_to_save, f)
        print("✅ 优化结果已成功保存到 'pso_results.pkl'")
    else:
        print("❌ 优化失败，未生成结果文件。")

    print("\n--- GPU加速优化脚本执行完毕 ---")