from mpi4py import MPI
import pycuda.driver as cuda
import matplotlib.pyplot as plt
import numpy as np
from pprint import pprint
import time
import math
from tqdm import tqdm
import os
import random
import sys
from parallel_sa import Parallel_SA_CUDA
from common import *
from SA6 import SA6  # 导入串行版本作为基准

# Initialize the MPI environment.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# CUDA environment tuning: asynchronous kernel launches on, compiled-kernel
# cache enabled.
os.environ["CUDA_LAUNCH_BLOCKING"] = "0"
os.environ["CUDA_CACHE_DISABLE"] = "0"

# Initialize the CUDA driver and discover the available GPUs.
cuda.init()
num_gpus = cuda.Device.count()

# Bind each MPI process to a GPU, round-robin by rank.
cuda_device = rank % num_gpus
device = cuda.Device(cuda_device)
context = device.make_context()
print(f"Rank {rank} using GPU {cuda_device}")

# Only rank 0 creates the output directory. exist_ok=True avoids the
# check-then-create race of the original os.path.exists() guard and is a
# no-op when the directory already exists.
if rank == 0:
    os.makedirs('results', exist_ok=True)

# TSPLIB instance files to choose from; the 1-based index is passed on the
# command line.
fileName = [
    "a280.tsp","d198.tsp","d493.tsp","eil101.tsp","gil262.tsp",
    "gr431.tsp","kroA150.tsp","p654.tsp","u724.tsp","ch130.tsp","eil76.tsp"
]
# Known optimal tour lengths, aligned index-for-index with fileName above.
optDis = [2579, 15780, 35002, 629, 2378, 171414, 26524, 34643, 41910, 6110, 538]

# Parse the command-line selection on rank 0, then broadcast it.
# BUG FIX: on a usage error the original called sys.exit(1) on rank 0 only,
# leaving every other rank blocked forever inside comm.bcast() (deadlock).
# Instead, broadcast a None sentinel so all ranks exit together.
if rank == 0:
    if len(sys.argv) < 2:
        print("请通过命令行参数指定测试集序号（例如：python main.py 1）")
        choose = None        # sentinel: invalid usage, everyone must exit
        run_serial = False
    else:
        choose = int(sys.argv[1]) - 1
        run_serial = True    # run the serial baseline by default
        if len(sys.argv) > 2 and sys.argv[2].lower() == "no_serial":
            run_serial = False
else:
    choose = None
    run_serial = False

# Broadcast the selection; all ranks exit cleanly on bad usage.
choose = comm.bcast(choose, root=0)
run_serial = comm.bcast(run_serial, root=0)
if choose is None:
    sys.exit(1)

# Load the TSP instance on rank 0 only; the other ranks receive the data via
# the broadcasts further below.
if rank == 0:
    with open('data/' + fileName[choose]) as fp:
        lines = fp.readlines()
        # pos rows are [x, y, node_id]: coordinates first, with the 1-based
        # node index moved to the end of the row.
        # NOTE(review): assumes every line is "id x y" whitespace-separated
        # TSPLIB node data with no header lines — confirm the data files.
        pos = [[float(x) for x in s.split()[1:]] + [int(x) for x in s.split()[:1]] for s in lines]
        dim = len(lines)
        # graph rows keep the file's column order: [node_id, x, y].
        graph = np.zeros((dim, 3))
        distmat = np.zeros((dim, dim))
        for i in range(dim):
            # filter() drops empty tokens produced by runs of spaces.
            for j, pox in enumerate(filter(lambda x: x and x.strip(), lines[i].split(' '))):
                graph[i][j] = float(pox)
        # Symmetric Euclidean distance matrix over graph[:, 1:] (x, y).
        # The diagonal is +inf so a city never pairs with itself.
        for i in range(dim):
            for j in range(i, dim):
                if i == j:
                    distmat[i][j] = float('inf')
                else:
                    distmat[i][j] = distmat[j][i] = np.linalg.norm(graph[i, 1:] - graph[j, 1:])
else:
    # Placeholders on non-root ranks; filled in by the broadcasts below.
    dim = None
    pos = None
    distmat = None

# ---- Distribute the problem instance from rank 0 to every rank ----

# Scalar metadata first.
dim = comm.bcast(dim, root=0)
opt_cost = comm.bcast(optDis[choose], root=0)


def _bcast_matrix(mat):
    """Broadcast a 2-D float64 array from rank 0; every rank returns it.

    The shape travels via a pickle-based bcast so non-root ranks can
    pre-allocate a matching receive buffer, then the raw values travel via
    the fast buffer-based Bcast.
    """
    if rank == 0:
        shape = mat.shape
        flat = np.asarray(mat, dtype=np.float64).flatten()
    else:
        shape = None
        flat = None
    shape = comm.bcast(shape, root=0)
    if rank != 0:
        flat = np.empty(shape[0] * shape[1], dtype=np.float64)
    comm.Bcast(flat, root=0)
    return flat.reshape(shape)


# Full pairwise distance matrix.
distmat = _bcast_matrix(distmat)

# City coordinates, kept as a list of rows as downstream code expects.
pos = _bcast_matrix(np.array(pos) if rank == 0 else None).tolist()

# ---- Experiment parameters ----
num_tests = 100
method = 'PISA-CUDA'
method_name = 'Parallel Island-SA with CUDA'
term_count_2 = 200  # annealing iteration count (also the curve length)

if rank == 0:
    # Aggregate statistics, filled in as the tests complete.
    result = {
        'best_cost': math.inf, 'best_gap': math.inf,
        'cost': [0] * num_tests, 'time': [0] * num_tests,
        'avg_cost': math.inf, 'avg_gap': math.inf, 'cost_std': math.inf,
        'avg_time': math.inf, 'time_std': math.inf
    }

    # Running sums for the convergence curves; divided by curve_count
    # whenever they are plotted.
    avg_cost_curve = np.zeros(term_count_2)
    avg_best_cost_curve = np.zeros(term_count_2)
    curve_count = 0

    # Timing data backing the speedup/efficiency report.
    perf_data = {
        'serial_time': None,
        'parallel_times': [],
        'speedup': [],
        'efficiency': []
    }

# Scale the CUDA block size with the problem dimension.
block_size = 128 if dim < 200 else (256 if dim < 500 else 512)

print(f"Rank {rank} using {block_size} threads/block")

# Run the serial SA once on rank 0 as the speedup baseline.
if rank == 0 and run_serial:
    print("Running serial version for baseline...")
    serial_start = time.time()
    serial_sol, serial_cost, _ = SA6(
        dim, pos, distmat,
        max_tnm=100,
        term_count_1=100,
        term_count_2=term_count_2,
        t_0=2500,
        alpha=0.93
    )
    serial_time = time.time() - serial_start
    # (The original re-checked `rank == 0` here — redundant inside this branch.)
    perf_data['serial_time'] = serial_time
    print(f"Serial version completed in {serial_time:.2f} seconds")
    print(f"Serial solution cost: {serial_cost:.2f} (Optimal: {opt_cost})")
else:
    serial_time = None

# Share the baseline time with every rank.
# BUG FIX: the original immediately overwrote the broadcast result with
# None, which silently disabled every speedup/efficiency computation and
# report downstream. The broadcast value is kept now.
serial_time = comm.bcast(serial_time, root=0)

try:
    # GPU warm-up: one tiny throwaway annealing pass so one-time CUDA costs
    # (kernel compilation, context setup) don't pollute the real timings.
    if rank == 0:
        print(f"Rank {rank}: Performing GPU warmup...")

    # Every rank logs that it is about to enter the warm-up call.
    print(f"Rank {rank}: About to call Parallel_SA_CUDA for warmup")

    warmup_start = time.time()

    Parallel_SA_CUDA(
        dim, pos, distmat,
        M=size,
        L=1,
        max_tnm=5,
        term_count_1=3,
        term_count_2=3,
        t_0=100,
        alpha=0.9,
        comm=comm,
        rank=rank,
        block_size=block_size
    )

    # Every rank logs again once the warm-up returns.
    print(f"Rank {rank}: Returned from Parallel_SA_CUDA warmup")

    comm.Barrier()
    if rank == 0:
        warmup_time = time.time() - warmup_start
        print(f"GPU warmup completed in {warmup_time:.2f} seconds")
    time.sleep(1.0)


    # Broadcast a warm-up-done flag. Every rank already holds True, so this
    # mainly acts as one more synchronization point.
    warmup_done = True
    warmup_done = comm.bcast(warmup_done, root=0)

    # The progress bar and the live convergence plot exist on rank 0 only.
    if rank == 0:
        pbar = tqdm(total=num_tests, desc=f"Running tests (Islands={size})", position=0, leave=True)
        plt.figure(figsize=(10, 6))

    for test_idx in range(num_tests):
        start = time.time()

        # The timed test run; every rank participates.
        # NOTE(review): `data` is assumed to hold per-iteration lists under
        # the keys 'cost' and 'best_cost' (indexed below) — confirm against
        # parallel_sa.Parallel_SA_CUDA.
        best_sol, best_cost, data = Parallel_SA_CUDA(
            dim, pos, distmat,
            M=size,
            L=4,
            max_tnm=100,
            term_count_1=100,
            term_count_2=term_count_2,
            t_0=2500,
            alpha=0.93,
            comm=comm,
            rank=rank,
            block_size=block_size
        )

        elapsed = time.time() - start

        if rank == 0:
            # Collect every island's result: rank 0's own first, then one
            # recv per other rank (they send in the else branch below).
            all_costs = [best_cost]
            all_times = [elapsed]
            all_sols = [best_sol]
            for i in range(1, size):
                result_data = comm.recv(source=i, tag=0)
                all_costs.append(result_data['cost'])
                all_times.append(result_data['time'])
                all_sols.append(result_data['sol'])

            min_idx = np.argmin(all_costs)
            global_best_cost = all_costs[min_idx]
            global_best_sol = all_sols[min_idx]
            parallel_time = max(all_times)  # parallel time is set by the slowest process

            result['time'][test_idx] = parallel_time
            result['cost'][test_idx] = global_best_cost

            # Record timing for the performance report.
            perf_data['parallel_times'].append(parallel_time)

            # Speedup and efficiency, when a serial baseline is available.
            if serial_time is not None and serial_time > 0:
                speedup = serial_time / parallel_time
                efficiency = (speedup / size) * 100
                perf_data['speedup'].append(speedup)
                perf_data['efficiency'].append(efficiency)
            else:
                speedup = efficiency = 0

            # Track the best solution seen over all tests.
            if global_best_cost < result['best_cost']:
                result['best_sol'] = global_best_sol
                result['best_cost'] = global_best_cost
                result['best_gap'] = global_best_cost / opt_cost - 1

            # Accumulate the convergence curves (running sums; divided by
            # curve_count when plotted).
            if curve_count == 0:
                avg_cost_curve = np.array(data['cost'])
                avg_best_cost_curve = np.array(data['best_cost'])
            else:
                # Guard against runs that report curves of different lengths.
                min_len = min(len(avg_cost_curve), len(data['cost']))
                avg_cost_curve[:min_len] += np.array(data['cost'][:min_len])
                avg_best_cost_curve[:min_len] += np.array(data['best_cost'][:min_len])

            curve_count += 1

            # Refresh the progress text and chart every 10 tests (and on the last one).
            if test_idx % 10 == 0 or test_idx == num_tests - 1:
                current_avg_cost = np.mean(result['cost'][:test_idx+1])
                current_avg_gap = (current_avg_cost / opt_cost - 1) * 100
                progress_desc = f"Tests: {test_idx+1}/{num_tests}, Avg Cost: {current_avg_cost:.1f}, Gap: {current_avg_gap:.2f}%"

                if serial_time is not None and serial_time > 0:
                    progress_desc += f", Speedup: {speedup:.2f}x, Eff: {efficiency:.1f}%"

                pbar.set_description(progress_desc)

                # Redraw the live convergence chart and save a snapshot.
                plt.clf()
                current_avg_curve = avg_cost_curve / curve_count
                current_best_curve = avg_best_cost_curve / curve_count

                plt.plot(current_avg_curve, 'b-', label='Average Cost')
                plt.plot(current_best_curve, 'r-', label='Best Cost')
                plt.axhline(y=opt_cost, color='g', linestyle='--', label='Optimal')

                plt.title(f'{method_name} Convergence (Test {test_idx+1}/{num_tests})')
                plt.xlabel('Iteration')
                plt.ylabel('Cost')
                plt.legend()
                plt.grid(True)
                plt.tight_layout()
                plt.savefig(f'results/{method}_convergence_M{size}.png')
                plt.pause(0.1)

            pbar.update(1)
        else:
            # Non-root ranks report their island's result to rank 0.
            comm.send({'cost': best_cost, 'time': elapsed, 'sol': best_sol}, dest=0, tag=0)

        # GPU sync, then a short cool-down pause between tests.
        context.synchronize()
        if test_idx < num_tests - 1:
            time.sleep(0.5)

    # ---- Final statistics (rank 0 only) ----
    if rank == 0:
        result['avg_cost'] = np.mean(result['cost'])
        result['avg_gap'] = result['avg_cost'] / opt_cost - 1
        result['worst_cost'] = np.max(result['cost'])
        result['worst_gap'] = result['worst_cost'] / opt_cost - 1
        result['cost_std'] = np.std(result['cost'])
        result['cost_std_gap'] = result['cost_std'] / opt_cost
        result['avg_time'] = np.mean(result['time'])
        result['time_std'] = np.std(result['time'])

        # Parallel-performance statistics.
        if perf_data['parallel_times']:
            result['avg_parallel_time'] = np.mean(perf_data['parallel_times'])
            result['parallel_time_std'] = np.std(perf_data['parallel_times'])

            if 'speedup' in perf_data and perf_data['speedup']:
                result['avg_speedup'] = np.mean(perf_data['speedup'])
                result['speedup_std'] = np.std(perf_data['speedup'])
                result['avg_efficiency'] = np.mean(perf_data['efficiency'])
                result['efficiency_std'] = np.std(perf_data['efficiency'])

        # Save the final convergence curves. Dividing by num_tests matches
        # the incremental curve_count division: curve_count is incremented
        # exactly once per test on rank 0.
        final_avg_curve = avg_cost_curve / num_tests
        final_best_curve = avg_best_cost_curve / num_tests

        plt.clf()
        plt.plot(final_avg_curve, 'b-', label='Average Cost')
        plt.plot(final_best_curve, 'r-', label='Best Cost')
        plt.axhline(y=opt_cost, color='g', linestyle='--', label='Optimal')

        plt.title(f'{method_name} Final Convergence ({fileName[choose]})')
        plt.xlabel('Iteration')
        plt.ylabel('Cost')
        plt.legend()
        plt.grid(True)
        plt.tight_layout()
        plt.savefig(f'results/{method}_final_convergence_M{size}.png', dpi=300)
        plt.savefig(f'results/{method}_final_convergence_M{size}.svg')

        # Print the solution-quality summary.
        print("\nFinal Results:")
        print(f"Best Cost: {result['best_cost']} (Gap: {result['best_gap']*100:.2f}%)")
        print(f"Average Cost: {result['avg_cost']} (Gap: {result['avg_gap']*100:.2f}%)")
        print(f"Worst Cost: {result['worst_cost']} (Gap: {result['worst_gap']*100:.2f}%)")
        print(f"Cost Std: {result['cost_std']} (Gap Std: {result['cost_std_gap']*100:.2f}%)")
        print(f"Average Time: {result['avg_time']:.2f}s (Std: {result['time_std']:.2f}s)")

        # Print the parallel-performance summary.
        print("\nParallel Performance:")
        if 'avg_parallel_time' in result:
            print(f"Average Parallel Time: {result['avg_parallel_time']:.2f}s (Std: {result['parallel_time_std']:.2f}s)")

            if serial_time is not None and serial_time > 0:
                print(f"Serial Baseline Time: {serial_time:.2f}s")

            if 'avg_speedup' in result:
                print(f"Average Speedup ({size} islands): {result['avg_speedup']:.2f} ± {result['speedup_std']:.2f}")
                print(f"Average Efficiency: {result['avg_efficiency']:.1f}% ± {result['efficiency_std']:.1f}%")

        # Plot per-test speedup and efficiency charts.
        if 'speedup' in perf_data and perf_data['speedup']:
            plt.figure(figsize=(12, 6))

            # Speedup subplot.
            plt.subplot(1, 2, 1)
            plt.plot(perf_data['speedup'], 'bo-', label='Actual Speedup')
            plt.axhline(y=size, color='r', linestyle='--', label='Ideal Speedup')
            plt.title(f'Speedup (Serial: {serial_time:.1f}s)')
            plt.xlabel('Test Iteration')
            plt.ylabel('Speedup (Serial / Parallel)')
            plt.legend()
            plt.grid(True)

            # Efficiency subplot.
            plt.subplot(1, 2, 2)
            plt.plot(perf_data['efficiency'], 'go-', label='Actual Efficiency')
            plt.axhline(y=100, color='r', linestyle='--', label='Ideal Efficiency')
            plt.title('Parallel Efficiency')
            plt.xlabel('Test Iteration')
            plt.ylabel('Efficiency (%)')
            plt.legend()
            plt.grid(True)

            plt.tight_layout()
            plt.savefig(f'results/{method}_performance_M{size}.png', dpi=300)
            plt.savefig(f'results/{method}_performance_M{size}.svg')
            print(f"Saved performance plot to results/{method}_performance_M{size}.png")

        # Drop the tour itself before pretty-printing (too long to read).
        del result['best_sol']
        pprint(result)

        pbar.close()

finally:
    # Always release this process's CUDA context, even on error.
    context.pop()