import numpy as np
import random
import math
import time
from mpi4py import MPI
import pycuda.driver as cuda
from pycuda import gpuarray
from pycuda.compiler import SourceModule
from common import get_cost, Init
from hybrid_algorithm import HybridAlgorithm  # ← 新增：导入 HybridAlgorithm
import os

class SolutionPool:
    """Bounded elite pool of tours used to seed the hybrid strategies.

    Holds up to `pool_size` solutions; inserting a better one evicts the
    current worst. Empty slots are None with cost +inf.
    """

    def __init__(self, n, pool_size=100):
        self.n = n                       # number of cities per tour
        self.pool = [None] * pool_size   # tour storage; None marks an empty slot
        self.costs = np.full(pool_size, float('inf'))
        self.pool_size = pool_size

    def add_solution(self, sol, cost):
        """Insert `sol` if it beats the worst entry, replacing that entry."""
        if cost < np.max(self.costs):
            max_idx = np.argmax(self.costs)
            self.pool[max_idx] = sol.copy()
            self.costs[max_idx] = cost

    def get_diverse_solutions(self, num):
        """Return up to `num` low-cost, mutually dissimilar solutions.

        Scans the pool in ascending cost order, skipping empty slots and
        candidates that are too similar to already-selected ones.
        """
        sorted_indices = np.argsort(self.costs)
        selected = []
        for idx in sorted_indices:
            if len(selected) >= num:
                break
            sol = self.pool[idx]
            if sol is None:
                continue
            if not self._is_similar(selected, sol):
                selected.append(sol.copy())
        return selected

    def _is_similar(self, solutions, candidate):
        """True if `candidate` shares >=90% of positions with any entry.

        Bug fix: the original compared plain Python lists with
        `sol != candidate`, which yields a single bool; np.sum of that is
        0 or 1, so for n >= 11 almost every distinct pair was flagged as
        "similar". Element-wise comparison needs numpy arrays.
        """
        cand = np.asarray(candidate)
        for sol in solutions:
            if sol is None or candidate is None:
                continue
            arr = np.asarray(sol)
            if np.array_equal(arr, cand):
                return True
            diff_count = np.sum(arr != cand)
            if diff_count < self.n * 0.1:  # >=90% identical positions -> similar
                return True
        return False

class ParallelSA:
    def __init__(self, n, points, distmat, M, L, max_tnm, 
                 term_count_1, term_count_2, t_0, alpha, comm, rank, block_size=256, algorithm_type="SA_ENSEMBLE"):
        """Parallel simulated-annealing TSP solver (MPI ranks x CUDA replicas).

        Args:
            n: number of cities.
            points: city coordinates, forwarded to Init.
            distmat: n x n distance matrix.
            M: number of MPI processes (used when splitting migrated elites).
            L: number of replicas on this rank (one CUDA block each).
            max_tnm: base move budget per GPU kernel launch.
            term_count_1: minimum outer iterations before early termination.
            term_count_2: maximum outer iterations.
            t_0: initial (hottest) temperature.
            alpha: cooling factor; also spaces the replica temperature ladder.
            comm: MPI communicator.
            rank: this process's rank in comm.
            block_size: CUDA threads per block.
            algorithm_type: "SA_ENSEMBLE" dispatches strategies by temperature;
                any other value uses each replica's own
                HybridAlgorithm.run_iteration.
        """
        self.n = n
        self.points = points
        self.distmat = distmat
        self.M = M
        self.L = L
        self.max_tnm = max_tnm
        self.term_count_1 = term_count_1
        self.term_count_2 = term_count_2
        self.t_0 = t_0
        self.alpha = alpha
        self.comm = comm
        self.rank = rank
        self.block_size = block_size
        self.algorithm_type = algorithm_type
        self.temperatures = self._init_temperatures()
        self.replicas = self._init_replicas()  # also sets self.global_best_* fields
        self._init_cuda()
        self.solution_pool = SolutionPool(n, pool_size=50)
        # Per-iteration history: mean replica cost and global best cost.
        self.data = {
            'cost': [],
            'best_cost': []
        }
        
        # Assign a different hybrid strategy to each replica (round-robin).
        hybrid_types = ["SA_GA", "SA_ACO", "SA_TS", "SA_VNS"]
        self.hybrid_algorithms = []
        
        for i in range(self.L):
            algo_type = hybrid_types[i % len(hybrid_types)]
            params = {
                'population': [rep['sol'] for rep in self.replicas],
                'best_cost': self.global_best_cost,
                'tabu_tenure': 20,
                'pheromone_decay': 0.95,
                'pheromone_influence': 0.3
            }
            if i == 0:
                # Only the first replica gets an explicit hybrid ratio.
                params['hybrid_ratio'] = 0.4
            self.hybrid_algorithms.append(
                HybridAlgorithm(
                    algorithm_type=algo_type,
                    n=n,
                    distmat=distmat,
                    params=params
                )
            )
    
    def adaptive_termination(self, outer_count):
        """Globally synchronized early-stop test.

        Before `term_count_1` iterations, never stop. Afterwards each rank
        votes to stop when the best cost of the last 20 iterations improved
        by less than 0.5% over the value 21 iterations ago; the run stops
        only when ALL ranks vote to stop (allgather consensus).
        """
        if outer_count < self.term_count_1:
            return False

        history = self.data['best_cost']
        vote_stop = (
            len(history) >= 21
            and min(history[-20:]) >= history[-21] * 0.995
        )

        # Collective call: every rank must reach this point together.
        return all(self.comm.allgather(vote_stop))
        
    def two_opt_fast(self, sol, max_trials=None):
        """Randomized 2-opt local search on a single tour.

        Repeatedly samples a segment [i, j] and reverses it when the move
        reduces cost; the counter of consecutive failed trials resets on
        every improvement.

        Bug fix: the original loop condition was
        `while improved and trials < max_trials` with `improved = False`
        at the top of every pass, so the search exited after the FIRST
        non-improving random move and `max_trials` never took effect.
        Now the search runs until `max_trials` consecutive failures.

        Args:
            sol: tour (sequence of city indices).
            max_trials: consecutive failed moves allowed before stopping;
                defaults to min(2000, 5n).

        Returns:
            (best_sol, best_cost): improved tour as a list and its cost.
        """
        if max_trials is None:
            max_trials = min(2000, self.n * 5)

        best_sol = np.array(sol)
        best_cost = get_cost(self.n, self.distmat, best_sol)

        # A 2-opt move needs i + 2 <= n - 1, i.e. at least 4 cities.
        if self.n < 4:
            return best_sol.tolist(), best_cost

        trials = 0
        while trials < max_trials:
            i = random.randint(0, self.n - 3)
            j = random.randint(i + 2, self.n - 1)

            delta = self.calculate_2opt_delta_fast(best_sol, i, j)

            if delta < -1e-6:
                best_sol[i:j+1] = best_sol[i:j+1][::-1]
                best_cost += delta
                trials = 0  # improvement: reset the failure counter
            else:
                trials += 1

        return best_sol.tolist(), best_cost
        
    def calculate_2opt_delta_fast(self, sol, i, j):
        """Cost change of reversing sol[i..j] (a 2-opt move), in O(1).

        Only the two boundary edges change: (prev, sol[i]) and
        (sol[j], next) are replaced by (prev, sol[j]) and (sol[i], next).
        Indices wrap around the tour at both ends.
        """
        dm = self.distmat
        last = self.n - 1

        prev_city = sol[i - 1] if i > 0 else sol[last]
        next_city = sol[j + 1] if j < last else sol[0]

        removed = dm[prev_city][sol[i]] + dm[sol[j]][next_city]
        added = dm[prev_city][sol[j]] + dm[sol[i]][next_city]
        return added - removed
        
    def _init_temperatures(self):
        """Geometric temperature ladder: replica i starts at t_0 * alpha**i."""
        return [self.t_0 * (self.alpha ** i) for i in range(self.L)]
    
    def _init_replicas(self):
        """Build the per-replica state dicts from Init's starting tours.

        Also seeds self.global_best_cost / self.global_best_sol /
        self.best_replica_idx with the best initial tour.
        """
        replicas = []
        init_obj = Init(self.points, self.n, self.L, self.distmat)
        initial_solutions = init_obj.newCreate()

        # If newCreate() returned a single tour (a flat list of city ints),
        # wrap it as a one-element list of tours so the indexing below
        # never yields a bare int.
        if isinstance(initial_solutions, list) \
           and len(initial_solutions) > 0 \
           and isinstance(initial_solutions[0], int):
            initial_solutions = [initial_solutions]

        self.global_best_cost = float('inf')
        self.global_best_sol = None
        self.best_replica_idx = 0

        for i in range(self.L):
            # initial_solutions is now guaranteed to be a list of tours
            # (even for L == 1 there is an outer list level).
            sol = initial_solutions[i]
            cost = get_cost(self.n, self.distmat, sol)
            replicas.append({
                'sol': sol,
                'cost': cost,
                'best_sol': sol.copy(),
                'best_cost': cost,
                'temperature': self.temperatures[i],
                'inner_count': 0,
                'outer_count': 0
            })

            if cost < self.global_best_cost:
                self.global_best_cost = cost
                self.global_best_sol = sol.copy()
                self.best_replica_idx = i

        return replicas

    
    def _init_cuda(self):
        """Compile the CUDA kernels and allocate per-rank GPU state.

        Ranks are spread round-robin across the visible CUDA devices.
        NOTE(review): a device is selected but no context is explicitly
        created here — presumably pycuda.autoinit (or similar) runs
        elsewhere; verify.
        """
        cuda_device = self.rank % cuda.Device.count()
        device = cuda.Device(cuda_device)
        
        try:
            # The distance matrix lives on the GPU for the whole run (float32).
            self.distmat_gpu = gpuarray.to_gpu(np.ascontiguousarray(self.distmat, dtype=np.float32))
        except Exception as e:
            print(f"[Rank {self.rank}] Error uploading distmat to GPU: {e}")
            raise
        kernel_file = "cuda_kernels.cu"
        if not os.path.exists(kernel_file):
            raise FileNotFoundError(f"CUDA kernel file not found: {kernel_file}")
        
        with open(kernel_file, 'r') as f:
            kernel_code = f.read()
        
        try:
            # NOTE(review): hard-coded, machine-specific cache path — make
            # this configurable before running on another host.
            cache_dir = "/mnt/ann25-22336161/lab/cuda_cache"
            self.mod = SourceModule(
                kernel_code,
                options=[
                    '-I/usr/local/cuda/include',
                    '--compiler-options', '-fPIC',
                    '-D_FORCE_INLINES',
                    # Target the selected device's own compute capability.
                    f'-arch=sm_{device.compute_capability()[0]}{device.compute_capability()[1]}',
                    '-Xcompiler', '-O2'
                ],
                no_extern_c=True,
                cache_dir=cache_dir,
                include_dirs=[],
                keep=False
            )
            self.sa_kernel = self.mod.get_function("sa_kernel_opt")
            self.init_randoms = self.mod.get_function("init_randoms")
            
        except Exception as e:
            print(f"CUDA compilation failed for rank {self.rank}")
            if hasattr(e, 'log'):
                print(f"Compilation log:{e.log}")
            else:
                print(f"Error: {str(e)}")
            raise
        
        # Raw byte buffer holding one RNG state per GPU thread. Assumes the
        # curand state type used in cuda_kernels.cu is 48 bytes — TODO
        # confirm against the kernel source.
        CURAND_STATE_SIZE = 48
        self.random_states = gpuarray.empty(
            self.L * self.block_size * CURAND_STATE_SIZE, 
            dtype=np.uint8
        )
        
        # Seed differently per rank so processes explore independently.
        self.init_randoms(
            self.random_states,
            np.uint64(time.time() + self.rank),
            block=(self.block_size, 1, 1),
            grid=(self.L, 1)
        )
        
    def run(self):
        """Execute the parallel SA loop (hybrid strategies + GPU kernel +
        2-opt refinement + solution-pool management + MPI migration).

        Returns:
            (best_sol, best_cost, data): best tour found, its cost, and the
            per-iteration history dict with 'cost' and 'best_cost' lists.
        """
        # Keep handles to this run's GPU buffers so the finally-block can
        # release them. (The original cleanup tested self.solutions_gpu
        # etc. — attributes that were never assigned, since the buffers are
        # locals — so they were only reclaimed by garbage collection.)
        solutions_gpu = None
        costs_gpu = None
        temperatures_gpu = None
        try:
            # Pack all replica tours into one contiguous int32 vector:
            # replica i occupies slots [i*n, (i+1)*n).
            solutions = np.zeros(self.L * self.n, dtype=np.int32)
            for i, rep in enumerate(self.replicas):
                start = i * self.n
                end = start + self.n
                solutions[start:end] = rep['sol']
            costs = np.array([rep['cost'] for rep in self.replicas], dtype=np.float32)
            temperatures = np.array([rep['temperature'] for rep in self.replicas], dtype=np.float32)

            solutions_gpu = gpuarray.to_gpu(solutions)
            costs_gpu = gpuarray.to_gpu(costs)
            temperatures_gpu = gpuarray.to_gpu(temperatures)

            threads_per_block = self.block_size
            blocks_per_grid = self.L

            outer_count = 0
            migration_interval = 50
            last_print = time.time()

            # GPU cool-down pacing.
            cool_down_counter = 0
            cool_down_interval = 5  # sleep once every 5 outer iterations

            prev_global_best = self.global_best_cost
            last_improvement = 0
            stagnation_threshold = 30

            base_max_tnm = self.max_tnm
            adaptive_alpha = self.alpha

            while outer_count < self.term_count_2:
                # All ranks must still be iterating before any collective
                # operation below is attempted.
                alive_flags = self.comm.allgather(outer_count < self.term_count_2)
                if not all(alive_flags):
                    print(f"Rank {self.rank}: Skip migration due to dead processes")
                    outer_count += 1
                    continue
                # ===== GPU cool-down =====
                cool_down_counter += 1
                if cool_down_counter >= cool_down_interval:
                    cool_down_counter = 0
                    # Pause briefly to let the GPU cool.
                    time.sleep(0.1)  # 100 ms
                # ===== adaptive termination check =====
                if self.adaptive_termination(outer_count):
                    print(f"Rank {self.rank}: Early termination at iteration {outer_count}")
                    break  # falls through to the return below

                # ===== adaptive parameter tuning =====
                # Bug fix: the original tested
                # `prev_global_best < self.global_best_cost`, which can
                # never hold because the global best only decreases, so
                # improvement_ratio was always 0 and alpha decayed
                # monotonically to its floor.
                if self.global_best_cost < prev_global_best:
                    improvement_ratio = (prev_global_best - self.global_best_cost) / (prev_global_best + 1e-9)
                else:
                    improvement_ratio = 0.0

                if improvement_ratio > 0.05:
                    adaptive_alpha = min(0.99, adaptive_alpha * 1.05)  # improving fast: cool slower
                elif improvement_ratio < 0.01:
                    adaptive_alpha = max(0.85, adaptive_alpha * 0.95)  # stagnating: cool faster
                prev_global_best = self.global_best_cost

                # More moves per kernel launch as the temperature drops.
                avg_temp = np.mean(temperatures)
                adaptive_max_tnm = int(base_max_tnm * (1 + math.log(1 + self.t_0 / max(avg_temp, 1e-5))))
                adaptive_max_tnm = max(100, min(adaptive_max_tnm, 1000))

                if time.time() - last_print > 1.0:
                    print(f"Rank {self.rank}: Outer {outer_count}/{self.term_count_2}, "
                          f"AvgTemp={avg_temp:.2f}, Alpha={adaptive_alpha:.4f}, "
                          f"AdaptiveMaxTNM={adaptive_max_tnm}")
                    last_print = time.time()

                # ===== periodically feed the solution pool =====
                if outer_count % 20 == 0:
                    for rep in self.replicas:
                        self.solution_pool.add_solution(rep['sol'], rep['cost'])

                # ===== one hybrid step per replica (CPU side) =====
                for i in range(self.L):
                    current_sol = solutions[i * self.n : (i + 1) * self.n].tolist()
                    current_cost = float(costs[i])
                    temp = float(temperatures[i])

                    # Refresh the strategy's population and best-cost view.
                    self.hybrid_algorithms[i].params['population'] = (
                        self.solution_pool.get_diverse_solutions(5) +
                        [solutions[j * self.n : (j + 1) * self.n].tolist() for j in range(self.L)]
                    )
                    self.hybrid_algorithms[i].params['best_cost'] = self.global_best_cost

                    if self.algorithm_type == "SA_ENSEMBLE":
                        new_sol, new_cost = self._sa_ensemble_step(current_sol, current_cost, temp)
                    else:
                        new_sol, new_cost = self.hybrid_algorithms[i].run_iteration(
                            current_solution=current_sol,
                            temperature=temp
                        )

                    solutions[i * self.n : (i + 1) * self.n] = new_sol
                    costs[i] = float(new_cost)

                    if new_cost < self.global_best_cost:
                        self.global_best_cost = new_cost
                        self.global_best_sol = new_sol.copy()

                solutions_gpu.set(solutions)
                costs_gpu.set(costs)

                # ===== launch the SA kernel =====
                shared_mem = self.n * 4  # each block stages one int32 tour in shared memory
                self.sa_kernel(
                    self.distmat_gpu,
                    solutions_gpu,
                    costs_gpu,
                    temperatures_gpu,
                    np.int32(adaptive_max_tnm),
                    np.int32(self.n),
                    np.int32(self.L),
                    self.random_states,
                    block=(threads_per_block, 1, 1),
                    grid=(blocks_per_grid, 1),
                    shared=shared_mem
                )
                cuda.Context.synchronize()
                # Pull the updated state back from the GPU.
                solutions = solutions_gpu.get()
                costs = costs_gpu.get()
                temperatures = temperatures_gpu.get()

                # ===== adaptive cooling (floored at 0.1) =====
                temperatures = np.maximum(temperatures * adaptive_alpha, 0.1)
                temperatures_gpu.set(temperatures)

                # ===== refresh replica bookkeeping =====
                for i in range(self.L):
                    sol_i = solutions[i * self.n : (i + 1) * self.n].tolist()
                    cost_i = float(costs[i])
                    temp_i = float(temperatures[i])

                    self.replicas[i]['sol'] = sol_i
                    self.replicas[i]['cost'] = cost_i
                    self.replicas[i]['temperature'] = temp_i

                    if cost_i < self.replicas[i]['best_cost']:
                        self.replicas[i]['best_cost'] = cost_i
                        self.replicas[i]['best_sol'] = sol_i.copy()
                        self.replicas[i]['inner_count'] = 0
                    else:
                        self.replicas[i]['inner_count'] += 1

                    # Bug fix: GPU-side improvements previously never
                    # reached the global best, so stagnation detection and
                    # the recorded best_cost history lagged behind.
                    if cost_i < self.global_best_cost:
                        self.global_best_cost = cost_i
                        self.global_best_sol = sol_i.copy()

                self.data['cost'].append(float(np.mean(costs)))
                self.data['best_cost'].append(float(self.global_best_cost))

                # ===== stagnation detection + strong perturbation =====
                if self.global_best_cost < prev_global_best:
                    last_improvement = outer_count
                else:
                    if outer_count - last_improvement > stagnation_threshold:
                        for i in range(self.L):
                            if self.replicas[i]['cost'] > self.global_best_cost * 1.1:
                                perturbed = self.hybrid_algorithms[i].strong_perturbation(
                                    self.replicas[i]['sol'])
                                perturbed_cost = get_cost(self.n, self.distmat, perturbed)
                                self.replicas[i]['sol'] = perturbed.copy()
                                self.replicas[i]['cost'] = perturbed_cost

                                start_i = i * self.n
                                end_i = (i + 1) * self.n
                                solutions[start_i:end_i] = perturbed
                                costs[i] = float(perturbed_cost)
                        last_improvement = outer_count
                        time.sleep(0.05)  # extra cool-down after the perturbation burst

                # ===== probabilistic 2-opt refinement of the global best =====
                if random.random() < 0.3 and outer_count % 10 == 0:
                    improved_sol, improved_cost = self.two_opt_fast(
                        self.global_best_sol, 
                        max_trials=min(500, self.n//2)
                    )
                    if improved_cost < self.global_best_cost:
                        self.global_best_cost = improved_cost
                        self.global_best_sol = improved_sol.copy()
                        # NOTE(review): best_replica_idx is set once during
                        # initialisation and may be stale by now.
                        idx = self.best_replica_idx
                        self.replicas[idx]['sol'] = improved_sol.copy()
                        self.replicas[idx]['cost'] = improved_cost
                        start_i = idx * self.n
                        end_i = (idx + 1) * self.n
                        solutions[start_i:end_i] = improved_sol
                        costs[idx] = float(improved_cost)
                        prev_global_best = improved_cost
                        last_improvement = outer_count
                    time.sleep(0.03)  # extra cool-down after 2-opt
                # ===== periodic elite migration across MPI ranks =====
                if outer_count % migration_interval == 0 and outer_count > 0:
                    local_solutions = [
                        (rep['cost'], idx, rep['sol'])
                        for idx, rep in enumerate(self.replicas)
                    ]
                    local_solutions.sort(key=lambda x: x[0])

                    elite_count = max(1, int(self.L * 0.5))
                    local_elites_info = local_solutions[:elite_count]
                    local_elites = [info[2] for info in local_elites_info]
                    
                    # Every rank must contribute the same number of elites.
                    if len(local_elites) < elite_count:
                        # Pad with empty lists to keep the counts aligned.
                        local_elites += [[] for _ in range(elite_count - len(local_elites))]
                    
                    # Collect everyone's elites on rank 0.
                    all_elites = self.comm.gather(local_elites, root=0)

                    if self.rank == 0:
                        merged = []
                        for sub in all_elites:
                            merged.extend(sub)
                        random.shuffle(merged)
                        migrated_elites = []
                        for pid in range(self.M):
                            start = pid * elite_count
                            end = start + elite_count
                            migrated_elites.append(merged[start:end])
                    else:
                        migrated_elites = None

                    # Redistribute the shuffled elites to all ranks.
                    my_elites = self.comm.scatter(migrated_elites, root=0)
                    
                    # Only migrate when this rank actually received elites.
                    if my_elites:
                        worst_info = local_solutions[-len(my_elites):]
                        worst_indices = [info[1] for info in worst_info]

                        for k, elite_sol in enumerate(my_elites):
                            if k < len(worst_indices):
                                worst_idx = worst_indices[k]
                                if k == 0:
                                    # Cross the first elite with replica 0's tour.
                                    child = self._crossover(elite_sol, self.replicas[0]['sol'])
                                    self.replicas[worst_idx]['sol'] = child.copy()
                                    new_cost = get_cost(self.n, self.distmat, child)
                                else:
                                    self.replicas[worst_idx]['sol'] = elite_sol.copy()
                                    new_cost = get_cost(self.n, self.distmat, elite_sol)

                                self.replicas[worst_idx]['cost'] = new_cost
                                start_i = worst_idx * self.n
                                end_i = (worst_idx + 1) * self.n
                                solutions[start_i:end_i] = self.replicas[worst_idx]['sol']
                                costs[worst_idx] = float(new_cost)

                        solutions_gpu.set(solutions)
                        costs_gpu.set(costs)

                outer_count += 1
    
            # Prefer the global best (it also covers 2-opt refinements);
            # fall back to the best replica if that is somehow better.
            best_replica = min(self.replicas, key=lambda x: x['best_cost'])
            if self.global_best_cost <= best_replica['best_cost']:
                return self.global_best_sol, self.global_best_cost, self.data
            return best_replica['best_sol'], best_replica['best_cost'], self.data

        finally:
            # Release GPU resources: persistent attributes plus this run's
            # local buffers (the original checked nonexistent self.* names,
            # so the local buffers were never freed here).
            if hasattr(self, 'distmat_gpu'):
                self.distmat_gpu.gpudata.free()
                del self.distmat_gpu
            if hasattr(self, 'random_states'):
                self.random_states.gpudata.free()
                del self.random_states
            if solutions_gpu is not None:
                solutions_gpu.gpudata.free()
            if costs_gpu is not None:
                costs_gpu.gpudata.free()
            if temperatures_gpu is not None:
                temperatures_gpu.gpudata.free()

    def _sa_ensemble_step(self, sol, cost, temp):
        """Pick one hybrid strategy by temperature band and apply it once.

        The strategy table is indexed by int(temp / 50), clamped to the
        last entry, so hotter replicas use the later strategies.
        """
        dispatcher = self.hybrid_algorithms[0]
        strategy_table = [
            dispatcher._sa_step,       # plain SA
            dispatcher._sa_ga_step,
            dispatcher._sa_aco_step,
            dispatcher._sa_ts_step,
            dispatcher._sa_vns_step,
        ]

        band = min(int(temp / 50), len(strategy_table) - 1)
        return strategy_table[band](sol, cost, temp)
        
    def _crossover(self, sol1, sol2):
        """Order crossover (OX): copy a random slice of sol1, then fill the
        remaining positions with sol2's cities in their original order,
        starting just after the copied slice and wrapping around.

        Fix: membership was tested with `city not in child`, an O(n) scan
        per city (O(n^2) total); tracking placed cities in a set makes the
        crossover O(n) with identical output. Assumes city ids are
        hashable and never -1 (the empty-slot marker).
        """
        n = self.n
        start, end = sorted(random.sample(range(n), 2))
        child = [-1] * n
        child[start:end] = sol1[start:end]
        placed = set(child[start:end])
        pos = end % n
        for city in sol2:
            if city not in placed:
                child[pos] = city
                placed.add(city)
                pos = (pos + 1) % n
        return child

def Parallel_SA_CUDA(n, points, distmat, M, L, max_tnm, 
                     term_count_1, term_count_2, t_0, alpha, comm, rank, 
                     block_size=256, algorithm_type="SA_ENSEMBLE"):
    """Convenience wrapper: build a ParallelSA solver and run it.

    Returns the (best_sol, best_cost, data) tuple from ParallelSA.run().
    """
    solver = ParallelSA(
        n, points, distmat, M, L, max_tnm,
        term_count_1, term_count_2, t_0, alpha, comm, rank,
        block_size, algorithm_type
    )
    return solver.run()