from config_init import ParamInit
import numpy as np

class PSOSearch:
    """Particle Swarm Optimization (PSO) search over serving parameters.

    Search bounds and hard constraints are read from a ``ParamInit`` config
    object; particles are initialized via Latin hypercube sampling and scored
    with a penalty-augmented fitness function.
    """

    def __init__(self, config: "ParamInit"):
        self.config = config
        # Inertia weight bounds (typically annealed from w_max down to w_min).
        self.w_max = 0.9
        self.w_min = 0.4
        # Cognitive (c1) and social (c2) acceleration coefficients.
        self.c1 = 1.5
        self.c2 = 1.5
        self.bounds = {}            # param name -> [lower, upper]
        self.v_max = {}             # param name -> max velocity magnitude
        self.hard_constrains = []   # constraint thresholds, set by get_pso_input()
        self.num_populations = 0
        self.num_iterations = 0

    def get_pso_input(self):
        """Load search bounds, constraints and swarm sizes from the config.

        Populates ``self.bounds``, ``self.hard_constrains``,
        ``self.num_populations``, ``self.num_iterations`` and ``self.v_max``.
        """
        bounds = {
            'Prefill BatchSize': [
                float(self.config.get_args("pref_batch_lower")),
                float(self.config.get_args("pref_batch_upper")),
            ],
            'Decode BatchSize': [
                float(self.config.get_args("decode_batch_lower")),
                float(self.config.get_args("decode_batch_upper")),
            ],
            'SelectBatch Prefill Delay Tolerance': [
                float(self.config.get_args("prefill_token_delay_tolerance_lower")),
                float(self.config.get_args("prefill_token_delay_tolerance_upper")),
            ],
            'Request Rate': [
                float(self.config.get_args("request_rate_lower")),
                float(self.config.get_args("request_rate_upper")),
            ],
        }
        # BUG FIX: `bounds` was previously kept only in a local variable, so
        # self.bounds stayed {} and every method that reads it was broken.
        self.bounds = bounds

        if self.config.get_args("is_firsttoken_constrained"):
            self.hard_constrains = [
                float(self.config.get_args("decode_constrains")),
                float(self.config.get_args("firsttoken_constrains")),
            ]
        else:
            self.hard_constrains = [float(self.config.get_args("decode_constrains"))]

        # NOTE(review): assumed the config already returns ints here — confirm.
        self.num_populations = self.config.get_args("num_populations")
        self.num_iterations = self.config.get_args("num_iterations")

        # Cap each dimension's velocity at 40% of its search range.
        self.v_max = {key: (hi - lo) * 0.4 for key, (lo, hi) in bounds.items()}

    def apply_random_preturbation(self, velocities):
        """Nudge zero components of each velocity so particles do not stall.

        NOTE: "preturbation" is a historical typo kept for API compatibility.
        Mutates ``velocities`` in place and returns it.
        """
        for key, vel in velocities.items():
            for i in range(len(vel)):
                if vel[i] == 0:
                    # BUG FIX: was `random.uniform`, but the `random` module is
                    # never imported in this file; use numpy's RNG instead.
                    vel[i] += np.random.uniform(0, 0.1) * abs(self.v_max[key])
        return velocities

    def generate_particles_optimized(self):
        """Generate an initial swarm with Latin hypercube sampling (LHS).

        The first and last particles are pinned to the min/max corners of the
        search space so the swarm covers both extremes from the start.

        Returns:
            dict mapping each bound key to a list of ``num_populations`` values.
        """
        keys = list(self.bounds.keys())
        dim = len(keys)

        # One stratum per particle along each dimension.
        intervals = np.linspace(0, 1, self.num_populations + 1)
        points = np.zeros((self.num_populations, dim))

        for i in range(dim):
            lower_bound, upper_bound = self.bounds[keys[i]]
            # Random permutation keeps strata uncorrelated across dimensions.
            perm = np.random.permutation(self.num_populations)
            for j in range(self.num_populations):
                # Sample the midpoint of the assigned stratum.
                sample_point = (intervals[perm[j]] + intervals[perm[j] + 1]) / 2
                points[j, i] = lower_bound + sample_point * (upper_bound - lower_bound)

        # Pin the two extreme corners of the parameter space.
        points[0, :] = np.array([self.bounds[k][0] for k in keys])   # min corner
        points[-1, :] = np.array([self.bounds[k][1] for k in keys])  # max corner

        return {keys[i]: points[:, i].tolist() for i in range(dim)}

    def generate_particles_latin_hypercube_perturbed(self, perturbation=0.01):
        """LHS initialization with a small random jitter inside each stratum.

        Args:
            perturbation: jitter amplitude relative to the stratum width.

        Returns:
            dict mapping each bound key to a list of ``num_populations`` values.
        """
        keys = list(self.bounds.keys())
        dim = len(keys)
        intervals = np.linspace(0, 1, self.num_populations + 1)
        points = np.zeros((self.num_populations, dim))

        for i in range(dim):
            lower_bound, upper_bound = self.bounds[keys[i]]
            perm = np.random.permutation(self.num_populations)
            for j in range(self.num_populations):
                # Stratum midpoint plus jitter, clipped back into the stratum.
                width = intervals[perm[j] + 1] - intervals[perm[j]]
                midpoint = (intervals[perm[j]] + intervals[perm[j] + 1]) / 2
                sample_point = midpoint + np.random.uniform(-perturbation, perturbation) * width
                sample_point = np.clip(sample_point, intervals[perm[j]], intervals[perm[j] + 1])
                points[j, i] = lower_bound + sample_point * (upper_bound - lower_bound)

        return {keys[i]: points[:, i].tolist() for i in range(dim)}

    def clip_particles(self, particles):
        """Clamp every particle position into its [lower, upper] bounds.

        Returns a new dict of int numpy arrays; input is not modified.
        NOTE(review): the int cast truncates fractional values — for
        'Request Rate' (kept at 1 decimal in round_particles) this drops the
        fraction; confirm that is intended.
        """
        clipped_particles = {}
        for key, values in particles.items():
            lower_bound, upper_bound = self.bounds[key]
            clipped = np.clip(np.array(values), lower_bound, upper_bound)
            clipped_particles[key] = clipped.astype(int)
        return clipped_particles

    def reflect_particles(self, positions, velocities):
        """Clamp out-of-bounds positions and reverse the matching velocity.

        Returns:
            (updated_positions, updated_velocities) as dicts of numpy arrays.
        """
        updated_positions = {}
        updated_velocities = {}

        for key in positions.keys():
            pos = np.array(positions[key])
            vel = np.array(velocities[key])
            lower_bound, upper_bound = self.bounds[key]

            for i in range(len(pos)):
                if pos[i] < lower_bound:
                    pos[i] = lower_bound
                    vel[i] = -vel[i]  # bounce: reverse velocity direction
                elif pos[i] > upper_bound:
                    pos[i] = upper_bound
                    vel[i] = -vel[i]  # bounce: reverse velocity direction

            updated_positions[key] = pos
            updated_velocities[key] = vel

        return updated_positions, updated_velocities

    def round_particles(self, particles):
        """Round particle values: 'Request Rate' to 1 decimal, others floored to int.

        Returns a new dict; input is not modified.
        """
        rounded_particles = {}
        for key, value in particles.items():
            value_array = np.array(value)
            if key == "Request Rate":
                # CONSISTENCY FIX: round the numpy array (was rounding the raw
                # input) — same result, uniform with the branch below.
                rounded_particles[key] = np.round(value_array, 1)
            else:
                rounded_particles[key] = np.floor(value_array).astype(int)
        return rounded_particles

    def penalty_functions(self, constrain_params, hard_constrain):
        """Squared relative-overrun penalty for each measured value.

        Args:
            constrain_params: measured values to check against the threshold.
            hard_constrain: hard constraint threshold (must be nonzero).

        Returns:
            list of penalties, 0.0 where the value satisfies the constraint.
        """
        penalty_coef = 10
        fitness_penalty = []
        for measured in constrain_params:
            overrun = max(0, (measured - hard_constrain) / hard_constrain)
            fitness_penalty.append((overrun * penalty_coef) ** 2)
        return fitness_penalty

    def fitness_functions(self, criteria, constrain_params):
        """Penalty-augmented fitness: 1/criteria plus summed constraint penalties.

        Args:
            criteria: objective value (must be nonzero).
            constrain_params: one sequence of measured values per entry in
                ``self.hard_constrains``.

        Returns:
            float fitness; constraint violations inflate it quadratically.
        """
        fitness = 1 / criteria
        for idx, constrain_param in enumerate(constrain_params):
            # BUG FIX: attribute was misspelled (`hard_constrain`) and the
            # returned penalty *list* was added to a scalar; sum it instead.
            fitness += sum(self.penalty_functions(constrain_param, self.hard_constrains[idx]))
        return fitness