import random
import numpy as np

from beam.util_psll import get_psll_by_phase


class RISParticleSwarmOptimizer:
    """Particle Swarm Optimization (PSO) over RIS phase configurations.

    Minimizes the peak side-lobe level (PSLL) returned by
    ``get_psll_by_phase``. Phases are treated as continuous values in
    degrees and are clipped to the range [-180, 180] after each move.
    """

    def __init__(self, bit_num, beam_num, num_particles=50, max_iter=100, w=0.7, c1=2, c2=2):
        """
        Args:
            bit_num: quantization bit count, forwarded to the fitness function.
            beam_num: number of beams, forwarded to the fitness function.
            num_particles: swarm size.
            max_iter: number of PSO iterations.
            w: inertia weight.
            c1: cognitive (personal-best) acceleration coefficient.
            c2: social (global-best) acceleration coefficient.
        """
        self.__bit_num = bit_num
        self.__beam_num = beam_num
        self.num_particles = num_particles
        self.max_iter = max_iter
        self.w = w
        self.c1 = c1
        self.c2 = c2
        # Swarm state; populated by initialize_swarm().
        self.particles = None
        self.velocities = None
        self.personal_bests = None
        self.personal_best_fitnesses = None
        self.global_best = None
        self.global_best_fitness = None
        self.best_fitness_history = []
        self.best_individual_history = []

    def fitness(self, phase):
        """Return the PSLL of *phase* (lower is better)."""
        return get_psll_by_phase(phase, self.__bit_num, self.__beam_num)

    def initialize_swarm(self, phase_mix_init):
        """Initialize particle positions, velocities and best-so-far records.

        Every particle starts at ``phase_mix_init``; diversity comes from
        the random initial velocities applied on the first iteration.
        """
        self.particles = [np.copy(phase_mix_init) for _ in range(self.num_particles)]
        self.velocities = [np.random.uniform(-180, 180, phase_mix_init.shape)
                           for _ in range(self.num_particles)]
        self.personal_bests = [np.copy(p) for p in self.particles]
        # All particles start at the exact same point, so evaluate the
        # (expensive) fitness once and share it instead of num_particles
        # times. NOTE(review): assumes get_psll_by_phase is deterministic
        # in the phase — confirm against beam.util_psll.
        init_fitness = self.fitness(phase_mix_init)
        self.personal_best_fitnesses = [init_fitness] * self.num_particles
        self.global_best = self.particles[0].copy()
        self.global_best_fitness = init_fitness

    def update_velocity(self, velocity, particle, personal_best, global_best):
        """Return the standard PSO velocity update for one particle.

        v' = w*v + c1*r1*(personal_best - particle) + c2*r2*(global_best - particle)
        with r1, r2 fresh uniform(0, 1) draws shared across all components.
        """
        r1, r2 = np.random.rand(), np.random.rand()
        cognitive = self.c1 * r1 * (personal_best - particle)
        social = self.c2 * r2 * (global_best - particle)
        return self.w * velocity + cognitive + social

    def run(self, phase_mix_init, logger):
        """Run the PSO loop and return the best phase found.

        Args:
            phase_mix_init: initial phase matrix shared by every particle.
            logger: logger used to report per-iteration progress.

        Returns:
            Tuple ``(global_best, global_best_fitness, best_fitness_history,
            best_individual_history)``; the histories hold one entry for the
            initial state plus one per iteration.
        """
        self.initialize_swarm(phase_mix_init)
        self.best_fitness_history = [self.global_best_fitness]
        # Store defensive copies so later updates can never alias history
        # entries or the tracked bests (the original mixed copies at init
        # with raw references in the loop).
        self.best_individual_history = [self.global_best.copy()]

        for iteration in range(self.max_iter):
            for i, (particle, velocity) in enumerate(zip(self.particles, self.velocities)):
                new_velocity = self.update_velocity(
                    velocity, particle, self.personal_bests[i], self.global_best)
                # Move the particle and keep phases inside the valid range.
                new_particle = np.clip(particle + new_velocity, -180, 180)
                new_fitness = self.fitness(new_particle)

                # Track personal best, and global best when it improves too.
                if new_fitness < self.personal_best_fitnesses[i]:
                    self.personal_bests[i] = new_particle.copy()
                    self.personal_best_fitnesses[i] = new_fitness
                    if new_fitness < self.global_best_fitness:
                        self.global_best = new_particle.copy()
                        self.global_best_fitness = new_fitness

                self.velocities[i] = new_velocity
                self.particles[i] = new_particle

            self.best_fitness_history.append(self.global_best_fitness)
            self.best_individual_history.append(self.global_best.copy())
            # Lazy %-style args: formatting is deferred until the record is
            # actually emitted (was an eager "%" before the logger call).
            logger.info("iteration=%d: self.global_best_fitness=%f",
                        iteration, self.global_best_fitness)

        return self.global_best, self.global_best_fitness, self.best_fitness_history, self.best_individual_history
