import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.tree import DecisionTreeRegressor
import matplotlib.animation as animation
from IPython.display import HTML
import pandas as pd
import matplotlib

# Use the SimHei font so the Chinese axis labels/titles below render correctly;
# the font must be installed on your system.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # default sans-serif font
matplotlib.rcParams['axes.unicode_minus'] = False  # keep '-' from rendering as a box with non-ASCII fonts




class PSOAdaboostRegressorOptimizer:
    """Particle Swarm Optimization (PSO) of AdaBoost-regression hyperparameters.

    Searches the 2-D space (n_estimators, learning_rate) for the pair that
    maximizes the cross-validated negative MSE of an ``AdaBoostRegressor``
    built on depth-3 decision trees.
    """

    def __init__(self, n_particles=20, max_iter=50, w=0.5, c1=1.5, c2=1.5,
                 n_estimators_range=(50, 200), learning_rate_range=(0.01, 2.0)):
        """
        Initialize the PSO optimizer.

        Parameters:
            n_particles: number of particles in the swarm
            max_iter: maximum number of PSO iterations
            w: inertia weight
            c1: cognitive (individual-best) acceleration coefficient
            c2: social (global-best) acceleration coefficient
            n_estimators_range: inclusive search range for the number of base learners
            learning_rate_range: search range for the learning rate
        """
        self.n_particles = n_particles
        self.max_iter = max_iter
        self.w = w
        self.c1 = c1
        self.c2 = c2
        self.n_estimators_range = n_estimators_range
        self.learning_rate_range = learning_rate_range

        # Best positive MSE found so far (lower is better for regression);
        # np.inf is a placeholder until optimize() runs.
        self.best_fitness = np.inf
        self.best_params = None
        self.best_fitness_history = []

        # Per-iteration snapshots consumed by the visualization helpers.
        self.particle_positions_history = []
        self.particle_fitness_history = []

    def objective_function(self, X, y, n_estimators, learning_rate):
        """
        Fitness of one hyperparameter pair: cross-validated negative MSE.

        Parameters:
            X, y: dataset
            n_estimators: number of base learners (cast to int)
            learning_rate: AdaBoost learning rate

        Returns:
            Mean negative MSE over 5-fold cross-validation (higher is
            better, since the PSO loop maximizes fitness).
        """
        adaboost = AdaBoostRegressor(
            estimator=DecisionTreeRegressor(max_depth=3),
            n_estimators=int(n_estimators),
            learning_rate=learning_rate,
            random_state=42
        )

        # FIX: the original used cv=2 while its comment and docstring promised
        # 5-fold CV; 5 folds give a less noisy fitness estimate.
        scores = cross_val_score(adaboost, X, y, cv=5, scoring='neg_mean_squared_error')
        return np.mean(scores)

    def optimize(self, X, y):
        """
        Run the PSO search.

        Parameters:
            X, y: training dataset

        Returns:
            (best_params, best_mse): dict with 'n_estimators' and
            'learning_rate', plus the corresponding cross-validated MSE.
        """
        # Particle state: column 0 = n_estimators, column 1 = learning_rate.
        particles_position = np.zeros((self.n_particles, 2))
        particles_velocity = np.zeros((self.n_particles, 2))

        # Individual and global bests (fitness is maximized, so start at -inf).
        particles_best_position = np.zeros((self.n_particles, 2))
        particles_best_fitness = np.full(self.n_particles, -np.inf)
        global_best_position = np.zeros(2)
        global_best_fitness = -np.inf

        # Random initial positions inside the parameter ranges.
        # FIX: np.random.randint excludes the high endpoint, so the original
        # could never start a particle at n_estimators_range[1]; +1 makes the
        # documented range inclusive.
        particles_position[:, 0] = np.random.randint(
            self.n_estimators_range[0],
            self.n_estimators_range[1] + 1,
            size=self.n_particles
        )
        particles_position[:, 1] = np.random.uniform(
            self.learning_rate_range[0],
            self.learning_rate_range[1],
            size=self.n_particles
        )

        # Random initial velocities.
        particles_velocity[:, 0] = np.random.randint(
            -20, 20, size=self.n_particles
        )
        particles_velocity[:, 1] = np.random.uniform(
            -0.1, 0.1, size=self.n_particles
        )

        for iteration in range(self.max_iter):
            # --- Evaluate the fitness of every particle ---
            current_fitness = np.zeros(self.n_particles)
            for i in range(self.n_particles):
                # Clamp the parameters into their valid ranges before scoring.
                n_estimators = max(self.n_estimators_range[0],
                                   min(int(particles_position[i, 0]), self.n_estimators_range[1]))
                learning_rate = max(self.learning_rate_range[0],
                                    min(particles_position[i, 1], self.learning_rate_range[1]))

                fitness = self.objective_function(X, y, n_estimators, learning_rate)
                current_fitness[i] = fitness

                # Update the individual best.
                if fitness > particles_best_fitness[i]:
                    particles_best_fitness[i] = fitness
                    particles_best_position[i] = particles_position[i].copy()

                # Update the global best.
                if fitness > global_best_fitness:
                    global_best_fitness = fitness
                    global_best_position = particles_position[i].copy()

            # Record history for the convergence plot / animation.
            self.best_fitness_history.append(global_best_fitness)
            self.particle_positions_history.append(particles_position.copy())
            self.particle_fitness_history.append(current_fitness.copy())

            # --- Update velocities and positions ---
            for i in range(self.n_particles):
                # FIX: draw fresh random coefficients per particle and per
                # dimension (canonical PSO). The original drew one (r1, r2)
                # pair per iteration shared by the whole swarm and both
                # dimensions, which correlates the particles' stochastic
                # terms and reduces search diversity.
                r1 = np.random.rand(2)
                r2 = np.random.rand(2)

                particles_velocity[i] = (
                    self.w * particles_velocity[i]
                    + self.c1 * r1 * (particles_best_position[i] - particles_position[i])
                    + self.c2 * r2 * (global_best_position - particles_position[i])
                )

                particles_position[i] += particles_velocity[i]

                # Keep positions inside the search box.
                particles_position[i, 0] = np.clip(particles_position[i, 0],
                                                   self.n_estimators_range[0],
                                                   self.n_estimators_range[1])
                particles_position[i, 1] = np.clip(particles_position[i, 1],
                                                   self.learning_rate_range[0],
                                                   self.learning_rate_range[1])

            # Periodic progress report.
            if (iteration + 1) % 10 == 0:
                print(f"迭代 {iteration + 1}/{self.max_iter}, 最佳适应度: {global_best_fitness:.4f}")

        # Convert the maximized negative MSE back to a positive MSE.
        self.best_fitness = -global_best_fitness
        self.best_params = {
            'n_estimators': int(global_best_position[0]),
            'learning_rate': global_best_position[1]
        }

        return self.best_params, self.best_fitness

    def plot_convergence_curve(self):
        """Plot the best (positive) MSE found so far versus iteration."""
        # FIX: use the recorded history length instead of self.max_iter so
        # the x-axis always matches the data actually collected.
        n_recorded = len(self.best_fitness_history)
        plt.figure(figsize=(10, 6))
        plt.plot(range(1, n_recorded + 1),
                 [-fit for fit in self.best_fitness_history], 'b-', linewidth=2)
        plt.xlabel('迭代次数')
        plt.ylabel('适应度值 (MSE)')
        plt.title('PSO优化Adaboost回归的收敛曲线')
        plt.grid(True)
        plt.show()

    def visualize_pso(self):
        """Animate the swarm in (n_estimators, learning_rate) space.

        Returns an IPython HTML object embedding a JS animation of the
        recorded particle positions, colored by fitness (-MSE).
        """
        fig, ax = plt.subplots(figsize=(10, 6))

        # Axis limits follow the search ranges.
        x_min, x_max = self.n_estimators_range
        y_min, y_max = self.learning_rate_range

        scatter = ax.scatter([], [], c=[], cmap='viridis', alpha=0.8, s=80)
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
        ax.set_xlabel('基学习器数量 (n_estimators)')
        ax.set_ylabel('学习率 (learning_rate)')
        ax.set_title('PSO优化Adaboost回归超参数过程')

        cbar = plt.colorbar(scatter)
        cbar.set_label('适应度值 (-MSE)')

        def update(frame):
            # Redraw one recorded iteration of the swarm.
            positions = self.particle_positions_history[frame]
            fitness = self.particle_fitness_history[frame]

            scatter.set_offsets(positions)
            scatter.set_array(fitness)

            # Mark the best particle of this frame with a red star.
            if frame > 0:
                best_idx = np.argmax(self.particle_fitness_history[frame])
                best_x, best_y = positions[best_idx]
                ax.plot(best_x, best_y, 'r*', markersize=12)

            ax.set_title(f'PSO优化过程 - 迭代 {frame + 1}/{self.max_iter}')
            return scatter,

        ani = animation.FuncAnimation(
            fig, update, frames=len(self.particle_positions_history),
            interval=300, blit=True
        )

        plt.close()
        return HTML(ani.to_jshtml())


# 使用示例
# Usage example
if __name__ == "__main__":
    # Load the data. Assumes an Excel file whose first 5 columns are the
    # features (X) and whose 6th column is the target (y) — change the file
    # name and column indices to match your data.
    data_file = pd.read_excel('test_data.xlsx')
    # FIX: the original sliced ROWS (`values[0:6]` / `values[6]`), making X
    # the first six samples and y the seventh sample. Select the feature
    # COLUMNS and the target COLUMN instead.
    X = data_file.values[:, 0:5]
    y = data_file.values[:, 5]

    # Hold out 30% of the samples as a test set (adjust test_size as needed;
    # 0.3 means a 70/30 train/test split).
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42
    )

    # Configure the PSO optimizer; every parameter below is tunable.
    pso = PSOAdaboostRegressorOptimizer(
        n_particles=15,
        max_iter=30,
        w=0.7,
        c1=1.5,
        c2=1.5,
        n_estimators_range=(50, 200),
        learning_rate_range=(0.01, 2.0)
    )

    # Run the search on the training split only.
    best_params, best_mse = pso.optimize(X_train, y_train)
    print(f"\n最优参数: {best_params}")
    print(f"最优MSE: {best_mse:.4f}")

    # Optional diagnostics — uncomment to inspect the search:
    # pso.plot_convergence_curve()   # convergence curve
    # pso.visualize_pso()            # swarm animation (Jupyter only)

    # Retrain a final model with the best hyperparameters found.
    final_model = AdaBoostRegressor(
        estimator=DecisionTreeRegressor(max_depth=3),
        n_estimators=best_params['n_estimators'],
        learning_rate=best_params['learning_rate'],
        random_state=42
    )

    final_model.fit(X_train, y_train)

    # Evaluate on the held-out test set.
    y_pred = final_model.predict(X_test)
    test_mse = mean_squared_error(y_test, y_pred)
    test_r2 = r2_score(y_test, y_pred)
    print(f"测试集MSE: {test_mse:.4f}")
    print(f"测试集R²分数: {test_r2:.4f}")

    # Scatter plot of predicted vs. true values with the y = x reference line.
    plt.figure(figsize=(10, 6))
    plt.scatter(y_test, y_pred, alpha=0.7)
    plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--')
    plt.xlabel('真实值')
    plt.ylabel('预测值')
    plt.title('Adaboost回归模型预测结果')
    plt.grid(True)
    plt.show()