# -*- coding: utf-8 -*-
"""
Created on 2019
@author: QW
"""

# coding: utf-8
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from geatpy import selecting
from geatpy import xovmp
from geatpy import ranking
from geatpy import mutuni
from geatpy import crtpc
from geatpy import bs2ri
from geatpy import recombin
from geatpy import mutate
'''
GA_PSO算法的思想
1 ： 设置初始参数
2 ： 初始化速度与位置
3 ： 更新粒子的位置与速度
4 ： 计算粒子的适应值
5 ：     判断是否满足了适应度门限
6 ：     FALSE：遗传，交叉，变异，操作
7 ：     更新粒子的适应度值
8 ：      判断是否满足终止条件，若否执行3到7
为便于计算，故采用实数交叉，实数选择，实数变异，以及实数重插入
'''
from sklearn.model_selection import train_test_split
from sklearn.linear_model import  LinearRegression

class PSO():
    """GA-PSO hybrid optimiser for selecting informative spectral columns.

    A particle's position is a vector of ``dim`` column indices into the
    feature matrix; its fitness is the prediction error of a linear
    regression trained on those columns (lower is better).  Each step runs a
    standard PSO update; while the global best fitness is still above a
    fixed threshold, an extra genetic generation (ranking selection,
    multi-point crossover, uniform mutation, elitist reinsertion -- all via
    geatpy) is applied to the swarm.
    """

    def __init__(self, N, dim, step, low=80, high=800):
        """Set hyper-parameters, allocate the swarm and load/split the data.

        Parameters
        ----------
        N : int
            Swarm size.
        dim : int
            Search dimensionality (number of selected columns).
        step : int
            Number of PSO iterations.
        low, high : int
            Open-interval bounds for every coordinate (default 80..800,
            matching the original hard-coded search range).
        """
        self.w = 0.5                # inertia weight, typically in (0, 1)
        self.c1 = self.c2 = 2       # acceleration coefficients (classic c1 = c2 = 2)
        # NOTE(review): r1/r2 are fixed constants here; canonical PSO redraws
        # them uniformly at random on every update -- confirm this is intended.
        self.r1 = 0.8
        self.r2 = 0.3
        self.N = N
        self.dim = dim
        self.step = step
        self.low = low
        self.high = high
        self.X = np.zeros((self.N, self.dim))       # particle positions
        self.V = np.zeros((self.N, self.dim))       # particle velocities
        self.pbest = np.zeros((self.N, self.dim))   # per-particle best positions
        self.gbest = np.zeros((1, self.dim))        # global best position
        self.p_fit = np.zeros(self.N)               # per-particle best fitness
        self.fit = 1000000000                       # global best fitness (minimised)
        # Column 1 of the CSV is the target, columns 2+ are candidate features.
        self.data = pd.read_csv(r"E:\机器学习\水质数据建模预测\data\去噪后的数据.csv", sep=',', encoding='gbk')
        y = self.data.iloc[:, 1]
        x = self.data.iloc[:, 2:]
        # Fixed random_state keeps the train/test split reproducible.
        train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.3, random_state=12)
        self.train_x = train_x
        self.test_x = test_x
        self.train_y = train_y
        self.test_y = test_y

    def function(self, d1, d2, d3, d4, d5, d6):
        """Fitness of one particle: error of a linear model on columns d1..d6.

        The indices arrive as floats (positions live in a float ndarray), so
        they are cast to int first -- pandas ``.iloc`` rejects float
        positional indexers.
        """
        cols = [int(d) for d in (d1, d2, d3, d4, d5, d6)]
        model = LinearRegression()
        model.fit(self.train_x.iloc[:, cols], self.train_y)
        result = model.predict(self.test_x.iloc[:, cols])
        # NOTE(review): this is sqrt(SSE / (2n)) = RMSE / sqrt(2); the factor
        # is monotone, so it does not change the ranking of particles.
        return np.sqrt(0.5 / len(result) * np.square(self.test_y - result).sum())

    def init_Population(self):
        """Randomly initialise positions/velocities and seed pbest/gbest."""
        for i in range(self.N):
            for j in range(self.dim):
                self.X[i][j] = np.random.randint(self.low, self.high)
                self.V[i][j] = np.random.randint(0, 20)
            self.pbest[i] = self.X[i]
            print(*self.X[i])
            tmp = self.function(*self.X[i])
            self.p_fit[i] = tmp
            if tmp < self.fit:
                self.fit = tmp
                # copy(): self.X[i] is mutated in place later; without the
                # copy, gbest would silently track row i of the swarm.
                self.gbest = self.X[i].copy()
        print("初始化的粒子群", self.X)
        print("初始化全局最优粒子", self.gbest)
        print("初始化全局适度值", self.fit)

    def _evaluate_swarm(self):
        """Re-evaluate every particle and refresh pbest / gbest."""
        for i in range(self.N):
            temp = self.function(*self.X[i])
            if temp < self.p_fit[i]:                # personal best improved
                self.p_fit[i] = temp
                self.pbest[i] = self.X[i]
                if temp < self.fit:                 # global best improved
                    self.gbest = self.X[i].copy()   # copy: avoid aliasing row i
                    self.fit = temp

    def _move_swarm(self):
        """One PSO velocity/position update; a move is kept only if in-bounds."""
        for i in range(self.N):
            v = (self.w * self.V[i]
                 + self.c1 * self.r1 * (self.pbest[i] - self.X[i])
                 + self.c2 * self.r2 * (self.gbest - self.X[i]))
            x = np.round(self.X[i] + v)             # positions are integer column indices
            if np.all((self.low < x) & (x < self.high)):
                self.V[i] = v
                self.X[i] = x

    def _genetic_step(self):
        """One GA generation: selection, crossover, mutation, elitist reinsertion."""
        obj_v = self.p_fit.reshape((self.N, 1))
        # Rank-based fitness from the minimised objective values.  (The
        # original "[1] * obj_v" was an element-wise product by one -- a no-op.)
        fit_v = ranking(obj_v)
        # Roulette-wheel selection of N-1 parents; the remaining slot is
        # reserved for the elite individual reinserted below.
        sel_ch = self.X[selecting('rws', fit_v, self.N - 1), :]
        new_ch = recombin('xovmp', sel_ch, 0.8)     # multi-point crossover
        # Field descriptor rows: lower bound, upper bound, "gene is integer".
        field_dr = np.array([[self.low] * self.dim,
                             [self.high] * self.dim,
                             [1] * self.dim])
        # 'RI' = real/integer coding; 0.9 mutation probability.
        new_ch = mutuni('RI', new_ch, field_dr, 0.9)
        print("变异之后的种群：", new_ch)
        # Elitist reinsertion: the global best survives into the new swarm.
        self.X = np.vstack([self.gbest, new_ch])

    def iterator(self):
        """Run the GA-PSO loop; return the best-fitness history (one per step)."""
        fitness = []
        for t in range(self.step):
            self._evaluate_swarm()
            self._move_swarm()
            print("i:", self.X)
            if self.fit > 0.1011:   # swarm still above target: apply one GA generation
                self._genetic_step()
                self._evaluate_swarm()
                self._move_swarm()
            print(self.p_fit)
            fitness.append(self.fit)
            print("适应度:", self.fit)
            print("最优解:", self.gbest)
        return fitness
def main():
    """Run the GA-PSO search, plot the fitness history and save it to CSV."""
    start_time = time.time()
    my_pso = PSO(N=100, dim=6, step=200)
    my_pso.init_Population()
    fitness = np.array(my_pso.iterator())
    end_time = time.time()

    plt.figure(1)
    plt.title("Figure1")
    plt.xlabel("iterators", size=14)
    plt.ylabel("fitness", size=14)
    # Plot against the number of recorded iterations instead of a
    # hard-coded 200, so changing `step` cannot break the plot.
    plt.plot(np.arange(len(fitness)), fitness, color='r')
    plt.show()
    print('用时：', end_time - start_time, '秒')
    pd.DataFrame(fitness).to_csv("ga_pso最优粒子目标函数值.csv")


if __name__ == "__main__":
    main()
'''
适应度: 0.14785229715381673
最优解: [313. 106. 166. 204. 168. 438.]
用时： 221.2115592956543 秒
'''