import sys
import random
import numpy as np
import progressbar
import pandas as pd
from sklearn.preprocessing import LabelEncoder, normalize
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
import classifier
import numpy as np
import matplotlib.pyplot as plt
import logging
import tools
from collections import Counter
from time import time
logging.basicConfig(level=logging.INFO)
class PSO_FS:
    """Particle-swarm-optimization (PSO) based feature selection.

    Each particle's position is a vector of per-feature weights in [0, 1];
    a feature counts as *selected* when its weight is >= ``threshold``.
    Fitness is the mean cross-validated accuracy of a 5-NN classifier
    trained on the selected feature subset.

    Parameters
    ----------
    size : int
        Population size (number of particles).
    c1 : float
        Cognitive learning factor (pull toward each particle's personal best).
    c2 : float
        Social learning factor (pull toward the global best).
    c3 : float
        Local learning factor (pull toward the neighborhood best);
        only used when ``lbest`` is True.
    w : float
        Inertia weight.
    v_max : float
        Velocity clamp; every velocity component is kept in [-v_max, v_max].
    epoches : int
        Maximum number of iterations.
    threshold : float
        Selection cut-off applied to position components.
    initialize_strategy : str
        "most_small_other_large" (70% of particles start mostly below the
        threshold, 30% mostly above it); any other value falls back to the
        random-binary initialization (see ``initialize``).
    classify : str
        "knn" or "lda"; picks a classifier from the project-local
        ``classifier`` module.  NOTE(review): ``evaluator`` currently always
        builds a KNeighborsClassifier and ignores this choice.
    feature_alpha : float
        Penalty weight for the number of selected features (stored but not
        used inside this class).
    lbest : bool
        If True, include the neighborhood-best term in the velocity update.
    updating_mechanism : str
        Accepted for API compatibility; not used inside this class.
    early_stop_epoches : int
        Stop when there has been no improvement for this many iterations.
    """

    def __init__(self, size=100, c1=1.4962, c2=1.4962, c3=1.4962, w=0.7289,
                 v_max=0.5, epoches=100, threshold=0.6,
                 initialize_strategy="most_small_other_large",
                 classify="knn", feature_alpha=1e-4, lbest=True,
                 updating_mechanism="num_weight", early_stop_epoches=10):
        self.size = size
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3
        self.w = w
        self.v_max = v_max
        self.epoches = epoches
        self.threshold = threshold
        self.feature_alpha = feature_alpha
        self.lbest = lbest
        self.initialize_strategy = initialize_strategy
        self.early_stop_epoches = early_stop_epoches
        self.lsize = 10  # number of particles in each local neighborhood
        if classify == "lda":
            self.classify = classifier.lda
        elif classify == "knn":
            self.classify = classifier.knn

    def update(self):
        """Update every particle's velocity and position.

        Standard PSO velocity update with an optional neighborhood-best
        term; velocities are clamped to [-v_max, v_max] and positions to
        [0, 1] component-wise.
        """
        for i in range(self.size):
            inertia = self.w * self.velocities[i]
            cognitive = self.c1 * random.uniform(0, 1) * (self.pbest_positions[i] - self.positions[i])
            social = self.c2 * random.uniform(0, 1) * (self.gbest_position - self.positions[i])
            if self.lbest:
                local = self.c3 * random.uniform(0, 1) * (
                    self.lbest_positions[i // self.lsize] - self.positions[i])
                self.velocities[i] = inertia + cognitive + social + local
            else:
                self.velocities[i] = inertia + cognitive + social
            logging.debug("%s" % self.pbest_positions[i])
            logging.debug("%s" % self.gbest_position)
            logging.debug("%s" % self.positions[i])
            logging.debug("%s" % self.velocities[i])

            # Clamp velocity, then move and clamp the position into [0, 1].
            np.clip(self.velocities[i], -self.v_max, self.v_max, out=self.velocities[i])
            self.positions[i] = self.positions[i] + self.velocities[i]
            np.clip(self.positions[i], 0, 1, out=self.positions[i])

    def initialize(self, features_num):
        """Initialize particle positions, velocities and best-so-far records.

        :param features_num: dimensionality of the search space (one weight
            per feature).
        """
        if self.initialize_strategy == "most_small_other_large":
            # 70% of particles start with mostly-small weights (few features
            # selected), 30% with mostly-large weights (most features selected).
            small = int(self.size * 0.7)
            large = self.size - small
            scale = self.threshold * 1.01
            self.positions = np.concatenate(
                (np.random.rand(small, features_num) * scale,
                 np.random.rand(large, features_num) * scale + (1 - scale)),
                axis=0)
        else:
            # Default ("normal") strategy: each particle selects a random-size
            # random subset of features with weight exactly 1.
            # BUGFIX: the original condition was `elif("normal")` — a truthy
            # string literal that is always true; this branch was always the
            # fallback in practice, which is made explicit here.
            self.positions = np.zeros((self.size, features_num))
            for p in self.positions:
                rand_num = np.random.randint(1, features_num)
                p[tools.gen_unrepeat_random(features_num, rand_num)] = 1

        # Velocities start uniform in [-1, 1].
        self.velocities = np.random.rand(self.size, features_num) * 2 - np.ones((self.size, features_num))
        self.pbest_fitnesses = np.zeros(self.size)
        local_num = int((self.size - 1) / self.lsize) + 1  # ceil(size / lsize)
        self.lbest_fitnesses = np.zeros(local_num)
        self.gbest_fitness = 0  # global best fitness so far
        self.last_gbest_fitness = 0

        self.last_fn = 0  # selected-feature count of gbest at the last report
        self.fn = 0

        self.pbest_positions = np.zeros((self.size, features_num))  # per-particle best positions
        self.lbest_positions = np.zeros((local_num, features_num))  # per-neighborhood best positions
        self.gbest_position = np.zeros(features_num)  # global best position
        self.times = 0

    def change_info(self, dir_for_imgs):
        """Report progress, track stagnation and dump a population snapshot.

        Prints one 0/1 digit per sampled feature of the global best, followed
        by the selected-feature count, its delta, the best fitness and its
        delta; resets the no-improvement counter on real progress; saves a
        greyscale image of which features each particle currently selects.

        :param dir_for_imgs: directory the per-iteration PNGs are written to.
        """
        # BUGFIX: use >= like every other threshold test in this class
        # (the original used a strict > here only).
        selected_features = [1 if p >= self.threshold else 0 for p in self.gbest_position]
        self.fn = sum(selected_features)
        fn_change = self.fn - self.last_fn
        self.last_fn = self.fn
        gbest_fitness_change = self.gbest_fitness - self.last_gbest_fitness
        self.last_gbest_fitness = self.gbest_fitness

        for p in self.show_features:
            print("%d" % (1 if self.gbest_position[p] >= self.threshold else 0), end="")
        print("\t%d\t%+d\t%.4f\t%+.4f" % (self.fn, fn_change, self.gbest_fitness, gbest_fitness_change))
        # Progress = fitness improved measurably, or the same fitness is
        # reached with fewer features.
        if gbest_fitness_change >= 1e-4 or fn_change < 0:
            self.no_improve_epoches = 0
        else:
            self.no_improve_epoches += 1

        # Snapshot of the binarized population for visual inspection.
        color_map = plt.imshow(np.where(self.positions >= self.threshold, 1, 0), interpolation="none")
        color_map.set_cmap("Greys")
        plt.colorbar()
        # BUGFIX: save BEFORE show() — show() flushes/clears the figure in
        # interactive backends, so the original order saved blank images.
        plt.savefig(dir_for_imgs + "/out%d.png" % self.times)
        plt.show()
        self.fig.clf()
        plt.close()

    def seek(self, x_data, y_data):
        """Evaluate the whole swarm on one data batch and update all bests.

        A candidate replaces a best record when its fitness is measurably
        higher (by 1e-4), or when the fitness ties (within 1e-4) but the
        candidate selects fewer features.

        :param x_data: normalized training data for this batch.
        :param y_data: encoded labels for this batch.
        """
        for i in range(self.size):
            lno = int(i / self.lsize)  # neighborhood index of particle i

            fitness = self.evaluator(x_data, y_data, self.positions[i])
            pbest_fn = sum(1 for p in self.pbest_positions[i] if p >= self.threshold)
            gbest_fn = sum(1 for p in self.gbest_position if p >= self.threshold)
            lbest_fn = sum(1 for p in self.lbest_positions[lno] if p >= self.threshold)
            fn = sum(1 for p in self.positions[i] if p >= self.threshold)

            # Personal best.
            if fitness > self.pbest_fitnesses[i] + 1e-4 or (abs(fitness - self.pbest_fitnesses[i]) < 1e-4 and pbest_fn > fn):
                self.pbest_fitnesses[i] = fitness
                self.pbest_positions[i] = self.positions[i].copy()  # deep copy

            # Neighborhood (local) best.
            if fitness > self.lbest_fitnesses[lno] + 1e-4 or (abs(fitness - self.lbest_fitnesses[lno]) < 1e-4 and lbest_fn > fn):
                self.lbest_fitnesses[lno] = fitness
                self.lbest_positions[lno] = self.positions[i].copy()  # deep copy

            # Global best.
            if fitness > self.gbest_fitness + 1e-4 or (abs(fitness - self.gbest_fitness) < 1e-4 and gbest_fn > fn):
                self.gbest_fitness = fitness
                self.gbest_position = self.positions[i].copy()
        self.update()

    def evaluator(self, X, y, position):
        """Compute one particle's fitness.

        :param X: training data, shape (n_samples, n_features).
        :param y: labels.
        :param position: the particle's position (per-feature weights).
        :return: mean ``self.k``-fold cross-validation accuracy of a 5-NN
            classifier on the selected features, or 0 when the particle
            selects no feature at all.
        """
        # Select the columns whose weight clears the threshold.
        X_selected = X[:, [i for i in range(X.shape[1]) if position[i] >= self.threshold]]
        if X_selected.shape[1] == 0:
            return 0
        # NOTE(review): the `classify` constructor argument is ignored here;
        # a 5-NN classifier is always used.
        knn = KNeighborsClassifier(n_neighbors=5)
        fitness = cross_val_score(knn, X_selected, y, cv=self.k).mean()

        return fitness

    def start(self, train_data_path, train_lables_path, dir_for_imgs, batch_size=1000, show_num=50):
        """Run the optimization, streaming the data files chunk by chunk.

        :param train_data_path: path to whitespace-separated training data.
        :param train_lables_path: path to the matching label file.
        :param dir_for_imgs: directory where per-iteration snapshots go.
        :param batch_size: rows per chunk streamed from the CSV files.
        :param show_num: number of features sampled for the progress row.
        """
        begin_time = time()
        self.fig = plt.figure(figsize=(100, 100), dpi=4)
        self.no_improve_epoches = 0
        # Peek one row to learn the feature count and pick the CV fold count.
        train_data_chunks = pd.read_csv(train_data_path, header=None, sep=' ', chunksize=batch_size)
        train_lables_chunks = pd.read_csv(train_lables_path, header=None, sep=' ', chunksize=batch_size)
        fn = train_data_chunks.get_chunk(1).shape[1]
        y = train_lables_chunks.get_chunk(1)
        # BUGFIX: Counter(DataFrame) iterates column NAMES, not label values;
        # flatten to the actual labels first.
        # TODO(review): only one row is peeked here, so the minimum class
        # count is always 1 and k is always 2 — confirm whether the whole
        # label file was intended.
        self.k = min(Counter(y.values.ravel()).values()) + 1
        if self.k > 5:
            self.k = 5
        self.initialize(fn)
        train_data_chunks.close()
        train_lables_chunks.close()  # BUGFIX: the label reader was left open

        # Sample show_num distinct feature indices in [0, fn) for display.
        self.show_features = tools.gen_unrepeat_random(fn, show_num)

        message = "Sample %d features for showing training process." % show_num
        print(message + " " * (show_num - len(message)) + "\tNUM\tNCH\tFIT\tFCH")

        while self.times < self.epoches:
            train_data_chunks = pd.read_csv(train_data_path, header=None, sep=' ', chunksize=batch_size)
            train_lables_chunks = pd.read_csv(train_lables_path, header=None, sep=' ', chunksize=batch_size)

            for data_chunk, lables_chunk in zip(train_data_chunks, train_lables_chunks):
                x_data = data_chunk.fillna(0).values
                le = LabelEncoder()
                y_data = le.fit_transform(lables_chunk.values.ravel())
                self.seek(normalize(x_data), y_data)
                self.times += 1
                self.change_info(dir_for_imgs)
            train_data_chunks.close()
            train_lables_chunks.close()
            if self.no_improve_epoches >= self.early_stop_epoches:
                print("Early stop. %d epoches." % self.times)
                break
        end_time = time()
        print("Total Used Time: %ds." % (end_time - begin_time))
            