import random

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.metrics import confusion_matrix
# sklearn.cross_validation was removed in scikit-learn 0.20; train_test_split
# now lives in sklearn.model_selection alongside GridSearchCV.
from sklearn.model_selection import GridSearchCV, train_test_split

# Compute classification accuracy (with early abandonment for PSO fitness).
def accuracy(test, predict, p, g):
    """Return the fraction of correct predictions, abandoning early.

    Parameters
    ----------
    test : array-like of shape (n, 1)
        True labels; passed through ``np.array(test)`` so a one-column
        DataFrame works, and compared via the first column of each row.
    predict : np.ndarray of shape (n,)
        Predicted labels.
    p, g : float
        The particle's individual-best and the swarm's global-best fitness.
        Once the number of errors guarantees the final accuracy cannot
        reach ``min(p, g)`` (plus a 0.01 margin), scanning stops and a
        pessimistic estimate is returned.  Pass 0, 0 to disable.
    """
    n = len(test)
    # Error budget: more than this many mistakes means the final accuracy
    # is already below min(p, g) + 0.01, so counting further is pointless.
    fail_budget = (1.01 - min(p, g)) * n
    correct = 0.0
    wrong = 0.0
    for true_row, pred in zip(np.array(test).tolist(), predict.tolist()):
        if true_row[0] == pred:
            correct += 1
        else:
            wrong += 1
        if wrong > fail_budget:
            # Pessimistic early estimate: treat all remaining as correct.
            return 1 - wrong / n
    return correct / n
# Plot the best fitness found at each iteration.
def drwa(resault, title):
    """Plot the global-best result per iteration.

    Parameters
    ----------
    resault : sequence of float
        Global-best accuracy recorded after each iteration.
    title : str
        Figure title.
    """
    plt.rcParams['font.sans-serif'] = ['SimHei']  # set CJK font BEFORE creating the figure
    plt.figure()
    plt.suptitle(title)
    # Label the curve: an unlabeled plot makes plt.legend() warn
    # ("No handles with labels found") and render an empty legend.
    plt.plot(np.arange(1, len(resault) + 1), resault, label='最优结果')
    plt.legend(loc="upper right")  # show the curve label
    plt.xlabel("迭代次数")
    plt.ylabel('最优结果')
    plt.show()

def plot_confusion_matrix(y_test, predictLabel, classLabel,title):
    """Draw the confusion matrix of *predictLabel* against *y_test*.

    y_test : array-like of true labels.
    predictLabel : array-like of predicted labels, same length as y_test.
    classLabel : list of class names used for the axis tick labels.
    title : str, prefix for the figure title.

    Displays the figure with matplotlib; returns nothing.
    """
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK text correctly
    cmap = plt.cm.get_cmap('Paired')  # alternative colormap: 'Accent_r'
    cm = confusion_matrix(y_test, predictLabel)
    # Minor ticks offset by 0.5 so the grid lines fall BETWEEN cells.
    tick_marks = np.array(range(len(classLabel))) + 0.5
    np.set_printoptions(precision=2)
    # Row-normalised matrix (per true class) for the float display mode.
    cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.figure(figsize=(10, 8), dpi=120)
    ind_array = np.arange(len(classLabel))
    x, y = np.meshgrid(ind_array, ind_array)

    intFlag = 1# flag: annotate cells with raw integer counts (1) or normalised floats (0)
    for x_val, y_val in zip(x.flatten(), y.flatten()):
        if (intFlag):
            c = cm[y_val][x_val]
            plt.text(x_val, y_val, "%d" % (c,), color='red', fontsize=8, va='center', ha='center')
        else:
            c = cm_normalized[y_val][x_val]
            if (c > 0.01):
                # Draw the number; size and colour can be tuned here.
                plt.text(x_val, y_val, "%0.2f" % (c,), color='black', fontsize=5, va='center', ha='center')
            else:
                plt.text(x_val, y_val, "%d" % (0,), color='black', fontsize=7, va='center', ha='center')
    if(intFlag):
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
    else:
        plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)
    plt.gca().set_xticks(tick_marks, minor=True)
    plt.gca().set_yticks(tick_marks, minor=True)
    plt.gca().xaxis.set_ticks_position('none')
    plt.gca().yaxis.set_ticks_position('none')
    plt.grid(True, which='minor', linestyle='-')
    plt.gcf().subplots_adjust(bottom=0.15)
    plt.title(title + u'模型预测混淆矩阵结果') #, fontproperties=custom_font
    plt.colorbar()
    xlocations = np.array(range(len(classLabel)))
    plt.xticks(xlocations, classLabel, rotation=30)
    plt.yticks(xlocations, classLabel)
    plt.ylabel(u'真实类别')
    plt.xlabel(u'预测类别')
    #plt.savefig('confusion_matrix.jpg', dpi=300)
    plt.show()


# Numerise the categorical columns in place.
def numerlization(Data, names):
    """Encode the categorical columns named in *names* as integer codes.

    The i-th entry of *names* is translated with the i-th codebook below,
    so callers must pass the column names in schema order.  *Data* is
    modified in place; nothing is returned.  Values missing from a
    codebook become NaN (pandas ``Series.map`` semantics).
    """
    codebooks = [
        {'usual': 1, 'pretentious': 2, 'great_pret': 3},                                # parents
        {'proper': 1, 'less_proper': 2, 'improper': 3, 'critical': 4, 'very_crit': 5},  # has_nurs
        {'complete': 1, 'completed': 2, 'incomplete': 3, 'foster': 4},                  # form
        {'convenient': 1, 'less_conv': 2, 'critical': 3},                               # housing
        {'convenient': 1, 'inconv': 2},                                                 # finance
        {'nonprob': 1, 'slightly_prob': 2, 'problematic': 3},                           # social
        {'recommended': 1, 'priority': 2, 'not_recom': 3},                              # health
        {'not_recom': 1, 'recommend': 2, 'very_recom': 3, 'priority': 4, 'spec_prior': 5},  # Distribution
    ]
    for column, codebook in zip(names, codebooks):
        Data[column] = Data[column].map(codebook)

def loadData(filename):
    """Load the nursery CSV, numerise it and produce a train/test split.

    Returns ``(row, columns, X_train, X_test, y_train, y_test)`` where
    *row*/*columns* is the shape of the de-duplicated frame, the features
    are the first eight columns and the label is 'Distribution'.
    """
    Data = pd.read_csv(filename, sep=',',
                       names=['parents', 'has_nurs', 'form', 'children', 'housing',
                              'finance', 'social', 'health', 'Distribution'])
    # reset_index keeps positional access valid after rows are removed —
    # the original positional loop broke once drop_duplicates left index gaps.
    Data = Data.drop_duplicates().reset_index(drop=True)
    # BUG FIX: the original called dropna without assigning the result,
    # so NaN rows were never actually removed.
    Data = Data.dropna(axis=0)
    row, columns = Data.shape
    # 'children' is numeric except for the literal 'more' → encode as 4
    # (vectorised; replaces the original per-row .loc loop).
    Data['children'] = Data['children'].replace('more', 4)
    numerlization(Data, ['parents', 'has_nurs', 'form', 'housing',
                         'finance', 'social', 'health', 'Distribution'])
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    Data = Data.astype(float)
    dataSet = Data.iloc[:, 0:8]
    labelSet = Data.iloc[:, 8:9]
    X_train, X_test, y_train, y_test = train_test_split(dataSet, labelSet,
                                                        test_size=0.90,
                                                        random_state=0)
    return row, columns, X_train, X_test, y_train, y_test

# SVM with scikit-learn's default hyper-parameters (baseline).
def mySvm(X_train, X_test, y_train, y_test, row, classLabel):
    """Train a default SVC, print its test accuracy and plot the
    confusion matrix.  *row* is unused but kept for a uniform signature."""
    clf = svm.SVC(decision_function_shape='ovo')
    # ravel() the one-column label frame, matching mySvm_1/mySvm_2 and
    # avoiding sklearn's DataConversionWarning for (n, 1) targets.
    clf.fit(X_train, y_train.values.ravel())
    predictLabel = clf.predict(X_test)
    print("当前默认参数对应的分类准确率：", accuracy(y_test, predictLabel, 0, 0))
    plot_confusion_matrix(y_test, predictLabel, classLabel, u"支持向量机-默认参数-结果显示")

# Display the results of the SVM fitted with the best parameters found.
def mySvm_1(X_train, X_test, y_train, y_test, row, classLabel, parameter):
    """Grid-search an ovo SVC over *parameter* (cv=4), print the resulting
    test accuracy and show the confusion matrix of the best estimator."""
    searcher = GridSearchCV(svm.SVC(decision_function_shape='ovo'),
                            param_grid=parameter, cv=4)
    searcher.fit(X_train.values, y_train.values.ravel())
    bestPredict = searcher.predict(X_test.values)
    print("当前最优参数对应的分类准确率：", accuracy(y_test, bestPredict, 0, 0))
    plot_confusion_matrix(y_test, bestPredict, classLabel, u"支持向量机-最优参数-结果显示")

# Fitness evaluation used by the PSO: accuracy of a grid-searched SVC.
def mySvm_2(X_train, X_test, y_train, y_test, row, classLabel, parameter, p, g):
    """Grid-search an ovo SVC over *parameter* (cv=4) and return its test
    accuracy, letting accuracy() abandon the scan early once the result
    cannot beat min(p, g)."""
    searcher = GridSearchCV(svm.SVC(decision_function_shape='ovo'),
                            param_grid=parameter, cv=4)
    searcher.fit(X_train.values, y_train.values.ravel())
    return accuracy(y_test, searcher.predict(X_test.values), p, g)

# Build the initial random particle positions.
def formInitialX(num):
    """Return a (4*num, 2) array of random [C, gamma] starting points.

    Each of the *num* groups contributes four particles — one per
    combination of a "small" (0..1) and a "large" (1..1000) range for each
    coordinate — so the swarm starts spread over the whole search space.
    Values are rounded to three decimals.
    """
    quadrants = [
        ((0, 1), (1, 1000)),
        ((0, 1), (0, 1)),
        ((1, 1000), (0, 1)),
        ((1, 1000), (1, 1000)),
    ]
    particles = []
    for _ in range(num):
        # Draw order matters for reproducibility with a seeded RNG:
        # C first, then gamma, quadrant by quadrant.
        for (c_lo, c_hi), (g_lo, g_hi) in quadrants:
            particles.append([
                float('%.3f' % random.uniform(c_lo, c_hi)),
                float('%.3f' % random.uniform(g_lo, g_hi)),
            ])
    return np.array(particles)

# Clamp invalid SVM hyper-parameters produced by a PSO velocity step.
def shapeC(x):
    """Return a copy of the (n, 2) particle array with non-positive values
    replaced: column 0 (C) floors to 1, column 1 (gamma) floors to 0.25.

    SVC requires C > 0 and gamma > 0, but the PSO position update can
    push particles outside the valid region.  Vectorised boolean-mask
    assignment replaces the original tolist()/loop round-trip.
    """
    clamped = np.array(x, dtype=float)
    clamped[clamped[:, 0] <= 0, 0] = 1.0
    clamped[clamped[:, 1] <= 0, 1] = 0.25
    return clamped

class PSO_SVM(object):
    """Particle-swarm optimisation of an SVC's (C, gamma) hyper-parameters.

    Each particle is a 2-D point [C, gamma]; its fitness is the test-set
    accuracy returned by mySvm_2.  Standard PSO velocity/position updates
    run for max_steps iterations, tracking per-particle and global bests.
    """
    def __init__(self, population_size, max_steps, row, columns, X_train, X_test, y_train, y_test, classLabel):

        # training/test data and class names for plotting
        self.row = row
        self.colums = columns
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.classLabel = classLabel

        # global-best classification accuracy recorded after each iteration
        self.best_resault = []

        self.w = 0.6  # inertia weight
        self.c1 = self.c2 = 2  # cognitive / social acceleration coefficients
        self.population_size = population_size  # number of particles
        self.dim = 2  # search-space dimension: [C, gamma]
        self.max_steps = max_steps  # number of iterations
        #self.x_bound = [0, 10]  # solution-space bounds (unused alternative init)
        #self.x = np.random.uniform(self.x_bound[0], self.x_bound[1],
        #                           (self.population_size, self.dim))  # initial particle positions
        #self.v = np.random.rand(self.population_size, self.dim)  # initial particle velocities

        self.x = formInitialX(self.population_size//4) # initial positions, 4 particles per group
        #self.v = np.random.rand(self.population_size, self.dim)  # initial particle velocities
        self.v = np.array([[0,0]]*self.population_size)

        fitness = self.initial_fitness(self.x)
        self.p = self.x  # per-particle best positions (aliases x until x is rebound in evolve)
        self.pg = self.x[np.argmax(fitness)]  # global best position
        #print(self.pg,type(self.pg))
        self.individual_best_fitness = fitness  # per-particle best fitness
        self.global_best_fitness = np.max(fitness)  # global best fitness

    # Evaluate the initial swarm's fitness (early-exit thresholds disabled).
    def initial_fitness(self,x):
        curr_x = x.tolist()
        curr_fintness = []

        for i in range(len(curr_x)):
            parameter = {"C": [curr_x[i][0]], "gamma": [curr_x[i][1]]}
            # print(mySvm_2(self.X_train, self.X_test, self.y_train, self.y_test,self.row,self.classLabel,parameter))
            curr_fintness.append(
                mySvm_2(self.X_train, self.X_test, self.y_train, self.y_test, self.row, self.classLabel, parameter,
                        0, 0))
        return np.array(curr_fintness)
    # Evaluate the current swarm's fitness, passing each particle's best and
    # the global best so accuracy() can abandon hopeless evaluations early.
    def calculate_fitness(self,x):
        curr_x = x.tolist()
        curr_fintness = []

        for i in range(len(curr_x)):
            parameter = {"C":[curr_x[i][0]],"gamma":[curr_x[i][1]]}
            #print(mySvm_2(self.X_train, self.X_test, self.y_train, self.y_test,self.row,self.classLabel,parameter))
            curr_fintness.append(mySvm_2(self.X_train, self.X_test, self.y_train, self.y_test,self.row,self.classLabel,parameter,self.individual_best_fitness[i],self.global_best_fitness))
        return np.array(curr_fintness)

    def evolve(self):
        """Run the PSO main loop for max_steps iterations."""
        print("run this")
        for step in range(self.max_steps):
            print("第 ",step," 次迭代")
            r1 = np.random.rand(self.population_size, self.dim)
            r2 = np.random.rand(self.population_size, self.dim)
            # standard PSO update: inertia + cognitive pull + social pull
            self.v = self.w * self.v + self.c1 * r1 * (self.p - self.x) + self.c2 * r2 * (self.pg - self.x)
            self.x = self.v + self.x
            self.x = shapeC(self.x)
            #print(self.x)
            fitness = self.calculate_fitness(self.x)
            # mask of particles whose personal best improved this iteration
            update_id = np.greater(fitness,self.individual_best_fitness) # particles needing an update
            self.p[update_id] = self.x[update_id]
            self.individual_best_fitness[update_id] = fitness[update_id]
            # a new generation produced a higher fitness: refresh global best
            if np.max(fitness) > self.global_best_fitness:
                self.pg = self.x[np.argmax(fitness)]
                self.global_best_fitness = np.max(fitness)

            print(self.x)
            print(self.individual_best_fitness)
            self.best_resault.append(self.global_best_fitness)

    def showResault(self):
        """Report the best (C, gamma) found and visualise the results."""

        last_pg = self.pg.tolist()
        C_parameter = {"C": [last_pg[0]], "gamma": [last_pg[1]]}
        print("最优参数：",last_pg,"   最优分类准确率：",self.global_best_fitness)
        mySvm(self.X_train, self.X_test, self.y_train, self.y_test, self.row, self.classLabel)
        mySvm_1(self.X_train, self.X_test, self.y_train, self.y_test, self.row, self.classLabel, parameter=C_parameter)
        drwa(self.best_resault,title="群智能算法优化SVM问题")

if __name__ == '__main__':


    # Entry point: load the nursery data, then tune the SVC's (C, gamma)
    # with a swarm of 20 particles for 200 iterations and show the results.
    fileName = 'nursery.csv'
    row, columns, X_train, X_test, y_train, y_test = loadData(fileName)
    # Class names for the confusion-matrix axes, in label-code order.
    classLabel = ['not_recom', 'recommend', 'very_recom', 'priority', 'spec_prior']
    pso = PSO_SVM(20, 200, row, columns, X_train, X_test, y_train, y_test, classLabel)

    pso.evolve()
    pso.showResault()