
'''
Copyright:
Description:
version:
Author: chengx
Date: 2021-05-27 10:35:28
LastEditors: chengx
LastEditTime: 2022-03-03 15:39:42
'''
# -*-coding=utf-8 -*-
from sklearn.svm import SVC
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report


# Load data
def readHsi():
    """Load spectral samples and labels, then preprocess them.

    Pipeline: Savitzky-Golay first-derivative smoothing followed by
    per-band (per-column) z-score standardisation.

    Returns:
        x (np.ndarray): preprocessed spectra, shape (n_samples, n_bands).
        y (np.ndarray): class labels, shape (n_samples,).
    """
    x = np.load('./data/x.npy')
    y = np.load('./data/y.npy')

    print('read data', x.shape, y.shape)

    # Savitzky-Golay smoothing: window 9, polynomial order 2,
    # first derivative (deriv=1) along the last axis.
    x = scipy.signal.savgol_filter(x, 9, 2, deriv=1)

    # Per-band z-score, vectorised replacement of the original pure-Python
    # loop. Population std (ddof=0) matches the original /lenth denominator.
    mean = x.mean(axis=0)
    std = x.std(axis=0)
    std[std == 0] = 1.0  # guard: constant bands would divide by zero
    x = (x - mean) / std

    return x, y

def train(train, train_label):
    """Fit an SVM via 10-fold cross-validated grid search.

    Returns the best estimator refit on the full training data.
    """
    # Full search space kept for reference:
    # parameters = [ { 'C': [0.001,0.01,0.1,1,100], 'gamma': [1, 10, 100, 1000, 10000, 100000, 1000000], 'kernel': ['rbf','linear'] } ]
    param_grid = [{'C': [100], 'gamma': [100], 'kernel': ['linear']}]

    search = GridSearchCV(SVC(), param_grid, cv=10, n_jobs=8)
    search.fit(train, train_label)
    print(search.best_params_)

    return search.best_estimator_

def test(model, test, test_label):
    """Evaluate *model* on a held-out set.

    Prints the confusion matrix, the per-class classification report and
    the overall accuracy.

    Returns:
        OA (float): overall accuracy.
        kappa (float): Cohen's kappa coefficient.
        AA (float): average accuracy (mean of per-class recalls).
    """
    y_pred = model.predict(test)
    M = metrics.confusion_matrix(test_label, y_pred)
    print(M)
    print(M.shape)

    # Average accuracy = mean per-class recall. This generalises the
    # original hard-coded 2-class (and commented 3-class) formulas to any
    # number of classes; for 2 classes it is numerically identical.
    AA = float(np.mean(np.diag(M) / M.sum(axis=1)))

    OA = accuracy_score(test_label, y_pred)
    from sklearn.metrics import cohen_kappa_score
    kappa = cohen_kappa_score(test_label, y_pred)

    # Per-class precision/recall/F1 report
    classification = classification_report(test_label, y_pred)
    print(classification)

    print('The OA of prediction is:', OA)
    return OA, kappa, AA

def main(i):
    """Run one split/train/evaluate experiment.

    Args:
        i: varies the random seed of the split (random_state = i + 10).
           NOTE(review): the original computed ratio = i/100 ("fraction of
           test data") but immediately overwrote it with 0.1, so *i* never
           affected the split size. The dead assignment is removed here;
           runtime behaviour is unchanged.

    Returns:
        (OA, kappa, AA) metrics from test().
    """
    x, y = readHsi()
    # NOTE(review): despite the original comment calling this the *test*
    # fraction, it is passed as train_size — 10% train / 90% test. Confirm
    # this is intended.
    ratio = 0.1
    print('ratio', ratio)

    x_train, x_test, y_train, y_test = train_test_split(
        x, y, train_size=ratio, random_state=i + 10)
    model = train(x_train, y_train)
    OA, k, AA = test(model, x_test, y_test)

    return OA, k, AA

if __name__ == '__main__':
    # Repeat the experiment over a range of seeds and summarise the
    # spread (std) of the three metrics.
    oa, aa, kappa = [], [], []

    for seed in range(20, 85, 5):
        run_oa, run_kappa, run_aa = main(seed)
        oa.append(run_oa)
        kappa.append(run_kappa)
        aa.append(run_aa)

    print('oa', list(np.round(oa, 3)))
    print('aa', list(np.round(aa, 3)))
    print('k', list(np.round(kappa, 3)))

    std_oa = np.std(oa)
    std_aa = np.std(aa)
    std_k = np.std(kappa)
    print('std is{},{},{}'.format(std_oa, std_aa, std_k))