'''
Copyright: 
Description: 
version: 
Author: chengx
Date: 2021-11-16 16:01:39
LastEditors: chengx
LastEditTime: 2022-03-03 22:22:43
'''
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import torch
import torch.nn as nn
import torch.nn.functional as F  # unlike nn's class-based modules, F provides stateless functions with no learnable parameters
import torch.optim as optim
import torch.utils.data
from matplotlib import  pyplot as plt
import scipy.signal
from sklearn.metrics import precision_score,accuracy_score,cohen_kappa_score


CLASS_NUM = 3  # number of target classes (blx / blt / njx)
ENPOCH = 200  # training epochs (sic: "EPOCH"; name kept — referenced by train())
loss_avr = []  # per-epoch running-average loss, filled by train()
loss_cur = []  # last-batch loss of each epoch, filled by train()
oa_epoch_ched = []  # per-epoch overall accuracy on the test set, filled by train()

class SELayer(nn.Module):
    """Squeeze-and-excitation-style channel attention over 180 spectral bands.

    Maps a (batch, 180, L) input to per-band weights in (0, 1) of the same
    shape via a 1x1-conv bottleneck (180 -> 50 -> 180).
    """

    def __init__(self):
        super(SELayer, self).__init__()
        # Squeeze: 180 -> 50 channels with 1x1 conv + BN + LeakyReLU.
        self.c1 = nn.Sequential(
            nn.Conv1d(180, 50, kernel_size=1),
            nn.BatchNorm1d(50),
            nn.LeakyReLU(),
        )
        # Excite: expand back 50 -> 180 channels.
        self.fc_2 = nn.Conv1d(50, 180, kernel_size=1)

    def forward(self, x):
        # NOTE: this ReLU clips the negative half of c1's LeakyReLU output;
        # kept as-is to preserve the trained behavior.
        x = F.relu(self.c1(x))
        # BUGFIX: torch.sigmoid replaces the deprecated F.sigmoid
        # (identical result, no deprecation warning).
        x = torch.sigmoid(self.fc_2(x))

        return x


class myCNN1d(nn.Module):
    """1-D CNN classifier with SE-style spectral attention for 180-band input.

    Input: (batch, 1, 180).  Output: (log_probs, attention) where log_probs
    is (batch, CLASS_NUM) from the LogSoftmax head and attention is the
    SELayer weight map in (batch, 180, 1) layout.

    NOTE(review): the LogSoftmax head should be paired with nn.NLLLoss;
    feeding this output to nn.CrossEntropyLoss applies log-softmax twice.
    """
    def __init__(self):
        super(myCNN1d,self).__init__()

        # Three conv blocks (Conv1d k=5 -> BN -> AvgPool/2 -> LeakyReLU);
        # sequence length shrinks 180 -> 88 -> 42 -> 19.
        self.layer = nn.Sequential(
            nn.Conv1d(1,16,5),
            nn.BatchNorm1d(16),
            nn.AvgPool1d(2),
            nn.LeakyReLU(),

            nn.Conv1d(16,16,5),
            nn.BatchNorm1d(16),
            nn.AvgPool1d(2),
            nn.LeakyReLU(),

            nn.Conv1d(16,16,5),
            nn.BatchNorm1d(16),
            nn.AvgPool1d(2),
            nn.LeakyReLU(),
            )

        # Classifier head: 16 channels * 19 samples = 304 flattened features.
        self.fc = nn.Sequential(
            nn.Linear(304, CLASS_NUM),
            nn.LogSoftmax(dim=1)
        )
        self.seLayer = SELayer()

    def forward(self,x):
        # Apply band-wise attention in (batch, bands, 1) layout, then
        # transpose back to (batch, 1, bands) for the conv stack.
        x = torch.transpose(x,2,1)
        w = self.seLayer(x)
        x = x*w
        x = torch.transpose(x,2,1)

        x = self.layer(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 304)
        x = self.fc(x)
        return x,w


# Load the dataset
def readHsi():
    """Load the three-class spectral dataset and preprocess it.

    Reads the blx/blt/njx arrays from ./data, keeps bands 30:210, assigns
    labels 0/1/2, min-max normalizes each band, then takes the first
    Savitzky-Golay derivative.

    Returns:
        (x, y): features of shape (n_samples, 180) and a float label vector.
    """
    blx = np.load('./data/blx.npy')
    blt = np.load('./data/blt.npy')
    njx = np.load('./data/njx.npy')

    # Stack the three classes and keep the informative band window.
    x = np.concatenate((blx, blt, njx), axis=0)[:, 30:210]
    y = np.concatenate((np.full(blx.shape[0], 0.0),
                        np.full(blt.shape[0], 1.0),
                        np.full(njx.shape[0], 2.0)))

    print('read data', x.shape, y.shape)

    # Min-max normalize every band independently.
    for band in range(x.shape[1]):
        col = x[:, band]
        span = np.max(col) - np.min(col)
        x[:, band] = col - float(np.min(col))
        x[:, band] = x[:, band] / span

    # First-derivative Savitzky-Golay filtering (window 9, poly order 2).
    x = scipy.signal.savgol_filter(x, 9, 2, deriv=1)

    return x, y


def create_data_loader(randon, ratio):
    """Build train/test DataLoaders from the spectral dataset.

    Args:
        randon: random_state forwarded to train_test_split.
        ratio: train_size fraction for the split.

    Returns:
        (train_loader, test_loader, y_test).
    """
    x, y = readHsi()
    print('ratio', ratio)
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, train_size=ratio, random_state=randon)
    print('Xtrain shape: ', x_train.shape)
    print('Xtest  shape: ', x_test.shape)

    # Insert the channel axis Conv1d expects: (n, bands) -> (n, 1, bands).
    x_train = x_train[:, np.newaxis, :]
    x_test = x_test[:, np.newaxis, :]

    # Wrap in Dataset objects and build the loaders.
    train_loader = torch.utils.data.DataLoader(dataset=TrainDS(x_train, y_train),
                                               batch_size=128,
                                               shuffle=True,
                                               num_workers=8)
    test_loader = torch.utils.data.DataLoader(dataset=TestDS(x_test, y_test),
                                              batch_size=128,
                                              shuffle=False,
                                              num_workers=8)

    return train_loader, test_loader, y_test

class TrainDS(torch.utils.data.Dataset):
    """Training dataset wrapping numpy features/labels as torch tensors."""

    def __init__(self, x_train, y_train):
        self.x_data = torch.FloatTensor(x_train)
        self.y_data = torch.LongTensor(y_train)
        self.len = x_train.shape[0]

    def __getitem__(self, index):
        # One (sample, label) pair.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # Number of samples.
        return self.len

class TestDS(torch.utils.data.Dataset):
    """Test dataset wrapping numpy features/labels as torch tensors."""

    def __init__(self, x_test, y_test):
        self.x_data = torch.FloatTensor(x_test)
        self.y_data = torch.LongTensor(y_test)
        self.len = x_test.shape[0]

    def __getitem__(self, index):
        # One (sample, label) pair.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # Number of samples.
        return self.len

def train(train_loader, test_loader, y_test):
    """Train myCNN1d for ENPOCH epochs, logging loss and test accuracy.

    Side effects: appends to the module-level loss_avr / loss_cur /
    oa_epoch_ched lists and saves them under ./result/ after training.

    Args:
        train_loader: DataLoader yielding (inputs, labels) batches.
        test_loader: DataLoader over the test set (shuffle=False).
        y_test: full ground-truth label vector for the test set.

    Returns:
        (net, device): the trained network and the device it lives on.
    """
    # Train on GPU when available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('device', device)
    net = myCNN1d().to(device)
    # BUGFIX: the network's head ends in LogSoftmax, so the matching loss
    # is NLLLoss.  The original CrossEntropyLoss applied log-softmax a
    # second time on top of the log-probabilities.
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    def test2(device, net, test_loader):
        # Predict the whole test set.  BUGFIX: accumulate predictions from
        # every batch — the original kept only the LAST batch, which broke
        # accuracy_score whenever the test set spanned multiple batches.
        preds = []
        for inputs, _ in test_loader:
            inputs = inputs.to(device)
            outputs, _ = net(inputs)
            preds.append(np.argmax(outputs.detach().cpu().numpy(), axis=1))
        return np.concatenate(preds)

    total_loss = 0
    for epoch in range(ENPOCH):
        loss = 0
        for i, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Zero gradients, then forward + backward + optimize.
            optimizer.zero_grad()
            outputs, w = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        # NOTE: "loss avg" is the cumulative batch loss divided by the
        # epoch count — kept as-is for continuity with the saved curves.
        print('[Epoch: %d]   [loss avg: %.4f]   [current loss: %.4f]' % (epoch + 1,
                                                                         total_loss / (epoch + 1),
                                                                         loss.item()))
        # Record the per-epoch curves for later plotting.
        loss_avr.append(total_loss / (epoch + 1))
        loss_cur.append(loss.item())

        y_pred_test = test2(device, net, test_loader)
        oa_epoch_ched.append(accuracy_score(y_test, y_pred_test))

    # Persist the curves once training finishes (the original guarded this
    # with `if epoch == ENPOCH-1`, which is always true after the loop).
    np.save('./result/wa_loss_x', np.array(loss_avr))
    np.save('./result/wa_loss_x_cur', np.array(loss_cur))
    np.save('./result/wa_epoch_x_oa', np.array(oa_epoch_ched))

    print('Finished Training')

    return net, device

def test(device, net, test_loader, y_test):
    """Evaluate net on the test loader and print a classification report.

    Returns:
        (y_test, y_pred_test): ground-truth labels and predicted labels.
    """
    # Collect argmax predictions batch by batch.
    batch_preds = []
    for inputs, _ in test_loader:
        inputs = inputs.to(device)
        outputs, _ = net(inputs)
        batch_preds.append(np.argmax(outputs.detach().cpu().numpy(), axis=1))
    y_pred_test = np.concatenate(batch_preds) if batch_preds else 0

    # Print the per-class precision/recall/F1 summary.
    classification = classification_report(y_test, y_pred_test, digits=4)
    print(classification)
    return y_test, y_pred_test

def main():
    """Run the train/test pipeline over a set of split seeds and report
    overall accuracy, per-class precision, and Cohen's kappa."""
    results = []
    precision = []
    k = []
    # range(20, 25, 5) currently yields a single seed: 20.
    for seed in range(20, 25, 5):
        train_loader, test_loader, y_test = create_data_loader(seed, 0.7)
        net, device = train(train_loader, test_loader, y_test)
        y_test, y_pred_test = test(device, net, test_loader, y_test)
        kappa = cohen_kappa_score(y_test, y_pred_test)
        print('kappa train', kappa)
        results.append(accuracy_score(y_test, y_pred_test))
        precision.append(precision_score(y_test, y_pred_test, average=None))
        k.append(kappa)

    print('oa', results)
    print('precision', precision)
    print('k', k)


def weightPic(coff_10runs):  # attention-weight plot
    """Plot attention-weight curves for nine runs as stacked subplots.

    Args:
        coff_10runs: array whose first 9 rows each reshape to 180 weights.
            Rows 0-7 are drawn in blue; row 8 (the last subplot) in red.
    """
    x_range = range(180)

    fig = plt.figure()
    # IMPROVED: the original repeated the add_subplot/plot stanza 9 times;
    # a single loop produces the identical figure.
    for row in range(9):
        ax = fig.add_subplot(9, 1, row + 1)
        ax.plot(x_range, coff_10runs[row, :].reshape((180)),
                color='red' if row == 8 else 'blue')

    # Map band indices to approximate wavelengths (nm) on the last axes.
    plt.xticks([0, 45, 90, 135, 180], [500, 600, 700, 800, 900])
    plt.draw()
    plt.show()

if __name__ == '__main__':
    # main()  # full training/evaluation pipeline (currently disabled)
    # NOTE(review): these filenames differ from the ones train() saves
    # ('wa_loss_x*') — confirm the intended curve files exist on disk.
    loss = np.load('./result/wa_loss_ched_cur.npy')
    oa = np.load('./result/wa_loss_cur_oa.npy')  # loaded but not plotted below

    # Plot the per-epoch loss curve.
    plt.plot(loss)
    # plt.plot(oa)
    plt.show()
    
