import numpy as np
import torch.optim as optim
import torch.nn as nn
 
import torch
from torch.utils.data import DataLoader, Dataset

## 此程序文件主要用于对预处理完成的脑电伪迹数据集进行ResNet模型训练。 ##
## 在网络开源程序的基础上，对参数进行了调整。此程序文件参考了网络文章《深度学习模型：Pytorch搭建ResNet、DenseNet网络，完成一维数据分类任务》 ##

 
 
class Bottleneck(torch.nn.Module):
    """1-D bottleneck residual block (1x1 -> 3x1 -> 1x1 convolutions).

    Args:
        In_channel: number of input channels.
        Med_channel: channel width of the middle 3x1 convolution.
        Out_channel: number of output channels.
        downsample: when True, the first convolution (and the shortcut
            projection) use stride 2 to halve the temporal length.
    """

    def __init__(self, In_channel, Med_channel, Out_channel, downsample=False):
        super(Bottleneck, self).__init__()
        self.stride = 2 if downsample else 1

        self.layer = torch.nn.Sequential(
            torch.nn.Conv1d(In_channel, Med_channel, 1, self.stride),
            torch.nn.BatchNorm1d(Med_channel),
            torch.nn.ReLU(),
            torch.nn.Conv1d(Med_channel, Med_channel, 3, padding=1),
            torch.nn.BatchNorm1d(Med_channel),
            torch.nn.ReLU(),
            torch.nn.Conv1d(Med_channel, Out_channel, 1),
            torch.nn.BatchNorm1d(Out_channel),
            torch.nn.ReLU(),
        )

        # BUG FIX: the shortcut needs a 1x1 projection whenever the channel
        # count changes OR the block downsamples. The original only checked
        # the channel count, so downsample=True with In_channel == Out_channel
        # produced a shape mismatch in the residual addition.
        if In_channel != Out_channel or self.stride != 1:
            self.res_layer = torch.nn.Conv1d(In_channel, Out_channel, 1, self.stride)
        else:
            self.res_layer = None

    def forward(self, x):
        """Return layer(x) + shortcut(x); shortcut is identity or 1x1 projection."""
        if self.res_layer is not None:
            residual = self.res_layer(x)
        else:
            residual = x
        return self.layer(x) + residual
 
 
class ResNet(torch.nn.Module):
    """1-D ResNet-50-style classifier for flattened EEG-artifact windows.

    Args:
        in_channels: number of signal channels; flat inputs are reshaped
            to (batch, in_channels, length) in forward().
        classes: number of output classes (4 for our EEG artifact dataset).
    """

    def __init__(self, in_channels=2, classes=4):
        super(ResNet, self).__init__()
        # Remember the channel count so forward() can reshape generically
        # instead of hard-coding (-1, 2, 64) as the original did.
        self.in_channels = in_channels
        self.features = torch.nn.Sequential(
            torch.nn.Conv1d(in_channels, 64, kernel_size=7, stride=2, padding=3),
            torch.nn.MaxPool1d(3, 2, 1),

            Bottleneck(64, 64, 256, False),
            Bottleneck(256, 64, 256, False),
            Bottleneck(256, 64, 256, False),
            #
            Bottleneck(256, 128, 512, True),
            Bottleneck(512, 128, 512, False),
            Bottleneck(512, 128, 512, False),
            Bottleneck(512, 128, 512, False),
            #
            Bottleneck(512, 256, 1024, True),
            Bottleneck(1024, 256, 1024, False),
            Bottleneck(1024, 256, 1024, False),
            Bottleneck(1024, 256, 1024, False),
            Bottleneck(1024, 256, 1024, False),
            Bottleneck(1024, 256, 1024, False),
            #
            Bottleneck(1024, 512, 2048, True),
            Bottleneck(2048, 512, 2048, False),
            Bottleneck(2048, 512, 2048, False),

            # Global average pooling makes the head independent of the
            # input window length.
            torch.nn.AdaptiveAvgPool1d(1)
        )
        # NOTE: the attribute name "classifer" (sic) is kept so that
        # previously saved state_dict checkpoints still load.
        self.classifer = torch.nn.Sequential(
            torch.nn.Linear(2048, classes)
        )

    def forward(self, x):
        """Classify a batch of samples.

        Accepts a batched input of shape (batch, in_channels * length)
        (or already (batch, in_channels, length)) and returns class
        logits of shape (batch, classes).
        """
        # BUG FIX / generalization: the original hard-coded
        # view(x, (-1, 2, 64)), ignoring in_channels and fixing the
        # window length to 64. Infer the length instead; assumes the
        # first dimension is the batch (true for DataLoader batches).
        x = x.view(x.size(0), self.in_channels, -1)
        x = self.features(x)
        x = x.view(x.size(0), 2048)
        x = self.classifer(x)
        return x
 
 
class MyDataset(Dataset):
    """Dataset pairing signal rows with one-hot label rows.

    Args:
        raman_dir: path to a whitespace-separated text file, one sample
            (feature row) per line, readable by np.loadtxt.
        label_file: path to a matching text file of one-hot label rows.

    __getitem__ returns (float tensor of the sample row, integer class
    index from argmax over the one-hot label row); the default DataLoader
    collate turns the index into a LongTensor suitable for
    CrossEntropyLoss.
    """

    def __init__(self, raman_dir, label_file):
        # Load once; attribute names kept from the original implementation.
        self.raman_dir = np.loadtxt(raman_dir)
        self.label_file = np.loadtxt(label_file)
        # Idiom fix: build the row lists directly instead of the original
        # element-by-element append loops.
        self.raman_data = list(self.raman_dir)
        self.label_list = list(self.label_file)

    def __getitem__(self, idx):
        raman = torch.Tensor(self.raman_data[idx])
        # One-hot row -> class index. The original converted the label row
        # to a torch.Tensor first only to argmax it with numpy; argmax the
        # numpy row directly (same np.int64 result, one less copy).
        label = np.argmax(self.label_list[idx])
        return raman, label

    def __len__(self):
        # Number of samples in the dataset.
        return len(self.label_list)
 
 
if __name__ == '__main__':
    # Dataset files (one flattened sample per row).
    train_data = './train_data.txt'
    val_data = './val_data.txt'

    # One-hot label files.
    train_label = './train_labels.txt'
    val_label = './val_labels.txt'

    # Batch size tuned to 256 for our EEG artifact dataset.
    batch_size = 256
    # Training set input.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    train_dataset = MyDataset(train_data, train_label)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    # Validation set input.
    val_dataset = MyDataset(val_data, val_label)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)

    train_data_size = len(train_dataset)
    val_data_size = len(val_dataset)

    # Build the model (4 classes for our EEG artifact dataset).
    class_num = 4
    ResNetOutput = ResNet(in_channels=2, classes=class_num).to(device)

    # Loss function.
    loss_fn = nn.CrossEntropyLoss().to(device)

    # Optimizer; learning rate tuned to 0.001 for our dataset.
    learning_rate = 0.001
    optimizer = optim.Adam(ResNetOutput.parameters(), lr=learning_rate)

    # Step counters.
    total_train_step = 0
    total_val_step = 0

    # 50 passes over the dataset for our EEG artifact data.
    epoch = 50
    acc_list = np.zeros(epoch)
    print("{0:-^27}".format('Train_Model'))
    for i in range(epoch):
        print("----------epoch={}----------".format(i + 1))
        ResNetOutput.train()
        for data in train_dataloader:  # data is one mini-batch
            raman_train_data, t_labels = data
            raman_train_data = raman_train_data.to(device)
            t_labels = t_labels.to(device)
            output = ResNetOutput(raman_train_data)
            loss = loss_fn(output, t_labels)

            # Optimizer step.
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()        # back-propagate
            optimizer.step()       # update parameters

            total_train_step = total_train_step + 1
            print("train_times:{},Loss:{}".format(total_train_step, loss.item()))

        # Validation phase.
        ResNetOutput.eval()
        total_val_loss = 0
        total_accuracy = 0
        with torch.no_grad():  # no gradient tracking needed for validation
            for data in val_dataloader:
                raman_val_data, v_labels = data
                raman_val_data = raman_val_data.to(device)
                v_labels = v_labels.to(device)
                outputs = ResNetOutput(raman_val_data)
                loss = loss_fn(outputs, v_labels)
                total_val_loss = total_val_loss + loss.item()  # accumulate loss

                # BUG FIX: the original looped `for j in v_labels` and
                # indexed the predictions with the *label values*
                # (outputs.argmax(1)[j] == v_labels[j]), which mis-counts
                # correct predictions and can index past the batch. Compare
                # each sample's predicted class with its label directly.
                accuracy = (outputs.argmax(1) == v_labels).sum().item()
                total_accuracy = total_accuracy + accuracy

        # BUG FIX: divide by the measured validation-set size instead of
        # the hard-coded constant 269.
        val_acc = float(total_accuracy / val_data_size) * 100
        acc_list[i] = val_acc
        print('the_classification_is_correct :', total_accuracy)  # correctly classified count
        print("val_Loss:{}".format(total_val_loss))
        print("val_acc:{}".format(val_acc), '%')

        total_val_step += 1
        torch.save(ResNetOutput.state_dict(), "ResNet_{}.pth".format(i))
        print("{0:-^24}".format('Model_Save'), '\n')
        print('val_max=', max(acc_list), '%')