import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split
import torch.optim as optim
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score,confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
# =============================================================================
# Load the data
# =============================================================================
zhengchang_xls = pd.read_excel(r'./zhengchang.xlsx', header=None)  # raw excel sheet, no header row
zhengchang = np.array(zhengchang_xls)  # 200 normal samples, one per row

yichang_xls = pd.read_excel(r'./yichang.xlsx', header=None)
yichang = np.array(yichang_xls)  # 200 abnormal samples

# Stack into a single 400x500 array: 400 samples, each a series of length 500.
data = np.concatenate((zhengchang, yichang), axis=0)
# Reshape to (samples, seq_len, dim) = (400, 10, 50): the Transformer treats
# each sample's 500 values as a sequence of 10 "words", each "word" being a
# 50-dimensional embedding vector.
data = data.reshape(400, 10, 50)
# Labels: 200 zeros (normal) followed by 200 ones (abnormal)
label = np.concatenate((np.zeros(200), np.ones(200)))

# =============================================================================
# Build the dataset: wrap as tensors, split train/test, batch with DataLoader
# =============================================================================
class MyDataSet(Dataset):
    """Wrap a pair of numpy arrays (samples, labels) as float32 tensors
    so they can be served by a torch DataLoader."""

    def __init__(self, data, label):
        # Convert once up front; indexing later is then a cheap tensor slice.
        self.data = torch.as_tensor(data, dtype=torch.float32)
        self.label = torch.as_tensor(label, dtype=torch.float32)
        self.length = len(label)

    def __getitem__(self, index):
        # One (sample, label) pair per index.
        return self.data[index], self.label[index]

    def __len__(self):
        return self.length

dataset = MyDataSet(data, label)  # wrap the arrays as a torch Dataset

# Randomly hold out 80 of the 400 samples for testing; train on the other 320.
train_dataset, test_dataset = random_split(dataset, [320, 80])

# DataLoaders: mini-batches of 15 for training; the whole test set as a single
# batch (no need to split the test set into small batches).
train_dataloader = DataLoader(train_dataset, batch_size=15, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=True)

# Peek at one training batch to confirm shapes: (batch 15, seq len 10, dim 50)
x_train, y_train = next(iter(train_dataloader))
print(x_train.shape, y_train.shape)

# =============================================================================
# Build the Transformer network: not a full Transformer — only the Encoder is
# used (no Decoder), followed by a fully-connected layer for classification
# =============================================================================
class TransformerEncoderClassification(nn.Module):
    """Encoder-only Transformer classifier.

    Stacks six encoder layers (d_model=50, 5 attention heads) and maps the
    flattened encoder output (10 * 50 features per sample) to 2 class logits.
    NOTE(review): no positional encoding is added before the encoder.
    """

    def __init__(self):
        super().__init__()
        # One encoder layer: input dim 50, 5 heads (head count must divide
        # the model dimension). nn.TransformerEncoder stacks 6 copies of it.
        encoder_layer = nn.TransformerEncoderLayer(d_model=50, nhead=5)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
        # Classification head: flattened (seq 10 * dim 50) -> 2 logits.
        self.fc = nn.Linear(10 * 50, 2)

    def forward(self, x):
        # The encoder expects (seq_len, batch, dim) but x arrives as
        # (batch, seq_len, dim): swap the first two axes, run the
        # shape-preserving encoder, then swap back.
        encoded = self.transformer_encoder(x.permute(1, 0, 2))
        # Flatten each sample to a 500-vector and classify.
        flat = encoded.permute(1, 0, 2).flatten(1)
        return self.fc(flat)
    
# Instantiate the model
model = TransformerEncoderClassification()

# =============================================================================
# Training
# =============================================================================
criterion = nn.CrossEntropyLoss()  # expects raw logits + int64 class targets
optimizer = optim.SGD(model.parameters(), lr=0.001)

# Train the model
model.train()  # enable dropout inside the encoder layers during training
for epoch in range(10):
    # Reset the running loss every epoch. Previously the accumulator lived
    # outside the epoch loop, so the leftover batches of one epoch (22 batches
    # per epoch, printed every 10) bled into the next epoch's first printed
    # average while still being divided by 10.
    train_loss = 0.0
    for i, (x_train, y_train) in enumerate(train_dataloader):
        outputs = model(x_train)  # (batch, 2) logits
        # Compute the loss (CrossEntropyLoss needs integer class labels)
        loss = criterion(outputs, y_train.long())
        # Predicted class = arg-max over the 2 logits
        _, y_pred = torch.max(outputs.data, dim=1)
        # Clear gradients from the previous step
        optimizer.zero_grad()
        # Backpropagate and update the parameters
        loss.backward()
        optimizer.step()
        # Track batch accuracy and the running loss
        train_acc = (y_pred == y_train).sum() / len(y_train)
        train_loss += loss.item()
        if (i + 1) % 10 == 0:  # report every 10 iterations
            print('[%d %5d] loss: %.3f acc: %.3f' % (epoch + 1, i + 1, train_loss / 10, train_acc))
            train_loss = 0.0
            
# =============================================================================
# Testing
# =============================================================================
model.eval()  # disable dropout for inference — it was left active before, adding noise to test accuracy
correct = 0
total = 0
with torch.no_grad():  # no gradients needed during evaluation
    for x_test, y_test in test_dataloader:
        outputs = model(x_test)
        _, y_pred = torch.max(outputs.data, dim=1)
        correct += (y_pred == y_test).sum()
        total += len(y_test)  # count actual samples instead of hard-coding 80
print('Accuracy of the testing samples: %.3f %%' % (100 * correct / total))

# =============================================================================
# Compute evaluation metrics
# =============================================================================
# NOTE(review): y_test / y_pred are whatever the LAST evaluation batch left
# bound — this covers the whole test set only because the test loader serves
# it as a single batch.
acc, pre, recall, f1score = (
    fn(y_test, y_pred)
    for fn in (accuracy_score, precision_score, recall_score, f1_score)
)
print('测试集结果：\nAcc: %.2f%% \nPre: %.2f%% \nRecall: %.2f%% \nF1-score: %.2f%% ' % (100*acc, 100*pre, 100*recall, 100*f1score))

# Plot the confusion matrix
disp = ConfusionMatrixDisplay(confusion_matrix(y_test, y_pred))
disp.plot()
plt.show()
