import os
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
import torch.nn as nn
import matplotlib.pyplot as plt
import torch.optim
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, accuracy_score

# Custom PyTorch dataset wrapping pandas features/labels
class LoadData(Dataset):
    """Wrap a pandas feature frame and label series as a PyTorch Dataset.

    Each item is a (features, label) pair of tensors built from the row at
    the given positional index.
    """

    def __init__(self, X, y):
        # X: pandas DataFrame of features; y: pandas Series of labels.
        self.X = X
        self.y = y

    def __len__(self):
        # One sample per feature row.
        return len(self.X)

    def __getitem__(self, index):
        # .iloc is positional, so the shuffled index labels left over from
        # train_test_split do not matter here.
        features = torch.tensor(self.X.iloc[index])
        label = torch.tensor(self.y.iloc[index])
        return features, label

# ---- Data loading ----
    # Load the (already normalized) dataset from CSV.
df = pd.read_csv('Normalization_total_extend.csv')

# Features are every column except 'Label'; 'Label' is the target.
X = df.drop(columns=['Label'])
y = df['Label']

    # Split into training and test sets (80/20, fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=50)

train_data = LoadData(X_train, y_train)
test_data = LoadData(X_test, y_test)

# Number of input features and number of distinct classes (used by the model).
X_dimension = len(X_train.columns)
y_dimension = len(y_train.value_counts())
print(f"X的维度：{X_dimension}")
print(f"y的维度：{y_dimension}")
    # DataLoaders for the training and test sets (no shuffling is applied).
batch_size = 128
train_dataloader = DataLoader(train_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

# ---- Build the ICNN model ----
    # Use the first CUDA GPU when available, otherwise fall back to CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'


class CNN(nn.Module):
    """1-D CNN classifier: four conv layers with two max-pools, then a 3-layer MLP.

    The fully connected head expects the flattened backbone output to have
    2304 features (128 channels x length 18), which corresponds to an input
    sequence length of 78 -- presumably equal to X_dimension; TODO confirm.
    """

    def __init__(self, num_classes=None):
        """Build the network.

        num_classes: size of the output layer. Defaults to the module-level
        ``y_dimension`` so existing ``CNN()`` callers keep working; passing a
        value decouples the class from that global.
        """
        super().__init__()
        if num_classes is None:
            num_classes = y_dimension  # backward-compatible default
        self.backbone = nn.Sequential(
            nn.Conv1d(1, 32, kernel_size=2),    # 1 -> 32 channels, kernel 2 (length -1)
            nn.Conv1d(32, 64, kernel_size=2),   # 32 -> 64 channels (length -1)
            nn.MaxPool1d(2, 2),                 # halve the sequence length
            nn.Conv1d(64, 64, kernel_size=2),   # 64 -> 64 channels (length -1)
            nn.Conv1d(64, 128, kernel_size=2),  # 64 -> 128 channels (length -1)
            nn.MaxPool1d(2, 2),                 # halve the sequence length again
        )
        self.flatten = nn.Flatten()  # (N, C, L) -> (N, C*L) for the MLP head
        self.fc = nn.Sequential(
            nn.Linear(2304, 64),  # 2304 = 128 channels * length 18 (input length 78)
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, num_classes),  # per-class logits
        )

    def forward(self, X):
        """X: (batch, 1, length) float tensor -> (batch, num_classes) logits."""
        X = self.backbone(X)
        X = self.flatten(X)
        logits = self.fc(X)
        return logits

def loss_value_plot(losses, iter):
    """Plot the recorded loss values on a fresh figure.

    losses: loss samples collected during training.
    iter: number of samples recorded (x axis runs 1..iter).
    Each sample was taken every 100 training iterations, hence the axis label.
    """
    plt.figure()
    steps = list(range(1, iter + 1))
    plt.plot(steps, losses)
    plt.xlabel('Iterations (×100)')  # actual iteration count is 100x the axis value
    plt.ylabel('Loss Value')
    # plt.show()  # intentionally disabled; the caller saves the figure to disk

# Instantiate the model and move its parameters to the selected device.
CNN_model = CNN()
CNN_model.to(device=device)

def train(model, optimizer, loss_fn, epochs):
    """Train ``model`` over the module-level ``train_dataloader``.

    model: network to optimize (already moved to ``device`` by the caller).
    optimizer: optimizer updating ``model``'s parameters.
    loss_fn: loss criterion (CrossEntropyLoss; targets are cast to long).
    epochs: number of full passes over the training data.

    Returns (losses, n_samples): loss values sampled every 100 batches and
    how many samples were recorded, for ``loss_value_plot``.
    """
    model.train()  # fix: ensure training-mode behavior (dropout/BN, if ever added)

    losses = []
    sample_count = 0  # renamed from `iter` to stop shadowing the builtin

    for epoch in range(epochs):
        print(f"epoch {epoch+1}\n-----------------")
        for i, (X, y) in enumerate(train_dataloader):
            # pandas data arrives as float64 -> cast to float32 for the model.
            X, y = X.to(device).to(torch.float32), y.to(device).to(torch.float32)
            # Reshape to (batch, channels=1, features) as expected by Conv1d.
            X = X.reshape(X.shape[0], 1, X_dimension)
            y_pred = model(X)
            loss = loss_fn(y_pred, y.long())  # CrossEntropyLoss needs long targets

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Sample the loss every 100 batches for later plotting.
            if i % 100 == 0:
                print(f"loss: {loss.item()}\t[{(i+1)*len(X)}/{len(train_data)}]")
                sample_count += 1
                losses.append(loss.item())

    return losses, sample_count


def test(model, loss_fn):
    """Evaluate ``model`` on the module-level ``test_dataloader`` and print metrics.

    Computes the confusion matrix, macro-averaged precision/recall/F1,
    accuracy and average loss over all test batches, prints them, and
    returns them as a dict (previously returned None; callers that ignore
    the return value are unaffected).
    """
    model.eval()  # fix: switch to inference-mode behavior before evaluating

    y_true_list = []
    y_pred_list = []
    loss_sum = 0
    batch_count = 0  # renamed from `iter` to stop shadowing the builtin

    with torch.no_grad():  # no gradients needed during evaluation
        for X, y in test_dataloader:
            # Cast features to float32; CrossEntropyLoss needs long targets.
            X, y = X.to(device).to(torch.float32), y.to(device).to(torch.long)
            X = X.reshape(X.shape[0], 1, X_dimension)
            y_pred = model(X)
            loss = loss_fn(y_pred, y)
            loss_sum += loss.item()
            batch_count += 1

            # Collect predicted classes and ground truth for sklearn metrics.
            y_pred_class = torch.argmax(y_pred, dim=1)
            y_true_list.extend(y.cpu().numpy())
            y_pred_list.extend(y_pred_class.cpu().numpy())

    # Aggregate metrics over the whole test set (macro = unweighted class mean).
    conf_mat = confusion_matrix(y_true_list, y_pred_list)
    precision = precision_score(y_true_list, y_pred_list, average='macro')
    recall = recall_score(y_true_list, y_pred_list, average='macro')
    f1 = f1_score(y_true_list, y_pred_list, average='macro')
    accuracy = accuracy_score(y_true_list, y_pred_list)
    avg_loss = loss_sum / batch_count

    print("Confusion Matrix:\n", conf_mat)
    print("Macro-average Precision:", precision)
    print("Macro-average Recall:", recall)
    print("Macro-average F1 Score:", f1)
    print("Accuracy:", accuracy)
    print("Average Loss:", avg_loss)

    return {
        "confusion_matrix": conf_mat,
        "precision": precision,
        "recall": recall,
        "f1": f1,
        "accuracy": accuracy,
        "avg_loss": avg_loss,
    }

# ---- Entry point: load a saved model if present, otherwise train one ----
if os.path.exists('ICNNModel/CNN_model.pth'):
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    CNN_model.load_state_dict(torch.load('ICNNModel/CNN_model.pth', map_location=device))
else:
    optimizer = torch.optim.Adam(CNN_model.parameters(), lr=0.001)
    loss_fn = torch.nn.CrossEntropyLoss()
    epochs = 10
    losses, iter = train(CNN_model, optimizer, loss_fn, epochs)
    # Bug fix: torch.save raises if the target directory does not exist.
    os.makedirs('ICNNModel', exist_ok=True)
    torch.save(CNN_model.state_dict(), 'ICNNModel/CNN_model.pth')

    # Save the training-loss curve next to the checkpoint.
    loss_value_plot(losses, iter)
    plt.savefig('ICNNModel/CNN_loss.png')

loss_fn = torch.nn.CrossEntropyLoss()
test(CNN_model, loss_fn)


