import pandas as pd
import torch
import torch.optim as optim
from sklearn.preprocessing import StandardScaler
from torch import nn
from torch.utils.data import DataLoader, Dataset


# 自定义Dataset类
class MyDataSet(Dataset):
    """Dataset backed by a CSV file of numeric features plus 5 one-hot label columns.

    Features are standardized with a ``StandardScaler``. By default a new
    scaler is fit on this file's own data (original behavior); pass an
    already-fitted ``scaler`` (e.g. the one fit on the training split) so
    evaluation data is transformed with training-set statistics instead of
    leaking its own.
    """

    # The 5 one-hot label columns: Dos, Probe, R2L, U2R, normal.
    LABEL_COLUMNS = ['*label_Dos', '*label_Probe', '*label_R2L', '*label_U2R', '*label_normal']

    def __init__(self, csv_file, scaler=None):
        """Load the CSV and standardize its feature columns.

        Args:
            csv_file: Path to the CSV file.
            scaler: Optional pre-fitted ``StandardScaler``. When ``None``
                (default), a new scaler is fit on this file's features —
                identical to the behavior before this parameter existed.
        """
        self.data = pd.read_csv(csv_file)

        # Every non-label column is treated as a feature.
        self.features = self.data.drop(columns=self.LABEL_COLUMNS).values

        if scaler is None:
            # Fit a fresh scaler on this file's data (original behavior).
            self.scaler = StandardScaler()
            self.features = self.scaler.fit_transform(self.features)
        else:
            # Reuse an externally fitted scaler (e.g. from the training set)
            # so this split is scaled consistently with training.
            self.scaler = scaler
            self.features = self.scaler.transform(self.features)

        self.labels = self.convert_labels(self.data)

    def __len__(self):
        """Return the number of rows in the CSV."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return ``(features, one_hot_labels)`` for row ``idx`` as float32 tensors."""
        x = torch.tensor(self.features[idx], dtype=torch.float32)
        y = torch.tensor(self.labels[idx], dtype=torch.float32)
        return x, y

    def convert_labels(self, df):
        """Extract the 5 one-hot label columns from ``df`` as a numpy array."""
        return df[self.LABEL_COLUMNS].values

# 定义CNN + LSTM模型
class CNNLSTMModel(nn.Module):
    """1-D CNN followed by a stacked LSTM for 5-way multi-label classification.

    Input is a batch of flat feature vectors of shape ``(batch, 118)``.
    The conv layer (kernel 5, no padding) maps ``(batch, 1, 118)`` to
    ``(batch, 6, 114)``; the 6 conv channels are then treated as a length-6
    sequence of 114-dim steps for the LSTM. Output is raw logits of shape
    ``(batch, 5)`` — pair this model with ``BCEWithLogitsLoss`` (no sigmoid
    is applied here).
    """

    def __init__(self):
        super(CNNLSTMModel, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv1d(in_channels=1, out_channels=6, kernel_size=5, stride=1, bias=False),
            nn.ReLU(),
            nn.Dropout(p=0.5)
        )
        self.lstm = nn.LSTM(input_size=114, hidden_size=15, num_layers=5, batch_first=True)
        self.fc = nn.Sequential(
            nn.Linear(15 * 6, 20),
            nn.ReLU(),
            # BUGFIX: removed the trailing ReLU that followed this layer.
            # It clamped logits to be non-negative, so sigmoid(logit) could
            # never drop below 0.5 — crippling BCEWithLogitsLoss training.
            nn.Linear(20, 5),  # 5 labels: Dos, Probe, R2L, U2R, normal
        )

    def forward(self, x):
        # (batch, features) -> (batch, 1, features) so Conv1d sees 1 channel.
        x = x.view(x.size(0), 1, -1)
        x = self.conv(x)
        # BUGFIX: use the LSTM's default zero initial hidden/cell states
        # instead of fresh randn tensors per call — random initial states
        # made every forward pass (including eval-time inference)
        # nondeterministic.
        x, _ = self.lstm(x)
        # Flatten all 6 time steps' hidden states into one feature vector.
        x = x.reshape(-1, 15 * 6)
        return self.fc(x)

# 训练循环
def train_loop(dataloader, model, loss_fn, optimizer, device):
    """Train ``model`` for one epoch over ``dataloader``.

    Each batch is moved to ``device``, then a forward pass, backward pass
    and optimizer step are run. Prints the running loss every 100 batches.
    """
    size = len(dataloader.dataset)
    model.train()  # enable training-mode layers (e.g. dropout)
    for batch, (features, labels) in enumerate(dataloader):
        features, labels = features.to(device), labels.to(device)

        # Forward pass and loss computation.
        predictions = model(features)
        batch_loss = loss_fn(predictions, labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        # Periodic progress report.
        if batch % 100 == 0:
            loss, current = batch_loss.item(), (batch + 1) * len(features)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")

# 测试循环
def test_loop(dataloader, model, loss_fn, device):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    model.eval()  # 设置模型为评估模式
    with torch.no_grad():  # 不计算梯度
        for x, y in dataloader:
            # 将数据移动到设备上
            x, y = x.to(device), y.to(device)

            output = model(x)
            test_loss += loss_fn(output, y).item()
            correct += (output.argmax(1) == y.argmax(1)).type(torch.float).sum().item()  # 计算准确率

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

def main():
    """Train the CNN+LSTM classifier, evaluate each epoch, and save weights."""
    # Prefer the GPU whenever one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Hyperparameters.
    learning_rate = 0.001
    batch_size = 128
    epochs = 10

    # Training split.
    train_data = MyDataSet('../data/train_subset.csv')
    train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

    # Evaluation split.
    # NOTE(review): MyDataSet fits a separate StandardScaler on this CSV, so
    # test features are scaled with test-set statistics rather than the
    # training set's — consider reusing the training scaler. Left unchanged
    # here to preserve behavior.
    test_data = MyDataSet('../data/ren_gan_subset.csv')
    test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

    # Model, loss, and optimizer.
    model = CNNLSTMModel().to(device)
    loss_fn = nn.BCEWithLogitsLoss()  # binary cross entropy on logits, for multi-label targets
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Train for the configured number of epochs, evaluating after each one.
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}\n-------------------------------")
        train_loop(train_dataloader, model, loss_fn, optimizer, device)
        test_loop(test_dataloader, model, loss_fn, device)

    # Persist the trained weights.
    torch.save(model.state_dict(), "cnn_lstm_model.pth")
    print("模型已保存")


if __name__ == "__main__":
    main()

