import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt
from sklearn.metrics import precision_score, recall_score
from gen_corr import get_corr_feature

# 设置随机种子以确保结果可复现
from torchviz import make_dot

# Seed NumPy and PyTorch RNGs so results are reproducible
np.random.seed(42)
torch.manual_seed(42)
# Feature columns selected by the project's correlation helper
cols = get_corr_feature()

# Load the training set and the two held-out test sets
train_df = pd.read_csv('data_c1.csv')
test_df_4 = pd.read_csv('data_c4.csv')
test_df_6 = pd.read_csv('data_c6.csv')

# Split features and labels, keeping only the selected feature columns
X_train = train_df.drop('label', axis=1)[cols].values
y_train = train_df['label'].values

# (Removed an unused duplicate `X_test` assignment that shadowed X_test_4.)
X_test_4 = test_df_4.drop('label', axis=1)[cols].values
y_test_4 = test_df_4['label'].values

X_test_6 = test_df_6.drop('label', axis=1)[cols].values
y_test_6 = test_df_6['label'].values

# Standardize features. BUG FIX: the test sets were previously re-fit
# with fit_transform, so each set was scaled with its own statistics.
# Fit on the training data only and apply that same transform to the
# test sets so all splits share one scaling.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test_4 = scaler.transform(X_test_4)
X_test_6 = scaler.transform(X_test_6)

# One-hot encode the labels. BUG FIX: fit the encoder on the training
# labels only, then transform the test labels with the same category
# ordering (re-fitting could renumber classes between sets).
# NOTE(review): `sparse=` is deprecated in scikit-learn >= 1.2 in favor
# of `sparse_output=` — confirm the installed version before changing.
encoder = OneHotEncoder(sparse=False)
y_train = encoder.fit_transform(y_train.reshape(-1, 1))
y_test_4 = encoder.transform(y_test_4.reshape(-1, 1))
y_test_6 = encoder.transform(y_test_6.reshape(-1, 1))

# Convert to tensors; unsqueeze(1) inserts a length-1 sequence axis so
# each sample is (seq_len=1, n_features) for the batch_first LSTM.
X_train = torch.tensor(X_train, dtype=torch.float).unsqueeze(1)
y_train = torch.tensor(y_train, dtype=torch.float)

X_test_4 = torch.tensor(X_test_4, dtype=torch.float).unsqueeze(1)
y_test_4 = torch.tensor(y_test_4, dtype=torch.float)
X_test_6 = torch.tensor(X_test_6, dtype=torch.float).unsqueeze(1)
y_test_6 = torch.tensor(y_test_6, dtype=torch.float)

# Data loaders: shuffle only the training data
batch_size = 32
train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True)
test_loader_4 = DataLoader(TensorDataset(X_test_4, y_test_4), batch_size=batch_size)
test_loader_6 = DataLoader(TensorDataset(X_test_6, y_test_6), batch_size=batch_size)


# LSTM classifier: a stacked LSTM over the input sequence, with a linear
# head applied to the hidden output of the final time step.
class LSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        """Build the recurrent stack and the classification head.

        input_dim: features per time step; hidden_dim: LSTM hidden size;
        output_dim: number of classes (logits); num_layers: stacked layers.
        """
        super(LSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Map x of shape (batch, seq, features) to class logits (batch, output_dim)."""
        batch = x.size(0)
        # Zero-initialized hidden/cell states on the same device as the input.
        h0 = torch.zeros(self.num_layers, batch, self.hidden_dim, device=x.device)
        c0 = torch.zeros(self.num_layers, batch, self.hidden_dim, device=x.device)
        seq_out, _ = self.lstm(x, (h0, c0))
        last_step = seq_out[:, -1, :]  # hidden output of the final time step
        return self.fc(last_step)


# Instantiate model, loss function, and optimizer
input_dim = X_train.shape[2]  # number of features per time step
hidden_dim = 128  # LSTM hidden-layer size
output_dim = 3  # number of output classes
num_layers = 3  # number of stacked LSTM layers

model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)


def _evaluate_accuracy(loader):
    """Return accuracy of `model` over `loader` (labels are one-hot rows)."""
    model.eval()
    total = 0
    correct = 0
    with torch.no_grad():
        for inputs, labels in loader:
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            # torch.max(labels, 1)[1] recovers class indices from one-hot rows
            correct += (predicted == torch.max(labels, 1)[1]).sum().item()
    return correct / total


test_accuracies_4 = []
test_accuracies_6 = []
best_accuracy_4 = 0  # best accuracy seen so far on test set 4
best_accuracy_6 = 0  # best accuracy seen so far on test set 6
graph_saved = False  # ensure the torchviz graph is rendered only once

# Train the model
num_epochs = 30
for epoch in range(num_epochs):
    model.train()
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        if not graph_saved:
            # BUG FIX: the computation graph was previously built and
            # rendered to PNG on EVERY batch of EVERY epoch — a large,
            # accidental I/O cost. One snapshot of the structure suffices.
            g = make_dot(outputs, params=dict(list(model.named_parameters()) + [('inputs', inputs)]))
            g.render('lstm_structure', format='png', view=False)
            graph_saved = True
        # CrossEntropyLoss expects class indices, not one-hot targets
        loss = criterion(outputs, torch.max(labels, 1)[1])
        loss.backward()
        optimizer.step()
    print(f'Epoch {epoch + 1}, Loss: {loss.item()}')  # loss of the last batch this epoch

    # Evaluate on both held-out test sets
    test_accuracy_4 = _evaluate_accuracy(test_loader_4)
    test_accuracies_4.append(test_accuracy_4)
    print(f'Epoch {epoch + 1}, Test Accuracy_4: {test_accuracy_4:.4f}')

    test_accuracy_6 = _evaluate_accuracy(test_loader_6)
    test_accuracies_6.append(test_accuracy_6)
    print(f'Epoch {epoch + 1}, Test Accuracy_6: {test_accuracy_6:.4f}')

    # Checkpoint whenever the accuracy on test set 4 improves
    if test_accuracy_4 > best_accuracy_4:
        best_accuracy_4 = test_accuracy_4
        torch.save(model.state_dict(), 'lstm_best_model_4.pth')
        print(f'Saved new best model for test set 4 with accuracy: {test_accuracy_4:.4f}')

    # Checkpoint whenever the accuracy on test set 6 improves
    if test_accuracy_6 > best_accuracy_6:
        best_accuracy_6 = test_accuracy_6
        torch.save(model.state_dict(), 'lstm_best_model_6.pth')
        print(f'Saved new best model for test set 6 with accuracy: {test_accuracy_6:.4f}')
print('4:', best_accuracy_4, '6:', best_accuracy_6)
# Plot the per-epoch accuracy curves for both test sets and save to disk.
fig, ax = plt.subplots(figsize=(12, 5))
ax.plot(test_accuracies_4, label='Test_4 Accuracy', marker='o')
ax.plot(test_accuracies_6, label='Test_6 Accuracy', marker='x')
ax.set_title('Validation Accuracy')
ax.set_xlabel('Epochs')
ax.set_ylabel('Accuracy')
ax.legend()

# Save the figure, then release it
fig.savefig('lstm_acc.png')
plt.close(fig)