import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import time
import os
from sklearn.metrics import precision_score, recall_score, f1_score
from tqdm import tqdm
import psutil  # for memory / GPU-memory usage stats (currently unused in this file)
import gc  # for manual memory cleanup (currently unused in this file)

# Fix the random seeds so results are reproducible across runs.
torch.manual_seed(42)
np.random.seed(42)
class BiLSTM(nn.Module):
    """Bidirectional LSTM classifier for MNIST.

    Each 28x28 image is treated as a sequence of 28 rows, each row a
    28-dim feature vector. `forward` returns raw logits, intended to be
    consumed by ``nn.CrossEntropyLoss`` (which applies log-softmax
    internally).
    """

    def __init__(self, input_size=28, hidden_size=128, output_size=10, num_layers=2):
        super(BiLSTM, self).__init__()
        # Rows of the image are fed as a sequence (MNIST is 28x28).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, bidirectional=True)
        # A bidirectional LSTM concatenates both directions' hidden
        # states, so the classifier head sees hidden_size * 2 features.
        self.fc = nn.Linear(hidden_size * 2, output_size)

    def forward(self, x):
        # x: (batch_size, seq_len, input_size), here seq_len = input_size = 28.
        out, _ = self.lstm(x)  # (batch_size, seq_len, hidden_size * 2)
        out = out[:, -1, :]    # keep only the last time step
        # BUG FIX: the original applied nn.Softmax(dim=1) here and then
        # fed the probabilities to nn.CrossEntropyLoss, which applies
        # log-softmax itself. The resulting double softmax squashes
        # gradients and slows/hurts training. Returning raw logits fixes
        # the loss; argmax-based predictions are unchanged because
        # softmax is monotonic.
        return self.fc(out)

# Load the MNIST dataset via torchvision.
transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    # NOTE(review): this maps pixels to roughly [-1, 1] with mean/std 0.5;
    # the conventional MNIST statistics are (0.1307, 0.3081) — confirm
    # the 0.5/0.5 choice is intentional.
    torchvision.transforms.Normalize((0.5,), (0.5,))
])

train_dataset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# Shuffle only the training set; evaluation order does not matter.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)


def train_model(model, train_loader, criterion, optimizer, device, epochs=10):
    """Train `model` on `train_loader` for `epochs` epochs.

    Moves the model to `device`, tracks per-epoch loss / accuracy /
    misclassification rate, saves a metrics plot after every epoch,
    writes the final weights to models/bilstm_mnist.pth and the total
    wall-clock training time to results/training_time.txt.

    Returns a dict with the per-epoch history:
    {"losses": [...], "accuracies": [...], "misclassification_rates": [...]}
    (the return value is new and backward-compatible; callers that
    ignore it are unaffected).
    """
    model.to(device)
    train_losses = []
    train_accuracies = []
    misclassification_rates = []
    start_time = time.time()

    # Output directories for metrics, plots and checkpoints.
    os.makedirs('results', exist_ok=True)
    os.makedirs('visualization', exist_ok=True)
    os.makedirs('models', exist_ok=True)

    for epoch in range(epochs):
        model.train()
        correct = 0
        total = 0
        running_loss = 0.0

        for images, labels in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{epochs}"):
            # Reshape each 1x28x28 image into a 28-step sequence of
            # 28-dim row vectors for the LSTM: (batch_size, 28, 28).
            images = images.view(-1, 28, 28).to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        # Per-epoch metrics. The misclassification rate is just the
        # complement of accuracy, so no separate counter is needed
        # (the original tracked `misclassified` redundantly).
        train_losses.append(running_loss / len(train_loader))
        train_accuracies.append(100 * correct / total)
        misclassification_rates.append(100 * (total - correct) / total)

        # Plot progress after every epoch. (The original guarded this
        # with `(epoch + 1) % 1 == 0`, which is always true.)
        plot_metrics(train_losses, train_accuracies, misclassification_rates, epoch + 1)

    # Persist the trained weights.
    torch.save(model.state_dict(), "models/bilstm_mnist.pth")

    # Record total wall-clock training time.
    training_time = time.time() - start_time
    with open('results/training_time.txt', 'w') as f:
        f.write(f"Training time: {training_time:.2f} seconds\n")

    return {
        "losses": train_losses,
        "accuracies": train_accuracies,
        "misclassification_rates": misclassification_rates,
    }


def plot_metrics(losses, accuracies, misclassification_rates, epoch):
    """Save a 1x3 panel of loss / accuracy / misclassification curves.

    Writes the figure to visualization/bilstm_epoch_<epoch>_metrics.png
    and closes it so figures do not accumulate across epochs.
    """
    # (series, legend label, title, y-axis label) for each panel.
    panels = [
        (losses, "Loss", "Training Loss", "Loss"),
        (accuracies, "Accuracy", "Training Accuracy", "Accuracy (%)"),
        (misclassification_rates, "Misclassification Rate",
         "Misclassification Rate", "Misclassification Rate (%)"),
    ]

    plt.figure(figsize=(12, 4))
    for position, (series, legend, title, ylab) in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        plt.plot(series, label=legend)
        plt.title(title)
        plt.xlabel("Epoch")
        plt.ylabel(ylab)

    plt.tight_layout()
    plt.savefig(f"visualization/bilstm_epoch_{epoch}_metrics.png")
    plt.close()

def evaluate_model(model, test_loader, device):
    """Evaluate `model` on `test_loader`.

    Computes macro-averaged precision, recall and F1 over the whole
    test set, writes them to results/evaluation_metrics.txt, prints
    them, and returns them as a (precision, recall, f1) tuple (the
    return value is new and backward-compatible).
    """
    model.eval()
    all_labels = []
    all_predictions = []
    with torch.no_grad():
        for images, labels in tqdm(test_loader, desc="Evaluating"):
            images = images.view(-1, 28, 28).to(device)
            labels = labels.to(device)

            outputs = model(images)
            predicted = outputs.argmax(dim=1)

            all_labels.extend(labels.cpu().numpy())
            all_predictions.extend(predicted.cpu().numpy())

    # Macro averaging weights every class equally.
    precision = precision_score(all_labels, all_predictions, average='macro')
    recall = recall_score(all_labels, all_predictions, average='macro')
    f1 = f1_score(all_labels, all_predictions, average='macro')

    # Ensure the output directory exists even when this function is
    # called without a preceding train_model() run (which creates it).
    os.makedirs('results', exist_ok=True)
    with open('results/evaluation_metrics.txt', 'w') as f:
        f.write(f"Precision: {precision:.4f}\n")
        f.write(f"Recall: {recall:.4f}\n")
        f.write(f"F1 Score: {f1:.4f}\n")

    print(f"Precision: {precision:.4f}")
    print(f"Recall: {recall:.4f}")
    print(f"F1 Score: {f1:.4f}")

    return precision, recall, f1

def visualize_predictions(model, test_loader, device):
    """Plot the model's predictions on the first test batch.

    Runs one batch through `model` and saves a 2x5 grid of the first
    ten images, each annotated with its predicted and true label, to
    visualization/predictions.png.
    """
    model.eval()
    images, labels = next(iter(test_loader))
    images = images.view(-1, 28, 28).to(device)
    labels = labels.to(device)

    with torch.no_grad():
        outputs = model(images)
        predicted = torch.max(outputs.data, 1)[1]

    num_samples = 10
    cols = num_samples // 2
    fig, axes = plt.subplots(2, cols, figsize=(12, 6))
    for i in range(num_samples):
        row, col = divmod(i, cols)
        ax = axes[row, col]
        ax.imshow(images[i].cpu().numpy().squeeze(), cmap="gray")
        ax.set_title(f"Pred: {predicted[i].item()} \nTrue: {labels[i].item()}")
        ax.axis('off')

    plt.tight_layout()
    plt.savefig(f"visualization/predictions.png")
    plt.close()

def main():
    """Train, evaluate and visualize the BiLSTM MNIST classifier."""
    # Prefer GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = BiLSTM()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train, then score on the held-out test set, then dump a grid of
    # sample predictions.
    train_model(model, train_loader, criterion, optimizer, device, epochs=10)
    evaluate_model(model, test_loader, device)
    visualize_predictions(model, test_loader, device)

if __name__ == '__main__':
    main()
