import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import matplotlib.pyplot as plt
import numpy as np
import time
from tqdm import tqdm
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
import logging  # 导入日志模块
import os

# Ensure the log file exists before logging is configured
if not os.path.exists('experiment_results.log'):
    open('experiment_results.log', 'w').close()

# Configure logging: timestamped INFO records appended to a file
logging.basicConfig(filename='experiment_results.log', level=logging.INFO, 
                    format='%(asctime)s - %(levelname)s - %(message)s')

# Seed RNGs so results are reproducible
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(42)

# Select the compute device (GPU when available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")
logging.info(f"使用设备: {device}")  # record which device was selected

# Hyperparameters
input_size = 784  # 28x28 MNIST images, flattened
hidden_size1 = 512
hidden_size2 = 256
hidden_size3 = 128
num_classes = 10
num_epochs = 20
batch_size = 128
learning_rate = 0.001
weight_decay = 1e-5  # L2 regularization strength for Adam
dropout_rate = 0.3

# Preprocessing pipeline
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))  # MNIST dataset mean and std
])

# Load the datasets (training set is downloaded on first run)
train_dataset = MNIST(root='./data', train=True, transform=transform, download=True)
test_dataset = MNIST(root='./data', train=False, transform=transform)

# Data loaders; num_workers=0 disables multiprocessing workers
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=False)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=False)

# Improved fully connected classifier:
# three hidden blocks of Linear -> BatchNorm -> ReLU -> Dropout, then a linear head.
class ImprovedNeuralNet(nn.Module):
    """MLP classifier with batch normalization and dropout.

    Args:
        input_size: flattened feature count per sample (784 for 28x28 MNIST).
        hidden_size1, hidden_size2, hidden_size3: widths of the hidden layers.
        num_classes: number of output logits.
        dropout_rate: dropout probability applied after each hidden block.
    """
    def __init__(self, input_size, hidden_size1, hidden_size2, hidden_size3, num_classes, dropout_rate=0.3):
        super(ImprovedNeuralNet, self).__init__()
        # Kept for introspection; forward() flattens dynamically so the
        # model is not tied to the MNIST size.
        self.input_size = input_size

        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.bn1 = nn.BatchNorm1d(hidden_size1)
        self.dropout1 = nn.Dropout(dropout_rate)

        self.fc2 = nn.Linear(hidden_size1, hidden_size2)
        self.bn2 = nn.BatchNorm1d(hidden_size2)
        self.dropout2 = nn.Dropout(dropout_rate)

        self.fc3 = nn.Linear(hidden_size2, hidden_size3)
        self.bn3 = nn.BatchNorm1d(hidden_size3)
        self.dropout3 = nn.Dropout(dropout_rate)

        self.fc4 = nn.Linear(hidden_size3, num_classes)

    def forward(self, x):
        # Flatten per sample. Fix: the original used x.view(-1, 784), which
        # hard-coded the MNIST size (ignoring input_size) and would silently
        # re-batch any input whose feature count differed from 784.
        x = x.view(x.size(0), -1)

        # Hidden block 1
        x = self.dropout1(F.relu(self.bn1(self.fc1(x))))
        # Hidden block 2
        x = self.dropout2(F.relu(self.bn2(self.fc2(x))))
        # Hidden block 3
        x = self.dropout3(F.relu(self.bn3(self.fc3(x))))

        # Raw logits; CrossEntropyLoss applies log-softmax internally
        return self.fc4(x)

# One epoch of training
def train(model, train_loader, criterion, optimizer, device):
    """Run a single training epoch; return (average loss, accuracy in %)."""
    model.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0

    # Progress bar over the training batches
    progress = tqdm(train_loader, desc="Training")
    for step, (images, labels) in enumerate(progress):
        images = images.to(device)
        labels = labels.to(device)

        # Standard step: zero grads, forward, loss, backward, update
        optimizer.zero_grad()
        logits = model(images)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()

        # Accumulate running statistics
        running_loss += batch_loss.item()
        preds = logits.max(1)[1]
        n_seen += labels.size(0)
        n_correct += (preds == labels).sum().item()

        # Show running averages on the progress bar
        progress.set_postfix(loss=running_loss / (step + 1), acc=100. * n_correct / n_seen)

    return running_loss / len(train_loader), 100. * n_correct / n_seen

# 测试函数
def test(model, test_loader, criterion, device):
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    all_targets = []
    all_predicted = []
    
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            
            # 收集所有预测和目标值用于后续分析
            all_targets.extend(targets.cpu().numpy())
            all_predicted.extend(predicted.cpu().numpy())
    
    accuracy = 100.*correct/total
    print(f"Test Loss: {test_loss/len(test_loader):.4f}, Test Accuracy: {accuracy:.2f}%")
    
    return test_loss/len(test_loader), accuracy, all_targets, all_predicted

# Visualization: training curves, confusion matrix, classification report
def visualize_results(train_losses, train_accuracies, test_losses, test_accuracies, targets, predicted):
    """Save loss/accuracy curves and a confusion-matrix heatmap as PNGs,
    then print and log the sklearn classification report."""
    # Two side-by-side subplots described by (position, series pairs, labels)
    curve_specs = [
        (1, train_losses, 'Train Loss', test_losses, 'Test Loss',
         'Loss', 'Training and Testing Loss'),
        (2, train_accuracies, 'Train Accuracy', test_accuracies, 'Test Accuracy',
         'Accuracy (%)', 'Training and Testing Accuracy'),
    ]
    plt.figure(figsize=(15, 5))
    for pos, first, first_label, second, second_label, y_label, title in curve_specs:
        plt.subplot(1, 2, pos)
        plt.plot(first, label=first_label)
        plt.plot(second, label=second_label)
        plt.xlabel('Epoch')
        plt.ylabel(y_label)
        plt.title(title)
        plt.legend()
    plt.tight_layout()
    plt.savefig('training_metrics.png')
    plt.close()  # close instead of showing the figure

    # Confusion-matrix heatmap
    plt.figure(figsize=(10, 8))
    matrix = confusion_matrix(targets, predicted)
    sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues')
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.title('Confusion Matrix')
    plt.tight_layout()
    plt.savefig('confusion_matrix.png')
    plt.close()  # close instead of showing the figure

    # Per-class precision/recall/F1 summary
    print("\nClassification Report:")
    report = classification_report(targets, predicted)
    print(report)
    logging.info("\nClassification Report:\n" + report)  # persist the report

# Predict the first few test images and save an annotated image grid
def predict_and_visualize(model, test_dataset, device, num_samples=15):
    """Predict `num_samples` test digits and save a labelled grid to
    'sample_predictions.png' (green title = correct, red = wrong).

    Fixes vs. the original: forces eval mode so dropout/batch-norm are
    deterministic regardless of the caller's state, and sizes the subplot
    grid from `num_samples` instead of hard-coding 3x5.
    """
    model.eval()  # deterministic inference (dropout off, BN uses running stats)

    # Raw pixels from the dataset (bypasses the transform), scaled to [0, 1]
    samples = test_dataset.data[:num_samples].float() / 255.0
    targets = test_dataset.targets[:num_samples]

    # Flatten and apply the same normalization as the training transform
    inputs = samples.view(-1, 784).to(device)
    inputs = (inputs - 0.1307) / 0.3081

    # Predict without tracking gradients
    with torch.no_grad():
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)

    # Grid: 5 columns, as many rows as num_samples requires
    # (original hard-coded subplot(3, 5, ...), breaking for num_samples != 15)
    n_cols = 5
    n_rows = (num_samples + n_cols - 1) // n_cols
    plt.figure(figsize=(3 * n_cols, 3 * n_rows))  # (15, 9) for the default 15
    for i in range(num_samples):
        plt.subplot(n_rows, n_cols, i + 1)
        plt.imshow(samples[i].cpu().numpy(), cmap='gray')
        plt.title(f'Pred: {predicted[i].item()}, True: {targets[i].item()}', 
                 color='green' if predicted[i].item() == targets[i].item() else 'red')
        plt.axis('off')

    plt.tight_layout()
    plt.savefig('sample_predictions.png')
    plt.close()  # close instead of showing the figure

# Main experiment: build the model, train with early stopping, evaluate, visualize
def main():
    """Train the MLP on MNIST, checkpoint the best model by test loss,
    then reload it for final evaluation and visualization."""
    # Create the model on the selected device
    model = ImprovedNeuralNet(input_size, hidden_size1, hidden_size2, hidden_size3, num_classes, dropout_rate).to(device)

    # Loss and optimizer (L2 regularization via Adam's weight_decay)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)

    # Halve the learning rate after 2 epochs without test-loss improvement.
    # NOTE(review): `verbose` is deprecated (later removed) in newer torch
    # releases; kept for compatibility with the version this script targets.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=2, verbose=True
    )

    # Training history for plotting
    train_losses = []
    train_accuracies = []
    test_losses = []
    test_accuracies = []

    # Early-stopping state
    best_test_loss = float('inf')
    patience = 5
    counter = 0

    # Record the hyperparameters for this run
    logging.info(f"超参数: input_size={input_size}, hidden_size1={hidden_size1}, hidden_size2={hidden_size2}, hidden_size3={hidden_size3}, "
                 f"num_classes={num_classes}, num_epochs={num_epochs}, batch_size={batch_size}, learning_rate={learning_rate}, "
                 f"weight_decay={weight_decay}, dropout_rate={dropout_rate}")

    # Training loop
    start_time = time.time()
    for epoch in range(num_epochs):
        print(f"\nEpoch {epoch+1}/{num_epochs}")
        logging.info(f"\nEpoch {epoch+1}/{num_epochs}")

        # Train for one epoch
        train_loss, train_acc = train(model, train_loader, criterion, optimizer, device)
        train_losses.append(train_loss)
        train_accuracies.append(train_acc)

        # Evaluate on the test set
        test_loss, test_acc, _, _ = test(model, test_loader, criterion, device)
        test_losses.append(test_loss)
        test_accuracies.append(test_acc)

        # Adjust the learning rate based on test loss
        scheduler.step(test_loss)

        # Early stopping: checkpoint on improvement, otherwise count stale epochs
        if test_loss < best_test_loss:
            best_test_loss = test_loss
            counter = 0
            torch.save(model.state_dict(), 'best_mnist_model.pth')
            print(f"模型已保存，测试损失: {best_test_loss:.4f}")
            logging.info(f"模型已保存，测试损失: {best_test_loss:.4f}")
        else:
            counter += 1
            if counter >= patience:
                # Fix: log this epoch's metrics before breaking; the original
                # skipped logging the epoch that triggered early stopping.
                logging.info(f"Epoch {epoch+1} - Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, "
                             f"Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%")
                print(f"早停: {patience}个epoch没有改善")
                logging.info(f"早停: {patience}个epoch没有改善")
                break

        # Record this epoch's training and test results
        logging.info(f"Epoch {epoch+1} - Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, "
                     f"Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%")

    training_time = time.time() - start_time
    print(f"\n训练完成! 总训练时间: {training_time:.2f}秒")
    logging.info(f"\n训练完成! 总训练时间: {training_time:.2f}秒")

    # Reload the best checkpoint for the final evaluation
    model.load_state_dict(torch.load('best_mnist_model.pth', map_location=device))

    # Final evaluation. Fix: the original discarded these metrics and later
    # logged the stale last-epoch `test_loss`/`test_acc` as "final" results
    # (and would NameError if the loop never ran).
    final_test_loss, final_test_acc, all_targets, all_predicted = test(model, test_loader, criterion, device)

    # Plot curves, confusion matrix, and classification report
    visualize_results(train_losses, train_accuracies, test_losses, test_accuracies, all_targets, all_predicted)

    # Save a grid of sample predictions
    predict_and_visualize(model, test_dataset, device)

    # Record the true final test metrics (from the best checkpoint)
    logging.info(f"最终测试损失: {final_test_loss:.4f}, 最终测试准确率: {final_test_acc:.2f}%")
    logging.info("混淆矩阵和分类报告已保存")

# Run the full experiment only when executed as a script
if __name__ == "__main__":
    main()