import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import logging
from datetime import datetime
from sklearn.metrics import confusion_matrix
import seaborn as sns
import pandas as pd
import os

# Configure the logging system: one timestamped log file per run, mirrored to stdout.
log_filename = f"lenet5_fashionmnist_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_filename),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger()

# Seed the RNGs for reproducibility (CPU-side; CUDA kernels may still be nondeterministic).
torch.manual_seed(42)
np.random.seed(42)


# Define the LeNet5 model
class LeNet5(nn.Module):
    """LeNet-5 adapted for 28x28 single-channel FashionMNIST images.

    ``conv1`` uses padding=2 so the first conv preserves 28x28; after two
    5x5 convs and two 2x2 average pools the feature map is 16 x 5 x 5,
    matching the classic architecture's fully-connected input size.
    ``forward`` returns raw logits for the 10 classes (no softmax —
    ``nn.CrossEntropyLoss`` expects logits).
    """

    def __init__(self) -> None:
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)  # 1x28x28 -> 6x28x28
        self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)      # -> 6x14x14
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)            # -> 16x10x10
        self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)      # -> 16x5x5
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return (N, 10) class logits for a batch ``x`` of (N, 1, 28, 28) images."""
        x = self.relu(self.conv1(x))
        x = self.pool1(x)
        x = self.relu(self.conv2(x))
        x = self.pool2(x)
        # Fix: flatten only the non-batch dims. The original
        # ``x.view(-1, 16 * 5 * 5)`` silently *changes the batch size*
        # (instead of raising) if the spatial dims are ever not 5x5,
        # which would misalign logits with labels downstream.
        x = torch.flatten(x, 1)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x)
        return x


# Data preprocessing: convert PIL images to tensors, then map pixel
# values from [0, 1] to roughly [-1, 1] via (x - 0.5) / 0.5.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

# Load FashionMNIST (downloads to ./data on first run).
train_set = torchvision.datasets.FashionMNIST(
    root='./data', train=True, download=True, transform=transform)
test_set = torchvision.datasets.FashionMNIST(
    root='./data', train=False, download=True, transform=transform)

# Change 1: num_workers=0 avoids multiprocessing problems on Windows.
train_loader = torch.utils.data.DataLoader(
    train_set, batch_size=128, shuffle=True, num_workers=0)
test_loader = torch.utils.data.DataLoader(
    test_set, batch_size=128, shuffle=False, num_workers=0)

# Index-to-name mapping for the 10 FashionMNIST classes.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Device selection: first CUDA GPU if available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
logger.info(f"Using device: {device}")
logger.info(f"Number of training examples: {len(train_set)}")
logger.info(f"Number of testing examples: {len(test_set)}")

# Initialize model, loss function and optimizer.
model = LeNet5().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training hyperparameters and per-epoch history buffers (plotted/saved later).
num_epochs = 20
train_losses, test_losses = [], []
train_accs, test_accs = [], []

# Training loop
logger.info("Starting training...")
for epoch in range(num_epochs):
    # --- Training phase ---
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        _, predicted = outputs.max(1)  # predicted class = argmax over logits
        total += labels.size(0)
        correct += predicted.eq(labels).sum().item()

    # Mean of per-batch losses; the last batch may be smaller, so this is
    # a close approximation of (not exactly) the per-sample mean loss.
    train_loss = running_loss / len(train_loader)
    train_acc = 100. * correct / total
    train_losses.append(train_loss)
    train_accs.append(train_acc)

    # --- Evaluation phase (no gradients, eval-mode layers) ---
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0

    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)

            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()

    test_loss = running_loss / len(test_loader)
    test_acc = 100. * correct / total
    test_losses.append(test_loss)
    test_accs.append(test_acc)

    logger.info(f"Epoch [{epoch + 1}/{num_epochs}] | "
                f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}% | "
                f"Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%")

# Save the trained weights (state dict only, not the full module).
model_path = 'lenet5_fashionmnist.pth'
torch.save(model.state_dict(), model_path)
logger.info(f"Final test accuracy: {test_acc:.2f}%")
logger.info(f"Training completed. Model saved to {model_path}")

# Visualize the training history: loss and accuracy per epoch, side by side.
plt.figure(figsize=(12, 5))

plt.subplot(1, 2, 1)
plt.plot(train_losses, label='Train Loss')
plt.plot(test_losses, label='Test Loss')
plt.title('Loss Curve')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(train_accs, label='Train Accuracy')
plt.plot(test_accs, label='Test Accuracy')
plt.title('Accuracy Curve')
plt.xlabel('Epochs')
plt.ylabel('Accuracy (%)')
plt.legend()

plt.tight_layout()
# Save before show(): show() may clear the current figure in some backends.
plt.savefig('training_curves.png')
logger.info("Saved training curves to training_curves.png")
plt.show()


# Visualize prediction results
def imshow(img):
    """Undo the (0.5, 0.5) normalization and draw a CHW image tensor with pyplot."""
    denorm = img * 0.5 + 0.5  # invert Normalize((0.5,), (0.5,))
    plt.imshow(denorm.numpy().transpose(1, 2, 0))  # CHW -> HWC for matplotlib


model.eval()
# Grab one fixed batch from the (unshuffled) test loader for visualization;
# keep CPU copies for matplotlib.
dataiter = iter(test_loader)
images, labels = next(dataiter)
images, labels = images.cpu(), labels.cpu()

# Show the ground-truth images as a grid.
plt.figure(figsize=(10, 5))
imshow(torchvision.utils.make_grid(images[:16]))
plt.title('True Labels')
plt.axis('off')
plt.savefig('true_samples.png')
logger.info("Saved true samples to true_samples.png")
plt.show()

# Run the model on the batch (moved to the compute device) to get predictions.
with torch.no_grad():
    outputs = model(images.to(device))
_, predicted = torch.max(outputs, 1)

# Show the first 16 images with true vs. predicted class names.
plt.figure(figsize=(15, 8))
for i in range(16):
    plt.subplot(4, 4, i + 1)
    plt.imshow(images[i].squeeze(), cmap='gray')
    plt.title(f"True: {class_names[labels[i]]}\nPred: {class_names[predicted[i]]}", fontsize=10)
    plt.axis('off')
plt.tight_layout()
plt.savefig('prediction_results.png')
logger.info("Saved prediction results to prediction_results.png")
plt.show()

# Build the confusion matrix over the whole test set.
# (model is still in eval mode from the visualization step above.)
all_labels = []
all_preds = []

with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, preds = torch.max(outputs, 1)
        all_labels.extend(labels.cpu().numpy())
        all_preds.extend(preds.cpu().numpy())

# Rows = true class, columns = predicted class (sklearn convention).
cm = confusion_matrix(all_labels, all_preds)
plt.figure(figsize=(12, 10))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=class_names, yticklabels=class_names)
plt.title('Confusion Matrix')
plt.xlabel('Predicted')
plt.ylabel('True')
plt.xticks(rotation=45, ha='right')
plt.yticks(rotation=0)
plt.tight_layout()
plt.savefig('confusion_matrix.png')
logger.info("Saved confusion matrix to confusion_matrix.png")
plt.show()

# Persist the per-epoch metrics as a CSV table (one row per epoch).
csv_path = 'training_results.csv'
results = pd.DataFrame({
    'Epoch': range(1, num_epochs + 1),
    'Train Loss': train_losses,
    'Test Loss': test_losses,
    'Train Acc': train_accs,
    'Test Acc': test_accs,
})
results.to_csv(csv_path, index=False)
logger.info(f"Saved training results to {csv_path}")

# Every artifact this run is expected to have produced.
generated_files = [
    log_filename, model_path, 'training_curves.png',
    'true_samples.png', 'prediction_results.png',
    'confusion_matrix.png', csv_path
]

# Log each artifact with its on-disk size; warn about any that are missing.
logger.info("All generated files:")
for artifact in generated_files:
    if not os.path.exists(artifact):
        logger.warning(f"  - {artifact} (not found)")
        continue
    size_kb = os.path.getsize(artifact) / 1024
    logger.info(f"  - {artifact} ({size_kb:.1f} KB)")