import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from cnn import ResNet
from ecanet18 import ECANet18
from resnet18 import ResNet18
from ECA_ResNeSt import ECA_ResNeSt, ECA_ResNestBottleneck
import torch.optim.lr_scheduler as lr_scheduler
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


def get_dataloaders(batch_size=4):
    """Build CIFAR-100 train/test DataLoaders with simple mean/std normalization.

    Args:
        batch_size: per-batch sample count for both loaders.

    Returns:
        (train_loader, test_loader, class_names) where class_names is the
        CIFAR-100 class-name list from the training set.
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    train_data = torchvision.datasets.CIFAR100(
        root='./data', train=True, download=True, transform=preprocess)
    test_data = torchvision.datasets.CIFAR100(
        root='./data', train=False, download=True, transform=preprocess)

    loader_train = torch.utils.data.DataLoader(
        train_data, batch_size=batch_size, shuffle=True, num_workers=2)
    loader_test = torch.utils.data.DataLoader(
        test_data, batch_size=batch_size, shuffle=False, num_workers=2)

    return loader_train, loader_test, train_data.classes


def train_one_epoch(model, loader, criterion, optimizer, device):
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    for inputs, labels in loader:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    avg_loss = running_loss / len(loader)
    accuracy = 100 * correct / total
    return avg_loss, accuracy


def evaluate_model(model, loader, device):
    """Return top-1 accuracy (%) of `model` over `loader`, gradients disabled."""
    model.eval()
    n_correct, n_total = 0, 0

    with torch.no_grad():
        for batch, targets in loader:
            batch, targets = batch.to(device), targets.to(device)
            preds = model(batch).argmax(dim=1)
            n_total += targets.size(0)
            n_correct += (preds == targets).sum().item()

    return 100 * n_correct / n_total


def plot_training(epochs, losses, accuracies, filename):
    """Save a side-by-side plot of training loss and accuracy curves.

    Args:
        epochs: iterable of epoch indices (x axis).
        losses: per-epoch training loss values.
        accuracies: per-epoch training accuracies (percent).
        filename: output image path passed to `plt.savefig`.
    """
    fig = plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    plt.plot(epochs, losses, 'r-', label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss over Epochs')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(epochs, accuracies, 'b-', label='Training Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Training Accuracy over Epochs')
    plt.legend()

    plt.tight_layout()
    plt.savefig(filename)
    # Close the figure so repeated calls don't accumulate open figures
    # (matplotlib keeps every figure alive until explicitly closed).
    plt.close(fig)


def main():
    """Fine-tune ECANet18 on CIFAR-100 from a saved checkpoint, then evaluate.

    Trains with SGD under a linear warmup followed by cosine annealing, saves
    the updated weights back to `model_path`, reports overall and per-class
    test accuracy, and writes the training curves to a PNG.
    """
    batch_size = 4
    num_epochs = 20
    learning_rate = 0.001
    # model_path = f'model/ResNeSt_dlr{learning_rate}_e{num_epochs}.pth'
    # model_path = f'model/resnet18_dlr0.01_e20.pth'
    model_path = 'model/ecanet18_dlr0.001_e20.pth'

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    train_loader, test_loader, classes = get_dataloaders(batch_size)

    model = ECANet18(blocks_num=[2, 2, 2, 2], num_classes=100).to(device)
    # model = ECA_ResNeSt(ECA_ResNestBottleneck, [2, 2, 2, 2], num_classes=100,
    #               radix=2, cardinality=2, base_width=64).to(device)
    # map_location keeps the load working when the checkpoint was saved on a
    # different device (e.g. a GPU checkpoint loaded on a CPU-only machine).
    model.load_state_dict(torch.load(model_path, map_location=device))  # load pretrained weights
    # model = Conv_Next(num_classes=100).to(device)

    criterion = nn.CrossEntropyLoss()
    # optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    warmup_epochs = 4
    total_epochs = num_epochs
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    # Linear warmup for the first `warmup_epochs`, then cosine decay over the rest.
    warmup_scheduler = lr_scheduler.LinearLR(optimizer, start_factor=1e-3, end_factor=1.0, total_iters=warmup_epochs)
    cosine_scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_epochs - warmup_epochs)
    scheduler = lr_scheduler.SequentialLR(optimizer, schedulers=[warmup_scheduler, cosine_scheduler], milestones=[warmup_epochs])

    train_losses = []
    train_accuracies = []

    for epoch in range(num_epochs):
        loss, acc = train_one_epoch(model, train_loader, criterion, optimizer, device)
        scheduler.step()  # schedulers here are stepped once per epoch
        train_losses.append(loss)
        train_accuracies.append(acc)
        print(f"Epoch {epoch + 1}: Loss = {loss:.4f}, Accuracy = {acc:.2f}%")

    print("Training complete. Saving model...")
    torch.save(model.state_dict(), model_path)

    print("Evaluating on test set...")
    test_accuracy = evaluate_model(model, test_loader, device)
    print(f"Test Accuracy: {test_accuracy:.2f}%")

    # Per-class accuracy over the test set.
    correct_pred = {classname: 0 for classname in classes}
    total_pred = {classname: 0 for classname in classes}

    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predictions = torch.max(outputs, 1)
            for label, prediction in zip(labels, predictions):
                if label == prediction:
                    correct_pred[classes[label]] += 1
                total_pred[classes[label]] += 1

    # NOTE(review): assumes every class appears in the test set (true for the
    # standard CIFAR-100 split); an absent class would divide by zero here.
    for classname, correct_count in correct_pred.items():
        accuracy = 100 * float(correct_count) / total_pred[classname]
        print(f'{classname:5s} : {accuracy:.1f} %')

    print(train_losses)
    print(train_accuracies)

    plot_training(range(1, num_epochs + 1), train_losses, train_accuracies, f"photo/ResNeSt_dlr{learning_rate}_e{num_epochs}epochs.png")


# Run the full training pipeline only when executed as a script.
if __name__ == "__main__":
    main()
