import sys
from itertools import count
import os
import time
import math

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision.transforms as T
import matplotlib.pyplot as plt

from dataloader import StanfordDogsDataset
import glob

def maintain_top_k_models(pattern, top_k=5):
    """Keep only the ``top_k`` checkpoint files with the highest accuracy.

    Filenames are expected to end in the accuracy, e.g.
    ``HAL9000_best_89.32.pt`` -> 89.32. Files beyond the top ``top_k``
    are deleted from disk.

    Args:
        pattern: glob pattern matching candidate checkpoint files.
        top_k: number of highest-accuracy files to keep.
    """
    files = glob.glob(pattern)
    if len(files) <= top_k:
        return

    def extract_acc(path):
        # Parse the accuracy from the last "_"-separated token of the
        # basename ("..._89.32.pt" -> 89.32). Using the basename avoids
        # accidental splits on underscores in the directory path.
        token = os.path.basename(path).rsplit("_", 1)[-1]
        try:
            return float(token.replace(".pt", ""))
        except ValueError:  # unparseable names sort last instead of crashing
            return -1.0

    files.sort(key=extract_acc, reverse=True)
    for stale in files[top_k:]:
        os.remove(stale)
        print(f"Deleted old model: {stale}")


class HAL9000(nn.Module):
    """Small CNN that classifies Stanford-Dogs images into 120 breeds.

    Expects 3x512x512 inputs (as produced by ``preprocess``); the four
    conv/pool stages reduce that to a 64x2x2 feature map (256 features)
    before the fully connected head.
    """

    def __init__(self):
        super().__init__()

        # Stages 1-3: strided 3x3 conv -> 2x2 max-pool -> batch norm.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2)
        self.maxpool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2)
        self.maxpool2 = nn.MaxPool2d(2, 2)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2)
        self.maxpool3 = nn.MaxPool2d(2, 2)
        self.bn3 = nn.BatchNorm2d(64)
        # Stage 4: unstrided 3x3 conv, pooled down to the final 2x2 map.
        self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3)
        self.maxpool4 = nn.MaxPool2d(2, 2)
        self.bn4 = nn.BatchNorm2d(64)
        # Classifier head: 256 -> 128 -> 120 logits, with dropout.
        self.fc1 = nn.Linear(in_features=256, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=120)
        self.dropout = nn.Dropout(.5)

    def forward(self, x):
        """Return raw class logits of shape (batch, 120)."""
        x = F.leaky_relu(self.conv1(x))
        x = self.bn1(self.maxpool1(x))
        x = F.leaky_relu(self.conv2(x))
        x = self.bn2(self.maxpool2(x))
        x = F.leaky_relu(self.conv3(x))
        x = self.bn3(self.maxpool3(x))
        x = F.leaky_relu(self.conv4(x))
        x = self.bn4(self.maxpool4(x))
        x = x.view(-1, 256)  # flatten the 64x2x2 feature map
        x = F.leaky_relu(self.dropout(self.fc1(x)))
        # Logits, not probabilities: F.cross_entropy applies log-softmax.
        return self.fc2(x)


def preprocess(image):
    """Resize *image* so its longest side is at most 512 px (preserving
    aspect ratio), zero-pad it to exactly 512x512, and convert it to a
    3-channel tensor.

    Args:
        image: a PIL image (exposes ``.size`` as ``(width, height)``).

    Returns:
        A 3x512x512 float tensor.
    """
    width, height = image.size
    # Downscale the longest side to 512 while keeping the aspect ratio.
    # ">=" also covers square images larger than 512: the original strict
    # ">"/"<" pair skipped them entirely, yielding negative pad values.
    if width >= height and width > 512:
        height = math.floor(512 * height / width)
        width = 512
    elif height > 512:
        width = math.floor(512 * width / height)
        height = 512
    # T.Pad order is (left, top, right, bottom); an odd deficit puts the
    # extra pixel on the left/top so the totals come out to exactly 512.
    pad_values = (
        (512 - width) // 2 + (0 if width % 2 == 0 else 1),
        (512 - height) // 2 + (0 if height % 2 == 0 else 1),
        (512 - width) // 2,
        (512 - height) // 2,
    )

    transform = T.Compose([
        T.Resize((height, width)),
        T.Pad(pad_values),
        T.ToTensor(),
        # Force 3 channels: drop extras (e.g. alpha) or replicate grayscale.
        # NOTE(review): a 2-channel input would become 6 channels here —
        # assumed not to occur in this dataset; confirm if it can.
        T.Lambda(lambda x: x[:3] if x.size(0) >= 3 else x.repeat(3, 1, 1)),
        T.RandomGrayscale(p=0.1),  # after ToTensor: operates on the tensor
    ])

    return transform(image)


# ---- Module-level training setup -------------------------------------------
# NOTE(review): these statements run on import as well as direct execution;
# DataLoader workers (num_workers=4) re-import this module.

# Fall back to CPU when no GPU is present; the original hard-coded "cuda"
# and crashed on CPU-only machines.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SAVE_FILE = "HAL9000.pt"

network = HAL9000().to(DEVICE)
train_set = StanfordDogsDataset(
    root=os.path.join(os.getcwd(), "data"), set_type="train", transform=preprocess
)
validation_set = StanfordDogsDataset(
    root=os.path.join(os.getcwd(), "data"), set_type="validation", transform=preprocess
)
train_loader = DataLoader(train_set, batch_size=16, shuffle=True, num_workers=4)
# Validation order is irrelevant to accuracy, so no shuffle.
validation_loader = DataLoader(validation_set, batch_size=16, shuffle=False, num_workers=4)

optimizer = optim.Adam(network.parameters(), lr=.001)
losses = []  # running per-batch training losses across all epochs

# Resume weights/optimizer from the rolling checkpoint if one exists.
# map_location lets a checkpoint saved on GPU load on a CPU-only machine.
if os.path.isfile(SAVE_FILE):
    checkpoint = torch.load(SAVE_FILE, map_location=DEVICE)
    epoch = checkpoint["epoch"]  # NOTE(review): ignored by the loop below
    network.load_state_dict(checkpoint["network"])
    optimizer.load_state_dict(checkpoint["optimizer"])

best_acc = 0.0        # best validation accuracy seen so far
best_train_acc = 0.0  # best training accuracy seen so far

def validate(i):
    """Run one pass over the validation set, report accuracy and loss, and
    checkpoint the model whenever validation accuracy hits a new best.

    Args:
        i: zero-based index of the epoch that just finished (stored as
           ``epoch = i + 1`` in the checkpoint).
    """
    global best_acc
    total_correct = 0
    validation_losses = []

    network.eval()  # evaluation mode: disables dropout, uses BN running stats
    with torch.no_grad():
        for images, labels in validation_loader:
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            predictions = network(images)
            loss = F.cross_entropy(predictions, labels)

            total_correct += predictions.argmax(dim=1).eq(labels).sum().item()
            validation_losses.append(loss.item())

    current_acc = total_correct / len(validation_set) * 100
    # max(..., 1) guards against division by zero on an empty loader.
    avg_loss = sum(validation_losses) / max(len(validation_losses), 1)
    print(f"Validation accuracy: {current_acc:.2f}%, Average loss: {avg_loss:.4f}")

    if current_acc > best_acc:
        best_acc = current_acc
        best_checkpoint = {
            "epoch": i + 1,
            "network": network.state_dict(),
            "optimizer": optimizer.state_dict(),
            "best_acc": best_acc,
        }
        filename = f"HAL9000_best_val_{current_acc:.2f}.pt"
        torch.save(best_checkpoint, filename)
        # Bug fix: the original printed the literal text "(unknown)"
        # instead of the checkpoint filename.
        print(f"New best model saved as {filename}")
        maintain_top_k_models("HAL9000_best_val_*.pt", top_k=5)

    network.train()  # restore training mode for the caller

# Script entry point: run the training loop only when executed directly
# (the guard also stops DataLoader worker re-imports from re-training).
if __name__ == '__main__':
    num_epochs = 300
    # NOTE(review): the epoch loaded from HAL9000.pt is ignored here, so a
    # resumed run restarts the epoch counter at 1 — confirm that's intended.
    for i in range(num_epochs):
        print('\n================ epoch:', i + 1)
        total_correct = 0
        # Wall-clock timing. The original time.thread_time() counts only
        # this thread's CPU time, badly under-reporting epochs dominated
        # by GPU work or data-loader I/O.
        start_time = time.perf_counter()

        network.train()  # training mode: dropout active, BN uses batch stats
        for images, labels in train_loader:
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            optimizer.zero_grad()

            predictions = network(images)
            loss = F.cross_entropy(predictions, labels)
            loss.backward()
            optimizer.step()

            total_correct += predictions.argmax(dim=1).eq(labels).sum().item()
            losses.append(loss.item())

        train_acc = total_correct / len(train_set) * 100
        # Average loss over this epoch only (tail of the running list).
        avg_loss = sum(losses[-len(train_loader):]) / len(train_loader)
        print(
            f"Epoch: {i + 1}, Accuracy: {train_acc:.2f}%, Average loss: "
            f"{avg_loss:.4f}, Time: {time.perf_counter() - start_time:.2f}s")

        # Checkpoint whenever *training* accuracy reaches a new best.
        if train_acc > best_train_acc:
            best_train_acc = train_acc
            best_train_checkpoint = {
                "epoch": i + 1,
                "network": network.state_dict(),
                "optimizer": optimizer.state_dict(),
                "best_train_acc": best_train_acc,
            }
            train_filename = f"HAL9000_best_train_{train_acc:.2f}.pt"
            torch.save(best_train_checkpoint, train_filename)
            print(f"New best training model saved as {train_filename}")
            maintain_top_k_models("HAL9000_best_train_*.pt", top_k=5)

        # Always save a rolling "latest" checkpoint for resuming.
        checkpoint = {
            "epoch": i + 1,
            "network": network.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        torch.save(checkpoint, SAVE_FILE)

        validate(i)
        print()