import os
import pickle
import sys
import time

import numpy as np
import torch
from torch import nn, optim
from tqdm import tqdm

sys.path.append("./")
from LoadData import load_cifar10, load_fashion_mnist, load_awa2
from LoadModel import load_alexnet, load_vggnet, load_resnet
# from AlexNetModel import AlexNet
from VggNetModel import Vgg16Net
# from ResNetModel import ResNet

# --- run configuration ---
dataset_name = "AwA2"
model_name = "resnet"
batch_size = 128
num_epochs = 400
lr = 0.0001

# --- dataset: resolve the loader by name and build the DataLoaders ---
# Each loader returns (train_loader, test_loader, num_classes, in_channels)
# for 224x224 inputs.
load_func_dict = {
    "Cifar10": load_cifar10,
    "FashionMNIST": load_fashion_mnist,
    "AwA2": load_awa2,
}
load_func = load_func_dict[dataset_name]
print(load_func)
train_data_loader, test_data_loader, num_classes, in_channels = load_func(batch_size, 224)

# --- model: resolve the architecture factory and instantiate it ---
model_dict = {"alex": load_alexnet, "vgg": load_vggnet, "resnet": load_resnet}
model = model_dict[model_name](num_classes, in_channels)

# --- objective, optimizer, and device placement ---
device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')
optimizer = optim.Adam(model.parameters(), lr=lr)
model = model.to(device)
criterion = nn.CrossEntropyLoss().to(device)

# Per-epoch metric history: (train_loss, train_acc, test_loss, test_acc).
acc_loss_data = []
for epoch in range(num_epochs):

    # Release cached GPU allocator memory between epochs (no-op without CUDA).
    torch.cuda.empty_cache()

    acc_num = 0      # correctly classified training samples this epoch
    tot_num = 0      # training samples seen this epoch
    tot_loss = 0.0   # summed per-sample training loss
    start_time = time.time()

    # --- training pass ---
    model.train()
    for images, labels in tqdm(train_data_loader):
        optimizer.zero_grad()
        images = images.to(device)
        labels = labels.to(device)
        # Call the module, not .forward(), so registered hooks still run.
        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        # Weight the batch-mean loss by the actual batch size so a smaller
        # final batch does not skew the epoch average.
        tot_loss += loss.item() * labels.size(0)
        acc_num += (output.argmax(dim=1) == labels).sum().item()
        tot_num += labels.size(0)

    test_acc_num = 0
    test_tot_num = 0
    test_tot_loss = 0.0

    # --- evaluation pass (no gradients, eval-mode BN/dropout) ---
    model.eval()
    with torch.no_grad():
        for images, labels in tqdm(test_data_loader):
            images = images.to(device)
            labels = labels.to(device)
            output = model(images)
            loss = criterion(output, labels)
            test_tot_loss += loss.item() * labels.size(0)
            test_acc_num += (output.argmax(dim=1) == labels).sum().item()
            test_tot_num += labels.size(0)

    # Exact per-sample averages; the previous `/ tot_num * batch_size` form
    # was only correct when every batch was full.
    train_avg_loss = tot_loss / tot_num
    train_acc = acc_num / tot_num
    test_avg_loss = test_tot_loss / test_tot_num
    test_acc = test_acc_num / test_tot_num
    acc_loss_data.append((train_avg_loss, train_acc, test_avg_loss, test_acc))
    print('epoch %d, train loss %.4f, train acc %.3f, test loss %.3f, test acc %.3f, time % .1f sec'
    % (epoch + 1, train_avg_loss, train_acc, test_avg_loss, test_acc, time.time() - start_time))

    # Checkpoint every 20 epochs: metric history (pickle) + model weights.
    if (epoch + 1) % 20 == 0:
        save_dir = "./Model/" + dataset_name + "/"
        # Create the target directory so the first save cannot fail with
        # FileNotFoundError.
        os.makedirs(save_dir, exist_ok=True)
        # Context manager closes the file even if pickle.dump raises.
        with open(save_dir + model_name + f"_data_epoch{epoch}.bin", "wb") as f:
            pickle.dump(acc_loss_data, f)
        torch.save(model.state_dict(), save_dir + model_name + f"_model_epoch{epoch}.pth")