import torch
from torch import nn, optim
import time
import sys
import pickle
import numpy as np
from tqdm import tqdm
sys.path.append("./")


# class Vgg16Net(nn.Module):

#     def __init__(self, num_class=10, channels=3):
#         super(Vgg16Net, self).__init__()

#         self.conv1 = nn.Sequential(
#             nn.Conv2d(channels, 64, 3, 1, 1),
#             nn.BatchNorm2d(64),
#             nn.ReLU(),
#             nn.Conv2d(64, 64, 3, 1, 1),
#             nn.BatchNorm2d(64),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2)
#         )

#         self.conv2 = nn.Sequential(
#             nn.Conv2d(64, 128, 3, 1, 1),
#             nn.BatchNorm2d(128),
#             nn.ReLU(),
#             nn.Conv2d(128, 128, 3, 1, 1),
#             nn.BatchNorm2d(128),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2)
#         )

#         self.conv3 = nn.Sequential(
#             nn.Conv2d(128, 256, 3, 1, 1),
#             nn.BatchNorm2d(256),
#             nn.ReLU(),
#             nn.Conv2d(256, 256, 3, 1, 1),
#             nn.BatchNorm2d(256),
#             nn.ReLU(),
#             nn.Conv2d(256, 256, 3, 1, 1),
#             nn.BatchNorm2d(256),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2)
#         )

#         self.conv4 = nn.Sequential(
#             nn.Conv2d(256, 512, 3, 1, 1),
#             nn.BatchNorm2d(512),
#             nn.ReLU(),
#             nn.Conv2d(512, 512, 3, 1, 1),
#             nn.BatchNorm2d(512),
#             nn.ReLU(),
#             nn.Conv2d(512, 512, 3, 1, 1),
#             nn.BatchNorm2d(512),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2)
#         )

#         self.conv5 = nn.Sequential(
#             nn.Conv2d(512, 512, 3, 1, 1),
#             nn.BatchNorm2d(512),
#             nn.ReLU(),
#             nn.Conv2d(512, 512, 3, 1, 1),
#             nn.BatchNorm2d(512),
#             nn.ReLU(),
#             nn.Conv2d(512, 512, 3, 1, 1),
#             nn.BatchNorm2d(512),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2)
#         )

#         self.conv = nn.Sequential(
#             self.conv1,
#             self.conv2,
#             self.conv3,
#             self.conv4,
#             self.conv5,
#         )

#         self.fc = nn.Sequential(
#             nn.Linear(7 * 7 * 512, 4096),
#             nn.ReLU(),
#             nn.Dropout(0.5),
#             nn.Linear(4096, 4096),
#             nn.ReLU(),
#             nn.Dropout(0.5),
#             nn.Linear(4096, num_class)
#         )

#     def forward(self, img):
#         feature = self.conv(img)
#         feature = feature.view(-1, 7 * 7 * 512)
#         output = self.fc(feature)
#         return output

class Vgg16Net(nn.Module):
    """Parameterized VGG-style CNN: a stack of conv blocks followed by a 3-layer MLP head.

    Args:
        conv_struct: iterable of (num_convs, in_channels, out_channels) triples;
            one VGG block is built per triple and registered as ``vgg_block_i``
            (1-based) on ``self.conv``.
        fc_features: flattened feature count entering the classifier head; must
            equal channels * H * W of the final conv output for the chosen input size.
        fc_hidden_num: width of the two hidden fully-connected layers.
        num_class: number of output logits.
        channels: accepted for interface compatibility but never read — the input
            channel count is taken from the first ``conv_struct`` entry instead.
    """

    def __init__(self, conv_struct, fc_features=7 * 7 * 512, fc_hidden_num=4096, num_class=10, channels=1):
        super(Vgg16Net, self).__init__()

        # Kept so callers can introspect the expected flattened feature size.
        self.fc_features = fc_features

        # Feature extractor: one named VGG block per conv_struct entry.
        self.conv = nn.Sequential()
        for idx, (n_convs, c_in, c_out) in enumerate(conv_struct, start=1):
            self.conv.add_module(f"vgg_block_{idx}", self.vgg_block(n_convs, c_in, c_out))

        # Classifier head: Linear -> ReLU -> Dropout, twice, then the output layer.
        head = [
            nn.Linear(fc_features, fc_hidden_num),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(fc_hidden_num, fc_hidden_num),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(fc_hidden_num, num_class),
        ]
        self.fc = nn.Sequential(*head)

    def forward(self, img):
        """Run the conv stack, flatten per sample, and return class logits."""
        features = self.conv(img)
        flat = features.view(img.shape[0], -1)
        return self.fc(flat)

    def vgg_block(self, conv_num, in_channels, out_channels):
        """Build one block: ``conv_num`` x (Conv3x3 + ReLU), then BatchNorm and 2x2 max-pool.

        Note: unlike canonical VGG, there is a single BatchNorm2d at the end of the
        block rather than one after every convolution.
        """
        layers = []
        cur_in = in_channels
        for _ in range(conv_num):
            layers += [nn.Conv2d(cur_in, out_channels, 3, 1, 1), nn.ReLU()]
            cur_in = out_channels
        layers += [nn.BatchNorm2d(out_channels), nn.MaxPool2d(2, 2)]
        return nn.Sequential(*layers)

# NOTE(review): the block below is a disabled training script kept as a
# module-level string literal, so it never executes. It references data-loader
# functions (load_cifar10, load_fashion_mnist, load_awa2) that are not defined
# or imported anywhere in this file — they must be supplied before this code
# can be re-enabled. Saving also assumes a ./Model/<model_name>/ directory
# already exists. Left byte-identical; do not edit the string contents.
'''
model_name = "FashionMNIST"
load_func_dict = {"Cifar10": load_cifar10, "FashionMNIST": load_fashion_mnist, "AwA2": load_awa2}
load_func = load_func_dict[model_name]
batch_size = 128
train_data_loader, test_data_loader, num_classes, in_channels = load_func(batch_size, 224)
num_epochs = 400
lr = 0.001
lr_list = np.logspace(-3, -5, num_epochs)
ratio = 8
small_conv_struct = [(1, in_channels, 64//ratio), (1, 64//ratio, 128//ratio),
                   (2, 128//ratio, 256//ratio), (2, 256//ratio, 512//ratio), (2, 512//ratio, 512//ratio)]
model = Vgg16Net(conv_struct=small_conv_struct, fc_features=7 * 7 * 512 // ratio, fc_hidden_num=4096 // ratio, num_class=num_classes, channels=in_channels)
optimizer = optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
criterion = criterion.to(device)

acc_loss_data = []
for epoch in range(num_epochs):
    
    torch.cuda.empty_cache()
    acc_num = 0
    tot_num = 0
    tot_loss = 0.0
    start_time = time.time()

    for ix, param_group in enumerate(optimizer.param_groups):
        param_group['lr'] = lr_list[epoch]
    print("current learning rate", lr_list[epoch])

    model.train()
    for i, (images, labels) in enumerate(tqdm(train_data_loader)):
        optimizer.zero_grad()
        images = images.to(device)
        labels = labels.to(device)
        output = model.forward(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        tot_loss += loss.cpu().item()
        acc_num += (output.argmax(dim=1) == labels).sum().cpu().item()
        tot_num += labels.shape[0]

    test_acc_num = 0
    test_tot_num = 0
    test_tot_loss = 0.0

    model.eval()
    with torch.no_grad():
        for i, (images, labels) in enumerate(tqdm(test_data_loader)):
            images = images.to(device)
            labels = labels.to(device)
            output = model.forward(images)
            loss = criterion(output, labels)
            test_tot_loss += loss.cpu().item()
            test_acc_num += (output.argmax(dim=1) == labels).sum().cpu().item()
            test_tot_num += labels.shape[0]
    
    train_avg_loss = tot_loss / tot_num * batch_size
    train_acc = acc_num / tot_num
    test_avg_loss = test_tot_loss / test_tot_num * batch_size
    test_acc = test_acc_num / test_tot_num
    acc_loss_data.append((train_avg_loss, train_acc, test_avg_loss, test_acc))
    print('epoch %d, train loss %.4f, train acc %.3f, test loss %.3f, test acc %.3f, time % .1f sec'
    % (epoch + 1, train_avg_loss, train_acc, test_avg_loss, test_acc, time.time() - start_time))

    if (epoch + 1) % 10 == 0:
        f = open("./Model/" + model_name + "/vgg_data.bin", "wb")
        pickle.dump(acc_loss_data, f)
        f.close()
        torch.save(model.state_dict(), "./Model/" + model_name + "/vgg_model.pth")
'''
