import torch
from torch import nn, optim
from torch.nn import functional as F
from tqdm import tqdm
import time
import sys
import pickle
import numpy as np
sys.path.append("./")
from LoadData import load_cifar10, load_fashion_mnist, load_awa2


class ResBlock(nn.Module):
    """Basic two-conv residual block (ResNet-18/34 style).

    Main path: 3x3 conv (optionally strided) -> BN -> ReLU -> 3x3 conv -> BN.
    Shortcut: identity when shape is unchanged, otherwise a 1x1 strided
    projection conv + BN so the two branches can be added.

    Args:
        in_channels: channels of the input feature map.
        out_channels: channels produced by both convs.
        stride: stride of the first conv (2 halves the spatial size).
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlock, self).__init__()

        # Main (residual) path.
        self.left = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels)
        )
        # Skip connection. Previously commented out, which silently turned
        # this into a plain conv block; restored so the block is actually
        # residual. A 1x1 projection is needed whenever the main path
        # changes the channel count or spatial size.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, x):
        """Return relu(left(x) + shortcut(x)); same spatial/channel shape
        as the main path's output."""
        out = self.left(x)
        out = out + self.shortcut(x)  # residual addition
        out = F.relu(out)

        return out


class ResNet18(nn.Module):
    """ResNet-18-style classifier built from a user-supplied block class.

    Layout: 7x7 stem conv + max-pool, four stages of two blocks
    (64/128/256/512 channels, stages 2-4 downsample by 2), global
    average pooling, and a linear classification head.

    Args:
        ResBlock: block class called as ``block(in_ch, out_ch, stride)``.
        num_class: number of output logits.
        channels: number of input image channels.
    """

    def __init__(self, ResBlock, num_class=10, channels=1):
        super(ResNet18, self).__init__()
        self.in_channels = 64
        # Stem: aggressive downsampling before the residual stages.
        self.conv1 = nn.Sequential(
            nn.Conv2d(channels, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        # Four stages; only the first keeps the spatial resolution.
        self.layer1 = self.make_layer(ResBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResBlock, 512, 2, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Linear(512, num_class)

    def make_layer(self, block, channels, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first block uses ``stride``,
        the rest run at stride 1. Updates ``self.in_channels`` as it goes."""
        per_block_strides = [stride] + [1] * (num_blocks - 1)
        stage = []
        for s in per_block_strides:
            stage.append(block(self.in_channels, channels, s))
            self.in_channels = channels
        return nn.Sequential(*stage)

    def forward(self, x):
        """Run stem, the four residual stages, global pooling, then the
        classifier; returns logits of shape (batch, num_class)."""
        feats = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feats = stage(feats)
        pooled = self.avgpool(feats)
        flat = pooled.view(pooled.shape[0], -1)
        return self.fc(flat)

# NOTE(review): the triple-quoted string below is a disabled training script
# (per-epoch Adam training with a log-spaced LR schedule, periodic pickling of
# accuracy/loss history and model checkpoints). It is a bare string literal,
# so it has no runtime effect; kept verbatim for reference. If it is ever
# re-enabled, move it under `if __name__ == "__main__":` — presumably the
# hard-coded 'cuda:2' device and ./Model/<name>/ paths need adjusting first.
'''
model_name = "FashionMNIST"
load_func_dict = {"Cifar10": load_cifar10, "FashionMNIST": load_fashion_mnist, "AwA2": load_awa2}
load_func = load_func_dict[model_name]
batch_size = 128
train_data_loader, test_data_loader, num_classes, in_channels = load_func(batch_size, 224)
num_epochs = 400
lr = 0.001
lr_list = np.logspace(-3, -5, num_epochs)
model = ResNet18(ResBlock, num_class=num_classes, channels=in_channels)
optimizer = optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
criterion = criterion.to(device)
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.3)

acc_loss_data = []
for epoch in range(num_epochs):
    
    torch.cuda.empty_cache()
    acc_num = 0
    tot_num = 0
    tot_loss = 0.0
    start_time = time.time()
    for ix, param_group in enumerate(optimizer.param_groups):
        param_group['lr'] = lr_list[epoch]
    print("current learning rate", lr_list[epoch])

    model.train()
    for i, (images, labels) in enumerate(tqdm(train_data_loader, desc="train")):
        optimizer.zero_grad()
        images = images.to(device)
        # images.requires_grad = True
        labels = labels.to(device)
        output = model.forward(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        tot_loss += loss.cpu().item()
        acc_num += (output.argmax(dim=1) == labels).sum().cpu().item()
        tot_num += labels.shape[0]

    test_acc_num = 0
    test_tot_num = 0
    test_tot_loss = 0.0
    model.eval()
    with torch.no_grad():
        for i, (images, labels) in enumerate(tqdm(test_data_loader, desc="test")):
            images = images.to(device)
            labels = labels.to(device)
            output = model.forward(images)
            loss = criterion(output, labels)
            test_tot_loss += loss.cpu().item()
            test_acc_num += (output.argmax(dim=1) == labels).sum().cpu().item()
            test_tot_num += labels.shape[0]
    

    train_avg_loss = tot_loss / tot_num * batch_size
    train_acc = acc_num / tot_num
    test_avg_loss = test_tot_loss / test_tot_num * batch_size
    test_acc = test_acc_num / test_tot_num
    acc_loss_data.append((train_avg_loss, train_acc, test_avg_loss, test_acc))
    print('epoch %d, train loss %.4f, train acc %.3f, test loss %.3f, test acc %.3f, time % .1f sec'
    % (epoch + 1, train_avg_loss, train_acc, test_avg_loss, test_acc, time.time() - start_time))

    if (epoch + 1) % 10 == 0:
        f = open("./Model/" + model_name + "/resnet_data.bin", "wb")
        pickle.dump(acc_loss_data, f)
        f.close()
        torch.save(model.state_dict(), "./Model/" + model_name + "/resnet_model.pth")
'''