import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
import torchvision as ptv
from torch import nn
from torch.autograd import Variable
from python_ai.common.xcommon import *
from sklearn.model_selection import train_test_split


class Conv_BN_LeakyReLU(nn.Module):
    """Basic DarkNet building block: Conv2d -> BatchNorm2d -> LeakyReLU(0.1).

    The convolution carries no bias-free tweaks here; padding/dilation are
    forwarded straight to ``nn.Conv2d`` and batch-norm runs over the
    ``out_channels`` feature maps.
    """

    def __init__(self, in_channels, out_channels, ksize, padding=0, dilation=1):
        super().__init__()
        # Keep the attribute name `convs` so state_dict keys stay stable.
        layers = [
            nn.Conv2d(in_channels, out_channels, ksize,
                      padding=padding, dilation=dilation),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.1, inplace=True),
        ]
        self.convs = nn.Sequential(*layers)

    def forward(self, x):
        """Apply conv -> batch-norm -> leaky-ReLU to ``x``."""
        out = self.convs(x)
        return out


class DarkNet_19(nn.Module):
    """DarkNet-19 backbone used as an image classifier.

    Six convolutional stages (stages 1-5 each end in a 2x2 max-pool, so the
    spatial size is floor-halved five times), then a 1x1 conv maps 1024
    channels to ``num_classes`` and adaptive average pooling collapses the
    spatial dims, yielding ``(N, num_classes)`` logits.
    """

    def __init__(self, num_classes=1000):
        print("Initializing the darknet19 network ......")

        super(DarkNet_19, self).__init__()
        # Stage 1 — output stride 2, 32 channels
        self.conv_1 = nn.Sequential(
            Conv_BN_LeakyReLU(3, 32, 3, 1),
            nn.MaxPool2d((2, 2), 2),
        )

        # Stage 2 — output stride 4, 64 channels
        self.conv_2 = nn.Sequential(
            Conv_BN_LeakyReLU(32, 64, 3, 1),
            nn.MaxPool2d((2, 2), 2)
        )

        # Stage 3 — output stride 8, 128 channels (3x3 / 1x1 / 3x3 bottleneck)
        self.conv_3 = nn.Sequential(
            Conv_BN_LeakyReLU(64, 128, 3, 1),
            Conv_BN_LeakyReLU(128, 64, 1),
            Conv_BN_LeakyReLU(64, 128, 3, 1),
            nn.MaxPool2d((2, 2), 2)
        )

        # Stage 4 — output stride 16, 256 channels
        self.conv_4 = nn.Sequential(
            Conv_BN_LeakyReLU(128, 256, 3, 1),
            Conv_BN_LeakyReLU(256, 128, 1),
            Conv_BN_LeakyReLU(128, 256, 3, 1),
            nn.MaxPool2d((2, 2), 2)
        )

        # Stage 5 — output stride 32, 512 channels (5-conv bottleneck)
        self.conv_5 = nn.Sequential(
            Conv_BN_LeakyReLU(256, 512, 3, 1),
            Conv_BN_LeakyReLU(512, 256, 1),
            Conv_BN_LeakyReLU(256, 512, 3, 1),
            Conv_BN_LeakyReLU(512, 256, 1),
            Conv_BN_LeakyReLU(256, 512, 3, 1),
            nn.MaxPool2d((2, 2), 2)
        )

        # Stage 6 — stride stays 32, 1024 channels (no pooling)
        self.conv_6 = nn.Sequential(
            Conv_BN_LeakyReLU(512, 1024, 3, 1),
            Conv_BN_LeakyReLU(1024, 512, 1),
            Conv_BN_LeakyReLU(512, 1024, 3, 1),
            Conv_BN_LeakyReLU(1024, 512, 1),
            Conv_BN_LeakyReLU(512, 1024, 3, 1)
        )

        # Classification head: 1x1 conv to num_classes + global average pool.
        self.conv_7 = nn.Conv2d(1024, num_classes, 1)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        """Return ``(N, num_classes)`` logits for an ``(N, 3, H, W)`` batch.

        H and W should be large enough to survive five 2x2 max-pools
        (e.g. the 224x224 or 416x416 inputs used in this file).
        """
        x = self.conv_1(x)
        x = self.conv_2(x)
        x = self.conv_3(x)
        x = self.conv_4(x)
        x = self.conv_5(x)
        x = self.conv_6(x)

        x = self.conv_7(x)           # (N, num_classes, H/32, W/32)
        x = self.avgpool(x)          # (N, num_classes, 1, 1)
        x = torch.squeeze(x, dim=3)  # (N, num_classes, 1)
        x = torch.squeeze(x, dim=2)  # (N, num_classes)
        # NOTE: removed the leftover debug print of x.size() that ran on
        # every forward pass (it spammed stdout throughout training).
        return x

if __name__ == '__main__':
    # Quick smoke test: run a dummy batch through the network first.
    test_net = DarkNet_19(10)
    test_x = Variable(torch.zeros(2, 3, 224, 224))
    # test_x = Variable(torch.zeros(1, 3, 64, 64))
    test_y = test_net(test_x)
    print('output: {}'.format(test_y.shape))

    sep()
    sep('cat and dog')
    sep()

    # Pick GPU if available, otherwise fall back to CPU.
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    print('device', device)
    device = torch.device(device)

    # Hyperparameters.
    # 200 cats 200 dogs 8 epoch is 0.6 for acc
    N_LIM = 200  # 1e4  max images loaded per class
    BATCH_SIZE = 4
    EPOCH = 2
    ALPHA = 1e-4  # learning rate

    # Training images live under this project-relative directory.
    data_path = r'../../../../large_data/DL1/_many_files/catdog_data/data/train'
    IMG_H = 416
    IMG_W = 416

    from python_ai.DL.tensorflow.x9_read_data.x_cv2_cat_dog import get_all_files

    # Load up to N_LIM images per class; label cats 0 and dogs 1.
    x_cat = get_all_files(data_path, 'cat', N_LIM, IMG_W, IMG_H)
    print('x_cat:', x_cat.shape)
    n_cat = len(x_cat)
    x_dog = get_all_files(data_path, 'dog', N_LIM, IMG_W, IMG_H)
    print('x_dog:', x_dog.shape)
    n_dog = len(x_dog)
    x = np.concatenate([x_cat, x_dog], axis=0)
    # Move channel axis to position 1 (channels-first for PyTorch);
    # assumes get_all_files returns NHWC arrays — TODO confirm.
    x = np.transpose(x, [0, 3, 1, 2])
    y_cat = np.full([n_cat], 0, dtype=np.int32)
    print('y_cat:', y_cat.shape)
    y_dog = np.full([n_dog], 1, dtype=np.int32)
    print('y_dog:', y_dog.shape)
    y = np.concatenate([y_cat, y_dog], axis=0)
    print('x:', x.shape)
    print('y:', y.shape)
    # 80% train, then split the remainder 50/50 into val and test (80/10/10).
    x_train, x_val_test, y_train, y_val_test = train_test_split(x, y, train_size=0.8, random_state=1, shuffle=True)
    x_val, x_test, y_val, y_test = train_test_split(x_val_test, y_val_test, train_size=0.5, random_state=1, shuffle=True)
    print('x_train', x_train.shape)
    print('x_val', x_val.shape)
    print('x_test', x_test.shape)
    print('y_train', y_train.shape)
    print('y_val', y_val.shape)
    print('y_test', y_test.shape)

    # Wrap the splits in TensorDatasets and batched DataLoaders.
    x_train = torch.Tensor(x_train)
    x_val = torch.Tensor(x_val)
    x_test = torch.Tensor(x_test)
    y_train = torch.Tensor(y_train)
    y_val = torch.Tensor(y_val)
    y_test = torch.Tensor(y_test)
    ds_train = TensorDataset(x_train, y_train)
    ds_val = TensorDataset(x_val, y_val)
    ds_test = TensorDataset(x_test, y_test)
    dl_train = DataLoader(ds_train, BATCH_SIZE, shuffle=True)
    dl_val = DataLoader(ds_val, BATCH_SIZE, shuffle=True)
    dl_test = DataLoader(ds_test, BATCH_SIZE, shuffle=True)

    # Two-class DarkNet-19, cross-entropy loss, SGD with momentum.
    model = DarkNet_19(2).to(device)
    # print(model)

    criterion = nn.CrossEntropyLoss()
    # optim = torch.optim.Adam(model.parameters(), lr=ALPHA)
    optim = torch.optim.SGD(model.parameters(), lr=ALPHA, momentum=0.9, weight_decay=1e-4)

    def accuracy(y_true, y_pred):
        """Fraction of samples where the argmax of `y_pred` equals `y_true`.

        y_true: (N,) integer class labels; y_pred: (N, C) scores/logits.
        Returns a 0-dim float tensor in [0, 1].
        """
        predicted = y_pred.argmax(dim=1)
        hits = (predicted == y_true).float()
        return hits.mean()


    def process_data(dl, is_train, label):
        """Run one full pass over DataLoader `dl`; return (avg_loss, avg_acc).

        is_train=True : train `model` (backward + `optim.step()` per batch).
        is_train=False: evaluation only, under torch.no_grad().
        `label` is a tag for log lines. Relies on the surrounding script's
        `model`, `optim`, `criterion`, `device`, `accuracy` and `epoch`
        (the latter only for log messages).
        """
        len_dl = len(dl)
        if len_dl == 0:
            # Guard: with no batches the original code hit NameError on `i`
            # (and a zero division via GROUP) after the loop.
            return 0., 0.
        GROUP = int(np.ceil(len_dl / 10))  # log roughly 10 times per pass
        avg_loss = 0.
        avg_acc = 0.
        for i, (bx, by) in enumerate(dl):
            bx = bx.float().to(device)
            by = by.long().to(device)
            if is_train:
                model.train(True)
                optim.zero_grad()
                h = model(bx)
                loss = criterion(h, by)
                loss.backward()
                optim.step()
                acc = accuracy(by, h)
                model.train(False)
            else:
                model.train(False)
                # Evaluation needs no autograd graph; no_grad saves memory/time.
                with torch.no_grad():
                    h = model(bx)
                    loss = criterion(h, by)
                    acc = accuracy(by, h)
            lossv = loss.detach().cpu().numpy()
            accv = acc.detach().cpu().numpy()
            avg_loss += lossv
            avg_acc += accv
            if i % GROUP == 0:
                print(f'{label}: epoch#{epoch + 1}: #{i + 1} loss = {lossv}, acc = {accv}')
        # Always report the final batch if the loop didn't just log it.
        if i % GROUP != 0:
            print(f'{label}: epoch#{epoch + 1}: #{i + 1} loss = {lossv}, acc = {accv}')
        avg_loss /= i + 1
        avg_acc /= i + 1
        return avg_loss, avg_acc

    # Per-epoch history buffers for the loss/accuracy curves plotted below.
    loss_his = []
    acc_his = []
    loss_his_val = []
    acc_his_val = []
    for epoch in range(EPOCH):
        sep(epoch + 1)
        # One training pass, then one validation pass per epoch.
        avg_loss, avg_acc = process_data(dl_train, True, 'train')
        avg_loss_val, avg_acc_val = process_data(dl_val, False, 'val')
        loss_his.append(avg_loss)
        loss_his_val.append(avg_loss_val)
        acc_his.append(avg_acc)
        acc_his_val.append(avg_acc_val)
        print(f'epoch#{epoch+1}: loss = {avg_loss} acc = {avg_acc}, loss_val = {avg_loss_val}, acc_val = {avg_acc_val}')

    # Final held-out evaluation after training completes.
    sep('Test')
    avg_loss_test, avg_acc_test = process_data(dl_test, False, 'test')
    print(f'Test loss = {avg_loss_test}, acc = {avg_acc_test}')

    # Plot training curves: loss on the left subplot, accuracy on the right.
    import matplotlib.pyplot as plt
    plt.figure(figsize=[12, 6])
    spr = 1  # subplot rows
    spc = 2  # subplot columns
    spn = 0  # current subplot index

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(loss_his, label='train loss')
    plt.plot(loss_his_val, label='val loss')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(acc_his, label='train acc')
    plt.plot(acc_his_val, label='val acc')
    plt.legend()

    plt.show()
