import torch as pt
import torchvision as ptv
from python_ai.common.xcommon import *


def ConvBnRelu(in_ch, filters, ksize, stride=1, padding='same'):
    """Build a Conv2d -> BatchNorm2d -> ReLU stack.

    Args:
        in_ch: number of input channels.
        filters: number of output channels.
        ksize: square kernel size; 'same' padding assumes ksize is odd
            (the symmetric pad (ksize - 1) // 2 is only exact for odd sizes).
        stride: conv stride (2 halves the spatial resolution).
        padding: 'same' keeps the spatial size (at stride 1); anything
            else means no padding.

    Returns:
        pt.nn.Sequential of the three layers.
    """
    if padding == 'same':
        padding_size = (ksize - 1) // 2
    else:
        padding_size = 0
    model = pt.nn.Sequential(
        pt.nn.Conv2d(
            in_ch,
            filters,
            ksize,
            stride=stride,
            padding=padding_size,
            # BatchNorm2d re-centers the activations, so a conv bias is
            # redundant here — drop it to save parameters.
            bias=False,
        ),
        pt.nn.BatchNorm2d(filters),
        pt.nn.ReLU()
    )
    return model


class InceptionBlock(pt.nn.Module):
    """Inception module with four parallel branches, channel-concatenated.

    Branches: 1x1 conv | 1x1 -> 3x3 | 1x1 -> 3x3 -> 3x3 | 3x3 avg-pool -> 1x1.
    Each branch emits `branch_out_ch` channels, so the output has
    4 * branch_out_ch channels. With `shrink=True` the first conv of every
    branch uses stride 2, halving the spatial resolution of all branches
    consistently (the avg-pool itself is size-preserving).
    """

    def __init__(self, in_ch, branch_out_ch, shrink=False, **kwargs):
        super().__init__(**kwargs)

        stride = 2 if shrink else 1

        # Branch 1: plain 1x1 projection.
        self.branch01 = ConvBnRelu(in_ch, branch_out_ch, 1, stride)

        # Branch 2: 1x1 reduction followed by a 3x3 conv.
        self.branch02_1 = ConvBnRelu(in_ch, branch_out_ch, 1, stride)
        self.branch02_2 = ConvBnRelu(branch_out_ch, branch_out_ch, 3, 1)

        # Branch 3: 1x1 reduction then two stacked 3x3 convs
        # (5x5 effective receptive field).
        self.branch03_1 = ConvBnRelu(in_ch, branch_out_ch, 1, stride)
        self.branch03_2 = ConvBnRelu(branch_out_ch, branch_out_ch, 3, 1)
        self.branch03_3 = ConvBnRelu(branch_out_ch, branch_out_ch, 3, 1)

        # Branch 4: size-preserving 3x3 average pool, then 1x1 projection.
        self.branch04_1 = pt.nn.AvgPool2d(3, 1, padding=1)
        self.branch04_2 = ConvBnRelu(in_ch, branch_out_ch, 1, stride)

    def forward(self, x):
        out1 = self.branch01(x)
        out2 = self.branch02_2(self.branch02_1(x))
        out3 = self.branch03_3(self.branch03_2(self.branch03_1(x)))
        out4 = self.branch04_2(self.branch04_1(x))
        # Stack the four branch outputs along the channel axis.
        return pt.cat((out1, out2, out3, out4), dim=1)


class InceptionNet(pt.nn.Module):
    """Small Inception-style classifier over square `size` x `size` inputs.

    Architecture: a 3x3 stem conv, then `n_blocks` pairs of InceptionBlocks
    (the first of each pair halves the spatial size and doubles the
    per-branch width, the second keeps both), global average pooling, and
    a 10-way linear head.

    Args:
        size: input spatial resolution (assumed square; must survive
            `n_blocks` halvings).
        init_in_ch: channels of the input image.
        init_out_ch: channels produced by the stem conv.
        n_blocks: number of shrink/keep block pairs.
        device: kept for backward compatibility only; submodules are now
            registered properly, so a plain `model.to(device)` moves and
            `model.parameters()` exposes everything.
    """

    def __init__(self, size, init_in_ch, init_out_ch, n_blocks, device='cpu', **kwargs):
        super().__init__(**kwargs)

        self.device = device
        self.conv = ConvBnRelu(init_in_ch, init_out_ch, 3, 1)
        layers = []
        in_ch = init_out_ch
        br_ch = in_ch
        out_ch = in_ch  # sensible head width even when n_blocks == 0
        for _ in range(n_blocks):
            # First block of the pair: shrink spatially, widen channels.
            br_ch *= 2
            out_ch = br_ch * 4  # InceptionBlock concatenates 4 branches
            size //= 2
            layers.append(InceptionBlock(in_ch, br_ch, shrink=True))
            in_ch = out_ch
            # Second block: keep resolution and width.
            layers.append(InceptionBlock(in_ch, br_ch, shrink=False))
        # BUG FIX: the original kept these in a plain Python list, which
        # hides them from .parameters() (so the optimizer never trained
        # them) and from .to(); ModuleList registers them as submodules.
        self.inception_layers = pt.nn.ModuleList(layers)
        self.g_avg_pool = pt.nn.AvgPool2d(size, 1, 0)
        self.fc = pt.nn.Linear(out_ch, 10)

    def forward(self, x):
        """Return (batch, 10) class logits for input images `x`."""
        x = self.conv(x)
        # No per-layer .to() needed: ModuleList members follow model.to().
        for layer in self.inception_layers:
            x = layer(x)
        x = self.g_avg_pool(x)
        # Pooling leaves a 1x1 spatial map; drop both singleton dims.
        x = pt.squeeze(x, dim=3)
        x = pt.squeeze(x, dim=2)
        x = self.fc(x)
        return x


if '__main__' == __name__:
    device = pt.device("cuda:0" if pt.cuda.is_available() else "cpu")

    pt.manual_seed(777)
    BATCH_SIZE = 64
    N_EPOCHS = 1
    ALPHA = 0.001  # Adam learning rate

    model = InceptionNet(32, 3, 16, 2, device=device).to(device)
    print(model)

    criterion = pt.nn.CrossEntropyLoss().to(device)
    optim = pt.optim.Adam(params=model.parameters(), lr=ALPHA)

    data_dir = '../../../../../large_data/DL2/pt/cifar10'

    train_ds = ptv.datasets.CIFAR10(root=data_dir, train=True,
                                    transform=ptv.transforms.ToTensor(),
                                    download=False)
    test_ds = ptv.datasets.CIFAR10(root=data_dir, train=False,
                                   transform=ptv.transforms.ToTensor(),
                                   download=False)

    dl_train = pt.utils.data.DataLoader(dataset=train_ds, batch_size=BATCH_SIZE, shuffle=True)
    dl_test = pt.utils.data.DataLoader(dataset=test_ds, batch_size=BATCH_SIZE, shuffle=True)
    dl_val = dl_test  # no held-out validation split; the test set doubles as one


    def acc(h, y):
        """Mean top-1 accuracy of logits `h` against integer labels `y`."""
        return h.argmax(1).eq(y.long()).double().mean()


    def evaluate(dl):
        """Return (avg cost, avg accuracy) over loader `dl`, grads disabled."""
        model.eval()
        total_cost = 0.
        total_acc = 0.
        n_batches = 0
        with pt.no_grad():
            for bx, by in dl:
                bx = bx.to(device)
                by = by.long().to(device)
                h = model(bx)
                total_cost += criterion(h, by).item()
                total_acc += acc(h, by).item()
                n_batches += 1
        n_batches = max(n_batches, 1)  # guard against an empty loader
        return total_cost / n_batches, total_acc / n_batches


    N_BATCH = len(dl_train)
    GROUP = -(-N_BATCH // 10)  # ceil(N_BATCH / 10): log ~10 times per epoch
    for epoch in range(N_EPOCHS):
        model.train()
        for i, (bx, by) in enumerate(dl_train):
            bx = bx.to(device)
            by = by.long().to(device)
            optim.zero_grad()
            h = model(bx)
            cost = criterion(h, by)
            cost.backward()
            optim.step()
            # Log every GROUP-th batch and always the last one.
            if i % GROUP == 0 or i == N_BATCH - 1:
                print(f'epoch#{epoch + 1}: batch#{i + 1}: '
                      f'cost = {cost.item()}, acc = {acc(h, by).item()}')
        print('Validating...')
        avg_cost, avg_acc = evaluate(dl_val)
        print(f'Val: cost = {avg_cost}, acc = {avg_acc}')

    print('Testing')
    avg_cost, avg_acc = evaluate(dl_test)
    # Fixed label: this line previously printed 'Val:' for the test run.
    print(f'Test: cost = {avg_cost}, acc = {avg_acc}')
    print('Over')
