import torch
import torchvision
import torch.utils.data
from tqdm import tqdm
from utils.segmentation import bilinear_kernel, acc_fcn
import numpy
from utils.dataset.voc2012 import VOC2012

if __name__ == '__main__':
    # Train a simple FCN (ResNet-18 feature extractor + 1x1 classifier conv
    # + 32x-upsampling transpose conv) on PASCAL VOC 2012 segmentation,
    # then report pixel accuracy on a held-out split.
    train_dataset = VOC2012(root='out/VOCdevkit/VOC2012', filename='ImageSets/Segmentation/train.txt',
                            image_size=(256, 256),
                            transform=None,
                            target_transform=None)
    resnet_pretrained = torchvision.models.resnet18(pretrained=True)
    # Drop the final avgpool + fc layers, keeping only the conv backbone.
    model = torch.nn.Sequential(*list(resnet_pretrained.children())[:-2])
    num_classes = 21  # 20 VOC object classes + background
    model.add_module('final_conv', torch.nn.Conv2d(512, num_classes, kernel_size=(1, 1)))
    # 32x upsampling back to input resolution: kernel 64, stride 32, pad 16.
    model.add_module('transpose_conv',
                     torch.nn.ConvTranspose2d(num_classes, num_classes, kernel_size=(64, 64), padding=(16, 16),
                                              stride=(32, 32)))

    # Initialize the upsampling layer with fixed bilinear-interpolation
    # weights and the classifier conv with Xavier.
    W = bilinear_kernel(num_classes, num_classes, 64)
    model.transpose_conv.weight.data.copy_(W)
    torch.nn.init.xavier_normal_(model.final_conv.weight)
    # reduction='none' keeps the per-pixel loss map; it is averaged over the
    # spatial dims and summed over the batch inside the training loop.
    loss = torch.nn.CrossEntropyLoss(reduction='none')
    # BUG FIX: hard-coded 'cuda:0' crashed on CPU-only machines; fall back
    # to CPU when no GPU is available. Identical behavior when a GPU exists.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4)
    model.to(device)
    epochs = 50
    # NOTE(review): T_max=5 with 50 epochs makes the cosine schedule cycle
    # up and down repeatedly rather than anneal once — confirm intent.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
    train_iter = torch.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True)
    model.train()
    for e in range(epochs):
        sum_loss = 0.
        acc = 0.
        for x_cpu, y_cpu in tqdm(train_iter, colour='green'):
            optimizer.zero_grad()
            x, y = x_cpu.to(device), y_cpu.to(device)
            yp = model(x)
            # Per-pixel CE -> mean over H -> mean over W -> sum over batch.
            train_loss = loss(yp, y).mean(1).mean(1).sum()
            train_loss.backward()
            sum_loss += train_loss.item()
            acc += acc_fcn(yp, y)
            optimizer.step()
        acc /= len(train_iter)          # mean per-batch accuracy (assumes acc_fcn returns a batch average — TODO confirm)
        sum_loss /= len(train_dataset)  # mean per-image loss
        # BUG FIX: these per-epoch stats were packed into an array but never
        # printed or saved, so training produced no progress output at all.
        print(f'epoch {e}: loss={sum_loss:.4f} acc={acc:.4f} '
              f'lr={scheduler.get_last_lr()[0]:.6f}')
        scheduler.step()
    model.eval()
    # NOTE(review): trainval.txt includes the training images, so this
    # "validation" accuracy is optimistic; val.txt is likely intended.
    val_dataset = VOC2012(root='out/VOCdevkit/VOC2012', filename='ImageSets/Segmentation/trainval.txt',
                          image_size=(256, 256),
                          transform=None,
                          target_transform=None)
    val_iter = torch.utils.data.DataLoader(val_dataset, batch_size=8, shuffle=False)
    acc = 0.
    # BUG FIX: evaluation previously ran with autograd enabled, building
    # useless computation graphs and wasting memory; disable gradients.
    with torch.no_grad():
        for x_cpu, y_cpu in tqdm(val_iter, colour='green'):
            x, y = x_cpu.to(device), y_cpu.to(device)
            yp = model(x)
            acc += acc_fcn(yp, y)
    acc /= len(val_iter)
    print(acc)
