
# coding: utf-8

# # Model

# In[ ]:


# %config IPCompleter.greedy=True
# print('starting')


#  ## base_net

# In[ ]:


import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (optionally strided) ->
    1x1 expand (x4), with a residual shortcut. Uses LeakyReLU throughout
    instead of the classic ReLU.

    Args:
        inplanes: input channel count.
        planes: bottleneck width; output has ``planes * 4`` channels.
        stride: stride of the middle 3x3 conv (2 halves the resolution).
    """

    def __init__(self, inplanes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inplanes, planes, 1, bias=False),
            nn.BatchNorm2d(planes),
            nn.LeakyReLU(),

            nn.Conv2d(planes, planes, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.LeakyReLU(),

            nn.Conv2d(planes, planes * 4, 1, bias=False),
            nn.BatchNorm2d(planes * 4))

        self.relu = nn.LeakyReLU()

        # Always constructed (keeps state_dict keys stable for existing
        # checkpoints) but only applied when the identity shortcut cannot
        # match the main branch.
        self.downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes * 4, 1, stride=stride),
            nn.BatchNorm2d(planes * 4))
        # Bug fix: the shortcut must also be projected when stride != 1,
        # even if the channel counts already match. The original checked
        # only the channel condition, so a strided block with matching
        # channels would fail on the residual add (spatial mismatch).
        self.ds = (stride != 1) or (inplanes != planes * 4)

    def forward(self, x):
        out = self.conv(x)
        if self.ds:
            x = self.downsample(x)
        out += x  # `out` is freshly computed, so the in-place add is safe
        return self.relu(out)
        
class ResNet(nn.Module):
    """Bottleneck-based ResNet backbone (default layout matches ResNet-101).

    ``forward`` returns the four stage feature maps (conv2..conv5) plus a
    globally average-pooled feature, the tuple consumed by SmoothNet and
    BorderNet downstream.
    """

    def __init__(self, layers=(3, 4, 23, 3)):
        # Tuple default fixes the mutable-default-argument pitfall of the
        # original `layers=[3,4,23,3]`; callers passing lists still work.
        super(ResNet, self).__init__()
        planes = [64, 128, 256, 512]
        inplanes = 64

        # Stem: 7x7/2 conv + 3x3/2 max pool -> 64 channels at 1/4 resolution.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, inplanes, 7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(inplanes),
            nn.LeakyReLU(),
            nn.MaxPool2d(3, stride=2, padding=1))

        for i in range(4):
            # The first stage keeps resolution; each later stage halves it.
            # (The original encoded the same values as stride=(i+7)//4.)
            stride = 1 if i == 0 else 2
            block = nn.Sequential(Bottleneck(inplanes, planes[i], stride=stride))
            inplanes = planes[i] * 4
            for j in range(1, layers[i]):
                block.add_module(str(j), Bottleneck(inplanes, planes[i]))
            self.add_module('conv' + str(i + 2), block)

    def forward(self, x):
        out1 = self.conv1(x)
        out2 = self.conv2(out1)
        out3 = self.conv3(out2)
        out4 = self.conv4(out3)
        out5 = self.conv5(out4)
        global_pool = F.adaptive_avg_pool2d(out5, (1, 1))
        return out2, out3, out4, out5, global_pool
        
# net = ResNet([1,2,1,1])  
# # print(net)
# x=  torch.ones((1,3,800,800))
# y = net(x)
# for i in y:
#     print(i.size())         


# ## smooth_net

# In[ ]:


class RRB(nn.Module):
    """Refinement Residual Block (DFN): a 1x1 channel adapter followed by
    a residual two-conv 3x3 refinement branch.

    Args:
        inplanes: input channel count.
        planes: output channel count (also the residual width).
        interplanes: hidden width of the refinement branch.
    """

    def __init__(self, inplanes, planes, interplanes=512):
        super(RRB, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 1)
        self.conv2 = nn.Sequential(
            nn.Conv2d(planes, interplanes, 3, padding=1),
            # Bug fix: this BN follows the planes -> interplanes conv, so
            # it must normalize `interplanes` channels. The original used
            # `planes`, which only worked because every call site in this
            # file passes planes == interplanes == 512.
            nn.BatchNorm2d(interplanes),
            nn.LeakyReLU(),
            nn.Conv2d(interplanes, planes, 3, padding=1))
        self.relu = nn.LeakyReLU()

    def forward(self, x):
        out = self.conv1(x)
        out1 = self.conv2(out)
        return self.relu(out + out1)

class CAB(nn.Module):
    """Channel Attention Block: derives per-channel gates from the pooled
    concatenation of (x1, x2), gates x1 with them, and adds x2."""

    def __init__(self, inplanes, interplanes=512):
        super(CAB, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(inplanes * 2, interplanes, 1),
            nn.LeakyReLU(),
            nn.Conv2d(interplanes, inplanes, 1))

    def forward(self, x1, x2):
        # Debug aid kept from the original: flag mismatched input sizes.
        if x1.size() != x2.size():
            print(x1.size(), x2.size())
        merged = torch.cat((x1, x2), 1)
        spatial = merged.size()[2:4]
        # Global average pool down to 1x1 per channel.
        pooled = F.avg_pool2d(merged, spatial, stride=spatial)
        gates = torch.sigmoid(self.conv1(pooled))
        return gates * x1 + x2
    
    
def side_branch(class_num, factor, inplanes=512):
    """Build a side-output head: a 1x1 conv projecting to `class_num`
    channels, then a learned `factor`x upsample via transposed convolution
    (kernel == stride == factor, so H and W are multiplied by `factor`)."""
    projector = nn.Conv2d(inplanes, class_num, 1)
    upsampler = nn.ConvTranspose2d(class_num, class_num, factor, stride=factor)
    return nn.Sequential(projector, upsampler)
    
class SmoothBlock(nn.Module):
    """One decoder step of the smooth branch.

    Refines the skip feature ``x1`` (RRB), upsamples the deeper feature
    ``x2`` (transposed conv), fuses both through a CAB, and refines the
    fusion with another RRB. When ``factor`` is set, a side branch also
    emits a `factor`x-upsampled per-class map from the upsampled feature.
    """

    def __init__(self, plane, factor=None, inplanes=512, class_num=0):
        super(SmoothBlock, self).__init__()
        self.factor = factor
        self.rrb = RRB(plane, 512)
        self.cab = CAB(512)
        self.rrbb = RRB(512, 512)
        # 1x1 transposed conv, stride 2 + output_padding 1: doubles H and W.
        self.transpose = nn.ConvTranspose2d(512, 512, 1, stride=2, output_padding=1)
        if self.factor:
            self.branch = side_branch(class_num, factor, inplanes)

    def forward(self, x1, x2):
        refined_skip = self.rrb(x1)
        upsampled = self.transpose(x2)
        side = self.branch(upsampled) if self.factor else None
        fused = self.cab(refined_skip, upsampled)
        return side, self.rrbb(fused)

    def patch(self, trans):
        # Swap in an externally built upsampling module (used by SmoothNet
        # to adapt the deepest block to the 1x1 global-pool input).
        self.transpose = trans
    
class SmoothNet(nn.Module):
    """Smooth (segmentation) branch of DFN.

    Consumes the backbone tuple (out2, out3, out4, out5, global_pool)
    produced by ResNet.forward and decodes it top-down through four
    SmoothBlocks, returning four per-class side outputs plus their
    1x1-conv fusion.
    """

    def __init__(self,class_num):
        super(SmoothNet,self).__init__()
        # Channel counts of out5..out2; `factor` is each side branch's
        # upsampling ratio back toward input resolution.
        planes=[2048,1024,512,256]
        factor = [16,8,4,2]
        self.class_num = class_num
        # block4 fuses out5 with the global pool and has no side branch.
        self.block4 = SmoothBlock(planes[0])
        self.block3 = SmoothBlock(planes[1], factor[0], inplanes = 512, class_num = class_num)
        self.block2 = SmoothBlock(planes[2], factor[1], inplanes = 512, class_num = class_num)
        self.block1 = SmoothBlock(planes[3], factor[2], inplanes = 512, class_num = class_num)
        # Final 2x upsample + side branch for the highest-resolution output.
        self.transpose = nn.ConvTranspose2d(512, 512, 1, stride = 2, output_padding=1)
        self.branch = side_branch(class_num, factor[3], 512)
        # Fuses the four concatenated side outputs into one class map.
        self.conv = nn.Conv2d(class_num*4, class_num, 1)
        # Flips to True once block4's upsampler has been swapped (see patch).
        self.patched = False

    def forward(self, x):
        # x: (out2, out3, out4, out5, global_pool); indexed from the end.
        if not self.patched:
            # Lazily rebuild block4's upsampler for out5's spatial size.
            # NOTE(review): the size is frozen on the first batch, so all
            # later inputs must share that spatial size — confirm.
            self.patch(tuple(x[-2].size()[2:4]), x[-2].device)
        _ , out = self.block4(x[-2], x[-1])
        b4, out = self.block3(x[-3], out)
        b3, out = self.block2(x[-4], out)
        b2, out = self.block1(x[-5], out)
        out = self.transpose(out)
        b1 = self.branch(out)
        b = torch.cat((b1,b2,b3,b4), 1)
        fuse = self.conv(b)
        return b1,b2,b3,b4,fuse

    def patch(self, pic_size, device):
        # block4's x2 input is the 2048-channel 1x1 global pool, not a
        # 512-channel map, so its default ConvTranspose2d cannot apply.
        # This full-size kernel expands 1x1 -> pic_size and maps 2048 -> 512.
        trans = nn.ConvTranspose2d(2048, 512, pic_size, stride = 1)
        if device.type == 'cuda':
            trans = trans.cuda(device.index)
        # NOTE(review): parameters created here are absent from any
        # optimizer built before the first forward pass — confirm intended.
        self.block4.patch( trans)
        self.patched = True
        


# ## boder_net

# In[ ]:


class BorderBlock(nn.Module):
    """One step of the border branch: refine the backbone feature, upsample
    it by `stride`, add the running feature ``x2``, and refine the sum."""

    def __init__(self, planes, stride=2):
        super(BorderBlock, self).__init__()
        self.rrb1 = RRB(planes, 512)
        # 1x1 transposed conv: out = (in - 1) * stride + 1 + (stride - 1)
        # = in * stride, i.e. scales H and W by `stride` exactly.
        self.transpose = nn.ConvTranspose2d(512, 512, 1, stride=stride,
                                            output_padding=stride - 1)
        self.rrb2 = RRB(512, 512)

    def forward(self, x1, x2):
        upsampled = self.transpose(self.rrb1(x1))
        return self.rrb2(upsampled + x2)

class BorderNet(nn.Module):
    """Border branch of DFN: fuses the four backbone stage outputs
    bottom-up and emits a 2x-upsampled per-class border map."""

    def __init__(self, class_num):
        super(BorderNet, self).__init__()
        stage_channels = [256, 512, 1024, 2048]
        self.rrb = RRB(stage_channels[0], 512)
        # Each block upsamples its (deeper) input back to out2 resolution.
        self.block1 = BorderBlock(stage_channels[1], 2)
        self.block2 = BorderBlock(stage_channels[2], 4)
        self.block3 = BorderBlock(stage_channels[3], 8)
        self.transpose = nn.ConvTranspose2d(512, 512, 1, stride=2, output_padding=1)
        self.branch = side_branch(class_num, 2, 512)

    def forward(self, x):
        # x[0..3] are the out2..out5 feature maps from the backbone.
        fused = self.rrb(x[0])
        fused = self.block1(x[1], fused)
        fused = self.block2(x[2], fused)
        fused = self.block3(x[3], fused)
        return self.branch(self.transpose(fused))


# ## dfn_net

# In[ ]:


class DFN(nn.Module):
    """Discriminative Feature Network: a ResNet backbone feeding a smooth
    (segmentation) branch and a border branch.

    Returns the smooth branch's four side outputs and fused map, plus the
    border branch's output.
    """

    def __init__(self, class_num):
        super(DFN, self).__init__()
        self.base = ResNet()
        self.smooth = SmoothNet(class_num)
        self.border = BorderNet(class_num)

    def forward(self, x):
        features = self.base(x)
        b1, b2, b3, b4, fuse = self.smooth(features)
        border_map = self.border(features)
        return b1, b2, b3, b4, fuse, border_map

class Lossor(object):
    """Accumulates the mixed segmentation + border loss across iterations."""

    def __init__(self):
        self.sum_loss = 0.0  # running sum of per-batch total losses
        self.num = 0         # number of batches accumulated

    def MixLoss(self, out, label, focal_weight, add=True):
        """Cross-entropy over the four side outputs and the fused map,
        plus `focal_weight` times the focal loss on the border output.

        Args:
            out: (b1, b2, b3, b4, fuse, r1) as returned by DFN.forward.
            label: integer class map, cast to long for cross_entropy.
            focal_weight: scale applied to the border focal loss.
            add: when True, fold the result into the running mean.
        """
        b1, b2, b3, b4, fuse, r1 = out
        label = label.long()
        sl = sum(F.cross_entropy(o, label) for o in (b1, b2, b3, b4, fuse))
        fl = self.focal_loss(r1, label)
        total_loss = sl + fl * focal_weight
        if add:
            self.sum_loss += total_loss.item()
            self.num += 1
        return total_loss

    def mean_loss(self, clear=True):
        """Mean accumulated loss; resets the accumulator when `clear`."""
        loss = self.sum_loss / self.num
        if clear:
            self.sum_loss = 0.0
            # Bug fix: the original reset `self.num_loss` (a typo), so the
            # batch counter was never cleared and every later mean was
            # computed against an ever-growing denominator.
            self.num = 0
        return loss

    def focal_loss(self, pred, label, alpha=0.25, gamma=2.0):
        """Focal loss of `pred` against integer `label`.

        `pred` is treated as per-class probabilities (pk is clamped into
        (1e-12, 1] before the log). NOTE(review): the caller passes the raw
        border-branch output with no softmax — confirm that is intended.
        """
        label = self.flat_dim(label, pred.size()[1]).float()
        pk = torch.sum(pred * label, 1)  # probability of the true class
        loss = -alpha * torch.mean(torch.pow(1.0 - pk, gamma)
                                   * torch.log(torch.clamp(pk, 1e-12, 1.0)))
        return loss

    def flat_dim(self, label, num_class):
        # One-hot encode `label` along a new dim 1 (boolean tensor).
        out = [label == i for i in range(num_class)]
        return torch.stack(out, dim=1)
    

class Evaluator(object):
    """Accumulates a confusion matrix over batches and reports mean IoU."""

    def __init__(self, num_class):
        self.num_class = num_class
        # Rows index ground-truth classes, columns predicted classes.
        self.matrix = torch.zeros((self.num_class, self.num_class))

    def add_batch(self, gt_image, pre_image):
        assert gt_image.shape == pre_image.shape
        # Matrix lives on CPU regardless of where the batch tensors are.
        self.matrix += self.generate_matrix(gt_image, pre_image).cpu()

    def mIoU(self, clear=False):
        """Mean IoU over classes; non-finite entries (classes absent from
        both gt and prediction) are excluded from the mean."""
        tp = torch.diag(self.matrix)
        union = torch.sum(self.matrix, 0) + torch.sum(self.matrix, 1) - tp
        iou = tp / union
        miou = torch.mean(iou[torch.isfinite(iou)])
        if clear:
            self.matrix = torch.zeros((self.num_class, self.num_class))
        return miou.item()

    def generate_matrix(self, gt_image, pre_image):
        gt_image = gt_image.float()
        pre_image = pre_image.float()
        # Only count pixels whose ground truth is a valid class id.
        valid = (gt_image >= 0) & (gt_image < self.num_class)
        flat = self.num_class * gt_image[valid] + pre_image[valid]
        counts = torch.bincount(flat.int(), minlength=self.num_class ** 2)
        square = counts[:self.num_class ** 2].reshape(self.num_class, self.num_class)
        # Return floats so the later IoU division is not integer division.
        return square.float()
    
        


# # Train

# In[ ]:


import torch
import torch.nn as nn
import torchvision
import math
import glob
import os
import datetime
import numpy as np
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from IPython.display import display 


class CustomDataset(Dataset):
    """Cityscapes-style paired image/label dataset.

    Expects ./data/<image_dir>/<task>/*/ and ./data/<label_dir>/<task>/*/
    layouts; pairs are matched by sorting both glob results (assumes the
    two listings line up one-to-one — TODO confirm the naming scheme keeps
    them aligned).
    """

    def __init__(self, image_dir, label_dir, img_format, lab_format, task='train'):
        images = glob.glob(r'./data/{}/{}/*/*{}'.format(image_dir, task, img_format))
        labels = glob.glob(r'./data/{}/{}/*/*{}'.format(label_dir, task, lab_format))
        images.sort()
        labels.sort()
        self.data = list(zip(images, labels))
        # NEAREST keeps label ids intact when resizing.
        self.transforms = torchvision.transforms.Resize((256, 512), Image.NEAREST)

    def __getitem__(self, index):
        image_path, label_path = self.data[index]
        image = Image.open(image_path)
        label = Image.open(label_path)
        image = np.array(self.transforms(image))
        label = np.array(self.transforms(label))
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        # Shift ids by +1 so the 255 'ignore' id maps to class 0.
        # Bug fix: widen from uint8 before adding — the original computed
        # `label + 1` in uint8, where 255 silently wraps to 0 *before* the
        # `== 256` test (which therefore could never fire); the intended
        # result only emerged by accident of the overflow.
        label = label.astype(np.int64) + 1
        label[label == 256] = 0
        return image, label

    def __len__(self):
        return len(self.data)
    
# Instantiate the Cityscapes training split: leftImg8bit RGB frames paired
# with gtFine trainId label maps.
dataset = CustomDataset('leftImg8bit', 'gtFine', '_leftImg8bit.png', '_gtFine_labelTrainIds.png')


# In[ ]:


# ---- Hyper-parameters and training setup ----
batch_size = 2
num_class = 20          # 19 trainIds shifted by +1, plus id 0 for the former 255 (see CustomDataset)
num_image = len(dataset)
num_epoch = 30
focal_weight = 0.1      # weight of the border focal loss inside MixLoss
learning_rate = 4e-3
weight_decay = 0.0001
momentum=0.9
power = 0.9             # exponent of the poly learning-rate decay
num_show = 2            # print/record metrics every `num_show` iterations
ckpt_path = './ckpt'
last_epoch = 19         # resume point: epochs <= last_epoch are skipped
net_weight_path = 'model_epoch_19.pth'  # checkpoint to resume from (falsy to train from scratch)
if not os.path.isdir(ckpt_path):
    os.makedirs(ckpt_path)
trainloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=4)
net = DFN(num_class)
# print(net)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() > 1:
    # NOTE(review): wraps in DataParallel but pins device_ids=[0], so only
    # one GPU is actually used — confirm this is intentional.
    net = nn.DataParallel(net, device_ids=[0])
if net_weight_path:
    # NOTE(review): the checkpoint must have been saved from the same
    # wrapping (DataParallel or plain) for the key prefixes to match.
    net.load_state_dict(torch.load(ckpt_path+'/'+net_weight_path))
    net.train()
net.to(device)


# In[ ]:


# ---- Training loop (resumes at last_epoch + 1) ----
history = {'train': []}
for epoch in range(last_epoch+1, num_epoch):  # loop over the dataset multiple times
    lossor = Lossor()
    evaluator = Evaluator(num_class)
#     evaluator.cuda(device)
    # NOTE(review): `*=` compounds the decay factor every epoch, decaying
    # much faster than the usual poly schedule
    # lr = base * (1 - epoch/num_epoch)^power — confirm this is intended.
    learning_rate *= math.pow(1 - epoch/num_epoch, power) # learning_rate *= math.pow(1 - (i+epoch*epoch_iter)/max_iter, power)
    # Rebuilt each epoch to pick up the new lr (this also resets SGD's
    # momentum buffers at every epoch boundary).
    optimizer = torch.optim.SGD(net.parameters(), lr = learning_rate, momentum= momentum, weight_decay = weight_decay)
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs = inputs.float()
#         labels = labels.float()
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = lossor.MixLoss(outputs, labels, focal_weight)
        loss.backward()
        optimizer.step()

        # outputs[-2] is the fused segmentation map; argmax over the class dim.
        evaluator.add_batch(labels, torch.argmax(outputs[-2], 1))
        # NOTE(review): `i` counts batches while num_image counts images, so
        # the `i == num_image - 1` end-of-epoch trigger can only fire when
        # batch_size == 1 — confirm.
        if i % num_show == num_show-1 or i == num_image - 1:
            loss_tmp = lossor.mean_loss()
            mIoU_tmp = evaluator.mIoU()
            print(datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), 
                  ' epoch: %d/iter: %-5d    loss: %-10.3f    mIoU: %-10.3f'%(epoch + 1, i + 1, loss_tmp, mIoU_tmp))
            history['train'].append({'epoch':epoch+1, 'iter':i+1, 'loss':loss_tmp, 'mIoU':mIoU_tmp})

    # Persist weights and the running metric history after every epoch.
    torch.save(net.state_dict(), '{}/model_epoch_{}.pth'.format(ckpt_path, epoch))
    torch.save(history, '{}/history_epoch_{}.pth'.format(ckpt_path, epoch))