# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/18
from datetime import datetime

import numpy as np
import torch
import torchvision
import torch.nn as nn
import torchvision.models as ModelZoo
import torch.nn.functional as F
from torch.autograd import Variable


def vgg16():
    """Return a torchvision VGG-16 pretrained on ImageNet.

    BUG FIX: the original constructed the model but never returned it, so
    every call downloaded/built a network and silently discarded it.
    """
    return ModelZoo.vgg16(pretrained=True)
def resNet34(verbose=True):
    """Build a randomly initialised torchvision ResNet-34.

    The original local name `_resnet50` was misleading -- the model is a
    ResNet-34. The debug dump of the backbone layers (the original,
    unconditional behaviour) is kept but can now be silenced.

    Args:
        verbose: when True (default, matching the original behaviour),
            print the layers kept by the [:-4] slice for inspection.

    Returns:
        The torchvision ResNet-34 module.
    """
    model = ModelZoo.resnet34(pretrained=False)
    if verbose:
        # The [:-4] slice matches what FCN8.stage1 keeps from this backbone.
        for idx, layer in enumerate(list(model.children())[:-4]):
            print(idx+1,'->', layer,'\n\n')
    return model


# Shared backbone instance sliced up by FCN8 below.
resnet34 = resNet34()


def bilinear_kernel(numClass, numClass1, param):
    """Build bilinear-interpolation weights for a ConvTranspose2d layer.

    BUG FIX: the original body was `pass`, i.e. it returned None, which is
    then assigned to `.weight.data` in FCN8.__init__ and cannot work. This
    is the standard FCN upsampling initialisation: each diagonal
    (in-channel, out-channel) pair receives a 2-D bilinear filter; all
    off-diagonal channel pairs stay zero.

    Args:
        numClass: input channel count of the transposed convolution.
        numClass1: output channel count (expected equal to numClass here).
        param: square kernel size of the transposed convolution.

    Returns:
        A float32 torch tensor of shape (numClass, numClass1, param, param).
    """
    factor = (param + 1) // 2
    # Centre of the interpolation filter: integer for odd kernels,
    # half-offset for even kernels.
    if param % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:param, :param]
    # Separable triangular (bilinear) filter peaking at the centre.
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    weight = np.zeros((numClass, numClass1, param, param), dtype='float32')
    diag = range(min(numClass, numClass1))
    weight[diag, diag, :, :] = filt
    return torch.from_numpy(weight)


class FCN8(nn.Module):
    """FCN-8s style semantic segmentation network on the shared ResNet-34.

    Three backbone stages yield features at 1/8, 1/16 and 1/32 of the input
    resolution (per the comments in forward). Each is reduced to `numClass`
    channels by a 1x1 "score" conv, the coarser maps are upsampled and
    summed with the finer ones (skip connections), and the result is
    upsampled 8x back to input resolution.

    NOTE(review): the transposed-conv weights are initialised from
    bilinear_kernel(), which in this file is still a stub returning None --
    assigning that to .weight.data will likely fail; confirm the kernel is
    implemented before instantiating this class.
    """
    def __init__(self,numClass):
        # numClass: number of segmentation classes (output channels).
        super(FCN8,self).__init__()
        # Another way to build a torch model: reuse slices of the shared
        # module-level `resnet34` backbone instead of declaring new layers.
        self.stage1 = nn.Sequential(*list(resnet34.children())[:-4])  # features at 1/8 (see forward)
        self.stage2 = list(resnet34.children())[-4]  # next stage -> 1/16
        self.stage3 = list(resnet34.children())[-3]  # next stage -> 1/32

        # Fully-convolutional scoring heads: 1x1 convs mapping each stage's
        # channel count (512 / 256 / 128) down to numClass score maps.
        self.scores1 = nn.Conv2d(512,numClass,1)
        self.scores2 = nn.Conv2d(256, numClass, 1)
        self.scores3 = nn.Conv2d(128, numClass, 1)

        # kernel 16, stride 8, pad 4 -> 8x upsampling to input resolution.
        self.upsample_8x = nn.ConvTranspose2d(numClass,numClass,16,8,4,bias=False)
        self.upsample_8x.weight.data = bilinear_kernel(numClass, numClass, 16)  # bilinear-interpolation init

        # NOTE: despite the "_4x" name, kernel 4 / stride 2 / pad 1 is a 2x
        # upsampling -- the same shape as upsample_2x below.
        self.upsample_4x = nn.ConvTranspose2d(numClass, numClass, 4, 2, 1, bias=False)
        self.upsample_4x.weight.data = bilinear_kernel(numClass, numClass, 4)  # bilinear-interpolation init

        self.upsample_2x = nn.ConvTranspose2d(numClass, numClass, 4, 2, 1, bias=False)
        self.upsample_2x.weight.data = bilinear_kernel(numClass, numClass, 4)  # bilinear-interpolation init

    def forward(self, x):
        """Return per-pixel class scores, shape (b, numClass, H, W)."""
        x = self.stage1(x)
        s1 = x # 1/8

        x = self.stage2(x)
        s2 = x # 1/16

        x = self.stage3(x)
        s3 = x  # 1/32

        # Coarsest map: score, then upsample 1/32 -> 1/16 and fuse with s2.
        s3 = self.scores1(s3)
        s3 = self.upsample_2x(s3)
        s2 = self.scores2(s2)  # must be scored (1x1 conv) before the sum
        s2 = s2+s3

        # Fuse at 1/8: score s1, upsample the 1/16 sum, add.
        s1 = self.scores3(s1)
        s2 = self.upsample_4x(s2)
        s1 = s1+s2

        # Final 8x upsampling back to the input resolution.
        s = self.upsample_8x(s1)
        return s

# TODO:...
# 语义分割常用的指标，比如 overal accuracy，mean IU 等、
def _fast_hist(label_true,label_pred,n_class):
    mask = (label_true>=0) & ( label_true < n_class)
    hist = np.bincount( # 统计每个值出现的次数
        n_class*label_true[mask].astype(int) +
        label_pred[mask],minlength=n_class**2).reshape(n_class,n_class)
    return hist

def label_accuracy_score(label_trues, label_preds, n_class):
    """Compute common semantic-segmentation metrics over a batch.

    Accumulates one confusion matrix across all (truth, prediction) pairs
    and derives from it:
      - overall pixel accuracy,
      - mean per-class accuracy,
      - mean IU (intersection-over-union averaged over classes),
      - frequency-weighted IU (fwavacc).
    """
    hist = np.zeros((n_class, n_class))
    for truth, pred in zip(label_trues, label_preds):
        hist += _fast_hist(truth.flatten(), pred.flatten(), n_class)

    diagonal = np.diag(hist)
    acc = diagonal.sum() / hist.sum()
    # Per-class accuracy, averaged while ignoring absent classes (NaNs).
    acc_cls = np.nanmean(diagonal / hist.sum(axis=1))
    # IU per class: TP / (TP + FN + FP).
    iu = diagonal / (hist.sum(axis=1) + hist.sum(axis=0) - diagonal)
    mean_iu = np.nanmean(iu)
    # Weight each class's IU by how often it appears in the ground truth.
    freq = hist.sum(axis=1) / hist.sum()
    present = freq > 0
    fwavacc = (freq[present] * iu[present]).sum()
    return acc, acc_cls, mean_iu, fwavacc
from torch import optim

# NOTE(review): `train_data`, `valid_data`, `num_classes`, `voc_train` and
# `voc_test` are never defined in this file -- presumably they come from the
# dataset-loading code; confirm before running.

# BUG FIX: FCN8.__init__ requires the class count -- FCN8() would raise.
# BUG FIX: inputs below are moved to CUDA, so the model must be too.
net = FCN8(num_classes).cuda()

# log_softmax outputs + NLLLoss == cross-entropy over the class dimension.
# nn.NLLLoss2d is deprecated; nn.NLLLoss accepts (b, c, h, w) directly.
criterion = nn.NLLLoss()
basic_optim = optim.SGD(net.parameters(), lr=1e-2, weight_decay=1e-4)
# BUG FIX: the original bound the StepLR *scheduler* to the name `optimizer`
# and then called zero_grad()/step() on it -- methods a scheduler does not
# have. Keep the optimizer and the scheduler separate.
optimizer = basic_optim
scheduler = optim.lr_scheduler.StepLR(basic_optim, step_size=50)

for e in range(80):

    train_loss = 0
    train_acc = 0
    train_acc_cls = 0
    train_mean_iu = 0
    train_fwavacc = 0

    prev_time = datetime.now()
    net = net.train()
    for data in train_data:
        # Variable() is a no-op wrapper in modern torch; plain tensors work.
        im = data[0].cuda()
        label = data[1].cuda()
        # forward
        out = net(im)
        out = F.log_softmax(out, dim=1)  # (b, n_class, h, w)
        loss = criterion(out, label)
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # BUG FIX: `loss.data[0]` indexing was removed in torch >= 0.4.
        train_loss += loss.item()

        label_pred = out.max(dim=1)[1].data.cpu().numpy()
        label_true = label.data.cpu().numpy()
        for lbt, lbp in zip(label_true, label_pred):
            acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(lbt, lbp, num_classes)
            train_acc += acc
            train_acc_cls += acc_cls
            train_mean_iu += mean_iu
            train_fwavacc += fwavacc
    scheduler.step()  # decay the learning rate every step_size (50) epochs

    net = net.eval()  # switch to eval mode
    eval_loss = 0
    eval_acc = 0
    eval_acc_cls = 0
    eval_mean_iu = 0
    eval_fwavacc = 0
    # torch.no_grad() replaces the deprecated Variable(volatile=True).
    with torch.no_grad():
        for data in valid_data:
            im = data[0].cuda()
            label = data[1].cuda()
            # forward
            out = net(im)
            out = F.log_softmax(out, dim=1)
            loss = criterion(out, label)
            eval_loss += loss.item()

            label_pred = out.max(dim=1)[1].data.cpu().numpy()
            label_true = label.data.cpu().numpy()
            for lbt, lbp in zip(label_true, label_pred):
                acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(lbt, lbp, num_classes)
                eval_acc += acc
                eval_acc_cls += acc_cls
                eval_mean_iu += mean_iu
                eval_fwavacc += fwavacc

    cur_time = datetime.now()
    h, remainder = divmod((cur_time - prev_time).seconds, 3600)
    m, s = divmod(remainder, 60)
    epoch_str = ('Epoch: {}, Train Loss: {:.5f}, Train Acc: {:.5f}, Train Mean IU: {:.5f}, \
Valid Loss: {:.5f}, Valid Acc: {:.5f}, Valid Mean IU: {:.5f} '.format(
        e, train_loss / len(train_data), train_acc / len(voc_train), train_mean_iu / len(voc_train),
           eval_loss / len(valid_data), eval_acc / len(voc_test), eval_mean_iu / len(voc_test)))
    time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
    # BUG FIX: neither optimizers nor schedulers expose `.learning_rate`;
    # read the current LR from the optimizer's param group.
    print(epoch_str + time_str + ' lr: {}'.format(optimizer.param_groups[0]['lr']))






