# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/25
import os
import random
import torch.nn as nn
import cv2
import torch.nn.functional as F
import numpy as np
import torch

# NOTE: the original U-Net paper enlarges the input via mirror padding at the
# borders; this implementation instead uses unpadded convs and crops the skips.
class Unet(nn.Module):
    """U-Net style encoder/decoder for semantic segmentation.

    All 3x3 convolutions are unpadded, so feature maps shrink at every conv;
    the skip connections are therefore center-cropped before concatenation
    (see ``crop_and_concat``).
    """

    def __init__(self, in_channel, out_channel):
        super().__init__()
        # Contracting path: three conv blocks, each followed by 2x2 max pooling.
        self.conv_encode1 = self.ContractBlock(in_channel, 64)
        self.pool1 = nn.MaxPool2d(2)
        self.conv_encode2 = self.ContractBlock(64, 128)
        self.pool2 = nn.MaxPool2d(2)
        self.conv_encode3 = self.ContractBlock(128, 256)
        self.pool3 = nn.MaxPool2d(2)
        # Bottleneck: two unpadded convs, then a transposed conv that doubles
        # the spatial size (stride=2, padding=1, output_padding=1 -> H_out = 2*H_in).
        self.bottleneck = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3),
            nn.ReLU(),
            nn.BatchNorm2d(512),
            nn.Conv2d(512, 512, kernel_size=3),
            nn.ReLU(),
            nn.BatchNorm2d(512),
            nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2,
                               padding=1, output_padding=1),
        )
        # Expanding path: channel counts shrink back toward the output.
        self.conv_decode3 = self.ExpandBlock(512, 256, 128)
        self.conv_decode2 = self.ExpandBlock(256, 128, 64)
        self.final_layer = self.finalBlock(128, 64, out_channel)

    def ContractBlock(self, inChannel, outChannel, kernelSize=3):
        """Two (conv -> ReLU -> BN) stages used on the contracting path."""
        stages = [
            nn.Conv2d(inChannel, outChannel, kernelSize, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(outChannel),
            nn.Conv2d(outChannel, outChannel, kernelSize, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(outChannel),
        ]
        return nn.Sequential(*stages)

    def ExpandBlock(self, inChannel, midChannel, outChannel, kernelSize=3):
        """Two conv stages followed by a 2x upsampling transposed conv.

        ``midChannel`` is half of ``inChannel`` because the input is the
        concatenation of a decoder map with an equally-sized skip connection.
        """
        stages = [
            nn.Conv2d(inChannel, midChannel, kernelSize),
            nn.ReLU(),
            nn.BatchNorm2d(midChannel),
            nn.Conv2d(midChannel, midChannel, kernelSize),
            nn.ReLU(),
            nn.BatchNorm2d(midChannel),
            # ConvTranspose2d with these parameters doubles the spatial size.
            nn.ConvTranspose2d(midChannel, outChannel, kernel_size=3,
                               stride=2, padding=1, output_padding=1),
        ]
        return nn.Sequential(*stages)

    def finalBlock(self, inChannel, midChannel, outChannel, kernelSize=3):
        """Output head: two unpadded convs, then a padded conv to the class
        channels.

        NOTE(review): ending with ReLU + BatchNorm on the class logits is
        unusual for multi-class segmentation, but it is kept as-is because the
        pretrained checkpoint was trained with this head.
        """
        stages = [
            nn.Conv2d(inChannel, midChannel, kernelSize),
            nn.ReLU(),
            nn.BatchNorm2d(midChannel),
            nn.Conv2d(midChannel, midChannel, kernelSize),
            nn.ReLU(),
            nn.BatchNorm2d(midChannel),
            nn.Conv2d(midChannel, outChannel, kernelSize, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(outChannel),
        ]
        return nn.Sequential(*stages)

    def crop_and_concat(self, upsampled, bypass, crop=False):
        """Concatenate a skip connection with an upsampled map along channels.

        With ``crop=True`` the (larger) ``bypass`` tensor is center-cropped via
        negative padding so its spatial size matches ``upsampled``; assumes the
        size difference is even so the crop is symmetric.
        """
        if crop:
            delta = (bypass.size()[2] - upsampled.size()[2]) // 2
            bypass = F.pad(bypass, [-delta] * 4)  # negative pad == center crop
        return torch.cat([upsampled, bypass], dim=1)

    def forward(self, x):
        # Contracting path; keep the pre-pool activations for the skips.
        enc1 = self.conv_encode1(x)
        pooled1 = self.pool1(enc1)
        enc2 = self.conv_encode2(pooled1)
        pooled2 = self.pool2(enc2)
        enc3 = self.conv_encode3(pooled2)
        pooled3 = self.pool3(enc3)

        mid = self.bottleneck(pooled3)

        # Expanding path: crop-concat each skip, then decode.
        dec3 = self.conv_decode3(self.crop_and_concat(mid, enc3, crop=True))
        dec2 = self.conv_decode2(self.crop_and_concat(dec3, enc2, crop=True))
        out = self.final_layer(self.crop_and_concat(dec2, enc1, crop=True))
        # NOTE(review): output is smaller than the input (unpadded convs);
        # callers resize it back if needed.
        return out

def getImageArr(img):
    """Return a float32 copy of a BGR image with the ImageNet per-channel
    means subtracted (zero-centering, matching the training preprocessing)."""
    # ImageNet channel means in BGR order (OpenCV channel layout).
    means = (103.939, 116.779, 123.68)
    out = img.astype(np.float32)
    for ch, mean in enumerate(means):
        out[:, :, ch] -= mean
    return out


# --- Inference configuration (module-level script state) ---

# One random BGR color per class id, used to paint the predicted mask.
# NOTE: colors differ between runs because the global RNG is not seeded here.
class_colors = [(random.randint(0, 255),
                 random.randint(0, 255),
                 random.randint(0, 255)) for _ in range(5000)]

modelpath = r'F:\Resources\model\pth\unetseg.pt'
testData = r'F:\Resources\dataset\SegNet-Tutorial-master\CamVid\test'
nclasses = 11  # CamVid label count used by this checkpoint
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

unet = Unet(3, nclasses)
# map_location lets a GPU-trained checkpoint load on a CPU-only machine
# (torch.load without it fails when the saved device is unavailable).
unet.load_state_dict(torch.load(modelpath, map_location=device))
# Move the model to the same device the inputs are sent to in predict();
# without this, a CUDA machine crashes on a CPU-model/GPU-input mismatch.
unet.to(device)
# Inference mode: use BatchNorm running statistics instead of batch stats.
unet.eval()

# Crop size fed to the network (must be large enough for the conv/crop chain).
width = 284
height = 284
def predict(img, seg, width, height):
    """Run Unet inference on a random crop of one test image and display the
    input crop, the colorized prediction, and the raw label map.

    img/seg: file paths to the BGR image and its single-channel label image
             (must have identical spatial size, at least height x width).
    width/height: crop size fed to the network.
    """
    img = cv2.imread(img, 1)   # BGR image
    seg = cv2.imread(seg, 0)   # grayscale label map
    assert img.shape[:2] == seg.shape[:2]
    assert img.shape[0] >= height and img.shape[1] >= width
    # Random top-left corner of the crop.
    # (The original code called random.seed(0) AFTER sampling, which forced
    # every later call to reuse the identical crop — removed as a bug.)
    xx = random.randint(0, img.shape[0] - height)
    yy = random.randint(0, img.shape[1] - width)
    im = img[xx:xx + height, yy:yy + width]
    seg = seg[xx:xx + height, yy:yy + width]
    with torch.no_grad():
        # HWC uint8 -> mean-centered float32 -> CHW tensor with batch dim.
        inputs = torch.from_numpy(np.transpose(getImageArr(im), (2, 0, 1)))
        inputs = torch.unsqueeze(inputs, dim=0).to(device)
        outputs = unet(inputs)

        # NCHW -> HWC, drop the batch dim, per-pixel argmax over classes.
        ypre = torch.squeeze(outputs.permute((0, 2, 3, 1)), 0)
        # .cpu() is required before .numpy() when the model runs on CUDA.
        pridx = ypre.cpu().numpy().argmax(axis=2)

        # Paint each predicted class with its random color.
        segimg = np.zeros((pridx.shape[0], pridx.shape[1], 3))
        for c in range(nclasses):
            mask = (c == pridx)
            segimg[:, :, 0] += (mask * class_colors[c][0]).astype('uint8')
            segimg[:, :, 1] += (mask * class_colors[c][1]).astype('uint8')
            segimg[:, :, 2] += (mask * class_colors[c][2]).astype('uint8')
        segimg = segimg.astype(np.uint8)
        # cv2.resize takes dsize as (width, height); the original passed
        # (height, width), which transposes non-square crops.
        segimg = cv2.resize(segimg, (width, height))
        cv2.imshow('origin', im)
        cv2.imshow('prediction', segimg)
        cv2.imshow('label', seg)
        cv2.waitKey(6000)
segann = r"F:\Resources\dataset\SegNet-Tutorial-master\CamVid\testannot"
# Each annotation file shares its filename with the corresponding test image.
names = os.listdir(segann)
for name in names:
    img = os.path.join(testData, name)
    seg = os.path.join(segann, name)
    predict(img, seg, width, height)