# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/23
import copy
import random
import torchsummary
import time
from datetime import datetime
import tqdm
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torch
import torchvision
import torch.nn as nn
import cv2
import glob
import torch.nn.functional as F
from torch import optim
import os


# NOTE: the original U-Net mirror-pads the input to enlarge the image; this
# variant instead uses valid (unpadded) convolutions and crops skip connections.
class Unet(nn.Module):
    """3-level U-Net with valid (unpadded) convolutions.

    Because every 3x3 conv is unpadded, each conv shrinks the feature map by
    2 pixels; the decoder therefore centre-crops the encoder skip connections
    before concatenation, and the final output is smaller than the input.
    """

    def __init__(self, in_channel, out_channel):
        super(Unet, self).__init__()
        self.conv_encode1 = self.ContractBlock(in_channel, 64)
        self.pool1 = nn.MaxPool2d(2)
        self.conv_encode2 = self.ContractBlock(64, 128)
        self.pool2 = nn.MaxPool2d(2)
        self.conv_encode3 = self.ContractBlock(128, 256)
        self.pool3 = nn.MaxPool2d(2)
        # A fourth encoder level (as in the original U-Net paper) was dropped:
        # self.conv_encode4 = self.ContractBlock(256, 512)
        # self.pool4 = nn.MaxPool2d(2)
        # Bottleneck: two valid convs, then a transposed conv that exactly
        # doubles the spatial size: (in-1)*2 - 2*1 + 3 + 1 = 2*in.
        self.bottleneck = torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=3, in_channels=256, out_channels=512),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(512),
            torch.nn.Conv2d(kernel_size=3, in_channels=512, out_channels=512),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(512),
            torch.nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=3, stride=2, padding=1,
                                     output_padding=1)
        )
        # Decoder: channel counts shrink back towards the output.
        self.conv_decode3 = self.ExpandBlock(512, 256, 128)
        self.conv_decode2 = self.ExpandBlock(256, 128, 64)
        self.final_layer = self.finalBlock(128, 64, out_channel)

    def ContractBlock(self, inChannel, outChannel, kernelSize=3):
        """Encoder block: two valid conv+ReLU+BN stages (feature extraction)."""
        return nn.Sequential(
            nn.Conv2d(inChannel, outChannel, kernelSize, bias=False),
            nn.ReLU(),
            torch.nn.BatchNorm2d(outChannel),
            nn.Conv2d(outChannel, outChannel, kernelSize, bias=False),
            nn.ReLU(),
            torch.nn.BatchNorm2d(outChannel),
        )

    def ExpandBlock(self, inChannel, midChannel, outChannel, kernelSize=3):
        """Decoder block: two valid convs, then a 2x upsampling transposed conv.

        `midChannel` exists because the input is the channel-concatenation of
        the upsampled features with the cropped skip connection.
        """
        return nn.Sequential(
            nn.Conv2d(inChannel, midChannel, kernelSize),
            nn.ReLU(),
            nn.BatchNorm2d(midChannel),
            nn.Conv2d(midChannel, midChannel, kernelSize),
            nn.ReLU(),
            nn.BatchNorm2d(midChannel),
            # There is no direct learned-upsampling layer; these transposed-conv
            # parameters double the spatial size ((in-1)*2 - 2 + 3 + 1 = 2*in).
            nn.ConvTranspose2d(midChannel, outChannel, kernel_size=3, stride=2, padding=1, output_padding=1)
        )

    def finalBlock(self, inChannel, midChannel, outChannel, kernelSize=3):
        """Output head: two valid convs, then a padded conv to `outChannel` logits."""
        return torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=kernelSize, in_channels=inChannel, out_channels=midChannel),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(midChannel),
            torch.nn.Conv2d(kernel_size=kernelSize, in_channels=midChannel, out_channels=midChannel),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(midChannel),
            torch.nn.Conv2d(kernel_size=kernelSize, in_channels=midChannel, out_channels=outChannel, padding=1),
            torch.nn.ReLU(),  # multi-class output; loss applies softmax itself
            torch.nn.BatchNorm2d(outChannel),
        )

    def crop_and_concat(self, upsampled, bypass, crop=False):
        """Channel-concatenate `upsampled` with the skip tensor `bypass`.

        When `crop` is True the (larger) `bypass` tensor is centre-cropped to
        the spatial size of `upsampled`; the size difference must be even.
        Removed the leftover debug prints that ran on every forward pass.
        """
        if crop:
            c = (bypass.size()[2] - upsampled.size()[2]) // 2
            # F.pad with a negative amount crops symmetrically on all sides.
            bypass = F.pad(bypass, [-c, -c, -c, -c])
        return torch.cat([upsampled, bypass], dim=1)

    def forward(self, x):
        # Encoder path, keeping pre-pool activations for the skip connections.
        encode_block1 = self.conv_encode1(x)
        encode_pool1 = self.pool1(encode_block1)
        encode_block2 = self.conv_encode2(encode_pool1)
        encode_pool2 = self.pool2(encode_block2)
        encode_block3 = self.conv_encode3(encode_pool2)
        encode_pool3 = self.pool3(encode_block3)

        bottleneck_out = self.bottleneck(encode_pool3)
        # Decoder path: crop each skip connection to match, concat, expand.
        decode_block3 = self.crop_and_concat(bottleneck_out, encode_block3, crop=True)
        cat_layer2 = self.conv_decode3(decode_block3)
        decode_block2 = self.crop_and_concat(cat_layer2, encode_block2, crop=True)

        cat_layer1 = self.conv_decode2(decode_block2)
        decode_block1 = self.crop_and_concat(cat_layer1, encode_block1, crop=True)
        final_layer = self.final_layer(decode_block1)
        # Shape is (B, out_channel, H', W') with H', W' < input size; the
        # caller reshapes per-pixel for the loss.
        return final_layer


class CamvidSet(Dataset):
    """CamVid segmentation dataset yielding random fixed-size crops.

    Returns (image, label) pairs where the image is a mean-centred float32
    CHW tensor and the label is an HxW map of class indices.
    """

    def __init__(self, imgpath, segpath, width, height, nclasses=11, transform=None):
        self.imgpath = imgpath
        self.segpath = segpath
        self.transform = transform  # NOTE(review): stored but never applied in __getitem__
        self.width = width
        self.height = height
        self.nclasses = nclasses

    @property
    def imgs(self):
        # Sorted for a deterministic sample order: os.listdir order is
        # arbitrary, and this property is re-evaluated on every call.
        return sorted(os.listdir(self.imgpath))

    def getImageArr(self, img):
        """Mean-centre an HxWx3 image (per-channel ImageNet means, BGR order
        as loaded by cv2 — the B mean is 103.939)."""
        img = img.astype(np.float32)
        img[:, :, 0] -= 103.939
        img[:, :, 1] -= 116.779
        img[:, :, 2] -= 123.68
        return img

    def getSegmentationArr(self, seg: np.ndarray, nclasses):
        """One-hot encode a label map into (height, width, nclasses).

        Kept for keras-style pipelines; PyTorch's CrossEntropyLoss takes raw
        class indices, so the training loop does not call this.
        """
        segLabels = np.zeros((self.height, self.width, nclasses))
        for c in range(nclasses):
            segLabels[:, :, c] = (seg == c).astype(int)
        return segLabels

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, item):
        # FIXME: the cropping/normalisation below could move into a transform.
        imgpath = os.path.join(self.imgpath, self.imgs[item])
        segpath = os.path.join(self.segpath, self.imgs[item])
        img = cv2.imread(imgpath, 1)   # BGR colour image
        seg = cv2.imread(segpath, 0)   # single-channel label map
        assert img.shape[:2] == seg.shape[:2]

        # Random crop of (height, width); the source image must be large enough.
        assert img.shape[0] >= self.height and img.shape[1] >= self.width
        xx = random.randint(0, img.shape[0] - self.height)
        yy = random.randint(0, img.shape[1] - self.width)

        im = img[xx:xx + self.height, yy:yy + self.width]
        seg = seg[xx:xx + self.height, yy:yy + self.width]

        # HWC -> CHW because the model expects channels-first tensors.
        im = np.transpose(self.getImageArr(im), (2, 0, 1))
        return torch.from_numpy(im), torch.from_numpy(seg)

class Reshape():
    """Placeholder transform for dataset samples.

    `__call__` is not implemented yet; it currently accepts a sample and
    returns None.
    """

    def __init__(self):
        # Nothing to configure yet.
        pass

    def __call__(self, sample):
        # TODO: reshape `sample` (an item returned by the dataset) once the
        # target layout is decided.
        return None


# --- configuration ----------------------------------------------------------
input_height = 284
input_width = 284
nclasses = 11
epoch = 50
modelPath = "/data/soft/redldw/CamVid/unetseg.pt"

unet = Unet(3, 11)
print(torchsummary.summary(unet,(3,284,284),8,device='cpu'))
imgpath = "/data/soft/redldw/CamVid/train"
segpath = "/data/soft/redldw/CamVid/trainannot"
valpath = "/data/soft/redldw/CamVid/val"
valsegpath='/data/soft/redldw/CamVid/valannot'
optimzer = optim.SGD(unet.parameters(), lr=0.001)
criterion = torch.nn.CrossEntropyLoss()

traincamdataset = CamvidSet(imgpath, segpath, input_height, input_width, nclasses)
validcamdataset = CamvidSet(valpath, valsegpath, input_height, input_width, nclasses)
traindataloader = DataLoader(traincamdataset, batch_size=8, num_workers=2, shuffle=True)
# NOTE(review): the validation loader is built but never consumed below.
valdataloader = DataLoader(validcamdataset, batch_size=4, num_workers=1, shuffle=True)

# device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
unet = unet.to(device)

best_acc = 0.0  # TODO: pixel accuracy / IoU not computed yet (see Xsegment project)
best_loss = float("inf")
best_state = copy.deepcopy(unet.state_dict())
since = time.time()
for i in tqdm.tqdm(range(epoch)):
    print('run epoch {}'.format(i))
    running_loss = 0
    running_corrects = 0
    for data, label in traindataloader:
        inputs = data.to(device)
        label = label.to(device)

        optimzer.zero_grad()  # clear gradients before each batch

        outputs: torch.Tensor = unet(inputs)
        # (B, C, H, W) -> (B, H, W, C) so each pixel becomes one row of logits.
        outputs = outputs.permute((0, 2, 3, 1))
        b, out_h, out_w = outputs.size()[:3]
        outputs = outputs.reshape((-1, nclasses))
        # The unpadded convolutions shrink the output, so the prediction covers
        # only the centre of the input crop.  Align the labels by centre-
        # cropping them to the output size — the original `label.resize_(...)`
        # merely truncated the label storage and destroyed the pixel
        # correspondence between logits and targets.
        # NOTE(review): assumes the U-Net output is centred on the input — TODO confirm.
        off_h = (label.size(1) - out_h) // 2
        off_w = (label.size(2) - out_w) // 2
        label = label[:, off_h:off_h + out_h, off_w:off_w + out_w]
        label = label.reshape(-1).long()

        loss = criterion(outputs, label)

        loss.backward()
        optimzer.step()

        running_loss += loss.item() * data.size(0)
    epochloss = running_loss / len(traincamdataset)
    # Snapshot the best weights seen so far.  The original only saved them when
    # the loss dropped below 0.1, so a run that never hit that threshold would
    # reload the *initial* (untrained) weights at the end.
    if epochloss < best_loss:
        best_loss = epochloss
        best_state = copy.deepcopy(unet.state_dict())
    if epochloss < 0.1:
        break
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
unet.load_state_dict(best_state)
# Saved on CPU; can be loaded onto a GPU later:
torch.save(unet.state_dict(), modelPath)
# device = torch.device("cuda")
# model = unet(*args, **kwargs)
# model.load_state_dict(torch.load(PATH, map_location="cuda:0"))  # Choose whatever GPU device number you want
# model.to(device)
# Remember to move all model inputs with .to(torch.device('cuda')) when optimizing for CUDA.
