#!/usr/bin/python
# -*- encoding: utf-8 -*-

import logging
import math
import os
import os.path as osp
import time

import cv2
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm

from cityscapes import CityScapes
from logger import setup_logger
from models.model_stages import BiSeNet
from steel import Steel

# Restrict this process to GPU 0; must be set before CUDA is first initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

class MscEvalV0(object):
    """Single-scale evaluator / predictor for 1-class (binary) segmentation.

    All methods assume the dataloader yields batches of size 1 and that a
    CUDA device is available.
    """

    def __init__(self, use_heatmap=False, ignore_label=-1):
        """
        Args:
            use_heatmap: if True, ground-truth labels are continuous heatmaps
                and are binarized at 0.5; predict2vis also lowers its
                prediction threshold to 0.1.
            ignore_label: kept for interface compatibility; not used here.
        """
        self.ignore_label = ignore_label
        self.use_heatmap = use_heatmap

    def __call__(self, net, dl, n_classes):
        """Evaluate `net` over `dl` and return the mean per-image score.

        NOTE(review): despite the variable names, the score computed here is
        pixel accuracy (matching pixels / (H*W)), not true IoU.
        """
        assert n_classes == 1, "只支持单个类别评测"
        scores = []
        for i, (imgs, label) in enumerate(tqdm(dl)):
            N, _, H, W = label.shape

            # Drop channel and batch dims (batch_size is assumed to be 1).
            label = label.squeeze(1).squeeze(0).numpy()
            if self.use_heatmap:
                label = label > 0.5
            else:
                # np.bool was removed in NumPy 1.24; the builtin bool is the
                # documented replacement and behaves identically here.
                label = label.astype(bool)

            imgs = imgs.cuda()
            logits = net(imgs)[0]
            probs = torch.sigmoid(logits)
            preds = (probs[0][0] > 0.5).cpu().numpy()

            score = (label == preds).astype(np.int64).sum() / (H * W)
            scores.append(score)

        return np.mean(scores)

    def predict(self, net, dl, save_dir):
        """Run inference over `dl` and write 0/255 uint8 masks into
        `save_dir` as "<index>_image.png"."""
        for i, (imgs, label) in enumerate(tqdm(dl)):
            imgs = imgs.cuda()
            logits = net(imgs)[0]
            probs = torch.sigmoid(logits)
            preds = (probs[0][0] > 0.5).cpu().numpy()
            preds = preds.astype(np.uint8) * 255
            cv2.imwrite(osp.join(save_dir, "{}_image.png".format(i)), preds)

    def predict2vis(self, net, dl, save_dir):
        """Run inference and display a per-image visualization: mask overlay,
        numbered predicted instances, and label/prediction counts.

        Images are shown with cv2.imshow (blocking on a key press); `save_dir`
        is kept for interface compatibility (saving is currently disabled).
        """
        # Heatmap targets are softer, so use a lower decision threshold.
        thresh = 0.1 if self.use_heatmap else 0.5
        for i, (imgs, label) in enumerate(tqdm(dl)):
            imgs = imgs.cuda()
            label = (label.squeeze(1).squeeze(0).numpy() * 255).astype(np.uint8)

            logits = net(imgs)[0]
            probs = torch.sigmoid(logits)
            preds = (probs[0][0] > thresh).cpu().numpy()
            preds = preds.astype(np.uint8) * 255

            # Count ground-truth instances via external contours.
            contours_label = cv2.findContours(label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
            nums_label = len(contours_label)

            # Erode predictions first to split weakly-connected blobs
            # before counting predicted instances.
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
            erosion_preds = cv2.erode(preds, kernel, anchor=(-1, -1), iterations=2)
            contours_pred = cv2.findContours(erosion_preds, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
            nums_pred = len(contours_pred)

            # Anchor points (sorted left-to-right) for the per-instance labels.
            points_center = get_center(contours_pred)

            # Blend the predicted mask into channel 0 of the image at 50%.
            img = (imgs.cpu().numpy()[0].transpose(1, 2, 0) * 255).astype(np.uint8)
            img[..., 0][preds == 255] = np.array(img[..., 0][preds == 255] * 0.5 + 255.0 * 0.5, np.uint8)

            # Draw instance indices.
            for idx in range(nums_pred):
                point = points_center[idx]
                img = cv2.putText(img.copy(), str(idx + 1), (point[0], point[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # Draw summary counts.
            img = cv2.putText(img.copy(), 'nums_label: {}'.format(nums_label), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            img = cv2.putText(img.copy(), 'nums_pred: {}'.format(nums_pred), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            cv2.imshow('img', img)
            cv2.waitKey(0)

def get_center(contours_pred):
    """Return integer anchor points for drawing a label near each contour.

    Each contour follows the cv2.findContours convention: an array of shape
    (N, 1, 2) holding (x, y) points. For every contour, the mean point is
    computed and shifted by (-10, -10) so the text sits beside the blob.

    Args:
        contours_pred: sequence of contour arrays (possibly empty).

    Returns:
        (M, 2) int64 array of [x, y] anchors, sorted by x ascending.
        An empty (0, 2) array when no contours are given (the previous
        implementation raised IndexError on an empty input).
    """
    if len(contours_pred) == 0:
        # Guard: np.array([])[:, 0] would raise IndexError below.
        return np.zeros((0, 2), dtype=np.int64)

    points_center = [
        [np.mean(contour[..., 0]) - 10, np.mean(contour[..., 1]) - 10]
        for contour in contours_pred
    ]
    points_center = np.array(points_center, np.int64)
    # Sort anchors left-to-right so index labels read naturally.
    return points_center[np.argsort(points_center[:, 0])]

def evaluatev0(respth='./pretrained', save_dir='evaluation_logs', 
    dspth='./data', backbone='STDCNet813', 
    use_heatmap=False, inputsize=[800, 800],
    use_boundary_2=False, use_boundary_4=False, use_boundary_8=False, 
    use_boundary_16=False, use_conv_last=False):
    """Load a BiSeNet checkpoint from `respth` and print its mean score on
    the Steel 'test' split found under `dspth`.

    Fix: `use_heatmap` is now forwarded to MscEvalV0 so heatmap-style labels
    are binarized by the correct branch (it was previously accepted here but
    silently ignored by the evaluator).
    """
    print('use_boundary_2', use_boundary_2)
    print('use_boundary_4', use_boundary_4)
    print('use_boundary_8', use_boundary_8)
    print('use_boundary_16', use_boundary_16)

    # Deterministic single-image batches: the evaluator assumes batch_size 1.
    dsval = Steel(dspth, mode='test', use_heatmap=use_heatmap, inputsize=inputsize)
    dlval = DataLoader(dsval,
                       batch_size=1,
                       shuffle=False,
                       num_workers=0,
                       drop_last=False)

    n_classes = 1
    print("backbone:", backbone)
    net = BiSeNet(backbone=backbone, n_classes=n_classes,
                  use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4,
                  use_boundary_8=use_boundary_8, use_boundary_16=use_boundary_16,
                  use_conv_last=use_conv_last)
    net.load_state_dict(torch.load(respth))
    net.cuda()
    net.eval()

    with torch.no_grad():
        # Forward use_heatmap so label thresholding matches the dataset mode.
        single_scale = MscEvalV0(use_heatmap=use_heatmap)
        mIOU = single_scale(net, dlval, n_classes)
    print('mIOU is: ', mIOU)

def inference(respth='./pretrained', save_dir='evaluation_logs', 
    dspth='./data', backbone='STDCNet813', 
    use_heatmap=False, inputsize=[800, 800],
    use_boundary_2=False, use_boundary_4=False, use_boundary_8=False, 
    use_boundary_16=False, use_conv_last=False):
    """Load a BiSeNet checkpoint from `respth` and write predicted 0/255
    masks for the Steel 'test' split into `save_dir`.

    Fix: `use_heatmap` is now forwarded to MscEvalV0, keeping the evaluator
    configuration consistent with the dataset mode (it was previously
    accepted here but not passed through).
    """
    print('use_boundary_2', use_boundary_2)
    print('use_boundary_4', use_boundary_4)
    print('use_boundary_8', use_boundary_8)
    print('use_boundary_16', use_boundary_16)

    # Deterministic single-image batches: predict() assumes batch_size 1.
    dsval = Steel(dspth, mode='test', use_heatmap=use_heatmap, inputsize=inputsize)
    dlval = DataLoader(dsval,
                       batch_size=1,
                       shuffle=False,
                       num_workers=0,
                       drop_last=False)

    n_classes = 1
    print("backbone:", backbone)
    net = BiSeNet(backbone=backbone, n_classes=n_classes,
                  use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4,
                  use_boundary_8=use_boundary_8, use_boundary_16=use_boundary_16,
                  use_conv_last=use_conv_last)
    net.load_state_dict(torch.load(respth))
    net.cuda()
    net.eval()

    with torch.no_grad():
        single_scale = MscEvalV0(use_heatmap=use_heatmap)
        single_scale.predict(net, dlval, save_dir)

def inference2vis(respth='./pretrained', save_dir='evaluation_logs/tmp', 
    dspth='./data', backbone='STDCNet813', 
    use_heatmap=False, inputsize=[800, 800],
    use_boundary_2=False, use_boundary_4=False, use_boundary_8=False, 
    use_boundary_16=False, use_conv_last=False):
    """Load a BiSeNet checkpoint from `respth` and show per-image
    visualizations (mask overlay + instance counts) for the Steel 'test'
    split.

    Fix: `use_heatmap` is now forwarded to MscEvalV0. Previously the
    evaluator was constructed without it, so predict2vis always used the
    0.5 threshold even in heatmap mode, where 0.1 is intended.
    """
    print('use_boundary_2', use_boundary_2)
    print('use_boundary_4', use_boundary_4)
    print('use_boundary_8', use_boundary_8)
    print('use_boundary_16', use_boundary_16)

    # Deterministic single-image batches: predict2vis() assumes batch_size 1.
    dsval = Steel(dspth, mode='test', use_heatmap=use_heatmap, inputsize=inputsize)
    dlval = DataLoader(dsval,
                       batch_size=1,
                       shuffle=False,
                       num_workers=0,
                       drop_last=False)

    n_classes = 1
    print("backbone:", backbone)
    net = BiSeNet(backbone=backbone, n_classes=n_classes,
                  use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4,
                  use_boundary_8=use_boundary_8, use_boundary_16=use_boundary_16,
                  use_conv_last=use_conv_last)
    net.load_state_dict(torch.load(respth))
    net.cuda()
    net.eval()

    with torch.no_grad():
        # use_heatmap selects the 0.1 prediction threshold inside predict2vis.
        single_scale = MscEvalV0(use_heatmap=use_heatmap)
        single_scale.predict2vis(net, dlval, save_dir)

class MscEval(object):
    """Multi-scale / sliding-window evaluator for multi-class segmentation
    (Cityscapes-style; default 19 classes, ignore label 255).

    Requires a CUDA device; the model is queried as self.net(crop)[0].
    """
    def __init__(self,
            model,
            dataloader,
            scales = [0.5, 0.75, 1, 1.25, 1.5, 1.75],
            n_classes = 19,
            lb_ignore = 255,
            cropsize = 1024,
            flip = True,
            *args, **kwargs):
        # NOTE(review): `scales` is a mutable default argument — harmless as
        # long as no caller mutates it, but worth confirming.
        self.scales = scales
        self.n_classes = n_classes
        self.lb_ignore = lb_ignore
        self.flip = flip
        self.cropsize = cropsize
        ## dataloader
        self.dl = dataloader
        self.net = model


    def pad_tensor(self, inten, size):
        """Center-pad `inten` (N, C, H, W) with zeros to (size[0], size[1]).

        Returns:
            (padded CUDA tensor, [hst, hed, wst, wed]) where the list gives
            the slice of the padded tensor occupied by the original content.
        """
        N, C, H, W = inten.size()
        outten = torch.zeros(N, C, size[0], size[1]).cuda()
        outten.requires_grad = False
        margin_h, margin_w = size[0]-H, size[1]-W
        hst, hed = margin_h//2, margin_h//2+H
        wst, wed = margin_w//2, margin_w//2+W
        outten[:, :, hst:hed, wst:wed] = inten
        return outten, [hst, hed, wst, wed]


    def eval_chip(self, crop):
        """Forward a single crop; when self.flip is set, add the prediction
        for the horizontally-flipped crop (flipped back) before returning.

        NOTE(review): torch.exp applied to softmax output is unusual — exp
        is monotonic, so the argmax taken in evaluate() is unaffected, but
        the returned values are no longer probabilities. Confirm intent.
        """
        with torch.no_grad():
            out = self.net(crop)[0]
            prob = F.softmax(out, 1)
            if self.flip:
                crop = torch.flip(crop, dims=(3,))
                out = self.net(crop)[0]
                out = torch.flip(out, dims=(3,))
                prob += F.softmax(out, 1)
            prob = torch.exp(prob)
        return prob


    def crop_eval(self, im):
        """Score `im` by tiling it with overlapping cropsize x cropsize
        windows (stride = 5/6 of the crop) and summing per-window scores.

        Images smaller than the crop are zero-padded instead of tiled.
        """
        cropsize = self.cropsize
        stride_rate = 5/6.
        N, C, H, W = im.size()
        long_size, short_size = (H,W) if H>W else (W,H)
        if long_size < cropsize:
            # Whole image fits in one window: pad, score, then crop back.
            im, indices = self.pad_tensor(im, (cropsize, cropsize))
            prob = self.eval_chip(im)
            prob = prob[:, :, indices[0]:indices[1], indices[2]:indices[3]]
        else:
            stride = math.ceil(cropsize*stride_rate)
            if short_size < cropsize:
                # Pad only the short side so every window is full-size.
                if H < W:
                    im, indices = self.pad_tensor(im, (cropsize, W))
                else:
                    im, indices = self.pad_tensor(im, (H, cropsize))
            N, C, H, W = im.size()
            n_x = math.ceil((W-cropsize)/stride)+1
            n_y = math.ceil((H-cropsize)/stride)+1
            prob = torch.zeros(N, self.n_classes, H, W).cuda()
            prob.requires_grad = False
            for iy in range(n_y):
                for ix in range(n_x):
                    # Clamp each window to the image edge; overlapping
                    # regions accumulate scores from multiple windows.
                    hed, wed = min(H, stride*iy+cropsize), min(W, stride*ix+cropsize)
                    hst, wst = hed-cropsize, wed-cropsize
                    chip = im[:, :, hst:hed, wst:wed]
                    prob_chip = self.eval_chip(chip)
                    prob[:, :, hst:hed, wst:wed] += prob_chip
            if short_size < cropsize:
                # Undo the short-side padding.
                prob = prob[:, :, indices[0]:indices[1], indices[2]:indices[3]]
        return prob


    def scale_crop_eval(self, im, scale):
        """Resize `im` by `scale`, run crop_eval, and resize the score map
        back to the original (H, W)."""
        N, C, H, W = im.size()
        new_hw = [int(H*scale), int(W*scale)]
        im = F.interpolate(im, new_hw, mode='bilinear', align_corners=True)
        prob = self.crop_eval(im)
        prob = F.interpolate(prob, (H, W), mode='bilinear', align_corners=True)
        return prob


    def compute_hist(self, pred, lb):
        """Build an (n_classes, n_classes) confusion histogram from integer
        prediction and label maps, skipping ignore-label pixels."""
        n_classes = self.n_classes
        ignore_idx = self.lb_ignore
        keep = np.logical_not(lb==ignore_idx)
        # Encode each (pred, label) pair as a single index for bincount.
        merge = pred[keep] * n_classes + lb[keep]
        hist = np.bincount(merge, minlength=n_classes**2)
        hist = hist.reshape((n_classes, n_classes))
        return hist


    def evaluate(self):
        """Run the full evaluation pass and return mean IOU over classes."""
        ## evaluate
        n_classes = self.n_classes
        hist = np.zeros((n_classes, n_classes), dtype=np.float32)
        dloader = tqdm(self.dl)
        # Only rank 0 shows a progress bar under distributed evaluation.
        if dist.is_initialized() and not dist.get_rank()==0:
            dloader = self.dl
        for i, (imgs, label) in enumerate(dloader):
            N, _, H, W = label.shape
            probs = torch.zeros((N, self.n_classes, H, W))
            probs.requires_grad = False
            imgs = imgs.cuda()
            # NOTE(review): eval_chip is called once per scale without using
            # `sc` (scale_crop_eval is commented out), so scores are simply
            # multiplied by len(scales) — the argmax is unaffected.
            for sc in self.scales:
                # prob = self.scale_crop_eval(imgs, sc)
                prob = self.eval_chip(imgs)
                probs += prob.detach().cpu()
            probs = probs.data.numpy()
            preds = np.argmax(probs, axis=1)

            hist_once = self.compute_hist(preds, label.data.numpy().squeeze(1))
            hist = hist + hist_once
        # Per-class IOU = diag / (row sum + col sum - diag); mean over classes.
        IOUs = np.diag(hist) / (np.sum(hist, axis=0)+np.sum(hist, axis=1)-np.diag(hist))
        mIOU = np.mean(IOUs)
        return mIOU


def evaluate(respth='./resv1_catnet/pths/', dspth='./data'):
    """Restore a 19-class BiSeNet checkpoint from `respth` and log its mIOU
    on the Cityscapes validation split located under `dspth`."""
    logger = logging.getLogger()

    logger.info('\n')
    logger.info('====' * 20)
    logger.info('evaluating the model ...\n')
    logger.info('setup and restore model')

    num_classes = 19
    model = BiSeNet(n_classes=num_classes)
    model.load_state_dict(torch.load(respth))
    model.cuda()
    model.eval()

    # Validation loader: fixed order, keep the final partial batch.
    val_set = CityScapes(dspth, mode='val')
    loader = DataLoader(
        val_set,
        batch_size=5,
        shuffle=False,
        num_workers=2,
        drop_last=False,
    )

    logger.info('compute the mIOU')
    scorer = MscEval(model, loader, scales=[1], flip=False)
    score = scorer.evaluate()
    logger.info('mIOU is: {:.6f}'.format(score))



if __name__ == "__main__":
    save_dir = 'evaluation_logs/pred_0427_vis'
    use_heatmap = False
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(save_dir, exist_ok=True)

    # Accuracy evaluation. Prints: mean score ("mIOU").
    evaluatev0(r'checkpoints\train_STDC1-Seg\train_0510\pths\model_final.pth', 
        save_dir=save_dir,
        dspth='data/shushu/test', backbone='STDCNet813', 
        use_heatmap=use_heatmap, inputsize=[800, 800],
        use_boundary_2=False, use_boundary_4=False, 
        use_boundary_8=False, use_boundary_16=False)

    # Inference. Writes: predicted masks to save_dir.
    # inference(r'checkpoints\train_STDC1-Seg\train_0418_sigmoid\pths\model_final.pth', 
    #     save_dir=save_dir,
    #     dspth='data/shushu/test', backbone='STDCNet813', 
    #     use_heatmap=use_heatmap, inputsize=[800, 800],
    #     use_boundary_2=False, use_boundary_4=False, 
    #     use_boundary_8=False, use_boundary_16=False)

    # Inference with visualization. Shows: original image, mask overlay,
    # and instance counts.
    # inference2vis(r'checkpoints\train_STDC1-Seg\train_0510\pths\model_final.pth', 
    #     save_dir=save_dir,
    #     dspth='data/shushu/test', backbone='STDCNet813', 
    #     use_heatmap=use_heatmap, inputsize=[800, 800],
    #     use_boundary_2=False, use_boundary_4=False, 
    #     use_boundary_8=False, use_boundary_16=False)