#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author zengxiaohui
# Datetime: 7/13/2021 1:06 PM
# @File: predict images and generate standard labelme JSON annotation files
import argparse
import copy
import cv2
import os
import pprint
import random
import numpy as np
import skimage
import torch
from docopt import docopt
from imutils import paths
from tqdm import tqdm

from dataset.constants import NORMALIZATION_HEIGHT, NORMALIZATION_WIDTH, TB_DATATYPE
import lcnn
from demo import get_model_eval, Hpreds_post1
from lcnn.config import C, M
from lcnn.models.line_vectorizer import LineVectorizer
from lcnn.models.multitask_learner import MultitaskHead, MultitaskLearner
from lcnn.postprocess import postprocess
from python_developer_tools.cv.utils.torch_utils import recursive_to
from python_developer_tools.cv.datasets.datasets_utils import letterbox
from python_developer_tools.files.common import get_filename_suf_pix
from python_developer_tools.files.image_utils import imgToBase64
from python_developer_tools.files.json_utils import save_json_file

def Hpreds_post2(nlines, nscores):
    """Reduce many candidate lines to the best one on each side of the image.

    The x midpoint between the leftmost and rightmost endpoints splits the
    candidates into a left and a right group; the highest-scoring line of
    each group is kept.

    Args:
        nlines: ndarray of shape (N, 2, 2), endpoints as (y, x) — column 1 is x.
        nscores: ndarray of shape (N,), confidence score per line.

    Returns:
        ndarray of shape (2, 2, 2) with the best right-side line first, or the
        input ``nlines`` unchanged when one side has no candidates.
    """
    x_min = np.min(nlines[:, :, 1])
    middle_x = (np.max(nlines[:, :, 1]) - x_min) / 2 + x_min
    x_mean = np.mean(nlines[:, :, 1], axis=1)
    # Boolean masks computed once; lines with x_mean == middle_x fall in
    # neither group (preserved original behavior).
    right = x_mean > middle_x
    left = x_mean < middle_x
    try:
        line1 = nlines[right][np.argmax(nscores[right])]
        line2 = nlines[left][np.argmax(nscores[left])]
        nlines = np.concatenate((line1[None, :, :], line2[None, :, :]), axis=0)
    except ValueError as e:
        # np.argmax raises ValueError on an empty group (all lines on one
        # side); fall back to returning the candidates unfiltered.
        print("Hpreds_post2 error",str(e))
    return nlines

def Hpreds_post3(nlines):
    """Return the lines ordered left-to-right, each with endpoints top-to-bottom.

    Endpoints are stored as (y, x): column 1 is x and column 0 is y. Lines are
    sorted by their mean x; within every line the two endpoints are sorted by y.
    The input array is not modified.
    """
    by_mean_x = np.argsort(np.mean(nlines[:, :, 1], axis=1))
    ordered = nlines[by_mean_x]  # fancy indexing copies, safe to edit in place
    for idx in range(len(ordered)):
        endpoint_order = np.argsort(ordered[idx][:, 0])
        ordered[idx] = ordered[idx][endpoint_order]
    return ordered

def predict(datadict, model):
    """Run the L-CNN model on one image and return detected lines.

    Args:
        datadict: dict with keys
            "img": image as loaded by cv2.imread (BGR, shape (H, W, 3)),
            "datatype": string; "tb" means the image is rotated 90° before
                inference and the coordinates are rotated back afterwards,
            "device": torch device the model inputs are moved to.
        model: L-CNN model; called with an input dict, returns a dict whose
            "preds" entry is post-processed by ``Hpreds_post1``.

    Returns:
        ndarray of shape (K, 2, 2) with endpoints as (y, x) in original-image
        pixel coordinates, or an empty list when fewer than two lines found.
    """
    img = datadict["img"]
    datatype = datadict["datatype"]
    device = datadict["device"]
    # origin_img = copy.deepcopy(img)
    if "tb" == datatype:
        # Rotate top/bottom images 90° (transpose + horizontal flip) so the
        # network always sees the lines in the same orientation.
        img = cv2.transpose(img)
        img = cv2.flip(img, 1)
    h0, w0 = img.shape[:2]  # original height/width (after optional rotation)
    im, ratio, pad = letterbox(img, [NORMALIZATION_HEIGHT, NORMALIZATION_WIDTH], auto=False, scaleFill=True)
    # NOTE(review): the return value of cv2.cvtColor is discarded, so this
    # line is a no-op and the model still receives BGR data. If RGB input was
    # intended, the result must be assigned back (im = cv2.cvtColor(...)) —
    # confirm against the training pipeline before changing.
    cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    # if im.ndim == 2:
    #     im = np.repeat(im[:, :, None], 3, 2)
    # im = im[:, :, :3]
    im_resized = skimage.transform.resize(im, (NORMALIZATION_HEIGHT, NORMALIZATION_WIDTH)) * 255
    # Normalize with the dataset statistics from the config (M).
    image = (im_resized - M.image.mean) / M.image.stddev
    # HWC -> CHW, add a batch dimension, convert to a float tensor.
    image = torch.from_numpy(np.rollaxis(image, 2)[None].copy()).float()
    with torch.no_grad():
        # In "testing" mode only the image is used; the junction/line targets
        # are dummy zero tensors that merely satisfy the input contract.
        input_dict = {
            "image": image.to(device),
            "meta": [
                {
                    "junc": torch.zeros(1, 2).to(device),
                    "jtyp": torch.zeros(1, dtype=torch.uint8).to(device),
                    "Lpos": torch.zeros(2, 2, dtype=torch.uint8).to(device),
                    "Lneg": torch.zeros(2, 2, dtype=torch.uint8).to(device),
                }
            ],
            "target": {
                # Heatmaps are at 1/4 of the network input resolution.
                "jmap": torch.zeros([1, 1, int(NORMALIZATION_HEIGHT / 4), int(NORMALIZATION_WIDTH / 4)]).to(device),
                "joff": torch.zeros([1, 1, 2, int(NORMALIZATION_HEIGHT / 4), int(NORMALIZATION_WIDTH / 4)]).to(device),
            },
            "mode": "testing",
        }
        H = model(input_dict)["preds"]

    nlines, nscores = Hpreds_post1(H,im)

    if len(nscores) < 2:
        # Fewer than two candidate lines: nothing usable for this task.
        return []
    elif len(nscores) == 2:
        pass
    else:
        # More than two candidates: keep the best line on each side.
        nlines = Hpreds_post2(nlines, nscores)

    # Undo the letterbox: remove padding and rescale to original pixel coords.
    nlines[:, :, 1] = (nlines[:, :, 1] - pad[0]) * w0 / (NORMALIZATION_WIDTH - pad[0] * 2)  # x
    nlines[:, :, 0] = (nlines[:, :, 0] - pad[1]) * h0 / (NORMALIZATION_HEIGHT - pad[1] * 2)  # y

    # Sort lines left-to-right and each line's endpoints top-to-bottom.
    nlines = Hpreds_post3(nlines)

    if "tb" == datatype:
        # Rotate coordinates back: swap (y, x) and mirror the new y axis.
        nlines = nlines[:, :, [1, 0]]
        nlines[:, :, 0] = w0 - nlines[:, :, 0]  # y

    return nlines

def predict_to_json(image_path):
    """Predict lines for one image and save a labelme-style JSON next to it.

    Relies on the module-level ``opt``, ``model`` and ``device`` created in
    the ``__main__`` block.

    Args:
        image_path: path of the image to annotate.

    Raises:
        ValueError: if the filename contains none of the expected orientation
            markers (``_t.``/``_b.``/``_l.``/``_r.``), so no label rule applies.

    Side effects:
        Writes ``<stem>.json`` into the image's directory.
    """
    filename, filedir, filesuffix, filenamestem = get_filename_suf_pix(image_path)
    image = cv2.imread(image_path)
    imageHeight, imageWidth, _ = image.shape
    datadict = {
        "img": image,
        "datatype": opt.predict_type,
        "device": device,
    }
    result_lines = predict(datadict, model)

    shapes = []
    if len(result_lines) > 0:
        # Per-axis maxima are loop-invariant — compute them once.
        max_y = np.max(result_lines[:, :, 0])
        max_x = np.max(result_lines[:, :, 1])
        for a, b in result_lines:
            # BUGFIX: the original could leave ``label`` unset (NameError on
            # the first line) or stale from the previous iteration when the
            # filename matched neither pattern; now that case fails loudly.
            if "_t." in filename or "_b." in filename:
                # top/bottom images: the line with the largest y is "min".
                label = "min" if max(a[0], b[0]) == max_y else "max"
            elif "_l." in filename or "_r." in filename:
                # left/right images: the line with the largest x is "min".
                label = "min" if max(a[1], b[1]) == max_x else "max"
            else:
                raise ValueError(
                    "cannot derive label: filename {} contains none of _t./_b./_l./_r.".format(filename)
                )
            shapes.append({
                "label": label,
                # labelme points are [x, y]; endpoints are stored as (y, x).
                "points": [
                    [a[1], a[0]],
                    [b[1], b[0]],
                ],
                "group_id": None,
                "shape_type": "line",
                "flags": {},
            })

    jsoncont = {
        "version": "4.5.7",
        "flags": {},
        "shapes": shapes,
        "imagePath": filename,
        "imageData": imgToBase64(image_path),
        "imageHeight": imageHeight,
        "imageWidth": imageWidth,
    }
    save_json_file(os.path.join(filedir, "{}.json".format(filenamestem)), jsoncont)


if __name__ == '__main__':
    # BUGFIX: every --help string was the same unrelated copy-pasted text and
    # the description referred to a different task; replaced with accurate
    # descriptions of what each option does.
    parser = argparse.ArgumentParser(
        description="Predict lines on images and write labelme-style JSON annotations")
    parser.add_argument('--devices',
                        default=r"0",
                        help="CUDA device id(s) to run inference on")
    parser.add_argument('--config_file',
                        default=r"config/wireframe.yaml",
                        help="path to the L-CNN yaml config file")
    parser.add_argument('--checkpoint_path',
                        default=r"/home/zengxh/workspace/lcnn/logs/210803-094635-88f281a-baseline/checkpoint_best.pth",
                        help="path to the trained model checkpoint")
    parser.add_argument('--predict_dir',
                        default=r"/home/zengxh/medias/data/ext/creepageDistance/20210714/smallimg/tb/org",
                        help="directory of images to run prediction on")
    parser.add_argument('--predict_type',
                        default=r"tb",
                        help="image orientation type, e.g. 'tb' for top/bottom images")
    opt = parser.parse_args()

    # Build the evaluation model once; predict_to_json reads these globals.
    model, device = get_model_eval(opt.config_file, opt.devices, opt.checkpoint_path)

    image_paths = list(paths.list_images(opt.predict_dir))

    for image_path in tqdm(image_paths):
        predict_to_json(image_path)