import logging
import os
import random

import cv2
import numpy as np
import onnxruntime as ort
import tqdm
from albumentations import Normalize

from maxpool2d import maxpool2d
from topk import topk_np

# Silence noisy ONNX Runtime warnings: severity 3 == ERROR, so only
# errors and fatals are logged by default.
ort.set_default_logger_severity(3)


class ONNX_engine:
    """ONNX Runtime wrapper around an MLSD-style line-segment detector.

    Pipeline: read image -> resize + normalize -> ONNX forward pass ->
    decode the center/displacement maps into line segments scaled back
    to the original image resolution.
    """

    def __init__(self, weights, size=512, cuda_id=0, score_threshold=0.1, top_k=200, min_len=5.0) -> None:
        """
        Args:
            weights: path to the .onnx model file.
            size: square network input resolution (height == width == size).
            cuda_id: CUDA device index for the execution provider.
            score_threshold: minimum center-point confidence for a detection.
            top_k: maximum number of candidate centers kept per image.
            min_len: minimum segment length (feature-map pixels) to keep.
        """
        self.img_new_shape = (size, size)
        self.weights = weights
        self.cuda_id = cuda_id
        self.init_engine()

        self.score_threshold = score_threshold
        self.top_k = top_k
        self.min_len = min_len

        # ImageNet statistics -- must match the normalization used at training time.
        self.normalize = Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))

    def init_engine(self):
        """Create the ONNX Runtime session on the configured CUDA device."""
        session_options = ort.SessionOptions()
        # Keep session logging quiet (3 == ERROR), consistent with the
        # module-level ort.set_default_logger_severity(3).
        session_options.log_severity_level = 3
        providers = ['CUDAExecutionProvider']
        provider_options = [{"device_id": self.cuda_id}]
        # BUG FIX: session_options used to be built but never handed to the
        # session, so its configuration was silently ignored.
        self.session = ort.InferenceSession(self.weights,
                                            sess_options=session_options,
                                            providers=providers,
                                            provider_options=provider_options)
        # Cache the I/O tensor names once instead of re-querying per call.
        self.input_names = [i.name for i in self.session.get_inputs()]
        self.output_names = [o.name for o in self.session.get_outputs()]

    def predict(self, im):
        """Run the network on a preprocessed NCHW batch; return the first output."""
        outputs = self.session.run(self.output_names, {self.input_names[0]: im})[0]
        return outputs

    def __call__(self, image_path):
        return self.run(image_path)

    def preprocess(self, img):
        """BGR uint8 image -> normalized float NCHW batch of size 1."""
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, self.img_new_shape)
        img = self.normalize(image=img)['image']
        img = img.transpose(2, 0, 1)
        # Add the batch dimension.
        img = np.expand_dims(img, 0)
        return img

    def decode_lines(self, tpMap, score_thresh=0.1, len_thresh=2, topk_n=1000, ksize=3):
        """Decode line segments from the TP map.

        tpMap layout (batch size must be 1):
            tpMap[:, 0]    -- center-point heatmap (logits)
            tpMap[:, 1:5]  -- per-pixel start/end displacement vectors

        Returns:
            (center_ptss, lines, scores) where lines is (N, 4) as
            [x0, y0, x1, y1] in feature-map coordinates.
        """
        b, c, h, w = tpMap.shape
        assert b == 1, 'only support batch size==1'
        displacement = tpMap[:, 1:5, :, :]
        center = tpMap[:, 0, :, :]
        # Sigmoid on the center logits.
        heat = 1 / (1 + np.exp(-center))
        # NMS: keep only local maxima within a ksize x ksize window.
        hmax = maxpool2d(heat, (ksize, ksize), stride=1, padding=(ksize - 1) // 2)
        keep = np.where(hmax == heat, 1.0, 0.0)
        heat = (heat * keep).reshape(-1, )
        heat = np.where(heat < score_thresh, np.zeros_like(heat), heat)

        scores, indices = topk_np(heat, topk_n, dim=-1)
        valid_inx = np.where(scores > score_thresh)
        scores = scores[valid_inx]
        indices = indices[valid_inx]

        # BUG FIX: a flattened (h, w) index decomposes as y = idx // w and
        # x = idx % w.  The original divided by h, which is only correct for
        # square maps (h == w).
        yy = np.expand_dims(np.floor_divide(indices, w), axis=-1)
        xx = np.expand_dims(np.fmod(indices, w), axis=-1)
        center_ptss = np.concatenate((xx, yy), axis=-1)

        # Gather per-center displacements: advanced indexing yields (2, N, 1),
        # reshaped to (N, 2) to add onto the center coordinates.
        start_point = center_ptss + displacement[0, :2, yy, xx].transpose(2, 0, 1).reshape(2, -1).transpose(1, 0)
        end_point = center_ptss + displacement[0, 2:, yy, xx].transpose(2, 0, 1).reshape(2, -1).transpose(1, 0)

        lines = np.concatenate((start_point, end_point), axis=-1)

        # Filter out segments shorter than len_thresh (Euclidean length).
        all_lens = np.sqrt(((end_point - start_point) ** 2).sum(axis=-1))
        valid_inx = np.where(all_lens > len_thresh)

        return center_ptss[valid_inx], lines[valid_inx], scores[valid_inx]

    def postprocess(self, output, img_w, img_h):
        """Decode raw network output and rescale segments to the original image size.

        NOTE(review): channels 7+ are assumed to hold the TP map, and the
        feature map is assumed to be half the network input resolution
        (hence the `/ 2`) -- confirm against the exporting model.
        """
        tp_mask = output[:, 7:, :, :]
        _, pred_lines, _ = self.decode_lines(tp_mask, self.score_threshold, self.min_len, self.top_k, 3)
        half_w = self.img_new_shape[1] / 2
        half_h = self.img_new_shape[0] / 2
        pred_lines_list = []
        for x0, y0, x1, y1 in pred_lines:
            pred_lines_list.append([img_w * x0 / half_w,
                                    img_h * y0 / half_h,
                                    img_w * x1 / half_w,
                                    img_h * y1 / half_h])
        return pred_lines_list

    def run(self, image_path):
        """Full pipeline: image path -> list of [x0, y0, x1, y1] segments."""
        im0 = cv2.imread(image_path)
        # BUG FIX: cv2.imread returns None on failure instead of raising;
        # the old code crashed later with an opaque AttributeError.
        if im0 is None:
            raise FileNotFoundError(f"cannot read image: {image_path}")
        ori_shape = im0.shape
        img = self.preprocess(im0)
        output = self.predict(img)
        result = self.postprocess(output, ori_shape[1], ori_shape[0])
        return result


# BGR red for drawing detected segments (OpenCV uses BGR channel order).
# The original built this via a numpy uint8 round-trip that reduces to
# exactly this tuple of ints.
cur_color = (0, 0, 255)
# Directory where annotated images are written by showResult().
SAVE_DIR = "./onnx_tmp"
def showResult(result, filepath):
    """Draw the detected line segments on the image and save it to SAVE_DIR.

    Args:
        result: iterable of [x0, y0, x1, y1] segments in image coordinates.
        filepath: path of the source image; the output keeps its basename.
    """
    img = cv2.imread(filepath)
    # cv2.imread returns None on failure instead of raising.
    if img is None:
        raise FileNotFoundError(f"cannot read image: {filepath}")
    for x0, y0, x1, y1 in result:
        cv2.line(img, (int(x0), int(y0)), (int(x1), int(y1)), cur_color, 2)
    # cv2.imwrite fails silently if the target directory is missing.
    os.makedirs(SAVE_DIR, exist_ok=True)
    filename = os.path.basename(filepath)
    cv2.imwrite(os.path.join(SAVE_DIR, filename), img)

if __name__ == "__main__":
    model_path = "/root/autodl-tmp/mlsd_pytorch/mlsd_pytorch/super_resolution.onnx"
    dataset_dir = "/root/autodl-tmp/mlsd_pytorch/mlsd_pytorch/line/val2017"
    engine = ONNX_engine(model_path, score_threshold=0.1)
    # NOTE: __init__ already calls init_engine(); the explicit second call
    # that used to be here only rebuilt the session redundantly.

    for filename in tqdm.tqdm(os.listdir(dataset_dir)):
        filepath = os.path.join(dataset_dir, filename)
        result = engine(filepath)
        showResult(result, filepath)