import copy
import os.path
import cv2
import numpy as np
import onnxruntime as ort
from glob import glob
from tqdm import tqdm
import os
import cv2
import base64
import json

def bbox2json(bboxes, categorys, imgs, image_path, json_dir):
    """Write one labelme-style JSON annotation file per image.

    Parameters
    ----------
    bboxes : list of per-image polygon lists; each polygon is an (N, 2)
        array-like of [x, y] points.
    categorys : list of per-image label lists, parallel to ``bboxes``.
    imgs : list of BGR images (numpy arrays), parallel to ``image_path``.
    image_path : list of source image file paths.
    json_dir : directory that receives one ``<image-stem>.json`` per image.
    """
    assert len(bboxes) == len(categorys)
    assert len(image_path) == len(bboxes)

    for i in range(len(image_path)):
        # splitext handles any extension; the original split on '.png' and
        # produced 'name.jpg.json' for jpeg inputs (the caller globs '*g',
        # which matches both .png and .jpg).
        stem = os.path.splitext(os.path.basename(image_path[i]))[0]
        json_path = os.path.join(json_dir, stem + '.json')
        img = imgs[i]
        # Use the in-memory image for dimensions; the original re-read the
        # file from disk with cv2.imread, which was redundant.
        h, w, _ = img.shape
        # labelme embeds the image inline as base64-encoded JPEG bytes.
        encoded = cv2.imencode('.jpg', img)[1]
        image_data = base64.b64encode(encoded).decode('ascii')
        shapes = []
        for poly, label in zip(bboxes[i], categorys[i]):
            shapes.append({
                'label': label,
                'points': [[pt[0], pt[1]] for pt in poly],
                # None serializes to JSON null, which is what labelme
                # expects; the original wrote the string "null".
                'group_id': None,
                'shape_type': 'polygon',
                'flags': {},
            })
        res = {
            'version': '4.2.10',
            'flags': {},
            'imagePath': os.path.basename(image_path[i]),
            'imageData': image_data,
            'shapes': shapes,
            'imageHeight': h,
            'imageWidth': w,
        }
        with open(json_path, encoding='utf8', mode='w') as f:
            json.dump(res, f)
class SegLabel:
    """Binary segmentation model wrapped around an ONNX Runtime session."""

    def __init__(self, onnx_path='', thresh=0.5):
        self.sess = ort.InferenceSession(onnx_path)
        self.update_params()
        # Probability threshold used to binarize the model's output map.
        self.thresh = thresh

    def update_params(self):
        """Cache the session's tensor names and expected input geometry."""
        first_input = self.sess.get_inputs()[0]
        self.input_name = first_input.name
        shape = first_input.shape
        self.batch = shape[0]
        # (width, height) order, as cv2.resize expects.
        self.wh = (shape[-1], shape[-2])
        self.output_name = self.sess.get_outputs()[0].name
        print(self.input_name, self.output_name)

    def infer_img(self, im):
        """Run inference on a BGR image and return a uint8 0/255 mask
        resized back to the input image's resolution."""
        rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        orig_h, orig_w, _ = rgb.shape
        resized = cv2.resize(rgb, self.wh)
        # Scale to [0, 1], then normalize to [-1, 1].
        normed = ((resized / 255.) - 0.5) / 0.5
        # HWC -> NCHW with a singleton batch dimension.
        chw = np.transpose(normed, [2, 0, 1])[np.newaxis, ...]
        if self.batch == 1:
            batch_in = chw
        else:
            # Fixed-batch model: tile the single image to fill the batch.
            batch_in = np.concatenate([chw] * self.batch, axis=0)
        out = self.sess.run([self.output_name],
                            {self.input_name: batch_in.astype(np.float32)})[0]
        mask = cv2.resize(out[0, 0], (orig_w, orig_h))
        mask = (mask > self.thresh) * 255
        return mask.astype(np.uint8)
if __name__ == '__main__':
    onnx_path = '/home/wsl/项目/ars_train_yun/work_dir/work_dir/line_seg/model.onnx'
    im_path = '/media/wsl/a9f0161f-7971-c843-8c81-c68049a0235a/DataSet/肥东水泥/line/1'
    # '*g' matches both *.png and *.jpg.
    files = glob('%s/*g' % im_path)
    model = SegLabel(onnx_path)
    for file in tqdm(files):
        im = cv2.imread(file)
        out = model.infer_img(copy.deepcopy(im))
        contours, hier = cv2.findContours(out.astype(np.uint8),
                                          cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        polys = []
        for contour in contours:
            arclen = cv2.arcLength(contour, True)
            # Polygon-approximation tolerance (max distance between the
            # fitted polygon and the original contour) scales with the
            # contour's perimeter.
            epsilon = max(3, int(arclen * 0.005))
            approx = cv2.approxPolyDP(contour, epsilon, True)
            # np.float was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin float is the equivalent (float64) dtype alias.
            polys.append(approx.reshape(-1, 2).astype(float))
        bbox2json([polys], [['0'] * len(polys)], [im], [file], im_path)
