# -*- coding: UTF-8 -*-
#import _init_paths
import json,requests
import os
import numpy as np
import cv2
import io
import glob
import base64
import datetime
import uuid
import torch
from torchvision import transforms
from src.dataset import CocoDataset, Resizer, Normalizer
from conf_classes import COCO_CLASSES_FACE
import getCameraTemp as temp
#os.environ['CUDA_VISIBLE_DEVICES']='6'

# Per-class confidence thresholds for the smoke detector: detections scoring
# below their class threshold are dropped in json_result().
mask_classes_score = {
    'smoke':0.5,
    'no-smoke':0.5
}


class MaskHandler:
    """Loads EfficientDet checkpoints and runs smoke/face detection on images.

    Usage: construct, call start() once to load the model(s) onto the GPU,
    then call runDetector / runDetectorNetImg / runDetectorNetFile per image.
    """

    def __init__(self):
        # Checkpoint paths only; actual model loading is deferred to start().
        self.checkpoint_file_smoke = 'models/smoke_signatrix_efficientdet_coco_best_epoch144.pth'
        self.checkpoint_file_fire = 'models/fire_signatrix_efficientdet_coco_best_epoch303.pth'
        self.checkpoint_file_face = 'models/face_signatrix_efficientdet_coco_best_epoch74.pth'

    def start(self, start_all=False):
        """Load the smoke model (and optionally fire/face models) onto the GPU.

        Args:
            start_all: when True, also load the fire and face detectors.

        Returns:
            True once loading completes.
        """
        print("loading model \n")
        tb = datetime.datetime.now()
        print("tb:", tb)
        # Checkpoints were saved as DataParallel wrappers, hence ".module".
        self.model = torch.load(self.checkpoint_file_smoke).module
        self.model.cuda()
        self.model.eval()
        self.transform = transforms.Compose([Normalizer(), Resizer()])
        print("smoke-detection model is loaded.\n")
        self.model_fire = None
        self.model_face = None
        if start_all:
            self.model_fire = torch.load(self.checkpoint_file_fire).module
            self.model_fire.cuda()
            self.model_fire.eval()
            # BUG FIX: previously printed "smoke-detection model is loaded."
            print("fire-detection model is loaded.\n")

            self.model_face = torch.load(self.checkpoint_file_face).module
            self.model_face.cuda()
            self.model_face.eval()
            print("face-detection model is loaded.\n")
        te = datetime.datetime.now()
        print("te:", te)
        print("model(s) loaded successfully, time cost:", te - tb)
        return True

    def runDetector_raw(self, obj_type, img, visual_result=False):
        """Run detection on an already-decoded BGR image (numpy array).

        Args:
            obj_type: 'face' or 'candle'.
            img: BGR image, H x W x 3 — annotated in place when visual_result.
            visual_result: draw boxes/labels onto img when True.

        Returns:
            (detections, img): detections is a list of dicts with string-valued
            bbox coordinates, score and type.

        Raises:
            ValueError: for an unsupported obj_type (previously this fell
                through and raised UnboundLocalError on `res`).
        """
        d = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        d = d.astype(np.float32) / 255.
        # Dummy annotations: the transform pipeline expects an 'annot' key.
        annots = np.zeros((1, 5))
        sample = {"img": d, "annot": annots}
        sample = self.transform(sample)
        d = sample['img']
        scale = sample['scale']
        height, width, channel = img.shape
        with torch.no_grad():
            # HWC -> CHW, add batch dimension.
            d = d.cuda().permute(2, 0, 1)
            d = d.float().unsqueeze(dim=0)
            if obj_type == 'face':
                # NOTE(review): this uses self.model (the smoke checkpoint),
                # not self.model_face — confirm this is intentional.
                scores, labels, boxes = self.model(d)
                # Boxes are in resized-image coordinates; map back to original.
                boxes /= scale
                res, img = self.json_result_raw(obj_type, scores, labels, boxes, img, visual_result)
            elif obj_type == 'candle':
                # NOTE(review): self.model_candle is never assigned anywhere in
                # this class — this branch raises AttributeError as written.
                scores, labels, boxes = self.model_candle(d)
                boxes /= scale
                # BUG FIX: json_result returns 3 values; the old 2-way unpack
                # raised ValueError here.
                res, b64_img, _ = self.json_result(obj_type, scores, labels, boxes, img, visual_result)
            else:
                raise ValueError("unsupported obj_type: {}".format(obj_type))
        return res, img

    def json_result_raw(self, obj_type, scores, labels, boxes, img, visual_result=False):
        """Filter detections by per-class score threshold; optionally draw them.

        Args:
            obj_type: 'fire' or 'candle' — selects the threshold table.
            scores, labels, boxes: per-detection tensors from the model.
            img: original BGR image, annotated in place when visual_result.
            visual_result: draw rectangles and labels onto img when True.

        Returns:
            (json_data, img): json_data is a list of detection dicts.

        Raises:
            ValueError: for an unsupported obj_type (previously `classes_score`
                was left unbound and raised UnboundLocalError).
        """
        json_data = []
        for i in range(boxes.shape[0]):
            bbox = boxes[i]
            data = dict()
            data['xmin'] = '{0:.2f}'.format(bbox[0])
            data['ymin'] = '{0:.2f}'.format(bbox[1])
            data['xmax'] = '{0:.2f}'.format(bbox[2])
            data['ymax'] = '{0:.2f}'.format(bbox[3])
            data['score'] = '{0:.4f}'.format(float(scores[i]))
            # NOTE(review): COCO_CLASSES_FIRE is not imported in this file
            # (only COCO_CLASSES_FACE is) — NameError as written; it likely
            # needs importing from conf_classes. Left untouched pending confirmation.
            data['type'] = COCO_CLASSES_FIRE[int(labels[i])]

            # NOTE(review): fire_classes_score / candle_classes_score are not
            # defined in this module either — confirm their source.
            if obj_type == 'fire':
                classes_score = fire_classes_score
            elif obj_type == 'candle':
                classes_score = candle_classes_score
            else:
                raise ValueError("unsupported obj_type: {}".format(obj_type))
            if float(data['score']) >= classes_score[data['type']]:
                json_data.append(data)
                if visual_result:
                    x_min = int(float(data['xmin']))
                    y_min = int(float(data['ymin']))
                    x_max = int(float(data['xmax']))
                    y_max = int(float(data['ymax']))
                    text = data['type'] + " " + str(round(float(data['score']), 2))
                    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 3)
                    cv2.putText(img, text, (x_min, y_min), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 255), 1)
        return json_data, img

    def json_result(self, obj_type, scores, labels, boxes, img, visual_result=False):
        """Filter smoke/face detections by threshold; draw and optionally encode.

        Args:
            obj_type: passed through by callers; not used for thresholding
                here (mask_classes_score is always used).
            scores, labels, boxes: per-detection tensors from the model.
            img: original BGR image; 'smoke' boxes are drawn onto it.
            visual_result: when True and there are detections, re-encode the
                annotated image as base64 JPEG.

        Returns:
            (json_data, b64_img, global_max_temp): detection dicts, base64
            string (or None), and the max temperature — currently always 0
            because the thermal-camera lookup is commented out.
        """
        json_data = []
        b64_img = None
        global_max_temp = 0
        for i in range(boxes.shape[0]):
            bbox = boxes[i]
            data = dict()
            data['xmin'] = '{0:.2f}'.format(bbox[0])
            data['ymin'] = '{0:.2f}'.format(bbox[1])
            data['xmax'] = '{0:.2f}'.format(bbox[2])
            data['ymax'] = '{0:.2f}'.format(bbox[3])
            data['score'] = '{0:.4f}'.format(float(scores[i]))
            data['type'] = COCO_CLASSES_FACE[int(labels[i])]

            if float(data['score']) >= mask_classes_score[data['type']]:
                json_data.append(data)
                x_min = int(float(data['xmin']))
                y_min = int(float(data['ymin']))
                x_max = int(float(data['xmax']))
                y_max = int(float(data['ymax']))
                text = data['type'] + " " + str(round(float(data['score']), 2))
                if data['type'] == 'smoke':
                    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
                    cv2.putText(img, 'smoke', (x_min, y_min), cv2.FONT_HERSHEY_COMPLEX, 0.6, (255, 0, 0), 2)
                    # Temperature overlay via getCameraTemp (module `temp`) is
                    # intentionally disabled; global_max_temp stays 0.
                else:
                    print('~~~~~~~~~~~~~~~~~')
        if visual_result and len(json_data) > 0:
            # Round-trip through a temp JPEG to produce a base64 annotated image.
            str_uuid = str(uuid.uuid1()) + "-" + str(uuid.uuid4())
            if not os.path.exists("images"):
                os.makedirs("images")
            tmp_img_path = "images/{}.jpg".format(str_uuid)
            cv2.imwrite(tmp_img_path, img)
            with open(tmp_img_path, 'rb') as f:
                b64_img = base64.b64encode(f.read()).decode()
            os.remove(tmp_img_path)

        return json_data, b64_img, global_max_temp

    def runDetectorNetFile(self, obj_type, img_data, visual_result=False):
        """Run detection on raw (already-binary) image bytes."""
        return self.runDetector(obj_type, img_data, visual_result)

    def runDetectorNetImg(self, obj_type, img_data, visual_result=False):
        """Run detection on base64-encoded image bytes."""
        img_data = base64.b64decode(img_data)
        return self.runDetector(obj_type, img_data, visual_result)

    def runDetector(self, obj_type, img_bytes_data, visual_result=False):
        """Decode encoded image bytes and run the requested detector.

        Args:
            obj_type: 1 for the smoke model, 2 for the face model.
            img_bytes_data: JPEG/PNG-encoded image bytes.
            visual_result: forwarded to json_result (base64 annotated image).

        Returns:
            (json_data, b64_img, global_max_temp) from json_result.

        Raises:
            ValueError: for an unsupported obj_type (previously the return
                line raised NameError on unbound locals).
        """
        tb = datetime.datetime.now()
        # Decode bytes into a BGR image.
        img = cv2.imdecode(np.frombuffer(img_bytes_data, np.uint8), cv2.IMREAD_COLOR)
        d = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        d = d.astype(np.float32) / 255.
        annots = np.zeros((1, 5))
        sample = {"img": d, "annot": annots}
        sample = self.transform(sample)
        d = sample['img']
        scale = sample['scale']
        print("scale", scale, "obj_type", obj_type)
        with torch.no_grad():
            # HWC -> CHW, add batch dimension.
            d = d.cuda().permute(2, 0, 1)
            d = d.float().unsqueeze(dim=0)
            t1 = datetime.datetime.now()
            if obj_type == 1:
                scores, labels, boxes = self.model(d)
                boxes /= scale
                t2 = datetime.datetime.now()
                print("[mask_detector]time cost:", t2 - t1)
                res, b64_img, global_max_temp = self.json_result(obj_type, scores, labels, boxes, img, visual_result)
            elif obj_type == 2:
                # Requires start(start_all=True); model_face is None otherwise.
                scores, labels, boxes = self.model_face(d)
                boxes /= scale
                t2 = datetime.datetime.now()
                print("[face_detector]time cost:", t2 - t1)
                res, b64_img, global_max_temp = self.json_result(obj_type, scores, labels, boxes, img, visual_result)
            else:
                raise ValueError("unsupported obj_type: {}".format(obj_type))
        te = datetime.datetime.now()
        print("[runDetector]Time Cost:", te - tb)
        return res, b64_img, global_max_temp


