import cv2 as cv
from scipy.spatial import distance
import numpy as np
from collections import OrderedDict
import time
import os.path
import zipfile
import subprocess
import shutil
import json
from PIL import Image
import os
import torch
from torch import nn as nn


'''
Output directory layout:
+videodata
    +images ----- one PNG per decoded video frame
        -img_{}.png
    +labelstext ----- the complete tracking result for one video
        -record.json
    -framelabel ----- per-object split of one video's tracking result
'''

# --- Environment initialization -------------------------------------------
# NOTE(review): hard-coded absolute path to the danger classifier — adjust
# per deployment; torch.load of a pickle is only safe on trusted files.
classmodel=torch.load("/home/gis/gisdata/data/jupyterlabhub/gitcode/cardanger/arckive/modelrecord_1208/modelRecord/resnet50_minvail.pkl")
yolomodel = {"config_path": "./yolo_dir/yolov3.cfg",
             "model_weights_path": "./yolo_dir/yolov3.weights",
             "coco_names": "./yolo_dir/coco.names",
             "confidence_threshold": 0.5,   # minimum class score to keep a box
             "threshold": 0.3               # NMS overlap threshold
             }
net = cv.dnn_DetectionModel('./yolo_dir/yolov4.cfg', './yolo_dir/yolov4.weights')
# Run inference on the GPU through the CUDA DNN backend.
net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)

# COCO class names, one per line.  `with` closes the file handle (the
# original leaked it).
with open(yolomodel["coco_names"]) as _names_f:
    labels = _names_f.read().strip().split("\n")

np.random.seed(12345)
layer_names = net.getLayerNames()
# Compatibility fix: getUnconnectedOutLayers() returns 1-element arrays on
# OpenCV < 4.5.4 and plain scalars on newer versions; flattening through a
# numpy array handles both shapes (the original `i[0]-1` crashes on new cv2).
layer_names = [layer_names[int(i) - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]
print(layer_names)

# Extract the file name from a path.
def getFileName(filesrc):
    """Return the final path component (file name) of *filesrc*."""
    _, name = os.path.split(filesrc)
    return name



class Tracker:
    """Centroid-based multi-object tracker.

    Matches detections across frames by nearest-centroid distance and keeps,
    per tracked object, a history of expanded bounding-box records in
    ``self.labelinfo`` for later archiving.
    """

    def __init__(self, maxLost = 30):           # maxLost: maximum object lost counted when the object is being tracked
        self.nextObjectID = 0                   # ID of next object
        self.objects = OrderedDict()            # stores ID:Locations (centroids)
        self.lost = OrderedDict()               # stores ID:Lost_count

        self.maxLost = maxLost                  # maximum number of frames object was not detected.

        # Archival bookkeeping for the processed video.
        self.labelinfo = {}     # objectID -> list of per-frame records (keyed like nextObjectID)
        self.video_name = ''    # name of the current video
        self.frame_names = []   # names of the frames seen so far
        self.frame_H = 0
        self.frame_W = 0

        # Fractional margin by which each bounding box is expanded per side.
        self.exrate = 0.1

    def setvideo_name(self, video_name):
        """Remember the name of the video being tracked."""
        self.video_name = video_name

    def addFrame_name(self, frame_names, H, W):
        """Register a new frame name together with the frame height/width."""
        self.frame_names.append(frame_names)
        self.frame_H = H
        self.frame_W = W

    def getCurr_Frame_name(self):
        """Return the most recently registered frame name."""
        return self.frame_names[-1]

    def _expanded_box(self, object_location):
        """Normalize an (x1, y1, x2, y2, label) tuple into an expanded box.

        The box is grown by ``exrate`` of its width/height on every side and
        clamped to the frame bounds.  Returns ([minx, miny, maxx, maxy], label).
        """
        tx, ty, ttx, tty, labelname = object_location
        minx, maxx = min(tx, ttx), max(tx, ttx)
        miny, maxy = min(ty, tty), max(ty, tty)

        xex = int((maxx - minx) * self.exrate)
        yex = int((maxy - miny) * self.exrate)

        minx = max(minx - xex, 0)
        miny = max(miny - yex, 0)
        maxx = min(maxx + xex, self.frame_W)
        maxy = min(maxy + yex, self.frame_H)
        return [minx, miny, maxx, maxy], labelname

    def _record(self, objid, new_object_location, danger):
        """Append one per-frame record for object *objid*."""
        box, labelname = self._expanded_box(new_object_location)
        self.labelinfo[objid].append({
            'objectid': objid,
            'framebox': box,
            'label': labelname,
            'framename': self.getCurr_Frame_name(),
            'danger': danger
        })

    # A brand-new object appeared: start its record history.
    def addnewobject(self, objid, new_object_location, danger):
        self.labelinfo[objid] = []
        self._record(objid, new_object_location, danger)

    # An existing object was matched again: extend its record history.
    def updataobject(self, objid, new_object_location, danger):
        self._record(objid, new_object_location, danger)

    # A lost object needs no bookkeeping beyond the miss counter in update().
    def lostobject(self, objid):
        pass

    def output(self):
        """Return the accumulated tracking result as a plain dict."""
        res = {}
        res['videoname'] = self.video_name
        res['frames'] = self.frame_names          # ordered list of frame names
        res['labelinfo'] = self.labelinfo         # records grouped by object ID
        return res

    def addObject(self, new_object_location):
        """Register a new object at *new_object_location* (centroid).

        Returns nextObjectID *after* the increment — callers subtract 1 to
        obtain the ID that was actually assigned.
        """
        self.objects[self.nextObjectID] = new_object_location
        self.lost[self.nextObjectID] = 0    # fresh objects start with zero misses

        self.nextObjectID += 1
        return self.nextObjectID

    def removeObject(self, objectID):
        """Forget a tracked object whose miss counter exceeded maxLost."""
        del self.objects[objectID]
        del self.lost[objectID]

    @staticmethod
    def getLocation(bounding_box):
        """Return the integer centroid of an (x1, y1, x2, y2, label) box."""
        xlt, ylt, xrb, yrb, labelname = bounding_box
        return (int((xlt + xrb) / 2.0), int((ylt + yrb) / 2.0))

    def update(self, detections, dagerls):
        """Advance the tracker by one frame.

        detections: list of (x1, y1, x2, y2, label) tuples for this frame.
        dagerls: danger classification per detection, parallel to detections.
        Returns the current objectID -> centroid mapping.
        """
        if len(detections) == 0:   # nothing detected: age every tracked object
            lost_ids = list(self.lost.keys())
            for objectID in lost_ids:
                self.lost[objectID] += 1
                if self.lost[objectID] > self.maxLost:
                    self.removeObject(objectID)

            return self.objects

        new_object_locations = np.zeros((len(detections), 2), dtype="int")

        labells = {}   # detection index -> full detection tuple (incl. label)
        for (i, detection) in enumerate(detections):
            new_object_locations[i] = self.getLocation(detection)
            labells[i] = detection

        if len(self.objects) == 0:
            # Nothing tracked yet: every detection becomes a new object.
            for i in range(0, len(detections)):
                newid = self.addObject(new_object_locations[i])
                self.addnewobject(newid - 1, labells[i], dagerls[i])
        else:
            objectIDs = list(self.objects.keys())
            previous_object_locations = np.array(list(self.objects.values()))

            D = distance.cdist(previous_object_locations, new_object_locations) # pairwise distance between previous and current

            row_idx = D.min(axis=1).argsort()      # rows ordered by best-match quality
            cols_idx = D.argmin(axis=1)[row_idx]   # best new-detection column per row

            assignedRows, assignedCols = set(), set()

            # Greedy one-to-one assignment: best matches first.
            for (row, col) in zip(row_idx, cols_idx):

                if row in assignedRows or col in assignedCols:
                    continue

                objectID = objectIDs[row]
                self.objects[objectID] = new_object_locations[col]   # refresh centroid
                self.lost[objectID] = 0
                self.updataobject(objectID, labells[col], dagerls[col])

                assignedRows.add(row)
                assignedCols.add(col)

            unassignedRows = set(range(0, D.shape[0])).difference(assignedRows) # tracked but unmatched
            unassignedCols = set(range(0, D.shape[1])).difference(assignedCols) # detected but unmatched

            if D.shape[0] >= D.shape[1]:
                # More tracked objects than detections: age the unmatched ones.
                for row in unassignedRows:
                    objectID = objectIDs[row]
                    self.lost[objectID] += 1

                    if self.lost[objectID] > self.maxLost:
                        self.removeObject(objectID)

            else:
                # More detections than tracked objects: register new objects.
                # BUG FIX: addObject() takes only the location — the original
                # also passed dagerls[col], raising TypeError the first time a
                # new object appeared mid-video.
                for col in unassignedCols:
                    newid = self.addObject(new_object_locations[col])

                    self.addnewobject(newid - 1, labells[col], dagerls[col])

        return self.objects

def classfycls(img, clssmodel):
    """Classify an image crop with *clssmodel*; return the argmax class index.

    img: H x W x C image array (numpy); transposed to C x H x W for the model.
    clssmodel: a torch model already on the GPU.
    NOTE(review): uses .cuda() unconditionally, so this requires a CUDA device.
    """
    imgcu = torch.from_numpy(np.transpose(img, (2, 0, 1))).cuda().float()
    # BUG FIX: the original called the *builtin* `classmethod(imgcu)` instead
    # of invoking the classifier that was passed in.
    tlabel = clssmodel(imgcu)
    # Cast to a plain int so the value survives json.dumps downstream.
    tl = int(np.argmax(tlabel.detach().cpu().numpy()))
    return tl

def rendervideo(videosrc=''):
    '''
    Run detection + tracking over a video and archive the result.

    videosrc: path of the source video file.

    Side effects: recreates ./videodata (images/, labelstext/, framelabel/),
    saves every decoded frame as img_{n}.png, and writes the tracking record
    filtered to "car" objects into ./videodata/labelstext/record.json.
    Depends on the module-level globals `net`, `labels`, `layer_names`,
    `yolomodel` and `classmodel`.
    '''
    # --- prepare a clean output directory tree ---
    print('正在整理文件，进行环境初始化：{}'.format(videosrc))
    if os.path.exists('./videodata'):
        shutil.rmtree('./videodata')
    os.mkdir('./videodata')
    os.mkdir('./videodata/images')
    os.mkdir('./videodata/labelstext')
    os.mkdir('./videodata/framelabel')

    print('环境初始化结束，开始正式追踪测试')

    maxLost = 5   # frames an object may go undetected before being dropped
    tracker = Tracker(maxLost=maxLost)

    video_src = videosrc
    tracker.setvideo_name(getFileName(video_src))
    cap = cv.VideoCapture(video_src)
    (H, W) = (None, None)   # frame height/width, read from the first frame
    imgcount = -1
    while True:
        imgcount = imgcount + 1
        ok, image = cap.read()
        if not ok:
            print("Cannot read the video feed.")
            break

        if W is None or H is None:
            (H, W) = image.shape[:2]

        # Persist the current frame under ./videodata/images.
        imname = 'img_{}.png'.format(str(imgcount))
        print('\r---{}---'.format(imname), end='')

        cv.imwrite(os.path.join('./videodata/images', imname), image)
        tracker.addFrame_name(imname, H, W)
        blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        net.setInput(blob)
        detections_layer = net.forward(layer_names)   # raw YOLO output layers

        detections_bbox = []     # final (x1, y1, x2, y2, label) boxes

        boxes, confidences, classIDs = [], [], []
        dangerlist = []
        for out in detections_layer:
            for detection in out:
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]

                if confidence > yolomodel['confidence_threshold']:
                    # YOLO emits normalized center/size; scale to pixels.
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    # BUG FIX: numpy images index as [row(y), col(x)]; the
                    # original sliced [x.., y..] and cropped the wrong region.
                    # Clamp starts at 0 so boxes past the frame edge don't
                    # yield empty slices.
                    tempimg = image[max(y, 0):y + height, max(x, 0):x + width, :]
                    # BUG FIX: was undefined name `clssmodel`; use the
                    # module-level classifier `classmodel`.
                    danger = classfycls(tempimg, classmodel)
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
                    dangerlist.append(danger)

        # Non-maximum suppression over the candidate boxes.
        idxs = cv.dnn.NMSBoxes(boxes, confidences, yolomodel["confidence_threshold"], yolomodel["threshold"])
        dagerls = []
        if len(idxs) > 0:
            for i in idxs.flatten():
                (x, y) = (boxes[i][0], boxes[i][1])   # top-left corner
                (w, h) = (boxes[i][2], boxes[i][3])
                detections_bbox.append((x, y, x + w, y + h, labels[classIDs[i]]))
                # BUG FIX: was `danger[i]` — `danger` is the scalar for the
                # *last* detection; the per-box value lives in `dangerlist`.
                dagerls.append(dangerlist[i])

        # Advance the tracker with this frame's surviving detections.
        tracker.update(detections_bbox, dagerls)

    cap.release()   # release the capture device

    # Keep only objects whose first record was labelled "car".
    trackrecord = tracker.output()
    resultTrack = {'videoname': trackrecord["videoname"],
                   'frames': trackrecord["frames"],
                   'labelinfo': {}
                   }
    for i in trackrecord["labelinfo"]:
        if trackrecord["labelinfo"][i][0]["label"] == "car":
            resultTrack['labelinfo'][i] = trackrecord["labelinfo"][i]

    with open("./videodata/labelstext/record.json", "w", encoding='utf-8') as f:
        restext = json.dumps(resultTrack, ensure_ascii=False)
        f.write(restext)
        print("写入框架文件：{}...".format("./videodata/labelstext/record.json"))

# Render the archived tracking result onto the saved frames and display it.
def renderImagebyJson():
    """Replay ./videodata/labelstext/record.json over the saved frames.

    For each tracked object, draws its bounding box and an "id: label danger"
    caption on the corresponding frame image and shows it in an OpenCV window.
    """
    rootdir = './videodata'
    jsonfpath = os.path.join(rootdir, 'labelstext', 'record.json')
    with open(jsonfpath, "r") as load_f:
        labelinfo = json.load(load_f)
    print(labelinfo['videoname'], str(len(labelinfo['frames'])))

    # Render each object's per-frame records.
    bbox_colors = np.random.randint(0, 255, size=(10, 3))
    for objid in labelinfo['labelinfo']:
        frames_temp = labelinfo['labelinfo'][objid]
        for temp in frames_temp:
            # Load the frame this record belongs to.
            img = cv.imread(os.path.join(rootdir, 'images', temp['framename']))
            xlt, ylt, xrb, yrb = temp['framebox']
            clr = [int(c) for c in bbox_colors[3]]   # caption color (fixed row)
            cv.rectangle(img, (xlt, ylt), (xrb, yrb), [200, 33, 53], 2)
            cv.putText(img, "{}: {} {}".format(objid, temp['label'], temp['danger']),
                       (xlt, ylt - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, 2)
            cv.imshow("image", img)
            # NOTE(review): 'q' only skips the rest of the *current* object's
            # frames, not the whole replay — preserved as in the original.
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
    cv.destroyWindow("image")

#renderImagebyJson()
#rendervideo('./videosrc/03_内涝.mp4') 
