import pymysql
import sys
from AIDetector_pytorch import Detector
from threading import Thread
import cv2
import os
import time
import threading
import traceback
import plotline
from sql_process import *
from facedetect import *
from flask import jsonify
from speed_check import estimateSpeed, WIDTH, HEIGHT
from speed_get import speed_get
from retinaface import Retinaface
import numpy as np
import math
import random
import global_val
import ffmpeg 
from PIL import Image


class base_detector():
    def __init__(self):
        """Initialize the per-stream state used by the sundry-difference pipeline."""
        # Wall-clock timestamp of the reference frame.
        self.start_time = 0
        # Elapsed seconds between the reference frame and the current frame.
        self.true_time = 0
        # Reference crop captured when a new video stream starts (frame 1).
        self.img1 = None

    def MeanShift(self, input, r):

        classification = []
        startNum = 60  # 起始点数量
        radium = r  # 窗口半径
        num = len(input)  # 样本数量
        Sample = np.int32([[0, 0, 0] for m in range(num)])  # 添加分类信息 0为未分类
        for i in range(num):
            Sample[i][0] = input[i][0]
            Sample[i][1] = input[i][1]

        # 随机选择一个起始点
        for i in range(startNum):
            # 范围
            ptr = random.randint(0, num - 1)

            # 记录分类中心点
            center = [0, 0]
            center[0] = Sample[ptr][0]
            center[1] = Sample[ptr][1]
            Flag = 0
            # 判断终止条件
            iteration = 0
            while ((Flag == 0) & (iteration < 10)):
                orientation = [0, 0]  # 移动方向
                # 找出窗口内的所有样本点
                for j in range(num):
                    oX = Sample[j][0] - center[0]
                    oY = Sample[j][1] - center[1]
                    dist = math.sqrt(oX * oX + oY * oY)
                    # 该点在观察窗内
                    if dist <= radium:
                        orientation[0] = orientation[0] + oX / 20
                        orientation[1] = orientation[1] + oY / 20
                # 开始漂移
                center[0] = center[0] + orientation[0]
                center[1] = center[1] + orientation[1]
                # 中心点不再移动时
                oX = orientation[0]
                oY = orientation[1]
                iteration = iteration + 1
                if math.sqrt(oX * oX + oY * oY) < 3:
                    Flag = 1

            # 添加不重复的新分类信息
            Flag = 1
            for i in range(len(classification)):
                # 与当前存在的分类位置差别小于5
                oX = classification[i][0] - center[0]
                oY = classification[i][1] - center[1]
                if math.sqrt(oX * oX + oY * oY) < math.sqrt(classification[i][2]) + 30:
                    Flag = 0
                    break
            if Flag == 1:
                temp = [center[0], center[1], 0]
                classification.append(temp)

        # 给所有样本点分类
        for i in range(num):
            Index = 0
            minValue = 99999
            # 找出最近的分类
            for j in range(len(classification)):
                xx = classification[j][0] - Sample[i][0]
                yy = classification[j][1] - Sample[i][1]
                distination = abs(xx * xx + yy * yy)
                if distination <= minValue:
                    Index = j
                    minValue = distination
            Sample[i][2] = Index
            classification[Index][2] = classification[Index][2] + 1

        return classification

    def soundries_detect(self, img, videonum, cur, connector, type):
        """Detect newly appeared objects in the configured region by comparing
        the current frame against a reference frame.

        Pipeline: crop region -> SIFT key points -> FLANN matching ->
        homography -> dense descriptor comparison on a sampling grid ->
        mean-shift clustering of difference points -> evidence image and DB
        record when a large cluster is found.

        Args:
            img: current full BGR frame.
            videonum: frame counter; 1 marks the first frame of a stream.
            cur, connector: database cursor / connection for violation rows.
            type: violation type string used in file names and DB rows.

        Returns:
            The input frame ``img`` (evidence is written to disk / DB as a
            side effect).
        """
        func = 2             # 1 = interactive visualisation, 2 = clustering
        a = 1                # display scale (only used when func == 1)
        detectDensity = 2    # sample-grid spacing in multiples of `size`
        shreshood = 350      # descriptor-distance threshold for "different"
        windowSize = 10      # mean-shift window radius
        img2 = None
        end_time = 0
        # BUG FIX: `result` is now always bound before the final return
        # (previously unbound on the func == 1 path).
        result = img

        # Crop the detection region configured by the user.
        pointArray = global_val.get_pointArray('pointArray')
        left_x = pointArray[0][0]
        left_y = pointArray[0][1]
        right_X = pointArray[3][0]
        right_y = pointArray[3][1]
        crop_img = img[left_y:right_y, left_x:right_X].copy()

        if videonum == 1:
            # First frame of the stream becomes the reference image.
            self.start_time = time.time()
            self.img1 = crop_img
        else:
            if self.true_time == 0:
                self.start_time = time.time()
                img2 = crop_img
            else:
                img2 = crop_img
                end_time = time.time()
        self.true_time = end_time - self.start_time

        # Compare only when at least 0.5 s separates the two frames.
        if self.true_time > 0.5:
            print('true time>0.5 执行')
            sift = cv2.xfeatures2d.SIFT_create()
            print('siftbefore')
            print('sift', sift)
            # SIFT key points / descriptors for reference and current crop.
            kp1, des1 = sift.detectAndCompute(self.img1, None)
            kp2, des2 = sift.detectAndCompute(img2, None)

            # FLANN matching with Lowe's ratio test.
            FLANN_INDEX_KDTREE = 0
            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=6)
            search_params = dict(checks=10)
            flann = cv2.FlannBasedMatcher(index_params, search_params)
            matches = flann.knnMatch(des1, des2, k=2)
            good = [m for m, n in matches if m.distance < 0.7 * n.distance]

            # Homography mapping the reference crop onto the current crop.
            pts_src = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            pts_dst = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(pts_src, pts_dst, cv2.RANSAC, 5.0)

            print('输出差异before')
            if (func == 1) or (func == 2):
                print('输出差异识别结果')
                height, width, channel = self.img1.shape
                size = int(width * 0.01)  # key-point scale

                # Bounding box of the RANSAC inliers in the reference image
                # defines where difference points are sampled.
                xMinLeft = width
                xMaxLeft = 0
                yMinLeft = height
                yMaxLeft = 0
                for i in range(len(pts_src)):
                    if mask[i] == 1:
                        yMinLeft = min(yMinLeft, pts_src[i][0][1])
                        yMaxLeft = max(yMaxLeft, pts_src[i][0][1])
                        xMinLeft = min(xMinLeft, pts_src[i][0][0])
                        xMaxLeft = max(xMaxLeft, pts_src[i][0][0])
                xMinLeft = xMinLeft + 2 * size
                yMinLeft = yMinLeft + 3 * size

                # Regular sampling grid inside that box.
                interval = detectDensity * size
                searchWidth = int((xMaxLeft - xMinLeft) / interval - 2)
                searchHeight = int((yMaxLeft - yMinLeft) / interval - 2)
                searchNum = searchWidth * searchHeight
                demo_src = np.float32([[0] * 2] * searchNum).reshape(-1, 1, 2)
                for i in range(searchWidth):
                    for j in range(searchHeight):
                        demo_src[i + j * searchWidth][0][0] = xMinLeft + i * interval + size
                        demo_src[i + j * searchWidth][0][1] = yMinLeft + j * interval + size

                # Where the same grid lands in the current crop.
                demo_dst = cv2.perspectiveTransform(demo_src, M)

                # Side-by-side canvas used for the visualisations.
                heightO = max(self.img1.shape[0], img2.shape[0])
                # BUG FIX: combined width is img1 + img2 (was img1 twice).
                widthO = self.img1.shape[1] + img2.shape[1]
                output = np.zeros((heightO, widthO, 3), dtype=np.uint8)
                output[0:self.img1.shape[0], 0:self.img1.shape[1]] = self.img1
                output[0:img2.shape[0], img2.shape[1]:] = img2[:]
                output2 = output  # alias kept from the original implementation

                # Compute SIFT descriptors at the fixed grid positions.
                kp_src = [cv2.KeyPoint(demo_src[i][0][0], demo_src[i][0][1], size)
                          for i in range(demo_src.shape[0])]
                kp_dst = [cv2.KeyPoint(demo_dst[i][0][0], demo_dst[i][0][1], size)
                          for i in range(demo_dst.shape[0])]
                keypoints_image1, descriptors_image1 = sift.compute(self.img1, kp_src)
                keypoints_image2, descriptors_image2 = sift.compute(img2, kp_dst)

                diffLeft = []   # difference points in the reference crop
                diffRight = []  # corresponding points in the current crop

                for i in range(searchNum):
                    # Euclidean distance between the two 128-d descriptors.
                    difference = 0
                    for j in range(128):
                        d = abs(descriptors_image1[i][j] - descriptors_image2[i][j])
                        difference = difference + d * d
                    difference = math.sqrt(difference)

                    # Skip grid points mapped outside the current crop.
                    if (demo_dst[i][0][1] >= 0) and (demo_dst[i][0][0] >= 0):
                        if (difference <= shreshood) and (func == 1):
                            cv2.circle(output, (int(demo_src[i][0][0]), int(demo_src[i][0][1])), 1, (0, 255, 0), 2)
                            cv2.circle(output, (int(demo_dst[i][0][0] + width), int(demo_dst[i][0][1])), 1, (0, 255, 0), 2)
                        if difference > shreshood:
                            if func == 1:
                                cv2.circle(output, (int(demo_src[i][0][0]), int(demo_src[i][0][1])), 1, (0, 0, 255), 2)
                                cv2.circle(output, (int(demo_dst[i][0][0] + width), int(demo_dst[i][0][1])), 1, (0, 0, 255), 2)
                            if func == 2:
                                diffLeft.append([demo_src[i][0][0], demo_src[i][0][1]])
                                diffRight.append([demo_dst[i][0][0], demo_dst[i][0][1]])

                # Interactive visualisation of matching / differing points.
                if func == 1:
                    output = cv2.resize(output, (int(output.shape[1] * a), int(output.shape[0] * a)),
                                        interpolation=cv2.INTER_CUBIC)
                    cv2.imshow('show', output)
                    k = cv2.waitKey(0)
                    if k == 27:
                        cv2.destroyAllWindows()

                # Cluster the difference points and persist large clusters.
                if func == 2:
                    print('fun2聚类中...')
                    # Guard: MeanShift cannot sample from zero points.
                    outLeft = self.MeanShift(diffLeft, windowSize) if diffLeft else []

                    significant = False
                    outRight = []
                    if outLeft:
                        left = np.float32([[0] * 2] * len(outLeft)).reshape(-1, 1, 2)
                        for i in range(len(outLeft)):
                            left[i][0][0] = outLeft[i][0]
                            left[i][0][1] = outLeft[i][1]
                        # BUG FIX: transform once after `left` is fully
                        # filled (was recomputed each iteration on a
                        # partially-filled array).
                        right = cv2.perspectiveTransform(left, M)
                        outRight = [[right[i][0][0], right[i][0][1], outLeft[i][2]]
                                    for i in range(len(outLeft))]
                        # BUG FIX: consider every cluster instead of only
                        # the one at the stale loop index `i`.
                        significant = any(cluster[2] > 15 for cluster in outRight)

                    print('img3 before')
                    if significant:
                        # Mark the clusters on the side-by-side canvas.
                        for i in range(len(outRight)):
                            cv2.circle(output2, (int(outRight[i][0]) + width, int(outRight[i][1])),
                                       int(np.sqrt(outRight[i][2])) * 7, (255, 255, 0), 2)
                        img3 = img2
                        # Persist the evidence image and a violation record.
                        time_now = str(time.time())
                        name = 1
                        jpgName = str(time.time()) + type + '.jpg'
                        path = 'static/' + jpgName
                        cv2.imwrite(path, img3)
                        url = '/static/' + jpgName
                        mysql_connector().insert_violator_id(cur, connector, type, name, time_now, url)
                    # Restart the comparison window either way.
                    self.true_time = 0

        return result

    def get_frame(self, det, cap, choicenum, im, framenum, ret, cur, connector, length):
        """Run one frame through the selected detector and return it JPEG-encoded.

        Args:
            det: project Detector instance (ignored for choicenum 6).
            cap: capture handle (unused here, kept for interface parity).
            choicenum: 1..7 selecting the violation type to detect.
            im: current BGR frame.
            framenum: frame counter forwarded to the detectors.
            ret: frame-grab success flag from the capture.
            cur, connector: database cursor / connection.
            length: unused input placeholder (reassigned internally).

        Returns:
            JPEG bytes of the (possibly annotated) frame, or b'' when no
            frame was grabbed.
        """
        # Map the detection choice to the violation table/type name.
        type_map = {1: 'hat', 2: 'reflect', 3: 'phone', 4: 'fire',
                    5: 'smoking', 6: 'sundry', 7: 'sleep'}
        type = type_map.get(choicenum, 'unknown')
        if ret is not True:
            # BUG FIX: previously fell through to an unbound `jpeg`.
            return b''
        if choicenum == 6:
            try:
                print('sundry-----检测')
                # BUG FIX: forward the DB handles and type string — the call
                # was missing cur/connector/type and always raised TypeError.
                result = self.soundries_detect(im, framenum, cur, connector, type)
            except Exception:
                result = im
            ret, jpeg = cv2.imencode('.jpg', result)
        else:
            try:
                result_ = det.feedCap(im, framenum, choicenum)
                result = result_['frame']          # annotated frame
                nohadposes = result_['dectwarn']   # ids of detected violators
                length = len(nohadposes)
                print(length)
                # One violation row (with evidence image) per violator id.
                for name in nohadposes:
                    jpgName = str(time.time()) + type + '.jpg'
                    path = 'static/' + jpgName
                    cv2.imwrite(path, result)
                    url = '/static/' + jpgName
                    time_now = str(time.time())
                    mysql_connector().insert_violator_id(cur, connector, type, name, time_now, url)
            except Exception:
                # Detection failed: fall back to the raw frame.
                result = im
            # Draw the configured detection region on whatever we output.
            pointArray = global_val.get_pointArray('pointArray')
            cv2.line(result, tuple(pointArray[0]), tuple(pointArray[1]), (255, 0, 0), thickness=3)
            cv2.line(result, tuple(pointArray[0]), tuple(pointArray[2]), (255, 0, 0), thickness=3)
            cv2.line(result, tuple(pointArray[1]), tuple(pointArray[3]), (255, 0, 0), thickness=3)
            cv2.line(result, tuple(pointArray[2]), tuple(pointArray[3]), (255, 0, 0), thickness=3)
            ret, jpeg = cv2.imencode('.jpg', result)
        return jpeg.tobytes()

    def get_sundry_frame(self, choicenum, im, framenum, ret, cur, connector):
        """Run the sundry (newly-appeared-object) detector on one frame.

        Returns:
            JPEG bytes of the frame with the detection region drawn, or b''
            when no frame was grabbed / the choice is not the sundry detector.
        """
        type = 'sundry'
        if ret is not True or choicenum != 6:
            # BUG FIX: previously fell through to an unbound `jpeg`
            # (NameError) in both of these cases.
            return b''
        try:
            print('sundry-----检测')
            print(framenum)
            result = self.soundries_detect(im, framenum, cur, connector, type)
        except Exception:
            # Detection failed: fall back to the raw frame.
            result = im
        # Draw the configured detection region on whatever we output.
        pointArray = global_val.get_pointArray('pointArray')
        cv2.line(result, tuple(pointArray[0]), tuple(pointArray[1]), (255, 0, 0), thickness=3)
        cv2.line(result, tuple(pointArray[0]), tuple(pointArray[2]), (255, 0, 0), thickness=3)
        cv2.line(result, tuple(pointArray[1]), tuple(pointArray[3]), (255, 0, 0), thickness=3)
        cv2.line(result, tuple(pointArray[2]), tuple(pointArray[3]), (255, 0, 0), thickness=3)
        ret, jpeg = cv2.imencode('.jpg', result)
        return jpeg.tobytes()

    def get_speed_frame(self, det, cap, choicenum, im, framenum, ret, cur, connector, length):
        """Run speed estimation on one frame and return it JPEG-encoded.

        Returns:
            JPEG bytes of the (possibly annotated) frame, or b'' when no
            frame was grabbed.
        """
        # Calibration constants for the speed estimator — presumably
        # distances in metres and a km/h limit; TODO confirm against
        # speed_get's documentation.
        Dmax = 200
        Dmin = 10
        H = 4
        B0 = 100
        speed_limit = 40
        image = im
        self.speedcounter = speed_get(Dmin, Dmax, H, B0, image, speed_limit)
        if ret is not True:
            # BUG FIX: previously fell through to an unbound `jpeg`.
            return b''
        try:
            result_ = det.feedCap(im, framenum, choicement=9, speedget=self.speedcounter)
            result = result_['frame']  # annotated frame
        except Exception:
            # Detection failed: fall back to the raw frame.
            result = im
        ret, jpeg = cv2.imencode('.jpg', result)
        return jpeg.tobytes()

    def feed_up_image(self, path, choicenum):
        """Detect objects in an uploaded image, draw labelled boxes and save
        the annotated copy under static/.

        Args:
            path: file-system path of the uploaded image.
            choicenum: detector selection; 3 keeps only "cell phone" boxes.

        Returns:
            The generated file name inside static/, or None on failure.
        """
        try:
            image = cv2.imread(path)
            try:
                det = Detector(choicenum)
                line_thickness = None
                _, bboxes = det.detect(image)
                # line/font thickness scaled to the image size
                tl = line_thickness or round(
                    0.002 * (image.shape[0] + image.shape[1]) / 2) + 1
                # Boxes come as (top-left, bottom-right, class id, _).
                for (x1, y1, x2, y2, cls_id, _) in bboxes:
                    # For the phone detector keep only "cell phone" boxes.
                    if choicenum == 3 and cls_id not in ["cell phone"]:
                        continue
                    # Red for person / wrong clothing, green otherwise (BGR).
                    color = (0, 0, 255) if cls_id in ['person', 'other_clothes'] else (0, 255, 0)
                    c1, c2 = (x1, y1), (x2, y2)
                    cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
                    tf = max(tl - 1, 1)  # font thickness
                    t_size = cv2.getTextSize(cls_id, 0, fontScale=tl / 3, thickness=tf)[0]
                    c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                    cv2.rectangle(image, c1, c2, color, -1, cv2.LINE_AA)  # filled label background
                    cv2.putText(image, '{}'.format(cls_id), (c1[0], c1[1] - 2), 0, tl / 3,
                                [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
            except Exception:
                # Best effort: log instead of silently swallowing, then save
                # the unannotated image below.
                print(traceback.format_exc())
            name = str(time.time()) + '.jpg'
            path = 'static/' + name
            cv2.imwrite(path, image)
            return name
        except Exception:
            print('error')

    def feed_up_video(self, path, choicenum):
        """Run detection over an uploaded video, write an annotated AVI and
        convert it to MP4 for the browser.

        Args:
            path: file-system path of the uploaded video.
            choicenum: detector selection forwarded to Detector/feedCap.

        Returns:
            flask JSON response with the MP4 url and the last processed
            frame's violator ids, or None on failure.
        """
        try:
            det = Detector(choicenum)
            cap = cv2.VideoCapture(path)
            # Named constants instead of magic property indices 5/3/4.
            rate = cap.get(cv2.CAP_PROP_FPS)
            width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
            name = str(time.time())
            path = 'static/' + name + '.avi'
            out = cv2.VideoWriter(path, fourcc, int(rate), (int(width), int(height)), True)
            videonum = 1
            nohadposes = []
            while cap.isOpened():
                ret, im = cap.read()
                if not ret:
                    break
                try:
                    result_ = det.feedCap(im, videonum, choicenum)  # tracked detection
                    result = result_['frame']          # annotated frame
                    nohadposes = result_['dectwarn']   # ids of violators
                except Exception:
                    result = im
                out.write(result)
                videonum = videonum + 1
            # BUG FIX: release writer/capture BEFORE the conversion so the
            # AVI is fully flushed when ffmpeg reads it (they were released
            # only after ffmpeg.run).
            cap.release()
            out.release()
            # Convert the AVI to browser-friendly MP4.
            stream = ffmpeg.input(path)
            url2 = 'static/' + name + '.mp4'
            stream = ffmpeg.output(stream, url2)
            ffmpeg.run(stream)
            url2 = '/static/' + name + '.mp4'
            json_dict = {
                "res_path": url2,
                "res_text": nohadposes
            }
            return jsonify(json_dict)
        except Exception as e:
            print(e.args)
            print(traceback.format_exc())

    def get_borderframe_rect(self, det, cap, choicenum, im, framenum, ret, linedraw, cur, connector, length):
        """Detect border violations against a closed drawn region, record
        them in the DB and return the annotated frame JPEG-encoded."""
        # BGR color per line type: 0 = blue, 1 = green, 2 = red.
        colors = {0: (255, 0, 0), 1: (0, 255, 0), 2: (0, 0, 255)}
        try:
            result_ = det.feedCap(im, framenum, choicenum, linedraw.plot)  # tracked detection
            result = result_['frame']  # annotated frame
            # Close the shape: connect the last placed point back to the first.
            i = len(linedraw.plot['pointlist']) - 1
            type = linedraw.plot['typelist'][i]
            if type in colors:
                cv2.line(result, linedraw.plot['pointlist'][i], linedraw.plot['pointlist'][0],
                         colors[type], thickness=3)
            # One violation row (with evidence image) per warned (id, type).
            borderwarn = result_['borderwarn']
            for warn in borderwarn:
                jpgName = str(time.time()) + 'borderRect' + '.jpg'
                path = 'static/' + jpgName
                cv2.imwrite(path, result)
                url = '/static/' + jpgName
                print('boder-url:' + url)
                name = warn[0]
                type = warn[1]
                time_now = str(time.time())
                mysql_connector().insert_violator(cur, connector, type, name, time_now, url)
            ret, jpeg = cv2.imencode('.jpg', result)
        except Exception:
            # Detection failed: still draw the user's border on the raw frame.
            if len(linedraw.plot['linelist']):
                typeindex = 0
                type = None
                i = 0
                for i in range(1, len(linedraw.plot['pointlist'])):
                    if i in linedraw.plot['draw_index']:
                        type = linedraw.plot['typelist'][typeindex]
                        typeindex = typeindex + 1
                        if type in colors:
                            cv2.line(im, linedraw.plot['pointlist'][i],
                                     linedraw.plot['pointlist'][i - 1], colors[type], thickness=3)
                # BUG FIX: guard the closing segment — `type`/`i` were unbound
                # (NameError) when no segment had been drawn above.
                if type in colors:
                    cv2.line(im, linedraw.plot['pointlist'][i], linedraw.plot['pointlist'][0],
                             colors[type], thickness=3)
            result = im
            ret, jpeg = cv2.imencode('.jpg', result)
        return jpeg.tobytes()

    def get_borderframe_line(self, det, cap, choicenum, im, framenum, ret, linedraw, cur, connector, length):
        """Detect border violations against the drawn line segments, record
        them in the DB and return the annotated frame JPEG-encoded."""
        # BGR color per line type: 0 = blue, 1 = green, 2 = red.
        colors = {0: (255, 0, 0), 1: (0, 255, 0), 2: (0, 0, 255)}
        try:
            result_ = det.feedCap(im, framenum, choicenum, linedraw.plot)  # tracked detection
            result = result_['frame']  # annotated frame
            # One violation row (with evidence image) per warned (id, type).
            borderwarn = result_['borderwarn']
            for warn in borderwarn:
                jpgName = str(time.time()) + 'borderLine' + '.jpg'
                path = 'static/' + jpgName
                cv2.imwrite(path, result)
                url = '/static/' + jpgName
                print('boder-url:' + url)
                name = warn[0]
                type = warn[1]
                time_now = str(time.time())
                mysql_connector().insert_violator(cur, connector, type, name, time_now, url)
            ret, jpeg = cv2.imencode('.jpg', result)
        except Exception:
            # Detection failed: still draw the user's lines on the raw frame.
            if len(linedraw.plot['linelist']):
                typeindex = 0
                for i in range(1, len(linedraw.plot['pointlist'])):
                    if i in linedraw.plot['draw_index']:
                        type = linedraw.plot['typelist'][typeindex]
                        typeindex = typeindex + 1
                        if type in colors:
                            cv2.line(im, linedraw.plot['pointlist'][i],
                                     linedraw.plot['pointlist'][i - 1], colors[type], thickness=3)
            result = im
            ret, jpeg = cv2.imencode('.jpg', result)
        return jpeg.tobytes()


if __name__ == '__main__':
    # This module is intended to be imported by the Flask app; running it
    # directly does nothing (placeholder assignment only).
    a = 0