import argparse

import torch.backends.cudnn as cudnn

from models.experimental import *
from utils.datasets import *
from utils.utils import *
import torch
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import math
import json
import requests
from utils.classify import *

# Base URL of the result-upload server (hard-coded LAN address) used by upload_det_result.
I_P = 'http://' + '192.168.1.169' + ':8686'
# Epoch seconds of the last uploaded snapshot; throttles upload_det_result to one POST per 600 s.
lastSaveTime = None
def upload_det_result(c_type, c_time, deviceId, img_name, image):
    """Save a detection snapshot to disk and POST its path to the result server.

    NOTE(review): the bare ``return`` below disables the entire function —
    everything after it is dead code (apparently a deliberate kill switch).
    Also note the payload hard-codes deviceId=11 and type=100, ignoring the
    ``c_type`` and ``deviceId`` parameters — confirm whether that is intended.
    """
    return
    global lastSaveTime
    # Throttle: at most one upload every 600 seconds.
    if lastSaveTime is None or time.time() - lastSaveTime > 600:
        lastSaveTime = time.time()
        mustpath = '/home/docker/docker/file/img/result/'
        image_path_ = mustpath + img_name + '.jpg'
        cv2.imwrite(image_path_, image)
        # cv2.imwrite fails silently; verify the file actually landed on disk.
        if not os.path.exists(image_path_):
            print("save img error!!!!!")
            return
        url = I_P + '/fim/api/alg/result'
        # image_path_ = result_path + '/' + img_name + '.jpg'
        device_payload = json.dumps({'deviceId': 11, 'type': 100, 'time': c_time, 'resultImgPathList': [image_path_]})  # frontend requires resultImgPathList to be a list
        headers = {'Content-Type': 'application/json'}
        upload_result = requests.post(url, headers=headers, data=device_payload)
        print(upload_result.text)
        # NOTE(review): assert is stripped under python -O; upload_result.raise_for_status() would be safer.
        assert upload_result.status_code == 200


class parseseq:
    """Bounded, progressively down-sampled history of appended values.

    Keeps at most ``maxl`` samples.  While fewer than ``maxl`` values have
    been stored, every appended value is kept.  Once the buffer overflows,
    every other stored sample is dropped and the sampling stride (``base``)
    doubles, so the buffer always spans the whole sequence at decreasing
    resolution.  Within a stride window, the newest value overwrites the
    buffer's tail slot so the most recent sample is always retained.
    """

    def __init__(self, maxl=10) -> None:
        # Round maxl down to the nearest odd number so that dropping every
        # other element of data[:-1] plus the tail keeps the count consistent.
        # (Fixed: removed a stray dead `pass` statement.)
        maxl = (maxl - 1) // 2 * 2 + 1
        self.maxl = maxl  # maximum number of retained samples (odd)
        self.data = []    # retained samples, oldest first
        self.base = 1     # current sampling stride (doubles on each compaction)
        self.cur = 0      # position inside the current stride window

    def append(self, v):
        """Record value *v*, compacting the buffer when it exceeds maxl."""
        if self.cur == 0:
            # Start of a stride window: this sample is kept.
            self.data.append(v)
            if len(self.data) > self.maxl:
                # Overflow: keep every other sample plus the newest one,
                # and double the stride.
                ndata = self.data[:-1:2]
                ndata.append(self.data[-1])
                self.data = ndata
                self.base *= 2
        else:
            # Mid-window: the newest value replaces the tail slot.
            self.data[-1] = v
        self.cur += 1
        if self.cur == self.base:
            self.cur = 0



# Class ids treated as "known" — NOTE(review): only referenced from commented-out code; verify before removing.
known = [10]
# Class names of interest — NOTE(review): appears unused in this file.
need = ['truck', 'person']

# Input (width, height) of crops fed to the second-stage classifier.
Classify_size = (32, 32)
def classifyModel(use_gpu=True):
    """Load the second-stage clothes classifier checkpoint and set eval mode.

    NOTE(review): torch.load unpickles a full model object — only ever point
    this at trusted checkpoint files.
    """
    net_weight = './clothes_classify/model/efficientnet-b0.pth'
    # On CPU, remap all tensors off the GPU; on GPU, keep the default mapping.
    map_location = None if use_gpu else torch.device('cpu')
    model_ft = torch.load(net_weight, map_location=map_location)
    # .load_state_dict(state_dict)

    if use_gpu:
        model_ft = model_ft.cuda()
    model_ft.eval()
    return model_ft


def cropBoxOf(box, width, height):
    """Return a padded square crop [x1, y1, x2, y2] centered on *box*.

    The square side is the larger box dimension scaled by 1.3 plus a 30 px
    margin; the result is clamped to the image (width x height) and may
    therefore end up non-square at the borders.
    """
    side = max(box[2] - box[0], box[3] - box[1])
    cx = (box[2] + box[0]) / 2
    cy = (box[3] + box[1]) / 2
    half = (side * 1.3 + 30) / 2
    return [
        int(max(0, cx - half)),
        int(max(0, cy - half)),
        int(min(width, cx + half)),
        int(min(height, cy + half)),
    ]


cnt = 0


# def correctLabel(model, insize, detect_results, image, use_gpu=True):
#     t1 = time.time()
#     image = image.copy()
#     result_boxes, result_scores, result_classid = detect_results
#     # print(result_classid)
#     cboxes = [cropBoxOf(result_boxes[i], image.shape[1], image.shape[0])
#               for i in range(len(result_boxes)) if result_classid[i] == 0]
#     if len(cboxes) == 0:
#         return detect_results
#     # print(cboxes)
#     cimages = [image[box[1]:box[3], box[0]:box[2]] for box in cboxes]
#     oinputs = [cv2.resize(cutout, insize)[:, :, ::-1].transpose(2, 0, 1)
#                for cutout in cimages]  # scale to input size
#     tr = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
#     # print(oinputs)
#     outputs = np.array(oinputs)
#     precc = outputs.copy().astype(np.float)/255.0
#     # precc = precc.reshape(1, precc.shape[0], precc.shape[1], precc.shape[2])
#     inputs = tr.forward(torch.Tensor(precc))
#     if use_gpu:
#         inputs = Variable(inputs.cuda())
#     else:
#         inputs = Variable(inputs)
#     outputs = model(inputs)
#     # loss = criterion(outputs, labels)
#     _, pred = torch.max(outputs.data, 1)
#     # print(pred)
#     idx = -1
#     for i in range(len(result_classid)):
#         if result_classid[i] != 0:
#             continue
#         idx += 1
#         # score = math.fabs(outputs.data[0][0] - outputs.data[0][1])
#         # print(result_classid[i],  pred, score)
#         # if pred == 1:
#         #     cnt += 1
#         #     cv2.imwrite(str(cnt)+'.jpg', cimages[i])
#         result_classid[i] = float(pred[idx])
#         # print(detect_results)
#     return result_boxes, result_scores, result_classid




def classImg(prediction, path, shape):
    """Debug visualization of raw (pre-NMS) predictions, per FPN level and anchor.

    For each of the 3 detection levels and each of the 3 anchors it draws the
    predicted box centers as a deformed grid plus per-cell confidence marks,
    and writes one .bmp per (level, anchor) named after the input image.
    NOTE(review): assumes *prediction* is the raw YOLO head output laid out
    level-by-level, anchor-by-anchor, row-major — confirm against the model.
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    pred = prediction[0].cpu()
    print(len(pred))

    # baseStep = shape[2] * shape[3] // (32 * 32)
    # step = [baseStep * 16, baseStep * 4, baseStep]
    start = 0  # running offset of the current level inside pred
    # BGR color constants for the per-cell marks (c_blk/c_white only used by dead code below).
    c_white = np.array([255, 255, 255])
    c_red = np.array([0, 0, 255])
    c_green = np.array([0, 255, 0])
    c_blk = np.array([0, 0, 0])
    colors = [(255,0,0), (0,255,0), (0,0,255)]  # one grid color per anchor
    for i in range(3):
        # end = start + (step[i]) * 3

        # One white canvas per anchor, sized like the network input (shape is NCHW).
        imgs = [np.ones([shape[2], shape[3], 3], np.uint8)*255 for _ in range(3)]
        # Grid dimensions for this level: base stride 32, doubled resolution per level.
        t_row = shape[2]//32 * (2**(2-i))
        t_col = shape[3]//32 * (2**(2-i))
        step = t_row * t_col  # cells per anchor at this level

        def posofrowcol(row, col, anchorid):
            # Predicted (x, y) center of the cell at (row, col) for this anchor.
            id = start + row * t_col + col + anchorid*(t_row * t_col)
            return int(pred[id][0]), int(pred[id][1])

        if 1:
            for anchorid in range(3):
                img = imgs[anchorid]
                # Connect neighboring cell centers to show the grid deformation.
                for row in range(t_row):
                    for col in range(t_col):

                        if row > 0:
                            cv2.line(img, posofrowcol(row ,col, anchorid), posofrowcol(row-1, col, anchorid), colors[anchorid])
                        if col > 0:
                            cv2.line(img, posofrowcol(row ,col, anchorid), posofrowcol(row, col-1, anchorid), colors[anchorid])


                # Mark each cell: green pixel when objectness < 0.4, black dot otherwise.
                for j in range(start + anchorid*step, start + (anchorid+1)*step):
                    box = pred[j]
                    ibox = box[:4].int()
                    # print(int(box[0]),  int(box[1]))
                    # cv2.putText(img, str(j - start), (int(box[0]), int(box[1])), 0, 0.3, [0, 0, 0])
                    if (box[4] < 0.4):
                        img[int(box[1])][int(box[0])] = c_green
                    else:
                        # img[int(box[1])][int(box[0])] = c_red
                        cv2.circle(img, (int(box[0]), int(box[1])), 3, [0, 0, 0], cv2.FILLED)
                    continue  # NOTE(review): the color-blend code below is unreachable (debug toggle)
                    box[5:] *= box[4]
                    color = (1-box[4])*c_white + box[5]*c_red + box[6]*c_green
                    img[int(box[1])][int(box[0])] = color
            # cv2.imshow("", img)
                cv2.imwrite(Path(path).name + str(i) + "_anchor_{}.bmp".format(anchorid), img)
        start += 3*step
    # Sanity check: the three levels must exactly account for every prediction row.
    assert(start == len(pred))
    pass


def makeEmptyDir(out):
    """Recreate *out* as an empty directory, wiping any previous contents."""
    already_there = os.path.exists(out)
    if already_there:
        shutil.rmtree(out)  # drop the old folder and everything inside it
    os.makedirs(out)  # fresh, empty output folder


def saveBoxes(img, boxes):
    """Crop a context region around each (x1, y1, x2, y2) box.

    Each box is padded by 200 px on every side and clamped to the image
    bounds; returns the list of cropped sub-images (views into *img*).
    """
    img_h, img_w = img.shape[0], img.shape[1]
    regions = []
    for bx in boxes:
        left = int(max(0, bx[0] - 200))
        top = int(max(0, bx[1] - 200))
        right = int(min(img_w, bx[2] + 200))
        bottom = int(min(img_h, bx[3] + 200))
        regions.append(img[top:bottom, left:right])
    return regions


def is_rect_intersect(bbox_1, bbox_2):
    """Return True when two (xmin, ymin, xmax, ymax) boxes overlap or touch."""
    x_overlap = bbox_1[0] <= bbox_2[2] and bbox_2[0] <= bbox_1[2]
    y_overlap = bbox_1[1] <= bbox_2[3] and bbox_2[1] <= bbox_1[3]
    return x_overlap and y_overlap

def haveBoth(det):
    """Return True when detections contain both class 0 and class 1.

    The class id is read from the second-to-last column of each row.
    """
    class_ids = det[:, -2]
    return (0 in class_ids) and (1 in class_ids)

def haveOne(det):
    """Return True when detections contain class 0 or class 1.

    The class id is read from the second-to-last column of each row.
    """
    class_ids = det[:, -2]
    return (0 in class_ids) or (1 in class_ids)

def haveCross(det):
    """Return True when any class-1 box intersects any class-0 box.

    Each row of *det* is (x1, y1, x2, y2, ..., class, ...) with the class id
    in the second-to-last column; boxes are compared with is_rect_intersect.
    """
    person_boxes = [row[:4] for row in det if row[-2] == 0]
    helmet_boxes = [row[:4] for row in det if row[-2] == 1]

    for hbox in helmet_boxes:
        for pbox in person_boxes:
            if is_rect_intersect(hbox, pbox):
                return True
    return False

def report(nowashimgs, bout):
    """Dump "truck not washed" snapshots to ./inference/not_wash.

    Each image is written as <epoch>_<index>_<bout>.jpg, where *bout* tags
    the event (truck movement direction flag from the caller).
    """
    out_dir = './inference/not_wash'
    stamp = str(int(time.time()))
    for idx, frame in enumerate(nowashimgs):
        cv2.imwrite(out_dir + '/{}_{}_{}.jpg'.format(stamp, idx, bout), frame)



def detect(save_img=False, bexit = False):
    """Main inference loop: run the detector over opt.source and save snapshots.

    Reads all configuration from the module-level ``opt`` namespace (set by
    argparse in ``__main__``).  *save_img* toggles drawing/saving annotated
    frames; *bexit* makes a stream run return after a bounded number of
    iterations (used when rotating between sources from list.txt).
    """
    # Ring buffers (length 10): per-frame flag for "had a relevant detection"
    # and the matching original frame, indexed by totalcnt below.
    recent = [0] * 10
    recentimg = [None] * 10

    # report not wash
    # State for the "truck not washed" state machine.  NOTE(review): the
    # machine itself is currently disabled by an `if 0:` further down.
    truckmissthresh = 15  # seconds without a truck before the event is closed
    state = 0  # 0 no truck 1 truck
    trucktimes = []  # [epoch_seconds, truck_x_center] samples while a truck is present
    washcnt = 0  # frames in which washing (class 0) was seen during the event
    nowashimgs = parseseq(10)  # down-sampled history of frames without washing


    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    # Treat webcams, streaming URLs and stream-list files as live sources.
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('rtmp') or source.startswith('http') or source.endswith('.txt')
    weights2 = './weights/yolov5s.pt'  # NOTE(review): unused — the model2 path is commented out below
    # Initialize
    device = torch_utils.select_device(opt.device)
    # if os.path.exists(out):
    #     shutil.rmtree(out)  # delete output folder
    try:
        os.makedirs(out)  # make new output folder
    except:
        pass  # best-effort: the folder may already exist
    half = device.type != 'cpu'  # half precision only supported on CUDA
    half = False  # NOTE(review): FP16 is force-disabled here, overriding the line above

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    # model2 = attempt_load(weights2, map_location=device)
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = 1
    if classify:
        # modelc = classifyModel(0)  # initialize
        modelc = getClassifyModel(20)  # from utils.classify; 20 presumably an algorithm id — TODO confirm
        # Scratch folders for classifier debug crops (wiped on every run).
        makeEmptyDir('./0')
        makeEmptyDir('./1')
        makeEmptyDir('./2')

        makeEmptyDir('./0-1')
        makeEmptyDir('./1-0')

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
        # Output folders for the various result categories (best-effort).
        for path in [
            './inference/res',
            './inference/orig',
            './inference/orig_missing_positive',
            './inference/orig_false_positive',
            './inference/orig_sub_img',
            './inference/not_wash',
            ]:
            try:
                os.makedirs(path)
            except:
                pass
    else:
        # save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    # names = ["wash" for _ in range(8)]
    # names[7] = 'truck'
    print("=========", names)
    # Fixed colors for the first 8 classes, random colors for any remaining ones.
    colors = [[255, 0, 0], [0, 0, 255], [0, 255, 0], [255, 255, 0], [255, 0, 255], [0, 255, 255], [255, 0, 0], [0, 0, 255], ]
    colors.extend(
        [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names)-len(colors))])

    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    icnt = 0  # monotonically increasing snapshot counter (unique filenames)
    previmg = None  # previous raw frame, for duplicate-frame detection
    totalcnt = -1  # frame index modulo 10 (cursor into the ring buffers)
    exitcnt = 0  # iterations since this call started (rotation budget)
    for path, img, im0s, vid_cap in dataset:
        # print("get new iter")
        # time.sleep(0.1)

        exitcnt += 1
        # In rotation mode, leave this source after ~10 iterations, extended
        # by 5 per recent frame that still had detections.
        if bexit and exitcnt > 10 + sum(recent)*5:
            # print("try close")
            sys.stdout.flush()
            dataset.close()
            # print("close")
            sys.stdout.flush()
            return
        # Skip frames identical to the previous one (stalled stream feed).
        if len(im0s) == 1 and dataset.mode != 'image':
            assert(len(im0s[0].shape) == 3)
            if previmg is not None and previmg.shape == im0s[0].shape:
                comparison = previmg == im0s[0]
                equal_arrays = comparison.all()

                if equal_arrays:
                    print("si")
                    time.sleep(0.3)
                    continue
            previmg = im0s[0].copy()
        # print(' ')
        # print("prepare input")
        totalcnt = (totalcnt+1) % 10
        sys.stdout.flush()
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = torch_utils.time_synchronized()
        # print("infer")
        sys.stdout.flush()
        pred = model(img, augment=opt.augment)[0]
        # pred2 = model2(img, augment=opt.augment)[0]
        # classImg(pred, path, img.shape)

        # Apply NMS
        # print("nms")
        sys.stdout.flush()
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms, merge=False)
        # pred2 = non_max_suppression(pred2, 0.3, opt.iou_thres, classes=7, agnostic=opt.agnostic_nms, merge=True)
        # for i, p2 in enumerate(pred2):
        #     if pred[i] is None:
        #         pred[i] = p2
        #     elif pred[i] is not None and p2 is not None:
        #         pred[i] = torch.cat((pred[i], p2), 0)
        t2 = torch_utils.time_synchronized()

        # Apply Classifier
        # Second-stage classifier refines/overrides the detector's class ids in-place.
        if classify:
            pass
            # modelc = None
            # apply_classifier(pred, modelc, Classify_size, img, im0s,  Path(path).name)
            modelc.apply_classifier(pred, Classify_size, img, im0s,  Path(path).name)
        # continue
        if classify:
            pass

        # Process detections

        for i, det in enumerate(pred):  # detections per image
            have_one = False
            have_both = False
            have_p = False
            have_tr = False
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            # print(s)
            sys.stdout.flush()
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            origImg = im0.copy()  # pristine frame, before boxes are drawn on im0

            # report not wash
            # Truck-wash state machine — disabled (`if 0:`), kept for reference.
            if 0:
                def posoftruck():
                    # x-center of the first truck (class 1) box, if any
                    for objc in det:
                        if objc[-2] == 1:
                            return (objc[0] + objc[2])/2
                if det is not None and len(det):
                    clss = det[:, -2]
                else:
                    clss = []
                if state == 0 and 1 in clss:
                    # Truck just appeared: open a new event.
                    state = 1
                    trucktimes.append([int(time.time()), posoftruck()])
                    washcnt = 0
                    if 0 in clss: washcnt += 1
                    else: nowashimgs.append(origImg)
                elif state == 1:
                    if 1 in clss:
                        trucktimes.append([int(time.time()), posoftruck()])
                        if 0 in clss:
                            washcnt += 1
                        else:
                            nowashimgs.append(origImg)

                    else:
                        # state 1, no truck
                        print(int(time.time()) - trucktimes[-1][0], ' seconds gone!')
                        if int(time.time()) > trucktimes[-1][0] + truckmissthresh:
                            # end of this truck

                            # Fewer than 10 "washed" frames: report the event with a direction flag.
                            if washcnt < 10: report(nowashimgs.data, trucktimes[0][1] < trucktimes[-1][1] - imgsz / 100.0)
                            state = 0
                            trucktimes = []
                            washcnt = 0
                            nowashimgs = parseseq(10)
                        else:
                            if 0 in clss: washcnt += 1

            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                to_save_box = det[:, :4]
                # Print results
                if haveOne(det): have_one = True
                if haveCross(det): have_both = True

                for c in det[:, -2].unique():
                    n = (det[:, -2] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string
                    if names[int(c)] == 'person':
                        have_p = True
                    if names[int(c)] == 'truck':
                        have_tr = True
                    # if names[int(c)] == 'wash':
                    #     save = True

                # Write results
                for *xyxy, conf, cls, objconf in det:
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * 5 + '\n') % (cls, *xywh))  # label format

                    if save_img or view_img:  # Add bbox to image
                        # Label text: "<class> <objectness%> <class-conf%>"
                        label = str(int(cls)) + " " + str(int(objconf.item()*100)) + " " + str(int(conf.item() /objconf.item() *100))
                        # label = None
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
                        # if not int(cls) in known:
                        #     save = True

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if 1 and (view_img or dataset.mode == 'video'):
                cv2.imshow(' ', im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    pass  # NOTE(review): the quit key is currently a no-op

            # Save results (image with detections)
            if 1:
                if (dataset.mode == 'images' or dataset.mode == 'image'):
                    if 1: cv2.imwrite(save_path, im0)
                elif dataset.mode == 'video':
                    pass
                else:
                    # Stream mode: snapshot-based saving with ring-buffer heuristics.
                    if False and vid_path != save_path:  # new video — branch disabled by `False`
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fourcc = 'XVID'  # output video codec
                        # fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        fps = 10
                        # w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        # h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        prev = str(int(time.time()))
                        vid_writer = cv2.VideoWriter(prev + 'out.mp4', cv2.VideoWriter_fourcc(*fourcc), fps, (1280, 720))
                        orig_writer = cv2.VideoWriter(prev + 'orig.mp4', cv2.VideoWriter_fourcc(*fourcc), fps, (1280, 720))


                    # Record whether this frame had a relevant detection.
                    if have_one:
                        recent[totalcnt] = 1
                    else:
                        recent[totalcnt] = 0

                    # miss_positive: a recent frame had a detection but this one does not.
                    miss_positive = False
                    if sum(recent) > 0:
                        if have_one == False:
                            miss_positive = True
                        # save = True

                    # A lone detection 5 frames back with nothing since looks
                    # like a false positive — archive that frame separately.
                    previd = (totalcnt + 5)%10
                    if sum(recent) == 1  and recent[previd] == 1:
                        orig, prev = recentimg[previd]
                        cv2.imwrite('./inference/orig_false_positive/{}.jpg'.format(prev), orig)
                        # for envid in range(10):
                        #     if envid


                    if 1 and (have_one or miss_positive):
                        img_name, img_time = time.strftime(str(20)+'%Y%m%d%H%M%S'), time.strftime('%Y-%m-%d %H:%M:%S')  # name format: algorithm id (20) + timestamp

                        # vid_writer.write(im0)
                        # orig_writer.write(origImg)

                        icnt += 1
                        prev = str(int(time.time())) + '_' + str(icnt)

                        if have_both: cv2.imwrite('./inference/res/{}.jpg'.format(prev), im0)
                        # cv2.imwrite('./inference/res/{}.jpg'.format(img_name), im0)
                        # if save: upload_det_result(20, img_time, 11, img_name, im0)
                        # no need to save whole image of miss_positive
                        if have_both and not miss_positive: cv2.imwrite('./inference/orig/{}.jpg'.format(prev), origImg)
                        if miss_positive:
                            cv2.imwrite('./inference/orig_missing_positive/{}.jpg'.format(prev), origImg)

                        # to_save_imgs = saveBoxes(origImg, to_save_box)
                        recentimg[totalcnt] = origImg, prev
                        # for sid, to_save_img in enumerate(to_save_imgs):
                        #     cv2.imwrite('./inference/orig_sub_img/{}_sub{}.jpg'.format(prev, sid), to_save_img)
                        #     if miss_positive:
                        #         cv2.imwrite('./inference/orig_missing_positive/{}_sub{}.jpg'.format(prev, sid), to_save_img)


    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin' and not opt.update:  # MacOS
            os.system('open ' + save_path)
    dataset.close()
    # print("closed on last")
    sys.stdout.flush()
    # print('Done. (%.3fs)' % (time.time() - t0))
    sys.stdout.flush()

def testparse():
    """Manual sanity check for parseseq: print its state after every append, then quit."""
    seq = parseseq(10)
    for value in range(100):
        seq.append(value)
        print(seq.data, seq.base, seq.cur)
    exit()

if __name__ == '__main__':
    # testparse()

    # Command-line configuration; the resulting `opt` namespace is read
    # globally by detect().
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str,
                        default= './weights/' +
                        # 'best_helmet_clothes_person.pt',
                        # 'good_helmet_aomen.pt',
                        'best.pt',
                        help='model.pt path(s)')
    # file/folder, 0 for webcam
    parser.add_argument('--source', type=str,
                        default=
                        # 'streams.txt',
                        # f'http://hls01open.ys7.com/openlive/5c8f159fce8941a8a1e9a1899a74b844.hd.m3u8',
                        # f'http://hls01open.ys7.com/openlive/0ff4d509fd054d91bbcf172b800fa7ce.hd.m3u8',
                        # f"rtmp://rtmp01open.ys7.com/openlive/dd7b738edca541d48acab74a833d4776.hd",
                        # "rtmp://rtmp01open.ys7.com/openlive/7b5d34519cf24674a0cb39a5be53d778.hd",
                        # "rtsp://admin:1qaz2wsx@60.12.233.54:5016/h264/ch1/main/av_stream",
                        "rtsp://admin:1qaz2wsx@60.12.233.54:5016/Streaming/Channels/101",
                        # "C:/Users/fcwl/Downloads/Robbery_Accident_Fire_Database/Robbery_Accident_Fire_Database2/Fire",
                        # f"./test",
                        # '8fa5ab15-e5c0-4957-9eea-ecdf2a0f5548-B.mp4',
                        help='source')
    parser.add_argument('--output', type=str, default='inference/output', help='output folder')  # output folder
    parser.add_argument('--img-size', type=int, default=960, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', default = True, help='save results to *.txt')
    parser.add_argument('--classes', nargs='+', type=int,
    # default=[1, 2],
    help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference', default = False)
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()
    print(opt)

    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov3-spp.pt']:
                detect()
                create_pretrained(opt.weights, opt.weights)
        else:
            # NOTE(review): detect(1) passes 1 as save_img (truthy), and the
            # exit() right after makes the source-rotation loop below
            # unreachable — presumably a debug override; confirm intent.
            detect(1)
            exit()

            # Dead code: round-robin over the sources listed in list.txt,
            # running each with a bounded iteration budget (bexit=True).
            i = 0
            while True:
                f =  open('list.txt', 'r')
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
                opt.source = sources[i % len(sources)]
                detect(1, bexit = True)
                # time.sleep(5)
                i += 1