# -*- coding: utf-8 -*-
# coding='utf-8'
import os
import sys
import threading
import numpy as np
import time
import datetime
import json
import importlib
import logging
import shutil
import cv2
import random
from tqdm import tqdm
from PIL import Image
from torch.autograd import Variable
# from .ioutracker_videocam_hyf_new import track_iou

import matplotlib

matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
from torchvision import transforms

import torch
import torch.nn as nn

# Make the project root importable so the local `nets`/`common` packages resolve.
MY_DIRNAME = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(MY_DIRNAME, '..'))
from nets.model_main import ModelMain
from nets.yolo_loss import YOLOLoss
from common.utils import non_max_suppression, bbox_iou

# 20-entry color palette; NOTE(review): not referenced by the drawing code below.
cmap = plt.get_cmap('tab20b')
colors = [cmap(i) for i in np.linspace(0, 1, 20)]

logging.basicConfig(level=logging.WARN,
                    format="[%(asctime)s %(filename)s] %(message)s")

# Exactly one CLI argument is required: a params .py file whose
# TRAINING_PARAMS dict supplies the runtime configuration.
if len(sys.argv) != 2:
    logging.error("Usage: python test_images.py params.py")
    sys.exit()
params_path = sys.argv[1]
if not os.path.isfile(params_path):
    logging.error("no params file found! path: {}".format(params_path))
    sys.exit()
# Strip the ".py" suffix and import the module by name.
# NOTE(review): assumes the params file is on the import path — verify for nested paths.
config = importlib.import_module(params_path[:-3]).TRAINING_PARAMS
config["batch_size"] *= len(config["parallels"])

# Restrict visible GPUs to the configured device list (this script runs
# inference, despite the original "Start training" comment).
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, config["parallels"]))

is_training = False
# Load and initialize the YOLO detection network in evaluation mode.
net = ModelMain(config, is_training=is_training)
net.train(is_training)  # train(False) == eval(): freezes BN/dropout behavior

# Wrap for multi-GPU and move to CUDA.
net = nn.DataParallel(net)
net = net.cuda()

# Restore detector weights from the configured checkpoint; the checkpoint is
# mandatory because this script only runs inference.
if config["pretrain_snapshot"]:
    logging.info("load checkpoint from {}".format(config["pretrain_snapshot"]))
    state_dict = torch.load(config["pretrain_snapshot"])
    net.load_state_dict(state_dict)
else:
    raise Exception("missing pretrain_snapshot!!!")

# YOLOLoss instances double as output decoders, one per detection scale.
yolo_losses = []
for i in range(3):
    yolo_losses.append(YOLOLoss(config["yolo"]["anchors"][i],
                                config["yolo"]["classes"], (config["img_w"], config["img_h"])))

# Secondary attribute classifier loaded as a whole pickled model.
# NOTE(review): per the comment, trained for 3 attribute outputs — the
# sigmoid thresholding in video_process() relies on that.
model = torch.load('../weights/20_1.0000_1.0000.pkl')       # 0924  3 classes  all data set

model.eval()
# Sigmoid applied to the classifier logits before thresholding.
m = torch.nn.Sigmoid().cuda()

# Crop preprocessing for the attribute classifier: PIL image -> float tensor.
preprocess_img = transforms.Compose([
    transforms.ToTensor(),
    # normalize
])


def img_loader(PIL_img):
    """Convert a PIL image into a 3-channel float tensor.

    Applies the module-level `preprocess_img` transform; a single-channel
    (grayscale) result is replicated across three channels so the attribute
    classifier always receives an RGB-shaped input.
    """
    tensor = preprocess_img(PIL_img)
    if tensor.size(0) == 1:
        # Grayscale: stack the one channel three times.
        tensor = torch.cat((tensor, tensor, tensor), 0)
    return tensor


# Camera credentials for the RTSP source (unused while reading from a file).
user, pwd, ip, channel = "admin", "iotcam213", "10.15.199.182", 1

# cap_path = "rtsp://%s:%s@%s//Streaming/Channels/%d" % (user, pwd, ip, channel)  # HIKIVISION new version 2017
cap_path = '/home/iotcam/Pictures/video/6.mp4'
cap = cv2.VideoCapture(cap_path)

# `result` (latest detections) and `frame` (latest raw frame) are shared
# between the display thread and the inference thread.
# NOTE(review): `global` at module level is a no-op; the declarations inside
# the thread functions are what actually matter.
global result
global frame
frame = cap.read()[1]  # prime `frame` so video_process() has data at startup


def video_process():
    """Inference loop: periodically run YOLO detection on the shared `frame`,
    classify each detected crop with the attribute model, and publish the
    results through the shared `result` global.

    Each entry of `result` is [x1, y1, x2, y2, text, conf_str] in
    original-frame pixel coordinates, where `text` is a space-separated
    attribute label built from the classifier's three sigmoid outputs.
    Runs forever; intended to be started on a daemon-like worker thread.
    """
    frame_interval = 10
    global frame
    global result
    result = None
    while True:
        if frame_interval % 10 == 0:
            # Snapshot the shared frame for this round of processing.
            img = frame
            x = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # Bug fix: cv2.resize takes dsize as (width, height). The original
            # passed (img_h, img_w), silently swapping axes whenever
            # img_h != img_w and contradicting the pre_h/pre_w rescaling below.
            x = cv2.resize(x, (config["img_w"], config["img_h"]),
                           interpolation=cv2.INTER_LINEAR).astype(np.float32)
            x /= 255.0  # scale pixels to [0, 1]
            net_input = np.transpose(x, (2, 0, 1)).astype(np.float32)  # HWC -> CHW
            net_input = torch.from_numpy(net_input).unsqueeze(0).cuda()

            # Detection: run the YOLO net, decode the 3 scales, then NMS.
            with torch.no_grad():
                outputs = net(net_input)
                output_list = [yolo_losses[i](outputs[i]) for i in range(3)]
                output = torch.cat(output_list, 1)
                batch_detections = non_max_suppression(
                    output, config["yolo"]["classes"],
                    conf_thres=config["confidence_threshold"],
                    nms_thres=0.25)

            decs = []
            for detections in batch_detections:
                if detections is None:
                    continue
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    # Only classes 0 and 1 are forwarded to the attribute model.
                    if int(cls_pred) >= 2:
                        continue
                    # Rescale the box from network input size to the original frame.
                    ori_h, ori_w = img.shape[:2]
                    pre_h, pre_w = config["img_h"], config["img_w"]
                    box_h = ((y2 - y1) / pre_h) * ori_h
                    box_w = ((x2 - x1) / pre_w) * ori_w
                    y1 = (y1 / pre_h) * ori_h
                    x1 = (x1 / pre_w) * ori_w
                    y2 = y1 + box_h
                    x2 = x1 + box_w
                    # Clamp to frame bounds.
                    x1 = max(int(x1), 0)
                    y1 = max(int(y1), 0)
                    x2 = min(int(x2), ori_w)
                    y2 = min(int(y2), ori_h)

                    crop = img[y1:y2, x1:x2]  # img[height, width]
                    pil_crop = Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB))
                    tensor = img_loader(pil_crop).unsqueeze(0).cuda()

                    out, feature = model(tensor)
                    # Threshold each sigmoid output independently; pre[k] is boolean.
                    pre = (m(out) > 0.3).cpu().numpy().squeeze()
                    # Bug fix: the original eight-way chain tested pre[1] twice
                    # and never read pre[2], so four branches were unreachable
                    # and the third label token just mirrored pre[1]. Build the
                    # label from the three independent attribute bits instead.
                    text = '{} {} {}'.format(
                        'caqm' if pre[0] else 'wcaqm',
                        'cgz' if pre[1] else 'wcgz',
                        'cmj' if pre[2] else 'wcmj')
                    print(text)

                    decs.append([x1, y1, x2, y2, text, '%.2f' % conf])
            # Publish atomically-enough for the display thread (list rebind).
            result = decs
            frame_interval = 0

        frame_interval += 1


def video():
    """Display loop: read frames from `cap`, overlay the latest detections
    published by video_process() via the `result` global, and show them.

    Runs until the stream ends or the user presses 'q', then releases the
    capture and closes all OpenCV windows. Each `result` entry is
    [x1, y1, x2, y2, text, conf_str].
    """
    global frame
    global result
    while True:
        ret, frame = cap.read()
        # Bug fix: the original ignored `ret`; when the stream ends,
        # cap.read() returns (False, None) and cv2.imshow crashes on None.
        if not ret:
            break
        if result is not None:
            for x1, y1, x2, y2, text, conf in result:
                cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 5)
                font = cv2.FONT_HERSHEY_COMPLEX_SMALL
                cv2.putText(frame, text + str(conf), (x1, y1),
                            font, 3.5, (0, 0, 255), 4)

        cv2.namedWindow('test video', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('test video', 1000, 800)
        cv2.imshow('test video', frame)
        if cv2.waitKey(1) & 0xff == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()



def main():
    """Run the display loop and the inference loop on separate threads,
    blocking until both finish."""
    workers = [
        threading.Thread(target=video, name='play'),
        threading.Thread(target=video_process, name='show_result'),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


if __name__ == '__main__':
    # Entry point: spawns the display and inference threads.
    main()
