import os
from cv2 import cv2
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms

from VidSrc import  create_mousesleep_video
from utils.hardware import create_best_device
from utils.geometry import rescale_boxes
from utils.nms import non_max_suppression
from utils.paths import load_classes, is_contain_file, list_avi_paths, group_avi_by_channel, scan_ch_prefix, safe_mkdir
from utils.datasets import pad_to_square, resize
from model.factory import create_yolov3

class Config(object):
    """Bundle of YOLOv3 inference settings handed to the model factory.

    Plain value holder; every constructor argument is stored verbatim as an
    attribute of the same name.
    """

    def __init__(self, model_def, img_size, class_path, fg_thresh, nms_thresh, weights):
        self.model_def = model_def    # path to the darknet .cfg model definition
        self.img_size = img_size      # square network input size in pixels
        self.class_path = class_path  # path to the class-name list file
        self.fg_thresh = fg_thresh    # foreground/confidence threshold for detections
        self.nms_thresh = nms_thresh  # overlap threshold for non-max suppression
        self.weights = weights        # path to .weights (darknet) or .pth (checkpoint)

    def __repr__(self):
        # Debug-friendly representation; added for inspection, not parsed anywhere.
        return ('%s(model_def=%r, img_size=%r, class_path=%r, '
                'fg_thresh=%r, nms_thresh=%r, weights=%r)' % (
                    type(self).__name__, self.model_def, self.img_size,
                    self.class_path, self.fg_thresh, self.nms_thresh, self.weights))

def cooked_img(img, img_size):
    """Turn an HxWxC image array into a square model-input tensor.

    The image is converted to a torch tensor, zero-padded to a square,
    and resized to ``img_size`` x ``img_size``.
    """
    tensor = transforms.ToTensor()(img)
    squared, _ = pad_to_square(tensor, 0)
    return resize(squared, img_size)

def mtrace_one_channel(ch_path, weights, ch_err=None):
    """Run YOLOv3 detection over every video clip of one DVR channel.

    Frames are read batch-by-batch from the channel's clips; for each frame
    with detections, one tab-separated line with per-class bounding boxes
    (x, y, w, h) is appended to <ch_path>/locations.txt.

    Args:
        ch_path: directory holding the channel's video clips; the output
            locations.txt is written here.
        weights: path to model weights — darknet ``.weights`` or a torch
            state-dict checkpoint (anything else).
        ch_err: optional directory; frames that failed detection (or a ~0.1%
            random sample) are dumped here as PNGs for iterative labeling.
    """
    # Configuration
    print('configuration')
    np.random.seed(1)  # fixed seed so per-class draw colors are reproducible
    output_f = os.path.join(ch_path, 'locations.txt')
    fps = 1
    nsec_per_clip = 3600
    model_def = 'config/yolov3-2classes.cfg'
    img_size = 416
    class_path = 'class_names/mouse_2classes.names'
    fg_thresh = 0.8
    nms_thresh = 0.4
    batch_size = 8
    cfg = Config(model_def, img_size, class_path, fg_thresh, nms_thresh, weights)
    w_trace = "trace"  # set to None to disable the live preview window

    # Initialize the tracking modules
    print('load video clips.')
    h_vid = create_mousesleep_video(ch_path, fps=fps, nsec_per_clip=nsec_per_clip)
    device = create_best_device()
    model = create_yolov3(cfg, device).to(device)
    if cfg.weights.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(cfg.weights)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(cfg.weights))
    print('model loading complete.')
    model.eval()
    classes = load_classes(cfg.class_path)
    class_num = len(classes)
    color_list = [[np.random.randint(0, 255) for j in range(3)] for i in range(class_num)]

    if w_trace is not None:
        cv2.namedWindow(w_trace)
    i_sec = 0
    i_batch = 0
    cmd = 0
    # Open the output only after the model loaded successfully: a crash during
    # loading must not leave a truncated locations.txt behind, because the
    # caller treats an existing locations.txt as "channel already traced".
    h_loc = open(output_f, 'w')
    try:
        # Tracking loop
        while True:
            # Collect a batch of frames (and their clip/frame indices)
            img_bgr_list = []
            clip_ilist = []
            fr_ilist = []
            for i in range(batch_size):
                clip_ilist.append(h_vid.curr_clip_idx)
                fr_ilist.append(h_vid.curr_fr_idx)
                img = h_vid.imread(mode='bgr')
                if img is not None:
                    img_bgr_list.append(img)
                else:
                    break
            if h_vid.is_beyond_clipindex:
                break
            if len(img_bgr_list) < 1:
                continue
            img_bgr = img_bgr_list[0]
            if w_trace is not None:
                cmd = cv2.waitKey(1)
                if img_bgr is None or cmd == 27:  # ESC closes the preview
                    break
            original_shape = img_bgr.shape[:2]  # (H, W), channel dim dropped
            img_rgb_list = [cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for img in img_bgr_list]
            img_stack = torch.stack([cooked_img(img, img_size) for img in img_rgb_list])
            img_stack = img_stack.to(device)
            # Predict ROIs
            with torch.no_grad():
                predictions = model(img_stack)  # raw per-frame detections
            predictions = non_max_suppression(predictions, cfg.fg_thresh, cfg.nms_thresh)  # drop overlapping ROIs
            # Map ROIs back to the original frame size and record them
            rois = np.zeros((batch_size, class_num, 4), dtype=int)
            is_predict_success = False
            for offset, prediction in enumerate(predictions):
                if prediction is not None:
                    sample_preds = rescale_boxes(prediction, img_size, original_shape)
                    # NOTE(review): .numpy() assumes rescale_boxes returns a CPU
                    # tensor — confirm; on a CUDA device .cpu() would be required.
                    results = sample_preds.numpy()
                    # Iterate over every detected target; last per-class box wins.
                    for x1, y1, x2, y2, obj_prob, cls_prob, label in results:
                        w = x2 - x1
                        h = y2 - y1
                        if int(label) == 0:
                            is_predict_success = True
                        rois[offset, int(label)] = np.array([x1, y1, w, h], dtype=int)
                    h_loc.write('clip\t%d\tfr\t%d\t' % (clip_ilist[offset], fr_ilist[offset]))
                    for i in range(class_num):
                        x, y, w, h = rois[offset, i]
                        h_loc.write('%s\t%d\t%d\t%d\t%d\t' % (classes[i], x, y, w, h))
                    h_loc.write('\n')
            if predictions[0] is not None:
                # Draw the first frame's boxes for the preview window
                for i in range(class_num):
                    roi = rois[0, i]
                    cv2.rectangle(img_bgr, rec=roi, color=color_list[i])
            i_sec += h_vid.play_spf * batch_size
            i_batch += 1
            msg = '#%d in-clip fr, #%.2f sec' % (h_vid.curr_fr_idx, i_sec)
            if w_trace is not None:
                if i_batch % 100 == 0:
                    print(msg)
                leak_img = np.random.randint(1000)  # 0.1% probability to leak an image for iterative labeling
                # NOTE(review): 'i_batch % 10' suppresses dumps on every 10th
                # batch — looks like throttling; confirm intent.
                if ch_err is not None and i_batch % 10 and (not is_predict_success or leak_img >= 999):
                    cv2.imwrite(os.path.join(ch_err, 'fr-%.2f.png' % i_sec), img_bgr)
                cv2.putText(img_bgr, msg, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, color=[0, 255, 0])
                cv2.imshow(w_trace, img_bgr)
    finally:
        # Guarantee the output file and preview window are released even when
        # the tracking loop raises.
        h_loc.close()
        if w_trace is not None:
            cv2.destroyAllWindows()

if __name__ == '__main__':
    root_path = '/home/youngway/data/mtrace/20201202_c57-males-dvr1'
    # weights = 'weights/mouse_yolov3-2classes-augmented_iter1-38.pth'
    weights = 'weights/mouse_yolov3-2classes-augmented_iter3-10.pth'
    # Group the raw recordings into one sub-directory per DVR channel.
    for channel in scan_ch_prefix(root_path):
        channel_dir = os.path.join(root_path, str(channel))
        group_avi_by_channel(root_path, channel, channel_dir)
    # Trace every channel directory that does not yet have a locations.txt.
    root_err = '/home/youngway/data/mtrace/preview/20201202_c57-males-dvr1/iter4'
    for ch_dir in list_avi_paths(root_path):
        if is_contain_file(ch_dir, 'locations.txt'):
            continue
        print('tracing %s' % ch_dir)
        channel_name = os.path.split(ch_dir)[-1]
        err_dir = os.path.join(root_err, channel_name)
        safe_mkdir(err_dir)
        mtrace_one_channel(ch_dir, weights, err_dir)
