# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import os
import random
import re
from collections import deque
from operator import itemgetter
from pprint import pprint

import cv2
import mmcv
import numpy as np
import torch
from mmcv import Config, DictAction
from mmcv.parallel import collate, scatter
from tqdm import tqdm

from demo import detector_v3, config
from mmaction.apis import init_recognizer
from mmaction.datasets.pipelines import Compose

FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL
FONTSCALE = 4
THICKNESS = 4
LINETYPE = 1

EXCLUED_STEPS = [
    'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',
    'PyAVDecode', 'RawFrameDecode'
]


def parse_args():
    """Build and parse the command-line options for the long-video demo."""
    parser = argparse.ArgumentParser(
        description='MMAction2 predict different labels in a long video demo')
    add = parser.add_argument
    # Positional arguments (order matters for callers).
    add('config', help='test config file path')
    add('checkpoint', help='checkpoint file/url')
    add('video_path', help='video file/url')
    add('label', help='label file')
    add('out_file', help='output result file in video/json')
    # Optional tuning knobs.
    add('--input-step', type=int, default=1,
        help='input step for sampling frames')
    add('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
    add('--threshold', type=float, default=0.01,
        help='recognition score threshold')
    add('--stride', type=float, default=0,
        help=('the prediction stride equals to stride * sample_length '
              '(sample_length indicates the size of temporal window from '
              'which you sample frames, which equals to '
              'clip_len x frame_interval), if set as 0, the '
              'prediction stride is 1'))
    add('--cfg-options', nargs='+', action=DictAction, default={},
        help='override some settings in the used config, the key-value pair '
             'in xxx=yyy format will be merged into config file. For example, '
             "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    add('--label-color', nargs='+', type=int, default=(255, 255, 255),
        help='font color (B, G, R) of the labels in output video')
    add('--msg-color', nargs='+', type=int, default=(128, 128, 128),
        help='font color (B, G, R) of the messages in output video')
    add('--json', action='store_true', help='保存json文件')
    return parser.parse_args()


def show_results_video(result_queue,
                       text_info,
                       thr,
                       msg,
                       frame,
                       video_writer,
                       frame_size,
                       label_color=(255, 255, 255),
                       msg_color=(128, 128, 128)):
    """Draw the latest recognition results (or a status message) on ``frame``
    and append the frame to ``video_writer``.

    If ``result_queue`` holds fresh results, labels scoring at least ``thr``
    are rendered and cached; otherwise the previously cached ``text_info``
    is re-drawn, and if there is none, ``msg`` is shown instead.

    Returns the (possibly refreshed) ``text_info`` mapping of
    location -> label text.
    """
    frame_width, frame_height = frame_size
    # Scale the font with the frame width; 3840 px (4K) is the reference.
    font_scale = max(int((frame_width / 3840) * 4), 1)
    thickness = font_scale
    if result_queue:
        text_info = {}
        results = result_queue.popleft()
        for rank, (selected_label, score) in enumerate(results):
            if score < thr:
                # Results are sorted descending, so nothing further passes.
                break
            location = (frame_width - 250 * font_scale,
                        40 * font_scale + rank * 20 * font_scale)
            text = selected_label + ': ' + str(round(score, 2))
            text_info[location] = text
            cv2.putText(frame, text, location, FONTFACE, font_scale,
                        label_color, thickness, LINETYPE)
    elif text_info:
        # No new results: keep showing the last known labels.
        for location, text in text_info.items():
            cv2.putText(frame, text, location, FONTFACE, font_scale,
                        label_color, thickness, LINETYPE)
    else:
        # Nothing recognized yet: show the status message.
        cv2.putText(frame, msg,
                    (frame_width - 250 * font_scale, 40 * font_scale),
                    FONTFACE, font_scale, msg_color, thickness, LINETYPE)
    video_writer.write(frame)
    return text_info


def get_results_json(result_queue, text_info, thr, msg, ind, out_json):
    """Record the recognition result for frame ``ind`` into ``out_json``.

    When fresh results are queued, labels scoring at least ``thr`` become a
    rank -> "label: score" dict that is both cached and stored; otherwise
    the cached ``text_info`` is reused, falling back to the plain ``msg``
    string when no result has ever been produced.

    Returns the updated ``(text_info, out_json)`` pair.
    """
    if result_queue:
        labels = {}
        for rank, (selected_label, score) in enumerate(result_queue.popleft(),
                                                       start=1):
            if score < thr:
                # Results arrive sorted by score, so we can stop here.
                break
            labels[rank] = selected_label + ': ' + str(round(score, 2))
        text_info = labels
        out_json[ind] = labels
    elif text_info:
        out_json[ind] = text_info
    else:
        out_json[ind] = msg
    return text_info, out_json


def show_results(model, data, label, args, video_path, output_filepath, bg_path):
    """Run motion detection plus sliding-window action recognition on a video.

    First runs ``detector_v3.motion_detect``, which dumps per-frame motion
    info (an ``is_detected`` flag and bounding boxes) to ``bg_path`` as JSON.
    Then the video is read frame by frame: frames are buffered into a
    temporal window, ``inference`` is run on full windows, and the outcome
    is either rendered into a video or collected into a per-frame JSON dict,
    depending on ``output_filepath``'s extension.

    Args:
        model: recognizer returned by ``init_recognizer``.
        data (dict): template inference dict (``img_shape``, ``modality``...).
        label (list[str]): class names.
        args: parsed options; must provide ``sample_length``, ``input_step``,
            ``threshold``, ``test_pipeline``, ``label_color``, ``msg_color``.
        video_path (str): input video file.
        output_filepath (str): ``.json`` suffix selects JSON mode, anything
            else selects annotated-video mode.
        bg_path (str): temporary path for the motion-detector JSON dump
            (deleted after being read).

    Returns:
        tuple: ``(result, bg, bg_dict)`` where ``result`` is the per-frame
        recognition output with background labels filtered out, ``bg`` is a
        per-frame 0/1 motion flag list, and ``bg_dict`` is the raw detector
        output keyed by 1-based frame index.
    """
    detector_v3.motion_detect(video_path,
                              area_thres=2,
                              slope_thres=0.5,
                              num_thres=5,
                              relevance=0.8,
                              relevance_time=0.5,
                              # noise_proportion=0.00022,  # previous value
                              noise_proportion=0.00032,
                              show_frame=False,
                              show_log=False,
                              output=bg_path,
                              skip_time=1)
    with open(bg_path, mode='r', encoding='utf-8') as f:
        bg_dict = json.loads(f.read())
        # 1 if motion was detected on that frame, else 0 (keys are 1-based).
        bg = [1 if bg_dict[f'{i}']['is_detected'] else 0 for i in range(1, len(bg_dict) + 1)]
    os.remove(bg_path)

    # Window of frames fed to the model; single-slot queue of latest results.
    frame_queue = deque(maxlen=args.sample_length)
    result_queue = deque(maxlen=1)

    cap = cv2.VideoCapture(video_path)
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    msg = 'Preparing action recognition ...'
    text_info = {}
    out_json = {}
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    frame_size = (frame_width, frame_height)

    ind = 0
    # Only create a writer in video mode.
    video_writer = None if output_filepath.endswith('.json') \
        else cv2.VideoWriter(output_filepath, fourcc, fps, frame_size)
    prog_bar = mmcv.ProgressBar(num_frames)
    backup_frames = []

    while ind < num_frames:
        ind += 1
        prog_bar.update()
        ret, frame = cap.read()
        if frame is None:
            # drop it when encounting None
            continue
        # Store a BGR->RGB copy of the frame for inference.
        backup_frames.append(np.array(frame)[:, :, ::-1])
        if ind == args.sample_length:
            # provide a quick show at the beginning
            frame_queue.extend(backup_frames)
            backup_frames = []
        elif ((len(backup_frames) == args.input_step
               and ind > args.sample_length) or ind == num_frames):
            # pick a frame from the backup
            # when the backup is full or reach the last frame
            chosen_frame = random.choice(backup_frames)
            backup_frames = []
            frame_queue.append(chosen_frame)

        ret, scores = inference(model, data, args, frame_queue)

        if ret:
            # Keep the top-5 (label, score) pairs, sorted by score.
            num_selected_labels = min(len(label), 5)
            scores_tuples = tuple(zip(label, scores))
            scores_sorted = sorted(
                scores_tuples, key=itemgetter(1), reverse=True)
            results = scores_sorted[:num_selected_labels]
            result_queue.append(results)

        if output_filepath.endswith('.json'):
            text_info, out_json = get_results_json(result_queue, text_info,
                                                   args.threshold, msg, ind,
                                                   out_json)
        else:
            text_info = show_results_video(result_queue, text_info,
                                           args.threshold, msg, frame,
                                           video_writer, frame_size, args.label_color,
                                           args.msg_color)

    cap.release()
    cv2.destroyAllWindows()
    if video_writer is not None:
        video_writer.release()

    # Convert the 1-based frame dict into a plain list.
    result = out_json
    result = [result[i] for i in range(1, len(result) + 1)]
    result_changed = []
    # Filter out labels predicted as background.
    for r in result:
        if type(r) == str:
            r_dict = {1: r}
        else:
            r_dict = r
        new_r_dict = {}
        for key in r_dict:
            if not re.match('.*background.*', r_dict[key]):
                new_r_dict[len(new_r_dict) + 1] = r_dict[key]
        result_changed.append(new_r_dict)
    result = result_changed
    return result, bg, bg_dict


def show_results_combine(model, data, label, args, video_path, output_filepath, is_json):
    """Combine motion detection and recognition into the final outputs.

    Runs ``show_results`` for per-frame predictions, smooths the background
    mask with ``get_final_result``, then always writes
    ``<output_filepath>.json`` (per-frame results plus per-segment averaged
    scores) and, when ``is_json`` is False, also renders an annotated video
    to ``<output_filepath>_out.mp4``. If no action segment is found, the
    input video and all generated outputs are deleted.

    Args:
        model / data / label / args: see ``show_results``.
        video_path (str): input video file.
        output_filepath (str): output path prefix (no extension).
        is_json (bool): True -> JSON output only, no annotated video.
    """
    result_path = f'{output_filepath}_result.json'
    bg_path = f'{output_filepath}_bg.json'
    video_output_path = f'{output_filepath}_out.mp4'
    json_output_path = f'{output_filepath}.json'
    window_size = int(25 * 4)  # sliding-window size in frames
    bg_len = int(window_size * 0.5)  # background quota inside one window

    result, bg, bg_dict = show_results(model, data, label, args, video_path, result_path, bg_path)
    final_result_mask = get_final_result(result, bg, window_size, bg_len)

    def get_total_result(local_result):
        # Average each class's score over a segment, sorted descending.
        total_result = {}
        length = len(local_result)
        for local_r in local_result:
            if type(local_r) == str:
                local_dict = {1: local_r}
            else:
                local_dict = local_r.copy()

            for key in local_dict:
                try:
                    class_, score = local_dict[key].split(': ')
                except ValueError:
                    # Frames from before recognition started carry a plain
                    # message with no "label: score" structure.
                    class_ = 'background'
                    score = 0
                score = float(score)
                if class_ in total_result:
                    total_result[class_] += score
                else:
                    total_result[class_] = score
        total_result_list = [[key, total_result[key] / length] for key in total_result]
        total_result_list = sorted(total_result_list, key=lambda x: x[1], reverse=True)
        return total_result_list

    output_dict = {'frame': {}, 'segment': {}}
    start_i = 0
    for i in range(len(result)):
        if type(result[i]) == str:
            r = {1: result[i]}
        else:
            r = result[i].copy()
        bbox = bg_dict[f'{i + 1}']['bbox']
        if final_result_mask[i]:
            # Frame judged as background: no label, no boxes.
            r = {1: 'detected_background'}
            bbox = []

        output_dict['frame'][i + 1] = {
            'result': r,
            'rectangle': bbox
        }

        if not final_result_mask[i]:
            # Not background: if the previous frame was background,
            # a new action segment starts here.
            if i != 0 and final_result_mask[i - 1]:
                start_i = i
        if final_result_mask[i]:
            # Background: if the previous frame was action, the segment
            # ended on the previous frame — record its averaged scores.
            if i != 0 and not final_result_mask[i - 1]:
                output_dict['segment'][f'{start_i + 1}-{i}'] = get_total_result(result[start_i:i])

    if not is_json:
        # Also render the annotated output video.
        cap = cv2.VideoCapture(video_path)
        num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        frame_size = (frame_width, frame_height)
        # Cap the display resolution of the rendered video.
        target_size = (min(frame_width, 1080), min(frame_height, 720))
        video_writer = cv2.VideoWriter(video_output_path, fourcc, fps, target_size)
        frame_idx = -1
        ret = True
        prog_bar = mmcv.ProgressBar(num_frames)
        while ret:
            frame_idx += 1
            prog_bar.update()
            ret, frame = cap.read()
            if frame is None:
                # drop it when encounting None
                continue
            if type(result[frame_idx]) != str:
                txt_dict = result[frame_idx].copy()
            else:
                txt_dict = result[frame_idx]

            if final_result_mask[frame_idx]:
                txt_dict = {1: 'detected_background'}
            if type(txt_dict) == str:
                txt_dict = {1: txt_dict}

            # Draw motion bounding boxes only on non-background frames.
            if not final_result_mask[frame_idx]:
                for bbox in bg_dict[f'{frame_idx + 1}']['bbox']:
                    x, y, w, h = bbox
                    frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            frame = cv2.resize(frame, target_size)
            combine_show_results_video(txt_dict, frame, video_writer, target_size, args.label_color)
        video_writer.release()
    with open(json_output_path, mode='w', encoding='utf-8') as f:
        f.write(json.dumps(output_dict, indent=2))

    if output_dict['segment'] == {}:
        # No action found: clean up the input video and every output,
        # then drop the (now empty) containing directory.
        os.remove(video_path)
        if not is_json:
            os.remove(video_output_path)
        os.remove(json_output_path)
        os.removedirs(os.path.abspath(os.path.join(video_path, "..")))
    else:
        print(f'\n [{video_path}] 中有动作！')


def combine_show_results_video(result,
                               frame,
                               video_writer,
                               frame_size,
                               label_color=(255, 255, 255)):
    """Render the rank -> text entries of ``result`` onto ``frame`` along the
    left edge and append the frame to ``video_writer``."""
    frame_width, frame_height = frame_size
    # Font scales with frame width; 3840 px (4K) is the reference width.
    font_scale = max(int((frame_width / 3840) * 4), 1)
    thickness = font_scale
    for rank, text in result.items():
        location = (40, 40 * font_scale + rank * 20 * font_scale)
        cv2.putText(frame, text, location, FONTFACE, font_scale,
                    label_color, thickness, LINETYPE)
    video_writer.write(frame)


def get_final_result(result, bg, window_size, bg_len):
    """Smooth the per-frame motion flags into a background mask.

    A window of ``window_size`` frames slides over ``bg`` (1 = motion
    detected). Whenever more than ``bg_len`` frames inside the window are
    flagged, the window is treated as background; on transitions between
    background and action, the boundary region is re-labelled so segments
    start and end cleanly.

    Returns a list of booleans, one per frame, True where the frame is
    considered background.
    """
    n_frames = len(result)
    assert n_frames == len(bg), f'{len(result)} != {len(bg)}, 出错了！'
    mask = [False] * n_frames
    shift_step = window_size - bg_len
    in_background = True  # the clip is assumed to open on background
    handled_tail = False
    pos = bg_len
    while pos <= n_frames - shift_step:
        if sum(bg[pos - bg_len:pos + shift_step]) > bg_len:
            # Window judged background.
            mask[pos - bg_len:pos + shift_step] = [True] * window_size
            if not in_background:
                # Leaving an action: keep the leading part labelled action.
                mask[pos - bg_len:pos - bg_len + shift_step] = [False] * shift_step
                pos += shift_step
            in_background = True
        else:
            # Window judged action.
            mask[pos - bg_len:pos + shift_step] = [False] * window_size
            if in_background:
                # Entering an action: keep the leading part as background.
                mask[pos - bg_len:pos] = [True] * bg_len
                pos += bg_len
            in_background = False
        # Stop once the extra end-anchored pass has been taken.
        if handled_tail:
            break
        # Schedule one last pass anchored at the very end of the clip.
        if pos > n_frames - shift_step:
            handled_tail = True
            pos = n_frames - shift_step
        pos += 1

    return mask


def inference(model, data, args, frame_queue):
    """Run one recognition forward pass over the buffered frame window.

    Args:
        model: recognizer; its parameters' device decides CPU vs GPU input.
        data (dict): template inference dict; ``img_shape`` is filled in
            lazily from the first available frame.
        args: must provide ``sample_length``, ``test_pipeline``, ``device``
            and ``stride``.
        frame_queue (deque): buffered frames; inference only runs when it
            holds exactly ``args.sample_length`` frames.

    Returns:
        tuple: ``(True, scores)`` on success, ``(False, None)`` when the
        window is not yet full.
    """
    if len(frame_queue) != args.sample_length:
        # Do no inference when there is no enough frames
        # print('\n Do no inference when there is no enough frames!')
        return False, None

    cur_windows = list(np.array(frame_queue))
    if data['img_shape'] is None:
        # Record (height, width) once from the first frame.
        data['img_shape'] = frame_queue[0].shape[:2]

    cur_data = data.copy()
    cur_data['imgs'] = cur_windows
    cur_data = args.test_pipeline(cur_data)
    cur_data = collate([cur_data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # Move the collated batch onto the model's GPU.
        cur_data = scatter(cur_data, [args.device])[0]
    with torch.no_grad():
        scores = model(return_loss=False, **cur_data)[0]

    if args.stride > 0:
        # Advance the window by stride * sample_length frames.
        pred_stride = int(args.sample_length * args.stride)
        for _ in range(pred_stride):
            frame_queue.popleft()

    # for case ``args.stride=0``
    # deque will automatically popleft one element

    return True, scores


def main():
    """CLI entry point: build the recognizer and process every input video."""
    args = parse_args()

    args.device = torch.device(args.device)

    cfg = Config.fromfile(args.config)
    cfg.merge_from_dict(args.cfg_options)

    model = init_recognizer(cfg, args.checkpoint, device=args.device)
    # Template dict handed to the test pipeline for every inference call.
    data = dict(img_shape=None, modality='RGB', label=-1)
    with open(args.label, 'r') as f:
        label = [line.strip() for line in f]

    # prepare test pipeline from non-camera pipeline
    cfg = model.cfg
    sample_length = 0
    pipeline = cfg.data.test.pipeline
    pipeline_ = pipeline.copy()
    for step in pipeline:
        if 'SampleFrames' in step['type']:
            # Frames are sampled manually in the capture loop, so drop the
            # SampleFrames step but keep its window geometry.
            sample_length = step['clip_len'] * step['num_clips']
            data['num_clips'] = step['num_clips']
            data['clip_len'] = step['clip_len']
            pipeline_.remove(step)
        if step['type'] in EXCLUED_STEPS:
            # remove step to decode frames
            pipeline_.remove(step)
    test_pipeline = Compose(pipeline_)

    assert sample_length > 0
    args.sample_length = sample_length
    args.test_pipeline = test_pipeline

    if not os.path.exists(args.out_file):
        os.makedirs(args.out_file)

    video_paths = []

    if args.video_path.endswith('.list'):
        # A .list file holds one entry per line; only the first
        # space-separated field is used, as a path relative to the
        # "videos" directory next to the list file.
        prefix = '/'.join(args.video_path.split('/')[:-1])
        prefix += '/videos'
        with open(args.video_path, mode='r', encoding='utf-8') as f:
            for line in f.readlines():
                video_path = line.strip().split(' ')[0]
                video_paths.append(
                    [f'{prefix}/{video_path}',
                     f'{args.out_file}/{video_path.split("/")[-1]}'])
    else:
        video_paths.append(
            [args.video_path, f'{args.out_file}/{args.video_path.split("/")[-1]}'])

    for video_path, output_file in tqdm(video_paths):
        show_results_combine(model, data, label, args, video_path, output_file, args.json)
        if args.json:
            print(f'\n json saved at {output_file}.json')
        else:
            print(f'\n video saved at {output_file}')


def run(video_path, out_file):
    """Programmatic entry point with a fixed SlowFast configuration.

    Builds an ``mmcv.Config`` carrying the same fields ``parse_args`` would
    produce, then performs the same model/pipeline setup and per-video loop
    as ``main``.

    Args:
        video_path (str): video file/url, or a ``.list`` file of videos.
        out_file (str): output directory for the result video/json files.
    """
    # NOTE(review): this mirrors ``main`` almost line for line — consider
    # extracting the shared setup/processing into one helper.
    args = Config(
        {
            'config': 'configs/recognition/slowfast/slowfast_model_config.py',
            'checkpoint': 'configs/recognition/slowfast/best_top1_acc_epoch_20.pth',
            'video_path': video_path,
            'label': 'configs/recognition/slowfast/label.txt',
            'out_file': out_file,
            'input_step': 5,
            'device': 'cuda:0',
            'threshold': 0.2,
            'stride': 0.3,
            'cfg_options': {},
            'label_color': [0, 0, 255],
            'msg_color': [128, 128, 128],
            'json': config.is_only_json,
        }
    )

    args.device = torch.device(args.device)

    cfg = Config.fromfile(args.config)
    cfg.merge_from_dict(args.cfg_options)

    model = init_recognizer(cfg, args.checkpoint, device=args.device)
    # Template dict handed to the test pipeline for every inference call.
    data = dict(img_shape=None, modality='RGB', label=-1)
    with open(args.label, 'r') as f:
        label = [line.strip() for line in f]

    # prepare test pipeline from non-camera pipeline
    cfg = model.cfg
    sample_length = 0
    pipeline = cfg.data.test.pipeline
    pipeline_ = pipeline.copy()
    for step in pipeline:
        if 'SampleFrames' in step['type']:
            # Frames are sampled manually in the capture loop, so drop the
            # SampleFrames step but keep its window geometry.
            sample_length = step['clip_len'] * step['num_clips']
            data['num_clips'] = step['num_clips']
            data['clip_len'] = step['clip_len']
            pipeline_.remove(step)
        if step['type'] in EXCLUED_STEPS:
            # remove step to decode frames
            pipeline_.remove(step)
    test_pipeline = Compose(pipeline_)

    assert sample_length > 0
    args.sample_length = sample_length
    args.test_pipeline = test_pipeline

    if not os.path.exists(args.out_file):
        os.makedirs(args.out_file)

    video_paths = []

    if args.video_path.endswith('.list'):
        # A .list file holds one entry per line; only the first
        # space-separated field is used, as a path relative to the
        # "videos" directory next to the list file.
        prefix = '/'.join(args.video_path.split('/')[:-1])
        prefix += '/videos'
        with open(args.video_path, mode='r', encoding='utf-8') as f:
            for line in f.readlines():
                video_path = line.strip().split(' ')[0]
                video_paths.append(
                    [f'{prefix}/{video_path}',
                     f'{args.out_file}/{video_path.split("/")[-1]}'])
    else:
        video_paths.append(
            [args.video_path, f'{args.out_file}/{args.video_path.split("/")[-1]}'])

    for video_path, output_file in tqdm(video_paths):
        show_results_combine(model, data, label, args, video_path, output_file, args.json)
        if args.json:
            print(f'\n json saved at {output_file}.json')
        else:
            print(f'\n video saved at {output_file}')


# Script entry point: parse CLI args and process the given video(s).
if __name__ == '__main__':
    main()
