from repnet import get_repnet_model
from tqdm import tqdm
import cv2
import numpy as np
import requests
import math
import os

# When True, every kept frame is also written to /raceai/data/debug_file.mp4
# inside read_video() for visual inspection.
DEBUG = False

# from scipy.stats import mode # noqa
# test_param = {
#     'pyr_scale': 0.5,
#     'levels': 3,
#     'winsize': 15,
#     'iterations': 3,
#     'poly_n': 5,
#     'poly_sigma': 1.1,
#     'flags': cv2.OPTFLOW_LK_GET_MIN_EIGENVALS
# }
# flow = cv2.calcOpticalFlowFarneback(pre_frame, frame_gray, None, **test_param)
# mag, ang = cv2.cartToPolar(flow[:, :, 0], flow[:, :, 1], angleInDegrees=True)
# move_sense = ang[mag > 5]
# move_mode = mode(move_sense)[0]
# if 135 < move_mode <= 225:
#     keep_flag = True


# HSV bounds for the color tracker in read_video().
# OpenCV convention: H in [0, 180), S and V in [0, 255].
# Red wraps around the hue axis, so it needs two ranges (0-10 and 156-180).
lower_red_1 = np.array([0, 43, 46])
upper_red_1 = np.array([10, 255, 255])
lower_red_2 = np.array([156, 43, 46])
upper_red_2 = np.array([180, 255, 255])

lower_orange = np.array([11, 43, 46])
upper_orange = np.array([25, 255, 255])

lower_yellow = np.array([26, 43, 46])
upper_yellow = np.array([34, 255, 255])

lower_green = np.array([35, 43, 46])
upper_green = np.array([77, 255, 255])

lower_cyan = np.array([78, 43, 46])
upper_cyan = np.array([99, 255, 255])

lower_blue = np.array([100, 43, 46])
upper_blue = np.array([124, 255, 255])

lower_purple = np.array([125, 43, 46])
upper_purple = np.array([155, 255, 255])

# Black / white / gray are distinguished by S and V, not hue.
lower_black = np.array([0, 0, 0])
upper_black = np.array([180, 255, 46])

lower_white = np.array([0, 0, 221])
upper_white = np.array([180, 30, 255])

lower_gray = np.array([0, 0, 46])
upper_gray = np.array([180, 43, 220])


def cal_rect_points(w, h, box):
    """Convert a rectangle to absolute pixel corners.

    A box whose first two coordinates are < 1.0 and last two are <= 1.0
    is treated as fractions of the frame size (w, h); any other box is
    passed through unchanged as absolute pixel coordinates.

    Returns:
        tuple: (x1, y1, x2, y2)
    """
    is_normalized = box[0] < 1.0 and box[1] < 1.0 and box[2] <= 1.0 and box[3] <= 1.0
    if not is_normalized:
        return box[0], box[1], box[2], box[3]
    left = int(w * box[0])
    top = int(h * box[1])
    right = int(w * box[2])
    bottom = int(h * box[3])
    return left, top, right, bottom


def read_video(
        video_filename, width=224, height=224,
        rot=None, black_box=None, focus_box=None, focus_box_repnum=1,
        progress_cb=None, args=None, dev=False):
    """Read a video and keep only the frames selected by the enabled filter.

    Two mutually exclusive filters can be enabled through ``args``:

    * ``rmstill_frame_enable`` -- drop frames that differ too little from
      the previous frame (abs-diff + binary threshold + optional
      erode/dilate denoising; area measured globally or per-contour).
    * ``color_tracker_enable`` -- vote over a circular buffer on how much
      of a selected HSV color is present, and keep frames while the vote
      sits between a lower and an upper bound.

    Args:
        video_filename (str): path of the video to read.
        width (int): output width of each kept frame.
        height (int): output height of each kept frame.
        rot (float|None): optional rotation angle in degrees applied to
            each kept frame around its center.
        black_box: region (normalized or pixel coords, see
            cal_rect_points) blacked out before filtering, or None.
        focus_box: region cropped out of each frame before filtering,
            or None.
        focus_box_repnum (int): if > 1, tile the focus crop that many
            times horizontally and vertically before resizing.
        progress_cb: optional callable receiving a 0-100 float, invoked
            every 200 frames and once at the end.
        args (dict|None): filter parameters; missing keys fall back to
            the defaults visible below.
        dev (bool): also collect debug binary frames / area scores.

    Returns:
        tuple: (frames ndarray, rounded fps, still_frames list of
        (frame_idx, None), binframes list, points list).
    """
    # A fresh dict per call: a mutable default argument would be shared
    # across calls.
    args = {} if args is None else args
    # .get with a False default instead of args[...]: an empty/partial
    # args dict no longer raises KeyError.
    rmstill_enable = args.get('rmstill_frame_enable', False)
    color_enable = args.get('color_tracker_enable', False)

    cap = cv2.VideoCapture(video_filename)
    fps = cap.get(cv2.CAP_PROP_FPS)

    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # The old desc interpolated the builtin `len` by mistake, printing
    # "<built-in function len>" in the progress bar.
    pbar = tqdm(total=n_frames, desc=f"Getting frames from {video_filename} ...")

    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if black_box is not None:
        black_x1, black_y1, black_x2, black_y2 = cal_rect_points(w, h, black_box)
    if focus_box is not None:
        focus_x1, focus_y1, focus_x2, focus_y2 = cal_rect_points(w, h, focus_box)
        # From here on w/h describe the cropped region, so the area
        # thresholds below are computed against the crop.
        w = focus_x2 - focus_x1
        h = focus_y2 - focus_y1

    rmstill_pre_frame = None
    if rmstill_enable:  # remove still frames
        area_rate_thres = args.get('rmstill_rate_threshold', 0.001)
        rmstill_bin_threshold = args.get('rmstill_bin_threshold', 20)
        rmstill_brightness_norm = args.get('rmstill_brightness_norm', False)
        rmstill_area_mode = args.get('rmstill_area_mode', 0)
        rmstill_noise_level = args.get('rmstill_noise_level', 1)
        rmstill_area_thres = math.ceil(area_rate_thres * w * h)
        print(f'rmstill: ({rmstill_area_thres}, {rmstill_bin_threshold}, {rmstill_noise_level})')

    color_pre_count = 0
    if color_enable:
        color_select = args.get('color_select', 8)
        color_rate_threshold = args.get('color_rate_threshold', 0.9)
        color_buffer_size = args.get('color_buffer_size', 12)
        color_lower_rate = args.get('color_lower_rate', 0.2)
        color_upper_rate = args.get('color_upper_rate', 0.8)
        color_track_direction = args.get('color_track_direction', 0)
        color_buffer = np.zeros((color_buffer_size, ))
        if color_track_direction > 0:
            color_direction_buffer = np.zeros_like(color_buffer)
        color_area_thres = math.ceil(color_rate_threshold * w * h)
        color_lower_value = int(color_buffer_size * color_lower_rate)
        color_upper_value = int(color_buffer_size * color_upper_rate)
        print(f'color_tracker: ({color_area_thres}, {color_lower_value}, {color_upper_value})')
        # HSV bounds per color_select value; red (0) is handled separately
        # below because its hue range wraps around 180.
        color_bounds = {
            1: (lower_orange, upper_orange),
            2: (lower_yellow, upper_yellow),
            3: (lower_green, upper_green),
            4: (lower_cyan, upper_cyan),
            5: (lower_blue, upper_blue),
            6: (lower_purple, upper_purple),
            7: (lower_black, upper_black),
            8: (lower_white, upper_white),
            9: (lower_gray, upper_gray),
        }

    frames = []
    still_frames = []
    binframes = []
    points = []

    if DEBUG:
        debug_file = os.path.join('/raceai/data', 'debug_file.mp4')
        debug_vid = cv2.VideoWriter(debug_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    if cap.isOpened():
        frame_idx = 0
        frame_tmp = np.zeros((h, w), dtype=np.uint8)
        while True:
            success, frame_bgr = cap.read()
            if not success:
                break
            keep_flag = False
            if black_box is not None:
                frame_bgr[black_y1:black_y2, black_x1:black_x2, :] = 0
            if focus_box is not None:
                frame_bgr = frame_bgr[focus_y1:focus_y2, focus_x1:focus_x2, :]
            if rmstill_enable:
                if rmstill_brightness_norm:
                    # Normalize the V channel to mean 127 / std 32.
                    # Channel names deliberately avoid `h`/`s`/`v`: the old
                    # code shadowed the frame height `h` computed above.
                    hch, sch, vch = cv2.split(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV))
                    vch = np.array((vch - np.mean(vch)) / np.std(vch) * 32 + 127, dtype=np.uint8)
                    frame_bgr = cv2.cvtColor(cv2.merge([hch, sch, vch]), cv2.COLOR_HSV2BGR)
                frame_gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
                if rmstill_pre_frame is not None:
                    frame_tmp = cv2.absdiff(frame_gray, rmstill_pre_frame)
                    frame_tmp = cv2.threshold(frame_tmp, rmstill_bin_threshold, 255, cv2.THRESH_BINARY)[1]
                    if rmstill_noise_level > 0:
                        # Erode-then-dilate (opening) removes small specks.
                        frame_tmp = cv2.erode(frame_tmp, None, iterations=rmstill_noise_level)
                        frame_tmp = cv2.dilate(frame_tmp, None, iterations=rmstill_noise_level)
                    if rmstill_area_mode == 0:
                        # Mode 0: total count of changed pixels.
                        val = np.sum(frame_tmp == 255)
                        if val > rmstill_area_thres:
                            keep_flag = True
                            if dev:
                                frame_tmp = cv2.cvtColor(frame_tmp, cv2.COLOR_GRAY2RGB)
                                points.append(np.round(val / rmstill_area_thres, 2))
                                binframes.append(cv2.resize(frame_tmp, (width, height)))
                    else:
                        # Other modes: area of the single largest contour.
                        contours, _ = cv2.findContours(frame_tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                        if len(contours) > 0:
                            contours = sorted(contours, key=cv2.contourArea, reverse=True)
                            if cv2.contourArea(contours[0]) > rmstill_area_thres:
                                keep_flag = True
                                if dev:
                                    frame_tmp = cv2.cvtColor(frame_tmp, cv2.COLOR_GRAY2RGB)
                                    cv2.drawContours(frame_tmp, [contours[0]], 0, (0, 0, 255), 3)
                                    binframes.append(cv2.resize(frame_tmp, (width, height)))
                rmstill_pre_frame = frame_gray
            elif color_enable:
                frame_hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
                if color_select == 0:
                    # Red needs two hue ranges merged.
                    mask_red_1 = cv2.inRange(frame_hsv, lower_red_1, upper_red_1)
                    mask_red_2 = cv2.inRange(frame_hsv, lower_red_2, upper_red_2)
                    color_mask = cv2.bitwise_or(mask_red_1, mask_red_2)
                else:
                    # Dict lookup replaces a long elif chain; an unknown
                    # color_select now fails loudly with KeyError instead of
                    # a later NameError on an undefined color_mask.
                    lower, upper = color_bounds[color_select]
                    color_mask = cv2.inRange(frame_hsv, lower, upper)
                val = np.sum(color_mask == 255)
                if val > color_area_thres:
                    color_buffer[-1] = 1
                else:
                    color_buffer[-1] = 0
                # Roll so the buffer acts as a fixed-size sliding window.
                color_buffer = np.roll(color_buffer, shift=-1, axis=0)
                color_count = np.sum(color_buffer, axis=0)
                if color_track_direction == 0:
                    val = color_count
                else:
                    if color_track_direction == 1:  # track increasing presence
                        color_direction_buffer[-1] = 1 if color_count > color_pre_count else 0
                    elif color_track_direction == 2:  # track decreasing presence
                        color_direction_buffer[-1] = 1 if color_count < color_pre_count else 0
                    color_direction_buffer = np.roll(color_direction_buffer, shift=-1, axis=0)
                    val = np.sum(color_direction_buffer, axis=0)

                color_pre_count = color_count

                # Keep the frame while the vote sits in the transition band.
                if color_lower_value < val < color_upper_value:
                    keep_flag = True
                    if dev:
                        frame_tmp = cv2.cvtColor(color_mask, cv2.COLOR_GRAY2RGB)
                        binframes.append(cv2.resize(frame_tmp, (width, height)))

            if not keep_flag:
                still_frames.append((frame_idx, None))
            else:
                if focus_box is not None:
                    if focus_box_repnum > 1:
                        # Tile the crop so small repetitive motion fills the frame.
                        frame_bgr = np.hstack([frame_bgr] * focus_box_repnum)
                        frame_bgr = np.vstack([frame_bgr] * focus_box_repnum)
                frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
                frame_rgb = cv2.resize(frame_rgb, (width, height))
                if rot:
                    M = cv2.getRotationMatrix2D(center=(int(width / 2), int(height / 2)), angle=rot, scale=1.0)
                    frame_rgb = cv2.warpAffine(frame_rgb, M, (width, height))
                frames.append(frame_rgb)

                if DEBUG:
                    debug_vid.write(frame_bgr)

            if progress_cb:
                if frame_idx % 200 == 0:
                    progress_cb((100 * float(frame_idx)) / n_frames)
            frame_idx += 1

            del frame_bgr

            pbar.update()
    pbar.close()
    if DEBUG:
        debug_vid.release()
    print(n_frames, 'vs', len(frames))
    frames = np.asarray(frames)
    if progress_cb:
        progress_cb(100)
    return frames, round(fps), still_frames, binframes, points


def wget(url, path):
    """Download *url* to *path* with a tqdm progress bar.

    Adapted from https://stackoverflow.com/questions/37573483/progress-bar-while-download-file-over-http-with-requests

    Args:
        url (str): source URL.
        path (str): destination file path.

    Raises:
        requests.HTTPError: if the server answers with an error status
            (previously a 404/500 error page would be silently written
            to *path* as if it were the real file).
    """
    # `with` closes the streamed connection even if writing fails.
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        total_size_in_bytes = int(response.headers.get('content-length', 0))
        block_size = 1024  # 1 Kibibyte
        progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True,
                            desc=f"Downloading {url} to {path} ...")
        with open(path, 'wb') as file:
            for data in response.iter_content(block_size):
                progress_bar.update(len(data))
                file.write(data)
        progress_bar.close()
        # content-length of 0 means the server did not report a size.
        if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
            print("ERROR, something went wrong")


def get_model(weight_root):
    """Make sure the RepNet checkpoint files exist locally, then load the model.

    Any checkpoint file missing from *weight_root* is downloaded; files
    already on disk are left untouched.

    Args:
        weight_root (str): directory holding (or receiving) the checkpoint.

    Returns:
        The model produced by get_repnet_model(weight_root).
    """
    os.makedirs(weight_root, exist_ok=True)

    weight_urls = (
        "https://storage.googleapis.com/repnet_ckpt/checkpoint",
        "https://storage.googleapis.com/repnet_ckpt/ckpt-88.data-00000-of-00002",
        "https://storage.googleapis.com/repnet_ckpt/ckpt-88.data-00001-of-00002",
        "https://storage.googleapis.com/repnet_ckpt/ckpt-88.index",
    )
    for url in weight_urls:
        filename = url.rsplit('/', 1)[-1]
        target = f"{weight_root}/{filename}"
        if not os.path.isfile(target):
            wget(url, target)

    return get_repnet_model(weight_root)
