import os

import argparse
import random

import sys
sys.path.append("/home/binpeng/mall-buc/src/generated")
sys.path.append("../")

import online_tracking_results_pb2

from functools import partial

import cv2
from multiprocessing import Pool, cpu_count
import json
import numpy as np
from tqdm import tqdm
import utils.io as data_io
import glob

from collections import defaultdict

def get_parser():
    """Build the CLI argument parser for the pb-adjustment tool.

    Returns:
        argparse.ArgumentParser with --src/--dst pb directories, detection-num
        filter thresholds, the region-labelling JSON path, and the image size.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--src", type=str, help="original sv pbs dir", default="/mnt/qamall_data/full_data/qa-mall-pb-adjustment/K11_guangzhou_artfull_20201230_sv2.7.3_buc3_1_prepare/20230614-142251/pbs")
    parser.add_argument("--dst", type=str, help="filtered sv pbs dir", default="/mnt/qamall_data/full_data/qa-mall-pb-adjustment/K11_guangzhou_artfull_20201230_sv2.7.3_buc3_1_prepare_add_staff/20230309-134406/pbs")
    # Fixed typo in help text: "mininum" -> "minimum".
    parser.add_argument("--original_min_detection_num", type=int, default=10, help="minimum detection num in track are needed")
    parser.add_argument("--short_filter_det_num", type=int, default=0, help="when one track appear in short filter region, use this detection num to filter")
    parser.add_argument("--region_root", help="region info root dir", default="./data/labeling_result_20230316135637.json")
    parser.add_argument("--image_height", type=int, default=1440)
    parser.add_argument("--image_width", type=int, default=2560)
    return parser


def multiprocess_run(func, deal_list, n_cpus=None):
    """Map *func* over *deal_list* with a multiprocessing pool.

    Args:
        func: picklable callable applied to each element.
        deal_list: sequence of work items.
        n_cpus: worker count; defaults to all available CPUs when None.

    Returns:
        list of results in the same order as *deal_list*.
    """
    # PEP 8: compare to None with `is`, not `==`.
    n_workers = cpu_count() if n_cpus is None else int(n_cpus)
    print('running command with ' + str(n_workers) + ' CPUs')
    # Context manager guarantees the pool is torn down even if map() raises;
    # map() is synchronous, so all results are complete before the pool exits.
    with Pool(n_workers) as pool:
        return pool.map(func, deal_list)

def build_image_mask_from_regions(regions, img_height, img_width, channel):
    """Rasterize the labelled polygons of one camera channel into a mask image.

    Args:
        regions: dict with a "graphic_list" (each entry's "graphic_data" lists
            point indices) and a "point_dict" mapping str(index) -> {"x", "y"}.
        img_height: output mask height in pixels.
        img_width: output mask width in pixels.
        channel: channel name, used only for the debug image filename.

    Returns:
        (img_height, img_width, 3) float array: 255 inside any region, 0 outside.
        A debug copy is also written to ./tmp/image_mask/<channel>.jpg.
    """
    img = np.zeros((img_height, img_width, 3))
    for region in regions["graphic_list"]:
        region_points = []
        for pt_idx in region["graphic_data"]:
            pt = regions["point_dict"][str(pt_idx)]
            pt = [int(pt['x']), int(pt['y'])]
            region_points.append(pt)
        # cv2.fillPoly requires 32-bit integer point coordinates; int64 arrays
        # are rejected by modern OpenCV builds.
        pts = np.array(region_points, dtype=np.int32)
        img = cv2.fillPoly(img, [pts], (255, 255, 255))

    tmp_img_dir = "./tmp/image_mask"
    os.makedirs(tmp_img_dir, exist_ok=True)

    debug_img_path = os.path.join(tmp_img_dir, "{}.jpg".format(channel))
    print("Debug: mask regions img are writing to {}".format(debug_img_path))
    cv2.imwrite(debug_img_path, img)

    return img


def load_regions(region_root, img_height, img_width):
    """Load the labelling JSON and build one polygon mask per channel.

    Channels whose "graphic_list" is empty get no entry. Returns a mapping
    of channel name -> mask image (a defaultdict(list), as in the original).
    """
    regions_info = defaultdict(list)
    region_data = data_io.load_json_file(region_root)
    for img_name, channel_regions in region_data.items():
        if not channel_regions["graphic_list"]:
            continue
        channel = img_name.split(".")[0]
        regions_info[channel] = build_image_mask_from_regions(
            channel_regions, img_height, img_width, channel)
    return regions_info


def get_pb_times(pb_file):
    """Return the hour-of-day encoded in a pb filename.

    File names look like "<site>-<ch>-<ch>_<YYYYMMDDhhmmss>-...", so the third
    dash-separated field carries "<ch>_<timestamp>"; the last six digits of
    that timestamp are hhmmss, and the leading two of those are the hour.
    """
    name = os.path.basename(pb_file)
    timestamp = name.split("-")[2].split("_")[1]
    return int(timestamp[-6:][:2])



def parser_one_channel(src_sv_pb_dir, dst_sv_pb_dir):
    """Group all *.pb files in *src_sv_pb_dir* by channel, then process each
    channel's files in batches via a worker pool, writing to *dst_sv_pb_dir*.
    """
    if not os.path.exists(dst_sv_pb_dir):
        os.makedirs(dst_sv_pb_dir, exist_ok=True)

    print("start parser_one_channel")
    pb_files = glob.glob(os.path.join(src_sv_pb_dir, "*.pb"))
    print("Parsing {} pb files in {}".format(len(pb_files), src_sv_pb_dir))

    # Channel id is the second dash-separated field of the filename.
    channel_to_pbs = defaultdict(list)
    for pb_file in pb_files:
        # Hour is parsed but currently unused (an hour filter was disabled
        # in the original); kept for parity.
        get_pb_times(pb_file)
        channel = os.path.basename(pb_file).split("-")[1]
        channel_to_pbs[channel].append(pb_file)

    worker_count = 10
    for channel in channel_to_pbs:
        files = channel_to_pbs[channel]
        total = len(files)
        # Ceil-divide so the files spread across at most `worker_count` batches.
        batch_size = (total + worker_count - 1) // worker_count
        params = [
            [files[start:start + batch_size], dst_sv_pb_dir]
            for start in range(0, total, batch_size)
        ]
        multiprocess_run(parser, params, worker_count)


def parser(parm):
    """Worker: rewrite each source pb with randomized staff confidences.

    *parm* is a [src_sv_pb_files, dst_dir] pair (a single argument so it can
    go through multiprocess_run). Every track in every input file gets
    `staff_result.confidence` set to a random value — presumably a placeholder
    until a real staff classifier is wired in (NOTE(review): confirm). Despite
    the "filtered" wording in the log line, no track is dropped.
    """
    src_sv_pb_files, dst_dir = parm

    for src_sv_pb_file in src_sv_pb_files:
        print('src_sv_pb_file', src_sv_pb_file)

        tracking_res = online_tracking_results_pb2.Tracks()
        with open(src_sv_pb_file, 'rb') as f:
            channel = os.path.basename(src_sv_pb_file).split("-")[1]
            tracking_res.ParseFromString(f.read())
            original_track_num = len(tracking_res.tracks)

            remain_tracks = []
            for track in tracking_res.tracks:
                track.staff_result.confidence = random.random()
                remain_tracks.append(track)

        remain_track_num = len(remain_tracks)
        print("Track num in {} after filtered: {} --> {}".format(src_sv_pb_file, original_track_num, remain_track_num))

        filtered_res = online_tracking_results_pb2.Tracks()
        filtered_res.tracks.extend(remain_tracks)

        dst_file_path = os.path.join(dst_dir, os.path.basename(src_sv_pb_file))
        with open(dst_file_path, "wb") as f:
            f.write(filtered_res.SerializeToString())

if __name__ == "__main__":
    # Parse CLI options, then rewrite every pb under --src into --dst with
    # randomized staff confidences (region/threshold options are currently
    # unused by this entry point).
    args = get_parser().parse_args()
    parser_one_channel(args.src, args.dst)

