import os.path as osp
from collections import defaultdict
import os
import cv2
import numpy as np
from tqdm import tqdm
from argparse import ArgumentParser
data_dir = '../../data/2019/round1'
"分成query 和 gallery"
def dict_slice(ori_dict, start=0, end=None):
    """Return a new dict holding the entries of *ori_dict* whose keys fall
    in the ``[start:end)`` slice of the key insertion order.

    :param ori_dict: source dictionary
    :param start: slice start index (default 0)
    :param end: slice end index; ``None`` means through the last key
    :return: a new dict with the selected entries
    """
    selected_keys = list(ori_dict)[start:end]
    return {key: ori_dict[key] for key in selected_keys}

def divided_query_gallery_from_data(data_dir, num_val_data=1000000, relabel=True):
    """Split the training set into query and gallery subsets.

    Reads ``<data_dir>/train_list.txt`` (lines of the form
    ``<subdir>/<img_name> <label>``), keeps at most *num_val_data* lines,
    drops identities with fewer than two images (long-tail removal), and
    takes the first image of each remaining identity as query and the
    rest as gallery.  No image files are opened; only paths are joined.

    :param data_dir: dataset root containing ``train_list.txt`` and ``train_set/``
    :param num_val_data: maximum number of list lines to read
    :param relabel: unused; kept for interface compatibility
    :return: ``(query_dataset, gallery_dataset)``, each a list of
             ``(img_path, pid, camid)`` tuples; ``pid`` is the label
             string from the list file, ``camid`` a running per-image
             counter shared by both subsets
    """
    filename = osp.join(data_dir, 'train_list.txt')
    # TODO: optionally select a configurable fraction as the validation set.
    count_image = defaultdict(list)  # label -> list of image file names
    with open(filename, 'r') as file_to_read:
        for raw_line in file_to_read:
            # Check BEFORE consuming the budget so exactly num_val_data
            # lines are processed (the original decremented first, an
            # off-by-one that processed only num_val_data - 1 lines).
            if num_val_data <= 0:
                break
            num_val_data -= 1
            line = raw_line.strip()
            if not line:  # tolerate blank lines instead of crashing on unpack
                continue
            (img_name, img_label) = line.split('/')[-1].split(' ')
            count_image[img_label].append(img_name)

    val_imgs = {}
    pid_container = set()
    for pid, img_names in count_image.items():
        if len(img_names) < 2:  # drop single-image identities (long tail)
            continue
        val_imgs[pid] = img_names
        pid_container.add(pid)
    # pid2label = {pid: label for label, pid in enumerate(pid_container)}  # remap labels to contiguous IDs (some IDs vanish after long-tail removal)

    # Images live under train_set/; build full paths from the labels.
    data_dir += '/train_set'
    # First image of each identity goes to query, the rest to gallery.
    query_dataset = []
    gallery_dataset = []
    camid = 0  # synthetic camera id, unique per image
    for pid, img_names in val_imgs.items():
        # pid = pid2label[pid]
        for index, img in enumerate(img_names):
            record = (osp.join(data_dir, img), pid, camid)
            if index == 0:
                query_dataset.append(record)
            else:
                gallery_dataset.append(record)
            camid += 1
    return query_dataset, gallery_dataset

"分别提取green gallery 格式：img_path id camid"
def divided_green_normal_And_save_to_txt(dataset, save_dir, is_query = False,no_green=True):
    """Split *dataset* into "green" / "normal" images and save each list to txt.

    Writes ``<save_dir>/<prefix>_green.txt`` and
    ``<save_dir>/<prefix>_normal.txt`` (prefix is ``query`` or
    ``gallery``), one ``img_name pid camid`` record per line.  The
    normal list is written sorted; the green list keeps dataset order.

    :param dataset: list of ``(img_path, pid, camid)`` tuples; ``pid``
                    is expected to be a string
    :param save_dir: directory the txt files are written into
    :param is_query: True -> ``query`` prefix, False -> ``gallery`` prefix
    :param no_green: if True (default), every image goes to the normal
                     file without reading any pixels; if False, images
                     are read and classified by channel means
    """
    subfix = 'query' if is_query else 'gallery'
    file_name_green = []
    file_name_none = []
    for (img_path, pid, camid) in tqdm(dataset):
        img_name = img_path.split('/')[-1]
        if no_green:
            # Default: everything lands in the normal file, no color test.
            file_name_none.append((img_name, pid, camid))
            continue
        img = cv2.imread(img_path)
        # Per-channel means; cv2.imread returns BGR channel order.
        per_image_Bmean = np.mean(img[:, :, 0])
        per_image_Gmean = np.mean(img[:, :, 1])
        per_image_Rmean = np.mean(img[:, :, 2])
        # NOTE(review): "green" is detected as all three channel means > 65,
        # i.e. an overall-brightness heuristic — confirm the threshold.
        if per_image_Bmean > 65 and per_image_Gmean > 65 and per_image_Rmean > 65:
            file_name_green.append((img_name, pid, camid))  # e.g. 1376 green query images
        else:
            file_name_none.append((img_name, pid, camid))  # e.g. 1524 non-green query images

    def _write(path, records):
        # Context manager guarantees the file is closed even on error
        # (the original left file handles open if a write raised).
        with open(path, 'w') as file:
            for name, rpid, rcamid in records:
                file.write(name + ' ' + rpid + ' ' + str(rcamid) + '\n')

    _write(os.path.join(save_dir, subfix + '_green.txt'), file_name_green)
    _write(os.path.join(save_dir, subfix + '_normal.txt'), sorted(file_name_none))

if __name__ == '__main__':
    # Set to False to route green-tinted images into the *_green.txt files.
    no_green = True
    query_dataset, gallery_dataset = divided_query_gallery_from_data(data_dir, 100000)
    for subset, as_query in ((query_dataset, True), (gallery_dataset, False)):
        divided_green_normal_And_save_to_txt(subset, data_dir, as_query, no_green=no_green)