from .bases import BaseImageDataset
import os.path as osp
from collections import defaultdict
import logging


# from calculate_mean_std import calculate_mean_std_in_dataset,calculate_pids_num

class mydata(BaseImageDataset):
    """NAIC person re-ID competition dataset loader.

    Expected directory layout (relative to ``root``)::

        train/            2020 training images + label.txt
        test/             query_*.txt / gallery_*.txt lists + image folders
        2019/round1/      2019 round-1 data, optional validation split
        2019/round2/      2019 round-2 data, optionally merged into training

    Every sample is a ``(img_path, pid, camid)`` tuple.
    """

    def __init__(self, root='../data', verbose=True):
        super(mydata, self).__init__()
        logger = logging.getLogger("reid_baseline.train")
        # Highest relabelled ID assigned so far; used to chain the IDs of
        # several datasets together. -1 means "no IDs assigned yet".
        self.pid_max = -1
        self.dataset_dir = root
        self.dataset_dir_train = osp.join(self.dataset_dir, 'train')
        self.dataset_dir_test = osp.join(self.dataset_dir, 'test')
        # 1. The 2019 round-1 data serves as a query/gallery validation set.
        # 2. The 2019 round-2 data can be appended to the training set.
        self.dataset_dir_val = osp.join(self.dataset_dir, '2019/round1')
        self.dataset_dir_train2019 = osp.join(self.dataset_dir, '2019/round2')

        train = self._process_dir_train(self.dataset_dir_train, relabel=True)
        USE_TrainData2019 = False  # False: train on the 2020 data only
        if USE_TrainData2019:
            logger.info("using 2019 round2 datasets for training")
            # Continue ID numbering after the 2020 IDs so labels stay unique.
            train.extend(self._process_dir_train(self.dataset_dir_train2019, relabel=True,
                                                 year='2019',
                                                 ID_start=self.pid_max + 1))

        # "green" queries/galleries are kept separate from the normal ones.
        query_green, query_normal = self._process_dir_test(self.dataset_dir_test, query=True)
        gallery_green, gallery_normal = self._process_dir_test(self.dataset_dir_test, query=False)

        is_test = True  # True: evaluate on the test split, False: on the validation split
        if not is_test:
            query_green_val, query_normal_val = self._process_dir_val(self.dataset_dir_val, query=True)
            gallery_green_val, gallery_normal_val = self._process_dir_val(self.dataset_dir_val, query=False)
            query_green, query_normal = query_green_val, query_normal_val
            gallery_green, gallery_normal = gallery_green_val, gallery_normal_val

        if verbose:
            print("=> NAIC Competition data loaded")
            self.print_dataset_statistics(train, query_green + query_normal, gallery_green + gallery_normal)

        self.train = train
        self.query_green = query_green
        self.gallery_green = gallery_green
        self.query_normal = query_normal
        self.gallery_normal = gallery_normal

        self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)

    def _process_dir_train(self, data_dir, relabel=True, year='2020', ID_start=0):
        """Read a training split and return its samples.

        :param data_dir: directory holding the label file (and images)
        :param relabel: kept for interface compatibility; IDs are always
            relabelled to a contiguous range starting at ``ID_start``
        :param year: '2020' or '2019' -- selects label-file name and line format
        :param ID_start: first relabelled ID (used when appending datasets)
        :return: list of ``(img_path, pid, camid)`` tuples
        """
        if year == '2020':
            filename = osp.join(data_dir, 'label.txt')
        else:
            filename = osp.join(data_dir, 'train_list.txt')
        camid = 1  # the competition data carries no camera information
        # Group image names by raw label: {label: [img_name, ...], ...}
        count_image = defaultdict(list)
        with open(filename, 'r') as file_to_read:
            for line in file_to_read:
                if not line.strip():
                    continue  # tolerate blank/trailing lines
                if year == '2020':
                    # 2020 format: "<img_name>:<label>"
                    img_name, img_label = line.strip().split(':')
                else:
                    # 2019 format: "<img_name> <label>"
                    img_name, img_label = line.split()
                    # These three samples are known to carry wrong labels.
                    if img_name in ('train/105180993.png', 'train/829283568.png', 'train/943445997.png'):
                        continue
                count_image[img_label].append(img_name)
        # Drop identities with a single image (long-tail removal).
        val_imgs = {pid: imgs for pid, imgs in count_image.items() if len(imgs) >= 2}
        # Sort so the pid -> label mapping is deterministic across runs,
        # which is required to resume training.
        pid_container = sorted(val_imgs)
        # Relabel to contiguous IDs starting at ID_start (long-tail removal
        # leaves holes in the original ID space).
        pid2label = {pid: label + ID_start for label, pid in enumerate(pid_container)}
        # Guard the empty-split case: keep the previous maximum instead of
        # crashing on max() over an empty sequence.
        self.pid_max = max(pid2label.values(), default=self.pid_max)
        # Build paths from the labels; unlabeled images are skipped implicitly.
        if year == '2020':
            data_dir = osp.join(data_dir, 'images')
        dataset = []
        for raw_pid, img_names in val_imgs.items():
            pid = pid2label[raw_pid]
            for img in img_names:
                dataset.append((osp.join(data_dir, img), pid, camid))
        return dataset

    def _process_dir_val(self, data_dir, query=True):
        """Read the 2019 round-1 validation lists.

        Each list line is ``<img_name> <pid> <camid>``.

        :param data_dir: directory holding the list files
        :param query: True for the query lists, False for the gallery lists
        :return: ``(green_samples, normal_samples)`` -- two lists of
            ``(img_path, pid, camid)`` tuples
        """
        subfix = 'query' if query else 'gallery'
        datasets = []
        for datatype in ('green', 'normal'):
            filename = osp.join(data_dir, '{}_{}.txt'.format(subfix, datatype))
            dataset = []
            with open(filename, 'r') as file_to_read:
                for line in file_to_read:
                    line = line.strip()
                    if not line:
                        continue  # tolerate blank/trailing lines
                    img_name, pid, camid = line.split(' ')
                    # 2019 round-1 images live under the 'train_set' folder.
                    dataset.append((osp.join(self.dataset_dir_val, 'train_set', img_name), int(pid), int(camid)))
            datasets.append(dataset)
        return datasets[0], datasets[1]

    def _process_dir_test(self, data_dir, query=True):
        """Read the test query/gallery lists (one image name per line).

        Test images are unlabeled, so pid and camid are fixed to 1.

        :param data_dir: directory holding the list files
        :param query: True for the query lists, False for the gallery lists
        :return: ``(green_samples, normal_samples)`` -- two lists of
            ``(img_path, pid, camid)`` tuples
        """
        subfix = 'query' if query else 'gallery'
        datasets = []
        for datatype in ('green', 'normal'):
            filename = osp.join(data_dir, '{}_{}.txt'.format(subfix, datatype))
            dataset = []
            with open(filename, 'r') as file_to_read:
                for line in file_to_read:
                    tokens = line.split()
                    if not tokens:
                        continue  # tolerate blank/trailing lines
                    # Keep the last whitespace-separated token as the name
                    # (matches the original parsing of these list files).
                    img_name = tokens[-1]
                    dataset.append((osp.join(self.dataset_dir_test, subfix, img_name), 1, 1))
            datasets.append(dataset)
        return datasets[0], datasets[1]
