"""
YOLO 格式的数据集转化为 COCO 格式的数据集
--root_dir 输入根路径
--save_path 保存文件的名字(没有random_split时使用)
--random_split 有则会随机划分数据集，然后再分别保存为3个文件。
--split_by_file 按照 ./train.txt ./val.txt ./test.txt 来对数据集进行划分。
"""

import os
import shutil
import time

import cv2
import json
from tqdm import tqdm
from sklearn.model_selection import train_test_split


from pathlib import Path

from utils.log import ColorStr


class YOLO2COCO:
    """Convert a YOLO-format dataset into COCO-format annotation files.

    The raw dataset directory is expected to contain sub-folders named
    ``<class>`` or ``<class>_<train|val|test>`` holding images (``.jpg``/``.png``)
    and YOLO ``.txt`` label files side by side.  Calling the instance:

    1. copies everything into ``<data_path>/<generate_file_name>/images`` and
       ``.../labels``, writing ``classes.txt`` and per-split image lists;
    2. converts the result into COCO JSON files under ``.../annotations``.
    """

    def __init__(self,
                 data_path,
                 split_by_file=False,
                 generate_file_name='generate'):
        # Root folder of the raw dataset.
        self.data_path = data_path
        # Name of the intermediate folder created inside data_path.
        self.generate_file_name = generate_file_name
        # Exactly one splitting strategy is active: random split unless the
        # caller explicitly asks for file-based splitting.
        self.random_split = not split_by_file
        self.split_by_file = split_by_file
        # File name for the single-JSON output path.  The original code read
        # `self.save_path` without ever defining it (latent AttributeError);
        # give it a sane default.
        self.save_path = 'annotations.json'
        self.logger_color = ColorStr()

    def __yolo2coco(self):
        """Read YOLO labels under ``self.root_dir`` and write COCO JSON files.

        Requires ``self.root_dir`` (set by ``__call__``) to contain
        ``images/``, ``labels/`` and ``classes.txt``.
        """
        root_path = self.root_dir
        colorstr = self.logger_color
        print(f"\n{colorstr('Loading data:')}  ", root_path)

        if not os.path.exists(root_path):
            # Explicit error survives `python -O`, unlike the old `assert`.
            raise FileNotFoundError(f'dataset root not found: {root_path}')
        originLabelsDir = os.path.join(root_path, 'labels')
        originImagesDir = os.path.join(root_path, 'images')
        with open(os.path.join(root_path, 'classes.txt')) as f:
            classes = f.read().strip().split()
        # Image file names; each becomes a COCO `file_name` field.
        indexes = os.listdir(originImagesDir)

        if self.random_split or self.split_by_file:
            # One COCO skeleton per split.
            train_dataset = {'categories': [], 'annotations': [], 'images': []}
            val_dataset = {'categories': [], 'annotations': [], 'images': []}
            test_dataset = {'categories': [], 'annotations': [], 'images': []}

            # Category ids start at 0, in classes.txt order.
            for i, cls in enumerate(classes):
                category = {'id': i, 'name': cls, 'supercategory': 'mark'}
                train_dataset['categories'].append(dict(category))
                val_dataset['categories'].append(dict(category))
                test_dataset['categories'].append(dict(category))

            if self.random_split:
                print(f"\n{colorstr('splitting mode:')} random split")
                time.sleep(1)
                train_img, val_img, test_img = self.__train_test_val_split_random(indexes, 0.8, 0.1, 0.1)
            else:
                print(f"\n{colorstr('splitting mode:')} split by files")
                time.sleep(1)
                train_img, val_img, test_img = self.__train_test_val_split_by_files(indexes, root_path)
        else:
            dataset = {'categories': [], 'annotations': [], 'images': []}
            for i, cls in enumerate(classes):
                dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})

        # Running annotation id, unique across all splits.
        ann_id_cnt = 0
        for k, index in enumerate(tqdm(indexes)):
            # Map the image file to its label file by swapping the extension
            # only.  The old `index.replace('images', 'txt')` mangled any
            # filename that happened to contain the substring "images".
            txtFile = os.path.splitext(index)[0] + '.txt'
            img_path = os.path.join(root_path, 'images', index)
            im = cv2.imread(img_path)
            if im is None:
                # Unreadable or non-image file: skip it instead of crashing
                # on `im.shape`.
                print(f'WARNING: could not read image {img_path}, skipped.')
                continue
            height, width, _ = im.shape
            if self.random_split or self.split_by_file:
                # Point `dataset` at the split this image belongs to.
                # NOTE(review): an image missing from every list keeps the
                # previous split (original behaviour, kept on purpose).
                if index in train_img:
                    dataset = train_dataset
                elif index in val_img:
                    dataset = val_dataset
                elif index in test_img:
                    dataset = test_dataset
            dataset['images'].append({'file_name': index,
                                      'id': k,
                                      'width': width,
                                      'height': height})
            label_path = os.path.join(originLabelsDir, txtFile)
            if not os.path.exists(label_path):
                # No label file: keep the image entry, add no annotations.
                continue
            with open(label_path, 'r') as fr:
                for label in fr.readlines():
                    label = label.strip().split()
                    cls_id = int(label[0])
                    # YOLO stores normalised centre x/y and box width/height.
                    x = float(label[1])
                    y = float(label[2])
                    w = float(label[3])
                    h = float(label[4])
                    # Convert to absolute corner coordinates.
                    x1 = (x - w / 2) * width
                    y1 = (y - h / 2) * height
                    x2 = (x + w / 2) * width
                    y2 = (y + h / 2) * height
                    # Distinct names: the old code clobbered the image-level
                    # `width`/`height` variables here.
                    box_w = max(0, x2 - x1)
                    box_h = max(0, y2 - y1)
                    dataset['annotations'].append({
                        'area': box_w * box_h,
                        'bbox': [x1, y1, box_w, box_h],
                        'category_id': cls_id,
                        'id': ann_id_cnt,
                        'image_id': k,
                        'iscrowd': 0,
                        # Rectangle mask: four corners clockwise from top-left.
                        'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
                    })
                    ann_id_cnt += 1

        # Persist the results.
        folder = os.path.join(root_path, 'annotations')
        os.makedirs(folder, exist_ok=True)
        if self.random_split or self.split_by_file:
            for phase, split_dataset in (('train', train_dataset),
                                         ('val', val_dataset),
                                         ('test', test_dataset)):
                json_name = os.path.join(root_path, 'annotations/{}.json'.format(phase))
                with open(json_name, 'w') as f:
                    json.dump(split_dataset, f)
                print('Save annotation to {}'.format(json_name))
        else:
            json_name = os.path.join(root_path, 'annotations/{}'.format(self.save_path))
            with open(json_name, 'w') as f:
                json.dump(dataset, f)
                print('Save annotation to {}'.format(json_name))

    def __split_labels_images(self, path, generate_folder_name='generate'):
        """Flatten the raw class folders into a YOLO-style layout.

        Copies every image into ``<generate>/images`` and every ``.txt``
        label into ``<generate>/labels``, writes ``classes.txt`` and the
        per-split image-name lists (``train.txt``/``val.txt``/``test.txt``).

        Returns the path of the generated folder.
        """
        colorstr = self.logger_color
        file_path = os.path.join(os.getcwd(), path)
        # Every sub-folder of the dataset root is a `<class>[_<split>]` folder.
        folders = os.listdir(file_path)
        folder_types = set()
        class_types = list()
        with tqdm(total=len(folders), desc=f"{colorstr('Reading Folders:')} ") as pbar:
            for i, folder in enumerate(folders):
                ftype, class_name = self.__folder_type(folder)
                folder_types.add(ftype)
                # 'None' marks the background pseudo-class and is excluded
                # from classes.txt; preserve first-seen class order.
                if class_name != 'None' and class_name not in class_types:
                    class_types.append(class_name)

                pbar.update(1)
                pbar.set_postfix({
                    'folder id': f'{i}',
                    'folder name': f'{folder}',
                    'folder type': f'{ftype}',
                })

        # Build the target layout: <generate>/images and <generate>/labels.
        generate_folder_name = generate_folder_name if generate_folder_name is not None else 'generate'
        generate_path = f'{file_path}{os.sep}{generate_folder_name}'
        image_path = f'{generate_path}{os.sep}images'
        label_path = f'{generate_path}{os.sep}labels'
        os.makedirs(image_path, exist_ok=True)
        os.makedirs(label_path, exist_ok=True)

        # classes.txt drives the COCO category ids later on.
        class_file_path = f'{generate_path}{os.sep}classes.txt'
        self.__generate_classfile(class_types, class_file_path)

        time.sleep(1)
        print(f"\n{colorstr('Split images and labels.....')}")
        time.sleep(1)

        for folder in folders:
            ftype, _ = self.__folder_type(folder)
            folder_path = f'{file_path}{os.sep}{folder}'
            content_len = len(os.listdir(folder_path))
            dtype_file_path = f'{generate_path}{os.sep}{ftype}.txt'
            pending_names = list()

            # Separate images from labels, streaming names to <split>.txt in
            # small batches so memory stays bounded on huge folders.
            with tqdm(total=content_len, desc=f"Folder Name: {folder}") as pbar:
                for content in Path(folder_path).iterdir():
                    content_name = content.name
                    content_type = self.__file_type(content_name)
                    if content_type == 'image':
                        shutil.copy(str(content), image_path)
                        pending_names.append(f'{content_name}\n')
                    elif content_type == 'label':
                        shutil.copy(str(content), label_path)

                    if len(pending_names) >= 10:
                        with open(dtype_file_path, 'a', encoding='utf-8') as f:
                            f.writelines(pending_names)
                        pending_names = list()
                    pbar.update(1)

            # Final flush.  The original flush condition (`i >= 10 or
            # content_len - i < 10`) never fired for folders with few images
            # among many files, silently losing those names from the list.
            if pending_names:
                with open(dtype_file_path, 'a', encoding='utf-8') as f:
                    f.writelines(pending_names)

        if 'test' not in folder_types:
            # Always create test.txt so split-by-file mode finds all three lists.
            dtype_file_path = f'{generate_path}{os.sep}test.txt'
            with open(dtype_file_path, 'a', encoding='utf-8'):
                print('No test data, generating test.txt....')

        return generate_path

    def __call__(self):
        """Run the full pipeline: restructure the raw data, then convert."""
        self.root_dir = self.__split_labels_images(self.data_path, self.generate_file_name)
        time.sleep(1)
        self.__yolo2coco()

    def __train_test_val_split_random(self, img_paths, ratio_train=0.8, ratio_test=0.1, ratio_val=0.1):
        """Randomly split image names into train/val/test by the given ratios."""
        # Float-tolerant check: the old `int(sum) == 1` assert mis-fired on
        # valid ratios such as 0.7 + 0.2 + 0.1 (sum 0.999...), and asserts
        # are stripped under `python -O`.
        if abs(ratio_train + ratio_test + ratio_val - 1.0) > 1e-6:
            raise ValueError('ratio_train + ratio_test + ratio_val must equal 1')
        train_img, middle_img = train_test_split(img_paths, test_size=1 - ratio_train, random_state=233)
        # Share of the non-train remainder that goes to validation.
        ratio = ratio_val / (1 - ratio_train)
        val_img, test_img = train_test_split(middle_img, test_size=ratio, random_state=233)
        print("NUMS of train:val:test = {}:{}:{}".format(len(train_img), len(val_img), len(test_img)))
        return train_img, val_img, test_img

    def __train_test_val_split_by_files(self, img_paths, root_dir):
        """Read split membership from `<root_dir>/{train,val,test}.txt`.

        Each file lists one image file name per line.  Returns the three
        lists in (train, val, test) order.
        """
        img_split = []
        for p in ('train', 'val', 'test'):
            define_path = os.path.join(root_dir, f'{p}.txt')
            if not os.path.exists(define_path):
                raise FileNotFoundError(f'split definition missing: {define_path}')
            with open(define_path, 'r') as f:
                img_split.append([impath.strip() for impath in f.readlines()])
        return img_split[0], img_split[1], img_split[2]

    def __images_labels_path_generate(self, path_prefix, folders_sub):
        """Create per-split images/labels folders; return their path dicts.

        NOTE(review): currently unused (its call sites are commented out);
        kept for backward compatibility.
        """
        images_path_dict = {}
        labels_path_dict = {}

        for folder_name in folders_sub:
            path_images = f'{path_prefix}{os.sep}images{os.sep}{folder_name}'
            path_labels = f'{path_prefix}{os.sep}labels{os.sep}{folder_name}'
            images_path_dict[folder_name] = path_images
            labels_path_dict[folder_name] = path_labels
            os.makedirs(path_images, exist_ok=True)
            os.makedirs(path_labels, exist_ok=True)

        return images_path_dict, labels_path_dict

    def __folder_type(self, dataset_folder):
        """Parse a `<class>[_<split>]` folder name into (split, class_name).

        A folder whose name contains 'None' is the background pseudo-class.
        Missing or unknown split suffixes default to 'train'.  The original
        implicitly returned None for any suffix other than 'train'/'val'
        (including 'test'), crashing the caller's tuple unpacking.
        """
        parts = dataset_folder.split('_')
        class_name = 'None' if 'None' in dataset_folder else parts[0]
        if len(parts) > 1 and parts[1] in ('train', 'val', 'test'):
            return parts[1], class_name
        return 'train', class_name

    def __file_type(self, file_name):
        """Classify a file name as 'label' (.txt), 'image' (.jpg/.png) or None.

        The original returned an *instance* of NameError (never raised) for
        unknown extensions; returning None expresses "neither" without
        masquerading as an error, and `splitext` handles dotted names such
        as 'a.b.jpg' that the old `split('.')[1]` misread.
        """
        ext = os.path.splitext(file_name)[1].lower()
        if ext == '.txt':
            return 'label'
        if ext in ('.jpg', '.png'):
            return 'image'
        return None

    def __generate_classfile(self, classes, path):
        """Write one class name per line to `path` (the YOLO classes.txt)."""
        colorstr = self.logger_color
        with tqdm(total=len(classes), desc=f"{colorstr('Generating class file:')} ") as pbar:
            with open(path, 'w', encoding='utf-8') as f:
                for cls in classes:
                    f.write(f'{cls}\n')
                    pbar.update(1)
                    pbar.set_postfix({
                        'class name': f'{cls}'
                    })



