#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: zhangjian
# date: 2024/1/30
import ast
import json
import os.path
import random
import re

import numpy as np
from tqdm import tqdm

import zj_utils


def icdar2019_mlt_transform(language_type=None):
    """Convert ICDAR2019-MLT ground truth into PaddleOCR detection label files.

    Image ids map to languages as follows (per the dataset release):
    00001 - 01000:  Arabic
    01001 - 02000:  English
    02001 - 03000:  French
    03001 - 04000:  Chinese
    04001 - 05000:  German
    05001 - 06000:  Korean
    06001 - 07000:  Japanese
    07001 - 08000:  Italian
    08001 - 09000:  Bangla
    09001 - 10000:  Hindi

    Args:
        language_type: optional language name (case-insensitive). When given,
            only images whose numeric id falls inside that language's interval
            are exported, and the output file names gain a language prefix.

    Raises:
        ValueError: if language_type is unknown, or a ground-truth line does
            not contain 10 comma-separated fields.
    """
    image_root_lst = ['data/icdar2019_mlt/ImagesPart1', 'data/icdar2019_mlt/ImagesPart2']
    gt_root = 'data/icdar2019_mlt/train_gt_t13'
    if language_type:
        language_type = language_type.lower()
        new_paddle_file = f'data/icdar2019_mlt/{language_type}_train_full_labels.txt'
        new_paddle_file_train = f'data/icdar2019_mlt/{language_type}_train_labels.txt'
        new_paddle_file_val = f'data/icdar2019_mlt/{language_type}_val_labels.txt'
    else:
        new_paddle_file = 'data/icdar2019_mlt/train_full_labels.txt'
        new_paddle_file_train = 'data/icdar2019_mlt/train_labels.txt'
        new_paddle_file_val = 'data/icdar2019_mlt/val_labels.txt'
    zj_utils.makedirs(os.path.dirname(new_paddle_file))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_train))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_val))

    language_type2id_interval = {
        'arabic': [1, 1000],
        'english': [1001, 2000],
        'french': [2001, 3000],
        'chinese': [3001, 4000],
        'german': [4001, 5000],
        'korean': [5001, 6000],
        'japanese': [6001, 7000],
        'italian': [7001, 8000],
        'bangla': [8001, 9000],
        'hindi': [9001, 10000],
    }
    # Validate with a real exception: `assert` is stripped under `python -O`.
    if language_type is not None and language_type not in language_type2id_interval:
        raise ValueError(f'unknown language_type: {language_type!r}')

    image_path_lst = []
    for image_root in image_root_lst:
        image_path_lst.extend(zj_utils.get_file_path_list(image_root))

    with open(new_paddle_file, 'w', encoding='utf-8') as f:
        for image_path in tqdm(image_path_lst):
            _, dir1, dir2, basename = image_path.split('/')
            basename_no_ext = os.path.splitext(basename)[0]
            if language_type:
                # The trailing 5 digits of the image name select the language.
                image_id = int(basename_no_ext[-5:])
                id_lo, id_hi = language_type2id_interval[language_type]
                if not id_lo <= image_id <= id_hi:
                    continue

            filepath = os.path.join(dir1, dir2, basename)
            gt_path = os.path.join(gt_root, basename_no_ext + '.txt')
            with open(gt_path, 'r', encoding='utf-8') as f_:
                lines = f_.readlines()
            label = []
            for line in lines:
                # gt line: x1,y1,x2,y2,x3,y3,x4,y4,script,transcription
                fields = line.strip().split(',', 9)
                if len(fields) != 10:
                    raise ValueError(f'expected 10 fields, got {len(fields)}: {line!r}')
                try:
                    # ast.literal_eval safely parses numeric/quoted fields and,
                    # like the previous eval(), fails on bare-word transcriptions.
                    line_ = [ast.literal_eval(x) for x in fields]
                except (ValueError, SyntaxError):
                    line_ = [ast.literal_eval(x) if i < 8 else x for i, x in enumerate(fields)]
                    # Strip the surrounding quotes kept verbatim by the fallback.
                    if line_[-1].startswith('"') and line_[-1].endswith('"'):
                        line_[-1] = line_[-1][1:-1]
                item = {'transcription': line_[-1], 'points': np.array(line_[:8]).reshape(4, 2).tolist()}
                label.append(item)

            # Skip images without any annotation.
            if not label:
                continue
            f.write(f'{filepath}\t{json.dumps(label, ensure_ascii=False)}\n')
    train_lines, val_lines = train_val_split(new_paddle_file, val_split=0.1)
    with open(new_paddle_file_train, 'w', encoding='utf-8') as f:
        f.writelines(train_lines)
    with open(new_paddle_file_val, 'w', encoding='utf-8') as f:
        f.writelines(val_lines)
    print(f'train_file saved to {new_paddle_file_train}')
    print(f'val_file saved to {new_paddle_file_val}')


def cocotextv2_transform():
    """Convert COCO-Text v2 annotations into PaddleOCR detection label files.

    cocotext.v2.json keys: ['cats', 'anns', 'imgs', 'imgToAnns', 'info']

    anns: dict keyed by annotation id (string), e.g.
        {"45346": {'mask': [468.9, 286.7, 468.9, 295.2, 493.0, 295.8, 493.0, 287.2],
         'class': 'machine printed', 'bbox': [468.9, 286.7, 24.1, 9.1],
         'image_id': 217925, 'id': 45346, 'language': 'english', 'area': 206.06,
         'utf8_string': 'New', 'legibility': 'legible'}, ...}

    imgs: dict keyed by image id (string), e.g.
        {"390310": {'id': 390310, 'set': 'val', 'width': 640,
         'file_name': 'COCO_train2014_000000390310.jpg', 'height': 640}, ...}

    imgToAnns: dict image id -> list of annotation ids (ints), e.g.
        {"540965": [], "260932": [63993, 63994, 63995, 63996, 63997, 63998, 63999], ...}

    cats / info: empty dicts.
    """
    json_path = 'data/coco_textv2/cocotext.v2.json'
    # image_root = 'data/coco_textv2/train2014'

    new_paddle_file = 'data/coco_textv2/train_full_labels.txt'
    new_paddle_file_train = 'data/coco_textv2/train_labels.txt'
    new_paddle_file_val = 'data/coco_textv2/val_labels.txt'
    zj_utils.makedirs(os.path.dirname(new_paddle_file))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_train))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_val))

    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    anns = data['anns']
    imgs = data['imgs']
    imgToAnns = data['imgToAnns']

    with open(new_paddle_file, 'w', encoding='utf-8') as f:
        for image_id, img in imgs.items():
            filename = img['file_name']
            # BUG FIX: the image file name was never interpolated into the path.
            filepath = f'coco_textv2/train2014/{filename}'
            label = []
            # imgToAnns values are int ids; anns is keyed by string ids.
            for ann_id in map(str, imgToAnns[image_id]):
                ann = anns[ann_id]
                item = {'transcription': ann['utf8_string'], 'points': np.array(ann['mask']).reshape(-1, 2).tolist()}
                label.append(item)
            # Skip images without any annotation.
            if not label:
                continue
            f.write(f'{filepath}\t{json.dumps(label, ensure_ascii=False)}\n')
    train_lines, val_lines = train_val_split(new_paddle_file, val_split=0.1)
    with open(new_paddle_file_train, 'w', encoding='utf-8') as f:
        f.writelines(train_lines)
    with open(new_paddle_file_val, 'w', encoding='utf-8') as f:
        f.writelines(val_lines)
    print(f'train_file saved to {new_paddle_file_train}')
    print(f'val_file saved to {new_paddle_file_val}')


def MTWI_2018_transform():
    """Convert MTWI-2018 ground truth into PaddleOCR detection label files.

    Each gt line has 9 comma-separated fields: x1,y1,...,x4,y4,transcription
    (coordinates may be floats).

    Raises:
        ValueError: if a ground-truth line does not have 9 fields.
    """
    data_root = 'data/MTWI_2018'
    new_paddle_file = 'data/MTWI_2018/train_full_labels.txt'
    new_paddle_file_train = 'data/MTWI_2018/train_labels.txt'
    new_paddle_file_val = 'data/MTWI_2018/val_labels.txt'
    zj_utils.makedirs(os.path.dirname(new_paddle_file))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_train))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_val))
    txt_path_lst = zj_utils.get_file_path_list(os.path.join(data_root, 'txt_train'))
    with open(new_paddle_file, 'w', encoding='utf-8') as f:
        for txt_path in txt_path_lst:
            with open(txt_path, 'r', encoding='utf-8') as f_:
                lines = f_.readlines()
            label = []
            for line in lines:
                fields = line.strip().split(',', 8)
                if len(fields) != 9:
                    raise ValueError(f'expected 9 fields, got {len(fields)}: {line!r}')
                # ast.literal_eval safely parses the 8 coordinates (no eval()).
                line_ = [ast.literal_eval(x) if i < 8 else x for i, x in enumerate(fields)]
                item = {'transcription': line_[-1], 'points': np.array(line_[:8]).reshape(4, 2).tolist()}
                label.append(item)
            filename = os.path.join('MTWI_2018', 'image_train',
                                    os.path.splitext(os.path.basename(txt_path))[0] + '.jpg')
            # BUG FIX: the image path was missing from the output line.
            f.write(f'{filename}\t{json.dumps(label, ensure_ascii=False)}\n')
    train_lines, val_lines = train_val_split(new_paddle_file, val_split=0.1)
    with open(new_paddle_file_train, 'w', encoding='utf-8') as f:
        f.writelines(train_lines)
    with open(new_paddle_file_val, 'w', encoding='utf-8') as f:
        f.writelines(val_lines)
    print(f'train_file saved to {new_paddle_file_train}')
    print(f'val_file saved to {new_paddle_file_val}')


def icdar2017_rctw_17_transform():
    """Convert ICDAR2017-RCTW-17 ground truth into PaddleOCR label files.

    Each gt line has 10 comma-separated fields: 8 polygon coordinates, then
    two trailing fields of which the last is the (double-quoted) transcription.

    Raises:
        ValueError: if a ground-truth line does not have 10 fields.
    """
    data_root = 'data/ICDAR2017-RCTW-17'
    new_paddle_file = 'data/ICDAR2017-RCTW-17/train_full_labels.txt'
    new_paddle_file_train = 'data/ICDAR2017-RCTW-17/train_labels.txt'
    new_paddle_file_val = 'data/ICDAR2017-RCTW-17/val_labels.txt'
    sub_dir_lst = ['part1', 'part2', 'part3', 'part4', 'part5', 'part6']
    zj_utils.makedirs(os.path.dirname(new_paddle_file))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_train))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_val))
    with open(new_paddle_file, 'w', encoding='utf-8') as f:
        for sub_dir in sub_dir_lst:
            print(f'process {sub_dir}...')
            data_root_tmp = os.path.join(data_root, sub_dir)
            image_names = [x for x in os.listdir(data_root_tmp) if x.endswith('.jpg')]
            for image_name in tqdm(image_names):
                filename = os.path.join('ICDAR2017-RCTW-17', sub_dir, image_name)
                label_path = os.path.join(data_root_tmp, f'{os.path.splitext(image_name)[0]}.txt')
                # utf-8-sig tolerates (and drops) a leading BOM in the gt files.
                with open(label_path, 'r', encoding='utf-8-sig') as f_:
                    lines = f_.readlines()
                label = []
                for line in lines:
                    fields = line.strip().split(',', 9)
                    if len(fields) != 10:
                        raise ValueError(f'expected 10 fields, got {len(fields)}: {line!r}')
                    try:
                        # ast.literal_eval safely parses numeric/quoted fields and,
                        # like the previous eval(), fails on bare-word text fields.
                        line_ = [ast.literal_eval(x) for x in fields]
                    except (ValueError, SyntaxError):
                        line_ = [ast.literal_eval(x) if i < 8 else x for i, x in enumerate(fields)]
                        # Strip the surrounding quotes kept verbatim by the fallback.
                        if line_[-1].startswith('"') and line_[-1].endswith('"'):
                            line_[-1] = line_[-1][1:-1]
                    item = {'transcription': line_[-1], 'points': np.array(line_[:8]).reshape(4, 2).tolist()}
                    label.append(item)
                # BUG FIX: the image path was missing from the output line.
                # NOTE(review): unlike the other converters, images with an
                # empty label list are still written here — preserved as-is.
                f.write(f'{filename}\t{json.dumps(label, ensure_ascii=False)}\n')
    train_lines, val_lines = train_val_split(new_paddle_file, val_split=0.1)
    with open(new_paddle_file_train, 'w', encoding='utf-8') as f:
        f.writelines(train_lines)
    with open(new_paddle_file_val, 'w', encoding='utf-8') as f:
        f.writelines(val_lines)
    print(f'train_file saved to {new_paddle_file_train}')
    print(f'val_file saved to {new_paddle_file_val}')


def icdar2019_lsvt_transform():
    """Convert ICDAR2019-LSVT full annotations into PaddleOCR label files.

    The gt json maps image name (without extension — TODO confirm against the
    dataset) to a label list which is serialized through unchanged.
    """
    gt_file = 'data/ICDAR2019-LSVT/train_full_labels.json'
    image_root = 'train_full_images'
    new_paddle_file = 'data/ICDAR2019-LSVT/train_full_labels.txt'
    new_paddle_file_train = 'data/ICDAR2019-LSVT/train_train_labels.txt'
    new_paddle_file_val = 'data/ICDAR2019-LSVT/train_val_labels.txt'
    # Create output dirs up front, consistent with the other converters.
    zj_utils.makedirs(os.path.dirname(new_paddle_file))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_train))
    zj_utils.makedirs(os.path.dirname(new_paddle_file_val))
    with open(gt_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    with open(new_paddle_file, 'w', encoding='utf-8') as f:
        for filename, label in tqdm(data.items()):
            # BUG FIX: the image name was never interpolated into the path.
            filepath = f'ICDAR2019-LSVT/{image_root}/{filename}.jpg'
            f.write(f'{filepath}\t{json.dumps(label, ensure_ascii=False)}\n')
    train_lines, val_lines = train_val_split(new_paddle_file, val_split=0.1)
    with open(new_paddle_file_train, 'w', encoding='utf-8') as f:
        f.writelines(train_lines)
    with open(new_paddle_file_val, 'w', encoding='utf-8') as f:
        f.writelines(val_lines)
    print(f'train_file saved to {new_paddle_file_train}')
    print(f'val_file saved to {new_paddle_file_val}')


def train_val_split(label_txt, val_split=0.1):
    """Randomly split the lines of a label file into train/val subsets.

    Args:
        label_txt: path to a label file, one sample per line.
        val_split: fraction of lines assigned to the validation set
            (truncated to an int count).

    Returns:
        (train_lines, val_lines): two disjoint lists covering all lines.
        The shuffle is not seeded, so repeated calls give different splits.
    """
    # Explicit utf-8: label files contain non-ASCII transcriptions.
    with open(label_txt, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    random.shuffle(lines)
    n_val = int(len(lines) * val_split)  # avoid shadowing the param with a count
    return lines[n_val:], lines[:n_val]


def main():
    """Run every dataset conversion in sequence."""
    icdar2019_lsvt_transform()
    icdar2017_rctw_17_transform()
    MTWI_2018_transform()
    # cocotextv2_transform()  # unused
    for lang in (None, 'Chinese'):
        icdar2019_mlt_transform(language_type=lang)


# Run the conversions only when executed as a script, not on import.
if __name__ == '__main__':
    main()
