import argparse
import datetime
import multiprocessing
import multiprocessing as mp
import os
import uuid
from multiprocessing.context import Process
from pathlib import Path

import cv2
import time
from loguru import logger

from Ling.common.utils.OcrUtils import get_file_mapping, get_lines_from_file, filter_line_in, get_random_items, \
    get_char_in, get_dict_from_file, labels_split
from text_renderer.config import get_cfg, GeneratorCfg
from text_renderer.dataset import LmdbDataset, ImgDataset
from text_renderer.render import Render

# Disable OpenCV's internal threading; parallelism comes from our own
# worker processes instead.
cv2.setNumThreads(1)

# Sentinel value once used to stop queue-based writer processes (currently unused).
STOP_TOKEN = "kill"

num_processes = 8  # default worker-process count
# NOTE(review): despite the name, this tuple is passed as the random text
# length range (min, max) to process_rand via process_chars_rand — confirm.
image_size = (9, 10)

# each child process will initialize Render in process_setup
render: Render


class DBWriterProcess:
    """Write rendered image/label pairs into a dataset (LMDB or image folder).

    Despite the name, this no longer runs as a separate process: ``save()`` is
    called synchronously with the pre-rendered ``datas``.
    """

    def __init__(
            self,
            dataset_cls,
            datas,
            generator_cfg: GeneratorCfg,
            log_period: float = 1,
    ):
        # dataset_cls: dataset class such as LmdbDataset or ImgDataset;
        # instantiated as dataset_cls(str(save_dir)) and used as a context manager.
        self.dataset_cls = dataset_cls
        # datas: iterable of {"image": ..., "label": ...} dicts.
        self.datas = datas
        self.generator_cfg = generator_cfg
        # log_period: progress-log interval, as a percentage of num_image.
        self.log_period = log_period

    def save(self):
        """Append every item in ``self.datas`` to the dataset under a unique name.

        Raises:
            Exception: re-raises (with original traceback) anything the dataset
                backend throws, after logging it.
        """
        num_image = self.generator_cfg.num_image
        save_dir = self.generator_cfg.save_dir
        # Convert percentage-style log_period into an absolute item count (>= 1).
        log_period = max(1, int(self.log_period / 100 * num_image))
        try:
            with self.dataset_cls(str(save_dir)) as db:
                exist_count = db.read_count()
                count = 0
                logger.info(f"Exist image count in {save_dir}: {exist_count}")
                start = time.time()
                for data in self.datas:
                    # Random 12-hex-char name (uuid1 groups 1 and 4) instead of a
                    # sequential index, so concurrent writers cannot collide.
                    name_uuid = str(uuid.uuid1())
                    name = name_uuid[0:8] + name_uuid[19:23]
                    db.write(name, data["image"], data["label"])
                    count += 1
                    if count % log_period == 0:
                        logger.info(
                            f"{(count / num_image) * 100:.2f}%({count}/{num_image}) {log_period / (time.time() - start + 1e-8):.1f} img/s"
                        )
                        start = time.time()
                db.write_count(count + exist_count)
                logger.info(f"{(count / num_image) * 100:.2f}%({count}/{num_image})")
                logger.info(f"Finish generate: {count}. Total: {exist_count + count}")
        except Exception:
            logger.exception("DBWriterProcess error")
            # Bare raise preserves the original traceback ("raise e" would not).
            raise


def generate_img():
    """Run the per-process global ``render`` once.

    Returns a ``{"image": ..., "label": ...}`` dict, or None when the
    renderer produced nothing.
    """
    rendered = render()
    if rendered is None:
        return None
    return {"image": rendered[0], "label": rendered[1]}


def process_setup(*args):
    """Per-process initializer: reseed numpy and build the global ``render``.

    args[0] is the render configuration passed to ``Render``.
    """
    global render
    import numpy as np

    # Reseed from OS entropy so each worker process draws a different stream.
    np.random.seed()

    render_cfg = args[0]
    render = Render(render_cfg)
    logger.info(f"Finish setup image generate process: {os.getpid()}")


def parse_args(argv=None):
    """Parse command-line options for the generator.

    Args:
        argv: optional explicit argument list. Defaults to None, in which case
            argparse reads ``sys.argv[1:]`` — so existing ``parse_args()``
            callers behave exactly as before, while tests can pass a list.

    Returns:
        argparse.Namespace with ``config``, ``dataset`` and ``log_period``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", required=False, default='example_data/ling.py', help="python file path")
    parser.add_argument("--dataset", default="img", choices=["lmdb", "img"])
    parser.add_argument("--log_period", type=float, default=10)
    return parser.parse_args(argv)


def process_img(generator_cfgs, num_processes=None):
    """Render images for each generator config and write them to its dataset.

    Args:
        generator_cfgs: list of GeneratorCfg objects (each with num_image,
            save_dir and render_cfg).
        num_processes: accepted for call-site compatibility but unused —
            rendering runs in-process via process_setup/generate_img. The
            default fixes process_chn, which called process_img with a single
            argument and previously raised TypeError.
    """
    # Loop-invariant setup hoisted out of the per-config loop:
    # force=True makes set_start_method safe to call even if already set.
    mp.set_start_method("spawn", force=True)
    args = parse_args()
    dataset_cls = LmdbDataset if args.dataset == "lmdb" else ImgDataset
    for generator_cfg in generator_cfgs:
        process_setup(generator_cfg.render_cfg)
        datas = []
        for _ in range(generator_cfg.num_image):
            data = generate_img()
            if data is not None:
                datas.append(data)
        db_writer_process = DBWriterProcess(
            dataset_cls, datas, generator_cfg, args.log_period
        )
        db_writer_process.save()


# **********************************begin customer****************************#

# Local data roots (Windows paths) for training data and reader resources.
root = 'D:/workspace/python/train_data/'
dictRoot = root + 'train_data_configs/words/'
reader_root = 'D:/ocr/ocr_resources/reader/'


def process_by_type(category):
    """Dispatch data generation by category name; only 'enum' is supported.

    NOTE(review): process_enum requires several more arguments (num_image,
    lines, out_path, num_processes) than are supplied here, so this call would
    raise a TypeError if reached — confirm intended usage before relying on it.

    Raises:
        ValueError: if category is not 'enum'.
    """
    if 'enum' == category:
        process_enum('config/enum.py')
    else:
        raise ValueError("支持类型为:enum")


def process_le_enum(begin_num, end_num, num_image, mapping, dict_in, out_path, num_processes, reader_dict_root):
    """Spawn one worker per character whose frequency is in [begin_num, end_num].

    Only characters present in ``dict_in`` are considered. Each selected
    character is processed by ``process_char_enum`` in its own process; all
    workers are started, then joined.

    Returns:
        The list of characters that were processed.
    """
    selected_chars = []
    workers = []
    for pos, key in enumerate(mapping):
        freq = mapping[key]
        if key in dict_in and begin_num <= freq <= end_num:
            worker = multiprocessing.Process(
                target=process_char_enum,
                args=(key, num_image, dict_in, out_path, pos, num_processes, reader_dict_root),
            )
            workers.append(worker)
            selected_chars.append(key)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return selected_chars


def process_char_enum(char, num_image, dict_in, out_path, pos, num_processes, reader_dict_root):
    """Render enum-style images for one character from its reader corpus file.

    Samples 1.5x the requested line count from ``<reader_dict_root><char>.txt``,
    drops lines containing non-renderable characters, then renders them.

    Returns:
        The sampled-and-filtered corpus lines.
    """
    print('begin process:' + str(pos) + char)
    corpus_path = reader_dict_root + char + '.txt'
    # Oversample so that filtering still leaves enough material.
    sampled = get_random_items(get_lines_from_file(corpus_path), int(num_image * 1.5))
    lines = filter_line_in(sampled, dict_in)
    process_enum('config/enum.py', num_image, lines, out_path, num_processes)
    print('end process:' + str(pos) + char)
    return lines


# def process_le_chars_rand(le_num, num_image, out_path):
#     mapping = get_file_mapping('D:/ocr/ocr_resources/reader/all_words/char_num_sort.txt', '\t')
#     dict_in = get_char_in()
#     all_chars = []
#     for key in mapping:
#         value = mapping[key]
#         if key in dict_in and value <= le_num:
#             all_chars.append(key)
#     process_rand('config/rand.py', all_chars, (15, 20), num_image, out_path)


def process_chars_rand(all_chars, num_image, out_path, num_processes, font_path, info):
    """Render random-composition images built from the given character pool."""
    config_path = 'config/rand.py'
    print('begin process:' + info)
    process_rand(config_path, all_chars, image_size, num_image, out_path, num_processes, font_path)
    print('end process:' + info)


def process_char_enum_by_font(dict_in, char, font_path, reader_dict_root, num_image, out_path, num_processes):
    """Render enum images for ``char`` using a specific font file.

    Reads the character's corpus file, samples a surplus of lines, keeps only
    lines whose characters are all renderable (``dict_in``) and which actually
    contain ``char``, then renders those lines with ``font_path``.

    Returns:
        The sampled corpus lines after the renderability filter (possibly
        empty; also empty when the corpus file has no lines at all).
    """
    print('begin process:' + char)
    lines = get_lines_from_file(reader_dict_root + char + '.txt')
    if len(lines) > 0:
        # Oversample so that filtering still leaves enough material.
        get_line_num = int(num_image * 1.5)
        lines = get_random_items(lines, get_line_num)
        # Keep only lines whose every character can be generated.
        lines = filter_line_in(lines, dict_in)
        # Keep only lines that actually contain the target character.
        lines_filter = [line for line in lines if line.find(char) >= 0]
        if len(lines_filter) > 0:
            # BUG FIX: previously passed the unfiltered `lines` here despite
            # computing lines_filter and gating on it — render only the
            # char-related lines as the filtering code intends.
            process_enum('config/enum.py', num_image, lines_filter, out_path, num_processes, font_path)
        print('end process:' + char)
    return lines


def chn_test(text, num_image, out_path):
    """Smoke-test rendering of ``text`` with the chn config."""
    config_path = 'config/chn.py'
    process_chn(config_path, text, image_size, num_image, out_path)


def process_enum(config, num_image, lines, out_path, num_processes, font_path=None):
    """Render the given corpus ``lines`` as enum-style images into ``out_path``.

    Args:
        config: path to the python render-config file.
        num_image: number of images to generate.
        lines: pre-filtered text lines injected directly as the corpus.
        out_path: output dataset directory.
        num_processes: forwarded to process_img.
        font_path: optional font file; when given, it replaces the corpus
            font list. Defaults to None — previously this parameter was
            required, so 5-argument callers (process_char_enum) raised a
            TypeError.
    """
    generator_cfgs = get_cfg(config)
    cfg = generator_cfgs[0]

    cfg.num_image = num_image
    # NOTE: non-ASCII characters in the save path broke saving — TODO investigate.
    cfg.save_dir = Path(out_path)

    render_cfg = cfg.render_cfg
    corpus = render_cfg.corpus
    corpu = corpus[0]

    corpus_cfg = corpu.cfg

    # Bypass file loading: clear text_paths and inject the in-memory lines.
    corpus_cfg.text_paths = []
    corpu.texts = lines

    if font_path is not None:
        corpu.cfg.font_list_file = font_path
        corpu.font_manager.load_font(font_path, None)

    process_img(generator_cfgs, num_processes)


def process_rand(config, all_chars, char_length, num_image, out_path, num_processes, font_path=None):
    """Render random strings drawn from ``all_chars`` using the given config.

    char_length is the (min, max) random text length; font_path optionally
    overrides the corpus font list.
    """
    generator_cfgs = get_cfg(config)
    first_cfg = generator_cfgs[0]

    first_cfg.num_image = num_image
    # NOTE: non-ASCII characters in the save path broke saving — TODO investigate.
    first_cfg.save_dir = Path(out_path)

    corpus_item = first_cfg.render_cfg.corpus[0]
    corpus_item.cfg.length = char_length

    if font_path is not None:
        corpus_item.cfg.font_list_file = font_path
        corpus_item.font_manager.load_font(font_path, None)

    # Feed the character pool directly instead of reading a chars file.
    corpus_item.cfg.chars_file = None
    corpus_item.chars = all_chars

    process_img(generator_cfgs, num_processes)


def process_chn(config, text, char_length, num_image, out_path, num_processes=0):
    """Render ``text`` (filtered to renderable characters) with the chn config.

    Args:
        config: path to the python render-config file.
        text: source text; lines containing non-renderable characters are
            dropped via filter_line_in/get_char_in.
        char_length: (min, max) text length for the corpus config.
        num_image: number of images to generate.
        out_path: output dataset directory.
        num_processes: new optional parameter (default 0) forwarded to
            process_img — previously this function called process_img with
            only one argument, which raised a TypeError since process_img
            requires two.
    """
    generator_cfgs = get_cfg(config)
    cfg = generator_cfgs[0]

    cfg.num_image = num_image
    # NOTE: non-ASCII characters in the save path broke saving — TODO investigate.
    cfg.save_dir = Path(out_path)

    render_cfg = cfg.render_cfg
    corpus = render_cfg.corpus
    corpu = corpus[0]

    corpu.cfg.length = char_length

    # Feed the filtered text directly instead of reading a chars file.
    corpu.cfg.chars_file = None
    dict_in = get_char_in()
    text = filter_line_in(text, dict_in)
    corpu.text = text

    process_img(generator_cfgs, num_processes)


# **********************************end customer****************************#

if __name__ == "__main__":
    # Entry point. Most of this body is a history of one-off generation runs
    # kept as commented-out recipes; with everything commented out, a plain
    # run only loads the mappings below and prints the elapsed time.
    starttime = datetime.datetime.now()
    out_path = 'D:/ocr/ocr_resources/reader/train_data/train_data_v3'
    font_root = 'D:/ocr/ocr_resources/reader/fonts/'
    dict_in = get_char_in()
    # below 100 occurrences: enum 100, rand 100
    # 100~200 enum_num=len(lines) rand=200-enum_num
    # >200 enum=200 rand=total*10

    # below 100
    # path_le_100 = out_path + '_le_100'
    # all_chars_le_100 = process_le_enum(100, 100, mapping, dict_in, path_le_100, 0)
    # char_show_num = len(all_chars_le_100) * 100
    # total_rand_img_num_le_100 = int(char_show_num / 10)  # enum已经100行*字了,rand只需要100行*字 每行已经生成了10次
    # num_processes = 7  # 由外面生成线程
    # process_chars_rand(all_chars_le_100, total_rand_img_num_le_100, path_le_100, 7)  # 这里由 旧有的多线程保证写最终结果不出问题

    # # 100~200
    # total_line_num_101_200 = 0
    # all_chars_101_200 = []
    # path_101_200 = out_path + '_101_200'
    #
    # process_list = list()
    # for pos, key in enumerate(mapping):
    #     value = mapping[key]
    #     if key in dict_in and 200 >= value > 100:
    #         all_chars_101_200.append(key)
    #         process_list.append(
    #             multiprocessing.Process(target=process_char_enum,
    #                                     args=(key, value, dict_in, path_101_200, pos, 0)))
    #         total_line_num = total_line_num_101_200 + value
    # for process in process_list:
    #     process.start()
    # for process in process_list:
    #     process.join()
    # char_show_num_101_200 = len(
    #     all_chars_101_200) * 200 - total_line_num_101_200  # enum已经value行*字了,rand只需要200行*字-total_line_num 每行已经生成了20次
    # total_rand_img_num_101_200 = int(char_show_num_101_200 / 10)
    # process_chars_rand(all_chars_101_200, total_rand_img_num_101_200, path_101_200, 8)

    # >200
    # all_chars_ge_200 = []
    # path_ge_200 = out_path + '_ge_200'
    #
    # process_list = list()
    # for pos, key in enumerate(mapping):
    #     value = mapping[key]
    #     if key in dict_in and value > 200:
    #         all_chars_ge_200.append(key)
    #         process_list.append(
    #             multiprocessing.Process(target=process_char_enum,
    #                                     args=(key, 190, dict_in, path_ge_200, pos, 0)))
    #         # process_char_enum(key, 190, dict_in, path_ge_200, pos, 0)
    # for process in process_list:
    #     process.start()
    # for process in process_list:
    #     process.join()
    # char_show_num_ge_200 = len(
    #     all_chars_ge_200) * 10  # enum已经value行*字了,rand只需要200行*字-total_line_num 每行已经生成了20次
    # total_rand_img_num_ge_200 = int(char_show_num_ge_200 / 10)
    # process_chars_rand(all_chars_ge_200, total_rand_img_num_ge_200, path_ge_200, 8)

    # text = get_lines_from_file('D:/ocr/ocr_resources/reader/all_words/chn_demo.txt')
    # text_ = ''.join(text)
    # chn_test(text_, 10, out_path)

    # labels_error('D:/ocr/ocr_resources/reader/train_data/',
    #              ['train_data_zy650w_char_101_200', 'train_data_zy650w_char_le_100'])

    # labels_split('D:/ocr/ocr_resources/reader/train_data/', ['train_data_zy650w_char_101_200'], 20000)
    # labels_split('D:/ocr/ocr_resources/reader/train_data/',
    #              ['train_data_zy650w_char_le_100', 'train_data_zy650w_char_ge_200'], 50000)

    # chars = '择一侧腺叶加峡部切除'
    # process_chars_rand(chars, 50, 'D:/ocr/ocr_resources/reader/train_data/bg_test', 0)

    # font_root = 'D:/ocr/ocr_resources/reader/fonts/'
    # files = [f for f in os.listdir(font_root) if f.lower().endswith('ttc') or f.lower().endswith('ttf')]

    # chars = get_dict_from_file(font_root + 'FZSJ-LAOXLDM.ttf.txt')
    # process_chars_rand(chars, 100, 'D:/ocr/ocr_resources/reader/train_data/rand_font', 0,
    #                    font_root + 'FZSJ-LAOXLDM.ttf')

    # process_list = list()
    # for pos, file in enumerate(files):
    #     txt_file = file + '.txt'
    #     chars = get_dict_from_file(font_root + txt_file)
    #     process_list.append(
    #         multiprocessing.Process(target=process_chars_rand,
    #                                 args=(chars, 3000, 'D:/ocr/ocr_resources/reader/train_data/rand_font', 0,
    #                                       font_root + file, file)))
    # for process in process_list:
    #     process.start()
    # for process in process_list:
    #     process.join()

    # for pos, file in enumerate(files):
    #     # print('begin process:' + file)
    #     txt_file = file + '.txt'
    #     chars = get_dict_from_file(font_root + txt_file)
    #     process_chars_rand(chars, 30000, 'D:/ocr/ocr_resources/reader/train_data/rand_font', 8,
    #                        font_root + file, file)

    # labels_split('D:/ocr/ocr_resources/reader/train_data/',
    #              ['rand_font'], 50000)

    # batch = []
    # batches = []
    # for pos, file in enumerate(files):
    #     font_size = len(file)
    #     batch_size = int(font_size) / 16 + 1
    #     for i in range(0, batch_size):
    #         batch_index=i*
    #         batch.append(i * 16 + i)
    #     # print('begin process:' + file)
    #     txt_file = file + '.txt'
    #     chars = get_dict_from_file(font_root + txt_file)
    #     process_chars_rand(chars, 3000, 'D:/ocr/ocr_resources/reader/train_data/rand_font', 5,
    #                        font_root + file, file)
    # # # 100~200
    # total_line_num_101_200 = 0
    # all_chars_101_200 = []
    # path_101_200 = out_path + '_51_200'
    # process_list = list()
    # for pos, key in enumerate(mapping):
    #     value = mapping[key]
    #     if key in dict_in and 200 >= value > 50:
    #         all_chars_101_200.append(key)
    #         process_list.append(
    #             multiprocessing.Process(target=process_char_enum,
    #                                     args=(key, value, dict_in, path_101_200, pos, 0)))
    #         total_line_num = total_line_num_101_200 + value
    # for process in process_list:
    #     process.start()
    # for process in process_list:
    #     process.join()
    #
    # # >200
    # all_chars_ge_200 = []
    # path_ge_200 = out_path + '_ge_200'
    #
    # process_list = list()
    # for pos, key in enumerate(mapping):
    #     value = mapping[key]
    #     if key in dict_in and value > 200:
    #         all_chars_ge_200.append(key)
    #         process_list.append(
    #             multiprocessing.Process(target=process_char_enum,
    #                                     args=(key, 200, dict_in, path_ge_200, pos, 0)))
    #         # process_char_enum(key, 190, dict_in, path_ge_200, pos, 0)
    # for process in process_list:
    #     process.start()
    # for process in process_list:
    #     process.join()
    # ********************************v3 before end*************************#

    # ********************************v3 begin*************************#
    # Active v3 setup: load the character-frequency mappings (tab-separated
    # "char<TAB>count" files); the generation calls themselves are commented out.
    reader_dict_root = reader_root + 'dict_10/'
    reader_dict_root_no_dot = reader_root + 'dict_10_no_dot/'
    mapping = get_file_mapping(reader_root + 'all_words/char_num_sort_10.txt', '\t')
    mapping_no_dot = get_file_mapping(reader_root + 'all_words/char_num_sort_10_no_dot.txt', '\t')
    # # 50条
    # path_le_25 = out_path + '_path_le_25'
    # all_chars_le_25 = process_le_enum(1, 25, 25, mapping, dict_in, path_le_25, 0,
    #                                   reader_dict_root)
    # all_chars_le_25 = process_le_enum(1, 25, 25, mapping_no_dot, dict_in, path_le_25, 0, reader_dict_root_no_dot)
    #
    # # 100条
    # path_26_50 = out_path + '_path_26_50'
    # process_le_enum(26, 50, 50, mapping, dict_in, path_26_50, 0, reader_dict_root)
    # process_le_enum(26, 50, 50, mapping_no_dot, dict_in, path_26_50, 0, reader_dict_root_no_dot)
    #
    # # 200条
    # path_51_100 = out_path + '_path_51_100'
    # process_le_enum(51, 100, 100, mapping, dict_in, path_51_100, 0, reader_dict_root)
    # process_le_enum(51, 100, 100, mapping_no_dot, dict_in, path_51_100, 0, reader_dict_root_no_dot)
    #
    # # 300条
    # path_101_150 = out_path + '_path_101_150'
    # process_le_enum(101, 150, 150, mapping, dict_in, path_101_150, 0,
    #                 reader_dict_root)
    # process_le_enum(101, 150, 150, mapping_no_dot, dict_in, path_101_150, 0, reader_dict_root_no_dot)
    #
    # # 400条
    # path_151_200 = out_path + '_path_151_200'
    # process_le_enum(151, 200, 200, mapping, dict_in, path_151_200, 0,
    #                 reader_dict_root)
    # process_le_enum(151, 200, 200, mapping_no_dot, dict_in, path_151_200, 0, reader_dict_root_no_dot)
    #
    # # 300条
    # path_201_500 = out_path + '_path_201_500'
    # process_le_enum(201, 500, 150, mapping, dict_in, path_201_500, 0,
    #                 reader_dict_root)
    # process_le_enum(201, 500, 150, mapping_no_dot, dict_in, path_201_500, 0, reader_dict_root_no_dot)

    # 200 lines
    # path_ge_500 = out_path + '_path_ge_500'
    # process_le_enum(501, 999999, 50, mapping, dict_in, path_ge_500, 0,
    #                 reader_dict_root)

    # fonts = [f for f in os.listdir(font_root) if f.lower().endswith('ttc') or f.lower().endswith('ttf')]
    # for font in fonts:
    #     # for char in dict_in:
    #     for char in ['一']:
    #         font_dict_in = get_dict_from_file(font_root + font + '.txt')
    #         if char in font_dict_in:
    #             print('begin process:{} with font :{}'.format(char, font))
    #             process_char_enum_by_font(font_dict_in, char, font_root + font,
    #                                       'D:/ocr/ocr_resources/reader/dict_10_no_dot/'
    #                                       , 1, 'D:/ocr/ocr_resources/reader/train_data/process_char_enum_by_font', 0)
    # ********************************v3 end*************************#

    # Report elapsed wall time as H:MM:SS.
    endtime = datetime.datetime.now()
    seconds = (endtime - starttime).seconds
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    print("%d:%02d:%02d" % (h, m, s))
