import argparse
import multiprocessing as mp
import os
from multiprocessing.context import Process
from pathlib import Path

import cv2
import time
from loguru import logger

from Ling.common.utils.OcrUtils import get_file_mapping, get_lines_from_file, get_dict_mapping_value, \
    get_dict_from_file, merge_dict, get_dict_dot, filter_line_in
from text_renderer.config import get_cfg, GeneratorCfg
from text_renderer.dataset import LmdbDataset, ImgDataset
from text_renderer.render import Render

# OpenCV spawns internal threads; cap at 1 so the multiprocessing render
# workers don't oversubscribe the CPU.
cv2.setNumThreads(1)

# Sentinel pushed onto the data queue to tell the writer process to stop.
STOP_TOKEN = "kill"

# each child process will initialize Render in process_setup
render: Render


class DBWriterProcess(Process):
    """Consumer process that drains rendered samples from a queue and
    persists them via the dataset class until STOP_TOKEN arrives.

    Args:
        dataset_cls: dataset class used as a context manager over `save_dir`;
            must provide read_count(), write(name, image, label) and
            write_count(n).
        data_queue: queue carrying ``{"image": ..., "label": ...}`` dicts,
            terminated by STOP_TOKEN.
        generator_cfg: config providing ``num_image`` and ``save_dir``.
        log_period: progress-log interval, expressed as a percentage of
            ``num_image``.
    """

    def __init__(
            self,
            dataset_cls,
            data_queue,
            generator_cfg: GeneratorCfg,
            log_period: float = 1,
    ):
        super().__init__()
        self.dataset_cls = dataset_cls
        self.data_queue = data_queue
        self.generator_cfg = generator_cfg
        self.log_period = log_period

    def run(self):
        num_image = self.generator_cfg.num_image
        save_dir = self.generator_cfg.save_dir
        # Log every `log_period` percent of the target count (at least every image).
        log_period = max(1, int(self.log_period / 100 * num_image))
        # Guard the percentage math against ZeroDivisionError when num_image == 0
        # (e.g. an empty corpus); output is unchanged for num_image > 0.
        pct_base = max(num_image, 1)
        try:
            with self.dataset_cls(str(save_dir)) as db:
                exist_count = db.read_count()
                count = 0
                logger.info(f"Exist image count in {save_dir}: {exist_count}")
                start = time.time()
                while True:
                    m = self.data_queue.get()
                    if m == STOP_TOKEN:
                        logger.info("DBWriterProcess receive stop token")
                        break

                    # Continue numbering after any images already in the DB.
                    name = "{:09d}".format(exist_count + count)
                    db.write(name, m["image"], m["label"])
                    count += 1
                    if count % log_period == 0:
                        logger.info(
                            f"{(count / pct_base) * 100:.2f}%({count}/{num_image}) {log_period / (time.time() - start + 1e-8):.1f} img/s"
                        )
                        start = time.time()
                db.write_count(count + exist_count)
                logger.info(f"{(count / pct_base) * 100:.2f}%({count}/{num_image})")
                logger.info(f"Finish generate: {count}. Total: {exist_count + count}")
        except Exception:
            logger.exception("DBWriterProcess error")
            # Bare raise re-raises with the original traceback intact.
            raise


def generate_img(data_queue):
    """Render one sample via the per-process global `render` and enqueue it.

    Samples that the renderer rejects (None) are silently skipped.
    """
    sample = render()
    if sample is None:
        return
    image, label = sample[0], sample[1]
    data_queue.put({"image": image, "label": label})


def process_setup(*args):
    """Pool initializer: build this worker's global Render from the render cfg.

    Expects the render config as the single positional argument.
    """
    global render
    import numpy as np

    # Reseed so every worker process draws a different random stream.
    np.random.seed()

    render_cfg = args[0]
    render = Render(render_cfg)
    logger.info(f"Finish setup image generate process: {os.getpid()}")


def parse_args():
    """Build and parse the CLI arguments for the data-generation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default='example_data/ling.py', required=False, help="python file path")
    parser.add_argument("--dataset", choices=["lmdb", "img"], default="img")
    # 8 cores total -- leave one free for other work.
    parser.add_argument("--num_processes", default=7, type=int)
    parser.add_argument("--log_period", default=10, type=float)
    return parser.parse_args()


def process_img(generator_cfgs):
    """Run one DB-writer process plus render workers for each generator cfg.

    NOTE(review): reads the module-level `args`, `data_queue` and `dataset_cls`
    bound in the __main__ block -- confirm before reusing from another module.
    """
    for cfg in generator_cfgs:
        writer = DBWriterProcess(dataset_cls, data_queue, cfg, args.log_period)
        writer.start()

        if args.num_processes == 0:
            # Debug path: render inline in the current process.
            process_setup(cfg.render_cfg)
            for _ in range(cfg.num_image):
                generate_img(data_queue)
        else:
            with mp.Pool(
                    processes=args.num_processes,
                    initializer=process_setup,
                    initargs=(cfg.render_cfg,),
            ) as pool:
                for _ in range(cfg.num_image):
                    pool.apply_async(generate_img, args=(data_queue,))
                pool.close()
                pool.join()

        # Signal end-of-stream, then wait for the writer to flush and exit.
        data_queue.put(STOP_TOKEN)
        writer.join()


# **********************************begin custom section****************************#

# Hard-coded Windows paths to local training resources.
# NOTE(review): machine-specific; consider moving to CLI args or a config file.
root = 'D:/workspace/python/train_data/'
dictRoot = root + 'train_data_configs/words/'
reader_root = 'D:/ocr/ocr_resources/reader/'
dict_root = 'D:/ocr/ocr_resources/reader/dict/'


def process_by_type(category):
    """Dispatch a generation run by category name; only 'enum' is supported.

    NOTE(review): process_enum() is defined with four required parameters
    (config, num_image, lines, out_path) but is called here with only the
    config path -- as written this call raises TypeError. Confirm the intended
    arguments before using this entry point.
    """
    if 'enum' == category:
        process_enum('config/enum.py')
    else:
        raise ValueError("支持类型为:enum")


def get_char_in():
    """Return the characters the configured fonts can actually render.

    Merges the whitelist file plus the dot set against the font mapping,
    then strips whitespace control characters from the result.
    """
    # Ensures every char produced downstream is displayable by the fonts.
    font_supported = get_dict_mapping_value()
    # Drop any character the font library cannot display.
    candidates = get_dict_from_file(dictRoot + 'font/in_chars.txt') + get_dict_dot()
    usable = merge_dict(candidates, font_supported)
    for control_char in ('\r', '\n', '\t'):
        usable.remove(control_char)
    return usable


def process_le_enum(le_num, num_image, mapping, dict_in, out_path):
    """Render enum images for every renderable char with frequency <= le_num.

    Returns a tuple (processed_chars, all_corpus_lines_used).
    """
    selected_chars = []
    collected_lines = []
    for char in mapping:
        freq = mapping[char]
        if char in dict_in and freq <= le_num:
            collected_lines += process_char_enum(char, num_image, out_path)
            selected_chars.append(char)
    return selected_chars, collected_lines


def process_char_enum(char, num_image, out_path):
    """Render enum images for one character from its per-char line file.

    NOTE(review): relies on the module-level `dict_in`, which is only bound
    when this script runs as __main__ -- confirm before importing elsewhere.
    """
    line_file = dict_root + char + '.txt'
    usable_lines = filter_line_in(get_lines_from_file(line_file), dict_in)
    process_enum('config/enum.py', num_image, usable_lines, out_path)
    return usable_lines


def process_le_chars_rand(le_num, num_image, out_path):
    """Random-text rendering over every renderable char with frequency <= le_num."""
    freq_map = get_file_mapping('D:/ocr/ocr_resources/reader/all_words/char_num_sort.txt', '\t')
    supported = get_char_in()
    chars = [c for c in freq_map if c in supported and freq_map[c] <= le_num]
    process_rand('config/rand.py', chars, (15, 20), num_image, out_path)


def process_chars_rand(all_chars, num_image, out_path):
    """Shortcut: random-text rendering over an explicit character list."""
    process_rand('config/rand.py', all_chars, (15, 20), num_image, out_path)


def chn_test(text, num_image, out_path):
    """Quick smoke test of the chn (natural text) config on `text`."""
    process_chn('config/chn.py', text, (15, 20), num_image, out_path)


def process_enum(config, num_image, lines, out_path):
    """Render `num_image` enum-corpus images using the given in-memory `lines`."""
    cfgs = get_cfg(config)
    primary = cfgs[0]

    primary.num_image = num_image
    # NOTE: Chinese characters in the save path reportedly break saving -- to investigate.
    primary.save_dir = Path(out_path)

    first_corpus = primary.render_cfg.corpus[0]
    corpus_cfg = first_corpus.cfg

    # Feed text directly instead of loading it from files.
    corpus_cfg.text_paths = []
    first_corpus.texts = lines

    process_img(cfgs)


def process_rand(config, all_chars, char_length, num_image, out_path):
    """Render `num_image` random-text images drawn from `all_chars`.

    `char_length` is a (min, max) range for the generated text length.
    """
    cfgs = get_cfg(config)
    primary = cfgs[0]

    primary.num_image = num_image
    # NOTE: Chinese characters in the save path reportedly break saving -- to investigate.
    primary.save_dir = Path(out_path)

    first_corpus = primary.render_cfg.corpus[0]
    first_corpus.cfg.length = char_length

    # Supply the character pool directly instead of via chars_file.
    first_corpus.cfg.chars_file = None
    first_corpus.chars = all_chars

    process_img(cfgs)


def process_chn(config, text, char_length, num_image, out_path):
    """Render `num_image` images from natural text, filtered to renderable chars."""
    cfgs = get_cfg(config)
    primary = cfgs[0]

    primary.num_image = num_image
    # NOTE: Chinese characters in the save path reportedly break saving -- to investigate.
    primary.save_dir = Path(out_path)

    first_corpus = primary.render_cfg.corpus[0]
    first_corpus.cfg.length = char_length

    # Supply the text directly instead of via chars_file.
    first_corpus.cfg.chars_file = None
    renderable = get_char_in()
    first_corpus.text = filter_line_in(text, renderable)

    process_img(cfgs)


# **********************************end custom section****************************#

if __name__ == "__main__":
    # "spawn" gives each worker a clean interpreter (and matches Windows behavior).
    mp.set_start_method("spawn", force=True)
    # A Manager queue can be shared with spawn'd pool workers.
    manager = mp.Manager()
    data_queue = manager.Queue()
    args = parse_args()

    dataset_cls = LmdbDataset if args.dataset == "lmdb" else ImgDataset

    out_path = 'D:/ocr/ocr_resources/reader/train_data/train_data_zy650w_char'
    mapping = get_file_mapping('D:/ocr/ocr_resources/reader/all_words/char_num_sort.txt', '\t')
    dict_in = get_char_in()
    # Generation plan by character frequency:
    #   < 100:    enum 100, rand 100
    #   100~200:  enum_num = len(lines), rand = 200 - enum_num
    #   > 200:    enum = 200, rand = total * 10

    # Frequency below 100
    # all_chars_le_100, all_lines = process_le_enum(100, 100, mapping, dict_in, out_path + '_le_100')
    # char_show_num = len(all_chars_le_100) * 100
    # total_rand_img_num_le_100 = int(char_show_num / 20)  # enum already made 100 lines per char; rand only needs 100 lines per char, each line rendered 20 times
    # process_chars_rand(all_chars_le_100, total_rand_img_num_le_100, mapping)

    # # Frequency 100~200
    # total_line_num_101_200 = 0
    # all_chars_101_200 = []
    # for key in mapping:
    #     value = mapping[key]
    #     if key in dict_in and 200 >= value > 100:
    #         all_chars_101_200.append(key)
    #         lines = process_char_enum(key, value, out_path + '_101_200')
    #         total_line_num = total_line_num_101_200 + value
    # char_show_num_101_200 = len(
    #     all_chars_101_200) * 200 - total_line_num_101_200  # enum already made `value` lines per char; rand needs 200 lines per char minus total_line_num, each line rendered 20 times
    # total_rand_img_num_101_200 = int(char_show_num_101_200 / 20)
    # process_chars_rand(all_chars_101_200, total_rand_img_num_101_200, out_path + '_101_200')
    # # Frequency > 200
    # all_chars_ge_200 = []
    # for key in mapping:
    #     value = mapping[key]
    #     if key in dict_in and value > 200:
    #         all_chars_ge_200.append(key)
    #         process_char_enum(key, 190, out_path + '_ge_200')
    #
    # char_show_num_ge_200 = len(
    #     all_chars_ge_200) * 10  # enum already made `value` lines per char; rand needs 200 lines per char minus total_line_num, each line rendered 20 times
    # total_rand_img_num_ge_200 = int(char_show_num_ge_200 / 20)
    # process_chars_rand(all_chars_ge_200, total_rand_img_num_ge_200, out_path + '_ge_200')




# NOTE(review): leftover experiment recipe, kept for reference:
# text = get_lines_from_file('D:/ocr/ocr_resources/reader/all_words/chn_demo.txt')
# text_ = ''.join(text)
# chn_test(text_, 10, out_path)
