# 判断字符是否为汉字
import json
import math
import multiprocessing
import os
import random
import shutil
from collections import defaultdict

import cv2
import numpy as np
from PIL import Image, ImageFont, ImageDraw
from bs4 import BeautifulSoup
from fontTools.ttLib import TTFont, TTCollection
from loguru import logger

from Ling.common.utils.QB import Q2B
from Ling.common.utils.StringUtils import format_mapping, clear_r_n, isEmpty

# Root paths for the environments these scripts run against.
root_studio = '/home/aistudio/train_data/'  # Baidu AIStudio training root (paths written into label files)
root_win = 'D:/ocr/ocr_resources/train_data/'  # Windows copy of the training data

root = 'D:/workspace/python/train_data/'  # local workspace root
dictRoot = root + 'train_data_configs/words/'  # dictionary / word-list config files

# Fonts used when rendering dictionaries to check-images.
font_root = root + 'train_data_configs/fonts/chn/'
msyh_font = font_root + "msyh.ttc"
simfang_font = font_root + "simfang.ttf"
simsun_font = font_root + "simsun.ttc"
FZTJLSK_font = font_root + 'FZTJLSK.TTF'


# def is_chinese1(word):
#     if '\u4e00' <= word <= '\u9fff':
#         return True
#     else:
#         return False
#
#
# def is_chinese2(char):
#     try:
#         name = unicodedata.name(char)
#         if 'CJK' in name:
#             return True
#         else:
#             return False
#     except:
#         return False
#
#
# def is_chinese3(word):
#     pattern = re.compile(r'[^\u4e00-\u9fa5]')
#     if pattern.search(word):
#         return False
#     else:
#         return True
#
#
# # 判断字符是否为汉字
# def is_chinese4(word):
#     try:
#         if b'\xb0\xa1' <= word.encode('gb2312') <= b'\xd7\xf9':
#             return True
#         else:
#             return False
#     except:
#         return False
#
#
# # 判断字符是否为汉字
# def is_chinese5(word):
#     pinyin = Pinyin()
#     if pinyin.get_pinyin(word, '').isalpha():
#         return False
#     else:
#         return True
#
#
# def is_chinese(word):
#     return is_chinese1(word) or is_chinese2(word) or is_chinese3(word) or is_chinese4(word) or is_chinese5(word)


def recList(folders, root, root_studio):
    """Build rec label files (img_path<TAB>text) from tmp_labels.txt.

    Reads ``root/<folder>/tmp_labels.txt`` (fixed 8-char image id, one space,
    then the text), normalizes the text (full->half width, char mapping — this
    must happen here, not on image-generation input) and writes
    ``root/<folder>.txt`` with image paths rooted at *root_studio*.
    """
    mappings = get_dict_mapping()
    for folder in folders:
        out_lines = []
        with open(root + folder + '/tmp_labels.txt', 'r', encoding='utf-8') as src:
            for raw in src.readlines():
                img_id = raw[0:8]  # fixed-width image id
                text = raw[9:len(raw)].replace('\n', '').replace('\r', '')
                # normalize: full-width -> half-width, then apply the char mapping
                text = format_mapping(Q2B(text), mappings)
                out_lines.append(root_studio + folder + '/' + img_id + '.jpg\t' + text + '\n')
        with open(root + folder + ".txt", 'w', encoding='utf-8') as target:
            target.writelines(out_lines)


def recListChinese(folders, root, root_studio):
    """Build rec label files from tmp_labels.txt (or rec_gt.txt as fallback),
    keeping only images that pass image_use_able; prints the longest label."""
    max_length = 0
    for folder in folders:
        out_lines = []
        label_name = 'tmp_labels.txt'
        if not os.path.exists(root + folder + '/' + label_name):
            label_name = 'rec_gt.txt'  # alternative label-file name
        with open(root + folder + '/' + label_name, 'r', encoding='utf-8') as src:
            for raw in src.readlines():
                tab = raw.find('\t')
                # NOTE: the image name keeps its trailing '\t' on purpose, so
                # that path + text below is already tab-separated.
                img_name = raw[0:tab + 1]
                text = Q2B(raw[tab + 1:len(raw)].replace('\n', '').replace('\r', ''))
                studio_path = root_studio + folder + '/' + img_name
                local_path = (root_win + folder + '/' + img_name).replace('\t', '')
                max_length = max(max_length, len(text))
                if image_use_able(local_path):
                    out_lines.append(studio_path + text + '\n')
        with open(root + folder + ".txt", 'w', encoding='utf-8') as target:
            target.writelines(out_lines)
    print(max_length)


def jjList():
    """Convert the street-view train.list (tab-separated columns; image name
    in column 3, text after it) into a rec label file <studio_path>\t<text>."""
    root = root_win
    folders = ['train_data_jj']

    for folder in folders:
        out_lines = []
        with open(root + folder + '/train.list', 'r', encoding='utf-8') as src:
            for raw in src.readlines():
                first = raw.find('\t')
                second = raw.find('\t', first + 1)
                third = raw.find('\t', second + 1)
                img_name = raw[second + 1:third]
                text = raw[third:len(raw)].replace('\n', '').replace('\r', '').replace('\t', '')
                text = Q2B(text)  # full-width -> half-width
                out_lines.append(root_studio + folder + '/train_images/' + img_name + '\t' + text + '\n')
        with open(root + folder + ".txt", 'w', encoding='utf-8') as target:
            for item in out_lines:
                target.write(item)


def get_dict_mapping():
    """Read char_mapping.txt ("X->Y" per line) into {X: Y}; missing keys -> ''."""
    mapping = defaultdict(str)
    with open(dictRoot + "char_mapping.txt", 'r', encoding='utf-8') as f:
        for line in clear_r_n(f.readlines()):
            key = line[0]
            # line layout is <src>-><dst>, so the mapped char sits at index 3
            if len(line) > 3:
                mapping[key] = line[3]
    return mapping


def get_file_mapping(file, split):
    """Read "key<split>count" lines from *file* into {key: int(count)}."""
    counts = defaultdict(int)
    with open(file, 'r', encoding='utf-8') as f:
        for line in clear_r_n(f.readlines()):
            if isEmpty(line):
                continue
            parts = line.split(split)
            counts[parts[0]] = int(parts[1])
    return counts


def get_dict_mapping_key():
    """First char of every line in char_mapping.txt (the mapping source chars)."""
    with open(dictRoot + "char_mapping.txt", 'r', encoding='utf-8') as f1:
        return [line[0] for line in f1.readlines()]


def get_dict_mapping_value():
    """Mapped-to char (index 3 of each "X->Y" line) of char_mapping.txt."""
    with open(dictRoot + "char_mapping.txt", 'r', encoding='utf-8') as f:
        lines = clear_r_n(f.readlines())
    return [line[3] for line in lines if len(line) > 3]


def winFile(folders):
    """Derive *_win.txt label files by rewriting aistudio paths to windows ones.

    The original body duplicated the read/replace/write loop for the train and
    test files; it is factored into one helper here.
    """
    root = root_win

    def _convert(src_path, dst_path):
        # Rewrite every line's image path from the aistudio root to the win root.
        with open(src_path, 'r', encoding='utf-8') as src:
            lines = src.readlines()
        with open(dst_path, 'w', encoding='utf-8') as dst:
            dst.writelines(line.replace(root_studio, root_win) for line in lines)

    for folder in folders:
        for suffix in ('_rec_train', '_rec_test'):
            _convert(root + folder + suffix + '.txt',
                     root + folder + suffix + "_win.txt")


def spz2temp(fromFolders: [], targetFolder, split_num=10000):
    """Copy the first *split_num* labelled samples of each source folder into
    *targetFolder* and rebuild its tmp_labels.txt.

    Fix: ``split_num`` previously carried an annotation (``split_num: 10000``)
    instead of a default value, which made the argument mandatory.
    """
    for folder in fromFolders:
        contents = []
        with open(folder + 'tmp_labels.txt', 'r', encoding='utf-8') as labels:
            for line in labels.readlines()[:split_num]:
                contents.append(line)
                fileName = line[0:8]  # fixed-width 8-char image id
                shutil.copy(folder + fileName + '.jpg', targetFolder + fileName + '.jpg')
        # rebuild the target label file (last source folder wins, as before)
        with open(targetFolder + "tmp_labels.txt", 'w', encoding='utf-8') as f:
            f.writelines(contents)


def trainAndTest(folders, total_num=None):
    """Split <folder>.txt labels into _rec_train.txt / _rec_test.txt.

    total_num -- base count for sizing the test set (5% of it); defaults to
    the whole file. Fix: it was declared as an annotation (``total_num: None``)
    instead of a default, making the argument mandatory.
    """
    root = root_win
    for folder in folders:
        with open(root + folder + ".txt", 'r', encoding='utf-8') as f:
            lines = f.readlines()
        total = len(lines)
        train_num = total_num if total_num is not None else total
        test_num = int(train_num * 0.05)
        ran = random.sample(range(0, total), test_num)
        # test set: the randomly sampled lines
        with open(root + folder + "_rec_test.txt", 'w', encoding='utf-8') as f:
            for index in ran:
                f.write(lines[index])
        with open(root + folder + "_rec_train.txt", 'w', encoding='utf-8') as f:
            if len(lines) > 500000:
                # very large sets keep everything (test lines included)
                f.writelines(lines)
            else:
                test_indexes = set(ran)  # O(1) membership instead of list scans
                for pos, line in enumerate(lines):
                    if pos not in test_indexes:
                        f.write(line)


def randomFolder(fromName, targetName, ramdonNum, testNum=1000):
    """Sample *ramdonNum* train lines (with replacement) from <fromName>.txt,
    then *testNum* test lines from that sample, and emit the _win.txt variants.

    Fixes: ``testNum`` carried an annotation (``testNum: 1000``) instead of a
    default value, and the final line indexed the winFile function
    (``winFile[targetName]``) instead of calling it — a TypeError at runtime.
    """
    root = root_win
    results = []

    with open(root + fromName + ".txt", 'r', encoding='utf-8') as f:
        lines = f.readlines()
        lineLen = len(lines)
    # sampling WITH replacement; uniqueness was deliberately skipped for speed
    while len(results) < int(ramdonNum):
        s = random.randint(0, lineLen - 1)
        results.append(lines[s])
    with open(root + targetName + "_rec_train.txt", 'w', encoding='utf-8') as target:
        target.writelines(results)
    with open(root + targetName + "_rec_test.txt", 'w', encoding='utf-8') as fest:
        tests = []
        while len(tests) < int(testNum):
            s = random.randint(0, ramdonNum - 1)
            tests.append(results[s])
        fest.writelines(tests)
    winFile([targetName])


def trainListFromAll(trainNum, testNum, fromName, targetName):
    """Take the first trainNum/testNum lines of <fromName>.txt, rewrite their
    paths to point at <targetName>, and write <targetName>.txt plus the
    _rec_train/_rec_test label files; finally copy the referenced image files.

    NOTE(review): the test lines are a prefix of the train lines, so the test
    set is deliberately contained in the train set (see comment below).
    """
    root = root_win
    with open(root + fromName + ".txt", 'r', encoding='utf-8') as f:
        lines = f.readlines()
        trainLine = []
        testLine = []
        assert trainNum > testNum, '训练数量必须大于测试数量'
        for line in lines[:trainNum]:
            line = line.replace(root_studio + fromName, root_studio + targetName)
            trainLine.append(line)
        for line in lines[:testNum]:
            line = line.replace(root_studio + fromName, root_studio + targetName)
            testLine.append(line)
        with open(root + targetName + ".txt", 'w', encoding='utf-8') as f:
            for line in trainLine:
                f.write(line)
        with open(root + targetName + "_rec_train.txt", 'w', encoding='utf-8') as f:
            for line in trainLine:
                f.write(line)
        with open(root + targetName + "_rec_test.txt", 'w', encoding='utf-8') as f:
            # Taking test lines from OUTSIDE the train range (tail of the file)
            # was tried and gave too low an accuracy, so test ⊂ train here.
            for line in testLine:
                f.write(line)

        winFile([targetName])
        # Also copy the image files referenced by the (de-duplicated) train list.
        withFile = True
        if withFile:
            fileList = list(set(trainLine))
            rootFolder = root_win + targetName
            if not os.path.exists(rootFolder):
                os.makedirs(rootFolder)
            for line in fileList:
                splitIndex1 = line.find('\t')
                fileName = line[0:splitIndex1].replace(root_studio + targetName + '/', '')
                fromFile = root_win + fromName + '/' + fileName
                toFile = root_win + targetName + '/' + fileName
                if not os.path.exists(toFile):
                    shutil.copyfile(fromFile, toFile)


def maxChar(files):
    """Longest label text (in chars) across the given label files under root_win."""
    max_length = 0
    for file in files:
        with open(root_win + file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                tab = line.find('\t')
                label = line[tab + 1:len(line)].replace('\n', '')
                max_length = max(max_length, len(label))
    return max_length


def image_use_able(path):
    """Return True when the image at *path* is usable for rec training.

    Accepts images no bigger than 480 (long side) x 48 (short side), or any
    image whose long/short side ratio is below 10.

    Fix: the original leaked the file handle opened by Image.open; a context
    manager closes it promptly.
    """
    test_length = 480
    test_height = 48

    with Image.open(path) as img:
        img_size = img.size  # (width, height)
    max_size = max(img_size)  # long side
    min_size = min(img_size)  # short side

    rate = int(max_size / min_size)
    return (max_size <= test_length and min_size <= test_height) or rate < 10


def setAndSortList(inList, q2b=False):
    """Return the unique, sorted items of *inList*, dropping None and ''.

    q2b -- when truthy, convert each item from full-width to half-width first.
    Fix: ``q2b`` was declared as an annotation (``q2b: False``) instead of a
    default value, which made the argument mandatory.
    """
    temp = []
    for line in inList:
        if line is not None and line != '':
            if q2b:
                line = Q2B(line)
            temp.append(line)
    return sorted(set(temp))


def merge_dict_files(mergeFiles, stops=None):
    """Concatenate the dictionary files, drop chars listed in *stops*, and
    return a unique sorted char list.

    Fix: ``stops`` was declared as an annotation (``stops: []``) instead of a
    default value; it now defaults to no stop chars.
    """
    chars = []
    for path in mergeFiles:
        with open(path, 'r', encoding='utf-8') as f:
            chars.extend(f.readlines())
    chars = clear_r_n(chars)
    return setAndSortList(sub_dict(chars, stops or []), False)


def merge_dict(dicts1: [], dicts2: []):
    """Union of two char lists as one unique, sorted dictionary."""
    combined = dicts1 + dicts2
    return setAndSortList(combined, False)


def sub_dict(all: [], sub: []):
    """Chars of *all* not present in *sub*, as a unique sorted dictionary."""
    kept = [ch for ch in all if ch not in sub]
    return setAndSortList(kept, False)


def get_dict_from_file(file):
    """Unique sorted set of characters appearing in *file* (newline excluded).

    Fix: ``set.remove('\\n')`` raised KeyError for a file without any newline;
    ``discard`` is a no-op in that case.
    """
    with open(file, 'r', encoding='utf-8') as f:
        chars = set(''.join(f.readlines()))
    chars.discard('\n')
    return setAndSortList(chars, False)


def strOfFile(file):
    """Whole text of *file* as a single string with all newlines removed."""
    with open(file, 'r', encoding='utf-8') as f:
        joined = ''.join(f.readlines())
    return joined.replace('\n', '')


def get_dict_from_files(files: []):
    """Merged unique sorted character dictionary over several files.

    Cleanup: the loop variable no longer shadows the builtin ``dict`` and a
    stray semicolon is gone; behavior is unchanged.
    """
    results = []
    for file in files:
        file_chars = get_dict_from_file(file)
        results = merge_dict(results, file_chars)
    return results


def sub_dictFiles(fileAll: [], fileSub: []):
    """Chars appearing in fileAll's files but absent from fileSub's files."""
    return sub_dict(get_dict_from_files(fileAll), get_dict_from_files(fileSub))


def sub_dictFile(fileAll, fileSub):
    """Chars appearing in fileAll but absent from fileSub."""
    keep = get_dict_from_file(fileAll)
    drop = get_dict_from_file(fileSub)
    return sub_dict(keep, drop)


def process_dict_stop_2_ignore():
    """Normalize stop.txt (unique + sorted) into ignore.txt, one char per line.

    Chars to drop are maintained by hand in stop.txt.
    """
    with open(dictRoot + "stop.txt", 'r', encoding='utf-8') as fr:
        stops = setAndSortList(clear_r_n(fr.readlines()), False)
    with open(dictRoot + "ignore.txt", 'w', encoding='utf-8') as fw:
        fw.writelines(ch + "\n" for ch in stops)


# def process_dict_mapping_barkup_2_mapping():
#     # 不要的填入 stop.txt
#     """mapping_barkup.txt--->mapping.txt"""
#     with open(dictRoot + "char_mapping_barkup.txt", 'r', encoding='utf-8') as fr:
#         barkup = clear_r_n(fr.readlines())
#         mapping = sub_dict(barkup, get_dict_ignore())
#         mapping = setAndSortList(mapping, False)
#         old_mapping = get_dict_mapping()
#         with open(dictRoot + "char_mapping.txt", 'w', encoding='utf-8') as fw:
#             for x in mapping:
#                 value = old_mapping.get(x)
#                 value = x if value is None else value
#                 value = Q2B(value)
#                 fw.write(x + '->' + value + '\n')


def dictLoss(inFile, allFile, losssOutFile):
    """Write the chars of *inFile* that are missing from *allFile* to
    *losssOutFile* (one per line) and return them.

    Cleanup: the local no longer shadows the function's own name, and the
    docstring now matches the actual computation (inFile - allFile).
    """
    missing = sub_dictFile(inFile, allFile)
    with open(losssOutFile, 'w', encoding='UTF-8') as f:
        for ch in missing:
            f.write(ch + "\n")
    return missing


def get_dict_ignore():
    """Chars from ignore.txt plus whitespace chars that must never be indexed."""
    with open(dictRoot + "ignore.txt", 'r', encoding='utf-8') as f1:
        ignored = clear_r_n(f1.readlines())
    ignored.extend([' ', '\r', '\n', '\t'])
    return ignored


def get_dict_dot():
    """Chars from font/in_chars_dot.txt plus whitespace chars."""
    with open(dictRoot + "font/in_chars_dot.txt", 'r', encoding='utf-8') as f1:
        dots = clear_r_n(f1.readlines())
    dots.extend([' ', '\r', '\n', '\t'])
    return dots


def get_dict_ignore_and_dot():
    """Union of the ignore chars and the dot-font chars."""
    return merge_dict(get_dict_ignore(), get_dict_dot())


def outMergeDict():
    # Deprecated: some fonts cannot render every char, so this path was abandoned.
    """Merge several dictionary files into result_all.txt, and write the chars
    not already present in ppocr_keys_v1.txt to result_生僻字.txt."""
    mergeFiles = [
        dictRoot + 'ppocr_keys_v1.txt',
        dictRoot + '3500常用汉字.txt',
        dictRoot + '生僻字.txt',
        dictRoot + 'loss_jj.txt',
        dictRoot + 'zy.txt',
        dictRoot + 'loss.txt',
        dictRoot + 'gs.txt'
    ]  # ,
    stops = get_dict_ignore()
    allDict = merge_dict_files(mergeFiles, [])
    with open(dictRoot + "ppocr_keys_v1.txt", 'r', encoding='utf-8') as f1:
        olds = clear_r_n(f1.readlines())
    # chars that are new relative to the stock ppocr dictionary
    list_spz = sub_dict(allDict, olds)

    # drop ignored/stop chars from both lists
    list_spz = sub_dict(list_spz, stops)
    allDict = sub_dict(allDict, stops)
    with open(dictRoot + "result_生僻字.txt", 'w', encoding='utf-8') as c_spz:
        # Char-list reduction (full->half width, lowercase, simplified chars,
        # dropping spaces/symbols) can shrink 4000+ chars to ~3808 and helps
        # the model converge faster. BUT that reduction belongs in the
        # train/test txt stage: generated images must still contain full-width
        # chars while the txt must not — so the q2b flag here stays False.
        list_spz = setAndSortList(list_spz, False)
        for x in list_spz:
            c_spz.write((x + '\n'))
    with open(dictRoot + "result_all.txt", 'w', encoding='utf-8') as c_all:
        allDict = setAndSortList(allDict, False)
        for x in allDict:
            c_all.write((x + '\n'))


def merge_into_result_all():
    """Rebuild result_all.txt from the in-font chars, the dot chars and the
    mapping values, then render the whole dictionary to an image for a visual
    check.

    Fix: ``list.remove`` raised ValueError when a whitespace char was absent;
    removal is now guarded.
    """
    mapping_values = get_dict_mapping_value()
    dict_in = merge_dict(get_dict_from_file(dictRoot + 'font/in_chars.txt') + get_dict_dot(),
                         mapping_values)
    # strip whitespace entries defensively
    for ws in ('\n', '\r', '\t'):
        if ws in dict_in:
            dict_in.remove(ws)
    with open(dictRoot + "result_all.txt", 'w', encoding='utf-8') as c_all:
        for x in dict_in:
            c_all.write((x + '\n'))
    draw_to_image(msyh_font, ''.join(dict_in), dictRoot + 'result_all.jpg')


def lossDict(fromFiles, currentDict):
    """Chars present in *fromFiles* but absent from the *currentDict* files."""
    return sub_dictFiles(fromFiles, currentDict)


def process_barkup_2_loss():
    """Recompute loss.txt: chars used by the source corpora but missing from
    result_all.txt, with ignored chars excluded. (Unwanted chars go into
    loss_backup first.)"""
    lossFile = 'D:/workspace/python/train_data/train_data_configs/words/loss.txt'
    sources = ['D:/ocr/ocr_resouces/icdar2017rctw_train_v1.2/train.txt',
               'D:/workspace/python/train_data/train_data_configs/words/loss_backup.txt',
               'D:/workspace/python/train_data/train_data_configs/words/gs.txt']
    known = ['D:/workspace/python/train_data/train_data_configs/words/result_all.txt']
    missing = sub_dict(lossDict(sources, known), get_dict_ignore())
    print(missing)
    with open(lossFile, 'w', encoding='UTF-8') as f:
        for ch in missing:
            f.write(ch + "\n")


def getDictFromDictFile(path):
    """Read a one-entry-per-line dictionary file into a list ('\\n' stripped)."""
    with open(path, 'r', encoding='utf-8') as f:
        return [line.replace('\n', '') for line in f.readlines()]


# Corpus volumes consumed by process_zy_spz() below.
zy_docs = [dictRoot + 'zy/第一辑.txt', dictRoot + 'zy/第二辑.txt',
           dictRoot + 'zy/第三辑.txt', dictRoot + 'zy/第五辑.txt',
           dictRoot + 'zy/第六辑.txt', dictRoot + 'zy/第七辑.txt']


def process_zy_spz():
    """Write zy_生僻字.txt: chars of the zy corpus missing from ppocr_keys_v1."""
    rare = sub_dictFiles(
        zy_docs,
        ['D:/workspace/python/train_data/train_data_configs/words/ppocr_keys_v1.txt'])
    print(rare)
    with open(dictRoot + 'zy_生僻字.txt', 'w', encoding='UTF-8') as f:
        for ch in rare:
            f.write(ch + "\n")


def process_Chinese_loss():
    """Write loss_Chinese.txt: Chinese_dataset chars missing from ppocr_keys_v1,
    with ignored chars excluded. (Unwanted chars go into loss_backup first.)"""
    lossFile = 'D:/workspace/python/train_data/train_data_configs/words/loss_Chinese.txt'
    missing = lossDict(['D:/ocr/ocr_resouces/train_data_version2/Chinese_dataset/labels.txt'],
                       ['D:/workspace/python/train_data/train_data_configs/words/ppocr_keys_v1.txt'])
    missing = sub_dict(missing, get_dict_ignore())
    print(missing)
    with open(lossFile, 'w', encoding='UTF-8') as f:
        for ch in missing:
            f.write(ch + "\n")


def write_list_2_dict_file(dicts, file_path):
    """Write one dictionary entry per line to *file_path* (utf-8)."""
    with open(file_path, 'w', encoding='UTF-8') as f:
        f.writelines(entry + "\n" for entry in dicts)


def find_loss_dict_from_loss_files_2_target_file(loss_dict_file, loss_files, loss_target_path, word_length):
    """For every char of *loss_dict_file* found in *loss_files*, extract a
    ~word_length-char context word around it and append it to loss_target_path.

    Fix: the inner ``for find_char in zy_spz`` equality scan was O(dict) per
    character; a set gives O(1) membership. (Duplicate chars in the dict file
    no longer produce duplicate writes — dict files are one-char-per-line.)
    """
    wanted = set(getDictFromDictFile(loss_dict_file))
    with open(loss_target_path, 'w', encoding='utf-8') as w:
        for loss_file in loss_files:
            with open(loss_file, 'r', encoding='utf-8') as f:
                for line in f.readlines():
                    for pos, char in enumerate(line):
                        if char in wanted:
                            word = get_word_of_line(line, pos, word_length)
                            if word not in ('', '\r', '\n', '\r\n'):
                                word = Q2B(word.replace('\n', '').replace('\r', ''))
                                w.write(word + '\n')


def doc_2_char_dict(file, in_char, ignore):
    """Index *file*: map each kept char to every (line_num, pos) it occurs at.

    Lines are first reduced by ``filter_line_in`` (defined elsewhere in this
    project — presumably it keeps only chars from *in_char*; confirm there).
    Chars listed in *ignore* are not indexed. Returns (records, lines) where
    *lines* are the filtered lines the positions refer to.
    """
    records = defaultdict(list)  # char -> [(line_num, pos), ...]
    lines = []
    with open(file, 'r', encoding='utf-8') as f:
        lines_ = f.readlines()
        # Strip unwanted chars once up front so punctuation etc. does not
        # waste positions; later steps pull whatever in_char asks for.
        lines_clear = filter_line_in(lines_, in_char)
        for line_num, line in enumerate(lines_clear):
            lines.append(line)
            for pos, char in enumerate(line):
                if char not in ignore:
                    char_record = records[char]
                    char_record.append((line_num, pos))

    # sorting records by frequency was considered and dropped:
    # records = sorted(records.items(), key=lambda d: d[1])
    return records, lines


def doc_2_words(file, max_word_rate, word_length, out_path, in_char, ignore):
    """For every indexed char of *file*, collect up to *max_word_rate* context
    words of ~word_length chars and dump {char: [word, ...]} as JSON to
    *out_path*; also returns the mapping.

    The original duplicated the per-position extraction in both branches of
    the sampling decision; the branches now only choose WHICH positions to use.
    """
    records, lines = doc_2_char_dict(file, in_char, ignore)
    char_words = defaultdict(list)
    for char in records:
        poses = records[char]
        length = len(poses)
        if length > max_word_rate:
            # Too many occurrences: sample a random subset of positions.
            # (Index 0 is excluded, mirroring the original sampling range —
            # NOTE(review): possibly unintentional; confirm before widening.)
            chosen = [poses[i] for i in random.sample(range(1, length), max_word_rate)]
        else:
            chosen = poses
        for line_num, index in chosen:
            text = get_word_of_line(lines[line_num], index, word_length)
            # stop-char removal / Q2B / mapping must NOT happen here: this text
            # is the raw source used to render images
            if text != '':
                char_words[char].append(text)
    json_str = json.dumps(char_words, indent=4, ensure_ascii=False, sort_keys=True)
    with open(out_path, 'w', encoding='utf-8') as target:
        target.write(json_str)
    return char_words


def get_word_of_line(line, index, word_length):
    """Cut a word_length-wide window of *line* around *index*, jittered by a
    random offset so the target char is not always centered."""
    half = int(word_length / 2)
    # lower bound shifted by one because slicing excludes the right edge
    offset = random.randint(-half + 1, half)
    start = index - half + offset
    end = index + half + offset
    return line[start: end]


def get_folder_name(path):
    """Last path component of *path* (a single trailing '/' is ignored)."""
    trimmed = path[:-1] if path.endswith('/') else path
    return trimmed[find_poses(trimmed, '/') + 1:]


def find_poses(string, str):
    """Index of the LAST occurrence of *str* in *string*, or -1 if absent.

    The original hand-rolled forward-scan loop is exactly ``str.rfind`` —
    including the empty-needle case, where both return ``len(string)``.
    """
    return string.rfind(str)


def get_txt_from_reader_folders(out_root, folders):
    """Concatenate the saved-reader chapter files of each folder into
    <out_root>/<folder>_content.txt, one chapter text per line.

    Fix: get_txt_from_reader_file returns a (exists, texts) TUPLE; the
    original did ``texts = texts + file_text`` on it, which raises
    ``TypeError: can only concatenate list`` on the first file. The tuple is
    now unpacked.
    """
    for folder in folders:
        # chapter files are named like "3~title": sort numerically by prefix
        files = [f for f in os.listdir(folder) if f.find('~') > 0]
        files.sort(key=lambda x: int(x.split('~')[0]))

        texts = []
        for file in files:
            _, file_text = get_txt_from_reader_file(folder + '/' + file)
            texts.extend(file_text)

        name = get_folder_name(folder)
        if not os.path.exists(out_root):
            os.makedirs(out_root)
        with open(out_root + name + '_content.txt', 'w', encoding='utf-8') as result:
            for text in texts:
                result.write(text + '\n')
            # result.close()


def get_txt_from_reader_file(path):
    """Parse one saved reader HTML file and return ``(exists, texts)``.

    Returns (False, []) and prints a warning when the file is missing;
    otherwise (True, [page_text]).

    NOTE(review): ``last_content`` is always '' here, so the comparison below
    never skips anything — looks like leftover de-duplication logic from a
    loop version; confirm before removing.
    """
    texts = []
    exits = False
    last_content = ''
    if os.path.exists(path):
        exits = True
        with open(path, 'r', encoding='utf-8') as f:
            contents = f.readlines()
        html = ''.join(contents)
        soup = BeautifulSoup(html, 'lxml')
        # strip=True trims whitespace around each text fragment
        text = soup.get_text(strip=True)
        if last_content != text:
            last_content = text
            texts.append(text)
    else:
        print('file not exists:' + path)
    return exits, texts


def get_txt_from_reader_file_check(path):
    """True when *path* exists on disk."""
    exists = os.path.exists(path)
    return exists


def get_chars_from_folder_txt_file(root):
    """Collect every character used by the .txt files under *root* into
    root/char.txt, one char per line, sorted (char.txt itself is excluded).

    Cleanup: no more shadowing of builtins ``all``/``str``; the explicit
    ``truncate()`` was redundant because mode 'w' already truncates.
    """
    files = [f for f in os.listdir(root) if (f != 'char.txt' and f.endswith('.txt'))]
    all_contents = []
    for file in files:
        print(file)
        with open(root + '/' + file, 'r', encoding='utf-8') as f:
            all_contents.append(''.join(f.readlines()))
    chars = sorted(set(''.join(all_contents)))
    with open(root + '/' + 'char.txt', 'w', encoding='utf-8') as result:
        result.write('\n'.join(chars))


def process_dict_words_of_file(file_path, words_root, max_word_rate, word_length, in_char, ignore):
    """Extract the char->words mapping of one *_content.txt file into
    <words_root>/<name>_words.txt; skipped when the output already exists."""
    file_name = get_folder_name(file_path).replace('_content.txt', '')
    print('begin process:', file_name)
    words_path = words_root + file_name + '_words.txt'
    # resume-friendly: only process files whose output is missing
    if not os.path.exists(words_path):
        doc_2_words(file_path, max_word_rate, word_length, words_path, in_char, ignore)
    print('end process:', file_name)


def process_dict_of_file(file_path, words_root, ignore):
    """Write the (ignore-filtered) char dictionary of one _content.txt file to
    <words_root>/<name>_dict.txt."""
    file_name = get_folder_name(file_path).replace('_content.txt', '')
    print('begin process:', file_name)
    dicts = sub_dict(get_dict_from_file(file_path), ignore)
    write_list_2_dict_file(dicts, words_root + file_name + '_dict.txt')
    print('end process:', file_name)


# 定义一个带参函数方法，里面设置时分秒，通过计算秒数来获取定时多久
def sleeptime(hour, min, sec):
    """Convert hours/minutes/seconds into a total number of seconds."""
    total = sec
    total += min * 60
    total += hour * 3600
    return total


def process_dict_words_of_folder(folders, max_word_rate, word_length):
    """Fan process_dict_words_of_file out over every *_content.txt file in
    *folders* using a process pool; outputs land in <folder>/words_<len>/."""
    ignore = get_dict_ignore_and_dot()
    # in_char filtering also removes the ignore chars.
    # NOTE(review): get_in_char is defined elsewhere in this project.
    in_char = get_in_char()

    for folder in folders:
        words_root = folder + 'words_' + str(word_length) + '/'
        if not os.path.exists(words_root):
            os.makedirs(words_root)
        files = [f for f in os.listdir(folder) if f.endswith('_content.txt')]

        # Plain threads and a ThreadPoolExecutor were tried here before; the
        # multiprocessing pool below replaced them.
        # The pool keeps `processes` workers busy; omitting the argument would
        # default to the machine's core count.
        pool = multiprocessing.Pool(processes=8)
        for pos, file in enumerate(files):
            file_path = folder + file
            # async: children do not block the main process, which runs on to
            # pool.join()
            pool.apply_async(process_dict_words_of_file, (file_path, words_root, max_word_rate,
                                                          word_length, in_char, ignore))
            # blocking alternative: pool.apply(func, (msg, ))
        print("begin process")
        # close() must be called before join()
        pool.close()
        # after close() no new work can be added; join() waits for all children
        pool.join()
        print("end process")


def process_dict_of_folder(folders):
    """Fan process_dict_of_file out over every *_content.txt file in *folders*
    using a process pool; per-file dictionaries land in <folder>/words_10/."""
    ignore = get_dict_ignore_and_dot()
    for folder in folders:
        words_root = folder + 'words_10/'
        if not os.path.exists(words_root):
            os.makedirs(words_root)
        files = [f for f in os.listdir(folder) if f.endswith('_content.txt')
                 # and (f == '12196472南怀瑾著作全集_content.txt'
                 #      or f == '古诗_content.txt')
                 ]
        # default pool size = machine core count
        pool = multiprocessing.Pool()
        for pos, file in enumerate(files):
            file_path = folder + file
            # async: children do not block the main process, which runs on to
            # pool.join()
            pool.apply_async(process_dict_of_file, (file_path, words_root, ignore))
            # blocking alternative: pool.apply(func, (msg, ))
        print("begin process")
        # close() must be called before join()
        pool.close()
        # after close() no new work can be added; join() waits for all children
        pool.join()
        print("end process")


def merge_dict_folders(folders, out_path):
    """Merge every *_dict.txt under *folders*, drop ignored chars, and write
    the sorted result to *out_path*.

    Cleanup: the loop variable no longer shadows the builtin ``dict``.
    """
    all_dicts = []
    for folder in folders:
        for file in os.listdir(folder):
            if file.endswith('_dict.txt'):
                file_dict = get_dict_from_file(folder + file)
                all_dicts = merge_dict(all_dicts, file_dict)
    # merge_dict already de-duplicates; keep an explicit uniquing pass anyway
    all_dicts = list(set(all_dicts))
    all_dicts = sub_dict(all_dicts, get_dict_ignore())
    all_dicts.sort()
    write_list_2_dict_file(all_dicts, out_path)


def get_txt_from_file(path):
    """Return the whole UTF-8 text content of *path* as one string."""
    with open(path, 'r', encoding='utf-8') as src:
        return src.read()


def get_lines_from_file(path):
    """Return all lines of *path* (newlines preserved), or [] if absent."""
    if not os.path.exists(path):
        return []
    with open(path, 'r', encoding='utf-8') as src:
        return src.readlines()


def merge_words_files(folders, out_path):
    """Merge every ``*_words.txt`` (a JSON char -> list-of-words mapping)
    found under *folders*.

    To bound memory, the accumulator is flushed to ``out_path + <batch>``
    every *batch_size* input files; whatever remains after the loop is
    written to *out_path* itself.

    (Earlier thread-pool / process-pool variants were removed as dead,
    commented-out code.)
    """
    char_words = defaultdict(list)
    batch_size = 100

    for folder in folders:
        files = [f for f in os.listdir(folder) if f.endswith('_words.txt')]

        current_size = 0
        batch = 0
        for pos, file in enumerate(files):
            print('begin process:', pos, file)
            path = os.path.join(folder, file)
            file_words = json.loads(get_txt_from_file(path))
            for key in file_words:
                char_words[key].extend(file_words[key])
            current_size += 1
            print('end process:', pos, path)
            if current_size == batch_size:
                # Flush this batch and start a fresh accumulator.
                json_str = json.dumps(char_words, indent=4,
                                      ensure_ascii=False, sort_keys=True)
                with open(out_path + str(batch), 'w', encoding='utf-8') as target:
                    target.write(json_str)
                char_words = defaultdict(list)
                current_size = 0
                batch += 1

    # The final partial batch goes to out_path itself.
    json_str = json.dumps(char_words, indent=4, ensure_ascii=False, sort_keys=True)
    with open(out_path, 'w', encoding='utf-8') as target:
        target.write(json_str)


def random_word_form_merge(merge_root, max_size):
    """Merge the per-batch word JSON files under *merge_root*, keeping at
    most *max_size* randomly chosen examples per character.

    Deprecated: too slow in practice.
    """
    char_words = defaultdict(list)
    names = [name for name in os.listdir(merge_root) if name != 'all_words.txt']

    for pos, name in enumerate(names):
        print('begin process:', pos, name)
        file_words = json.loads(get_txt_from_file(os.path.join(merge_root, name)))
        for key, items in file_words.items():
            # Combine the new items with anything already collected, then
            # down-sample to the cap.
            char_words[key] = get_random_items(items + char_words[key], max_size)

    json_str = json.dumps(char_words, indent=4, ensure_ascii=False, sort_keys=True)
    with open(merge_root + 'all_words.txt', 'w', encoding='utf-8') as target:
        target.write(json_str)


def folder_2_single_dict_file(folder_path, dict_root):
    """Expand every ``*_words.txt`` under *folder_path* into per-character
    dict files inside *dict_root*."""
    if not os.path.exists(dict_root):
        os.mkdir(dict_root)
    candidates = [name for name in os.listdir(folder_path)
                  if name != 'all_words.txt' and name.endswith('_words.txt')]
    for pos, name in enumerate(candidates):
        print('begin process:', pos, name)
        file_2_single_dict_file(os.path.join(folder_path, name), dict_root)


def file_2_single_dict_file(file_path, dict_root):
    """Append each word of the char -> words JSON at *file_path* to the
    per-character file ``dict_root/<char>.txt``, skipping blank lines."""
    file_words = json.loads(get_txt_from_file(file_path))
    blanks = ('', '\r\n', '\r', '\n')
    for key, words in file_words.items():
        dict_path = os.path.join(dict_root, key) + '.txt'
        with open(dict_path, 'a', encoding='utf-8') as target:
            for line in words:
                if line not in blanks:
                    target.write(line + '\n')


def get_dict_words_from_folder(dicts, dict_folder, max_size, out_path):
    """For each character in *dicts*, sample up to *max_size* example lines
    from ``dict_folder/<char>.txt`` and concatenate them into *out_path*.

    NOTE(review): get_random_items strips trailing newlines, so sampled
    lines are written without separators — confirm this is intended.
    """
    with open(out_path, 'w', encoding='utf-8') as target:
        for char in dicts:  # renamed from `dict`, which shadowed the builtin
            dict_path = dict_folder + char + '.txt'
            if os.path.exists(dict_path):
                lines = get_lines_from_file(dict_path)
                words = get_random_items(lines, min(len(lines), max_size))
                for line in words:
                    target.write(line)


def filter_line_out(out_path, lines, out_dict):
    """Strip every character in *out_dict* (plus tabs) from each line.

    Returns ``(out_path, cleaned_lines)`` so it can run as a pool task whose
    callback writes the result (see ``white_line_2_file``).

    NOTE(review): the original also called ``clear_r_n(line)`` once per
    out_dict character but discarded the return value — a no-op on immutable
    strings — so that dead call was removed. Tabs are only stripped when
    *out_dict* is non-empty, matching the original loop structure, and the
    tab-strip is hoisted out of the per-character loop.
    """
    result = []
    for line in lines:
        for char in out_dict:
            line = line.replace(char, '')
        if out_dict:
            line = line.replace('\t', '')
        result.append(line)
    return (out_path, result)


def filter_line_in(text, in_dict):
    """Keep only characters that appear in *in_dict*.

    *text* may be a list of strings (each line filtered; lines that end up
    empty per ``isEmpty`` are dropped) or a single string (filtered and
    returned as-is, even when empty).

    Performance fix: membership tests now hit a set built once, instead of
    scanning *in_dict* (often a list) for every character. The unused
    filtered/total counters feeding a commented-out logger were removed.
    """
    allowed = set(in_dict)
    if isinstance(text, list):
        out = []
        for item in text:
            kept = ''.join(c for c in item if c in allowed)
            if not isEmpty(kept):
                out.append(kept)
        return out
    return ''.join(c for c in text if c in allowed)


# @synchronized
def white_line_2_file(res):
    """Append the non-blank lines of a ``(out_path, lines)`` result tuple
    to the target file; used as a multiprocessing pool callback."""
    out_path, lines = res
    with open(out_path, 'a', encoding='utf-8') as target:
        target.writelines(line for line in lines if line not in ('', '\n'))


def clear_not_in_char_of_file(file, dict_in, out_path):
    """Strip from *file* every character not present in *dict_in*, writing
    the cleaned lines to *out_path* via a multiprocessing pool.

    The disallowed set is computed as (chars present in file) minus
    *dict_in* — the file's alphabet is a superset of the allowed set, so the
    difference is exactly what must be removed.
    """
    lines = get_lines_from_file(file)
    file_char = get_dict_from_file(file)
    dict_filter = sub_dict(file_char, dict_in)
    if os.path.exists(out_path):
        os.remove(out_path)
    pool = multiprocessing.Pool()
    batch = []
    for line in lines:
        batch.append(line)
        # BUGFIX: append *before* checking the flush condition — the original
        # skipped appending the line that triggered each flush, silently
        # dropping one line per 1000.
        if len(batch) == 1000:
            pool.apply_async(filter_line_out, (out_path, batch, dict_filter),
                             callback=white_line_2_file)
            batch = []
    if batch:  # final partial batch
        pool.apply_async(filter_line_out, (out_path, batch, dict_filter),
                         callback=white_line_2_file)
    pool.close()
    pool.join()


def get_random_items(items, max_size):
    """Return up to *max_size* random items with trailing newlines stripped.

    When ``len(items) <= max_size`` every item is returned in order;
    otherwise a uniform sample of *max_size* distinct items is drawn.

    (Original note kept: random.sample is faster for small draws,
    numpy.random.choice for very large ones — crossover around 20k.)
    """
    items_size = len(items)
    if items_size <= max_size:
        return [item.replace('\n', '') for item in items]
    # BUGFIX: the original sampled range(1, items_size), so items[0] could
    # never be selected; sample over the full index range instead.
    indices = random.sample(range(items_size), max_size)
    return [items[i].replace('\n', '') for i in indices]


def filter_dict_words(word_path, in_dict, out_dict, out_path):
    """Keep only the entries of the word JSON at *word_path* whose key is in
    the dict loaded from *in_dict*, writing the result to *out_path*.

    Originally used to extract needed entries from all.json; superseded now
    that per-character dict files are generated. Keys not in the dict are
    printed for inspection.
    """
    dicts = get_dict_from_file(in_dict)
    file_words = json.loads(get_txt_from_file(word_path))

    char_words = defaultdict(list)
    for pos, key in enumerate(file_words):
        print('begin process:', pos)
        if key in dicts:
            char_words[key] = file_words[key]
        else:
            print(key)

    json_str = json.dumps(char_words, indent=4, ensure_ascii=False, sort_keys=True)
    with open(out_path, 'w', encoding='utf-8') as target:
        target.write(json_str)


def dict_words_2_doc(word_path, out_path):
    """Flatten the char -> words JSON at *word_path* into a document with
    one word per line."""
    file_words = json.loads(get_txt_from_file(word_path))
    with open(out_path, 'w', encoding='utf-8') as target:
        for words in file_words.values():
            for line in words:
                target.write(line + '\n')


def clean_out_char():
    # Placeholder/stub: only emits a debug marker; no cleaning logic yet.
    print(1)


def merge_word(pos, path, char_words):
    """Merge one ``*_words.txt`` JSON file into the shared *char_words*
    accumulator (char -> list of example words)."""
    print('begin process:', pos, path)
    file_words = json.loads(get_txt_from_file(path))
    for key, items in file_words.items():
        char_words[key].extend(items)
    print('end process:', pos, path)


def remove_loss_dict_files(folders):
    """Delete every ``*_loss.txt`` / ``*_loss_word.txt`` file in *folders*.

    Folder paths are expected to end with a path separator (the file name is
    concatenated directly).
    """
    suffixes = ('_loss.txt', '_loss_word.txt')
    for folder in folders:
        for name in os.listdir(folder):
            if name.endswith(suffixes):
                os.remove(folder + name)


def draw_to_image(fontPath, text, out_path):
    """Render *text* onto a white image, wrapping into fixed-width lines,
    and save it as a JPEG (quality 95).

    Layout constants: 35 chars per line, 42 px line height, ~34.4 px per
    char at font size 33.

    NOTE(review): the background is a float64 numpy array, so PIL creates an
    'F'-mode image — confirm the text rendering and the cv2 JPEG round-trip
    behave as intended for that mode.
    """
    text = text.replace('\n', '')
    font_size = 33
    max_line_text_length = 35
    img_line_height = 42
    img_char_width = 34.4
    length = len(text)
    # Number of rendered lines needed for the whole text.
    batch = math.ceil(length / max_line_text_length)

    # numpy shape order is (height, width).
    size = (img_line_height * batch, math.ceil(max_line_text_length * img_char_width))
    background = np.ones(size) * 255
    font = ImageFont.truetype(fontPath, font_size, encoding="utf-8")
    img = Image.fromarray(background)
    draw = ImageDraw.Draw(img)

    for index in range(0, batch):
        index_begin = index * max_line_text_length
        index_end = (index + 1) * max_line_text_length if (index + 1) * max_line_text_length < length else length
        line = text[index_begin:index_end]
        # Draw each wrapped line at its vertical offset.
        draw.text((1, 1 + index * img_line_height), line, font=font, fill='black')
    img = np.array(img)
    cv2.imwrite(out_path, img, [int(cv2.IMWRITE_JPEG_QUALITY), 95])


def draw_large_text_to_image(fontPath, text, out_path):
    """Render very long *text* as a series of images of at most 50k chars,
    suffixing the batch index into *out_path* (``foo.jpg`` -> ``foo0.jpg``)."""
    max_length = 50000
    batch = math.ceil(len(text) / max_length)
    for index in range(batch):
        chunk = text[index * max_length:(index + 1) * max_length]
        draw_to_image(fontPath, chunk, out_path.replace('.jpg', str(index) + '.jpg'))


def is_char_in_font(font, glyph):
    """Return True if *glyph*'s code point appears in any cmap table of
    *font* (a fontTools TTFont-like object)."""
    code = ord(glyph)
    # Idiom: any() over tables; dict membership directly, not via .keys().
    return any(code in table.cmap for table in font['cmap'].tables)


def check_font_chars(font_path, text):
    """Split *text* into the characters the font at *font_path* can render
    and those it cannot; returns ``(in_chars, out_chars)``."""
    font = TTFont(font_path, fontNumber=0)
    in_chars = []
    out_chars = []
    for char in text:
        bucket = in_chars if is_char_in_font(font, char) else out_chars
        bucket.append(char)
    return in_chars, out_chars


def get_font_chars(font_path):
    """Return the set of characters supported by the font file at
    *font_path*; an unreadable font yields an empty set (error logged)."""
    chars_int = set()
    try:
        ttf = _load_ttfont(font_path)
        for table in ttf["cmap"].tables:
            # Idiom: bulk-update from the cmap keys instead of per-item add.
            chars_int.update(table.cmap.keys())
        ttf.close()
    except Exception as e:
        logger.error(f"Load font file {font_path} failed, skip it. Error: {e}")
    # Set comprehension instead of set([...]) (flake8-comprehensions C403).
    return {chr(c_int) for c_int in chars_int}


def _load_ttfont(font_path: str) -> TTFont:
    """Read a ttc, ttf or otf font file and return a TTFont object.

    Returns None for unrecognised extensions (kept for backward
    compatibility — callers wrap this in try/except).
    """
    lowered = font_path.lower()
    if lowered.endswith("ttc"):
        # A ttc is a collection of ttfs; assume all share supported chars.
        ttc = TTCollection(font_path)
        return ttc.fonts[0]
    # BUGFIX: extension matching is now fully case-insensitive — the
    # original special-cased "TTF" but missed "TTC"/"OTF"/mixed case.
    if lowered.endswith(("ttf", "otf")):
        ttf = TTFont(
            font_path, 0, allowVID=0, ignoreDecompileErrors=True, fontNumber=-1
        )
        return ttf
    return None


def check_font_files(font_path, files):
    """Check which characters across *files* the font at *font_path* can and
    cannot render; returns two sorted, de-duplicated lists
    ``(in_chars, out_chars)``."""
    in_chars = []
    out_chars = []
    for file in files:
        supported, unsupported = check_font_chars(font_path, strOfFile(file))
        in_chars.extend(supported)
        out_chars.extend(unsupported)

    in_chars = sorted(set(in_chars))
    out_chars = sorted(set(out_chars))
    return in_chars, out_chars

    # def char_in_font(char: str, fontfile: str) -> bool:


#     """判断字符是否在字体里
#
#     Args:
#         char (str): 单字符文本
#         fontfile (str): 字体文件
#
#     Returns:
#         bool: 是否在字体里
#     """
#     code = char.encode("unicode-escape").decode()
#     if "\\u" in code:
#         code = "uni" + code[2:].upper()
#     font = TTFont(fontfile)
#     glyf = font["glyf"]
#     if not glyf.has_key(code):
#         return False
#     return len(glyf[code].getCoordinates(0)[0]) > 0
#
#     font = TTFont(msyh_font)
#     unicode_map = font['cmap'].tables[0].ttFont.getBestCmap()
#     glyf_map = font['glyf']
#     words = '一二龍三四'
#     for word in words:
#         if ord(word) in unicode_map and len(glyf_map[unicode_map[ord(word)]].getCoordinates(0)[0]) > 0:
#             print(f'字体库中有：【{word}】这个汉字')
#             continue
#         print(f'字体库没有：【{word}】这个汉字')


def check_all_font_of_folder(font_folder, text, out_root):
    """Recursively scan *font_folder* for ttf/ttc fonts able to render
    *text* (originally used to probe the Windows fonts directory).

    For each font covering more than 100 characters, the supported chars
    are written to ``out_root/<font>.txt`` and the font file is copied
    alongside. Existing outputs are not regenerated.
    """
    if isinstance(text, str):
        text = text.replace('\n', '')
    files = [f for f in os.listdir(font_folder)]
    for file in files:
        if file.lower().endswith('ttc') or file.lower().endswith('ttf'):
            out_path = out_root + file + '.txt'
            print('begin process:{}'.format(file))
            if not os.path.exists(out_path):
                font_path = font_folder + file
                in_chars, out_chars = check_font_chars(font_path, text)
                if len(in_chars) > 100:
                    # BUGFIX: capture the return value — the sibling
                    # gen_all_font_of_folder assigns it (s = setAndSortList(...));
                    # discarding it here left in_chars un-deduplicated/unsorted.
                    in_chars = setAndSortList(in_chars, False)
                    with open(out_path, 'w', encoding='utf-8') as target:
                        target.write('\n'.join(in_chars))
                    src = font_path
                    dst = out_root + file
                    if not os.path.exists(dst):
                        shutil.copy(src, dst)
        elif os.path.isdir(font_folder + file):
            check_all_font_of_folder(font_folder + file + '/', text, out_root)


def gen_all_font_of_folder(font_folder, text, out_root, ow=False):
    """Recursively generate per-font supported-character dict files.

    For each ttf/ttc under *font_folder* that supports enough characters
    (>6000 raw, >16660 after de-duplication) and is not blacklisted, write
    one character per line to ``out_root/<font>.txt`` and copy the font file
    alongside. Pass ``ow=True`` to overwrite existing outputs.
    """
    if isinstance(text, str):
        text = text.replace('\n', '')
    files = [f for f in os.listdir(font_folder)]
    for file in files:
        if file.lower().endswith('ttc') or file.lower().endswith('ttf'):
            out_path = out_root + file + '.txt'
            print('begin process:{}'.format(file))
            if not os.path.exists(out_path) or ow:
                font_path = font_folder + file
                in_chars, out_chars = check_font_chars(font_path, text)
                if len(in_chars) > 6000:
                    s = ''.join(in_chars).encode('utf-8', 'ignore').decode('utf-8')
                    s = setAndSortList(s, False)
                    # Fonts known to render poorly or lack needed glyphs.
                    blacklist = ['by-鹿亡玫瑰海.ttf', '【晚歌】夏天的风.ttf',
                                 'Dreamofgirl-【优优】美人鱼.ttf', 'Ev - YunYou-钟仁是学霸.ttf',
                                 'HanziPen SC-我点我点我点.ttf', 'Heiti SC-爱心中文字体.ttf',
                                 'HYXiaoKangF-彩虹糖.ttf', 'J007-日文毛笔.ttf',
                                 'J015-hkgyokk.ttf', 'J024-バジョカ廉書体.ttf', 'J025-ＴＡ雫０１.TTF',
                                 'J026-ＴＡなすび０１.TTF', 'J028-大髭113.ttf', 'J028-大髭115.ttf',
                                 'J046-懐風体.TTF', 'SetoFont【墨墨】念汐.ttf', 'STHeiti8.ttc',
                                 'YueYuan  Belle【阿萌】倾城.ttf', '爱心中文智能手机专用字体.TTF',
                                 '白舟草书.TTF', '白舟古印体.TTF', '白舟角崩白.TTF',
                                 '白舟鯨海酔侯書体.ttf', '白舟隶书.TTF',
                                 '白舟行书.TTF', '超世纪粗方篆.TTF',
                                 '方正悠黑简体.TTF',
                                 # does not support 㐶
                                 '方正悠黑系列_503L.ttf',
                                 '方正悠黑系列_504L.ttf',
                                 '方正悠黑系列_506L.ttf',
                                 '方正悠黑系列_508R.ttf',
                                 '方正悠黑系列_509R.ttf',
                                 '方正悠黑系列_510M.ttf',
                                 '方正悠黑系列_511M.ttf',
                                 '方正悠黑系列_512B.ttf',
                                 '方正悠黑系列_513B.ttf',
                                 ]
                    if len(s) > 16660 and file not in blacklist and '白舟' not in file:
                        with open(out_path, 'w', encoding='utf-8') as target:
                            for char in s:
                                target.write(char + '\n')
                        src = font_path
                        dst = out_root + file
                        if not os.path.exists(dst):
                            shutil.copy(src, dst)
        elif os.path.isdir(font_folder + file):
            # BUGFIX: the original recursive call dropped `text`, passing
            # out_root as the text argument; forward all parameters.
            gen_all_font_of_folder(font_folder + file + '/', text, out_root, ow)


def parse_text_render_labels_json(folders, root, root_studio):
    """Convert a text_render ``labels.json`` per folder into a tab-separated
    label file where each line is ``<studio_image_path>\\t<label>``."""
    mappings = get_dict_mapping()

    for folder in folders:
        content = get_txt_from_file(root + folder + '/labels.json')
        labels = json.loads(content)['labels']
        folder_name = get_folder_name(folder)
        with open(root + folder + '.txt', 'w', encoding='utf-8') as target:
            # e.g. "000000000": "辜挨打。这是一起规模罕见的医患冲突，持有"
            for key, value in labels.items():
                value = format_mapping(value, mappings)
                target.write(root_studio + folder_name + '/images/'
                             + str(key) + '.jpg\t' + value + '\n')


def parse_text_render_labels_txt(folders, root, root_studio):
    """Convert per-folder label txt files into studio-rooted label files.

    The first existing candidate among labels_split.txt, rec_gt.txt and
    labels.txt is used; each line is rewritten with the studio image path,
    a guaranteed image extension and a trailing newline.
    """
    mappings = get_dict_mapping()

    for folder in folders:
        candidates = ('/labels_split.txt', '/rec_gt.txt', '/labels.txt')
        file_path = root + folder + candidates[0]
        for candidate in candidates[1:]:
            if os.path.exists(file_path):
                break
            file_path = root + folder + candidate
        lines = get_lines_from_file(file_path)

        folder_name = get_folder_name(folder)
        with open(root + folder + '.txt', 'w', encoding='utf-8') as target:
            for line in lines:
                parts = line.split('\t')
                name = str(parts[0])
                value = format_mapping(parts[1], mappings)
                suffix = '' if name.endswith('.jpg') or name.endswith('.png') else '.jpg'
                newline = '' if value.endswith('\n') else '\n'
                target.write(root_studio + folder_name + '/' + name + suffix
                             + '\t' + value + newline)


def get_char_and_line_num(root, out_path):
    """For every per-character dict file in *root*, record its first
    character and its line count; write ``char\\tcount`` rows to *out_path*,
    sorted by ascending count."""
    mapping = defaultdict(int)
    for name in os.listdir(root):
        # The file name starts with the character it belongs to.
        mapping[name[0:1]] = len(get_lines_from_file(root + name))
    mapping = sort_mapping_by_value(mapping)
    with open(out_path, 'w', encoding='utf-8') as f:
        for key, num in mapping.items():
            f.write(key + '\t' + str(num) + '\n')


def sort_mapping_by_key(mapping):
    """Reorder *mapping* in place so iteration follows ascending key order;
    returns the same (mutated) mapping object."""
    ordered = sorted(mapping.items())
    mapping.clear()
    mapping.update(ordered)
    return mapping


def sort_mapping_by_value(mapping):
    """Reorder *mapping* in place so iteration follows ascending value
    order; returns the same (mutated) mapping object."""
    ordered = sorted(mapping.items(), key=lambda kv: kv[1])
    mapping.clear()
    mapping.update(ordered)
    return mapping


def sort_char_and_line_num(path, out_path):
    """Re-sort an existing ``char\\tcount`` file by ascending count and
    write the result to *out_path*."""
    mapping = sort_mapping_by_value(get_file_mapping(path, '\t'))
    with open(out_path, 'w', encoding='utf-8') as f:
        for key, num in mapping.items():
            f.write(key + '\t' + str(num) + '\n')


def get_char_num_mapping(path, begin, end, out_path, filter_dict):
    """Write ``char\\tcount`` rows whose count lies within [begin, end].

    When *filter_dict* is non-empty, only characters it contains are kept;
    a None/empty *filter_dict* keeps everything. (The original duplicated
    the range check across both branches — deduplicated here.)
    """
    mapping = get_file_mapping(path, '\t')
    with open(out_path, 'w', encoding='utf-8') as f:
        for key, value in mapping.items():
            if filter_dict and len(filter_dict) > 0 and key not in filter_dict:
                continue
            if begin <= value <= end:
                f.write(key + '\t' + str(value) + '\n')


def labels_error(root, folders):
    """Compare labels.txt against the images on disk: move every referenced
    image into ``images_barkup/`` and print the path of any missing one."""
    for folder in folders:
        lines = get_lines_from_file(root + folder + '/labels.txt')
        target_root = root + folder + '/images_barkup/'
        if not os.path.exists(target_root):
            os.mkdir(target_root)
        for line in lines:
            key = line.split('\t')[0]
            source = root + folder + '/images/' + key + '.jpg'
            if not os.path.exists(source):
                print(source)
                continue
            shutil.move(source, target_root + key + '.jpg')


def labels_split(root, folders, spilt_num, start_index=0):
    """Split an oversized ``images/`` folder into chunks of *spilt_num*.

    Images are moved into ``images_<n>/`` sub-folders (numbering starts at
    *start_index*) and a rewritten label file ``labels_split.txt`` — with
    paths prefixed by the new sub-folder — is produced. Missing images are
    printed and skipped.
    """
    for folder in folders:
        lines = get_lines_from_file(root + folder + '/labels.txt')
        length = len(lines)
        # BUGFIX: ceiling division — the original `int(length/spilt_num) + 1`
        # created one extra, empty images_ folder whenever length was an
        # exact multiple of spilt_num.
        batch = (length + spilt_num - 1) // spilt_num

        with open(root + folder + '/labels_split.txt', 'w', encoding='utf-8') as f:
            for i in range(batch):
                img_index = start_index + i
                target_root = root + folder + '/images_' + str(img_index) + '/'
                if not os.path.exists(target_root):
                    os.mkdir(target_root)
                for j in range(spilt_num):
                    index = i * spilt_num + j
                    if index >= length:
                        break
                    line = lines[index]
                    key = line.split('\t')[0]
                    source = root + folder + '/images/' + key + '.jpg'
                    if os.path.exists(source):
                        shutil.move(source, target_root + key + '.jpg')
                        f.write('images_' + str(img_index) + '/' + line)
                    else:
                        print(source)


def get_in_char():
    """Load the set of characters known to be renderable by the fonts."""
    return get_dict_from_file(dictRoot + 'font/in_chars.txt')


def get_char_in():
    """Build the whitelist of characters guaranteed to display correctly in
    the font library.

    Merges the font-supported chars, the "dot" (punctuation) chars and the
    mapping target values, then strips whitespace control characters.
    """
    mapping_values = get_dict_mapping_value()
    dict_in = merge_dict(get_in_char() + get_dict_dot(),
                         mapping_values)
    # Robustness: list.remove() raises ValueError when the element is
    # absent — guard each control char instead of assuming it is present.
    for control in ('\r', '\n', '\t'):
        if control in dict_in:
            dict_in.remove(control)
    return dict_in


def parse_char_num_mapping_of_file(file_path, out_path):
    """Count character frequencies in *file_path* and write them, sorted by
    ascending count, as ``char\\tcount`` lines to *out_path*."""
    char_num_mapping = defaultdict(int)
    with open(file_path, 'r', encoding='utf-8') as src:
        for line in src:
            for char in line:
                char_num_mapping[char] += 1
    # sort_mapping_by_value mutates the mapping in place.
    sort_mapping_by_value(char_num_mapping)
    with open(out_path, 'w', encoding='utf-8') as f:
        for key, num in char_num_mapping.items():
            f.write(key + '\t' + str(num) + '\n')

# draw_large_text_to_image('D:/ocr/ocr_resources/reader/fonts/Deng.ttf', get_txt_from_file(
#     'D:/workspace/python/train_data/train_data_configs/words/font/in_chars_dot.txt'),
#                          'D:/ocr/ocr_resources/reader/fonts/Deng.ttf.jpg')
