# process_Chinese_loss()
# genImage()
# dictStop()
# outMergeDict()
# dictOfFile('D:/ocr/ocr_resouces/train_data/train_data_jj.txt')
# dictLoss = dictLoss('D:/ocr/ocr_resouces/train_data/train_data_jj.txt',
#                     'D:/workspace/python/train_data/train_data_configs/words/result_all.txt',
#                     'D:/workspace/python/train_data/train_data_configs/words/loss_jj.txt')
# print(dictLoss)
# process_barkup_2_loss()
#
# process_zy_spz()

# for file in zy_docs:
#     get_zy_spz_docs(file)

# ppocr_keys_v1 比 Chinese_dataset 少 '哐', '忸', '怩', '怵', '懑', '揄', '揶', '搡', '犒', '诌', '趄', '趔', '趿', '蹚'
# loss = subDictFile('D:/ocr/ocr_resouces/Chinese_dataset/labels.txt',
#                    dictRoot + "ppocr_keys_v1.txt"
#                    )

# Chinese_dataset 比 ppocr_keys_v1 少 4060 也没有太多参考作用
# loss = subDictFile(dictRoot + "ppocr_keys_v1.txt",
#                    'D:/ocr/ocr_resouces/Chinese_dataset/labels.txt'
#                    )

# Chinese_dataset 包含在 result_all.txt中
# loss = subDictFile('D:/ocr/ocr_resouces/Chinese_dataset/labels.txt',
#                    dictRoot + "result_all.txt"
#                    )

# result_all 比 Chinese_dataset 多16130
# loss = subDictFile(dictRoot + "result_all.txt",
#                    'D:/ocr/ocr_resouces/Chinese_dataset/labels.txt'
#                    )

# 2579
# dict = dictOfFile('D:/ocr/ocr_resouces/Chinese_dataset/labels.txt')
# print(len(dict))

# 语料库覆盖了ppocr_keys_v1
# loss = subDictFile(dictRoot + "ppocr_keys_v1.txt",
#                    'D:/ocr/ocr_resouces/train_data/train_data_cyz7w/tmp_labels.txt',
#                    )

# train_data_zy20w比ppocr_keys_v1少 2115 zy20w 无作用
# loss = subDictFile(
#     dictRoot + "ppocr_keys_v1.txt", 'D:/ocr/ocr_resouces/train_data_version2/train_data_zy20w/tmp_labels.txt'
# )

# 少54 ['乇', '佇', '偝', '偞', '傎', '凥', '厣', '唺', '啌', '嘏', '夤', '屦', '廻', '恲', '挢', '挶', '摠', '晻', '棲', '槨', '欻', '湲', '潲', '犘', '猊', '猋', '皁', '矒', '磄', '稺', '篘', '籦', '罾', '羷', '肐', '胹', '膫', '茒', '蔌', '褫', '覯', '觕', '豈', '跡', '蹀', '躞', '邶', '鞶', '颒', '餈', '騐', '魒', '鮝', '鰂']
# loss = subDictFile(
#     dictRoot + "zy_生僻字.txt", 'D:/ocr/ocr_resouces/train_data/train_data_zywbspz20w/tmp_labels.txt'
# )

# 少2572
# loss = subDictFile(
#     dictRoot + "ppocr_keys_v1.txt", 'D:/ocr/ocr_resouces/train_data/train_data_zywbspz20w/tmp_labels.txt'
# )

# print(len(loss))
# print(loss)
#
# loss = subDictFiles(
#     [
#         dictRoot + '南怀瑾/12196472南怀瑾著作全集_content.txt',
#     ],
#     [dictRoot + "result_all.txt"])
# loss = subDict(loss, get_dict_ignore())
# write_list_2_dict_file(loss, dictRoot + '南怀瑾_loss.txt')
#

# loss = subDictFiles(
#     [
#         'D:/ocr/ocr_resouces/文本/words_output.txt',
#     ],
#     [dictRoot + "result_all.txt"])
# print(len(loss))
# loss = subDict(loss, get_dict_ignore())
# # print(loss)
# write_list_2_dict_file(loss, dictRoot + 'result_all_words_output_loss.txt')

# 11519
# loss = subDictFiles(
#     [dictRoot + "result_all.txt"],
#     [
#         'D:/ocr/ocr_resouces/文本/words_output.txt',
#     ])
# # print(len(loss))
# loss = subDict(loss, get_dict_ignore())
# # # print(loss)
# write_list_2_dict_file(loss, dictRoot + 'result_all_words_output_loss.txt')

# loss = subDictFiles(
#     [
#         dictRoot + '医著大成/11899981曹颖甫医著大成_content.txt',
#         dictRoot + '医著大成/11899982费伯雄医著大成_content.txt',
#         dictRoot + '医著大成/11899997恽铁樵医著大成_content.txt',
#         dictRoot + '医著大成/11899998张锡纯医著大成_content.txt',
#         dictRoot + '医著大成/11899999丁甘仁医著大成_content.txt',
#         dictRoot + '医著大成/12873131_content.txt',
#     ],
#     [dictRoot + "result_all.txt"])
# print(len(loss))
# loss = subDict(loss, get_dict_ignore())
# # print(loss)
# write_list_2_dict_file(loss, dictRoot + '医著大成_loss.txt')

# find_loss_dict_from_loss_files_2_target_file(dictRoot + 'loss_dict/loss_南怀瑾.txt',
#                                              [dictRoot + '南怀瑾/12196472南怀瑾著作全集_content.txt'],
#                                              dictRoot + 'loss_words/loss_word_南怀瑾.txt')
#
# find_loss_dict_from_loss_files_2_target_file(dictRoot + 'loss_dict/loss_words_output.txt',
#                                              ['D:/ocr/ocr_resouces/文本/words_output.txt'],
#                                              dictRoot + 'loss_words/loss_word_words_output.txt')

# find_loss_dict_from_loss_files_2_target_file(dictRoot + 'loss_dict/loss_医著大成.txt',
#                                              [
#                                                  dictRoot + '医著大成/11899981曹颖甫医著大成_content.txt',
#                                                  dictRoot + '医著大成/11899982费伯雄医著大成_content.txt',
#                                                  dictRoot + '医著大成/11899997恽铁樵医著大成_content.txt',
#                                                  dictRoot + '医著大成/11899998张锡纯医著大成_content.txt',
#                                                  dictRoot + '医著大成/11899999丁甘仁医著大成_content.txt',
#                                                  dictRoot + '医著大成/12873131_content.txt',
#                                              ],
#                                              dictRoot + 'loss_words/loss_word_医著大成.txt')

# doc_2_words(dictRoot + '医著大成/11899981曹颖甫医著大成_content.txt', 100, 25,
#             dictRoot + '医著大成/11899981曹颖甫医著大成_words.txt')
#
# doc_2_words(dictRoot + '医著大成/11899982费伯雄医著大成_content.txt', 100, 25,
#             dictRoot + '医著大成/11899982费伯雄医著大成_words.txt')
#
# doc_2_words(dictRoot + '医著大成/11899982费伯雄医著大成_content.txt', 100, 25,
#             dictRoot + '医著大成/11899982费伯雄医著大成_words.txt')
#
# doc_2_words(dictRoot + '医著大成/11899997恽铁樵医著大成_content.txt', 100, 25,
#             dictRoot + '医著大成/11899997恽铁樵医著大成_words.txt')
#
# doc_2_words(dictRoot + '医著大成/11899998张锡纯医著大成_content.txt', 100, 25,
#             dictRoot + '医著大成/11899998张锡纯医著大成_words.txt')
#
# doc_2_words(dictRoot + '医著大成/11899999丁甘仁医著大成_content.txt', 100, 25,
#             dictRoot + '医著大成/11899999丁甘仁医著大成_words.txt')
#
# doc_2_words(dictRoot + '医著大成/12873131_content.txt', 100, 20,
#             dictRoot + '医著大成/12873131_words.txt')

# doc_2_words(dictRoot + '南怀瑾/12196472南怀瑾著作全集_content.txt', 100, 25,
#             dictRoot + '南怀瑾/12196472南怀瑾著作全集_words.txt')

# json_str = json.dumps(['streaming API'])
# print(json_str)

# 生成的words文件字符串比较
# chars1 = dictOfFile(dictRoot + '医著大成/12873131_content.txt')
# chars2 = dictOfFile(dictRoot + '医著大成/12873131_words.txt')

# loss = subDict(chars1, chars2)
# print(loss)

# for l in loss:
#     print(is_chinese(l))

# all_char = dictOfFile(dictRoot + 'result_all.txt')
# for char in all_char:
#     if not is_chinese(char):
#         print(char)

# process_dict_mapping_barkup_2_mapping()
# get_dict_mapping()

# text = '良可慨耳！炳章按：龟鹿二仙胶，即郑氏所谓龟、鹿加'
# result = mapping_chars(text, get_dict_mapping())
# print(result)

# find_loss_dict_from_loss_files_2_target_file(dictRoot + 'loss_dict/loss_南怀瑾.txt',
#                                              [dictRoot + '南怀瑾/12196472南怀瑾著作全集_content.txt'],
#                                              dictRoot + 'loss_words/loss_word_南怀瑾.txt')

# remove_loss_dict_files(['D:/ocr/ocr_resources/reader/words/'])
# merge_loss_dict_files(['D:/ocr/ocr_resources/reader/words/'], dictRoot + 'loss_dict/loss_336.txt')
#
# dict336 = dictOfFile(dictRoot + 'loss_dict/loss_336.txt')
# chinese_dict = []
# none_chinese_dict = []
# for char in dict336:
#     if is_chinese(char):
#         chinese_dict.append(char)
#     else:
#         none_chinese_dict.append(char)
# write_list_2_dict_file(chinese_dict, dictRoot + 'loss_dict/loss_336_chinese.txt')
# write_list_2_dict_file(none_chinese_dict, dictRoot + 'loss_dict/loss_336_none_chinese.txt')
# merge_words_files(['D:/ocr/ocr_resources/reader/words/'])

# chinese_dict = []
# none_chinese_dict = []
# words_dict = dictOfFile(dictRoot + 'words_dict.txt')
# for char in words_dict:
#     if char != '':
#         if is_chinese(char):
#             chinese_dict.append(char)
#         else:
#             none_chinese_dict.append(char)
# write_list_2_dict_file(chinese_dict, dictRoot + 'loss_dict/words_dict_chinese.txt')
# write_list_2_dict_file(none_chinese_dict, dictRoot + 'loss_dict/words_dict_none_chinese.txt')

# i = 0
# while i < 10000:
#     text = get_word_of_line(
#         "此告愈。按：狂为阳证，总与七情内伤相关。龚商年云：“推其病”配合针刺大椎不留针，刺少商（双）加灸", 20, 21)
#     if text.find('龚') < 0:
#         print(text)
#     i = i + 1

# #
# fonts = [msyh_font, simfang_font, simsun_font]
# text = strOfFile(dictRoot + 'loss_dict/words_dict_none_chinese.txt')
# text = strOfFile(dictRoot + 'result_all.txt')
# draw_to_image(msyh_font, text, dictRoot + 'loss_dict/msyh1.jpg')
# draw_to_image(simfang_font, text, dictRoot + 'loss_dict/simfang1.jpg')
# draw_to_image(simsun_font, text, dictRoot + 'loss_dict/simsun1.jpg')

# in_chars, out_chars = check_font_chars(msyh_font, text)
# print(out_chars)

# in_chars, out_chars = check_font_files(msyh_font, [dictRoot + 'result_all.txt'
#     , dictRoot + 'loss_dict/loss_336.txt'])
# write_list_2_dict_file(in_chars, dictRoot + 'font/in_chars.txt')
# write_list_2_dict_file(out_chars, dictRoot + 'font/out_chars.txt')

# merge_dict_folders(['D:/ocr/ocr_resources/reader/words/'], dictRoot + 'font/all_chars_no_clear.txt')
#
# merge_dict = merge_dict_files([dictRoot + 'font/all_chars_no_clear.txt',
#                                dictRoot + 'result_all.txt',
#                                dictRoot + 'ppocr_keys_v1.txt',
#                                dictRoot + 'font/in_chars.txt',
#                                dictRoot + 'font/out_chars.txt',
#                                dictRoot + 'font/in_chars_dot.txt',
#                                dictRoot + 'font/all_chars.txt',
#                                ], [])
# write_list_2_dict_file(merge_dict, dictRoot + 'font/all_chars.txt')

# in_chars, out_chars = check_font_files(msyh_font, [dictRoot + 'font/all_chars.txt'])
# write_list_2_dict_file(subDict(in_chars, get_dict_ignore() + get_dict_dot() + get_dict_mapping()),
#                        dictRoot + 'font/in_chars.txt')
# write_list_2_dict_file(out_chars, dictRoot + 'font/out_chars.txt')

# text = strOfFile(dictRoot + 'font/in_chars.txt')
# draw_to_image(msyh_font, text, dictRoot + 'font/msyh1.jpg')
# # msyh1 比下面的多𠙶 𦰡 𨱏 龿
# draw_to_image(simfang_font, text, dictRoot + 'font/simfang1.jpg')
# draw_to_image(simsun_font, text, dictRoot + 'font/simsun1.jpg')

# text = strOfFile(dictRoot + 'ignore.txt')
# draw_to_image(simfang_font, text, dictRoot + 'font/simfang1.jpg')

# merge_words_files([reader_root + 'words'], 'D:/ocr/ocr_resources/reader/all_words/all_words.txt')

# process_dict_words_of_folder(['D:/ocr/ocr_resources/reader/'], 100, 10)
# process_dict_of_folder(['D:/ocr/ocr_resources/reader/'])

# filter_dict_words(reader_root + 'all_words.txt', dictRoot + 'font/in_chars.txt',
#                   dictRoot + 'font/out_chars.txt', reader_root + 'in_chars_words.txt')
#
# dict_words_2_doc(dictRoot + 'font/in_chars_words.txt', reader_root + 'img/img_words.txt')
# 出现的char整理
# file_2_single_dict_file(reader_root + 'words/10057373~实用精神疾病诊治与护理_words.txt', reader_root + 'dict')
# 从all_words文件中提取words,追加到单个字对应的txt中
# folder_2_single_dict_file(reader_root + 'words_10', reader_root + 'dict_10')
# 根据in_chars.txt从字典库中抽取随机的行数组成in_chars_words.txt,每行需要清理掉不在字体库支持的文字
# get_dict_words_from_folder(get_dict_from_file(dictRoot + 'font/in_chars.txt'), reader_root + 'dict/', 1000,
#                            reader_root + 'all_words/in_chars_words.txt')

# 这里保证下一步出现的char都在字体中能正常显示
# mapping_values = get_dict_mapping_value()
# # 清理掉不在字体库支持的文字
# dict_in = merge_dict(get_dict_from_file(dictRoot + 'font/in_chars.txt') + get_dict_dot(),
#                      mapping_values)
# # print(1)
# clear_not_in_char_of_file(reader_root + 'all_words/in_chars_words.txt',
#                           dict_in,
#                           reader_root + 'all_words/in_chars_words_clear.txt')

# 检查window font下最能写的字体
# chars_text = get_txt_from_file(dictRoot + 'font/in_chars_dot.txt').replace('\n', '')
# # chars_text = get_txt_from_file(dictRoot + 'font/in_chars.txt')
# check_all_font_of_folder('C:/Windows/Fonts/', chars_text)
# 失败
# draw_to_image(msyh_font, chars_text, dictRoot + 'font/arial.jpg')

# dict_filter = sub_dict('font下最能写的字体𠵣𠵨', dict_in)
# print(dict_filter)

# chars_text = get_txt_from_file(reader_root + 'all_words/in_chars_words_clear.txt').replace('\n', '')
# draw_large_text_to_image(msyh_font, chars_text, reader_root + 'all_words/in_chars_words_clear.jpg')

# merge_into_result_all()

# 处理in_chars_dot中有不在字体支持的符号
# in_chars, out_chars = check_font_files(simfang_font, [dictRoot + 'font/in_chars_dot.txt'])
# old_out_dict = get_dict_from_file(dictRoot + 'font/out_chars.txt')
# write_list_2_dict_file(merge_dict(old_out_dict, out_chars), dictRoot + 'font/out_chars.txt')
# write_list_2_dict_file(in_chars, dictRoot + 'font/in_chars_dot.txt')
#
# gen_front_image_2_check()

# 检测所有字体对in_cha的支持
# check_in_char()

# print(line_num)
# filter_dict = get_dict_from_file(dictRoot + 'font/in_chars.txt')
# get_char_num_mapping('D:/ocr/ocr_resources/reader/all_words/char_num_sort.txt', 201, 500,
#                      dictRoot + 'font/201~500~in_chars.txt', filter_dict)

# uuid = str(uuid.uuid1())
#
# print(uuid[0:8])  # +
# print(uuid[19:23])
# print(uuid[0:8] + uuid[19:23])

# txt = "fskhdalkf%safaaaa%s" % ('111', 2)
# print(txt)

# # 清理掉不在字体库支持的文字
# # 这里保证下一步出现的char都在字体中能正常显示
# mapping_values = get_dict_mapping_value()
# # 清理掉不在字体库支持的文字
# dict_in = merge_dict(get_dict_from_file(dictRoot + 'font/in_chars.txt') + get_dict_dot(),
#                      mapping_values)
# # 这个效率太低,且字符的取得频率不对,已经废弃 由text_render从char_in读取字符,然后从具体的字符文件中取数替代
# # 这个方法不并发,请至少执行2次以上,执行一次备份一次,用多余一次文件大小一致的文件
# clear_not_in_char_of_file(reader_root + 'all_words/in_chars_words_10.txt',
#                           dict_in,
#                           reader_root + 'all_words/in_chars_words_clear_10.txt')


# # 通过多线程 转换_content==>_dict
# process_dict_of_folder(['D:/ocr/ocr_resources/reader/'])
# # dict==>all_chars_no_clear
# merge_dict_folders(['D:/ocr/ocr_resources/reader/words_10/'], dictRoot + 'font/all_chars_no_clear.txt')
#
# merge_dict = merge_dict_files([dictRoot + 'font/all_chars_no_clear.txt',
#                                dictRoot + 'result_all.txt',
#                                dictRoot + 'ppocr_keys_v1.txt',
#                                dictRoot + 'font/in_chars.txt',
#                                dictRoot + 'font/out_chars.txt',
#                                dictRoot + 'font/in_chars_dot.txt',
#                                dictRoot + 'font/all_chars.txt',
#                                reader_root + ''
#                                ], [])
# merge_dict = merge_dict(merge_dict, get_dict_mapping_key())
# write_list_2_dict_file(merge_dict, dictRoot + 'font/all_chars.txt')
# #
# in_chars, out_chars = check_font_files(simfang_font, [dictRoot + 'font/all_chars.txt'])
#
# ignore = get_dict_ignore()
# dot = get_dict_dot()
# mapping = get_dict_mapping_key()
# sub = ignore + dot + mapping
#
# write_list_2_dict_file(sub_dict(in_chars, sub),
#                        dictRoot + 'font/in_chars.txt')
# write_list_2_dict_file(out_chars, dictRoot + 'font/out_chars.txt')

