import collections
import json
import jieba
import re

# # with open(r'D:\translation2019zh\translation2019zh_train.json', mode='r', encoding='utf-8') as f:
# #     total_lines = sum(1 for _ in f)
# #     print(total_lines)
# #     f.close()
# total_lines = 5161434
# selected_data = []
# n = total_lines //10
# with open(r'D:\translation2019zh\translation2019zh_train.json', mode='r', encoding='utf-8') as f:
#     for _ in range(n):
#         line = f.readline()
#         item = json.loads(line)
#         selected_data.append(item)
#
# with open('10percent_of_dataset.json', 'w', encoding='utf-8') as f:
#     for item in selected_data:
#         json_str = json.dumps(item, ensure_ascii=False)
#         f.write(json_str + '\n')
#     f.close()


# Load the parallel corpus: one JSON object per line with "english" and
# "chinese" keys, accumulated into two aligned lists.
english_list = []
chinese_list = []
with open('10percent_of_dataset.json', 'r', encoding='utf-8') as f:
    # Iterate the file lazily instead of readlines() — avoids holding every
    # raw line in memory at once. The original `line.strip()` was a no-op
    # (its return value was discarded); json.loads tolerates surrounding
    # whitespace, so it is simply dropped. `with` closes the file; no
    # explicit close() is needed.
    for line in f:
        data = json.loads(line)
        english_list.append(data["english"])
        chinese_list.append(data["chinese"])
# print(len(english_list))
# print(len(chinese_list))
def ch_tokenize(lines):
    """Segment each Chinese sentence in *lines* into a list of words.

    Returns one token list per input line, produced by jieba's cut.
    """
    return list(map(jieba.lcut, lines))

def en_tokenize(lines):
    """Split each English sentence in *lines* into lowercase tokens.

    Returns one token list per input line. The pattern keeps ratios
    (1:1, 3/4), contractions (don't, we'll), plain words, and individual
    punctuation marks as separate tokens.
    """
    token_re = re.compile(r"""
        \d+[:/]\d+       # 匹配比例符号如 1:1 或 3/4
        |\w+['’]\w+      # 匹配带省略号的单词如 don't、we’ll
        |[^\W_]+         # 匹配常规单词（排除下划线）
        |[^\w\s]         # 匹配标点符号
    """, re.VERBOSE | re.UNICODE)

    lowered = (line.lower() for line in lines)
    return [token_re.findall(text) for text in lowered]


# Tokenize both corpora, then persist the token lists as JSON.
# ensure_ascii=False keeps CJK characters readable in the output files.
english_list = en_tokenize(english_list)
chinese_list = ch_tokenize(chinese_list)

# `with` already guarantees the files are closed; the original explicit
# f.close() calls inside the blocks were redundant and have been removed.
with open('en_tokenized.json', 'w', encoding='utf-8') as f:
    json.dump(english_list, f, ensure_ascii=False)

with open('ch_tokenized.json', 'w', encoding='utf-8') as f:
    json.dump(chinese_list, f, ensure_ascii=False)

