import os
import re

import mindspore.dataset.text as text
from mindspore.dataset.text import JiebaMode
from tqdm import tqdm

# Chinese tokenization uses Jieba (HMM mode) via MindSpore's JiebaTokenizer.
# NOTE(review): hard-coded absolute Windows path — this will fail on any other
# machine; consider making the dictionary location configurable.
jieba_dict_path = r"E:\mindspore\tests\ut\data\dataset\jiebadict"
jieba_hmm_file = os.path.join(jieba_dict_path, "hmm_model.utf8")
jieba_mp_file = os.path.join(jieba_dict_path, "jieba.dict.utf8")
cn_tokenizer_op = text.JiebaTokenizer(jieba_hmm_file, jieba_mp_file, mode=JiebaMode.HMM, with_offsets=False)


# English tokenization rule
def en_tokenize_op(sentence: str):
    """Split an English sentence into lowercase word and punctuation tokens.

    Trailing whitespace is stripped first; each run of word characters, or
    each single non-word non-space character, becomes one token.
    """
    tokens = re.findall(r'\w+|[^\w\s]', sentence.rstrip())
    return [token.lower() for token in tokens]


txt_path = "cmn_processed.txt"
# Read the parallel corpus; each line is expected to be "english<TAB>chinese".
with open(txt_path, "r", encoding="utf-8") as f:
    lines = f.readlines()
    # Split each line on TAB into an [english, chinese] pair.
    # NOTE(review): a line without a TAB yields a 1-element list and would
    # raise IndexError later in the workers — confirm the input is clean.
    lines = [line.strip().split("\t") for line in lines]

    # Total number of sentence pairs; used for chunking and shared-list sizing.
    n = len(lines)


def pre_process_data(sentences_list, l, r, list1, list2):
    """Tokenize sentence pairs with indices in [l, r) and store the results.

    For each pair, the English side goes through ``en_tokenize_op`` into
    ``list1`` and the Chinese side through ``cn_tokenizer_op`` into ``list2``
    (both are Manager-backed shared lists, assigned by index).
    """
    print(f"Process {l} - {r} ......\n")
    for idx in tqdm(range(l, r)):
        pair = sentences_list[idx]
        list1[idx] = en_tokenize_op(pair[0])
        list2[idx] = cn_tokenizer_op(pair[1])


if __name__ == '__main__':

    from multiprocessing import Process, Manager, cpu_count

    # Manager-backed shared lists so worker processes can write results back.
    English_sentences = Manager().list()
    Chinese_sentences = Manager().list()
    # Pre-fill with placeholders so workers can assign by index.
    for i in range(n):
        English_sentences.append([])
        Chinese_sentences.append([])

    # One worker per CPU core.
    num_processes = cpu_count()
    # Split [0, n) into num_processes contiguous [l, r) chunks; the last chunk
    # absorbs the remainder. (BUGFIX: the previous computation appended a
    # spurious empty (n, n) chunk when num_processes == 1.)
    chunk_size = n // num_processes
    scope_list = [(i * chunk_size, (i + 1) * chunk_size) for i in range(num_processes - 1)]
    scope_list.append(((num_processes - 1) * chunk_size, n))
    print(scope_list)

    # Spawn one worker process per chunk.
    processes = [Process(target=pre_process_data, args=(lines, l, r, English_sentences, Chinese_sentences)) for l, r in scope_list]

    for p in processes:
        p.start()

    for p in processes:
        p.join()

    print("All process done!")

    print("Example of pre-processed data:")
    print(English_sentences[-1])
    print(Chinese_sentences[-1])

    max_len_en = -1
    max_len_ch = -1

    dir_saved = "./datas"
    # BUGFIX: create the output directory first; open(..., "w") below raises
    # FileNotFoundError if it does not already exist.
    os.makedirs(dir_saved, exist_ok=True)
    cn_path = os.path.join(dir_saved, "cmn_chinese.txt")
    en_path = os.path.join(dir_saved, "cmn_english.txt")
    with open(cn_path, "w", encoding="utf-8") as f1, open(en_path, "w", encoding="utf-8") as f2:
        for i in range(n):
            # NOTE(review): assumes the tokenizer outputs are sequences of str
            # (numpy str_ subclasses str, so join works) — confirm for the
            # MindSpore JiebaTokenizer output type.
            en_sentence = "\t".join(English_sentences[i])
            ch_sentence = "\t".join(Chinese_sentences[i])
            f1.write(ch_sentence + "\n")
            f2.write(en_sentence + "\n")
            # Track the longest tokenized sentence on each side.
            max_len_en = max(max_len_en, len(English_sentences[i]))
            max_len_ch = max(max_len_ch, len(Chinese_sentences[i]))

    print(f"Max length of English sentence: {max_len_en}")
    print(f"Max length of Chinese sentence: {max_len_ch}")