import pandas as pd
from sklearn.model_selection import train_test_split
from tokenizer import ChineseTokenizer, EnglishTokenizer
from config import RAW_DATA_DIR, PROCESSED_DATA_DIR, SEQ_LEN, RANDOM_SEED


def process():
    """Preprocess the raw EN-ZH parallel corpus into indexed train/test sets.

    Reads the tab-separated ``cmn.txt`` file, cleans it, splits it 80/20,
    builds and saves vocabularies from the TRAINING split only (avoids
    test-set leakage), then encodes both splits into token-id sequences and
    writes them as JSON-lines files under ``PROCESSED_DATA_DIR``.

    Side effects: writes ``zh_vocab.txt``, ``en_vocab.txt``,
    ``indexed_train.jsonl`` and ``indexed_test.jsonl``.
    """
    # 1. Load the raw parallel corpus (tab-separated: English \t Chinese).
    data = pd.read_csv(
        RAW_DATA_DIR / 'cmn.txt',
        sep='\t', header=None, usecols=[0, 1], names=['en', 'zh'],
        encoding='utf-8'
    )

    # 2. Clean: drop missing values and rows that are blank after stripping.
    data = data.dropna()
    data = data[data['en'].str.strip().ne('') & data['zh'].str.strip().ne('')]

    # 3. Split 80/20 with a fixed seed for reproducibility. .copy() detaches
    #    each split from `data`: without it the column assignments below run
    #    on pandas slices, raising SettingWithCopyWarning and not being
    #    guaranteed to take effect.
    train_data, test_data = train_test_split(
        data, test_size=0.2, random_state=RANDOM_SEED
    )
    train_data = train_data.copy()
    test_data = test_data.copy()

    # 4. Build vocabularies from the training split only, and save them.
    ChineseTokenizer.build_vocab(train_data['zh'].tolist(), PROCESSED_DATA_DIR / 'zh_vocab.txt')
    EnglishTokenizer.build_vocab(train_data['en'].tolist(), PROCESSED_DATA_DIR / 'en_vocab.txt')

    # 5. Reload tokenizers from the saved vocab files so later runs see the
    #    exact same vocab that was written to disk.
    zh_tokenizer = ChineseTokenizer.from_vocab(PROCESSED_DATA_DIR / 'zh_vocab.txt')
    en_tokenizer = EnglishTokenizer.from_vocab(PROCESSED_DATA_DIR / 'en_vocab.txt')

    # NOTE: SEQ_LEN (~30) was originally derived from the 95th-percentile
    # sequence length of the training data, roughly:
    #   train_data['zh'].apply(len).quantile(0.95)

    # 6./7. Encode both splits to id sequences and save as JSON lines.
    # Source (zh) is encoded without SOS/EOS; target (en) with SOS/EOS —
    # presumably for teacher-forced decoding (verify against the model code).
    _encode_split(train_data, zh_tokenizer, en_tokenizer,
                  PROCESSED_DATA_DIR / 'indexed_train.jsonl')
    _encode_split(test_data, zh_tokenizer, en_tokenizer,
                  PROCESSED_DATA_DIR / 'indexed_test.jsonl')


def _encode_split(df, zh_tokenizer, en_tokenizer, out_path):
    """Encode a split's zh/en columns to id sequences and write them as JSONL.

    Mutates ``df`` in place (callers pass a dedicated copy), then writes one
    JSON record per row to ``out_path``.
    """
    df['zh'] = df['zh'].apply(lambda x: zh_tokenizer.encode(x, SEQ_LEN, add_sos_eos=False))
    df['en'] = df['en'].apply(lambda x: en_tokenizer.encode(x, SEQ_LEN, add_sos_eos=True))
    df.to_json(out_path, orient='records', lines=True)


# Script entry point: run the full preprocessing pipeline when executed directly.
if __name__ == '__main__':
    process()
