import config
from tokenizer import ChineseTokenizer, EnglishTokenizer
import pandas as pd
from sklearn.model_selection import train_test_split


def process():
    """Build vocabularies and token-indexed train/test splits from the raw corpus.

    Pipeline:
      1. Read tab-separated English/Chinese sentence pairs from cmn.txt.
      2. Drop missing and whitespace-only rows.
      3. Split 80/20 into train/test (seeded, so the split is reproducible
         and stays in sync with the vocab files written below).
      4. Build zh/en vocab files from the TRAINING split only (avoids
         leaking test-set tokens into the vocabulary).
      5. Encode both splits to fixed-length token-id sequences and write
         them as JSON-lines files under config.PROCESSED_DIR.
    """
    print("开始处理数据")
    df = pd.read_csv(
        config.RAW_DATA_DIR / 'cmn.txt',
        sep='\t', header=None, usecols=[0, 1], names=['en', 'zh'],
    )
    df.dropna(inplace=True)

    # Also drop rows that are whitespace-only in either language.
    df = df[df['en'].str.strip().ne('') & df['zh'].str.strip().ne('')]

    # random_state makes the split reproducible across runs; .copy() gives
    # each split its own backing data so the column assignments in
    # _encode_and_save don't raise pandas' SettingWithCopyWarning.
    train_df, test_df = train_test_split(df, test_size=0.2, shuffle=True, random_state=42)
    train_df = train_df.copy()
    test_df = test_df.copy()

    # NOTE: config.SEQ_LEN was originally chosen by inspecting the ~95th
    # percentile of per-sentence lengths on the training split.

    # Build vocabularies from the training split only.
    ChineseTokenizer.build_vocab(train_df['zh'].tolist(), config.PROCESSED_DIR / 'zh_vocab.txt')
    EnglishTokenizer.build_vocab(train_df['en'].tolist(), config.PROCESSED_DIR / 'en_vocab.txt')

    chinese_tokenizer = ChineseTokenizer.from_vocab(config.PROCESSED_DIR / 'zh_vocab.txt')
    english_tokenizer = EnglishTokenizer.from_vocab(config.PROCESSED_DIR / 'en_vocab.txt')

    _encode_and_save(train_df, chinese_tokenizer, english_tokenizer,
                     config.PROCESSED_DIR / 'indexed_train.jsonl')
    _encode_and_save(test_df, chinese_tokenizer, english_tokenizer,
                     config.PROCESSED_DIR / 'indexed_test.jsonl')

    print("数据处理完成")


def _encode_and_save(split_df, chinese_tokenizer, english_tokenizer, out_path):
    """Encode a split's zh/en columns in place and write it as JSON lines.

    The source (zh) side is encoded without SOS/EOS markers; the target (en)
    side gets them, matching the original per-split encoding calls.
    """
    split_df['zh'] = split_df['zh'].apply(
        lambda x: chinese_tokenizer.encode(x, config.SEQ_LEN, add_sos_eos=False))
    split_df['en'] = split_df['en'].apply(
        lambda x: english_tokenizer.encode(x, config.SEQ_LEN, add_sos_eos=True))
    split_df.to_json(out_path, orient='records', lines=True)

# Script entry point: run the full preprocessing pipeline when executed directly.
if __name__ == '__main__':
    process()
