'''
Author: 石沙
Date: 2020-12-20
LastEditTime: 2021-01-03
Description: 本模块用作数据集划分：训练集、验证集、测试集的比重为6:2:2
Copyright: 北京贪心科技有限公司版权所有。仅供教学目的使用。
'''

import sys
sys.path.append('..')
import pandas as pd
import configs.settings as conf
import os
from site_packages.utils.job import DataOp
import jieba
from site_packages.ml_libs.nlp.stopwords import Stopwords


stopwords = Stopwords(source='hit')


def read_csv(file, data_dir=None):
    """Read a tab-separated question-pair file into a DataFrame.

    Some rows contain stray tab characters and cannot be parsed by
    ``pd.read_csv`` directly (it raises on ragged rows), so the file is
    split manually and malformed rows (field count != 3) are dropped.

    Args:
        file: File name relative to ``data_dir``.
        data_dir: Directory containing ``file``. Defaults to
            ``conf.DATA_PATH_RANKING`` for backward compatibility.

    Returns:
        pd.DataFrame with columns ['question1', 'question2', 'label'].
    """
    if data_dir is None:
        data_dir = conf.DATA_PATH_RANKING
    with open(os.path.join(data_dir, file), 'r', encoding='utf-8') as f:
        rows = [line.split('\t') for line in f.read().strip().split('\n')]
    # Keep only well-formed rows: question1, question2, label.
    rows = [row for row in rows if len(row) == 3]
    return pd.DataFrame(rows, columns=['question1', 'question2', 'label'])


def preprocess_for_bm25(text):
    """Tokenize *text* with jieba and remove stopwords, for BM25 indexing."""
    return stopwords.clean(jieba.lcut(text))


def main(seed=42):
    """Merge the raw question-pair files and split them 6:2:2 into
    train/val/test sets, then persist all three via ``DataOp.save``.

    Args:
        seed: Random seed for the shuffle, so the split is reproducible
            across runs (the original code used no seed, giving a
            different split every execution).
    """
    files = ['task3_train.txt', 'atec_nlp_sim_train.csv', 'atec_nlp_sim_train_add.csv']
    # ignore_index=True: the per-file indices would otherwise collide.
    data = pd.concat([read_csv(file) for file in files], ignore_index=True)

    # 6:2:2 split sizes.
    row_cnt = data.shape[0]
    train_size = int(0.6 * row_cnt)
    val_size = int(0.2 * row_cnt)

    # Shuffle with a fixed seed so repeated runs yield the same split.
    data = data.sample(frac=1.0, random_state=seed)
    data_train = data.iloc[:train_size, :].copy()
    data_val = data.iloc[train_size:train_size + val_size, :].copy()
    data_test = data.iloc[train_size + val_size:, :].copy()

    # Pre-tokenized columns only on train — presumably the BM25 corpus is
    # built from the training set only; confirm with downstream consumers.
    data_train['question1_clean'] = data_train['question1'].apply(preprocess_for_bm25)
    data_train['question2_clean'] = data_train['question2'].apply(preprocess_for_bm25)

    # drop=True: without it the shuffled original index leaks into the saved
    # data as a meaningless 'index' column.
    for split in (data_train, data_val, data_test):
        split.reset_index(drop=True, inplace=True)

    # Persist the three splits.
    DataOp.save(data_train, 'data_train', is_model=False)
    DataOp.save(data_val, 'data_val', is_model=False)
    DataOp.save(data_test, 'data_test', is_model=False)


# Script entry point: build and persist the train/val/test splits.
if __name__ == '__main__':
    main()