# Author: Nanyu
# Date: 2022/6/25 16:50
import re
import jieba.posseg as pseg
import pandas as pd
import numpy as np
from dzj.med_qa.med_constant import Med_constant
from sklearn.utils import shuffle

hit_stopwords = [line.strip() for line in open(Med_constant.stop_words_path, encoding="utf-8").readlines()]  # 加载停用词


def load_data(path):
    """Read the CSV file at *path* into a pandas DataFrame."""
    return pd.read_csv(path)


def get_data():
    """
    Load the train/test/valid CSV splits and numericize their labels.

    The label -> integer-id mapping is built from the labels present in
    the training split (in groupby key order), then applied to all three.

    :return: (train_df, test_df, valid_df, category_dict)
    """
    frames = {
        "train": load_data(Med_constant.train_path),
        "test": load_data(Med_constant.test_path),
        # NOTE: constant name is spelled "vaild_path" in Med_constant.
        "valid": load_data(Med_constant.vaild_path),
    }

    # Build the label -> id dictionary from the training labels.
    categories = list(frames["train"].groupby(by='label').groups.keys())
    category_dict = {name: idx for idx, name in enumerate(categories)}

    mapped = {split: label_to_id(df, category_dict) for split, df in frames.items()}
    return mapped["train"], mapped["test"], mapped["valid"], category_dict


def label_to_id(data_df, category_dict):
    """
    Map string labels to integer ids, adding a ``label_id`` column.

    :param data_df: DataFrame with a ``label`` column (not mutated).
    :param category_dict: mapping from label value to integer id.
    :return: a new DataFrame with a ``label_id`` column appended.
    :raises KeyError: if a label is missing from ``category_dict``.
    """
    # Assign the column positionally instead of pd.concat(axis=1):
    # concat aligns on the index, which silently misaligns (or NaN-fills)
    # rows whenever data_df carries a non-default index, e.g. after
    # filtering or shuffling without reset_index.
    df = data_df.copy()
    df["label_id"] = [category_dict[label] for label in df["label"]]
    return df


def find_chinese(text):
    """Return *text* with every character outside the CJK range \\u4e00-\\u9fa5 removed."""
    return re.sub(r'[^\u4e00-\u9fa5]', '', text)


def cut_word(sentence):
    """
    Segment a Chinese sentence with jieba POS tagging.

    Tokens whose POS flag is 'x' (non-word: punctuation/whitespace) are
    dropped.

    :param sentence: input string.
    :return: list of word strings with flag != 'x'.
    """
    # The original called data.remove(item) while iterating `data`, which
    # skips the element following each removed one — consecutive 'x'
    # tokens leaked into the output. Filtering with a comprehension is
    # both correct and idiomatic.
    return [pair.word for pair in pseg.lcut(sentence) if pair.flag != 'x']


def remove_stopwords(data_list):
    """
    Filter out tokens that appear in the module-level stop-word list.

    :param data_list: list of word tokens.
    :return: new list with stop words removed (input order preserved).
    """
    return [token for token in data_list if token not in hit_stopwords]


def process(data_df):
    """
    Clean the ``question`` column in place: keep Chinese characters only,
    segment into words, then drop stop words.

    :param data_df: DataFrame with a ``question`` column (mutated in place).
    :return: the same DataFrame, for convenient chaining.
    """
    for step in (find_chinese, cut_word, remove_stopwords):
        data_df["question"] = data_df["question"].apply(step)
    return data_df


def result():
    """
    Run the full preprocessing pipeline: load the three splits, clean
    their ``question`` columns, and write each split to its save path.
    """
    train_data, test_data, valid_data, _category_dict = get_data()

    targets = (
        (train_data, Med_constant.train_save_path),
        (test_data, Med_constant.test_save_path),
        (valid_data, Med_constant.valid_save_path),
    )
    for split_df, save_path in targets:
        process(split_df).to_csv(save_path, index=False)


# if __name__ == '__main__':
#     result()
