import jieba

import pandas as pd
import os
from bs4 import BeautifulSoup
from jy.med_qa.med_constant import Med_Const
import re

# NOTE(review): presumably this must run before Keras is imported by any
# downstream module so it selects the TensorFlow backend — no Keras import
# is visible in this file; confirm against the callers.
os.environ['KERAS_BACKEND'] = 'tensorflow'


def load_data():
    """Load the train, test and validation CSVs and stack them into one DataFrame.

    Returns:
        pd.DataFrame: all three splits concatenated row-wise with a fresh
        0..n-1 index.
    """
    frames = [
        pd.read_csv(Med_Const.train_path, encoding='utf-8'),
        pd.read_csv(Med_Const.test_path, encoding='utf-8'),
        # NOTE(review): 'vaild_path' is a typo in the project constant name;
        # it is defined elsewhere, so it is only flagged here, not renamed.
        pd.read_csv(Med_Const.vaild_path, encoding='utf-8'),
    ]
    # ignore_index=True: the previous pairwise concats kept each file's own
    # index, producing duplicate row labels in the combined frame.
    return pd.concat(frames, axis=0, ignore_index=True)


def remove_html(text):
    """Strip HTML markup from *text* and return the plain visible text.

    Bug fix: the original did ``str(text.get_text().encode())``, which wraps
    the result in the bytes *repr* (``"b'...'"``) and escapes every non-ASCII
    character — corrupting Chinese text. ``get_text()`` already returns str.

    Args:
        text: a string that may contain HTML markup.

    Returns:
        str: the text content with all tags removed.
    """
    soup = BeautifulSoup(text, "html5lib")
    return soup.get_text()


# Compiled once at import time: clean_text is applied per-row over the whole
# dataset, so re-building the pattern on every call is pure overhead.
# Pattern covers ASCII punctuation, common CJK punctuation, whitespace and
# digits (byte-identical to the original).
_CLEAN_RE = re.compile(
    r"[!\"#$%&'()*+,-./:;<=>?@[\\\]^_`{|}~—！，。？·￥、《》···【】：" r"''\s0-9]+"
)


def clean_text(text):
    """Remove punctuation (ASCII and CJK), whitespace and digits from *text*.

    Args:
        text: input string.

    Returns:
        str: *text* with every matched character deleted.
    """
    return _CLEAN_RE.sub('', text)


def cut_word(text):
    """Segment a Chinese sentence into a list of word tokens with jieba."""
    return list(jieba.cut(text))


# Lazily-loaded stop-word cache: the original re-opened and re-read the
# stop-word file on EVERY call (once per dataset row via apply) and never
# closed the file handle.
_STOP_WORDS = None


def remove_stop_words(text):
    """Filter stop words out of a token list.

    Args:
        text: an iterable of word tokens (the output of ``cut_word``).

    Returns:
        list: the tokens that are not in the stop-word set, order preserved.
    """
    global _STOP_WORDS
    if _STOP_WORDS is None:
        # Load once; 'with' guarantees the handle is closed.
        with open(Med_Const.stop_word_path, encoding='utf-8') as f:
            _STOP_WORDS = {line.strip() for line in f}
    return [word for word in text if word not in _STOP_WORDS]


def process():
    """Run the full preprocessing pipeline over the 'question' column.

    Loads all splits, then cleans, tokenizes and stop-word-filters each
    question, finally storing the token list back as its str() form.
    """
    frame = load_data()
    # HTML stripping is currently disabled:
    # frame["question"] = frame["question"].apply(remove_html)
    column = frame["question"].astype('str')
    for transform in (clean_text, cut_word, remove_stop_words):
        column = column.apply(transform)
    frame['question'] = column.astype(str)
    return frame


