import re
import jieba
import pandas as pd


# Tokenization
def cut_text(text, stopwords):
    """
    Tokenize a text string with jieba and rejoin the kept tokens.

    input:
        text: a string of raw text to segment
        stopwords: a collection of words to exclude from the result
                   (membership is tested with `in`; a set is fastest)
    output:
        cutted_concated: the kept tokens joined into one string,
                         separated by a single space ("")
    """

    # Tokens containing ASCII letters or digits are filtered out entirely.
    pat = re.compile("[a-z0-9A-Z]+")
    result = []
    seg_list_1 = jieba.cut(text, cut_all=True)  # full-mode segmentation
    for seg in seg_list_1:
        # Keep only tokens that are not stopwords and contain no letters/digits.
        if seg not in stopwords and pat.search(seg) is None:
            seg = ''.join(seg.split())  # strip all internal/surrounding whitespace
            if seg:  # whitespace-only tokens collapse to '' above and are dropped
                result.append(seg)
    # Join once, after the loop. The original joined inside the loop, which was
    # O(n^2) and raised UnboundLocalError when jieba produced no tokens at all
    # (e.g. empty input), since the name was never bound before `return`.
    cutted_concated = " ".join(result)
    return cutted_concated


# Load the training set
def get_train_label(path='../data/weibo_senti_100k.csv', headers=('label', 'review'), sep=','):
    """
    Read a labelled CSV file and return its text and label columns.

    input:
        path: path to the CSV file (default: the weibo sentiment corpus)
        headers: column names to assign to the file's columns, in order.
                 NOTE: must include "label" and "review" — those exact names
                 are used to select the returned columns below.
        sep: field separator used in the file
    output:
        (train, label): the "review" text Series and the "label" Series
    """
    # `headers` default is an immutable tuple (never a mutable list default);
    # pd.read_csv accepts any array-like for `names`.
    train_df = pd.read_csv(path, encoding="utf-8", names=headers, sep=sep)

    # Extract the training text
    train = train_df["review"]
    # Extract the training labels
    label = train_df["label"]
    return train, label
