import pickle
import re
import pandas as pd
import zhconv
import jieba.posseg as psg
from sklearn.feature_extraction.text import CountVectorizer


def clean_data(content):
    """Clean one email body for training.

    Steps: keep only Chinese characters, convert traditional to
    simplified, segment with jieba, and keep only words whose POS tag
    is informative (nouns, verbs, adjectives).

    Args:
        content: raw text of a single email.

    Returns:
        A space-separated string of the retained words (may be empty
        when nothing survives the filters).
    """
    # 2. Keep only Chinese characters. BUG FIX: the original pattern
    # [\u4e00-\u9fa5] *removed* the Chinese text instead of keeping it;
    # the character class must be negated.
    content = re.sub(r"[^\u4e00-\u9fa5]", " ", content)
    # 3. Convert traditional characters to simplified.
    content = zhconv.convert(content, "zh-cn")
    # 4. Segment and POS-tag; keep only informative parts of speech.
    allow_pos = {'n', 'nr', 'ns', 'nt', 'v', 'a'}
    words = [word for word, pos in psg.cut(content) if pos in allow_pos]
    # BUG FIX: the original returned " ".join(content), which joined the
    # raw string character-by-character and threw away the POS filtering.
    return " ".join(words)

def clean_data_main():
    """Load the raw dataset, clean every email, and write the cleaned
    rows out as the training CSV consumed by extract_email_feature().

    Rows that become empty after cleaning are dropped.
    """
    # 1. Load the raw data.
    data = pd.read_csv("data/01.原始测试集.csv")
    contents, labels = [], []
    for content, label in zip(data["content"], data["label"]):
        words = clean_data(content)
        if not words:  # skip rows that are empty after cleaning
            continue
        contents.append(words)
        labels.append(label)

    # 5. Save as the training set. index=False avoids writing a spurious
    # "Unnamed: 0" index column into the CSV (the original wrote one).
    train_data = pd.DataFrame()
    train_data["content"] = contents
    train_data["label"] = labels
    train_data.to_csv("data/02.清洗_训练集.csv", index=False)

# Feature extraction: prepare the data needed for model training.
def extract_email_feature():
    """Vectorize the cleaned training set into bag-of-words features
    and pickle both the training data and the fitted vocabulary.

    Writes:
        data/03-模型训练数据.pkl -- {"x": feature rows, "y": labels}
        data/03-模型训练特征.pkl -- fitted feature-name list, so that
            inference/testing uses the exact same feature columns.
    """
    data = pd.read_csv("data/02.清洗_训练集.csv")
    # Bag-of-words over the 10,000 most frequent tokens.
    transfer = CountVectorizer(max_features=10000)
    content_feature = transfer.fit_transform(data["content"])

    content_feature_dict = {
        "x": content_feature.toarray().tolist(),
        "y": data["label"].tolist(),
    }
    # Use context managers so the pickle files are flushed and closed
    # even on error (the original leaked the open() handles).
    with open("data/03-模型训练数据.pkl", "wb") as f:
        pickle.dump(content_feature_dict, f, protocol=3)

    # NOTE: the fitted feature list must be saved so training and
    # testing share the same vocabulary / column order.
    feature_names = transfer.get_feature_names_out()
    with open("data/03-模型训练特征.pkl", "wb") as f:
        pickle.dump(feature_names, f, protocol=3)

if __name__ == '__main__':
    # Pipeline step 1 (run once): clean the raw CSV into the training CSV.
    # clean_data_main()
    # Pipeline step 2: vectorize the cleaned training set for model training.
    extract_email_feature()

