from sklearn.feature_extraction.text import TfidfVectorizer
from scipy import sparse
from common import cut_text, get_train_label
import os
import pickle


def textToTFIDF(path, header, name, sep=','):
    """Fit a TF-IDF model on a dataset's text column and persist both the
    fitted vectorizer and the resulting sparse document-term matrix.

    Training is skipped entirely when the matrix file for ``name`` already
    exists; delete the file to force retraining.

    Args:
        path: Path to the input CSV/TSV data file.
        header: Column names to apply when reading the file.
        name: Dataset identifier used to build the output file names.
        sep: Field separator of the input file (default ``','``).
    """
    matrix_path = "../model/matrix/%s_TF_IDF_TRAIN_FIT_MATRIX.npz" % name
    model_path = "../model/TF-IDF/%s_TF_IDF_MODEL.pkl" % name

    # Guard clause: if the matrix was already produced, the model is
    # considered trained — report and bail out.
    if os.path.exists(matrix_path):
        print(name + "数据 TF-IDF 模型已进行训练，若需重新训练，删除文件即可")
        return

    # Load the stop-word list: the file holds one comma-separated line;
    # utf-8-sig strips a possible BOM.
    with open("../data/stop_words.txt", 'r', encoding="utf-8-sig") as f:
        stopwords = f.readline().split(",")

    train, label = get_train_label(path, header, sep=sep)
    # Tokenize each training document, dropping stop words.
    train_new = train.map(lambda x: cut_text(x, stopwords))

    # Fit the TF-IDF vectorizer and transform the tokenized corpus.
    tfidf_vectorizer = TfidfVectorizer()
    train_fit = tfidf_vectorizer.fit_transform(train_new)

    # Make sure the output directories exist before writing (first run
    # would otherwise fail with FileNotFoundError).
    os.makedirs(os.path.dirname(matrix_path), exist_ok=True)
    os.makedirs(os.path.dirname(model_path), exist_ok=True)

    # Persist the sparse TF-IDF document-term matrix.
    sparse.save_npz(matrix_path, train_fit)

    # Persist the fitted vectorizer; `with` guarantees the handle is
    # closed (the original leaked it via pickle.dump(..., open(...))).
    with open(model_path, "wb") as f:
        pickle.dump(tfidf_vectorizer, f)


# Convert each training corpus to its TF-IDF representation.
if __name__ == '__main__':
    textToTFIDF(path="../data/weibo_senti_100k.csv", header=['label', 'review'], name='weibo')
    # Fixed copy-paste bug: this message previously said 外卖 (takeout),
    # duplicating the line below, although the weibo dataset was converted.
    print('微博数据转TF-IDF完成')
    textToTFIDF(path="../data/waimai_10k.csv", header=['label', 'review'], name='waimai')
    print('外卖数据转TF-IDF完成')
    textToTFIDF(path="../data/online_shopping_10_cats.csv", header=['cat', 'label', 'review'], name='shopping')
    print('电商数据转TF-IDF完成')
    textToTFIDF(path="../data/train.txt", header=['label', 'review'], name='train', sep="\t")
    print('训练数据转TF-IDF完成')
