from sklearn.utils import Bunch
from sklearn.feature_extraction.text import TfidfVectorizer
from tools import *

def vector_space(stopword_path, bunch_path, space_path, train_tfidf_path=None):
    """Build a TF-IDF vector space from a pickled Bunch of documents.

    Reads a stopword list and an input Bunch (with ``target_name``,
    ``label``, ``filenames``, ``contents``), computes the TF-IDF
    term-document matrix, and pickles the resulting space to ``space_path``.

    Args:
        stopword_path: path to a newline-separated stopword file.
        bunch_path: path to the pickled input Bunch.
        space_path: output path for the pickled TF-IDF space Bunch.
        train_tfidf_path: optional path to a previously built training
            space; when given, its vocabulary is reused so the resulting
            matrix columns align with the training features (test mode).
    """
    stopwdlist = readfile(stopword_path).splitlines()
    bunch = readbunchobj(bunch_path)
    tfidf_space = Bunch(target_name=bunch.target_name, label=bunch.label,
                        filenames=bunch.filenames, tdm=[], vocabulary={})

    # Shared vectorizer settings: sublinear TF scaling, and drop terms
    # appearing in more than 80% of documents. Defined once so the two
    # branches cannot drift apart.
    vec_kwargs = dict(stop_words=stopwdlist, sublinear_tf=True, max_df=0.8)

    if train_tfidf_path is not None:
        # Test/validation mode: import the training vocabulary so feature
        # columns line up with the training TF-IDF matrix.
        trainbunch = readbunchobj(train_tfidf_path)
        tfidf_space.vocabulary = trainbunch.vocabulary
        vectorizer = TfidfVectorizer(vocabulary=trainbunch.vocabulary, **vec_kwargs)
        # NOTE(review): fit_transform recomputes IDF weights on *this*
        # corpus — only the vocabulary is shared with training, not the
        # fitted IDFs. Kept as-is to preserve existing behavior; a proper
        # fix would persist and reuse the fitted training vectorizer.
        tfidf_space.tdm = vectorizer.fit_transform(bunch.contents)
    else:
        # Training mode: learn the vocabulary from this corpus and store it
        # so later (test) runs can reuse it.
        vectorizer = TfidfVectorizer(**vec_kwargs)
        tfidf_space.tdm = vectorizer.fit_transform(bunch.contents)
        tfidf_space.vocabulary = vectorizer.vocabulary_

    writebunchobj(space_path, tfidf_space)