"""Build a term-frequency (CountVectorizer) model from a directory of
pre-tokenized documents.

Each file under ``path_doc`` is expected to contain one token per line;
the script joins them into a single space-separated string per document
(the exact input shape CountVectorizer requires), fits the vectorizer,
and prints the sparse term-document matrix and vocabulary.
"""

import os

# sklearn.externals.joblib was deprecated in scikit-learn 0.21 and removed
# in 0.23 — import joblib directly. (pickle would also work if preferred.)
import joblib
from sklearn.feature_extraction.text import CountVectorizer

path_doc = '/home/wanglinhui/Desktop/二师兄/small_docs/'
tf_ModelPath = '/home/wanglinhui/Desktop/二师兄/model/'
doc_names = os.listdir(path_doc)
doc_lists = []
for name in doc_names:
    # CountVectorizer has a strict input format:
    # a flat list where each element is one document, with its words
    # separated by single spaces.
    # encoding='utf-8' is explicit — the corpus is Chinese text and the
    # platform default codec may not handle it.
    with open(os.path.join(path_doc, name), 'r', encoding='utf-8') as reader:
        words = [line.strip() for line in reader]
    doc_lists.append(' '.join(words))

# Build the token-count matrix; run the fit only once, then persist.
# min_df (if set) would filter tokens by document frequency.
tf_vectorizer = CountVectorizer()
tf = tf_vectorizer.fit_transform(doc_lists)

print(tf)
print(tf_vectorizer.vocabulary_)
# NOTE(review): joblib.dump needs a *file* path, not a directory —
# e.g. os.path.join(tf_ModelPath, 'tf_vectorizer.pkl'). As written,
# uncommenting these lines would raise IsADirectoryError.
# joblib.dump(tf_vectorizer, os.path.join(tf_ModelPath, 'tf_vectorizer.pkl'))
# joblib.dump(tf, os.path.join(tf_ModelPath, 'tf.pkl'))  # joblib.dump() can serialize any Python object

# tf_vectorizer = joblib.load(os.path.join(tf_ModelPath, 'tf_vectorizer.pkl'))
# print(tf_vectorizer)
# tf_vectorizer.fit_transform(doc_lists)