# 分词并获取词频向量

import snownlp
from numpy import *
import jieba
import re

# Training set: labelled but not-yet-tokenized review files.
filePstv = '口红好评.txt'
fileNgtv = '口红差评.txt'
# Stop-word file.
fileStop = '停用词.txt'
# File storing the full (ordered) vocabulary list.
fileStoWordVec = '总词集.txt'
# File storing the positive samples' raw frequency vectors.
fileFrqPstv = '积极样本原始词频.txt'
# File storing the negative samples' raw frequency vectors.
fileFrqNgtv = '消极样本原始词频.txt'
# NOTE: the shared globals vocabSet, vocabSetLst and plusNum are assigned
# inside cutWord(). A `global` statement at module level is a no-op in
# Python, so no declaration is needed (or useful) here.
# Number of lines taken from each of the positive and negative files.
rowNum = 300


# 分词并保存有序词集
def cutWord():
	"""Tokenize the training files, build the vocabulary, and persist it.

	Side effects (module globals):
	  - vocabSet: set of all words seen in the first `rowNum` lines of
	    both training files, minus stop words and whitespace tokens.
	  - vocabSetLst: vocabSet frozen into a list (order is whatever the
	    set iteration yields), also written to `fileStoWordVec`.
	  - plusNum: per-occurrence increment used by getFrq(), set to one
	    third of the vocabulary size.
	"""
	global vocabSet
	vocabSet = set()
	# Both files are processed identically, so one loop covers them;
	# set union is order-independent, the result is the same.
	for path in (filePstv, fileNgtv):
		with open(path, 'r', encoding='utf-8') as f:
			for line in f.readlines()[0:rowNum]:
				# jieba word segmentation (an earlier snownlp variant
				# was abandoned).
				vocabSet |= set(jieba.cut(line))
	# Remove stop words; the regex split also strips the BOM prefix
	# that Windows Notepad prepends to UTF-8 files.
	with open(fileStop, 'r', encoding='utf-8') as f:
		stopLst = re.split(r'\n|\ufeff', f.read())
		vocabSet -= set(stopLst)
		# Drop empty/whitespace tokens produced by segmentation.
		vocabSet -= {'', '\n', '\t', ' '}
	# Freeze the set into an ordered list and save it, one word per line.
	global vocabSetLst
	vocabSetLst = list(vocabSet)
	with open(fileStoWordVec, 'w', encoding='utf-8') as f:
		f.write("\n".join(vocabSetLst))
	# Increment per word occurrence: one third of the vocabulary length.
	global plusNum
	plusNum = len(vocabSetLst) // 3


# 获取积极样本和消极样本的词频向量
def getFrq():
	# 积极样本集词频
	with open(filePstv, 'r', encoding='utf-8') as f1:
		with open(fileFrqPstv, 'w+', encoding='utf-8') as f2:
			f2.truncate()  # 清空要追加写的文件
			for line in f1.readlines()[0:rowNum]:
				allZero = True  # 用这个量查除全是0的样本
				'''
				s = snownlp.SnowNLP(line)
				cutLst = s.words
				'''
				cutLst = jieba.cut(line)
				frqVec = [0 for i in range(len(vocabSetLst))]
				for cutWord in cutLst:
					# 如果在词集列表中
					if cutWord in vocabSet:
						# 对应位置加plusNum,使用词袋模型
						frqVec[vocabSetLst.index(cutWord)] += plusNum
						allZero = False  # 记录这个样本的词频向量并非全是0
				# 对这个样本的所有词循环结束后,判定不是全0向量才写入
				if not allZero:
					# 数字转换成字符串
					frqVec = [str(i) for i in frqVec]
					# 用空格分隔开并写入文件
					f2.write(" ".join(frqVec) + "\n")
	# 消极样本集词频
	with open(fileNgtv, 'r', encoding='utf-8') as f1:
		with open(fileFrqNgtv, 'w+', encoding='utf-8') as f2:
			f2.truncate()  # 清空要追加写的文件
			for line in f1.readlines()[0:rowNum]:
				allZero = True  # 用这个量查除全是0的样本
				'''
				s = snownlp.SnowNLP(line)
				cutLst = s.words
				'''
				cutLst = jieba.cut(line)
				frqVec = [0 for i in range(len(vocabSetLst))]
				for cutWord in cutLst:
					# 如果在词集列表中
					if cutWord in vocabSet:
						# 对应位置加plusNum,使用词袋模型
						frqVec[vocabSetLst.index(cutWord)] += plusNum
						allZero = False  # 记录这个样本的词频向量并非全是0
				# 对这个样本的所有词循环结束后,判定不是全0向量才写入
				if not allZero:
					# 数字转换成字符串
					frqVec = [str(i) for i in frqVec]
					# 用空格分隔开并写入文件
					f2.write(" ".join(frqVec) + "\n")
