#encoding:utf-8
import re

import jieba
from collections import defaultdict

# 情感词典分析的是整个文章的情感倾向。
# s1: 数据清洗，删除停用词（将停用词加载到内存，dict/stopwords），包括标点符号
# s2: 分词（jieba分词）
# s3: 查表统计
from config import basepath


class DictAnalysis:
	"""Dictionary-based sentiment analysis for Chinese text.

	Pipeline:
	  1. split text into clauses on sentence-ending punctuation;
	  2. tokenize each clause with jieba, dropping stopwords;
	  3. score tokens against positive/negative lexicons, with each hit
	     weighted by adjacent degree adverbs (most/very/more/ish/
	     insufficiently/inverse).
	"""

	def __init__(self):
		# Lexicon file locations, all resolved under the project basepath.
		self.userDictPath = basepath+'/sentiment/senti_dict/dict/userdict.txt'
		self.stopwordPath = basepath+'/sentiment/senti_dict/dict/baidu_stopwords.txt'
		self.posdictPath = basepath+'/sentiment/senti_dict/dict/emotion_dict/pos_all_dict.txt'
		self.negdictPath = basepath+'/sentiment/senti_dict/dict/emotion_dict/neg_all_dict.txt'

		# Degree-adverb lexicons. NOTE(review): the multipliers actually
		# applied in cal_score are 2.0/1.75/1.5/1.2/0.5/-1, which differ from
		# the weights the original path comments claimed (2/1.5/1.25/0.5/
		# 0.25/-1). The code values are kept as the source of truth — confirm
		# which set was intended.
		self.mostdictPath = basepath+'/sentiment/senti_dict/dict/degree_dict/most.txt'
		self.verydictPath = basepath+'/sentiment/senti_dict/dict/degree_dict/very.txt'
		self.moredictPath = basepath+'/sentiment/senti_dict/dict/degree_dict/more.txt'
		self.ishdictPath = basepath+'/sentiment/senti_dict/dict/degree_dict/ish.txt'
		self.insufficientdictPath = basepath+'/sentiment/senti_dict/dict/degree_dict/insufficiently.txt'
		self.inversedictPath = basepath+'/sentiment/senti_dict/dict/degree_dict/inverse.txt'
		self.sentiment_init()

	def preteat_clause(self, phase):
		"""Split *phase* into clauses on sentence-ending punctuation.

		Delimiters themselves are dropped. A trailing fragment with no
		closing delimiter is kept; consecutive delimiters yield empty
		clauses (harmless downstream — they tokenize to nothing).

		:param phase: raw text string.
		:return: list of clause strings.
		"""
		delimiters = frozenset('。！~？!?…')
		clauses, start = [], 0
		for i, ch in enumerate(phase):
			if ch in delimiters:
				clauses.append(phase[start:i])
				start = i + 1
		if start < len(phase):
			clauses.append(phase[start:])
		return clauses

	def cutwords_jieba(self, sentence, userdict='dict/userdict.txt', stopwords='dict/stopwords.txt'):
		"""Tokenize *sentence* with jieba, dropping stopwords.

		When *userdict* is truthy, the user dictionary and the stopword list
		are loaded; pass '' (as predict() does) to skip both.

		Fixes vs. original: the original built a stopword-filtered list but
		returned the raw token list, making the filtering dead code — and
		the filtered list went through set(), which would have destroyed the
		token order that sentiment() depends on. Tokens are now returned
		filtered, in original order, and the stopword file is closed
		deterministically.

		:return: ordered list of tokens with stopwords removed.
		"""
		stop_words = set()
		if userdict:
			jieba.load_userdict(self.userDictPath)
			with open(self.stopwordPath, 'r', encoding='utf-8') as f:
				stop_words = {line.strip() for line in f}
		return [token for token in jieba.cut(sentence) if token not in stop_words]

	def deal_wrap(self, filedict):
		"""Read *filedict* and return its stripped lines as a list."""
		with open(filedict, 'r', encoding='utf-8') as f:
			return [line.strip() for line in f]

	def sentiment_init(self):
		"""Load all lexicons into memory (as sets, for O(1) membership)."""
		# Sentiment lexicons
		self.posdict = set(self.deal_wrap(self.posdictPath))
		self.negdict = set(self.deal_wrap(self.negdictPath))
		# Degree-adverb lexicons (multipliers applied in cal_score)
		self.mostdict = set(self.deal_wrap(self.mostdictPath))
		self.verydict = set(self.deal_wrap(self.verydictPath))
		self.moredict = set(self.deal_wrap(self.moredictPath))
		self.ishdict = set(self.deal_wrap(self.ishdictPath))
		self.insufficientdict = set(self.deal_wrap(self.insufficientdictPath))
		self.inversedict = set(self.deal_wrap(self.inversedictPath))

	def cal_score(self, word, sentence_score):
		"""Multiply *sentence_score* by the degree weight of *word*.

		Unknown words leave the score unchanged; inverse (negation) words
		flip its sign.
		"""
		if word in self.mostdict:
			sentence_score *= 2.0
		elif word in self.verydict:
			sentence_score *= 1.75
		elif word in self.moredict:
			sentence_score *= 1.5
		elif word in self.ishdict:
			sentence_score *= 1.2
		elif word in self.insufficientdict:
			sentence_score *= 0.5
		elif word in self.inversedict:
			sentence_score *= -1
		return sentence_score

	def sentiment(self, sentence):
		"""Score a token list.

		Each sentiment word contributes a base score of 1, scaled by the
		degree words immediately before and after it
		(degree-word, sentiment-word, degree-word).

		Fix vs. original: `tmp` was read before assignment whenever the
		sentiment word was the first token (NameError, or a stale value
		carried over from an earlier match); it is now reset to 1 per match.

		:param sentence: list of tokens.
		:return: (posscore, negscore) tuple.
		"""
		posscore, negscore = 0, 0
		n = len(sentence)
		for i, word in enumerate(sentence):
			is_pos = word in self.posdict
			is_neg = (not is_pos) and word in self.negdict
			if not (is_pos or is_neg):
				continue
			tmp = 1
			if i > 0:
				tmp = self.cal_score(sentence[i - 1], tmp)
			if i < n - 1:
				tmp = self.cal_score(sentence[i + 1], tmp)
			if is_pos:
				posscore += tmp
			else:
				negscore += tmp
		return posscore, negscore

	def article_sentiment(self, text: str):
		"""Score a whole article: clause-split, tokenize, sum clause scores.

		:return: (total positive score, total negative score).
		"""
		total_pscore, total_nscore = 0, 0
		for clause in self.preteat_clause(text):
			tokens = self.cutwords_jieba(clause)  # tokenize + drop stopwords
			posscore, negscore = self.sentiment(tokens)
			total_pscore += posscore
			total_nscore += negscore
		return total_pscore, total_nscore

def predict():
	"""Score data/data1.txt line by line; print per-line and total scores.

	Fix vs. original: the per-line print used `posscore`/`negscore` — the
	scores of only the LAST clause of the line (and unbound names for a
	line containing no clauses) — instead of the accumulated per-line
	totals `sentence_pscore`/`sentence_nscore`.
	"""
	a = DictAnalysis()

	total_pscore, total_nscore = 0, 0
	for tempstr in a.deal_wrap('data/data1.txt'):  # one line of input text
		sentence_pscore, sentence_nscore = 0, 0
		for clause in a.preteat_clause(tempstr):  # clause-split
			# '' userdict/stopwords → skip loading the stopword lexicon
			tokens = a.cutwords_jieba(clause, '', '')
			posscore, negscore = a.sentiment(tokens)
			sentence_pscore += posscore
			sentence_nscore += negscore

		total_pscore += sentence_pscore
		total_nscore += sentence_nscore
		print(sentence_pscore, sentence_nscore, sentence_pscore - sentence_nscore)

	print('最后总得分：posscore:{};negscore:{};totalscore:{}'.format(total_pscore, total_nscore, total_pscore - total_nscore))

def predict_sentence(sentence):
	"""Score a single piece of text; print its clauses and (pos, neg) scores.

	Fix vs. original: sentiment() was called on the raw string, iterating
	single CHARACTERS, so multi-character lexicon entries could never
	match. The text is now clause-split and tokenized via
	article_sentiment().
	"""
	a = DictAnalysis()
	print(a.preteat_clause(sentence))  # show the clause split, as before
	pos, neg = a.article_sentiment(sentence)
	print(pos, neg)


if __name__ == '__main__':
	# Demo: score one sentence. (Indentation normalized to tabs to match
	# the rest of the file; the original mixed spaces here.)
	predict_sentence('就把定义精神疾病看作是一种权力。他认为，精神疾病能够定义谁正常、谁不正常；')
	# To score a whole article file instead:
	# a = DictAnalysis()
	# pos, neg = a.article_sentiment(open('data/article.txt', 'r', encoding='utf-8').read())
	# print(pos)
	# print(neg)