# -*- coding: utf-8 -*-
# coding=gbk

import codecs
import argparse
from HownetHelper import *
import codecs

import jieba
import jieba.posseg as pseg

# Global corpus store: maps document name -> list of sentenceInfo dicts
# built by ParseDocument and consumed by Main.
Corpus={}

# parse one document of the corpus into per-sentence sentenceInfo dicts
def ParseDocument(file_Path, docName):
	"""Parse one corpus document into per-sentence emotion info (Python 2).

	Reads file_Path + docName, splits the text on the Chinese full stop,
	scores each adjective against the Hownet emotion dictionaries and
	glossary sememes, and appends one sentenceInfo dict per non-empty
	sentence to the module-level Corpus[docName] list.
	"""
	with open(file_Path + docName) as doc:
		# register the document in the corpus
		Corpus[docName] = []
		# read the whole document text (raw byte string under Python 2)
		data = doc.read()
		# strip a UTF-8 BOM if present
		# (bug fix: the original sliced the file object `doc[3:]` instead of the data,
		#  which would raise TypeError for any BOM-prefixed document)
		if data[:3] == codecs.BOM_UTF8:
			data = data[3:]
		# split into sentences on the Chinese full stop and process one by one
		sentenceNo = 0
		for s in data.decode('utf-8').split('。'.decode('utf-8')):
			# skip empty sentences
			if len(s) <= 0:
				continue

			sentenceInfo = {
							'RawSentence':s,
							'PositiveWords':[],
							'NegativeWords':[],
							'EmotionEnergy':0, # net emotion energy for the sentence
							'EmotionScore':0,  # energy normalized by word count; used as a confidence value
							'WordCount':0,     # number of non-punctuation words in the sentence
							'DocName':docName,
							'SentenceNo':sentenceNo
						}

			sentenceNo += 1

			# segment the sentence into (word, POS-flag) pairs
			words = pseg.cut(s)
			for w in words:
				# normalize the part-of-speech tag
				att = w.flag.upper()
				# skip punctuation
				if att == 'X':
					continue

				sentenceInfo['WordCount'] += 1
				# unify the adjective tag
				if att == 'A':
					att = 'ADJ'
				# only adjectives are scored (N and V intentionally disabled)
				if att not in ['ADJ']: #'N', 'V',
					continue

				# search the negative emotion dictionary
				if w.word in NegativeEmotionDict:
					sentenceInfo['EmotionEnergy'] += -1
					sentenceInfo['NegativeWords'].append(w.word.encode("utf-8"))
				# search the positive emotion dictionary
				elif w.word in PositiveEmotionDict:
					sentenceInfo['EmotionEnergy'] += 1
					sentenceInfo['PositiveWords'].append(w.word.encode("utf-8"))
				# fall back to the Hownet glossary sememes
				elif w.word in Glossary:
					if att in Glossary[w.word]:
						sememeList = Glossary[w.word][att]
						# score via the first sememe of each non-empty sememe path
						for sememePath in sememeList:
							if len(sememePath) >= 1:
								if sememePath[0] in SememeEmotionOrientation:
									sentenceInfo['EmotionEnergy'] += SememeEmotionOrientation[sememePath[0]]['Score']

			# confidence: 0 when no dictionary emotion words were found,
			# otherwise energy per word (WordCount >= 1 whenever a word matched)
			if (len(sentenceInfo['PositiveWords']) == 0) and (len(sentenceInfo['NegativeWords']) == 0):
				sentenceInfo['EmotionScore'] = 0
			else:
				sentenceInfo['EmotionScore'] =  sentenceInfo['EmotionEnergy'] * 1.0 / sentenceInfo['WordCount']


			Corpus[docName].append(sentenceInfo)

def Main():
	"""Run the full pipeline (Python 2): load Hownet resources, score
	100 corpus documents, and write up to 100 distinct emotion words
	(with their sentences and scores) to result.txt.
	"""
	# load Hownet resources; the second argument False marks the
	# negative dictionaries (defaults to positive)
	BuildSememeTree('./hownet/WHOLE.dat')
	BuildGlossary('./hownet/glossary.dat')
	LoadEmotionDictionary('./hownet/cn_negative_emotion.txt', False)
	LoadEmotionDictionary('./hownet/cn_negative_judgement.txt', False)
	LoadEmotionDictionary('./hownet/cn_positive_emotion.txt')
	LoadEmotionDictionary('./hownet/cn_positive_judgement.txt')
	CalculatorSememeEmotionScore()


	# parse documents 1.txt .. 100.txt in the corpus
	for i in range(1, 101):
		docName = str(i) + '.txt'
		ParseDocument("./corpus/", docName)

	SentenceList=[]

	# flatten the per-document sentence dicts into one list
	for doc in Corpus:
		for s in Corpus[doc]:
			SentenceList.append(s)

	# sort by |EmotionScore| descending: most confident sentences first
	sortedResult = sorted(SentenceList, key = lambda s : abs(s['EmotionScore']))[::-1]

	outputCount = 0
	outputList = {}  # used as a set of already-emitted words; values are meaningless
	i = 0
	with codecs.open("result.txt", "w", "utf-8") as f:
		for s in sortedResult: 
			targetList = []
			# positive score -> emit the sentence's positive words,
			# otherwise (including score 0) its negative words
			if 0 < s['EmotionScore']:
				word_polarity = 'pos'
				targetList = s['PositiveWords']
			else:
				word_polarity = 'neg'
				targetList = s['NegativeWords']

			for w in targetList:
				# emit each distinct word only once across all sentences
				if w in outputList:
					continue
				else:
					outputCount += 1
					outputList[w] = outputCount  # just put the word in key list. The values is meanless

				# write one result line; stored words are UTF-8 byte strings,
				# so decode back to unicode for the codecs writer (Python 2)
				i += 1
				f.write(str(i).encode('utf-8'))
				f.write(' ')
				# NOTE(review): looks like a fixed author/submission id — confirm
				f.write(u'1130332046')
				f.write(' ')
				f.write(w.decode("utf-8"))
				f.write(' ')
				f.write(s['DocName'].decode("utf-8"))
				f.write(' ')
				f.write(word_polarity.decode("utf-8"))
				f.write(' ')
				f.write(s['RawSentence'])
				f.write(' ')
				f.write(str(abs(s['EmotionScore'])*100).decode("utf-8"))
				f.write(u'\n')

				# stop after 100 distinct words: break the word loop...
				if outputCount >= 100:
					break
			# ...and then the sentence loop as well
			if outputCount >= 100:
					break


def PrintListWithChinese(l):
	for x in l:
		print x.decode("utf-8")


def TestPass():
	with open("./corpus/1.txt") as doc:
		data = doc.read()
		if data[:3] == codecs.BOM_UTF8:
			data = doc[3:]
	
	print data.decode("utf-8")

	print "---------------------"
	
	seg_list = jieba.cut(data,cut_all=True)
	print "Full Mode:", "/ ".join(seg_list) #全模式

	print "---------------------"

	seg_list_2 = jieba.cut_for_search(data)
	print ", ".join(seg_list_2)
	
	print "---------------------"

	words = pseg.cut(data)
	for w in words:
		print w.word, w.flag



if __name__=='__main__':
	parser = argparse.ArgumentParser( description="Test")
	parser.add_argument('-m', action='store', dest='model', help='Running model')
	results = parser.parse_args()

	commands = {
		"test": TestPass,
		"run": Main
	}

	if results.model in commands:
		print 'running in {0} mode!'.format(results.model)
		print "---------------------"
		commands[results.model]()
	else:
		print 'running in free mode!'
		print "---------------------"

		# with codecs.open("pru_uni.txt", "w", "utf-8") as f:
		# 	txt = unicode("campeón\n", "utf-8")
		# 	f.write(txt)
		# 	f.write(u'中文\n')


		# BuildSememeTree('./hownet/WHOLE.dat')
		# BuildGlossary('./hownet/glossary.dat')
		# LoadEmotionDictionary('./hownet/cn_negative_emotion.txt', False)
		# LoadEmotionDictionary('./hownet/cn_negative_judgement.txt', False)
		# LoadEmotionDictionary('./hownet/cn_positive_emotion.txt')
		# LoadEmotionDictionary('./hownet/cn_positive_judgement.txt')
		# CalculatorSememeEmotionScore()


		# # docName = "1.txt"
		# # filePath = "./corpus/"+docName

		# for i in range(1, 101):
		# 	docName = str(i) + '.txt'
		# 	ParseDocument("./corpus/",  docName)

		# for doc in Corpus:
		# 	for s in Corpus[doc]:
		# 		print s['RawSentence']
		# 		print s['EmotionEnergy']
		# 		print s['EmotionScore']
		# 		print s['PositiveWords']
		# 		print s['NegativeWords']


				# if sentenceCount > 1:
				# 	break

					# if w.word in NegativeEmotionDict:
					# 	print w.word + " " + NegativeEmotionDict[w.word]
					# elif w.word in PositiveEmotionDict:
					# 	print w.word + " " + PositiveEmotionDict[w.word]



					# att = w.flag.upper()
					# if (att == 'V') or (att == 'A') or (att == 'N'):
						
						# print w.word, w.flag.upper()
					# print w.word, w.flag.upper()

					# TODO: go search words in emotion dictionary
					# 		find words which are not included in dict
					# 		calculator similarity value for these word
		print 'Done'





