# encoding: utf-8

import jieba as fenci
import re
import Util.typeConverse as converter

# Annotation tag for company entities (presumably appended to recognized
# company names during labeling — TODO confirm against downstream usage).
companyEntity = "COMPANYEntity"
# Absolute path to the jieba user dictionary used by splitWord().
# Raw string so the Windows backslashes can never be read as escape
# sequences; the value is byte-identical to the original.
userDictPath = r"D:\项目\荣大语义分析\数据及描述-草稿\测试数据\用户词典\EntityIdentification.txt"

def splitText(Text):
	"""Split a prospectus-style document into a list of text segments.

	The first element is the front matter (everything before the table of
	contents marker "目录"); each following element is the body of one
	chapter, where chapters are headed "第X节" (X a Chinese numeral of one
	or two characters).

	:param Text: full document text.
	:return: list of segment strings, front matter first, chapters in
	         numeric order.
	:raises IndexError: if a chapter heading occurs only once in the text
	        (the code assumes each heading appears both in the table of
	        contents and at the chapter itself — TODO confirm).
	"""
	# Collect the distinct chapter headings, e.g. "第一节", "第十二节".
	chapterList = re.findall(r"(第[\u4e00-\u9fa5]{1,2}节)", Text)
	chapterList = list(set(chapterList))
	# Sort headings numerically by converting the Chinese numeral between
	# "第" and "节" to an integer.
	chapterList.sort(key=lambda item: converter.chinese2digits(item.lstrip("第").rstrip("节")))
	textList = list()
	# Front matter: everything up to the first "目录" (table of contents).
	textList.append(re.findall(r"^([\s\S]*?)目录", Text)[0])
	for i in range(0,len(chapterList)):
		if i < len(chapterList)-1:
			# Index [1] deliberately skips the first match, which is the
			# heading pair inside the table of contents; the second match
			# is the actual chapter body.
			textList.append(re.findall(chapterList[i] + r"([\s\S]*?)" + chapterList[i+1], Text)[1])
		else:
			# Last chapter: the first findall spans from the TOC mention of
			# the heading to end-of-file; the second findall on that span
			# re-matches the heading where it really starts the chapter.
			temp = re.findall(chapterList[i] + r"([\s\S]*?)$", Text)[0]
			textList.append(re.findall(chapterList[i] + r"([\s\S]*?)$", temp)[0])
	return textList

def splitWord(TextList):
	"""Tokenize every text segment with jieba.

	Loads the user dictionary at module-level ``userDictPath`` first so
	domain terms are segmented as single tokens.

	:param TextList: list of text segments (as produced by splitText).
	:return: list of strings, one per segment, tokens joined by spaces.
	"""
	# Load the dictionary once, before cutting, instead of per segment.
	fenci.load_userdict(userDictPath)
	return [" ".join(fenci.cut(segment)) for segment in TextList]

def entityJoin(Text):
	"""Debugging stub for entity joining — prints book-title-mark positions.

	NOTE(review): unfinished; the company/document entity handling is only
	sketched in comments, and the 818:825 slice is a hard-coded probe into
	one specific test document — confirm before relying on this.

	:param Text: tokenized text (space-separated, per splitWord output).
	"""
	startPos = 0  # kept for the planned scanning logic; currently unused
	# company entities (not implemented yet)
	# document entities (not implemented yet)
	openMark = Text.index("《")
	closeMark = Text.index("》")
	print(openMark)
	print(closeMark)
	print(Text[818:825].replace(" ",""))

def entityIdentify(inFile,outFile):
	"""Run the entity-identification pipeline over one input document.

	:param inFile: path of the UTF-8 text file to process.
	:param outFile: intended output path — currently unused (the entity
	       step below is still commented out; TODO wire it up).
	"""
	# 'with' guarantees the handle is closed even on a read error
	# (replaces the original manual try/finally).
	with open(inFile, 'r', encoding='utf-8') as file_object:
		all_the_text = file_object.read()

	# Part 1: split the document into front matter + chapter segments.
	textList = splitText(all_the_text)
	# print(textList)

	# Part 2: tokenize each segment with jieba.
	fenciResult = splitWord(textList)
	print(fenciResult[0])

	# Part 3: entity identification (not yet enabled).
	# entityJoin(fenciResult)