import nltk

# Cascaded chunk grammar for simple noun phrases.  The raw string (r"""...)
# fixes the invalid "\$" escape in the PP\$ tag pattern, which raises a
# SyntaxWarning on modern Python; the string value itself is unchanged.
# loop=2 applies the cascade twice so rules referencing earlier chunk labels
# (e.g. RR uses NP) can match chunks produced on the first pass.
NPChunker = nltk.RegexpParser(
	r"""
	NN: {<NN.*>+}               # runs of any noun tag
	NN: {<NNP>+}                # runs of proper nouns
	JJ: {<JJ><,><JJ>}           # comma-joined adjective pairs
	NP: {<DT|PP\$>?<JJ>*<NN>}   # optional determiner/possessive, adjectives, noun chunk
	RR: {<NP><NN>}              # noun phrase followed by a noun chunk
	""",
	loop=2,
)


# Cascaded chunk grammar for aspect detection in reviews: early rules merge
# noun runs (NN), adverb/adjective groups (RB/JJ), determiners (DT) and verb
# sequences (VBD); the CHUN rules then assemble candidate aspect chunks, and
# the TOT rules coordinate CHUN chunks joined by commas or conjunctions.
# loop = 2 runs the whole cascade twice so rules that reference labels
# produced by earlier rules (CHUN inside CHUN/TOT, TOT inside TOT) can see
# chunks created on the first pass.
Aspect_Detector = nltk.RegexpParser(	"""	NN:	{<NN.*>+}
						NN:	{<NNP>+}
						NN:	{<NN.*><,><NN.*>}
						RB:	{<RB>*}
						JJ:	{<RB>?<JJ>+}
						JJ:	{<JJ><,|CC><JJ>}
						DT:	{<DT>+}
						VBD:	{<VBD>+<VBG>}
						CHUN:	{<NN><VBD>?<JJ>}
						CHUN:	{<VBG><IN>}
						CHUN: 	{<DT>?<JJ>*<NN>}
						CHUN:	{<CHUN><VBD>}
						TOT:	{<CHUN><,|CC><CHUN>}
						TOT:	{<TOT><,|CC><CHUN>}
					""", loop = 2)



class systemAgent():
	"""Tokenizes review text and chunks it with the module-level grammars.

	Fixes applied in review:
	- ``test`` was missing its colon (a SyntaxError that broke the file).
	- ``tokenize``/``chunk``/``test`` were missing ``self``, so the
	  ``self.chunk(self.tokenize(r))`` call in ``test`` could never work.
	- the character-offset cursor now advances to the end of each matched
	  word instead of by the word's length only, so leading or repeated
	  whitespace no longer mis-positions later tokens.
	"""

	def __init__(self):
		pass

	def tokenize(self, review):
		"""Split *review* into sentences and tag every token.

		Returns a flat list of ``(word, token, pos, start, end)`` tuples:
		``word`` is the original-case token, ``token`` its lowercased form
		(the form actually POS-tagged), ``pos`` the tag, and ``start``/
		``end`` the token's character offsets within its own sentence.
		"""
		results = []
		for sentence in nltk.sent_tokenize(review):
			tokens = nltk.word_tokenize(sentence.lower())
			words = nltk.word_tokenize(sentence)
			tags = nltk.pos_tag(tokens)
			positions = []
			cursor = 0  # character cursor into the sentence
			for word in words:
				# Search at or after the cursor so repeated words map to
				# their own occurrence.
				# NOTE(review): word_tokenize can rewrite some tokens
				# (e.g. quote characters), which would make index() raise
				# ValueError — confirm against real input.
				start = sentence.index(word, cursor)
				end = start + len(word)
				# Advance past the matched word (the original advanced by
				# len(word) only, which lags behind when the sentence has
				# leading or extra whitespace and can re-find earlier text).
				cursor = end
				positions.append((word, start, end))
			# NOTE(review): assumes lowercasing does not change the token
			# count, so tags and positions stay aligned — confirm.
			results += [
				(word, token, pos, start, end)
				for ((token, pos), (word, start, end)) in zip(tags, positions)
			]
		return results

	def chunk(self, tokenized):
		"""Parse the (word, pos) pairs of tokenize() output with Aspect_Detector."""
		sentence = [(word, pos) for (word, _, pos, _, _) in tokenized]
		return Aspect_Detector.parse(sentence)

	def test(self, r):
		"""Tokenize review *r*, chunk it, and draw the resulting parse tree."""
		self.chunk(self.tokenize(r)).draw()





			
