from nltk.tag import pos_tag
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from Token import Token
from englishwords import EnglishWords
from stopwords import StopWords
from nltk.stem.wordnet import WordNetLemmatizer
class Document:
	"""A single review document: metadata plus its tokenized, filtered words.

	On construction the review text is sentence-split, POS-tagged, and each
	word is wrapped in a Token; only words that are in the English dictionary
	and are NOT stop words are kept in self.words.

	Attributes:
		id, url, user, stars, date, review, polarity, confidence -- review
			metadata as passed to __init__ (types are caller-defined; the
			serializers below stringify them defensively).
		sentences -- list of sentence strings from sent_tokenize.
		words -- list of Token objects that passed both filters.
	"""

	def __init__(self, id, url, user, stars, date, review, polarity, confidence, dictionary_checker, stop_word_checker, lemmatizer, consider_pos=True):
		# NOTE: `id` shadows the builtin, but the parameter name is part of
		# the public interface, so it is kept for backward compatibility.
		self.id = id
		self.url = url
		self.user = user
		self.stars = stars
		self.date = date
		self.review = review
		self.polarity = polarity
		self.confidence = confidence
		self.sentences = []
		self.words = []
		self.dictionary_checker = dictionary_checker
		self.stop_word_checker = stop_word_checker
		self.lemmatizer = lemmatizer
		self.consider_pos = consider_pos
		# Eagerly tokenize so sentences/words are ready immediately.
		self.tokenize()

	def _fields(self):
		"""Return the metadata fields in CSV/XML serialization order."""
		return (("id", self.id), ("url", self.url), ("user", self.user),
			("date", self.date), ("stars", self.stars), ("review", self.review),
			("polarity", self.polarity), ("confidence", self.confidence))

	@staticmethod
	def _to_utf8(value):
		"""UTF-8 encode a field value, stringifying non-string types first.

		Fixes the original crash: calling .encode() directly on an int (e.g.
		id=1 or an integer star rating) raised AttributeError.
		"""
		try:
			return value.encode('utf-8')
		except AttributeError:
			return str(value).encode('utf-8')

	def toCSVRow(self):
		"""Return the document's fields as a list of UTF-8 byte strings."""
		return [self._to_utf8(value) for _, value in self._fields()]

	def toXML(self):
		"""Serialize the document as an XML fragment (one tag per field).

		Uses %s formatting so non-string fields no longer raise TypeError on
		concatenation. NOTE(review): values are not XML-escaped; review text
		containing '<', '>' or '&' yields malformed XML -- confirm upstream
		sanitization if that matters.
		"""
		parts = ["<doc id=\"%s\">" % self.id]
		for tag, value in self._fields():
			parts.append("<%s>%s</%s>" % (tag, value, tag))
		parts.append("</doc>")
		return '\n'.join(parts)

	def sent_tag(self):
		"""Split the raw review text into sentences."""
		self.sentences = sent_tokenize(self.review)

	def tokenize(self):
		"""POS-tag every word and keep dictionary words that are not stop words."""
		self.sent_tag()
		for sentence in self.sentences:
			words = word_tokenize(sentence)
			pos_tagged_words = pos_tag(words)
			for word, tag in pos_tagged_words:
				token = Token(word, tag, self.lemmatizer, self.consider_pos)
				# Keep only real English words that are not stop words.
				if self.dictionary_checker.isEnglishWord(token.word) and not self.stop_word_checker.isStopWord(token.word):
					self.words.append(token)

	def get_tokens(self):
		"""Return the kept tokens as a space-joined 'word(POS)' string."""
		return ' '.join("%s(%s)" % (token.word, token.pos) for token in self.words)

	def getStars(self):
		"""Return the star rating as stored (type is caller-defined)."""
		return self.stars
		
if __name__ == "__main__":
	# Smoke test for the word-list helpers; data paths are relative to the
	# project root. (Dead commented-out demo code removed -- its example
	# Document(...) call had the stars/date arguments swapped versus
	# __init__'s parameter order and would have crashed anyway.)
	dictionary_checker = EnglishWords("dat/new.english.words.list")
	stop_word_checker = StopWords("dat/stopwords")
	# print() as a function so the script runs under both Python 2 and 3;
	# the original `print x` statement is a SyntaxError on Python 3.
	print(stop_word_checker.isStopWord("some"))