from tweet_cleanser import TweetCleanser
from nltk.tokenize import word_tokenize
import cPickle
class Review:
	"""A labeled review: an integer rating plus its cleansed text."""
	# Class-level defaults; overwritten per instance in __init__.
	label = None
	text = None

	def __init__(self, label, text):
		# Debug trace of the raw label as read from the corpus file.
		# Parenthesized form works under both Python 2 and Python 3
		# (the original bare `print label` is Python-2-only syntax).
		print(label)
		self.label = int(label)  # raises ValueError on a non-numeric label
		self.text = text
from text_collection import TextCollection
# Read tab-separated "<label>\t<review text>" lines from the corpus,
# cleanse each review, and collect Review objects. A copy of the cleansed
# rows is written to "test" for manual inspection.
handle = open("hungrygowhere_review.txt", "r")
texts = handle.read().split("\n")
output_handle = open("tfidf/tfidf", "w")
test_handle = open("test", "w")
cleanser = TweetCleanser("dat/english.words.list", "dat/stopwords")
reviews = []
i = 1
for text in texts:
	tokens = text.split("\t")
	# Skip blank/malformed lines -- read().split("\n") yields a trailing
	# empty string for a newline-terminated file, which previously
	# raised IndexError on tokens[1].
	if len(tokens) < 2:
		continue
	real_label = tokens[0]
	review_text = tokens[1]
	cleansed_text = cleanser.cleanse(review_text)
	print(i)  # progress indicator, one line per review
	i = i + 1
	review = Review(real_label, cleansed_text)
	reviews.append(review)
	test_handle.write("%s\t%s \n" % (real_label, cleansed_text))
# Close the inspection file now that all rows are written (the original
# never closed it, risking lost buffered output on exit).
test_handle.close()
# Build the TF-IDF text collection from every cleansed review and pickle
# it so later runs can reuse it without re-reading the corpus.
text_collection = TextCollection()
for review in reviews:
	text_collection.add_text(review.text)
output_collection = open("collection/hungrygowhere_collection", "w")
try:
	cPickle.dump(text_collection, output_collection)
finally:
	# Ensure the pickle file is closed even if dump() fails midway.
	output_collection.close()
# NOTE(review): the original also assigned
# `text_collection_texts = text_collection.texts()` and never used it;
# dropped as dead code.
# Emit one SVM-light-style line per review: "<label> <idx>:<tfidf> ...".
for review in reviews:
	tokens = word_tokenize(review.text)
	# Map the numeric score onto a ternary sentiment label
	# (presumably a 1-10 rating scale -- TODO confirm against the corpus):
	# >=7 positive, <=4 negative, 5-6 neutral.
	if review.label >= 7:
		real_label = 1
	elif review.label <= 4:
		real_label = -1
	else:
		real_label = 0
	# Collect each unique token once. The original appended one
	# "idx:weight" pair per token OCCURRENCE, producing duplicate and
	# unsorted feature indices, which SVM-light/libsvm parsers reject.
	# tfidf is identical for repeated tokens in the same text, so
	# deduplication does not change any feature value.
	features = {}
	for token in tokens:
		features[text_collection.vocab_index(token)] = text_collection.tfidf(token, review.text)
	string_buffer = ["%d:%f" % (idx, weight) for idx, weight in sorted(features.items())]
	result = ' '.join(string_buffer)
	print("%d %s\n" % (real_label, result))
	# Only write rows that actually have features.
	if result.strip() != "":
		output_handle.write("%d %s\n" % (real_label, result))
handle.close()
output_handle.close()