from nltk.tokenize import word_tokenize
from math import log
class TextCollection:
    """An incrementally built corpus of raw-text documents with tf/df/idf stats.

    Document frequencies and a vocabulary index are updated as each text is
    added, so ``df``/``idf``/``vocab_index`` lookups are O(1). Tokenization is
    delegated to nltk's ``word_tokenize`` (imported at module level).
    """

    def __init__(self):
        self.__df_cache = {}     # token -> number of added texts containing it
        self.__vocab_index = {}  # token -> 1-based index, in order of first sighting
        self.__texts = []        # raw text strings, in insertion order

    def add_text(self, text):
        """Add one document and update df / vocabulary statistics."""
        self.__texts.append(text)
        self.__build__df__(text)

    def add_text_list(self, list):
        # NOTE(review): the parameter name shadows the builtin `list`; kept
        # unchanged so existing keyword callers (add_text_list(list=...)) still work.
        """Add every document in the given iterable of texts."""
        for text in list:
            self.add_text(text)

    def texts(self):
        """Return the internal list of added texts (not a copy — do not mutate)."""
        return self.__texts

    def df(self, term):
        """Number of added texts containing *term*; 0 if never seen."""
        return self.__df_cache.get(term, 0)

    def tf(self, term, text):
        """Term frequency of *term* in *text*: occurrences / number of tokens.

        Returns 0.0 for an empty or whitespace-only text instead of raising
        ZeroDivisionError.

        BUG FIX: the previous version divided by ``len(text)`` — the *character*
        count of the raw string — which is not a term frequency. Normalize by
        the token count, as nltk's own TextCollection does.
        """
        tokens = word_tokenize(text)
        if not tokens:
            return 0.0
        count = sum(1 for token in tokens if token == term)
        return float(count) / len(tokens)

    def idf(self, term):
        """Inverse document frequency: log(N / df).

        Returns 0 for terms never seen in the collection, which keeps
        ``tfidf`` finite (and zero) for unknown terms.
        """
        df = self.__df_cache.get(term)
        if df:
            return log(float(len(self.__texts)) / df)
        else:
            return 0

    def tfidf(self, term, text):
        """tf(term, text) * idf(term) relative to this collection."""
        return self.tf(term, text) * self.idf(term)

    def vocab_index(self, term):
        """1-based index of *term* in order of first appearance; 0 if unknown."""
        return self.__vocab_index.get(term, 0)

    def __build__df__(self, text):
        """Update df counts and the vocabulary index for one newly added text.

        Each *distinct* token in the text bumps its document frequency by one;
        tokens seen for the first time ever also receive the next vocab index.
        (Method name kept for backward compatibility — trailing underscores
        prevent name mangling, so it is externally reachable.)
        """
        seen = set()  # distinct tokens of this text; renamed from `dict` (shadowed the builtin)
        for token in word_tokenize(text):
            if token not in seen:
                seen.add(token)
                if token in self.__df_cache:
                    self.__df_cache[token] += 1
                else:
                    self.__df_cache[token] = 1
                    # 1-based on purpose: 0 is the "unknown" sentinel in vocab_index()
                    self.__vocab_index[token] = len(self.__vocab_index) + 1