#!/usr/bin/env python
#
# This class is used to generate and compare sentence term vectors
#
# requires sudo apt-get install python-numpy
#

# Imports
import re
import porter
from numpy import zeros,dot
from numpy.linalg import norm

class TermVector(object):
	"""Bag-of-words term vectors over a fixed corpus, with cosine similarity.

	The constructor scans the corpus once, building an index that maps each
	stemmed, non-stop word to a fixed position; sent_vec() then projects any
	sentence onto that fixed-dimension vector space.
	"""

	# Class-level defaults; every instance overwrites these in __init__.
	stemmer = None   # porter.PorterStemmer instance
	key_idx = None   # dict: stemmed word -> (vector position, global count)
	splitter = None  # compiled regex used to tokenize sentences

	def __init__(self, sentences):
		"""Build the word-position index from the corpus.

		sentences -- iterable of strings; the global list of sentences
		             defining the vector space.
		"""
		self.stemmer = porter.PorterStemmer()
		stop_words = ['i','am','the','you'] # replace with real stop words
		all_words = dict()
		# Raw string avoids the invalid-escape warning for \- on newer
		# Pythons; matches runs of letters, hyphens and apostrophes only,
		# so all other punctuation is stripped.
		self.splitter = re.compile(r"[a-z\-']+", re.I)

		# Lower-case each token, drop stop words, then stem and count
		# occurrences across the whole corpus.
		for sentence in sentences:
			for w in self.splitter.findall(sentence):
				w = w.lower() # or you could pass in lower case words to begin with
				if w not in stop_words:
					ws = self.stemmer.stem(w, 0, len(w) - 1)
					all_words.setdefault(ws, 0)
					all_words[ws] += 1

		# Build an index of keys so that we know the word positions for the
		# vector.  sorted() (rather than list.sort() on dict.keys(), which
		# fails on Python 3 where keys() is a view) works on both 2 and 3.
		self.key_idx = dict() # key -> (position, global count)
		for i, key in enumerate(sorted(all_words)):
			self.key_idx[key] = (i, all_words[key])

	def sent_vec(self, sentence):
		"""Return the (dense numpy) term-count vector for *sentence*.

		Example: if the corpus vocabulary is [bear, cat, dog] and the input
		contains cat and bear, [1, 1, 0] is returned.  Words not in the
		corpus vocabulary are ignored.
		"""
		v = zeros(len(self.key_idx))
		for word in self.splitter.findall(sentence):
			# Lower-case BEFORE stemming, matching __init__.  The original
			# stemmed the raw word and lowered the result afterwards, so a
			# capitalised word could stem differently and miss the index.
			ws = self.stemmer.stem(word.lower(), 0, len(word) - 1)
			keydata = self.key_idx.get(ws, None) # (position, global count)
			if keydata:
				v[keydata[0]] += 1
		return v

	def cos(self, v1, v2):
		"""Cosine similarity of two vectors, as a float in [-1, 1].

		Returns 0.0 when either vector has zero norm (e.g. a sentence with
		no in-vocabulary words) instead of dividing by zero.
		"""
		denom = norm(v1) * norm(v2)
		if denom == 0:
			return 0.0
		return float(dot(v1, v2) / denom)

	def matrix_sim(self, sentences):
		"""Return the n-by-n matrix of pairwise cosine similarities.

		sentences -- sequence of strings; entry [x][y] is the similarity of
		             sentences[x] and sentences[y].
		"""
		n = len(sentences)
		# Compute each sentence vector once up front; the original rebuilt
		# both vectors inside the double loop (O(n^2) sent_vec calls).
		vecs = [self.sent_vec(s) for s in sentences]
		matrix = zeros([n, n])
		for x in range(n):
			for y in range(n):
				matrix[x][y] = self.cos(vecs[x], vecs[y])
		return matrix


############### MAIN ################

if __name__ == '__main__':
	print "Running Test..." 
	doc1 = "I like to eat chicken\nnoodle soup."
	doc2 = "I have read the book \"Chicken noodle soup for the soul\"."
	doc3 = "Another sentence with some words and more words"
	doc4 = "Duplicate number one with some more words and this with that"
	doc5 = "Duplicate number one with some more words and this with that"
	doc6 = "[202] On November 5, 2009, Jobs was named the CEO of the decade by Fortune magazine."
	doc7 = "[202] On November 5, 2009, Jobs was named the CEO of the decade by Fortune magazine."

	print "\nUsing Doc1: %s" % doc1
	print "\nUsing Doc2: %s" % doc2
	print "\nUsing Doc3: %s" % doc3

	tv = TermVector((doc1, doc2, doc3, doc4, doc5, doc6, doc7))
	v1 = tv.sent_vec(doc1)
	v2 = tv.sent_vec(doc2)
	v3 = tv.sent_vec(doc3)
	v4 = tv.sent_vec(doc4)
	v5 = tv.sent_vec(doc5)
	v6 = tv.sent_vec(doc6) 
	v7 = tv.sent_vec(doc7)
	
	
	print "Similarity doc1 vs doc2 %s" % tv.cos(v1,v2)
	print "Similarity doc1 vs doc3 %s" % tv.cos(v1,v3)
	print "Similarity doc4 vs doc5 %s" % tv.cos(v4,v5)
	print "Similarity doc6 vs doc7 %s" % tv.cos(v6,v7)

	tv.matrix_sim((doc1, doc2, doc3, doc4, doc5))


	
