#!/usr/bin/env python
#
# This file contains functions aimed at performing the post processing steps for our project.
# Ideally, this would be where we do any processing that is an improvement to our baseline
# approach by improving the precision of our results. 
#
# Here, we will try to eliminate duplicate results and eliminate unimportant results.
#

# Imports
from Bing import BingSearch
import math
from termVector import TermVector
from DateExtractionRE import DateExtracterRE
from pos_tagging import *
import re


# Shared date-extraction helper; used to strip dates from sentences before
# computing term-vector similarity in detectDuplicates().
dateExtracter = DateExtracterRE()


# Create the bing search object
# SECURITY/NOTE(review): the API key is hard-coded in source — consider
# loading it from an environment variable or config file instead of
# committing it to version control.
bing = BingSearch('1534AC2B77288249689946BFEF23F3F831AFA3D4')

# Function to perform the Bing Search on the Query Term
#
# Inputs: 
#     - the query term to search for
# 
# Returns:
#    - the number of total search results for the given query_term
def bingSearchTotalResults(query_term):
    """
    Perform a Bing web search for the given query term.

    Inputs:
        - query_term: the query string to search for

    Returns:
        - the total number of search results ('Total' field of the Bing
          response), or 0 when no web search response came back
    """
    # Strip boolean OR (|), AND (&) and quote (") operators from the query
    # in a single pass.  The old code compiled and applied three separate
    # regexes guarded by three 'in' checks, which is equivalent to this.
    query_term = re.sub(r'[|&"]', '', query_term)

    # Search for the cleaned query term.  (The old code also built a quoted
    # copy of the query that was never used; that dead code is removed.)
    response = bing.search_web(query_term, 1, 1)

    # Make sure we even got a web search response for the sentence
    if "Web" in response['SearchResponse']:
        # NOTE: The 'Total' field is the total number of search results
        return response['SearchResponse']['Web']['Total']

    print("We did not get a search response for the current sentence!")

    # No web search result section means 0 results
    return 0


# Function to take in a list of sentences and get the total number of 
# search results from Bing for each sentence
#
# Inputs:
#    - the list of sentences
#
# Outputs: 
#    - the list of search results for each sentence
def getSearchTotalResultsForSentences(events):
    """
    Stamp every event with a Bing total-result count.

    The live Bing lookup is currently disabled, so each event's
    searchResult attribute is simply initialized to 0.

    Inputs:
        - events: list of event objects; each gets a .searchResult attribute
    """
    for currentEvent in events:
        # Live lookup disabled for now:
        # currentEvent.searchResult = bingSearchTotalResults(currentEvent.sentence)
        currentEvent.searchResult = 0


# Function to calculate average of a set of values
#
# Inputs:
#    - a list of values
#
# Outputs:
#    - the average value
def getAverageValue(list_values):
    """
    Return the arithmetic mean of a list of numeric values.

    Inputs:
        - list_values: a list of numbers

    Returns:
        - the average value, or 0 for an empty list
    """
    # Guard against division by zero on an empty list
    if not list_values:
        return 0

    # Built-in sum() replaces the manual accumulation loop, which also
    # shadowed the 'sum' builtin
    return sum(list_values) / len(list_values)

# Function to calculate the standard deviation of a set of values
#
# Inputs:
#    - a list of values
#    - the avg of the list of values
#
# Outputs:
#    - the standard deviation
def getStandardDeviation(list_values, avg_value):
    """
    Compute the population standard deviation of list_values around the
    precomputed mean avg_value.

    Inputs:
        - list_values: a list of numbers
        - avg_value: the average of list_values

    Returns:
        - the standard deviation, or 0 for an empty list
    """
    n = len(list_values)
    if n == 0:
        return 0

    # Accumulate the squared deviations from the mean
    squared_diffs = 0.0
    for v in list_values:
        squared_diffs += (v - avg_value) ** 2

    return math.sqrt(squared_diffs / n)

# Function to find minimum value
def getMinimumValue(list_values):
    
    min = 999999999
    
    for value in list_values:
        if value < min:
            min = value
            
    return min

# Function to find maximum value
def getMaximumValue(list_values):
    """
    Return the largest value in list_values.

    Returns 0 for an empty list, preserving the old behavior for
    existing callers.
    """
    # The old implementation started from 0, which wrongly returned 0 for
    # lists containing only negative values; builtin max() fixes that.
    if not list_values:
        return 0
    return max(list_values)

def detectDuplicates(events):
    """
    Detect near-duplicate events by cosine similarity of their sentences.

    Dates are stripped from each sentence before building the term-vector
    similarity matrix.  Every pair with similarity > 0.5 is recorded on
    BOTH events' .duplicates lists as an (other_event, similarity) tuple.

    Inputs:
        - events: list of event objects with .sentence and .duplicates
    """
    # Build the date-free sentence list used for the term-vector comparison.
    # (The old loop bound the stripped sentence to a variable named 'str',
    # shadowing the builtin.)
    sentences = [dateExtracter.removeDate(event.sentence) for event in events]

    tv = TermVector(sentences)
    matrix = tv.matrix_sim(sentences)

    # Walk only the upper triangle: similarity is symmetric,
    # sim(x, y) == sim(y, x), and x == y is never a duplicate pair.
    for x in range(len(matrix)):
        for y in range(x + 1, len(matrix[x])):
            similarity = matrix[x][y]
            if similarity > 0.5:
                events[x].duplicates.append((events[y], similarity))
                events[y].duplicates.append((events[x], similarity))
				
def detectDuplicateVerbs(events):
    """
    Detect events whose sentences share similar verbs.

    Every pair with verb similarity >= 0.5 is recorded on BOTH events'
    .duplicateVerbs lists as an (other_event, similarity) tuple.

    Inputs:
        - events: list of event objects with .sentence and .duplicateVerbs
    """
    # Extract the verb list for every sentence up front
    verbs = [getVerbsFromSentence(event.sentence) for event in events]

    # Walk only the upper triangle: similarity is symmetric, and x == y is
    # never a duplicate pair.  (The old loop also mixed tab and space
    # indentation; this body is uniformly 4-space indented.)
    for x in range(len(verbs)):
        for y in range(x + 1, len(verbs)):
            similarity = calculateVerbSimilarity(verbs[x], verbs[y])
            if similarity >= 0.5:
                events[x].duplicateVerbs.append((events[y], similarity))
                events[y].duplicateVerbs.append((events[x], similarity))


##
# Removes duplicate events based on verbs from eventlist, logs output to fileName
#
def removeDuplicateVerbs(eventList, fileName):
    """
    Remove duplicate events (based on verb similarity) from eventList,
    logging every decision to fileName.

    Inputs:
        - eventList: list of events with .duplicateVerbs populated by
          detectDuplicateVerbs and a .bRemoved flag
        - fileName: path of the log file to (over)write
    """
    # Iterate over a snapshot: the old code looped over eventList while
    # calling eventList.remove() on it, which silently skips elements.
    with open(fileName, 'w') as outputFile:
        for event in list(eventList):
            # Skip events that were themselves removed as duplicates, and
            # events with nothing to compare against
            if event.bRemoved or not event.duplicateVerbs:
                continue

            outputFile.write("\n\nFor event: %s (%s)" % (event.sentence, getVerbsFromSentence(event.sentence)))
            for similarEvent in event.duplicateVerbs:
                outputFile.write("\n\t%f:%s (%s)" % (similarEvent[1], similarEvent[0].sentence, getVerbsFromSentence(similarEvent[0].sentence)))

                # The event may have already been removed
                if similarEvent[0].bRemoved == False:
                    outputFile.write("\n\tRemoving event")
                    eventList.remove(similarEvent[0])
                    similarEvent[0].bRemoved = True

##
# Removes duplicate events from eventlist, logs output to fileName
#
def removeDuplicates(eventList, fileName):
    """
    Remove duplicate events (based on sentence similarity) from eventList,
    logging every decision to fileName.

    Inputs:
        - eventList: list of events with .duplicates populated by
          detectDuplicates and a .bRemoved flag
        - fileName: path of the log file to (over)write
    """
    # Iterate over a snapshot: the old code looped over eventList while
    # calling eventList.remove() on it, which silently skips elements.
    with open(fileName, 'w') as outputFile:
        for event in list(eventList):
            # Skip events that were themselves removed as duplicates, and
            # events with nothing to compare against
            if event.bRemoved or not event.duplicates:
                continue

            outputFile.write("\n\nFor event: %s" % (event.sentence))
            for similarEvent in event.duplicates:
                outputFile.write("\n\t%f:%s" % (similarEvent[1], similarEvent[0].sentence))
                outputFile.write("\n\t" + str(event.bRemoved))

                # The duplicate may have already been removed earlier
                if similarEvent[0].bRemoved == False:
                    outputFile.write("\n\tRemoving event: %s" % (similarEvent[0].sentence))
                    eventList.remove(similarEvent[0])
                    similarEvent[0].bRemoved = True
			

# function to detect duplicate sentences within years
def detectDuplicatesByYear(events):
    """
    Run duplicate detection only among events sharing the same year.

    Events are assumed to be in chronological order, so each year forms a
    consecutive run in the list; detectDuplicates is applied to each run.

    Inputs:
        - events: chronologically ordered list of events with .date
    """
    if not events:
        return

    # Slice the event list into consecutive runs that share a year
    runs = []
    currentRun = []
    currentYear = events[0].date.year
    for ev in events:
        if ev.date.year == currentYear:
            currentRun.append(ev)
        else:
            runs.append(currentRun)
            currentRun = [ev]
            currentYear = ev.date.year
    runs.append(currentRun)

    # Detect duplicates within each year independently
    for run in runs:
        detectDuplicates(run)

def detectDuplicatesSameDayVerb(events):
    """
    Run verb-based duplicate detection only among events sharing a date.

    Events are assumed to be in chronological order, so each day forms a
    consecutive run in the list; detectDuplicateVerbs is applied per run.

    Inputs:
        - events: chronologically ordered list of events with .date
    """
    if not events:
        return

    # Slice the event list into consecutive runs that share a date
    runs = []
    currentRun = []
    currentDate = events[0].date
    for ev in events:
        if ev.date == currentDate:
            currentRun.append(ev)
        else:
            runs.append(currentRun)
            currentRun = [ev]
            currentDate = ev.date
    runs.append(currentRun)

    # Detect verb duplicates within each day independently
    for run in runs:
        detectDuplicateVerbs(run)
        

# Function to remove duplicate events on the same day that have similar verbs
def removeDuplicatesSameDayVerbOld(events):
    """
    Remove events that share a date with the following event and have
    verb similarity > 0.5 with it.  Logs all decisions to
    output/sameDayDuplicatesRemoved.txt.

    NOTE: legacy implementation, superseded by detectDuplicatesSameDayVerb
    plus removeDuplicateVerbs.

    Inputs:
        - events: chronologically ordered list of events with .date and
          .sentence; duplicates are removed from this list in place
    """
    with open("output/sameDayDuplicatesRemoved.txt", 'w') as outputFile:
        # Walk a real snapshot of the list: the old code aliased it
        # ('eventsCopy = events'), so events.remove() shifted the indices
        # mid-walk and skipped pairs.
        eventsCopy = list(events)

        for i in range(len(eventsCopy) - 1):
            firstEvent = eventsCopy[i]
            nextEvent = eventsCopy[i + 1]

            # Only consider removal when the two events share a date
            if firstEvent.date == nextEvent.date:
                outputFile.write("The dates are the same: %s - %s\n" % (firstEvent.date, nextEvent.date))

                # Get the verbs from each sentence
                firstEventVerbs = getVerbsFromSentence(firstEvent.sentence)
                secondEventVerbs = getVerbsFromSentence(nextEvent.sentence)

                outputFile.write("\tThe first event had verbs: %s\n" % firstEventVerbs)
                outputFile.write("\tThe second event had verbs: %s\n" % secondEventVerbs)

                # Check the similarity between the verbs
                verbSimilarity = calculateVerbSimilarity(firstEventVerbs, secondEventVerbs)

                # If the verb similarity is > 0.5, drop the earlier event
                if verbSimilarity > 0.5:
                    outputFile.write("INFO: Verb similarity was > 0.5, we want to remove the sentence!\n")
                    outputFile.write("REMOVED sentence: %s\n" % firstEvent.sentence)
                    outputFile.write("\tMATCHED sentence: %s\n\n\n" % nextEvent.sentence)
                    events.remove(firstEvent)
    
def calculateVerbSimilarity(verbs1, verbs2):
    """
    Compute the fraction of verbs shared between two sentences.

    The longer verb list is walked and each of its verbs that also appears
    in the shorter list counts as a match; the result is matches divided by
    the length of the longer list.

    Inputs:
        - verbs1, verbs2: lists of verbs extracted from two sentences

    Returns:
        - similarity in [0, 1]; 0 when both lists are empty
    """
    # Orient the comparison so we always normalize by the longer list
    if len(verbs1) > len(verbs2):
        longer, shorter = verbs1, verbs2
    else:
        longer, shorter = verbs2, verbs1

    # Both lists empty: nothing to compare
    if len(longer) == 0:
        return 0

    # Count occurrences in the longer list that also appear in the shorter
    shared = 0.0
    for verb in longer:
        if verb in shorter:
            shared += 1

    return shared / len(longer)
    

# Function to get median value
def getMedianValue(list_values):
    """
    Return the median of a list of numeric values.

    Inputs:
        - list_values: a list of numbers (left unmodified)

    Returns:
        - the median; for an even-length list, the mean of the two center
          values (as a float); 0 for an empty list
    """
    if not list_values:
        return 0

    # Sort a copy — the old code sorted the caller's list in place
    ordered = sorted(list_values)
    n = len(ordered)
    mid = n // 2

    if n % 2 == 0:
        # Even count: average the two center values.  The old index math
        # used '/' (float under Python 3, so ceil() picked the wrong
        # elements) and integer-divided the average for int inputs.
        return (ordered[mid - 1] + ordered[mid]) / 2.0

    # Odd count: the exact middle element
    return ordered[mid]

def display_search_statistics(total_search_list):
    """
    Print summary statistics (mean, standard deviation, min, max, median)
    for a list of total-search-result counts.

    Inputs:
        - total_search_list: list of per-sentence result counts
    """
    # Average of the total query counts
    mean = getAverageValue(total_search_list)
    print("avg_total_query_count = %s" % mean)

    # Standard deviation around that average
    print("std_deviation = %s" % getStandardDeviation(total_search_list, mean))

    # Smallest count
    print("min_value = %s" % getMinimumValue(total_search_list))

    # Largest count
    print("max_value = %s" % getMaximumValue(total_search_list))

    # Median count
    print("median_value = %s" % getMedianValue(total_search_list))

# Function to calculate the term frequency for each word in a given sentence
#
# Inputs:
#    - the sentence to find term frequencies within
#
# Returns:
#    - an array of tuples: (word, count) for each word in the input sentence 
#    (i.e. ["the"] = 10, ["store"] = 2, etc.)
#
def calcTermFrequencyForSentence(sentence):
    """
    Calculate the term frequency for each word in the given sentence.

    Inputs:
        - sentence: the sentence string to count words within

    Returns:
        - a list of (word, count) tuples in first-seen order, fulfilling
          the documented contract (the old body was an unimplemented stub
          that returned None)
    """
    counts = {}
    # Whitespace tokenization; punctuation is kept attached to words
    for word in sentence.split():
        counts[word] = counts.get(word, 0) + 1
    return list(counts.items())
    

# Function to calculate the term frequency for each sentence in a given web page
#
# Inputs:
#    - the list of sentences in the web page
#
# Outputs:
#    - an array of tuples: (word, count) for each word found in the web page
#    (i.e. ["the"] = 10, ["store"] = 2, etc.)
def calcTermFrequencyForWebPage(sentences):
    """
    Calculate the term frequency for each word across a web page.

    Inputs:
        - sentences: the list of sentence strings in the web page

    Returns:
        - a list of (word, count) tuples aggregated over all sentences, in
          first-seen order (the old body was an unimplemented stub that
          returned None)
    """
    totals = {}
    # Count words inline per sentence so this function has no dependency
    # on the per-sentence helper being implemented
    for sentence in sentences:
        for word in sentence.split():
            totals[word] = totals.get(word, 0) + 1
    return list(totals.items())
