#!/usr/bin/env python
#
# This file is the main controller of the project: it is executed as the main
# entry point of the application.
#

# Imports
import urllib
import json as m_json
import urllib2
import os
from Presentation import *
from Bing import BingSearch
from sentence_extraction import *
from entity_extraction import *
from DateExtractionRE import DateExtracterRE
from post_processing import *
from termVector import TermVector
from Event import Event
from pos_tagging import getPOSTagsForSentence


# Function to get the query term from the user (via prompt).
#
# Input: n/a
# Output: String representation of the query term
def getUserQueryTerm():
    
    # Prompt the user for the query term
    query_term = raw_input("Enter the query term: ")
    
    # TODO: remove the print
    print "The user entered: ", query_term
    
    # TODO: user input verification would occur here (making sure we got valid input)
    
    return query_term


# Function to get the maximum number of pages to search for results.
#
# Input: n/a
# Output: Integer representation of the max search results
def getMaxSearchResults():
    
    # Prompt the user for the query term
    max_search_results = input("Enter the maximum number of search results (integer): ")
    
    # TODO: remove the print
    print "The user entered: ", max_search_results
    
    # TODO: user input verification would occur here (making sure we got valid input)
    
    return max_search_results
    

# Function to perform the Bing Search on the Query Term
#
# Inputs: 
#     - the maximum number of search results to return
#     - the query term to search for
# 
# Returns a list of lists of results [ [results 0 - 20] [results 20 - 40] ...]
def bingSearch(maxNumResults, query_term):

    results = []
    urls_dir = "./search_output/urls/"
    if not os.path.exists(urls_dir):
        os.makedirs(urls_dir)
    out_dir = "./search_output/" + query_term + "/"
    directory = os.path.dirname(out_dir)

    if not os.path.exists(directory):
        os.makedirs(directory)

    
    # get search results
    for i in range(maxNumResults / 20):
        bing = BingSearch('1534AC2B77288249689946BFEF23F3F831AFA3D4')    
        response = bing.search_web(query_term, 20, i*20)

        results.append(response)

        print "\n"
        print "The query term searched for: %s" % (query_term)
        
        #
        # NOTE: The 'Total' field is the total number of search results
        #
        total_search_results = response['SearchResponse']['Web']['Total']
        print "Total number of search results available: %s" % (total_search_results)
        
        temp = response['SearchResponse']['Web']['Results']
        print "Number of search results considered for this run: %s" % (len(temp))
        print "\n"
        
    # Also, forcibly search for query term + "timeline" to see if web page timelines exist
    # for this entity
    # We just want to grab the first 5 pages of timelines
    if maxNumResults != 0:
        bing = BingSearch('1534AC2B77288249689946BFEF23F3F831AFA3D4')    
        
        # Append "timeline" to the query term
        query_term_timeline = query_term + " timeline"
        
        response = bing.search_web(query_term_timeline, 5, 0)

        results.append(response)

        print "\n"
        print "The query term searched for: %s" % (query_term)
        
        #
        # NOTE: The 'Total' field is the total number of search results
        #
        total_search_results = response['SearchResponse']['Web']['Total']
        print "Total number of search results available: %s" % (total_search_results)
        
        temp = response['SearchResponse']['Web']['Results']
        print "Number of search results considered for this run: %s" % (len(temp))
        print "\n"


    # Initialize count
    count = 0
    
    # Loop over all of the result pages we downloaded
    # download pages 
    for item in results:
    
        webResult = item['SearchResponse']['Web']['Results']
        
        print len(webResult)
    
        for hit in webResult:
            count = count + 1
            print "*****************************************************"
            print "[%d] Title: %s" % (count, hit['Title'])
            print "[%d] URL: %s" % (count, hit['Url'])
            url = hit['Url']
            filename = "./search_output/" + query_term + "/" + str(count) +  "_page_" + hit['Title']
            if not os.path.exists(filename):
                try:
                    networkObj = urllib.urlretrieve(hit['Url'], filename)
                    filename = networkObj[0]
                    f = open(filename,'r')
                    temp = f.read()
                    f.close()

                    f = open(filename, 'w')
                    f.write(url+"\n\n")

                    f.write(temp)
                    f.close()
                    print "Downloaded File: %s" % filename
                except:
                    print "Could not handle filename: %s" % filename
            else:
                print "Already downloaded : %s" % hit['Title']
    

    # return directory with pages
    return out_dir

def hexdump( chars, sep, width ):
    while chars:
        line = chars[:width]
        chars = chars[width:]
        line = line.ljust( width, '\000' )
        print "%s%s%s" % ( sep.join( "%02x" % ord(c) for c in line ),
            sep, quotechars( line ))

def quotechars( chars ):
    """Return chars with every non-alphanumeric character replaced by '.'."""
    out = []
    for ch in chars:
        out.append(ch if ch.isalnum() else '.')
    return ''.join(out)

def filter_non_printable(s):
    """Return *s* with ASCII control characters removed.

    Keeps every character whose ordinal is greater than 31, plus TAB
    (ordinal 9); strips other control characters such as newlines and NULs.
    """
    # BUGFIX: the parameter was named 'str', shadowing the builtin type.
    return ''.join([c for c in s if ord(c) > 31 or ord(c) == 9])

def removeNonAscii(s): return "".join(i for i in s if ord(i)<128)

######################################################################
### Simulated main()
######################################################################

# Global list of per-page sentence lists; the main loop below appends one
# list of dated entity sentences for every downloaded page.
sentences_list = []

# Get the query term from the user (interactive prompt)
query = getUserQueryTerm()

# Get the maximum number of search results from the user (interactive prompt)
max_results = getMaxSearchResults()

# Perform the Bing search; the return value is the directory that holds the
# downloaded result pages (iterated below with os.listdir)
results = bingSearch(max_results, query)

# Count of web pages processed so far
count = 0

# Regular-expression based date extractor (from DateExtractionRE)
de = DateExtracterRE()

# Create the ./output/ directory if it doesn't exist
out_dir = "./output/"
directory = os.path.dirname(out_dir)
if not os.path.exists(directory):
    os.makedirs(directory)


#
# Loop over the results of the search
# results = "./test/"
for filename in os.listdir(results):
    count = count + 1
    print "%s%s" % (results, filename)
    # Create a file for the entity sentences of each URL
    filename_output_ent_date = "./output/" + str(count) + "_output_ent_date"
    filename_output_ent_no_date = "./output/" + str(count) + "_output_ent_no_date"
    
    # Get the URL for the given page
    file_url = open(results+filename, 'r')
    url = file_url.readline()
    file_url.close()
    
    # Extract the sentences from the URL
    sentences = ExtractSentences(results+filename, count)
    
    # Extract sentences that contain the query term
    entity_sentences = ExtractExactEntitySentences(sentences, query)
    
    # Check if we have entity sentences
    if len(entity_sentences) > 0:
        # Try to open the input file for writing
        file_output_no_date = open(filename_output_ent_no_date, 'w')
        
		# write sentences
        for ent_sent in entity_sentences:
            if not ent_sent is None:
                # Print out the interesting info to the output files
                file_output_no_date.write(" ---- Sentence: ----\n")
                file_output_no_date.write(ent_sent)
                file_output_no_date.write("\n")

        file_output_no_date.close()
    
    # Extract sentences that contain a date
    try:
        entity_date_sentences = de.extractDates(entity_sentences, url)
    except:
        print "ERROR: call to extractDates() failed, ignoring for now!\n"
    
    # Add the sentences from the current URL to the global list of sentences
    sentences_list.append(entity_date_sentences)

    print "Found %d dates in %d sentences" %( len(entity_date_sentences), len(entity_sentences))
    
    # Check if we have entity sentences
    if len(entity_date_sentences) > 0:
        # Try to open the input file for writing
        file_output = open(filename_output_ent_date, 'w')
        
        # Try to write the file
        for ent_date_sent in entity_date_sentences:    
            # Split up the entity_date_sentence into its datetime and string representations
            (datetime, sentence, url_event) = ent_date_sent
            
            # Print out the interesting info to the output files
            file_output.write("---- Datetime = %s ----\n" % str(datetime))
            file_output.write("---- Sentence: ----\n")
            file_output.write(str(sentence))
            file_output.write("\n")

        file_output.close()
    
# After we have processed all of the search results, log each sentence we found in one file

file_sentences = open("output/sentences_total_output.txt", 'w')

# Total number of sentences collected from all sources
total_sentences = 0

# Flat sentence_list that is to hold all of the sentences from every page
global_sentences_list = []

# Flatten the per-page lists; each entry appears to be a
# (datetime, sentence, url) tuple produced by extractDates() -- see the
# 3-way unpacking in the per-page loop above.  TODO confirm.
for sentence_list in sentences_list:
    for sentence in sentence_list:
        file_sentences.write(" ~~ Sentence that contains the entity: ~~\n")
        file_sentences.write(str(sentence))
        file_sentences.write("\n")

        # increment the total number of sentences we got
        total_sentences = total_sentences + 1

        # Add the sentence to the global flat list
        global_sentences_list.append(sentence)

# Print out a message about how many total sentences we got
print "\nINFO: We got a total of %s sentences from %s web pages!" % (total_sentences,count)

file_sentences.close()
    
# Sort the flat list of sentence tuples; tuples compare element-wise, so this
# orders primarily by the first element (the extracted datetime)
global_sentences_list.sort()

# List of Event objects built from the sorted tuples
events = []

# After we have the sorted list of sentences, output it to a new file
# and build an Event for each entry

file_sentences_sorted = open("output/sentences_total_output_sorted.txt", 'w')

# Each sorted entry is indexed [0]=datetime, [1]=sentence, [2]=url
for sorted_sentence in global_sentences_list:
    file_sentences_sorted.write(" ~~ Sentence that contains the entity: ~~\n")
    file_sentences_sorted.write(str(sorted_sentence))
    file_sentences_sorted.write("\n")
    event = Event(sorted_sentence[0], sorted_sentence[1], sorted_sentence[2])
    events.append(event)


file_sentences_sorted.close()


# Only the Event objects are used from here on; free the raw tuple list
del global_sentences_list

#calculateSimularity(global_sentence_list, "

#
# Post-processing on the collected events.  The helpers below come from the
# post_processing / Presentation star-imports; the before/after length
# comparisons suggest they filter the list in place -- TODO confirm.
#

print "\nBefore removing bad sentences (total events: %d)" % len(events)
before = len(events)
events = RemoveBadSentences(events)
after = len(events)
print "After removing bad sentences, removed %d events (total events: %d)" % ((before - after), len(events))

# Duplicate-detection pass 1: cosine similarity (grouped by year)
print "\nBefore duplicate detection (cosine similarity) ..."
detectDuplicatesByYear(events)
before = len(events)
removeDuplicates(events, "./output/duplicates")
after = len(events)
print "After duplicate detection (cosine similarity), removed %d events (total events: %d)" % ((before - after), len(events))

#
# Duplicate-detection pass 2: remove events that have the same date
# (up to the day) and mention the same verbs
#
print "\nBefore duplicate detection (same day, verbs)... (total events: %d)" % len(events)
before = len(events)
detectDuplicatesSameDayVerb(events)
removeDuplicateVerbs(events, "output/sameDayDuplicatesRemoved.txt")
after = len(events)
print "After duplicate detection (same day, verbs), removed %d events (total events: %d)\n" % ((before - after), len(events))

# Render the final events to an HTML file
display(events, "./displayOutput.html")

# Estimate each event's importance from the number of Bing search results for
# its sentence (populates event.searchResult, read below -- TODO confirm
# against the helper's implementation)
getSearchTotalResultsForSentences(events)

# Per-event search-result counts, collected for the statistics report
total_search_results_list = []

# Log the search-result count obtained for every event's sentence
file_search_results_count = open("output/sentences_total_search_results.txt", 'w')

# Loop over the results of getting total search result counts
for event in events:
    file_search_results_count.write("For sentence: %s\n" % event.sentence)
    file_search_results_count.write("--> Got search results count: %s\n\n" % event.searchResult)

    # Append the search result count for this event to the global list of search results counts
    total_search_results_list.append(event.searchResult)

file_search_results_count.close()

print "\n__ Statistics of searches: __"
display_search_statistics(total_search_results_list)
print "__ End of Statistics __\n"

# Output files: events sorted by search-result count, a CSV-style dump, and
# the POS-tagged version of each sentence
file_search_results_sorted = open("output/sentences_total_search_results_sorted.txt", 'w')
output_csv_file = open("output/sentences_total_search_results_sorted_csv", "w")
pos_tagged_output = open("output/sentences_total_pos_tagged.txt", 'w')

# Sort ascending by search-result count (a semblance of importance of the event)
sorted_search_events = sorted(events, key=lambda event: event.searchResult)
for ev in sorted_search_events:
    file_search_results_sorted.write("Event sorted: count=%s, sentence=%s\n\n" % (ev.searchResult, ev.sentence))
    output_csv_file.write("%s=%s\n" % (ev.searchResult, ev.sentence))

    # Get the POS tagged version of the sentence
    pos_sentence = getPOSTagsForSentence(ev.sentence)
    pos_tagged_output.write("Sentence: %s\n" % ev.sentence)
    pos_tagged_output.write("POS tags: \n%s" % pos_sentence)
    pos_tagged_output.write("\n\n\n")

file_search_results_sorted.close()
output_csv_file.close()
pos_tagged_output.close()



            
