#!/usr/bin/env python
#
# This file contains functions aimed at extracting sentences from the source content
# of a given web page. 
#
# The main function to call is ExtractSentences() by passing in the string
# of a filename containing the web page content.
#

# Imports
import re
import nltk
from BeautifulSoup import BeautifulSoup

def removeNonAscii(s):
    # Keep only 7-bit ASCII characters (code point below 128); everything
    # else is dropped from the returned string.
    return "".join(ch for ch in s if ord(ch) < 128)

#
# Ordered (pattern, replacement) pairs used to decode HTML character
# entities and numeric references into plain-text equivalents.
# Order matters: '&amp;' is decoded early (so doubly-escaped entities
# resolve exactly as the original chain of substitutions did), and the
# catch-all numeric-reference pattern must stay last.
#
_HTML_ENTITY_REPLACEMENTS = [
    (r'&#160;', ' '),        # non-breaking space (numeric)
    (r'&amp;', '&'),         # ampersand
    (r'&nbsp;', ' '),        # non-breaking space
    (r'&quot;', '"'),        # double quote
    (r'&mdash;', '--'),      # em dash
    (r'&ndash;', '-'),       # en dash
    (r'&#8211;', '-'),       # en dash (numeric)
    (r'&#x2013;', '-'),      # en dash (hex)
    (r'&#x2014;', '--'),     # em dash (hex)
    (r'&#13;', ''),          # carriage return
    (r'&copy;', ''),         # copyright symbol
    (r'&#8212;', '--'),      # em dash (numeric)
    (r'&shy;', ''),          # soft hyphen
    (r'&hellip;', '...'),    # ellipsis
    (r'&#8230;', '...'),     # ellipsis (numeric)
    (r'&#124;', '|'),        # vertical bar
    (r'&#039;', '\''),       # apostrophe (zero-padded)
    (r'&#39;', '\''),        # apostrophe
    (r'&raquo;', '>>'),      # right double angle quotes
    (r'&#187;', '>>'),       # right double angle quotes (numeric)
    (r'&laquo;', '<<'),      # left double angle quotes
    (r'&#171;', '<<'),       # left double angle quotes (numeric)
    (r'&rsquo;', '\''),      # right single curly quote
    (r'&#8217;', '\''),      # right single curly quote (numeric)
    (r'&#8220;', '\'\''),    # left double quotation (numeric)
    (r'&#8221;', '\'\''),    # right double quotation (numeric)
    (r'&rdquo;', '\'\''),    # right double quote
    (r'&#64;', '@'),         # at sign
    (r'&lt;', '<'),          # less-than
    (r'&gt;', '>'),          # greater-than
    (r'&middot;', '.'),      # middle dot
    (r'&bull;', '.'),        # bullet
    (r'&lsquo;', '`'),       # left single quote
    (r'&#8216;', '`'),       # left single quote (numeric)
    (r'&#[0-9]{2,4};', ''),  # catch-all: drop any other numeric reference
]

#
# Helper: best-effort write of a string to a file.
# Failures are reported on stdout (matching the file's error-handling
# style) but never raised, and the file is always closed.
#
def _WriteOutputFile(path, content):
    try:
        output_file = open(path, 'w')
    except IOError:
        print("ERROR: Could not open file: %s!" % path)
        return
    try:
        output_file.writelines(content)
    except IOError:
        print("ERROR: Could not write the file: %s!" % path)
    finally:
        output_file.close()

#
# Helper: decode HTML entities, strip URLs, collapse whitespace and remove
# the marker tokens from HTML-stripped page text.
#
# Input: String of HTML-stripped page text
# Output: Cleaned plain-text string
#
def _CleanStrippedText(text):

    # Decode the known HTML entities / numeric references, in order
    for pattern, replacement in _HTML_ENTITY_REPLACEMENTS:
        text = re.sub(pattern, replacement, text)

    # Remove any URLs from the contents
    text = re.sub(r"(http://[^ ]+)", '', text)
    text = re.sub(r"(https://[^ ]+)", '', text)

    # Attempt to remove extra whitespace chars
    text = re.sub(r'\s+', ' ', text)

    # Remove the "!ignored html" marker
    text = re.sub(r'!ignored html', ' ', text)

    # Remove the paragraph-boundary sentence token added by StripHTMLTags
    text = re.sub(r'This is a blank sentence\.', ' ', text)

    return text

#
# Function to extract sentences from a given file.
#
# Inputs:
#  filename: Filename of the file to be parsed
#  count: index of the webpage, used to name the ./output/ debug files
# Output: List of sentences from the source file (empty on any failure)
#
def ExtractSentences(filename, count):

    # List of sentences
    sentences = []

    # Try to open and read the input file; on failure return the empty
    # list instead of falling through to a NameError (the original bare
    # excepts printed an error and then kept going with 'file'/'contents'
    # undefined)
    try:
        input_file = open(filename, 'r')
    except IOError:
        print("ERROR: Could not open file: %s!" % filename)
        return sentences

    try:
        contents = input_file.read()
    except IOError:
        print("ERROR: Could not read the file contents!")
        return sentences
    finally:
        input_file.close()

    # Extract the usable contents (the <body>) from the web page source.
    # Bail out immediately when no body was matched -- the original code
    # called .group() on the match object before checking it for None.
    usable_page_content = ExtractUsableContent(contents)
    if usable_page_content is None:
        return sentences

    # Output the usable page contents to a file (debugging aid)
    _WriteOutputFile("./output/" + str(count) + "_output_html_body",
                     usable_page_content.group())

    # Strip out all of the HTML tags, drop non-ASCII characters, then
    # decode entities / URLs / whitespace / marker tokens
    html_stripped_contents = StripHTMLTags(usable_page_content)
    html_stripped_contents = removeNonAscii(html_stripped_contents)
    html_stripped_contents = _CleanStrippedText(html_stripped_contents)

    # Output the HTML-stripped page contents to a file (debugging aid)
    _WriteOutputFile("./output/" + str(count) + "_output_html_stripped",
                     html_stripped_contents)

    # Tokenize the HTML-free file contents into sentences
    tokenized_sentences = TokenizeContentsIntoSentences(html_stripped_contents)

    # Open the tokenized-sentence log; sentence extraction still proceeds
    # even if the log cannot be opened
    filename_output_tokenized_sentences = "./output/" + str(count) + "_output_tokenized"
    try:
        file_output_tokenized_sentences = open(filename_output_tokenized_sentences, 'w')
    except IOError:
        print("ERROR: Could not open file: %s!" % filename_output_tokenized_sentences)
        file_output_tokenized_sentences = None

    # Keep only sentences of at most ~28 words, assuming an average word
    # is about 5 characters; length is measured with whitespace removed
    num_words_per_sentence = 28
    num_chars_per_word = 5
    max_sentence_len = num_words_per_sentence * num_chars_per_word
    whitespace_pattern = re.compile(r'\s+')

    # Loop over the tokenized sentences and add them to the list
    for sentence in tokenized_sentences:

        # Drop sentences whose whitespace-free length exceeds the budget
        if len(whitespace_pattern.sub('', sentence)) > max_sentence_len:
            continue

        # Append the sentence since it meets the length requirements
        sentences.append(sentence)

        # Log the accepted sentence (best effort)
        if file_output_tokenized_sentences is not None:
            try:
                file_output_tokenized_sentences.write("---- Sentence: ----\n")
                file_output_tokenized_sentences.write(sentence)
                file_output_tokenized_sentences.write("\n")
            except IOError:
                print("ERROR: Could not write the file: %s!" % filename_output_tokenized_sentences)

    # Close the log file
    if file_output_tokenized_sentences is not None:
        file_output_tokenized_sentences.close()

    # Return the list of sentences
    return sentences

#
# Removes events whose sentences contain more than 30 words or whose
# average word length falls outside the normal range.
#
# Input:  events: list of objects carrying a .sentence string attribute
# Output: the same list, with bad-sentence events deleted in place
#
def RemoveBadSentences(events):

    max_chars_per_word = 7.2
    min_chars_per_word = 3.2
    max_words_per_sentence = 30

    # Output for removed sentences (log of what was discarded and why)
    filename_output_removed_sentences = "./output/bad_sentences.txt"
    file_output_removed_sentences = open(filename_output_removed_sentences, 'w')

    # Iterate backwards so deleting the current element does not shift
    # the indices of elements still to be visited
    for i in range(len(events) - 1, -1, -1):

        words = events[i].sentence.split()

        # An empty sentence has no words to average (the original divided
        # by zero here); treat it as bad and remove it
        if not words:
            del events[i]
            continue

        # Compute the average word length BEFORE any branch uses it --
        # the original referenced avg_word_len in the too-many-words
        # branch before it was ever assigned (NameError on first hit)
        avg_word_len = float(sum(len(word) for word in words)) / len(words)

        if len(words) > max_words_per_sentence:
            # remove overly long sentence
            file_output_removed_sentences.write("\n\nAvg word len(2): %f\n" % avg_word_len)
            file_output_removed_sentences.write("%s\n" % events[i].sentence)
            del events[i]
            continue

        if avg_word_len < min_chars_per_word or avg_word_len > max_chars_per_word:
            # remove sentence with abnormal average word length
            file_output_removed_sentences.write("\nAvg word len: %f\n" % avg_word_len)
            file_output_removed_sentences.write("%s\n" % events[i].sentence)
            del events[i]

    file_output_removed_sentences.close()
    return events

#
# Function to extract the usable content from a given web page's source
# (Currently, we just extract the contents of the body).
#
# Input: String representing the file contents
# Output: re match object covering the <body>...</body> span, or None
#         when the page has no body element
#
def ExtractUsableContent(fileContents):

    # Case-insensitive search for the document body, with '.' also
    # matching newlines so the body may span multiple lines
    body_pattern = re.compile('<body.*>.*</body>', re.I | re.M | re.S)

    # Return the match (None when no body element was found)
    return body_pattern.search(fileContents)


#
# Function to strip the HTML content from the web page source
# (Removes the HTML tags).
#
# Input: re match object whose group() holds the page source
# Output: String holding the text of every <p> element, joined by a
#         marker sentence used as a paragraph boundary
#
def StripHTMLTags(fileContents):

    # Parse the matched page source and locate every paragraph element
    soup = BeautifulSoup(fileContents.group())

    # For each paragraph, re-parse it and concatenate all of its text
    # nodes into one plain string
    paragraph_texts = [''.join(BeautifulSoup(str(paragraph)).findAll(text=True))
                       for paragraph in soup.findAll('p')]

    # Combine text from paragraphs, separated by a marker sentence so the
    # downstream tokenizer sees a boundary between paragraphs
    return '\n.This is a blank sentence.\n  '.join(paragraph_texts)


#
# Function to tokenize the given file contents (string) into sentences.
#
# Input: String representing the file contents
# Output: A list of sentences tokenized from the input
#
def TokenizeContentsIntoSentences(fileContents):

    # Load NLTK's pre-trained Punkt model for English and use it to split
    # the input into sentences
    punkt_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    return punkt_tokenizer.tokenize(fileContents)
