"""
RottenTomatoesReviewParser.py - Scrapes user review data from
rottentomatoes.com.

Author: Nick Garvey
"""

from bs4 import BeautifulSoup
from xml.dom import minidom
import codecs
import datetime
import hashlib
import multiprocessing
import os
import re
import time
import urllib2

# Movie pages whose user reviews will be scraped. Each URL must end with a
# trailing slash so that "reviews/?type=user&page=" can be appended directly.
# Example MOVIE_URLs:
MOVIE_URLS = [
    r'http://www.rottentomatoes.com/m/mirror_mirror_2012/',
    r'http://www.rottentomatoes.com/m/the_raven_2012/',
    r'http://www.rottentomatoes.com/m/journey_to_the_center_of_the_earth_2_3d/',
    r'http://www.rottentomatoes.com/m/lockout_2012/',
    r'http://www.rottentomatoes.com/m/a-thousand-words/',
    r'http://www.rottentomatoes.com/m/tim_and_erics_billion_dollar_movie/',
    r'http://www.rottentomatoes.com/m/jaws_2/',
    r'http://www.rottentomatoes.com/m/we_have_a_pope/',
    r'http://www.rottentomatoes.com/m/mothers-day/',
    r'http://www.rottentomatoes.com/m/the_iron_lady/'
]

# Root directory for the scraped XML output; one subdirectory is created per
# movie. Default is the folder RottenTomatoesScrape in the data directory.
OUTPUT_PATH = os.path.join('data', "RottenTomatoesScrape")

# Number of review pages fetched per movie. Setting a number higher than the
# number of pages is fine: the page will still be fetched, but no reviews
# will be parsed from it if none exist there.
PAGES_TO_PARSE = 10

def parse_and_write_review(review, output_dir):
    """Parse a user review and write it to an xml file.

    Parameters:
        review     - The review to parse (a BeautifulSoup tag for the
                     <p class="user_review"> element).
        output_dir - The directory to output the xml file to.

    The output file is named <user_id>.xml and contains the user id, user
    name, rating, review text, review date, and super-reviewer flag.
    """
    # Create the XML document
    review_doc = minidom.Document()
    root_node = review_doc.createElement("review")

    # The enclosing box holds both the review text and the user metadata
    review_box = review.find_parent("div",
        {"class" : "media_block bottom_divider"})
    user_info = review_box.find("div", {"class" : "fl"})

    # User ID
    # Registered users have a profile link whose path contains their id;
    # anonymous reviews get a synthetic id derived from the visible text.
    user_link = user_info.find("a")
    if user_link:
        user_id = user_link['href'].split('/')[3]
    else:
        # md5 requires bytes: encode the (possibly non-ASCII) unicode text
        # first, otherwise Python 2 raises UnicodeEncodeError here.
        user_id = "none_" + \
            hashlib.md5(user_info.text.encode('utf-8')).hexdigest()
    id_node = review_doc.createElement("user_id")
    id_node.appendChild(review_doc.createTextNode(user_id))
    root_node.appendChild(id_node)

    # User Name
    # The name should be the first non-empty text node
    user_name = user_info.find(text=lambda s: s.strip()).strip()
    name_node = review_doc.createElement("user_name")
    name_node.appendChild(review_doc.createTextNode(user_name))
    root_node.appendChild(name_node)

    # Rating
    # The third CSS class ends in the score out of 50 (e.g. "score40"),
    # so the last two characters divided by 10 give the star rating.
    rating = float(review_box.find("span",
        {"class" : "stars"})['class'][2][-2:]) / 10
    rating_node = review_doc.createElement("rating")
    rating_node.appendChild(review_doc.createTextNode(str(rating)))
    root_node.appendChild(rating_node)

    # Review Text (collapse all runs of whitespace to single spaces)
    review_text = re.sub(r"\s+", " ", review.text.strip())
    review_text_node = review_doc.createElement("review_text")
    review_text_node.appendChild(review_doc.createTextNode(review_text))
    root_node.appendChild(review_text_node)

    # Date (e.g. "April 1, 2012" -> "2012-04-01")
    review_date_str = review_box.find("span", {"class" : "fr"}).text.strip()
    review_date = str(datetime.datetime.strptime(
        review_date_str, "%B %d, %Y").date())
    date_node = review_doc.createElement("date")
    date_node.appendChild(review_doc.createTextNode(review_date))
    root_node.appendChild(date_node)

    # Super Reviewer ("True"/"False" depending on the presence of the badge)
    super_reviewer = str(user_info.find("span",
        {"class" : "superreviewer"}) is not None)
    super_reviewer_node = review_doc.createElement("super_reviewer")
    super_reviewer_node.appendChild(review_doc.createTextNode(super_reviewer))
    root_node.appendChild(super_reviewer_node)

    review_doc.appendChild(root_node)

    # NOTE(review): reviews sharing a user_id overwrite the same file.
    with codecs.open(os.path.join(output_dir, user_id + ".xml"),
        'w', 'utf-8') as xml_out:
            review_doc.writexml(xml_out, addindent="  ", newl="\n",
                                encoding='utf-8')
        
        
def get_and_write_reviews_from_url(url, output_dir, lock):
    """Get all of the user reviews from a url and output them as xml files.

    Parameters:
        url        - The url of the movie to get the user reviews for.
        output_dir - The directory to output the xml files to.
        lock       - A lock object needed for multithreading.
    """
    req = urllib2.Request(url, headers={"User-Agent" : ''})

    with lock:
        print "Fetching: " + req.get_full_url()
    
    response = urllib2.urlopen(req)

    soup = BeautifulSoup(response.read().replace("<br/>", ' '))
    
    reviews = soup.find_all("p", {"class" : "user_review"})
    
    # Filter out reviews that are marked as "want to see", "not interested", or
    # 0 stars, as 0 stars almost always means the user didn't submit a rating,
    # not a 0/5 review.
    reviews = filter(lambda rev: not rev.parent.parent.find_all("span",
        {"class" : "interest"}), reviews)
    
    with lock:
        print "Writing reviews from " + req.get_full_url()

    for review in reviews:
        parse_and_write_review(review, output_dir)

    with lock:
        print "Finished writing reviews from " + req.get_full_url()


if __name__ == '__main__':
    start_time = time.time()

    try:
        # Both globals are required; bail out if either is unset.
        # (Using "and" here would only trip when both were empty.)
        if not MOVIE_URLS or not OUTPUT_PATH:
            print "Error: Set the global variables before running this program"
            exit(1)

        # Needed to fix a bug that shows up in strptime when using threads
        # See: http://bugs.python.org/issue7980
        datetime.datetime.strptime('', '')

        for movie_url in MOVIE_URLS:
            base_url = movie_url + "reviews/?type=user&page="

            # The movie name is the path segment after /m/, used as the
            # per-movie output directory name.
            movie_name = re.match(r"http://[^/]*/m/([^/]*).*", base_url) \
                .group(1)

            movie_out_dir = os.path.join(OUTPUT_PATH, movie_name)

            print "Writing to " + movie_out_dir
            print

            if not os.path.exists(movie_out_dir):
                os.makedirs(movie_out_dir)
            elif not os.path.isdir(movie_out_dir):
                # Implicit concatenation keeps the message on one line
                # (a backslash continuation inside the literal would embed
                # the indentation spaces into the message).
                raise Exception(movie_out_dir + " is the name of a file, "
                    "the output directory cannot be created.")

            # One worker process per review page; the lock serializes the
            # progress printing.
            processes = []
            lock = multiprocessing.Lock()
            for page_num in range(1, PAGES_TO_PARSE + 1):
                process = multiprocessing.Process(
                    target=get_and_write_reviews_from_url,
                    args=(base_url + str(page_num), movie_out_dir, lock))
                processes.append(process)
                process.start()

            for process in processes:
                process.join()

            print
            print "Finished writing to " + movie_out_dir

    finally:
        print
        print "Execution time: %.2f seconds" % (time.time() - start_time)