'''
Created on Feb 19, 2011

@author: matt
'''

import urllib
import urllib2
import json
from xml.sax.saxutils import unescape
import random
import re

import heavenlyfodder.markov.builder

#    dca_woeid = 12765838

def filter_tweets(tweets):
    ''' Removes unwanted content from the newline-separated tweets in
        'tweets': leading/trailing whitespace, retweet lines (any line
        containing "RT "), and stray \\x85 (NEL) control characters.

        Returns the cleaned text as a str.
    '''

    clean_text = tweets.strip()

    # Remove every retweet line.  The previous pattern r'\n.*RT .*\n'
    # required a newline on BOTH sides of the line, so it missed retweets
    # on the first and last lines, and missed consecutive retweets because
    # each match consumed the trailing '\n' that the next match needed.
    # Anchoring with the (?m) MULTILINE flag handles all of those cases.
    clean_text = re.sub(r'(?m)^.*RT .*$\n?', '', clean_text)

    # strip the \x85 (next-line) control character that appears in some tweets
    clean_text = re.sub(r'\x85', '', clean_text)

    return clean_text

def fetch_top_trending_topics (woeid=None):
    ''' Returns the query for the top Twitter trending topics for the specified 
        WOEID, unless woeid is not specified, in which case the top global TTs
        are returned.
        
        Note that the query returned is "HTTP escaped" and thus suitable for
        use as the "q=" parameter of a Twitter search request.
    '''
    
    # if the caller didn't specify a woeid, then use 1, which will cause
    # Twitter to return global trends

    if woeid == None:
        woeid = '1'
    
    base_url = 'http://api.twitter.com/1/trends/' + woeid + '.json'
    
    # send the request and get the response
    response = urllib2.urlopen(base_url)
    response_list = json.load(response)
    
    tt_search_queries = []
    for trend in response_list[0]['trends']:
        # this is to deal with trending topics that use non-ASCII UNICODE
        # characters (e.g., fraternity names); if we didn't encode as UTF-8,
        # an exception would be raised.
        trend_name = unicode(trend['name']).encode("utf-8")
        tt_search_queries.append(trend_name)
        print 'trend:', trend_name, 'URL:', trend['url']

    return tt_search_queries


# TODO: return the tweets in a form that the markov builder can use
def grab_popular_tweets (search_term='#fb', page=1):
    '''
    Returns a tuple whose first value is a str containing the returned tweets
    and whose second value is an int specifying the next page of tweets (or
    None if there's no next page)
    '''
    
    base_url = 'http://search.twitter.com/search.json'
    
    replies_per_page = 50
    
    search_opts_dict = {
        'q' : search_term,  # the search term
        'rpp' : replies_per_page,  # replies per page
        'lang' : 'en', # language
        'page' : page
    }
    
    # build the full URL; NOTE that if you have %23 already in it, this will
    # turn it into %2523
    search_opts = urllib.urlencode(search_opts_dict)
    
    # Note: we could specify a dictionary that contains our HTTP headers,
    # but we don't need to do that now
    
    # build the HTTP request
    request = urllib2.Request(base_url, search_opts)
    
    print 'opts are:', search_opts
    
    # send the request and get the response
    response = urllib2.urlopen(request)
    
    # print out the string we got back
#        returned_json = response.read()
    
    response_dict = json.load(response)
    
    # The actual tweet text is stored in:
    #    results > (each result) > text
    
    result = ''

    # TODO: may want to keep track of how many tweets we've read so far and
    # stop once we've gotten "enough"
    
    # iterate through the tweets
    for tweet in response_dict['results']:
        
        # TODO: skip tweets containing " RT "
        
        # the unescape() replaces escaped values (e.g., &apos) with their
        # single-character equivalents
        utf8_tweet = unicode(tweet['text']).encode('utf-8')
        unescaped_tweet = unescape(utf8_tweet, {"&apos;": "'", "&quot;": '"'})
        result += unescaped_tweet + '\n'
#        print 'From', tweet['from_user'], '-', unescaped_tweet
    
    next_page = None
    if response_dict.has_key('next_page'):
        next_page = page + 1
        print '--- Next page is', next_page, '---'
        
    return (result, next_page)

    # TODO: if we haven't read enough tweets yet, grab the tweets from the
    # next page. NOTE: there's a next page iff the JSON contains the key
    # 'next_page'
    
#print unicode(u'xa1').encode("utf-8")

def dump_to_file (tweets, filename):
    ''' Writes the string 'tweets' to the file at 'filename', overwriting
        any existing contents.
    '''
    # 'with' guarantees the file is closed even if the write raises
    # (the previous version leaked the handle on error)
    with open(filename, 'w') as out_file:
        out_file.write(tweets)

if __name__ == "__main__":
    print '-- Markov Twitter Bot --'
    
#    print unicode(u'xa1').encode("utf-8")
#    exit(1)

    # TODO: add commandline arguments (a la markov.builder) to specify the
    # search term, etc.
    
    # TODO: verify that we're sending a USER-AGENT when doing the search query
    # (and if not, send one)
    
    # Ideally, we'd be able to stop at the end of a tweet OR at the end of a
    # sentence (i.e., after a period)
    
    # Fetch the current trending topics for a hard-coded WOEID and pick one
    # at random.  Per the comment below, 2514815 appears to be a DC-area id
    # and 44418 is London -- TODO confirm against the WOEID registry.
    # DC: fetch_top_trending_topics('2514815') # '44418' = London
    tt_queries = fetch_top_trending_topics('2514815')
    random_tt_query = random.choice(tt_queries)
    # NOTE(review): label says "URL" but this prints the chosen trend
    # name/query, not trend['url']
    print 'Randomly-chosen TT URL:' + random_tt_query
    # Accumulate search results page by page (at most 19 pages);
    # grab_popular_tweets() returns next_page=None when no pages remain,
    # which ends the loop.
    tweets = ''
    next_page = 1
    while next_page and next_page < 20:
        (these_tweets, next_page) = grab_popular_tweets(random_tt_query, next_page)
        tweets += these_tweets
        print 'Next page is ', next_page
            
    # strip retweets and control characters before writing the corpus out
    tweets = filter_tweets(tweets)
    
    dump_to_file(tweets, '/Users/matt/devel/movies/tweets.txt')
    
#    print tweets
    
    # TODO: instantiate a Markov builder, pass it the tweets, get the results,
    # etc.
    
    # TODO:
    # - figure out why "%23tigerblood" doesn't get any results