# tweet.py - Twitter utilities.
# Copyright 2009 Rob Myers <rob@robmyers.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

################################################################################
# Imports
################################################################################

import glob
import logging
import re

import urllib
import feedparser
import twitter

################################################################################
# Tweet
################################################################################

class Tweet:
    '''A single Twitter status update: author, message text and source URL.'''

    def __init__ (self, _who, _what, _where):
        '''Configure the Tweet with basic useful information.

        _who   -- the author's username
        _what  -- the message text
        _where -- the URL of the original status'''
        self.who = _who
        self.what = _what
        self.where = _where

    def is_retweet (self):
        '''Is the Tweet message a retweet?'''
        return text_is_retweet (self.what)

    def shorten_url (self, url):
        '''Call ur1.ca for a short version of url and return it.'''
        params = urllib.urlencode({'longurl': url})
        # Close the connection explicitly rather than leaking the handle
        connection = urllib.urlopen ("http://ur1.ca/", params)
        try:
            page = connection.read ()
        finally:
            connection.close ()
        # FIXME Fragile. Assumes first url on returned page is the one we want
        return re.search ('http://ur1.ca/[^"]+', page, re.M).group (0)

    def to_retweet (self, max_length=140):
        '''Return a string with the Tweet message formatted as a retweet.

        If the result would exceed max_length characters (default 140,
        Twitter's classic limit) it is truncated and a shortened link to
        the original status is appended instead.'''
        separator = ' '
        retweet = 'RT @' + self.who + separator + self.what
        if (len (retweet) > max_length):
            short_url = self.shorten_url (self.where)
            # Leave room for the separator and the short url so the
            # result comes out at exactly max_length characters
            truncate = max_length - (len (short_url) + len (separator))
            retweet = retweet[0:truncate] + separator + short_url
        return retweet

def twitter_term_search (term, exclude_retweets=False):
    '''Run a Twitter search for one single term.'''
    query = "q=" + urllib.quote_plus (term)
    return twitter_search (query, exclude_retweets=exclude_retweets)

def twitter_any_search (terms, exclude_retweets=False, exclude_users=None):
    '''Search Twitter for tweets matching any of the given terms.

    terms            -- list of search terms, OR'd together
    exclude_retweets -- drop results that look like retweets
    exclude_users    -- list of usernames whose tweets are dropped'''
    # Avoid the shared mutable default argument pitfall
    if exclude_users is None:
        exclude_users = []
    # Handle API limitation on the number of OR'd terms
    if (len (terms) > 10):
        logging.debug ("Ors has a limit of 14, truncating to 10 to be safe")
        terms = terms[:10]
    q = "ors=" + urllib.quote_plus (" ".join (terms))
    return twitter_search(q, exclude_retweets=exclude_retweets,
                          exclude_users=exclude_users)

                          
def twitter_search (query, exclude_retweets=False, exclude_users=None):
    '''Get the Tweet objects for the given search from Twitter.

    query            -- a pre-encoded query string fragment, e.g. "q=foo"
    exclude_retweets -- drop results that look like retweets
    exclude_users    -- list of usernames whose tweets are dropped'''
    # Avoid the shared mutable default argument pitfall
    if exclude_users is None:
        exclude_users = []
    logging.debug (u"Twitter search for: " + query)
    parsed_atom = feedparser.parse('http://search.twitter.com/search.atom?' +
                                   query)['entries']
    logging.debug ("Atom for Twitter search results: " + str (parsed_atom))
    tweets = parse_twitter_search_results (parsed_atom,
                                           exclude_retweets=exclude_retweets,
                                           exclude_users=exclude_users)
    logging.debug ("Tweet objects: " + str (tweets))
    return tweets

''' 
Added by Nick Renny.
Some Twitter geo API search functions - simply extending Rob's originals with the geo parameter.
'''
def geo_twitter_any_search (terms, latlng, exclude_retweets=False, exclude_users=None):
    '''Search Twitter for tweets matching any of the given terms, near latlng.

    terms            -- list of search terms, OR'd together
    latlng           -- geocode value for the search (e.g. "lat,lng,radius")
    exclude_retweets -- drop results that look like retweets
    exclude_users    -- list of usernames whose tweets are dropped'''
    # Avoid the shared mutable default argument pitfall
    if exclude_users is None:
        exclude_users = []
    # Handle API limitation on the number of OR'd terms
    if (len (terms) > 10):
        logging.debug ("Ors has a limit of 14, truncating to 10 to be safe")
        terms = terms[:10]
    q = "ors=" + urllib.quote_plus (" ".join (terms))
    # NOTE(review): latlng is appended without url-encoding; commas are legal
    # in a query string but verify the API accepts the raw value -- TODO confirm
    q = q + "&geocode=" + str(latlng)
    return twitter_search(q, exclude_retweets=exclude_retweets,
                          exclude_users=exclude_users)
                          


def strip_html_tags (html):
    '''Remove HTML elements from the text, leaving a bare "<" (as found in
    smileys such as "<3") untouched since it never closes with ">".'''
    tag_pattern = re.compile (r'<[^>]+>')
    return tag_pattern.sub ('', html)

def text_is_retweet (text):
    '''Does the text start with the "RT" retweet marker
    (ignoring leading whitespace and case)?'''
    return text.lstrip ().lower ().startswith ('rt')

def parse_twitter_search_results (search_results, exclude_retweets=False,
                                  exclude_users=None):
    '''Parse the list of *usable* feed items into Tweet objects.

    search_results   -- feedparser entries from a Twitter search Atom feed
    exclude_retweets -- drop entries that look like retweets
    exclude_users    -- list of usernames whose entries are dropped'''
    # Avoid the shared mutable default argument pitfall
    if exclude_users is None:
        exclude_users = []
    tweets = []
    for result in search_results:
        # Split on the first space only; assumes the username is the first
        # whitespace-delimited token of the author field -- TODO confirm
        tweet_user = result.author.split (' ', 1)[0]
        if tweet_user in exclude_users:
            continue
        text = strip_html_tags (result.content[0].value)
        # If we are excluding retweets, skip anything that looks like one
        if exclude_retweets and text_is_retweet (text):
            continue
        tweets.append (Tweet (tweet_user, text, result.link))
    return tweets
