import urllib, urllib2, simplejson
from datetime import datetime
from django.utils.http import urlquote
from time import sleep
import re

# Minimal HTML entity table: each pair is [plain_character, entity].
# Encoding order matters: '&' must be escaped first so the entities
# inserted for the other characters are not themselves re-escaped.
htmlCodes = [
    ['&', '&amp;'],
    ['<', '&lt;'],
    ['>', '&gt;'],
    ['"', '&quot;'],
]

# Decoding must run in the opposite order ('&amp;' handled last), so keep
# a reversed copy for htmlDecode() below.
htmlCodesReversed = list(reversed(htmlCodes))

def htmlDecode(s, codes=None):
    """ Return s with HTML entities replaced by their plain characters.

        This does NOT remove normal HTML tags like <p>; it only undoes the
        entity escaping performed by an htmlEncode() counterpart.

        s     -- the HTML-escaped string to decode.
        codes -- optional list of [plain, entity] pairs; defaults to
                 htmlCodesReversed, whose order guarantees '&amp;' is
                 decoded last so the other entities are not corrupted.
    """
    # None sentinel instead of a mutable (module-level list) default
    # argument, which would be shared across every call site.
    if codes is None:
        codes = htmlCodesReversed
    for plain, entity in codes:
        s = s.replace(entity, plain)
    return s

# Matches a Twitter hashtag: '#' followed by word-ish characters.
# NOTE(review): the '*' quantifier lets a bare '#' match too;
# create_list_from_result() compensates by skipping matches of
# length <= 2 before linkifying.
hash_regex = re.compile(r'#[0-9a-zA-Z+_]*',re.IGNORECASE)
# Matches an @username mention.  The IGNORECASE flag is redundant here
# (the character class already covers both cases) but harmless.
user_regex = re.compile(r'@[0-9a-zA-Z+_]*',re.IGNORECASE)
# Matches an absolute http/https URL: a dotted domain, 'localhost', or a
# dotted-quad IP, then an optional port and an optional path/query part.
url_regex = re.compile(
    r'https?://' # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' #domain...
    r'localhost|' #localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
    r'(?::\d+)?' # optional port
    r'(?:[/?]\S+|/?)', re.IGNORECASE)

def search_result(query=['#fimu', 'fimu2010'], url=None):
    """ Return the tweets result of the search in JSON

    >>> results = search_result(['google'])
    >>> for s in results['results']:
    ...     print s['from_user'], ' : ', s['text']
    ...
    """
    
    if url:
        theurl = "http://search.twitter.com/search.json%s" % url
    else:
        theurl = "http://search.twitter.com/search.json?q=%s&rpp=2" % "+OR+".join([urllib.quote(q) for q in query])

    handle = urllib2.Request(theurl)

    try:
        return simplejson.load(urllib2.urlopen(handle))
    except IOError, e:
        # This is reached when allocated API requests to IP are completed.
        print "parsing the search json from search.twitter.com, failed"
    return False

def _link_url(match):
    """ Wrap a matched URL in an <a> tag pointing at itself. """
    u = match.group(0)
    return '<a href="' + u + '">' + u + '</a>'

def _link_user(match):
    """ Turn a matched @mention into a link to the user's Twitter page. """
    mention = match.group(0)
    return ('<a href="http://twitter.com/' + mention.replace('@', '') +
            '" title="' + mention + '">' + mention + '</a>')

def _link_hash(match):
    """ Turn a matched #hashtag into a Twitter search link.  Matches of
        length <= 2 (a bare '#' or single-character tag) are left as-is,
        matching the original len(...) > 2 guard. """
    tag = match.group(0)
    if len(tag) <= 2:
        return tag
    return ('<a href="http://search.twitter.com/search?q=' +
            tag.replace('#', '%23') + '" title="' + tag + '">' + tag + '</a>')

def create_list_from_result(results):
    """ Extract the tweet information from the JSON search results.

    results -- the dict returned by search_result(), or False.

    Returns a list of dicts with keys 'user', 'avatar', 'source' (HTML
    entity-decoded), 'date' (a datetime parsed from 'created_at') and
    'text' (the tweet body with URLs, @mentions and #hashtags turned
    into HTML links).  Returns an empty list when results is falsy.

    >>> results = search_result(['google'])
    >>> tweets = create_list_from_result(results)
    >>> for tweet in tweets:
    ...     print tweet['user'], 'at', tweet['date'], 'writes', tweet['text']
    """
    tweets = []
    if not results:
        return tweets

    for s in results['results']:
        tweet = s['text']

        # Use re.sub with callbacks instead of the old finditer +
        # str.replace loops: str.replace substitutes *every* occurrence
        # of the matched text on each iteration, so a URL, @user or
        # #tag appearing twice in a tweet ended up wrapped in nested
        # <a> tags.  re.sub rewrites each match exactly once, in the
        # same url -> user -> hash order as before.
        tweet = url_regex.sub(_link_url, tweet)
        tweet = user_regex.sub(_link_user, tweet)
        tweet = hash_regex.sub(_link_hash, tweet)

        tweets.append({'user': s['from_user'],
                       'avatar': s['profile_image_url'],
                       'source': htmlDecode(s['source']),
                       'date': datetime.strptime(s['created_at'], "%a, %d %b %Y %H:%M:%S +0000"),
                       'text': tweet})

    return tweets


if __name__=='__main__':

    url = None

    while True:
        results = search_result(['#twitter'], url)
        url = results['refresh_url']
        messages = create_list_from_result(results)

        for tweet in messages:
            print tweet['user'], 'at', tweet['date'], 'writes :', tweet['text']

        sleep(2)
        print '\n' , '=============== REFRESH============', '\n'
