import urllib2
import re
import BeautifulSoup
import info
import anydbm

def authentication(username, password, url):
    """Install a global urllib2 opener that sends HTTP Basic auth
    credentials for the given host.

    url may be passed with or without a leading "http://" scheme;
    only the host part is registered with the password manager.
    """
    passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
    # Strip the scheme so callers may pass either "http://host" or "host".
    host = re.sub("http://", "", url)
    # Fix: the original computed the stripped host (turl) but then
    # hard-coded "twitter.com", silently ignoring the url argument.
    passman.add_password(None, host, username, password)

    authhandler = urllib2.HTTPBasicAuthHandler(passman)

    # Installing globally makes every later urllib2.urlopen() call
    # (e.g. in get_xml) authenticate automatically.
    opener = urllib2.build_opener(authhandler)
    urllib2.install_opener(opener)

def get_xml(cache, last_id):
    """Fetch the authenticated user's timeline XML from Twitter.

    cache: dict-like object mapping url -> xml; a hit skips the
        network round-trip entirely.
    last_id: newest already-seen status id; when non-empty it is sent
        as ?since_id=... so only newer statuses are returned.
    """
    # Fix: the original named this variable "str", shadowing the builtin.
    query = "?since_id=%s" % last_id if last_id != "" else ""
    url = "http://twitter.com/statuses/user_timeline.xml%s" % query

    # "in" instead of the deprecated dict.has_key().
    if url in cache:
        return cache[url]

    xml = urllib2.urlopen(url).read()
    cache[url] = xml

    return xml

import time
import calendar

def convert_tw_time(tw_time):
    """Parse Twitter's created_at timestamp (always given with a
    +0000 UTC offset, e.g. "Mon Jan 01 00:00:00 +0000 2018") and
    return the equivalent struct_time in the local timezone."""
    parsed_utc = time.strptime(tw_time, "%a %b %d %H:%M:%S +0000 %Y")
    # timegm interprets the struct as UTC; localtime converts the
    # resulting epoch seconds into the local timezone.
    return time.localtime(calendar.timegm(parsed_utc))

import re

def convert_tw_text(tw_text):
    """Convert a raw tweet into an HTML fragment.

    Every bare "http://..." run becomes a self-linking <a>, and every
    "@username" mention becomes a link to that user's Twitter page
    (the link text drops the leading "@").
    """
    # URL match stops at whitespace, '<', or ')' so trailing markup
    # or a closing parenthesis is not swallowed into the href.
    tw_text = re.sub(r"(http://[^ \n\r<\)]+)",
                     r"<a href='\1'>\1</a>", tw_text)
    # Fix: Twitter usernames may contain underscores; the original
    # class [a-zA-Z0-9] split "@foo_bar" after "foo".
    tw_text = re.sub(r"@([a-zA-Z0-9_]+)",
                     r"<a href='http://twitter.com/\1'>\1</a>", tw_text)
    return tw_text

def make_html(user_id, tw_id, tw_text):
    """Render one status as an HTML fragment: the linkified tweet
    text followed by a '#' permalink to the status page."""
    permalink = "<a href='http://twitter.com/%s/status/%s'>#</a>" % \
                (user_id, tw_id)
    return "%s %s" % (convert_tw_text(tw_text), permalink)

import shelve

def tw_crawl():
    """Fetch statuses newer than the persisted last id and return a
    dict mapping status id (str) -> (posted_time_epoch, text).

    Side effect: rewrites the 'last_id' file with the id of the
    newest status seen, so the next run only fetches newer ones.
    """
    data = {}
    authentication(info.tw_user, info.tw_passwd, "twitter.com")
    try:
        # Fix: catch only the file-unreadable case; the original bare
        # except hid every other error. "with" closes the handle
        # instead of leaking it until GC.
        with open('last_id') as f:
            last_id = f.read()
    except IOError:
        last_id = ""
    xml = get_xml({}, last_id)
    soup = BeautifulSoup.BeautifulSoup(xml)
    status_list = soup.findAll("status")
    # Oldest first, so after the loop tw_id holds the newest id.
    status_list.reverse()
    for status in status_list:
        tw_time_str = status.find("created_at").contents[0]
        tw_id = str(status.find("id").contents[0])
        tw_text = status.find("text").contents[0]
        tw_time = convert_tw_time(tw_time_str)
        data[tw_id] = (time.mktime(tw_time), tw_text)
    if len(status_list) > 0:
        # Persist the newest status id for the next run.
        with open('last_id', 'w') as f:
            f.write(tw_id)

    return data

def tw_get_new_html(tw_user, data, title_type):
    """Build a blog post from crawled statuses.

    tw_user: Twitter username used for the per-status permalinks.
    data: dict of status id (str) -> (posted_time_epoch, text).
    title_type: strftime format string for the post title.

    Returns (title, html) where the title is formatted from the
    newest status's timestamp and the html is a <ul> of the statuses
    posted within 24 hours of the newest one, oldest first.
    Returns (None, None) when data is empty.
    """
    # Fix: check for empty input first; the original computed a title
    # from localtime(0) and then threw it away.
    if not data:
        return None, None

    # (id, text, time) triples; "id" renamed so the builtin isn't shadowed.
    entries = [(tw_id, text, posted) for tw_id, (posted, text) in data.items()]
    newest = max(posted for _, _, posted in entries)

    title = time.strftime(title_type, time.localtime(newest))

    # Only keep statuses from the 24 hours preceding the newest one.
    cutoff = newest - 24 * 60 * 60
    entries = [e for e in entries if e[2] >= cutoff]
    # Numeric sort on the id string, oldest status first (key= sort
    # instead of the removed cmp-style lambda).
    entries.sort(key=lambda e: int(e[0]))

    items = ["<li>%s</li>" % make_html(tw_user, tw_id, text)
             for tw_id, text, _ in entries]
    html = "<ul>\n%s\n</ul>" % "\n".join(items)

    return title, html


import os
import blog

if __name__ == '__main__':
    # Crawl new statuses, render them, and post the digest to the blog.
    data = tw_crawl()
    title, html = tw_get_new_html(info.tw_user, data, info.title_type)
    # title is None when there were no new statuses; skip posting.
    if title is not None:
        blog.blog_post(info.blog_user, info.blog_passwd,
                       info.blog_id, info.blog_api_url,
                       title, html, info.publish)
