import sys
#import traceback
import os
import urllib2
import urllib
from datetime import datetime
import time
from xml.dom.minidom import parseString
import tempfile

ISOFORMAT = '%Y-%m-%dT%H:%M:%S'
CACHEDIR = tempfile.mkdtemp()

class TVKaistaURLopener(urllib.FancyURLopener):
    """
    Custom URLopener for TVKaista url operations.

    Overrides the interactive prompt_user_passwd() so stored
    credentials are returned instead of prompting, and carries a
    custom User-Agent string (defined in 'version').
    """

    def __init__(self, user, passwd,
                 version = 'tvkaista_client.py'):
        """Store credentials and User-Agent, then init the base class."""
        # Set before the base __init__ runs, which reads self.version.
        self.version = version
        self.user = user
        self.passwd = passwd
        urllib.FancyURLopener.__init__(self)

    def prompt_user_passwd(self, host, realm):
        """Return the stored credentials instead of asking the user."""
        return (self.user, self.passwd)

def _parse_rss_date(datastr):
    """
    Parse date format which exists in TVKaista's RSS feed.
    Return datetime object.
    """
    # Python 2.4 compliant
    dt = time.strptime(datastr.replace(' +0000', ''), 
                           '%a, %d %b %Y %H:%M:%S')
    dt = datetime.fromtimestamp(time.mktime(dt))
    return dt

def _create_iso_timestamp(dt):
    """Return an ISO timestamp string created from a datetime object."""
    # format() delegates to datetime.__format__, i.e. strftime().
    return format(dt, ISOFORMAT)

def _parse_iso_timestamp(datastr):
    """
    Parse an ISO timestamp string (as produced by
    _create_iso_timestamp, format ISOFORMAT).
    Return datetime object.
    """
    # Python 2.4 compatible (no datetime.strptime).
    # BUG FIX: 'datastr' was previously missing from the strptime()
    # call, so only the bare format string was ever parsed.
    parsed = time.strptime(datastr, ISOFORMAT)
    return datetime.fromtimestamp(time.mktime(parsed))

def get_rss_dom(url, cachedir = None):
    """
    Fetch rss feed from url and return dom tree.
    NOTE: currently this caches feed for a (hard-coded) while.
    """
    # NOTE: while developing rss feed data is cached
    global CACHEDIR
    cachefile = url.replace('http://', '_cache_').replace('/', '_')
    if cachedir:
        cachefile = os.path.join(cachedir, cachefile)
    else:
        try: # global variable CACHEDIR
            cachefile = os.path.join(CACHEDIR, cachefile)
        except NameError:
            pass
            raise
    print cachefile
    cachefile_timestamp = cachefile + ".timestamp"
    timestamp = 0
    if os.path.isfile(cachefile_timestamp):
        try:
            f = open(cachefile_timestamp, "rt")
            timestamp = float(f.read())
            f.close()
        except:
            pass
    # FIXME: hard-coded cache expire time 600
    if os.path.isfile(cachefile) and (time.time() - timestamp) < 600: 
        print "TVK: Using cache"
        f = open(cachefile, "rt")
        xml = f.read()
        f.close()
        xml = open(cachefile, "rt").read()
    else:
        print "get_rss_dom(%s)" % url
        pagehandle = urllib.urlopen(url)
        xml = pagehandle.read()
        pagehandle.close()
        try:
        # Cache rss data to a file
            f = open(cachefile, "wt")
            f.write(xml)
            f.close()
            # Write current timestamp to a side-car file
            f = open(cachefile_timestamp, "wt")
            f.write(str(time.time()))
            f.close()
        except:
            print "ERROR: failed to write TVKAISTA RSS FEED cache file"
    dom = parseString(xml)
    return dom

def get_rss_items(dom):
    """
    Return all items in 'dom' as a list of dictionaries.
    Only tags defined in 'tags' list are handled.
    """
    item_tags = ['title', 'description', 'link', 'guid', 'source', ]
    timestamps = ['pubDate']
    items = dom.getElementsByTagName("item")
    item_dicts = []
    for item in items:
        data = {}
        for tag in item_tags:
            #if item.getElementsByTagName(tag):
            try: # FIXME: ugly try/except here
                data[tag] = item.getElementsByTagName(tag)[0].firstChild.data
            #else:
            except:
                data[tag] = u''
        for tag in timestamps:
            if item.getElementsByTagName(tag):
                data[tag] = _parse_rss_date(item.getElementsByTagName(tag)[0].firstChild.data)
        media = []
        for c in item.getElementsByTagName("media:content"):
            media_data = {}
            media_data['url'] = c.getAttribute("url")
            media_data['type'] = c.getAttribute("type")
            try:
                media_data['duration'] = int(c.getAttribute("duration"))
                media_data['filesize'] = int(c.getAttribute("fileSize"))
                media_data['bitrate'] = int(c.getAttribute("bitrate"))
            except:
                pass
            media.append(media_data)
        data['media'] = media
        if item.getElementsByTagName("media:thumbnail"):
            data['thumbnail'] = item.getElementsByTagName("media:thumbnail")[0].getAttribute("url")
        item_dicts.append(data)
    return item_dicts

def get_rss_header(dom):
    """
    Return the feed-level header tags of 'dom' as a dictionary.

    Only tags in 'header_tags' are copied verbatim; tags in
    'timestamps' are parsed into datetime objects. Returns an empty
    dictionary when the feed has no channel element.
    """
    header_tags = ['title', 'description', 'link', 'guid', 'language', ]
    timestamps = ['pubDate', 'lastBuildDate', ]
    header = {}
    # Get the first real element node (the channel) inside <rss>
    root = None
    for node in dom.documentElement.childNodes:
        if node.nodeType == dom.ELEMENT_NODE:
            root = node
            break
    if root is None:
        # BUG FIX: 'root' used to be left unbound (NameError) here.
        return header
    # Find predefined nodes and append them to the header
    for node in root.childNodes:
        if node.nodeName in header_tags:
            header[node.nodeName] = node.firstChild.data
        elif node.nodeName in timestamps:
            header[node.nodeName] = _parse_rss_date(node.firstChild.data)
    return header

def get_rss_dict(dom):
    """Combine the header fields and items of 'dom' into one dict."""
    rss = get_rss_header(dom)
    rss['items'] = get_rss_items(dom)
    return rss

def get_data(url):
    """Fetch the RSS feed at 'url' and return it as a dictionary."""
    return get_rss_dict(get_rss_dom(url))

if __name__ == '__main__':
    url = 'http://alpha.tvkaista.fi/feed/'
    pwfile = 'username.txt'
    if os.path.isfile(pwfile):
        user = eval(open(pwfile, 'rt').read())
        username = user['username']
        password = user['password']
    else:
        username = raw_input("TVKaista username: ")
        password = raw_input("TVKaista password: ")
        f = open(pwfile, 'wt')
        f.write(repr({'username': username, 'password': password}))
    urllib._urlopener = TVKaistaURLopener(username, password)
    #import tempfile
    #CACHEDIR = tempfile.mkdtemp()
    print "Using CACHEDIR for RSS-feeds: %s" % (CACHEDIR)
    # Get channel list
    rss = get_data(url)
    print rss
    sys.exit()
