#!/usr/bin/env python

"""
    Fetch selected TV .torrents using tvrss.net's RSS feed.
"""
import shelve, xml.dom.minidom, time, os, os.path, urllib,re, sys, BitTorrent.bencode, copy

try:
    from configuration import *
except:
    print "Please copy configuration.py.sample to configuration.py and edit it before running this program."
    sys.exit(0)

# This will hold the status of the programs (the keys start with 'Prog')
# and also cache the XML feeds (as a tuple, together with a timestamp)
infoStore = shelve.open(os.path.expanduser(infoStoreFilename))


# Have we downloaded some of the programmes in the list above? If so, use the stored information instead.
for prog, initial in subs.iteritems():
        subs[prog] = infoStore.get("Prog: %s" % prog, initial)
        if subs[prog] is None:
                subs[prog] = initial

# Check when we last ran. If no record, pretend it was a week ago.
lastCheckedDate = infoStore.get('lastCheckedDate', time.time() - 3600 * 24 * 7)
ageSeconds = time.time() - lastCheckedDate

# If we ran less than an hour ago, don't run.
if ageSeconds < 3600:
    print "last checked %d minutes ago; it's too soon to run again." % (ageSeconds / 60)
    sys.exit(0)

# Use US TZ, to make sure we look at the right feeds.
os.environ["TZ"]="US/Pacific"
time.tzset()

def debug(*parts):
    """
        Debug tracing hook - currently a no-op.
    """
    # Uncomment to see trace output:
    #print " ".join(str(p) for p in parts)
    pass

def getText(node):
    """
        Helper method to get the text out of a DOM element.

        Concatenates the data of the element's direct text-node children
        (element children and their contents are skipped).
    """
    pieces = [c.data for c in node.childNodes if c.nodeType == c.TEXT_NODE]
    return "".join(pieces)

def error(msg):
    """
        Report an error.

        For now this just writes the message to stderr; it might email
        it in the future.
    """
    line = "%s\n" % msg
    sys.stderr.write(line)

def getLength(torrent):
    """
        Work out the total payload size of a torrent, in bytes.

        torrent: the raw bencoded contents of a .torrent file.
        Returns the single file's length for a one-file torrent, or the
        sum of all the per-file lengths for a multi-file torrent.
        Raises whatever BitTorrent.bencode.bdecode raises on bad input.
    """
    metainfo = BitTorrent.bencode.bdecode(torrent)
    info = metainfo['info']
    # "in" instead of the deprecated has_key(); also avoid shadowing the
    # 'file' builtin, and let sum() consume a generator rather than
    # materializing a throwaway list.
    if 'length' in info:
        # Single-file torrent: the payload size is given directly.
        return info['length']
    return sum(f['length'] for f in info['files'])


# Check again on the day we last checked (to be super safe) and then every day up to today.
# (if we check in the future, the feed will be blank)

# Until I work out tvrss's semantics, I check an extra day into the past, to be safe. If I try to get today's RSS, it sometimes comes up empty!
# I have extended this further back, to cater for torrents sources which are temporarily down. This way, if a site goes down, we can tolerate
# it as long as it doesn't last more than a couple of days.
# Because we cache the old days, this is a fairly inexpensive change.
# Walk one day at a time from (lastCheckedDate - 72h) up to now, fetching
# each day's feed, and downloading any torrent that is newer than what we
# have recorded for a subscribed programme.
dateToCheck = lastCheckedDate - 72 * 3600 
while dateToCheck < time.time():
        # This is the format used in the TVRSS XML/RSS URL to specify the date the RSS is for.
        dateString = time.strftime("%Y-%m-%d",time.localtime(dateToCheck))
        # Perform the substitution using the above date
        RSSURL = RSSURLBASE % dateString
        RSSData = None
        # Fetch the RSS data from the cache if possible - otherwise, use HTTP
        try:
            # Get the data from the cache, if we can
            RSSData, rssStorageTimestamp = infoStore[RSSURL] # we don't care about the timestamp, just the XML data
            print "Using cached data for %s" % dateString
        except KeyError:
            # Otherwise, download it. A network failure aborts the whole run
            # (re-raise) after reporting which URL failed.
            try:
                RSSData = urllib.urlopen(RSSURL).read()
                print "Downloaded RSS for %s" % dateString
            except Exception, ex:
                error("Failed to retrieve data from the URL: %s" % RSSURL)
                raise

        # Malformed XML also aborts the run, after naming the culprit URL.
        try:
            dom = xml.dom.minidom.parseString(RSSData)
        except:
            error("The data fetched at the following URL is not valid XML: %s" % RSSURL)
            raise

        items = dom.getElementsByTagName("item")

        # iterate through the items (ie: torrents) in the RSS
        for i in items:
            title = getText(i.getElementsByTagName("title")[0])
            link = getText(i.getElementsByTagName("link")[0])
            description = getText(i.getElementsByTagName("description")[0])
            # The description is "Key: value; Key: value; ..." - parse it
            # into a dict. An unparseable part aborts the run (re-raise).
            dDict = dict() # e.g. 'Episode': '2'
            debug("description: %s" % description)
            for d in description.split("; "):
                try:
                    k, v = d.split(": ",1)
                except ValueError, ex:
                    print "Description part is '%s', which is not what I expected" % d
                    raise
                dDict[k] = v
            debug("Found RSS item: %s" % repr(dDict))
            if dDict['Show Title'].startswith('-'):
                    continue # parsing error. thinks it's season/epi when it's just 2008-01-20 or something similar
            enclosure = i.getElementsByTagName("enclosure")[0].attributes
            # The keys of 'subs' are used as regular expressions, matched
            # against the item's "Show Name" field.
            for prog, previousOne in subs.iteritems():
                debug( "\tComparing %s to %s" % (prog, dDict["Show Name"]))
                if re.match(prog, dDict["Show Name"]):
                    debug( "\t\tYes!")
                    debug(dDict)
                    thisOne = None # either a date or a season/episode tuple
                    revisedOne = None # This is previousOne plus thisOne
                    isNewEpisode = False
                    # Season/episode style show: track a set of seen
                    # (season, episode) tuples.
                    if "Season" in dDict.keys():
                        thisOne = (int(dDict["Season"]), int(dDict["Episode"]))
                        # previousOne is a (season, episode) tuple, compare it the old way
                        if isinstance(previousOne, tuple):
                            isNewEpisode = ( thisOne > previousOne )
                            # NOTE(review): in Python 2 this list-comp rebinds
                            # the loop variable 'i', clobbering the outer DOM
                            # item 'i'. Harmless only because 'i' is not read
                            # again this iteration - fragile, though.
                            revisedOne = set([(thisOne[0], i + 1) for i in range(thisOne[1])])
                        else:
                            # Otherwise, look in the set
                            try:
                                isNewEpisode = not ( thisOne in previousOne )
                            except TypeError, ex:
                                    print "Can't find previous for %s" % str(prog)
                                    previousOne = set()
                            debug( "thisOne: ", str(thisOne))
                            debug( "previousOne: ", str(previousOne))
                            revisedOne = copy.copy(previousOne)
                            revisedOne.add(thisOne)

                    # Date-style show: compare YYYYMMDD strings lexically.
                    # If both "Season" and "Episode Date" are present, this
                    # branch overrides the season/episode verdict above.
                    if "Episode Date" in dDict.keys():
                        debug("using date comparison")
                        thisOne = dDict["Episode Date"].replace("-","")
                        if len(thisOne) == 0:
                            # Empty date in the feed: fall back to the feed's own date.
                            thisOne = dateString.replace("-","")
                        isNewEpisode = ( thisOne > previousOne )

                    debug( "\t\tIs new episode: %s" % str(isNewEpisode))
                    if isNewEpisode:
                        debug( "one")
                        url = getText(enclosure["url"])
                        # Fetch the .torrent itself; failures here just skip
                        # this item instead of aborting the run.
                        torrent = None
                        try:
                            torrent = urllib.urlopen(url).read()
                        except Exception, ex:
                            print "Problem fetching the torrent '%s'. Skipping it." % url
                            continue
                        length = None
                        debug( "two")
                        try:
                            length = getLength(torrent) / 1048576 # convert to Mb
                        except Exception, ex:
                            print "Couldn't interpret the contents of the torrent '%s', so I'm skipping it!" % url
                            print repr(ex)
                        else:
                          # do this only if there wasn't an error with the torrent
                          debug( "three")
                          if length <= maxSizeMb:
                            try:
                                debug( "four")
                                # NOTE(review): the file handle is never closed
                                # explicitly; this relies on CPython refcounting.
                                file(os.path.join(destinationFolder, "RSS: %s %s.torrent" % (title.replace("/","-"), str(thisOne))),"w").write(torrent)
                                print "Downloaded torrent: %s" % title
                                # In-memory state keeps just this episode, while
                                # the persistent store gets the full revised set
                                # - presumably deliberate; verify on next run.
                                subs[prog] = thisOne
                                infoStore["Prog: %s" % prog] = revisedOne
                                print "%s: %s" % (prog, revisedOne)
                                debug( "five")
                            except IOError, ex:
                                print "IO problems with the torrent '%s', so I'm skipping it! (Does the directory %s exist, and is it writable?)" % (url, destinationFolder)
                                print ex
                            except Exception, ex:
                                print "Couldn't understand the torrent '%s', so I'm skipping it!" % url
                                print repr(ex)
                          else:
                                print "Torrent for %s is too big (%dMb)" % (prog, length)
        # Only cache this XML if it's not today's - as today's one may not yet be fully generated
        if dateToCheck + 3600 * 24 < time.time() and RSSURL not in infoStore.keys():
            print "Caching RSS data from %s" % dateString
            infoStore[RSSURL]=(RSSData, dateToCheck)
        dateToCheck += 3600 * 24

infoStore['lastCheckedDate'] = time.time()

# Delete cached data older than X days
cutoffTime = time.time() - 3600 * 24 * maxCacheDays
for k,v in infoStore.iteritems():
    if not k.startswith("http:"): continue
    url, timestamp = v
    if timestamp < cutoffTime:
        print "Purging %s from the cache." % time.strftime("%Y-%m-%d",time.localtime(timestamp))
        del infoStore[k]

