#!/usr/bin/python
# -*- coding: utf-8 -*-
#
#    pyFeeder - Python feed parser and file downloader
#         http://thelazy.net/~terra/pyfeeder/
#
#    Copyright 2008, Mikael Turøy (terrasque@thelazy.net)
# 
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

from lib import feedparser
from lib.config import Config
from datetime import datetime
import re, urllib, shutil
import os.path
import getopt, sys

# Defaults for the command line options. The string "NoSet" marks the
# two verbosity levels as "not given on the command line", so that the
# values from the config file can be used as a fallback later on.
config = "config"
loglevel = "NoSet"
vlevel = "NoSet"
testrun = False

def usage():
    """Write the command line help text to standard output."""
    helptext = """pyFeeder is a small program for parsing RSS feeds and download matching files.

Command line parmeters:

 -c <file>	 Config file
 --config=<file> Config file
 -l <0-5>	 Level of entries written to log file.
 -v <0-5>	 Level of entries written to screen.
 -h		 This help
 --test		 Only does a testrun, no downloads will occur
 --firsttime     Only save matches to history, don't download
"""
    sys.stdout.write(helptext + "\n")

firsttime = False

try:
    opts, args = getopt.getopt(sys.argv[1:], \
      "c:hv:l:", ["help", "config=", "firsttime", "test"])
except getopt.GetoptError, err:
    print str(err)
    usage()
    sys.exit(2)
for o, a in opts:
    if o in ("-c", '--config'):
        config = a
    elif o in ("--help", "-h"):
        usage()
        sys.exit()
    elif o == "-v":
        vlevel = int(a)
    elif o == "-l":
        loglevel = int(a)
    elif o == "--firsttime":
        firsttime = True
    elif o == "--test":
	testrun = True

# Load config and get values from there.
# C will have the config information.
configfile = file(config)
C = Config(configfile)
configfile.close()
# The log file handle stays open for the whole run; log() writes to it.
logf = file(C.logfile, 'a')
# -v / -l from the command line win over the config file; "NoSet"
# means the option was not given, so fall back to the config values.
if vlevel == "NoSet":
    vlevel = C.consolelevel
if loglevel == "NoSet":
    loglevel = C.loglevel

# The history file stores every title that has been handled before,
# which keeps the program from re-downloading the same files.
history = []
if os.path.isfile(C.history):
    historyfile = file(C.history)
    history = [title.strip() for title in historyfile]
    historyfile.close()

# Compile all configured regexes once up front. Each list element is a
# tuple of (compiled match regex, config entry), extended with a third
# element (compiled nomatch regex) when a nomatch pattern is configured.
regexes = []
for rx in C.regexes:
    item = (re.compile(rx.match), rx)
    if rx.nomatch != '':
        item += (re.compile(rx.nomatch),)
    regexes.append(item)

# Central logging helper: formats a message with a timestamp and writes
# it to the log file and/or the console, depending on the configured
# loglevel / vlevel thresholds.
def log(message, level=1):
    """Write a timestamped log entry to the log file and/or console.

    A destination receives the message only when the message level is
    at or below that destination's threshold (loglevel for the file,
    vlevel for the screen).
    """
    stamp = datetime.now().strftime("%d/%m/%y %H:%M:%S")
    line = "%s [%s]> %s" % (stamp, level, message)
    if level <= loglevel:
        logf.write(line + "\n")
    if level <= vlevel:
        sys.stdout.write(line + "\n")

# Matches the filename parameter of a Content-Disposition header, e.g.
# 'attachment; filename="foo.torrent"' or 'attachment; filename=foo.torrent'.
_filename_re = re.compile(r'filename="?([^";]*)"?')

# This function takes an entry, finds the file to download, and downloads it.
def getEntry(entry, path, filetype, infodict):
    """Download file from RSS entry. Run external program if configured.

    Keyword arguments:
    entry    -- The RSS Entry.
    path     -- The folder path to where file shall be downloaded.
    filetype -- Mime type of file to download.
    infodict -- Information for external program.

    """
    url = None
    # Enclosures are used to put more links in one rss entry.
    # For example so that main link can point to a html file.
    # If no enclosures are in the rss, use the default link given.
    if entry.has_key('enclosures'):
        log(" Entry have enclosures.", 4)
        for x in entry.enclosures:
            if x.type == filetype:
                url = x.href
    else:
        url = entry.link
    if url is None:
        # This happens if there is an "enclosures" link list, but none of the files
        # are of the right type. Maybe we should fall back on the main link here..
        log('ERROR! Could not locate URL!', 0)
        return
    log(" Downloading URL %s" % url, 4)
    f, i = urllib.urlretrieve(url)

    # Prefer a filename supplied by the server in the Content-Disposition
    # header. The old code cut the value at a fixed offset ([23:-1]), which
    # only worked for one exact header layout; extract the filename=
    # parameter with a regex instead, quoted or not.
    name = None
    if i.has_key('Content-Disposition'):
        m = _filename_re.search(i['Content-Disposition'])
        if m:
            name = m.group(1)
    if not name:
        # No usable header value; fall back to the last URL component.
        name = os.path.basename(urllib.unquote(url))
    shutil.copyfile(f, os.path.join(path, name))
    urllib.urlcleanup() #Clean temporary file
    log(" Saved as %s." % os.path.join(path, name), 2)
    if 'program' in C:
        infodict['filename'] = name
        infodict['fullname'] = os.path.join(path, name)
        log("Running external program, as set in config", 5)
        log(" Program parameters : " + C.program % infodict, 6)
        os.system(C.program % infodict)

def ReadFeed(feed):
    """Read feed, send matching entries to getEntry."""
    # This is the loop that runs through and checks all feeds for matches.
    F = feedparser.parse(feed.url)
    log('Parsing RSS entry "%s" (%s).' % (feed.name, F.feed.title), 3)
    for entry in F.entries:
        if entry.title in history:
            log("Skipping '%s', found in history." % entry.title, 6)
            continue
        log("Checking '%s' for matches" % entry.title, 6)
        for regex in regexes:
            match = regex[0].search(entry.title)
            nomatch = False
            if regex[1].nomatch != "":
                nomatch = regex[2].search(entry.title)
            log(" Result : '%s' - Title : '%s' - Regex name : '%s' - Regex : '%s'" % (bool(match), entry.title, regex[1].name, regex[1].match), 7)
            #"match" is the match regex result, and "nomatch" is the nomatch regex result (or False if the nomatch entry is '')
            if match and not nomatch:
                log('Found match on regex "%s" for title "%s"'%(regex[1].name, entry.title), 1)
		if testrun:
			print "Doing testrun, no download"
			continue
                if match.groupdict().has_key('ni'):
                    if 'ni' in entry.keys():
                        if int(entry.ni) >= int(match.group('ni')):
                            log('Entry "%s" have lower number increase than config for "%s", skipping.' % (entry.title, regex[1].name) , 5)
                            continue
                            # FIXME : Update config with new value.
                if not firsttime:
                    # Build infodict for external program
                    infodict = {
                        'feedname' : feed.name,
                        'regex' : regex[1].name,
                        'entrytitle' : entry.title
                    }
                    getEntry(entry, C.path, C.filetype, infodict)
                else:
                    log(" Running in First Time mode, only logging match.", 2)
                    # Write the title to the history file, and update the internal history list.
                histf = file(C.history, 'a')
                histf.write("%s\n" % entry.title)
                histf.close()
                history.append(entry.title)

# And now, lets get started.
log("Starting program.", 3)
for x in C.feeds:
    try:
        ReadFeed(x)
    except:
        log("An error occured reading feed %s." % x.name, 0)
