#!/usr/bin/python
from __future__ import with_statement
import os
from datetime import date, timedelta, datetime
import time # convert unix time
import calendar # convert unix time
import feedparser # rss parser - http://www.feedparser.org/
import PyRSS2Gen # rss generator - http://www.dalkescientific.com/Python/PyRSS2Gen.html
import ConfigParser, re #parsing user settings
import sqlite3
import logging

# log everything (DEBUG and up) to a file so unattended/cron runs can be diagnosed later
logging.basicConfig(level=logging.DEBUG, filename='tvnzbfilter.log', filemode='a')

# precompile highdef regex for performance: '720p' or 'x264' in a title marks an HD release
pattern_hd = re.compile( r'(?:720p|x264)', re.IGNORECASE )
# unique nzb id number: the trailing digits of entry links of the form .../nzb/<digits>
pattern_id = re.compile( r'.*nzb/(\d+)$')

# schema marker stored in the 'revision' table; bumping it forces a rebuild of tv.db
database_version = "02"
database_name = "tv.db"

class TvFilter:
	"""parse the stored file and the current rss to return an aggregate rss feed"""
	def __init__(self):
		"""load configuration file and initialize database"""
		#load configuration
		config = ConfigParser.ConfigParser()
		config.read("config.txt")
		try:
			# length of time in days to store old rss items (needs to be negative)
			STORAGE_LENGTH = -1 * config.getint("settings", "storage_length")
			#difference between tvnzb server time and local server time
			TIMEZONE_DIFFERENCE = config.getint("settings", "timezone_difference")
			# user selected shows to display
			INTERESTING_SHOWS_SD = config.get("shows", "Standard-Def")
			INTERESTING_SHOWS_HD = config.get("shows", "High-Def")
		except ( ConfigParser.NoSectionError, ConfigParser.NoOptionError ):
			ex = Exception('Error loading config file: missing section or option')
			logging.error( ex )
			raise ex
		except ConfigParser.ParsingError:
			ex = Exception('Error parsing configuration file')
			logging.error( ex )
			raise ex
		
		self.storage_length = STORAGE_LENGTH
		#	Timezone correction
		self.timezone_delta = timedelta(hours = TIMEZONE_DIFFERENCE)
		self.interesting_shows_sd = [ int(x) for x in re.split(r',\s*', INTERESTING_SHOWS_SD) if x != '']		
		self.interesting_shows_hd = [ int(x) for x in re.split(r',\s*', INTERESTING_SHOWS_HD) if x != '']
		
		# check database and prepare tables
		self.__check_database()
		#	store database and cursor as an attribute for later use
		self.db_connection = sqlite3.connect( database_name )
		# return rows as dictionaries, not tuples
		def dict_factory(cursor, row):
		    d = {}
		    for idx,col in enumerate(cursor.description):
		        d[col[0]] = row[idx]
		    return d
		self.db_connection.row_factory = dict_factory	
		self.db_cursor = self.db_connection.cursor()

	def __check_database(self):
		#initialize db
		db_conn = sqlite3.connect( database_name )
		db_cursor = db_conn.cursor()
		
		# check if database is valid and current
		try:
			r = db_cursor.execute('select db_version from revision where rowid=1')
			version = r.fetchone()
			if version == None or version[0] != database_version:
				raise Exception
		except (sqlite3.OperationalError, Exception), exc:
			logging.info( "Database is an incompatible version or missing, rebuilding..." )
			
			# remake db
			os.remove( database_name )
			db_conn = sqlite3.connect( database_name )
			db_cursor = db_conn.cursor()
			
			# prep table
			db_cursor.execute('create table revision (db_version char(10))')
			db_cursor.execute('''create table episodes
					(nzbid integer primary key,
					showid integer not null, 
					title text not null, 
					link text not null, 
					season int, 
					episode int, 
					description text, 
					length integer, 
					pubdate text)''')
			db_cursor.execute('''create table shows
					(showid integer primary key, 
					title text not null, 
					quality text not null)''')
			# add current database version
			db_cursor.execute('insert into revision values (?);', (database_version,))
			db_conn.commit()
			logging.info( "Database built" )
		
		db_cursor.close()

	def get_rss(self):
		"""return rss document of interesting shows"""
		rss = None
		try:
			rss = self.__parse()
		except Exception, ex:
			logging.error( ex )
		finally:
			if rss is None:
				rss = "Error encountered. Please check log file for more information."
		return rss
		
	def update(self):
		"""update rss information and return sucess or failure for use in cronjobs"""
		try:
			self.__parse()
			return True
		except Exception, ex:
			logging.error( ex )
			return False
			
	def __parse(self):
		"""parse the stored file and the current rss to return an aggregate rss feed"""
			
		def is_interesting(ep):
			"""check if we are interested in show by id and quality"""
			if not int(ep['show_id']) in self.interesting_shows_hd + self.interesting_shows_hd:
				return False
		
			#	compare quality to user setting	
			if bool( pattern_hd.search(ep.title) ):
				return int(ep['show_id']) in self.interesting_shows_hd
			else:
				return int(ep['show_id']) in self.interesting_shows_sd
	
		#	Get the data
		try:
			parsed_feed = feedparser.parse('http://www.tvnzb.com/tvnzb.rss')
			if not parsed_feed.feed.has_key('title'):
				raise Exception('Failed to load tvnzb.com rss')
		except Exception, ex:
			logging.error( ex )
			f.close()
			return str(ex)
		
		#	store current scrape in db
		# todo: check if interesting first
		logging.info( 'Adding new scrape...' )
		for episode in parsed_feed.entries:		
			if is_interesting(episode):
				logging.debug('Entry %s interesting' % (episode.title,))
				nzb_id = pattern_id.match(episode.link).group(1)
				#date is seconds since epoch, inverse of time.gmtime()
				ep = (nzb_id, episode['show_id'], episode.title, episode.link, episode['season'], episode['episode'], 
					episode['description'], episode.enclosures[0].length, calendar.timegm(episode['modified_parsed']))
				self.db_cursor.execute('replace into episodes values (?,?,?,?,?,?,?,?,?)', ep)			
		self.db_connection.commit()
		
		#remove old entries from db
		logging.info( 'Deleting old entries...' )
		oldest_allowed = datetime.today() + timedelta(days = self.storage_length ) - self.timezone_delta
		oldest_allowed_timestamp = int(time.mktime(oldest_allowed.timetuple()))
		self.db_cursor.execute('delete from episodes where pubdate<?', (oldest_allowed_timestamp,) )
		num_entries = len(self.db_cursor.execute('select * from episodes').fetchall())
		logging.info( 'Database now has %s episodes',  num_entries)
		self.db_connection.commit()
		
		# handle show_id tag generation
		class TvNzbRSS2Item(PyRSS2Gen.RSSItem):
			def publish_extensions(self, handler):
				handler.startElement("show_id", {})
				handler.characters(self.show_id)
				handler.endElement("show_id")
		
		items = []
		for row in self.db_cursor.execute('select * from episodes order by pubdate desc'):
				#print(row)
				release = TvNzbRSS2Item(
			        title = row['title'],
			        link = row['link'],
			        description = row['description'],
			        guid = row['link'],
			        pubDate = datetime.fromtimestamp(float(row['pubdate'])),
					enclosure = PyRSS2Gen.Enclosure(row['link'],
					 	row['length'],
					 	"application/x-nzb" )
			        )

				release.show_id = str(row['showid'])
				items.append(release)
		
		# make the RSS2 object
		# Try to grab some info from the orig feed
		rss = PyRSS2Gen.RSS2(
		    title = "tvNZBfilter",
		    link = "http://code.google.com/p/tvnzbfilter/",
		    description = "Shows: " + ", ".join([str(i) for i in self.interesting_shows_hd + self.interesting_shows_sd]) +
							"; Number of stored eps: " + str(num_entries),
    
	    	items = items
		)
		
		#output xml doc
		return rss.to_xml()
