# API key used in every thetvdb.com API request URL.
APIKEY="06B511234FFFC95F"

# Make the parent directory and CWD importable so scraperBase/utils resolve
# when this scraper is loaded either directly or from the scrapers/ folder.
import sys
sys.path.insert(0, "..")
sys.path.insert(0, ".")
from scraperBase import scraperBase
import random
random.seed()  # default seeding (system time / OS entropy); used for mirror selection

from utils import *
import re
import xml.dom.minidom
from xml.dom.minidom import Node
# NOTE(review): xml.dom.ext is from the legacy PyXML package, and PrettyPrint
# is not referenced anywhere in this file -- looks removable; confirm no
# external code relies on this module re-exporting it.
from xml.dom.ext import PrettyPrint
# Extracts the numeric series id (named group "id") from a thetvdb.com URL.
THETVDB_URL = re.compile("thetvdb.com/.*id=(?P<id>[0-9]*)",re.IGNORECASE)

class Scraper(scraperBase):
	"""Metadata scraper for thetvdb.com.

	Uses the site's legacy XML API: the mirror list is fetched once and
	cached on the instance, searches go through GetSeries.php, and full
	series details are downloaded as a zip of XML files from a randomly
	chosen mirror.
	"""
	name = "TheTvDB"

	def __init__(self, debug=False):
		scraperBase.__init__(self, debug)
		# Mirror table, lazily populated by loadMirrors() on first GetMirror().
		self.mirrors = None

	def loadMirrors(self):
		"""Download mirrors.xml and cache the mirrors grouped by capability.

		Sets self.mirrors to {"xml": [...], "banners": [...], "zip": [...]};
		each entry is a dict with "id", "path" and the three capability flags.
		"""
		# TODO : do not cache this long, a day maybe?
		mirrors = self.HTTP_Get("Main","Mirrors","http://www.thetvdb.com/api/%s/mirrors.xml" % APIKEY)

		doc = xml.dom.minidom.parseString(mirrors)
		mirrorList = []

		for mirror in doc.getElementsByTagName("Mirror"):
			# <typemask> is a bitfield: 1 = xml files, 2 = banners, 4 = zip files.
			typeMask = int(mirror.getElementsByTagName("typemask")[0].firstChild.data)
			mirrorList.append({
				"id": int(mirror.getElementsByTagName("id")[0].firstChild.data),
				"path": mirror.getElementsByTagName("mirrorpath")[0].firstChild.data,
				"xml": bool(typeMask & 1),
				"banners": bool(typeMask & 2),
				"zip": bool(typeMask & 4),
			})

		self.mirrors = {
			"xml": filter(lambda x: x["xml"], mirrorList),
			"banners": filter(lambda x: x["banners"], mirrorList),
			"zip": filter(lambda x: x["zip"], mirrorList)}

	def GetMirror(self, type):
		"""Return the base URL ("path") of a random mirror supporting *type*
		("xml", "banners" or "zip"), loading the mirror list on first use."""
		# BUG FIX: previously tested `self.Mirrors` (AttributeError) instead of
		# `self.mirrors`, so the lazy load crashed on first call.
		if not self.mirrors:
			self.loadMirrors()
		# random.choice replaces the manual randint(0, len-1) indexing.
		return random.choice(self.mirrors[type])["path"]

	def Search(self, name, year=None):
		"""Search thetvdb.com for series matching *name* (optionally *year*).

		Returns a list of {"id": int, "name": str, "released": str} dicts,
		one per <Series> element in the response.
		"""
		search = name
		if year:
			search = search + " " + year
		search = search.replace(" ", "+")

		# BUG FIX: debug leftovers read a local search.xml instead of querying
		# the API; the real request (previously commented out) is restored.
		# TODO : Should I be using a mirror for this?
		results = self.HTTP_Get("search",search,"http://www.thetvdb.com/api/GetSeries.php?seriesname=%s" % (search))

		seriesArray = []
		doc = xml.dom.minidom.parseString(results)
		for serie in doc.getElementsByTagName("Series"):
			seriesArray.append({
				"id": int(GetElement(serie, "seriesid")),
				"name": GetElement(serie, "SeriesName"),
				"released": GetElement(serie, "FirstAired")})
		return seriesArray

	def ParseDetails(self, results):
		"""Parse an all/en.zip payload (raw bytes) into a details dict.

		The returned dict carries title/rating/plot_short/status, a "tags"
		list (genres plus the literal "Series") and an "episodes" list of
		per-episode dicts.
		"""
		import cStringIO
		from zipfile import ZipFile
		zip = ZipFile(cStringIO.StringIO(results), "r")

		doc = xml.dom.minidom.parseString(zip.read("en.xml"))

		r = LoadElements(doc, {
			'SeriesName': 'title',
			'Rating': 'rating',
			'Overview': 'plot_short',
			'Status': 'status',
		})

		episodeList = []
		for episode in doc.getElementsByTagName("Episode"):
			episodeList.append(LoadElements(episode, {
				'EpisodeName': 'name',
				'EpisodeNumber': 'episode',
				'SeasonNumber': 'season',
				'airsafter_season': 'airsafter_season',
				'airsbefore_episode': 'airsbefore_episode',
				'airsbefore_season': 'airsbefore_season',
			}))

		# Genre is pipe-separated with empty leading/trailing entries; the
		# filter drops the empties.
		r['tags'] = filter(lambda x: x, GetElement(doc, "Genre").split("|"))
		r['tags'].append("Series")
		r['episodes'] = episodeList
		return r

	def GetDetails(self, url):
		"""Fetch and parse full details for a thetvdb.com series *url*."""
		id = THETVDB_URL.findall(url)[0]
		results = self.HTTP_Get("details",id,"%s/api/%s/series/%s/all/en.zip" % (self.GetMirror("xml"),APIKEY,id))

		# BUG FIX: was a bare `ParseDetails(results)` (NameError) -- missing self.
		return self.ParseDetails(results)

def DEBUG_ParseDetails():
	"""Profiling helper: parse the locally cached series zip (id 80348)."""
	data = open("scrapers/80348.zip").read()
	Scraper(debug=True).ParseDetails(data)

if __name__ == "__main__":
	#print x.Search("Chuck")
	#print x.GetDetails("goat")
	import cProfile
	import pstats

	
	print "Profiling ParseDetails scrapers/80348.zip"
	cProfile.run("DEBUG_ParseDetails()","Profiler.tmp")
	p = pstats.Stats("Profiler.tmp")
	#p.sort_stats('cumulative').print_stats(10)
	p.sort_stats('cumulative').print_stats(0)


