# -*- coding: UTF-8 -*-

import os
import urllib
import xml.dom.minidom
from WebCrawl import Content,Webcrawler

#1 : Open the wanted list

# Read the list of wanted series names (one name per line) from
# wanted.in, which lives next to this script.  Use a context manager so
# the handle is closed even on error, and avoid shadowing the builtin
# 'file'.
path = os.path.join(os.path.dirname(__file__), "wanted.in")
with open(path, "r") as wanted_file:
    lines = wanted_file.readlines()

# Accumulates one Content object per series successfully looked up.
serieslist = []

#2 : Common Mirror choosing

# Ask TheTVDB for its mirror list and keep the base URL of the first
# advertised mirror; 'mirrorpath' is used below to fetch series records.
mirrors_url = "http://www.thetvdb.com/api/6F53113AF4976B22/mirrors.xml"
mirrors_xml = urllib.urlopen(mirrors_url).read()
mirrors_dom = xml.dom.minidom.parseString(mirrors_xml)
mirrorpath = mirrors_dom.getElementsByTagName("mirrorpath")[0].firstChild.data


for name in lines:
    input = name.replace(" ", "+")
    url2 = "http://www.thetvdb.com/api/GetSeries.php?seriesname="+input
    content2 = urllib.urlopen(url2).read()
    dom = xml.dom.minidom.parseString(content2)
    
    print name
    #Check if something was really found    
    if(content2 != '<?xml version="1.0" encoding="UTF-8" ?>\n<Data>\n</Data>'):
        seriesid = dom.getElementsByTagName("seriesid")[0].firstChild.data
        print "Found"
    else:
        seriesid = "0"
        print"Not found"
    
    if(seriesid != "0"):
        url3 = mirrorpath+"/api/6F53113AF4976B22/series/"+seriesid+"/all"
        content3 = urllib.urlopen(url3).read()
    
        dom = xml.dom.minidom.parseString(content3)
        infonode = dom.getElementsByTagName("Series")[0]
        #print infonode.toxml()
        name = dom.getElementsByTagName("SeriesName")[0].firstChild.data
        
        if(dom.getElementsByTagName("Rating")[0].firstChild != None):
            rating = int(float(dom.getElementsByTagName("Rating")[0].firstChild.data) *10)
        else:
            rating = 50
        
        if(dom.getElementsByTagName("Overview")[0].firstChild != None):
            syn = dom.getElementsByTagName("Overview")[0].firstChild.data
            syn.replace('…',"...")
        else:
            syn = "Not available"
        
        if(dom.getElementsByTagName("FirstAired")[0].firstChild != None):
            year = dom.getElementsByTagName("FirstAired")[0].firstChild.data
            year = year[0:4]
        else:
            year = ""
        
        if(dom.getElementsByTagName("Genre")[0].firstChild != None):
            genre = dom.getElementsByTagName("Genre")[0].firstChild.data
            genre = genre.split("|")[1]
            ctype = "Series"
        else:
            genre = "Unknown"
        
        if(dom.getElementsByTagName("Actors")[0].firstChild != None):
            actor = dom.getElementsByTagName("Actors")[0].firstChild.data
            actor = actor.split('|')[1]
        else:
            actor = "John Doe"
        
        if(dom.getElementsByTagName("poster")[0].firstChild != None):
            pic = dom.getElementsByTagName("poster")[0].firstChild.data
            pic = "http://www.thetvdb.com/banners/" + pic
        else:
            pic = ""
        #print infonode.toxml()
        c = Content(name,rating,syn,year,genre,ctype,actor,pic)
        serieslist.append(c)

        
        



# Clean File: opening in "w" mode truncates wanted.in, so names already
# processed are not fetched again on the next run.  The explicit
# write("") of the original was redundant; a context manager replaces
# the open/close pair and avoids shadowing the builtin 'file'.
with open(path, "w"):
    pass

spider = Webcrawler()
print "opening connection to db"
spider.openDBConn("127.0.0.1",3306, "root", "root", "cr_database")
print "pushing"
for k in serieslist:
    #print k.toStr()
    #print "\n"
    k.pushToDatabase(spider.conn)
    

    
