'''
Created on Apr 26, 2011

@author: GSE
'''
#WebCrawler1
#First, fetch a list of releases to DVD, max of 20
#Then,save as object
#Save objects in list
#print it

 
from WebCrawl import Webcrawler,Content
 
# Fetch the latest DVD releases (max 20) and build a Content object for
# each one by following the movie's "self" link for its full record.
print("Fetching data")
spider = Webcrawler()
# NOTE: renamed from `json` so the stdlib module name is not shadowed.
release_data = spider.getDvdReleases(20)

movies = []

for entry in release_data["movies"]:
    # The listing entry only carries summary fields; the "self" link
    # returns the full movie JSON.
    link = entry["links"]["self"]
    movie_json = spider.getMovieByLink(link)
    title = movie_json["title"]
    score = movie_json["ratings"]["audience_score"]
    syn = movie_json["synopsis"]
    year = movie_json["year"]
    # assumes every movie has at least one genre, one cast member, and
    # a "detailed" poster -- TODO confirm against the API response schema
    genre = movie_json["genres"][0]
    actor = movie_json['abridged_cast'][0]['name']
    pic = movie_json['posters']['detailed']
    movies.append(Content(title, score, syn, year, genre, "Movie", actor, pic))
    
print("opening connection to db")
# NOTE(review): hard-coded root/root credentials -- should come from
# config or environment, not source.
spider.openDBConn("127.0.0.1", 3306, "root", "root", "cr_database")
print("pushing")
try:
    # Print each movie and persist it through the crawler's connection.
    for movie in movies:
        print(movie.toStr())
        print("\n")
        movie.pushToDatabase(spider.conn)
finally:
    # Always release the DB connection, even if a push fails part-way
    # through (the original leaked the connection on any exception).
    print("done, cleaning up")
    spider.closeDbConn()
    
        