import commands
import datetime
import os
import pipes
import re
import shutil

import feedparser

from future import Future

# RSS feeds to poll for newly published issues.
hit_list = [
  "http://www.youkioske.com/rss/category/26/published/",
  "http://www.youkioske.com/rss/category/1/published/",
]

# URL slugs of the magazines to download every day.
revistas = [
  "el-periodico-de-catalunya",
  "diario-sport",
  "mundo-deportivo",
  "diario-marca",
]

# Fetch every feed concurrently: Future wraps feedparser.parse so all the
# downloads are started in parallel instead of sequentially.
future_calls = [Future(feedparser.parse, rss_url) for rss_url in hit_list]
# Calling a Future object blocks until its result is available.
feeds = [future_obj() for future_obj in future_calls]

# Flatten the entries of every feed into one list.
entries = []
for feed in feeds:
  entries.extend(feed["items"])

# Newest entries first; reverse=True replaces the old sorted()+.reverse() pair.
sorted_entries = sorted(entries, key=lambda entry: entry["date_parsed"], reverse=True)

meses = ['enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio',
         'agosto', 'septiembre', 'octubre', 'noviembre', 'diciembre']
hoy = datetime.date.today()
# Slug suffix for today's issues, e.g. "-5-enero-2012".  The day is NOT
# zero-padded on purpose: it must match the site's URL scheme.
fecha = "-%s-%s-%s" % (hoy.day, meses[hoy.month - 1], hoy.year)
# Full slugs ("magazine-name-day-month-year") expected in today's feed links.
revistas_de_hoy = [x + fecha for x in revistas]


pdf_re = re.compile('PDF (.+)\.pdf creado en (.+)')

for entri in sorted_entries:
  l=entri.link.strip("/")
  l=l[l.rfind("/")+1:]
  print l
  if l in revistas_de_hoy:
    print "Bajando %s" % (l,)
#PDF issuu_kizler_erger.pdf creado en ./issu_kizler_erger/
    ( stat, output ) = commands.getstatusoutput( "java -jar jissuuDownloader.jar -d %s" % (entri.link,) )
    if( stat == 0 ):
      s=pdf_re.search(output)
      shutil.move(s.group(2)+s.group(1)+'.pdf','/media/datos1/bajadas/Dropbox/prensa/'+l+'.pdf')
      shutil.rmtree(s.group(2))
    else:
      print "Command failed, here is the output: %s" % output
