#!/usr/bin/env python
"""
Bajador automatico de subtitulos
fcasco@gmail.com

Sat Jan  8 14:04:39 ART 2011
"""
from cStringIO import StringIO
import logging
import os
import re
import sys
import tempfile
import urllib2
from xml.etree import ElementTree as ET
import zipfile

def get_params():
    """Return the script's configuration as a dict.

    Keys:
        name      -- movie/series title to search for
        url       -- subdivx.com search URL with a %s placeholder for the title
        link_flag -- regex marking HTML lines that contain a download link
        link_extr -- regex with 'desc' and 'url' groups to pull data from such a line
        subs_exts -- accepted subtitle filename extensions
        dest_dir  -- output directory ('' means derive from name at runtime)
    """
    return {
        'name': 'Shrek.Forever.After',
        'url': 'http://www.subdivx.com/index.php?buscar=%s&accion=5&masdesc=&subtitulos=1&realiza_b=1',
        'link_flag': 'id="buscador_detalle_sub"',
        'link_extr': '.*buscador_detalle_sub">(?P<desc>.*?)</div>.*(?P<url>http://www.subdivx.com/bajar.php.*?)"',
        'subs_exts': ('srt', ),
        'dest_dir': '',
    }

def search_subs(name, url):
    """Search for links to subtitle files.

    @param name: title to search for (interpolated into the URL's %s slot)
    @param url: search URL template containing one %s placeholder
    @return: list of raw HTML lines from the search results page
    """
    # Close the response explicitly -- the original leaked the socket.
    response = urllib2.urlopen(url % name)
    try:
        return response.readlines()
    finally:
        response.close()

def extract_links(data, link_flag, link_extr):
    """Extract subtitle links from lines of HTML.

    @param data: iterable of HTML lines
    @param link_flag: regex string identifying lines that carry a link
    @param link_extr: regex string (with named groups) extracting the link's data
    @return: list of group dicts, one per line where both patterns matched
    """
    flag_re = re.compile(link_flag)
    extr_re = re.compile(link_extr)
    # Only lines matching the flag are worth running the extractor on;
    # lines where the extractor then fails are silently dropped.
    candidates = (extr_re.search(line) for line in data if flag_re.search(line))
    return [hit.groupdict() for hit in candidates if hit]

def download_sub(url, temp_dir, dest_dir):
    """Download a subtitle from *url* into *temp_dir*, then place it in *dest_dir*.

    NOTE(review): stub -- not implemented; the download/unzip logic currently
    lives inline in __main__ and presumably was meant to migrate here. TODO.
    """
    pass


def __main__(argv=None):
    if argv is None:
        argv = sys.argv
    res = 0

    params = get_params()
    if not params['dest_dir']:
        params['dest_dir'] = os.path.join(os.getcwd(), params['name'])
    if os.path.exists(params['dest_dir']):
        print 'Output directory already exists'
        return 1

    print 'Searching for subtitles for %s' % params['name']
    html = search_subs(params['name'], params['url'])
    links = extract_links(html, params['link_flag'], params['link_extr'])
    print 'Found %0d links' % len(links)
    for i, link in enumerate(links):
        tmp_dir = tempfile.mkdtemp(prefix='%s-subs' % params['name'])
        sub_fname = os.path.join(tmp_dir, 'sub-%04d' % i)
        print 'Downloading subtitle %04d to %s...' % (i, sub_fname)
        with open(sub_fname, 'w') as out_file:
            out_file.write(urllib2.urlopen(link['url']).read())
        if zipfile.is_zipfile(sub_fname):
            print 'Extracting files from %s' % sub_fname
            zipped = zipfile.ZipFile(sub_fname)
            for name in zipped.namelist():
                if name[:-3] in params['subs_exts']:
                    zipped.extract(name)
#
#
#
#    if len(links) > 0:
#        exact = [x for x in links if x['desc'].find(name) > 0]
#        if len(exact) > 0:
#            with open('.'.join((name, 'srt')), 'w') as o:
#                    o.write(urllib2.urlopen(exact[0]['url']).read())
#    downloads = []
#    for link in links:
#        new_file = tempfile.TemporaryFile()
#        new_file.write(urllib2.urlopen(link['url']).read())
#        downloads.append(new_file)


    #print [x['url'] for x in links]

    return res

# Script entry point: __main__()'s return value becomes the process exit status.
if __name__ == '__main__':
    sys.exit(__main__())
