#!/usr/bin/env python

#TODO:
#make all the functions here faster, if possible

import re
import os
import urllib

# Base URL of the scraped site; every relative path found in the HTML is
# joined onto this.
site = 'http://itascan.info/'

# Module-level cache used by get_img_url(): either [] (empty) or a tuple
# (chapter_url, list_of_page_paths) so repeated lookups within the same
# chapter avoid re-fetching the page list.
pages = []

def gather_manga_list():
    """Scrape the site front page and return the manga catalogue.

    Returns:
        (names, urls): parallel lists where names[i] is the manga title
        shown in the site's <option> dropdown and urls[i] is its absolute
        URL (site base + the option's value attribute).
    """
    # Fix: the response object was never closed before (leak); close it in
    # a finally block, matching the style of avaiable_chap() below.
    a = urllib.urlopen(site)
    try:
        html = a.read()
    finally:
        a.close()
    # Each match looks like: value="path">Title</option>
    reg = re.compile('value=.*</option>')
    entries = reg.findall(html)
    names = [el.split('>')[1].split('<')[0] for el in entries]
    # split('"')[1] already contains no quote, so the former trailing
    # .split('"')[0] was a no-op and has been dropped.
    paths = [el.split('"')[1] for el in entries]
    # Py2 map returns a list; kept for compatibility with existing callers.
    return names, map(lambda x: site + x, paths)

def get_img_url(url, n):
    """Resolve page *n* of the chapter at *url* to an image URL.

    Uses the module-level `pages` cache so the page list is fetched only
    once per chapter.

    Args:
        url: chapter URL (the key the cache is checked against).
        n: page number, int or numeric string, 0-based index into the
           chapter's page list.

    Returns:
        (img_url, local_dir, filename) where img_url is the absolute image
        URL, local_dir is a relative directory path built from the image
        path components, and filename is the trailing page number plus the
        original extension, with "()'" characters stripped.
    """
    global pages
    # Fix: the original had two back-to-back ifs doing the same refresh
    # ("cache empty" and "cache is for another chapter"); merged into one.
    if not pages or pages[0] != url:
        pages = (url, get_pages(url))
    idx = int(n)  # fix: int(n) was computed twice
    page_path = pages[1][idx]
    # Fix: use the module `site` constant instead of repeating the literal
    # 'http://itascan.info/' (same value, single source of truth).
    img_url = site + page_path
    path, name = os.path.split(page_path)
    reg = re.compile('[0-9]+', re.IGNORECASE)
    # Keep only the last run of digits (the page number) + the extension,
    # then drop any parentheses/quotes left over from the JS array source.
    name = re.sub("[()']", '', reg.findall(name)[-1] + os.path.splitext(name)[1])
    # path[4:] skips a fixed prefix of the scraped path, then the first two
    # '/' components are dropped — assumes the site's path layout; the
    # remainder becomes an OS-native relative directory.
    return img_url.strip("'"), os.sep.join(path[4:].split('/')[2:]), name

def avaiable_chap(manga):
    """List the chapters advertised on a manga's page.

    Args:
        manga: absolute URL of the manga page to scrape.

    Returns:
        (names, urls): parallel lists of chapter labels ("Capitolo ...")
        and the href values of their anchor tags.

    Note: the (misspelled) public name is kept for existing callers.
    """
    resp = urllib.urlopen(manga)
    html = resp.read()
    resp.close()
    # Anchors look like: Capitolo X ... href="...">Capitolo X</a>
    matches = re.findall("Capitolo .*</a>", html)
    names = []
    urls = []
    for anchor in matches:
        names.append(anchor.split('>')[1].split('<')[0])
        urls.append(anchor.split('"')[1].split('"')[0])
    return names, urls

def get_pages(url):
    """Fetch a chapter page and extract its image paths.

    Parses the JavaScript `var Slides = new Array(...)` literal embedded
    in the page and returns the list of per-page image paths.

    Args:
        url: chapter page URL.

    Returns:
        List of path strings, one per page image.
    """
    conn = urllib.urlopen(url)
    html = conn.read()
    conn.close()
    # The group captures the parenthesized argument list of new Array(...);
    # re.S lets the array span multiple lines. IndexError if absent,
    # matching the original behavior.
    pattern = re.compile('var Slides = new Array(\(.+?\));', re.S)
    raw_array = pattern.findall(html)[0]
    # Each comma-separated entry carries 4 junk leading chars and 1
    # trailing char around a quoted path; peel them off.
    return [entry[4:][:-1].strip("'") for entry in raw_array.split(',')]