#!/usr/bin/env python

#CHANGELOG
#rewritten everything using SoupStrainer, trying to reduce memory and time usage
# - gather_manga_list is now about 4 times faster than the previous implementation
# - get_pages is *much faster* than before (and shorter)
# - get_img_url is a bit faster
# - avaiable_chap is about 10 times faster

import os
import re
import sys
import urllib

sys.path.append(os.path.split(os.getcwd())[0])

from BeautifulSoup import BeautifulSoup, SoupStrainer #beautifulsoup is imported from the parent folder

site = 'http://www.onemanga.com'  # base URL every scraped path is joined onto
pages = ()  # get_img_url's cache: (chapter_url, list_of_page_ids) for the last chapter fetched

class MyOpener(urllib.FancyURLopener):
    # `version` is the class attribute urllib.FancyURLopener sends as the
    # User-Agent header; overriding it spoofs a Firefox 3 browser so the
    # site does not reject the scraper's requests.
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.9.0.2) Gecko/2008091620 Firefox/3.0.2'


# all HTTP fetches in this module go through this browser-spoofing opener
urlopen = MyOpener().open

def gather_manga_list():
    """Scrape the site front page and return the full manga catalogue.

    Parses only the <option> tags of the front page (the manga drop-down)
    and extracts each entry's link and display name.

    Returns:
        tuple: (names, urls) — two parallel lists; urls are absolute.
    """
    page = urlopen(site)
    soup = BeautifulSoup(page.read(), parseOnlyThese=SoupStrainer('option'))
    page.close()
    urls, names = [], []
    reg = re.compile('(".*")?>(.+?)<')
    for tag in soup:
        sr = reg.search(str(tag))
        # Skip malformed/placeholder <option> entries: the original code
        # raised AttributeError when the regex failed to match or when the
        # optional value-attribute group was absent.
        if sr is None or sr.group(1) is None:
            continue
        urls.append(site + '/' + sr.group(1).strip('"'))
        names.append(sr.group(2))
    return names, urls
   
def get_img_url(url, n):
    """Return download info for page ``n`` of the chapter at ``url``.

    Caches the chapter's page list in the module-level ``pages`` tuple
    (chapter_url, page_ids) so repeated calls for the same chapter do not
    re-fetch the page index.

    Args:
        url: absolute URL of the chapter reader page.
        n: zero-based page index into the chapter's page list.

    Returns:
        tuple: (img_url, save_path, filename) — the image URL, a local
        path built from the last URL segments ('_' replaced by spaces),
        and the image file name.
    """
    global pages
    # Refresh the cache when empty or when a different chapter is requested
    # (the original performed these two checks as separate statements).
    if not pages or pages[0] != url:
        pages = (url, get_pages(url))
    resp = urlopen(url + pages[1][n] + '/')
    soup = BeautifulSoup(resp.read(), parseOnlyThese=SoupStrainer('img'))
    resp.close()
    imgs = [tag for tag in soup]
    # Local save path from the last three URL segments, e.g. "Manga/Chapter".
    # (Kept in its own name instead of clobbering the parameter `n`.)
    save_path = os.sep.join([el.replace('_', ' ') for el in url.split('/')[-3:] if el])
    img_url = re.search('(http://[./a-zA-Z0-9_-]*)', str(imgs[0])).group(1)
    return img_url, save_path, img_url.split('/')[-1]
    
def avaiable_chap(manga):
    """List the available chapters for a manga.

    Fetches the manga's index page and reads its chapter table: every
    third <td> cell holds the chapter link. Results are reversed so they
    run from the first chapter to the latest.

    Args:
        manga: absolute URL of the manga's index page.

    Returns:
        tuple: (name, url) — parallel lists of chapter numbers (the last
        whitespace-separated token of the link text) and chapter URLs,
        both utf-8 encoded.
    """
    page = urlopen(manga)
    soup = BeautifulSoup(page.read(), parseOnlyThese=SoupStrainer('td'))
    page.close()
    cells = [row for row in soup]
    name, url = [], []
    # one pass over every third cell instead of two stride-3 index loops
    for cell in cells[::3]:
        url.append(cell.a['href'].encode('utf-8'))
        name.append(cell.a.string.encode('utf-8').split()[-1])
    name.reverse()
    url.reverse()
    return name, url
    
def get_pages(url):
    """Return the list of page identifiers for the chapter at ``url``.

    If ``url`` is a chapter landing page (it contains a "Begin reading"
    link), follow the last <li><a href="..."> before that text into the
    real reader page and recurse once; otherwise collect the text of
    every <option> tag after the first.

    Args:
        url: absolute URL of a chapter landing or reader page.

    Returns:
        list: page-identifier strings, one per page of the chapter.
    """
    resp = urlopen(url)
    data = resp.read()
    resp.close()  # the original leaked this response object
    if "Begin reading" in data:
        # Landing page: the href of the last list link before the
        # "Begin reading" text points at the actual reader page.
        reader = data.split("Begin reading ")[0].rsplit("<li><a href=\"", 1)[1].split("\">")[0]
        return get_pages(site + reader)
    options = [opt for opt in BeautifulSoup(data, parseOnlyThese=SoupStrainer('option'))]
    # skip the first <option> (placeholder entry), keep each tag's inner text
    return [re.search(">(.*)<", str(opt)).group(1) for opt in options[1:]]