# -*- coding: utf-8 -*-
#*******************************************************************************
#   
#   http://www.popomh.com/manhua/\d+.html 解析器                                   
#                                                                                         
#*******************************************************************************
from base import *

tag='popomh'

#---------------------------- 验证模块 ----------------------------

# 每个解析模块都含有这个函数，用来判断链接是否符合该模块
# Every parser module exposes this function; it decides whether a URL
# belongs to this site and, if so, returns a loader for it.
def url_judgement(url):
    """Return a loader if *url* is a popomh comic index page, else False.

    Fix: the original pattern left '.' unescaped, so it also matched
    URLs like 'http://wwwXpopomhYcom/manhua/1Zhtml'. The dots in the
    host and the '.html' suffix are now escaped to match literally.
    """
    if re.match(r'http://www\.popomh\.com/manhua/\d+\.html', url):
        return valid_check(url)
    return False

# 根据内容检测是否可解析
# Content-level check: even a URL that matches the pattern may point at
# a dead page; only hand out a loader when the page looks parseable.
def valid_check(url):
    """Fetch *url* and return a loader for it, or False if the page is invalid."""
    print('{} catch {}'.format(tag, url))
    content = request_for(url, common_headers)
    if not content:
        return False
    html = content.decode('utf-8')
    # The site serves a page containing this marker text for dead comic IDs.
    if re.search("无效页面", html):
        return False
    return get_loader(url)

# 下载器获取
# Factory: bundle the URL with this site's page and volume parsers.
def get_loader(url):
    """Build the popomh-specific ComicLoader for *url*."""
    loader = PopomhLoader(url, PopomhPageParser, PopomhVolParser)
    return loader

#---------------------------- 下载器 ----------------------------

# 继承下载器部分
# 如果和基类一样，可以考虑不要
class PopomhLoader(ComicLoader):
    """Downloader for popomh pages.

    Adds nothing beyond the base ComicLoader yet; the constructor is
    inherited unchanged, so no explicit __init__ is needed.
    """

#---------------------------- 解析器 ----------------------------

class PopomhPageParser(ComicPageParser):
    """Parses a comic's index page: title, cover, description and volume list."""

    def __init__(self, comic):
        super().__init__(comic)
        # Volume links in the page are site-relative; this makes them absolute.
        self.site_prefix = "http://www.popomh.com"

    def work(self, content):
        """Extract the comic's metadata from the index page HTML *content*.

        Raises IndexError if any expected fragment is missing from the page.
        """
        # title: strip the layout whitespace the HTML source carries
        title = re.compile(r'<h1>(.*?)<div', re.S).findall(content)[0]
        self.comic.title = title.replace('\r', '').replace('\n', '').replace('\t', '').strip()
        # cover image URL
        cover_content = re.compile(r'<div id="about_style">(.*?)</div>', re.S).findall(content)[0]
        cover = re.compile(r'src=\'(.*?)\' width').findall(cover_content)[0]
        self.comic.cover = cover.strip()
        # description
        # (an intermediate regex over '<li><b title=...' was computed here but
        # never used — removed; the pattern below scans the full page itself)
        self.comic.description = re.compile(r'</b>(.*?)</li>', re.S | re.M).findall(content)[0].strip()
        # volume lists: feed each <ul> fragment to the HTML parser;
        # handle_starttag below collects the links
        vol_content = re.compile(r'<ul class=\'cVolUl\'>(.*?)</ul>', re.S).findall(content)
        for vol in vol_content:
            self.feed(vol)
        # store volumes in the opposite order from the page listing
        self.comic.vols.reverse()

    def handle_starttag(self, tag, attrs):
        """HTMLParser callback: record every <a> as one volume entry."""
        if tag == 'a':
            infos = info_collect(attrs)
            self.comic.vols.append(InfoVol(self.site_prefix + infos['href'], infos['title']))

class PopomhVolParser(ComicVolParser):
    """Parses one volume's reader pages and collects all image URLs.

    The site obfuscates image paths in the HTML; get_image_url() reverses
    that obfuscation (scheme described in its docstring).
    """
    def __init__(self):
        super().__init__()
        # Image host prefix, filled in from the page's hidden "hdDomain" field.
        self.image_prefix=None
    def work(self,content):
        """Collect every page image URL of the volume, given page 1's HTML."""
        self.get_image_prefix(content)
        # Page 1's image is embedded in the volume page itself.
        self.get_image_url(content)
        site_prefix="http://www.popomh.com/popo"
        # Hidden form fields: volume id, a token ("hdS"), and the page count.
        vol_id=re.compile(r'id=\"hdVolID\" value="(.*?)" />').findall(content)[0]
        hds=re.compile(r'id="hdS" value="(.*?)" />').findall(content)[0]
        vol_count=re.compile(r'<input name="hdPageCount" type="hidden" id="hdPageCount" value="(.*?)" />').findall(content)[0]
        vol_count=int(vol_count)
        # Pages 2..N each live in their own HTML document; fetch and parse each.
        # NOTE(review): this request omits common_headers, unlike valid_check() —
        # confirm whether request_for() supplies defaults.
        for idx in range(2,vol_count+1):
            vol_pic_url=site_prefix+vol_id+"/"+str(idx)+".html?s="+hds+"&d=0"
            vol_pic_content=request_for(vol_pic_url).decode('utf-8')
            self.get_image_url(vol_pic_content)
    def get_image_url(self,content):
        """De-obfuscate the image path hidden in *content* and append it to pic_urls.

        Scheme (as implemented below):
        - the last character's 1-based alphabet position, xi, locates an
          11-char trailer slice sk near the end of the string;
        - sk's first 10 chars (k) are a substitution table mapping letters
          to the digits 0-9, and its last char (f) is a field separator;
        - after substituting letters with digits, the remaining payload is
          split on f into decimal character codes, which spell out the URL.
        """
        pic_content=re.compile(r'\" name=\"(.*?)\" onerror=\"',re.S).findall(content)[0]
        s=pic_content
        x=s[-1]  # key character selecting where the decoding table sits
        w="abcdefghijklmnopqrstuvwxyz"
        xi=w.find(x)+1  # 1-based alphabet index of the key character
        sk=s[-xi-12:-xi-1]  # 11-char slice: 10-char table + 1-char separator
        s=s[0:-xi-12]       # payload: everything before the table
        k=sk[0:-1]  # substitution table: k[i] stands for digit i
        f=sk[-1]    # separator between encoded character codes
        idx=0
        for c in k:
            # Order matters: idx tracks each letter's position in k.
            s=s.replace(c,str(idx))
            idx+=1
        ss=s.split(f)
        s=[]
        for v in ss:
            s.append(chr(int(v)))  # each field is a decimal character code
        url=''.join(s)
        # zero_prefix() pads the running page index for sortable filenames.
        self.pic_urls.append((self.image_prefix+url,zero_prefix(self.pic_index)))
        self.pic_index+=1
    def get_image_prefix(self,content):
        """Read the image host from the hidden "hdDomain" field (first '|'-separated entry)."""
        domain=re.compile(r'id="hdDomain" value="(.*?)" />',re.S).findall(content)[0]
        self.image_prefix=domain.split('|')[0]