# -*- coding: utf-8 -*-
import urllib
import urllib.request
import urllib.parse
import http
import threading
from html.parser import HTMLParser
import io
import os
import time
import socket
from datetime import datetime
import gzip
import re
import base64
import execjs

# Convert a decimal integer to any base in [2, 36].
def toRadix(val, base):
    """Return *val* rendered in *base* (2-36) as a lowercase string.

    Contract preserved from the original: returns -1 (an error code, not a
    string) when the arguments cannot be coerced to int, '0' for val == 0,
    and '' for negative values.
    """
    try:
        val = int(val)
        base = int(base)
    except Exception as err:
        # Keep the original best-effort behavior: report and signal -1.
        print(err)
        return -1
    digits = []
    if val == 0:
        digits.append('0')
    while val > 0:
        # BUG FIX: the original used int(val/base); true division goes
        # through float and loses precision for large integers, producing
        # wrong digits.  divmod keeps everything in exact int arithmetic.
        val, m = divmod(val, base)
        if m <= 9:
            digits.append(chr(m + ord('0')))
        else:
            digits.append(chr(m - 10 + ord('a')))
    return ''.join(reversed(digits))
# Shorthand: render a value in base 36.
def to36(val):
    """Return *val* as a base-36 string (delegates to toRadix)."""
    return toRadix(val, 36)
# Obfuscation helper mirroring the JS "packer" e() function.
def func_e(c, a):
    """Encode integer *c* in base *a* as the site's obfuscated JS does.

    Digits below 36 use 0-9a-z; digit values above 35 map to chr(d + 29)
    (i.e. 'A'-'Z' for bases up to 62).  Recurses on the higher-order part.
    """
    c, a = int(c), int(a)
    if c < a:
        res = ''
    else:
        # BUG FIX: the original used int(c/a); float division loses
        # precision for large values.  Floor division stays exact.
        res = func_e(c // a, a)
    c = c % a
    if c > 35:
        res += chr(c + 29)
    else:
        # Identical to the to36(c) the original called for 0 <= c <= 35,
        # without the indirect recursion through toRadix.
        res += '0123456789abcdefghijklmnopqrstuvwxyz'[c]
    return res

def requestFor(url, vol_url=None):
    """Fetch *url* and return the response body as bytes (gzip-decoded).

    Retries forever on any error.  When *vol_url* is given it is sent as
    the Referer along with browser-like image headers (used when pulling
    page images that check the referrer).
    """
    crequest = urllib.request.Request(url=url)

    if vol_url:
        crequest.add_header('Accept', 'image/webp,image/*,*/*;q=0.8')
        # BUG FIX: the header name was misspelled 'Accpet-Encoding', so the
        # server never saw it.  Advertise only gzip, because gzip is the
        # only content-encoding this function knows how to decompress.
        crequest.add_header('Accept-Encoding', 'gzip')
        crequest.add_header('Accept-Language', 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4')
        crequest.add_header('Connection', 'keep-alive')
        crequest.add_header('Referer', vol_url)
    crequest.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36')

    while True:
        try:
            cresponse = urllib.request.urlopen(crequest)
        except Exception as e:
            print('Error cresponse part I:', e)
            time.sleep(1)  # brief backoff so a dead server is not hammered
            continue
        try:
            content = cresponse.read()
        except Exception as e:
            cresponse.close()
            # BUG FIX: not every exception carries .code; the original
            # print(e, e.code) raised AttributeError and hid the real error.
            print('Error reading response:', e)
            continue
        print(cresponse.status)
        for header in cresponse.getheaders():
            print(header)
        encoding = cresponse.getheader('content-encoding')
        if encoding and 'gzip' in encoding:
            content = gzip.decompress(content)
        cresponse.close()
        return content

def down_page(url):
    """Download *url* and save the raw bytes to ip.html."""
    body = requestFor(url)
    with open('ip.html', 'wb') as out:
        out.write(body)
    print('down page over')

def down_vol(url):
    """Download a volume page at *url* and save the raw bytes to iv.html."""
    body = requestFor(url)
    with open('iv.html', 'wb') as out:
        out.write(body)
    print('down vol over')

def parse_page():
    """Print the volume-list section of the previously saved ip.html."""
    class parser(HTMLParser):
        # Prints href/title of each <a> and counts them.
        def __init__(self):
            HTMLParser.__init__(self)
            self.count = 0

        def handle_starttag(self, tag, attrs):
            if tag != 'a':
                return
            infos = dict(attrs)
            print(infos['href'], infos['title'])
            self.count += 1

    p = parser()  # NOTE(review): instantiated but never fed in this path
    with open('ip.html', 'rb') as f:
        content = f.read().decode('utf-8', 'ignore')
    # Everything between the first </h4> and the following </div> is the
    # volume listing on this site's layout.
    vols_content = re.compile(r'</h4>(.*?)</div>', re.S | re.M).findall(content)[0]
    print(vols_content)

def parse_vol():
    """Print the page-list section of the previously saved iv.html."""
    class parser(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.site_prefix = 'http://www.fumanhua.net/manhua/'
            self.mid = ''
            self.pics = []

        def handle_starttag(self, tag, attrs):
            if tag == 'a':
                print(attrs)

    p = parser()  # NOTE(review): instantiated but never fed in this path
    with open('iv.html', 'rb') as f:
        content = f.read().decode('utf-8', 'ignore')
    # Everything between id="pageList"> and the closing </ul>.
    page_urls_content = re.compile(r'id="pageList">(.*?)</ul>', re.S | re.M).findall(content)[0]
    print(page_urls_content)
    

def work():
    """Driver: download the index page; other stages are run manually."""
    # Alternative source kept for reference: http://www.52kkm.org/pisa/
    down_page("http://manhua.dmzj.com/xsyxdlpbknsns/")
    # parse_page()
    # down_vol("http://www.52kkm.org/pisa/778.html")
    # parse_vol()

def error_test(url):
    """Fetch *url* (expected to error) and dump the body to error.html."""
    body = requestFor(url)
    with open('error.html', 'wb') as dump:
        dump.write(body)
    print('down error page over')
    

# Script entry point: run the download driver.
if __name__=='__main__':
    work()
    #error_test("http://www.mangareader.net/magixxxxoo")
    pass

