import re
import socket
import os
import requests
import urllib.request

# Entry page of the comic to scrape (the chapter index page).
url = 'http://www.hhxiee.cc/Comic/1815454/'
# Old manual-paging experiment, kept for reference:
##in_code = int(input('chapter count: '))
##middle_code = url[26:33].strip()
####print(middle_code)
##final_code = url[34:40].strip()
##initial_page = int(final_code)
##print(initial_page)
##total_page = int(initial_page + in_code)
##print(total_page)
# NOTE(review): this module-level z is shadowed by a local `z = 0` in
# download_pic() and never read at module level -- looks dead; confirm
# before removing.
z = 1

# Image servers dm01..dm16; a chapter's server is picked later via
# ServerList[int(final_code) - 1].
ServerList = ["http://img2.hhcomic.net/dm%02d/" % i for i in range(1, 17)]
# NOTE(review): the original hard-coded slot 13 to a different host/port,
# breaking the dm01..dm16 pattern -- preserved verbatim; confirm intent.
ServerList[13] = "http://8.8.8.8:99/dm14/"
##print(ServerList)


# Browser-like User-Agent so the site serves normal pages.
# BUG FIX: the original used a backslash line-continuation *inside* the
# string literal, which spliced the next line's leading spaces into the
# header value ("AppleWebKit/5           37.36"); implicit string-literal
# concatenation yields the intended, space-free token.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/'
                         '537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 '
                         'Safari/537.36'}

def unsuan(s, sk):
    """Decode the site's obfuscated picture-list string.

    `sk` is the decoding key: every character except the last represents
    the digit equal to its position in `sk`; the final character of `sk`
    is the separator between character codes.  After substituting each key
    character in `s` with its digit, `s` becomes separator-delimited
    decimal character codes, which are converted back to text with chr().
    """
    key, sep = sk[:-1], sk[-1]
    # Substitute sequentially exactly like the original, but with
    # str.replace instead of re.sub -- the original treated each key
    # character as a regex pattern, a hazard for metacharacter keys.
    for digit, ch in enumerate(key):
        s = s.replace(ch, str(digit))
    return ''.join(chr(int(code)) for code in s.split(sep))


def mkdir(path):
    """Create directory `path` (including parents) if it does not exist.

    Surrounding whitespace and trailing backslashes are stripped first.
    Prints a status message either way and returns True when the
    directory was created, False when it already existed.
    """
    # `os` is already imported at module level; the original re-imported
    # it locally for no reason.
    path = path.strip().rstrip("\\")
    if os.path.exists(path):
        # Already present -- nothing to do.
        print(path + ' 目录已存在')
        return False
    os.makedirs(path)
    # BUG FIX: report success only after makedirs actually succeeded --
    # the original printed the success message before attempting creation.
    print(path + ' 创建成功')
    return True
 
### 定义要创建的目录
##mkpath= '.\\' + title + '\\web\\'
# 调用函数
##mkdir(mkpath)


def get_tittle():
    """Fetch the chapter page at global `new_link`, store its <title>
    text in global `title`, and print it.

    The response is decoded as gb2312 (the site's encoding).  Raises
    AttributeError if the page has no <title> tag.
    (Name kept as-is -- "tittle" typo included -- since callers use it.)
    """
    global title
    # Dropped the original's unused `global url` declaration: this
    # function never assigns url.
    html = requests.get(new_link, headers=headers)
    html.encoding = 'gb2312'
    title = re.search('<title>(.*?)</title>', html.text, re.S).group(1)
    print(title)

def download_pic():
    """Download every image of the chapter page held in global `new_link`.

    Reads module globals: new_link, headers, title, title_real,
    final_code, ServerList.  Images are written to
    .\\<title_real>\\<title>\\<n>.jpg (Windows-style paths).
    Calls exit() -- terminating the whole script via SystemExit -- when
    the page has no `PicListUrl` variable, which is treated as "all
    chapters finished".
    """
    z = 0   # per-chapter image counter, doubles as the file name
    html = requests.get(new_link, headers = headers).content.decode('gb2312')
    try:
        # Obfuscated image list embedded in the page's inline JavaScript.
        vip = re.search('var PicListUrl = "(.*?)";',html,re.S).group(1)
    except AttributeError as e:
        # No PicListUrl on this page: assume the run is complete.
        print('下载完成!')
        exit()
    ##print(vip)
    # Decode with the site's fixed substitution key, then split into
    # per-image relative paths.
    lll = unsuan(vip, 'tahfcioewrm')
    zz = lll.split('|')
    imgPath = '.\\' + title_real + "\\"+ title+'\\%s.jpg'

    # Directory this chapter's images are saved into.
    mkpath = '.\\' + title_real+ '\\' + title
    mkdir(mkpath)

    for each in zz:
    ##    zzz = zz[each].strip()
        # final_code comes from the last character of a chapter link;
        # presumably it selects the image server -- TODO confirm.
        pic_link = ServerList[int(final_code)-1] + each

##        print(pic_link)
##        picture = requests.get(pic_link).content
##        with open('.\\' + title_real + "\\"+ title+'\\%s.jpg' % z, "wb") as code:
##            code.write(picture)
        # Skip files already on disk so an interrupted run can resume.
        if os.path.exists('.\\' + title_real + "\\"+ title+'\\%s.jpg' %z):
            print('文件已存在')
            z+=1
            continue
        else:
            try:
                print("正下载第%s个图片...."% z)
                socket.setdefaulttimeout(10)
                urllib.request.urlretrieve(pic_link,imgPath %z)
                z+=1
            except Exception as e:
                # Single retry with a longer timeout; a second failure
                # propagates and aborts the script.
                print ('错误信息:',e)
                print ('重新下载..')
                socket.setdefaulttimeout(20)
                print("正下载第%s个图片...."% z)
                urllib.request.urlretrieve(pic_link,imgPath %z)
                z+=1
            
##print(ServerList[int(final_code)-1])

##sub实现翻页
##for i in range(initial_page,total_page+1):

# --- main script: fetch the chapter index, then walk every chapter ---
r = requests.get(url,headers = headers)
r.encoding = 'gb2312'  # the site serves gb2312-encoded pages
# Comic title, taken from an inline JS variable on the index page.
title_real = re.search('var wumiiTitle = "(.*?)";',r.text,re.S).group(1)
##print (title_real)
# Relative links to the individual chapter pages.
part_link = re.findall('<li><a href=(.*?) target=_blank>',r.text,re.S)
# Last character of the first chapter link; used (minus one) as an index
# into ServerList -- presumably a server-selection digit; TODO confirm.
final_code = part_link[0][-1].strip()
part_link1 = part_link[-1::-1]
## reversed copy of the chapter list (process chapters in reverse order)

# Resolve each chapter's title, then download its images.
for each in part_link1:
    new_link = 'http://www.hhxiee.cc/' + each
    get_tittle()
    download_pic()

    
##    new_link = re.sub('/xiee/'+middle_code+'/\d+','/xiee/'+middle_code+'/%d'% i,url,re.S)
#有时候sub不能用的原因是反斜杠’\‘用错了
##    print(new_link)
   
##vip = re.findall('src="(.*?)"><script>',html.text)
##

##print(vip)

##sub实现翻页
##for i in range(30,total_page+1):
##    new_link = re.sub('/1243\d+','/1243%d'% i,url,re.S)
####"\d+"是数字的意思
##
####    print (new_link)
##    for x in range(1,total_page1+1):
##        new_link1 = re.sub('v=\d+','v=%d'% x,new_link,re.S)
##
##        html = requests.get(new_link1, headers = headers)
##        html.encoding = 'gb2312'
##        vip = re.findall('src="(.*?)"><script>',html.text)
##        for each in vip:
##            print(each)
##            picture = requests.get(each).content
##            with open("%s.jpg" % z, "wb") as code:
##                code.write(picture)
##                z+=1
##                print("正下载第%s个图片...."% x)
            
    
