import urllib.request
import urllib.parse
import os
import re

def url_open(url):
    """Fetch *url* pretending to be a desktop browser and return the raw bytes.

    Args:
        url: absolute URL to request.

    Returns:
        bytes: the complete response body.

    Raises:
        urllib.error.URLError: on network or HTTP failure.
    """
    req = urllib.request.Request(url)
    # Spoof a browser User-Agent so the site does not reject the scraper.
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36 115Browser/8.1.0')
    # Fix: close the response when done (the original leaked the socket).
    with urllib.request.urlopen(req) as response:
        print('成功打开网址%s'%url)
        return response.read()
def get_page(url):
    """Return the largest page number advertised on the gallery index page.

    The index page embeds the current maximum page as ``[NNNN]`` in its HTML.

    Args:
        url: the gallery index URL.

    Returns:
        str: the four-digit page number, e.g. ``'2411'``.

    Raises:
        ValueError: if no ``[NNNN]`` marker is present in the fetched HTML.
    """
    html = url_open(url).decode('utf-8')
    # One regex with a capture group replaces the original two-step search;
    # fail loudly instead of the original's TypeError on a missing match.
    match = re.search(r'\[([0-9]{4})\]', html)
    if match is None:
        raise ValueError('no [NNNN] page marker found at %s' % url)
    page = match.group(1)
    print('成功获取最大页码%s'%page)
    return page

def find_imgs(url):
    """Collect all JPEG addresses on one gallery page.

    Args:
        url: the page URL to scan.

    Returns:
        list[str]: protocol-relative image addresses (each starts with ``//``).
    """
    html = url_open(url).decode('utf-8')
    # Fix: [^"]*? instead of greedy .* — the greedy form could swallow
    # everything up to the LAST '.jpg' on a line, producing broken URLs.
    img_addrs = re.findall(r'src="(//[^"]*?\.jpg)', html)
    print('成功获取%s的全部图片地址'%url)
    return img_addrs

def save_imgs(folder, img_addrs):
    """Download every image in *img_addrs* into the current working directory.

    Args:
        folder: kept for call compatibility; unused — the caller (``download``)
            has already chdir'ed into the target directory.
        img_addrs: protocol-relative image addresses (``//host/...jpg``).
    """
    for each in img_addrs:
        filename = each.split('/')[-1]
        # Fix: fetch BEFORE opening the file, so a failed download does not
        # leave an empty/truncated file behind (original opened first).
        img = url_open('http:' + each)
        with open(filename, 'wb') as f:
            f.write(img)
        print('成功下载http:%s的图片'%each)

def download(folder='OOXX', startpages='2411', stoppage='2412',
             base_url='http://jandan.net/ooxx/'):
    """Download the images of pages ``startpages``..``stoppage`` (inclusive).

    Creates *folder* if needed and chdirs into it (note: the cwd change is a
    lasting side effect).

    Args:
        folder: target directory name.
        startpages: first page number, as str or int.
        stoppage: last page number, as str or int.
        base_url: gallery root URL. Fix: the original read a global ``url``
            defined only under ``__main__``, so importing and calling this
            function raised NameError; the default keeps old behavior.
    """
    if not os.path.isdir(folder):
        print('%s目录不存在，自动创建'%folder)
        os.mkdir(folder)
    os.chdir(folder)
    for i in range(int(startpages), int(stoppage) + 1):
        page_url = base_url + 'page-' + str(i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)

if __name__=='__main__':
    url = 'http://jandan.net/ooxx/'
    page_num = int(get_page(url))
    print('当前网站总共有%d页'%page_num)
    page = input('请输入要下载的页数范围1-%d页：'%page_num)
    btwpage = page.split('-')
    # Fix: validate the "start-stop" shape instead of crashing with
    # IndexError on malformed input.
    if len(btwpage) != 2:
        raise SystemExit('invalid range, expected input like "2411-2412"')
    download(startpages=btwpage[0], stoppage=btwpage[1])

