# Scrape images from jandan.net/ooxx
import urllib.request
import os
import random

def url_open(url):
    """Fetch *url* with a browser-like User-Agent and return the raw bytes.

    The custom User-Agent header is set because the target site rejects
    the default urllib identification.
    """
    req = urllib.request.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36')
    # Use the response as a context manager so the underlying socket is
    # always closed (the original leaked it).
    with urllib.request.urlopen(req) as response:
        return response.read()

# Get the current comment-page number
def get_page(url):
    """Return the current comment-page number scraped from *url*.

    Locates the 'current-comment-page' marker in the HTML and extracts
    the digits up to the closing ']'. Returns them as a string.
    """
    html = url_open(url).decode('utf-8')

    # +23 skips past the marker text and the characters up to the digits.
    start = html.find('current-comment-page') + 23
    end = html.find(']', start)

    page = html[start:end]
    print(page)
    return page
    
# Find image addresses in a page
def find_img(url):
    """Collect the .jpg image addresses found in the page at *url*.

    Scans for 'img src=' tags and, for each one, takes the address up to
    and including a '.jpg' suffix found within the next 255 characters.
    Returns the list of address strings (without the leading quote).
    """
    html = url_open(url).decode('utf-8')
    addresses = []

    pos = html.find('img src=')
    while pos != -1:
        end = html.find('.jpg', pos, pos + 255)
        if end == -1:
            # No .jpg nearby: skip this tag and keep scanning.
            resume = pos + 9
        else:
            # +9 drops `img src="`; +4 keeps the '.jpg' extension.
            addresses.append(html[pos + 9 : end + 4])
            print(addresses)
            resume = end

        pos = html.find('img src=', resume)

    return addresses

# Save images to disk
def save_img(folder, img_addr):
    """Download every image in *img_addr* into the current directory.

    folder: unused here (the caller has already chdir'ed into it); kept
            for interface compatibility.
    img_addr: list of protocol-relative addresses like '//host/x.jpg';
              'http:' is prepended before fetching.
    """
    for each in img_addr:
        # The last path component is used as the local file name.
        file_name = each.split('/')[-1]
        print('each = ', each)
        # Bug fix: download *before* opening the file, so a failed fetch
        # no longer leaves an empty file behind.
        img = url_open('http:' + each)
        with open(file_name, 'wb') as f:
            f.write(img)
        
        
    
# Download images (entry point)
def down_img(folder='mm'):
    """Download images from jandan.net/ooxx into *folder*.

    Creates *folder* if it does not exist, chdirs into it, then iterates
    over pages, scraping and saving the images found on each.
    """
    random_data = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

    # NOTE(review): down_num is read but never used to bound the loop
    # below — presumably it should limit the number of downloads; confirm
    # the intent before wiring it in.
    down_num = int(input('请输入需要下载的图片数量：'))
    # Bug fix: test the *folder* argument, not the hard-coded 'mm'.
    if os.path.exists(folder):
        print('目录已存在')
    else:
        os.mkdir(folder)
        print('创建目录成功')
    os.chdir(folder)

    url = 'http://jandan.net/ooxx'
    page_num = int(get_page(url))

    for i in range(page_num):
        # (Removed dead `page_num -= i`: range() was already fixed, so the
        # mutation had no effect on the iteration.)
        x = random.choice(random_data)
        y = random.choice(random_data)
        # NOTE(review): the page id is built from a fixed prefix plus one
        # random letter, so the URL is unlikely to name a real page, and
        # `y` is generated but never used — TODO confirm the intended
        # base64 page-id scheme.
        page_url = url + '/MjAyMDEyMTctO' + x + '==#comments'
        print('page_url = ', page_url)
        img_addr = find_img(page_url)
        save_img(folder, img_addr)

# Script entry point: run the downloader when executed directly.
if __name__ == '__main__':
    down_img()


# NOTE: leftover debug snippet (superseded by url_open); kept for reference.
#req = urllib.request.Request(url)
#response = urllib.request.urlopen(req)
#html = response.read().decode('utf-8')
#print(html)
