import urllib.request
import os


def url_open(url):
    """GET *url* and return the raw response body as bytes.

    A desktop-browser User-Agent header is attached so the target site
    does not reject the scripted request.
    """
    req = urllib.request.Request(url)
    req.add_header('user-agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36')
    # Fix: close the HTTP connection deterministically instead of
    # leaking it until garbage collection.
    with urllib.request.urlopen(req) as response:
        return response.read()




def get_page(url):
    """Scrape pagination links from a jandan.net /ooxx listing page.

    Returns a de-duplicated list of fragments of the form
    '//jandan.net/ooxx/<token>#comments' taken from the page's pager
    anchors.  The pager appears at both the top and the bottom of the
    page, so duplicates are removed via a set (order is unspecified).
    """
    marker = 'href="//jandan.net/ooxx/'
    page_list = []
    html = url_open(url).decode('utf-8')

    a = html.find(marker)
    while a != -1:
        # The closing '#comments' must appear within a short window of
        # the anchor start; otherwise stop scanning.
        b = html.find('#comments', a, a + 255)
        if b == -1:
            break
        # Skip the leading 'href="' (6 chars) and keep '#comments' (9 chars),
        # yielding e.g. //jandan.net/ooxx/MjAyMDExMTgtOTk=#comments
        page_list.append(html[a + 6:b + 9])
        a = html.find(marker, b)

    # Fix: build the deduplicated list once instead of calling
    # list(set(...)) separately for the print and the return.
    unique_pages = list(set(page_list))
    print(unique_pages)
    return unique_pages




def find_img(page_url):
    """Collect jpg image URLs from one listing page.

    Scans the page HTML for 'img src=' attributes ending in '.jpg' and
    returns them with the 'http:' scheme prepended — the page uses
    protocol-relative '//host/...' links.
    """
    html = url_open(page_url).decode('utf-8')
    img_addrs = []

    a = html.find('img src=')
    while a != -1:
        # A matching '.jpg' must appear within 255 chars of the tag;
        # otherwise this src is not a jpg and the match is skipped.
        b = html.find('.jpg', a, a + 255)
        if b != -1:
            # Skip 'img src="' (9 chars) and include the '.jpg' suffix.
            img_addrs.append(html[a + 9:b + 4])
        else:
            # Advance past the current match so the scan makes progress.
            b = a + 9
        a = html.find('img src=', b)

    img_addrs_final = []
    for each in img_addrs:
        print("原始链接：%s" % each)
        # Fix: '%' binds tighter than '+', so the original
        # ("...%s" % 'http:'+each) formatted first and concatenated
        # afterwards; parenthesize the intended operand.
        print("添加请求协议：%s" % ('http:' + each))
        img_addrs_final.append('http:' + each)
    return img_addrs_final





def save_imgs(folder, img_address_list):
    """Download every URL in *img_address_list* into the working directory.

    NOTE(review): *folder* is accepted but unused — the caller
    (download) has already chdir'd into the target folder, so bare
    file names land in the right place.  Verify before "fixing" this
    with os.path.join, which would double the folder in the path.
    """
    for address in img_address_list:
        # Use the last path component of the URL as the local file name.
        name = address.split("/")[-1]
        with open(name, 'wb') as out:
            out.write(url_open(address))
    print("爬取完成！！！！！")





def download(folder='ooxx'):
    """Entry point: crawl jandan.net/ooxx and save every jpg it finds.

    Creates *folder* if missing, chdirs into it (so save_imgs can write
    bare file names), harvests pager links from a fixed start page, then
    downloads all images from each listed page.
    """
    # Create the target folder if it does not exist, then work inside it.
    if not os.path.exists(folder):
        os.mkdir(folder)
    os.chdir(folder)

    # Fixed start page from which the pager links are scraped.
    url = 'http://jandan.net/ooxx/MjAyMDExMTgtMTAw#comments'
    # Pager links are protocol-relative ('//...'); prepend the scheme.
    url_protocol = 'http:'
    page_list = get_page(url)

    # Fix: iterate directly with enumerate instead of range(len(...));
    # removed the unused root_url local and dead commented-out code.
    for index, page in enumerate(page_list):
        print(index)
        page_url = url_protocol + page
        print("链接地址：%s" % page_url)
        img_address_list = find_img(page_url)
        save_imgs(folder, img_address_list)

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    download()
