from bs4 import BeautifulSoup    #用于解决所有HTML信息，从中获取所需的信息
from urllib.request import  urlretrieve    #使程序智能化，自动创建一个临时的文件进行保存
from  urllib import parse
import requests,os
#  Style note: written procedurally rather than object-oriented.
# Shared request headers are defined inside the main block below.

# Program entry point

# Scrape a paginated photo gallery in two passes:
#   pass 1 - collect (title, detail-page URL) pairs from three listing pages;
#   pass 2 - open each detail page, locate <img id="Image2"> and save the image.
if __name__ == '__main__':
    # The site rejects Python's default User-Agent, so every request --
    # including the final image download -- must send a browser UA string.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}

    # exist_ok=True replaces the racy, CWD-dependent
    # "if 'img2' not in os.listdir()" check.
    os.makedirs("img2", exist_ok=True)

    # Pass 1: collect (title, detail URL) tuples.  The original joined them
    # into 'title=url' strings and later split on '=', which breaks as soon
    # as a title or URL itself contains an '=' character.
    list_url = []
    for num in range(1, 4):
        if num == 1:
            url = 'http://www.apbianmin8.com/tupian/tuji2745.aspx'
        else:
            url = 'http://www.apbianmin8.com/tupian/tuji2745_page%d.aspx' % num

        req = requests.get(url=url, headers=headers)
        req.encoding = 'utf-8'
        # (The old `bf.encoding = 'utf-8'` only set an unused attribute on
        # the soup object; decoding is already handled by req.encoding.)
        bf = BeautifulSoup(req.text, 'lxml')

        for each in bf.find_all(class_='disp_img1'):
            link = each.a
            if link is None or link.img is None:
                continue  # skip malformed entries instead of raising AttributeError
            each_url = 'http://www.apbianmin8.com/tupian/' + str(link.get('href'))
            list_url.append((link.img.get('alt'), each_url))
        print(list_url)
    print("数据采集完成")

    # Pass 2: visit each detail page and download its image.
    for title, detail_url in list_url:
        # Image path (detail page URL)
        print("地址", detail_url)
        img_info_filename = title + '.jpg'
        print("名称", img_info_filename)

        req_img = requests.get(url=detail_url, headers=headers)
        req_img.encoding = 'utf-8'
        bf_img = BeautifulSoup(req_img.text, 'lxml')

        img_url = bf_img.find_all('img', id='Image2')
        print('img_url', img_url)
        if not img_url:
            continue  # detail page without the expected image element
        img_url_src = img_url[0].get('src')
        print("img_url_filename", img_url[0].get('alt'))
        print("imh_url_src", img_url_src)

        # Download via requests so the browser User-Agent is sent;
        # urlretrieve() uses Python's default UA, which this site blocks.
        img_resp = requests.get(url=img_url_src, headers=headers)
        # NOTE(review): `title` comes straight from the page's alt text --
        # assumes it contains no path separators; confirm before wider reuse.
        with open(os.path.join("img2", img_info_filename), 'wb') as fh:
            fh.write(img_resp.content)

