import os
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import requests

if __name__ == '__main__':
    # Scrape the xiachufang.com front page and mirror its images into
    # ./images-index/, preserving the URL path structure.
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/76.0.3809.132 Safari/537.36 '
    }

    # timeout prevents the script from hanging forever on a stalled connection.
    r = requests.get('https://www.xiachufang.com/', headers=header, timeout=30)
    r.raise_for_status()
    soup = BeautifulSoup(r.text, features='lxml')

    # Collect image URLs. Lazy-loaded images carry the real URL in 'data-src';
    # skip <img> tags that have neither attribute instead of raising KeyError.
    img_list = []
    for img in soup.select('img'):
        if img.has_attr('data-src'):
            img_list.append(img.attrs['data-src'])
        elif img.has_attr('src'):
            img_list.append(img.attrs['src'])

    # Local root directory for the downloaded images.
    img_dir = os.path.join(os.curdir, 'images-index')

    for img in img_list:
        o = urlparse(img)
        # Mirror the URL path under img_dir (drop the leading '/').
        filepath = os.path.join(img_dir, o.path[1:])
        # Normalize URL separators to the local OS separator unconditionally.
        # (The old guard `if filepath.find('/')` was buggy: find() returns -1,
        # which is truthy, when '/' is absent, and 0, falsy, at position 0.)
        filepath = filepath.replace('/', os.sep)
        # Create the target directory and any missing parents; plain os.mkdir
        # fails on missing intermediates and on re-runs when the dir exists.
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        # Rebuild the original image URL without query/fragment; fall back to
        # https for protocol-relative ('//host/path') URLs, whose scheme is ''.
        url = '%s://%s%s' % (o.scheme or 'https', o.netloc, o.path)
        # Stream the download so large images are not held fully in memory.
        resp = requests.get(url, headers=header, timeout=30, stream=True)
        if resp.status_code != 200:
            # Best-effort scrape: skip images that fail instead of crashing.
            continue
        with open(filepath, 'wb') as f:
            for chunk in resp.iter_content(1024):
                f.write(chunk)
