#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import bs4
import requests
import urllib.request as ur
import ssl
# Globally disable HTTPS certificate verification for urllib
# (used by ur.urlretrieve in load_image); pairs with verify=False
# on the requests call there.
# NOTE(review): this removes MITM protection for the entire process —
# confirm the target site's certificate is really broken before keeping this.
ssl._create_default_https_context = ssl._create_unverified_context 

def load_href(url):
    """Collect absolute http/https links from the page at *url*.

    Parameters:
        url: address of the page to fetch.

    Returns:
        list of href strings that start with ``http://`` or ``https://``
        and differ from *url* itself.
    """
    response = requests.get(url)
    soup = bs4.BeautifulSoup(response.text, 'lxml')
    # Bug fix: the original checked startswith("http:") only, which does
    # NOT match "https:" — every https:// link was silently dropped,
    # i.e. almost everything on a modern site.
    return [
        a['href']
        for a in soup.find_all('a')
        if a.has_attr('href')
        and a['href'].startswith(('http:', 'https:'))
        and a['href'] != url
    ]

# Proxy configuration passed to requests.get in load_image.
# Empty dict means a direct connection; fill in e.g.
# {"http": "http://host:port", "https": "http://host:port"} to use a proxy.
proxies = {
} 
 

def load_image(url):
    """Download every .jpg/.jpeg/.png <img> on the page at *url* into images/.

    Parameters:
        url: address of the page to scan for <img src=...> tags.

    Side effects:
        Creates the images/ directory if it is missing and writes one file
        per image, named with a random integer (name collisions possible).
        Individual download failures are printed and skipped, not raised.
    """
    import os
    import random
    from urllib.parse import urljoin

    # verify=False mirrors the module-level ssl override: certificate
    # validation is deliberately disabled for this target site.
    response = requests.get(url, proxies=proxies, verify=False)
    soup = bs4.BeautifulSoup(response.text, 'lxml')

    body = soup.body
    if body is None:
        # Error pages / fragments may lack a <body>; the original crashed
        # with AttributeError here.
        return

    # Bug fix: without this, the first run fails with FileNotFoundError
    # because urlretrieve cannot create the target directory.
    os.makedirs('images', exist_ok=True)

    for tag in body.find_all('img'):
        if not tag.has_attr('src'):
            continue
        # Bug fix: resolve relative src values (e.g. "/pic/a.jpg") against
        # the page URL so urlretrieve always receives an absolute address.
        image = urljoin(url, tag['src'])
        try:
            print('loading', image)
            if image.endswith(('.jpg', '.jpeg', '.png')):
                name = '%d.jpeg' % random.randint(0, 10000) # random file name
                print('saving', image, 'as', name)
                ur.urlretrieve(image, 'images/'+name)
                print('have saved', image, 'as', name)
        except Exception as e:
            # Best effort: one failed download must not stop the crawl.
            print(e)

# Crawl target: the site's front page.
url = "https://www.meizitu.com"

# Download the images found directly on the front page.
# NOTE(review): runs at import time — consider wrapping in an
# `if __name__ == "__main__":` guard so importing this module has no
# side effects.
load_image(url)

# Alternative mode (disabled): crawl one level deep — gather outbound
# links from the front page first, then fetch images from each of them.
# urls = load_href(url)
# for url in urls:
#     print('try to open', url)
#     try:
#         print('loading images in', url)
#         load_image(url)
#     except:
#         print('fail to access', url)
