import urllib.request
import urllib.parse
import re
import os
from bs4 import BeautifulSoup


def quick_mkdir(name):
    """Create a folder *name* under the current working directory.

    Existing directories are left alone; creation failures are reported
    but not raised (best-effort, matching the original behavior).

    :param name: folder name
    :return: full path of the folder, with a trailing path separator
    """
    # os.path.join + os.sep instead of hard-coded '\\' so the path is
    # correct on every platform, not only Windows.
    new_directory = os.path.join(os.getcwd(), name) + os.sep
    try:
        # exist_ok=True avoids the racy exists()-then-mkdir pattern.
        os.makedirs(new_directory, exist_ok=True)
    except OSError as e:
        print(e)
    return new_directory


def spider(url):
    """Fetch *url* and download every jpg/png/jpeg image it references
    into a local ``downloads`` directory, numbered 0, 1, 2, ...

    :param url: page URL to scrape
    :return: None
    """
    img_dir = 'downloads'
    quick_mkdir(img_dir)
    # Close the HTTP connection as soon as the body has been read.
    with urllib.request.urlopen(url) as resp:
        html_text = resp.read()
    # get_imgUrl_re yields (full_url, extension) tuples.
    for index, img_url in enumerate(get_imgUrl_re(html_text)):
        print(img_url[0])
        try:
            urllib.request.urlretrieve(
                img_url[0], ('./%s/%d.' + img_url[1]) % (img_dir, index))
        except Exception:
            # Best-effort download: skip images that fail, keep going.
            # (Narrowed from a bare except so Ctrl-C still works.)
            print('出错了...无视')


def get_imgUrl_re(html):
    """Extract image URLs from raw HTML bytes with a regex.

    :param html: raw HTML document as bytes
    :return: list of ``(url, extension)`` tuples for every
             ``img src="...jpg|png|jpeg"`` occurrence
    """
    # errors='replace' keeps scraping best-effort: real pages often
    # contain bytes that are not valid UTF-8, and a strict decode would
    # abort the whole run with UnicodeDecodeError.
    text = html.decode('UTF-8', errors='replace')
    return re.findall(r'img src="(.*?\.(jpg|png|jpeg))"', text)


def get_imgUrl_soup(html):
    """Extract the ``src`` attribute of every ``<img>`` tag via BeautifulSoup.

    :param html: HTML document (str or bytes)
    :return: list of src values; ``None`` entries for img tags lacking src
    """
    soup = BeautifulSoup(html, 'html.parser')
    # find_all is the current API name (findAll is a deprecated alias);
    # return a reusable list rather than a single-use lazy map object.
    return [tag.get('src') for tag in soup.find_all('img')]


if __name__ == '__main__':
    # Entry point: scrape the demo page when run as a script.
    target_url = 'https://bcy.net/'
    spider(target_url)
