# Scrape wallpaper download links from bizhi360.com via BeautifulSoup (bs4)

import os
import time  # 为防止多次requests，ip会被服务器列入黑名单，让程序睡一会

import requests
from bs4 import BeautifulSoup

# Map from a user-facing category name to its URL path segment on bizhi360.com.
# NOTE(review): the key '创业' ("startup") maps to pinyin 'chuangyi', which reads
# as '创意' ("creative") — confirm against the site which spelling is intended.
dic_kind = {'卡通':'katong','动漫':'dongman','风景':'fengjing','美女':'meinv','唯美':'weimei','动态':'dongtai','非主流':'feizhuliu','创业':'chuangyi',
            '可爱':'keai','宽屏':'kuanping','3d':'3d','游戏':'youxi','动物':'dongwu',
        '汽车':'qiche','月历':'yueli','手机':'shouji','全部桌面壁纸':'desk'}
wabpage = 'http://bizhi360.com/'

# Prompt until the user enters a known category.
kind = input('输入图片的类型：')
while kind not in dic_kind:
    print('没有该类型的图片！')
    # BUG FIX: the original printed the builtin `list` type object
    # (`print(list)`); show the actual available categories instead.
    print(list(dic_kind))
    kind = input('输入图片的类型：')
kind2 = dic_kind[kind]  # URL path segment for the chosen category
print('正在爬取...')
# 1. Fetch the category index page.
url = 'http://bizhi360.com/{0}/'.format(kind2)
resp = requests.get(url)
resp.encoding = 'utf-8'

# 2. Parse the index page; each wallpaper entry lives inside an <li>.
obj = BeautifulSoup(resp.text, 'html.parser')
pictures = obj.find_all('li')

# Make sure the output directory exists before writing (the original crashed
# with FileNotFoundError when it was missing).
# NOTE(review): the folder name is hard-coded ("唯美壁纸") regardless of the
# chosen category — confirm whether a per-category folder was intended.
save_dir = '../唯美壁纸/'
os.makedirs(save_dir, exist_ok=True)

for pic in pictures:
    i = pic.find('a')
    if i is None:
        # <li> without a link (e.g. navigation item) — not a picture entry.
        continue
    picture_name = i.text
    href = i.get('href')            # relative link to the single-picture page
    if not href:
        continue
    url2 = wabpage + href.strip('/')
    resp2 = requests.get(url2)      # fetch the single-picture detail page
    resp2.encoding = 'utf-8'

    # 3. Locate the download link on the detail page.
    obj2 = BeautifulSoup(resp2.text, 'html.parser')
    pic_down = obj2.find('a', attrs={'class':'download'})
    if pic_down is None:
        # Page layout changed or no download button — skip instead of crashing.
        continue
    download_href = pic_down.get('href')

    # 4. Download the raw image bytes and save under the image's own filename.
    img_resp = requests.get(download_href)
    img_name = download_href.split('/')[-1]
    with open(save_dir + img_name, 'wb') as f:
        f.write(img_resp.content)   # .content is the raw bytes
    print('图片名:{0}  链接：{1}'.format(picture_name, download_href))
    time.sleep(1)                   # be polite: avoid getting the IP banned

print('over!')
