import requests
from bs4 import BeautifulSoup
import os
# Base URL of the target column page; query parameters are added per request.
url = 'http://www.qvc.edu.cn/column.jsp?'
# Request headers: spoof a desktop Chrome User-Agent so the site treats us
# like a normal browser instead of a script.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'
}
# Fetch the listing pages of scenery photos and collect image URL/title pairs.
def get_photo_info():
    """Scrape pages 1-3 of the photo listing and collect image metadata.

    Returns:
        list[dict]: one ``{'url': ..., 'title': ...}`` dict per photo found.
    """
    photo_list = []
    for page in range(1, 4):
        # Query-string parameters sent with each page request.
        params = {
            'id': '1556089527950',
            'current': page
        }
        # The site serves GBK-encoded HTML, so decode the raw bytes explicitly.
        # A timeout keeps a stalled connection from hanging the scraper.
        page_html = requests.get(
            url, headers=headers, params=params, timeout=10
        ).content.decode('gbk')
        # Build a BeautifulSoup tree so we can use CSS selectors.
        bs = BeautifulSoup(page_html, 'lxml')
        # Each '.xyfg1' element wraps one photo entry.
        for item in bs.select('.xyfg1'):
            link = item.select_one('.xyfg_pic > a')
            divs = item.find_all('div')
            # Skip malformed entries instead of crashing on a missing
            # anchor/div (the original [0]/[1] indexing raised IndexError).
            if link is None or len(divs) < 2 or divs[1].a is None:
                continue
            photo_list.append({'url': link['href'],
                               'title': divs[1].a.get_text()})
    return photo_list

def download_img(url_list):
    """Download every image described in *url_list* into the ./images folder.

    Args:
        url_list: list of ``{'url': ..., 'title': ...}`` dicts as produced
            by ``get_photo_info()``.
    """
    # Create the target directory once, up front. ``exist_ok=True`` removes
    # the check-then-mkdir race and the duplicated write code the original
    # carried in both branches of its if/else.
    os.makedirs('images', exist_ok=True)
    for i in url_list:
        # Fetch the raw image bytes; timeout guards against a stalled server.
        img = requests.get(i['url'], headers=headers, timeout=10).content
        # Store the image under its scraped title.
        with open(f'images/{i["title"]}.jpg', mode='wb') as f:
            f.write(img)
            print(f'{i["title"]}图片存储成功！')

def spider():
    """Entry point: collect photo metadata, then download every image."""
    download_img(get_photo_info())

if __name__ == '__main__':
    # Run the scraper only when this file is executed as a script.
    spider()
