import os
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Listing page for the Four Great Classical Novels on shicimingju.com.
webPage_url = "https://www.shicimingju.com/bookmark/sidamingzhu.html"
# Browser-like request headers so the site serves the normal HTML page.
# NOTE: "accept-encoding" is deliberately NOT set — the original advertised
# "br" (brotli), which requests cannot decode unless the optional brotli
# package is installed, producing garbled response.text. requests negotiates
# gzip/deflate on its own.
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "max-age=0",
    "cookie": "Hm_lvt_649f268280b553df1f778477ee743752=1700569256,1700634792; Hm_lpvt_649f268280b553df1f778477ee743752=1700636679",
    "sec-ch-ua": "\"Google Chrome\";v=\"87\", \" Not;A Brand\";v=\"99\", \"Chromium\";v=\"87\"",
    "sec-ch-ua-mobile": "?0",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36"
}
response = requests.get(webPage_url, headers=headers)
# Fail fast on a non-2xx status instead of silently parsing an error page
# (the original only printed on 200 and carried on regardless).
response.raise_for_status()
print("请求成功")
# Let requests guess the real charset from the body to avoid mojibake.
response.encoding = response.apparent_encoding
soup = BeautifulSoup(response.text, 'html.parser')
# Each book entry lives in a <div class="book-item">.
results = soup.find_all('div', class_='book-item')
print(len(results))  # number of book entries found on the page
# 我们将我们查到的数据 遍历出来
# Headers for the image requests. The referer matters: the image host can
# reject hot-linked requests without it. Built ONCE — it is identical for
# every image (the original rebuilt this dict on each iteration).
# "accept-encoding: br" removed for the same brotli-decoding reason as above.
imgheaders = {
    "accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "cookie": "Hm_lvt_649f268280b553df1f778477ee743752=1700569256,1700634792; Hm_lpvt_649f268280b553df1f778477ee743752=1700636686",
    "pragma": "no-cache",
    "referer": "https://www.shicimingju.com/",
    "sec-ch-ua": "\"Google Chrome\";v=\"87\", \" Not;A Brand\";v=\"99\", \"Chromium\";v=\"87\"",
    "sec-ch-ua-mobile": "?0",
    "sec-fetch-dest": "image",
    "sec-fetch-mode": "no-cors",
    "sec-fetch-site": "same-site",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36"
}

# Walk every book entry, download its cover image into a folder named
# after the book.
for result in results:
    # Book title: the last <a> inside the item is the title link.
    dirName = result.find_all('a')[-1].text
    print(dirName)

    # Cover <img> tag; skip entries that have no image rather than crash.
    imgTag = result.find('img')
    if imgTag is None:
        continue
    imgUrl = imgTag.get('src')
    if not imgUrl:
        continue
    # Resolve site-relative or protocol-relative src paths against the page.
    imgUrl = urljoin(webPage_url, imgUrl)

    # BUG FIX: the original called requests.get(imgUrl, imgheaders), which
    # passes the dict as the *params* argument (query string) — the headers
    # were never sent. They must be passed as headers=.
    imgresponse = requests.get(imgUrl, headers=imgheaders)
    imgresponse.raise_for_status()

    # Name the file after the last path segment of the image URL.
    filename = imgUrl.split('/')[-1]
    # Create the per-book folder; exist_ok avoids the exists()+mkdir race.
    os.makedirs(dirName, exist_ok=True)
    filePath = dirName + '/' + filename
    # Binary write mode — image bytes, no text decoding.
    with open(filePath, 'wb') as f:
        f.write(imgresponse.content)
        print(filePath + "下载成功")
