import bs4
from bs4 import BeautifulSoup
from urllib import request

# Target page: the Douban movie chart (top movies list).
url = 'https://movie.douban.com/chart'
# Browser-like request headers copied from a real Chrome session.  Douban's
# anti-bot layer rejects bare urllib requests, so a full header set
# (User-Agent, Referer, Cookie, sec-* client hints) is sent to look like a
# normal browser visit.
# NOTE(review): the Cookie value is tied to one specific browser session and
# will expire — confirm the page still loads without it or refresh it as needed.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    # Accept-Encoding is deliberately left out so the server returns an
    # uncompressed body that .decode("utf-8") can read directly.
    # 'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': 'douban-fav-remind=1; bid=6crGx3Btf5A; ll="118126"; __yadk_uid=CYlIsSDQit57Ud2BHVXkrug7BeFeuA5t; _pk_id.100001.4cf6=9f81eb5f5779c597.1718348062.; _ga=GA1.2.1174006631.1718363645; _gid=GA1.2.1208292639.1718363645; _ga_Y4GN1R87RG=GS1.1.1718363644.1.1.1718363664.0.0.0; _vwo_uuid_v2=D8DCBCE54EE1D9DDA34CF2DB8EE2157EA|4e28d615c7a98dafab4aaff075846d43; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1718430009%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DRJLJ3wPx_xWb3sgCtYhHsyPL7D9N0kFgL-2peQybeaTqqCihhVythd-JD5T5qq80GGQv1VnSuEAQGAfk_FDfwK%26wd%3D%26eqid%3Dfd2e4f5c001fedfc00000002666d292f%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1580897647.1671277777.1718363665.1718430009.13; __utmb=30149280.0.10.1718430009; __utmc=30149280; __utmz=30149280.1718430009.13.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=223695111.1114858246.1718348062.1718363665.1718430009.4; __utmb=223695111.0.10.1718430009; __utmc=223695111; __utmz=223695111.1718430009.4.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic',
    'Host': 'movie.douban.com',
    'Referer': 'https://movie.douban.com/?UMIKJG=TBPGO.xml',
    'sec-ch-ua': '"Chromium";v="9", "Not?A_Brand";v="8"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 SLBrowser/9.0.3.5211 SLBChan/105',
}
def _safe_filename(name):
    """Replace characters that are illegal in Windows/Unix filenames with '_'."""
    return "".join("_" if ch in '\\/:*?"<>|' else ch for ch in name)


# Fetch the chart page; the context manager guarantees the socket is closed
# (the original never closed `response`).
my_request = request.Request(url=url, headers=headers)
with request.urlopen(my_request) as response:
    content = response.read().decode("utf-8")

soup = BeautifulSoup(content, "lxml")

# Each chart entry is an <a class="nbg"> wrapping the poster <img>;
# the alt attribute holds the movie title, src the poster URL.
img_list = soup.select('a[class="nbg"] > img')
# print(img_list)
lst = [[img.attrs['alt'], img.attrs['src']] for img in img_list]

# Headers for the image CDN: reuse the browser identity but drop the
# page-specific Host (wrong for the CDN domain) and the session Cookie.
img_headers = {k: v for k, v in headers.items() if k not in ('Host', 'Cookie')}
for name, src in lst:
    print(name, src, sep="========")
    # The original used request.urlretrieve, which sends no headers at all —
    # Douban's image host applies the same anti-bot/hotlink checks as the page
    # (presumably; confirmed necessary for the page itself above), so download
    # through urlopen with browser headers instead.  The title is sanitized
    # because alt text may contain filesystem-illegal characters like '/'.
    img_request = request.Request(url=src, headers=img_headers)
    with request.urlopen(img_request) as img_response, \
            open(f"{_safe_filename(name)}.webp", "wb") as fh:
        fh.write(img_response.read())