import os
import random
import time

import requests                                        # HTTP client
from bs4 import BeautifulSoup

headers = {                                            # browser-like headers so Douban's server treats us as a normal browser
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36 Edg/91.0.864.37"
}
# NOTE(review): these proxy entries have no port and no scheme (e.g. "http://ip:port"),
# so requests almost certainly cannot use them as-is — verify against a working proxy list.
proxies = ['122.152.196.126', '114.215.174.227', '119.185.30.75']

def fetchmovies(url, headers=headers):
    """Scrape one page of the Douban Top-250 list.

    For every movie item on the page this prints the title, rating, crew
    summary and tagline, downloads the poster image to ``豆瓣电影/<title>.jpg``
    and writes ``<title>,<tagline>`` to ``豆瓣电影/<title>.txt``.

    Parameters
    ----------
    url : str
        A Top-250 listing page URL (e.g. ``...top250?start=0&filter=``).
    headers : dict
        HTTP headers to send; defaults to the module-level browser headers.

    Raises
    ------
    requests.HTTPError
        If the listing page responds with an error status.
    """
    # NOTE(review): the proxy entries lack a port/scheme, so the proxy setting
    # is likely ignored or broken — confirm against a working proxy list.
    response = requests.get(url=url, proxies={'http': random.choice(proxies)}, headers=headers)
    response.raise_for_status()                            # fail fast on an error page instead of parsing it
    soup = BeautifulSoup(response.text, "html.parser")
    article = soup.find_all("div", class_="article")       # container holding all movie items

    os.makedirs("豆瓣电影", exist_ok=True)                  # output dir must exist before the first open()

    for element in article[0].find_all("div", class_="item"):  # one <div class="item"> per movie
        print('********************************************************')
        title = element.find_all("span", class_="title")[0].text.strip()
        print(title)
        star = element.find_all("span", class_="rating_num")[0].text.strip()
        print(star)
        info = element.find_all("p", class_="")[0].text.strip()  # director/cast/year summary
        print(info)

        # The tagline <p class="quote"> is absent for some movies; test for
        # emptiness instead of catching an IndexError.
        quote_tags = element.find_all("p", class_="quote")
        quote = quote_tags[0].text.strip() if quote_tags else ''
        print(quote)

        img_url = element.find_all("img")[0].get("src").strip()  # poster image URL
        print(img_url)
        img_response = requests.get(url=img_url)               # download the poster
        with open("豆瓣电影/" + title + ".jpg", "wb") as f:     # context manager: file closed even on error
            f.write(img_response.content)
        with open("豆瓣电影/" + title + ".txt", "w", encoding="utf-8") as f:
            f.write(title + ',' + quote)
        
# Walk the ten Top-250 listing pages (25 movies per page), scraping each
# and pausing between requests to stay polite to the server.
for page in range(10):
    page_url = f"https://movie.douban.com/top250?start={page * 25}&filter="
    print(page_url)
    fetchmovies(url=page_url, headers=headers)
    time.sleep(10)                      # throttle: one page every 10 seconds
