from bs4 import BeautifulSoup
import requests

# Browser-like request headers: Douban rejects the default python-requests
# User-Agent, so we mimic a real Chrome browser.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}


def fetch_movie_titles():
    """Scrape the Chinese titles of all 250 movies on Douban's Top 250 list.

    Returns:
        list[str]: the titles, in ranking order.

    Raises:
        requests.HTTPError: if any page returns a 4xx/5xx status.
        requests.Timeout: if a request exceeds the 10-second timeout.
    """
    titles = []
    # Reuse one TCP connection across all paginated requests.
    with requests.Session() as session:
        session.headers.update(HEADERS)
        # The list is paginated 25 movies per page via the `start` query parameter.
        for start_num in range(0, 250, 25):
            response = session.get(
                f"https://movie.douban.com/top250?start={start_num}",
                timeout=10,  # never hang forever on a stalled connection
            )
            # Fail loudly instead of silently parsing an error page.
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")
            for title_tag in soup.find_all("span", attrs={"class": "title"}):
                title_string = title_tag.string
                # Each movie has two <span class="title"> tags; the second
                # (original-language) one starts with "/" — skip it. Also guard
                # against .string being None when a tag has nested children.
                if title_string and "/" not in title_string:
                    print(title_string)
                    titles.append(title_string)
    return titles


def save_titles(titles, file_path="douban.txt"):
    """Write the given titles to *file_path*, one per line, UTF-8 encoded."""
    with open(file_path, "w", encoding="utf-8") as file:
        file.writelines(f"{title}\n" for title in titles)


if __name__ == "__main__":
    movie_titles = fetch_movie_titles()
    save_titles(movie_titles)
    print("所有电影标题已写入文件")