# 爬虫练习
import requests
import re

# Session cookies captured from a browser visit to douban.com, sent with
# every request so the scrape looks like the same browsing session.
# NOTE: the original literal listed __utmz/__utma/__utmb/__utmc twice —
# once per Google-Analytics property (30149280 and 223695111). A Python
# dict literal silently keeps only the LAST value per key, so the
# 30149280.* entries were never sent anyway. The duplicates are removed
# here to make the actual runtime value explicit; use a
# requests.cookies.RequestsCookieJar if both sets are ever needed.
cookies = {
    'bid': '2YbODN5rjU0',
    '__utmz': '223695111.1734419645.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)',
    '_pk_id.100001.4cf6': '4a991c9f7cb9223e.1734419645.',
    '__yadk_uid': 'MW2MT0SImcG9NW7utpjXCvRqy4bXxcFA',
    '_pk_ses.100001.4cf6': '1',
    'ap_v': '0,6.0',
    '__utma': '223695111.41584751.1734419645.1734419645.1734500002.2',
    '__utmb': '223695111.0.10.1734500002',
    '__utmc': '223695111',
}

# Request headers copied from Chrome DevTools so the request is
# indistinguishable from a normal desktop Chrome page load (Douban
# rejects obviously scripted traffic). Cookies are supplied separately
# via the `cookies=` argument of requests.get, so no 'cookie' header
# belongs here; the stale commented-out copy has been removed.
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'max-age=0',
    'priority': 'u=0, i',
    'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
}

if __name__ == "__main__":
    file1 = open("douban_movies_name.txt", "w", encoding="utf-8")
    for i in range(0, 225 + 1, 25):
        params = {
            'start': i,
            'filter': '',
        }
        response = requests.get('https://movie.douban.com/top250', params=params, cookies=cookies, headers=headers)
        list1 = re.findall(r'<span class="title">(.*?)</span>', response.text, re.S)
        for j in list1:
            if j.isalpha():
                file1.write(f'{j}\n')
    file1.close()
