#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Software : PyCharm
# @Time    : 2025-02-05 08:10
# @Author  : gzh
# @File : DoubanSpider.py

import requests
from bs4 import BeautifulSoup
import time
import csv

class DoubanSpider:
    """Scrape the Douban Top 250 movie list and append each entry to a CSV file."""

    def __init__(self):
        # Browser-like headers: Douban rejects requests without a User-Agent.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Host': 'movie.douban.com'
        }
        # Paginated list URL; `start` advances in steps of 25 (10 pages total).
        self.base_url = 'https://movie.douban.com/top250?start={}&filter='

    def parse_page(self, html):
        """Yield one dict of movie fields per entry found on a list page.

        Entries with unexpected/missing markup are skipped individually
        instead of aborting the whole page with an AttributeError.
        """
        soup = BeautifulSoup(html, 'lxml')

        for item in soup.find_all('div', class_='item'):
            try:
                # Basic movie info. The text node after <br> inside the
                # .bd <p> reads "YEAR / COUNTRY / GENRE".
                title = item.find('span', class_='title').text.strip()
                info = item.find('div', class_='bd').find('br').next_sibling.strip()
                year = info[:4]
                info_parts = info.split('/')
                country = info_parts[1].strip()
                genre = info_parts[2].strip()

                details_url = item.find('div', class_='hd').find('a')['href']

                # Rating info. The last <span> in the star block reads
                # "NNNN人评价"; drop the 3-char suffix to keep the count.
                # (Selecting by position in find_all is sturdier than the
                # old contents[7] index, which counted whitespace nodes.)
                rating_num = item.find('span', class_='rating_num').text
                rating_people = item.find('div', class_='star').find_all('span')[-1].text[:-3]
            except (AttributeError, IndexError, KeyError, TypeError):
                # Malformed entry — skip it, keep parsing the rest.
                continue

            # The one-line quote is absent for some movies.
            quote_tag = item.find('span', class_='inq')
            quote = quote_tag.text if quote_tag else "无"

            yield {
                'title': title,
                'year': year,
                'country': country,
                'genre': genre,
                'details': details_url,
                'rating': rating_num,
                'votes': rating_people,
                'quote': quote
            }

    def save_to_csv(self, data):
        """Append one movie record (dict) to douban_top250.csv.

        utf-8-sig keeps Excel happy with the Chinese text; the header row
        is written only when the file is empty (fresh file or first run).
        """
        with open('douban_top250.csv', 'a', newline='', encoding='utf-8-sig') as f:
            writer = csv.DictWriter(f, fieldnames=[
                'title', 'year', 'country',
                'genre', 'details', 'rating', 'votes', 'quote'
            ])
            if f.tell() == 0:
                writer.writeheader()
            writer.writerow(data)

    def run(self):
        """Fetch all 10 list pages, parse them, and persist every record."""
        for page in range(0, 250, 25):
            url = self.base_url.format(page)
            try:
                response = requests.get(url, headers=self.headers, timeout=10)
                if response.status_code == 200:
                    for data in self.parse_page(response.text):
                        self.save_to_csv(data)
                    print(f'成功爬取第{page // 25 + 1}页数据')
                else:
                    print(f'请求失败，状态码：{response.status_code}')
            except Exception as e:
                print(f'发生异常：{str(e)}')
            time.sleep(1.5)  # throttle requests to be polite / avoid blocking

if __name__ == '__main__':
    # Script entry point: crawl all pages and write the CSV.
    DoubanSpider().run()