import requests
import time
import random
import json
from concurrent.futures import ThreadPoolExecutor
from lxml import etree

class OptimizedDouBanSpider:
    """Multi-threaded scraper for the Douban Top 250 movie chart.

    Fetches the paginated chart with a small thread pool, parses each page
    with lxml XPath, and appends one JSON object per movie to
    ``self.output_file`` (JSON Lines format, one record per line).
    """

    def __init__(self):
        # The {} placeholder receives the 0-based item offset (0, 25, 50, ...).
        self.base_url = "https://movie.douban.com/top250?start={}&filter="
        # Base request headers. The User-Agent here is only an initial
        # default; get_html() re-randomizes it per request on a copy.
        self.headers = {
            'User-Agent': self._get_random_user_agent(),
            'Referer': 'https://movie.douban.com/',
            'Accept-Language': 'zh-CN,zh;q=0.9',
        }
        self.proxy_pool = []  # optional: requests-style proxy dicts to rotate
        # NOTE(review): requests.Session is not documented as thread-safe;
        # tolerable at 3 workers, but confirm before raising max_workers.
        self.session = requests.Session()
        self.max_retries = 3
        self.time_delay = (1, 3)  # random back-off range between retries (seconds)
        self.output_file = "douban_top250.json"

    def _get_random_user_agent(self):
        """Return one User-Agent string chosen at random from a small pool."""
        user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.4.1 Safari/605.1.15',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Firefox/121.0',
            'Mozilla/5.0 (iPhone; CPU iPhone OS 17_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.4 Mobile/15E148 Safari/604.1'
        ]
        return random.choice(user_agents)

    def get_html(self, url):
        """GET *url* with retries; return the decoded body, or None on failure.

        Builds a fresh headers dict per call instead of mutating
        ``self.headers`` in place — the original mutation raced between the
        worker threads submitted by run(). Retries up to ``self.max_retries``
        times, sleeping a random interval between attempts.
        """
        for attempt in range(self.max_retries):
            try:
                # Per-request copy: no shared mutable state across threads.
                request_headers = dict(self.headers)
                request_headers['User-Agent'] = self._get_random_user_agent()
                resp = self.session.get(
                    url,
                    headers=request_headers,
                    proxies=random.choice(self.proxy_pool) if self.proxy_pool else None,
                    timeout=15
                )
                resp.raise_for_status()
                resp.encoding = 'utf-8'
                return resp.text
            except requests.exceptions.RequestException as e:
                # Bug fix: the original printed attempt+1 as "retries left";
                # report the actual number of retries remaining.
                remaining = self.max_retries - attempt - 1
                print(f"请求失败: {str(e)} | 重试剩余次数: {remaining}/{self.max_retries}")
                time.sleep(random.uniform(*self.time_delay))
        return None

    @staticmethod
    def _first(node, path, default=''):
        """Return the first XPath text match, stripped, or *default* if absent."""
        found = node.xpath(path)
        return found[0].strip() if found else default

    def parse_page(self, html):
        """Extract a list of movie dicts from one chart page; [] on bad input.

        Robustness fix: every field lookup is guarded by _first(), so a
        single malformed item degrades to empty strings instead of raising
        IndexError — which previously tripped the broad except and threw
        away all 25 movies on the page.

        NOTE(review): the RDFa selectors (v:itemreviewed / v:summary) look
        like detail-page markup; confirm they match the current Top 250
        list-page HTML.
        """
        if not html:
            return []

        try:
            p = etree.HTML(html)
            movies = []

            for item in p.xpath("//div[@class='item']"):
                movies.append({
                    'title': self._first(item, './/span[@property="v:itemreviewed"]/text()'),
                    'rating': self._first(item, './/span[@property="v:average"]/text()'),
                    'quote': ' '.join(item.xpath('.//span[@property="v:summary"]/text()')).strip(),
                    'info': self._first(item, './/div[@class="bd"]/p[@class=""]/text()'),
                })

            return movies

        except Exception as e:
            print(f"解析失败: {str(e)}")
            return []

    def save_data(self, data):
        """Append *data* to the output file as JSON Lines (one object per line)."""
        try:
            with open(self.output_file, 'a', encoding='utf-8') as f:
                for item in data:
                    json.dump(item, f, ensure_ascii=False)
                    f.write('\n')
            print(f"成功保存 {len(data)} 条数据")
        except Exception as e:
            print(f"保存失败: {str(e)}")

    def run(self):
        """Crawl all 10 chart pages concurrently and persist the results."""
        start_time = time.time()

        # Small pool: keep the request rate polite (<= 3 concurrent fetches).
        with ThreadPoolExecutor(max_workers=3) as executor:
            futures = []

            for page in range(10):  # 10 pages x 25 items = 250 movies
                url = self.base_url.format(page * 25)
                print(f"正在处理页面: {url}")
                futures.append(executor.submit(self.process_page, url))

            # Collect in submission order; saving here keeps all file writes
            # on the main thread, so output lines never interleave.
            for future in futures:
                result = future.result()
                if result:
                    self.save_data(result)

        end_time = time.time()
        print(f"\n爬取完成！耗时: {end_time - start_time:.2f} 秒")
        # Bug fix: the original left an unclosed file handle with the locale
        # default encoding; count lines under a context manager instead.
        with open(self.output_file, encoding='utf-8') as f:
            total = sum(1 for _ in f)
        print(f"共抓取数据: {total} 条")

    def process_page(self, url):
        """Fetch and parse one chart page (the thread-pool task body)."""
        html = self.get_html(url)
        return self.parse_page(html)

if __name__ == "__main__":
    # Script entry point: build the spider and crawl the full chart.
    OptimizedDouBanSpider().run()