import os
import random
import threading
from dataclasses import dataclass
from io import BytesIO
from queue import Queue
from time import sleep

import pandas as pd
import requests  # for sending HTTP requests
from bs4 import BeautifulSoup  # for parsing HTML pages
from fake_useragent import UserAgent
from lxml import etree
from PIL import Image
movie_urls=[]
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}
ua = UserAgent()
data_queue = Queue()  # 用于线程间通信的队列
lock = threading.Lock()  # 线程锁
@dataclass
class MovieInfo:
    """Plain data holder for one scraped Douban movie entry."""
    movie_id: str
    cover_url: str
    title: str
    douban_rating: str
    directors: list
    writers: list
    actors: list
    genres: list
    country: str
    language: str
    release_date: list
    synopsis: str
def download_image(image_url, movie_id):
    try:
        # 确保images目录存在
        if not os.path.exists('课程实训\images'):
            os.makedirs('课程实训\images')
        
        # 下载图片
        response = requests.get(image_url)
        response.raise_for_status()  # 检查请求是否成功
        
        # 打开图片并转换为JPG
        img = Image.open(BytesIO(response.content))
        
        # 处理透明背景(如果是PNG等格式)
        if img.mode in ('RGBA', 'LA'):
            background = Image.new('RGB', img.size, (255, 255, 255))
            background.paste(img, mask=img.split()[-1])
            img = background
        
        # 保存为JPG
        img.save(f'课程实训/images/{movie_id}.jpg', 'JPEG')
        return True
        
    except Exception as e:
        print(f"保存图片失败: {e}")
        return False
def get_movie_info(movie_url):
    # 根据当前负载动态延迟（如服务器返回速度快则减少延迟）
    delay = random.uniform(1.5, 3) if len(movie_urls) > 100 else random.uniform(0.5, 2)
    sleep(delay)
    response = requests.get(movie_url,headers= headers)
    soup = BeautifulSoup(response.text, 'html.parser')  # 使用BeautifulSoup解析HTML
    tree = etree.HTML(response.text)
    # 提取电影ID
    movie_id = movie_url.split('/')[-2]
    # 提取电影名称
    title = tree.xpath('//*[@id="content"]/h1/span[1]/text()')[0].strip()
    # 下载封面
    cover_url = tree.xpath('//*[@id="mainpic"]/a/img//@src')[0].strip()
    download_image(cover_url, movie_id)
    # 提取豆瓣评分
    rating = tree.xpath('//strong[@property="v:average"]/text()')[0].strip()
    # 提取导演列表
    directors = tree.xpath('//*[@id="info"]/span[1]/span[2]/a/text()')
    # 提取编剧列表
    writers = tree.xpath('//*[@id="info"]/span[2]/span[2]/a/text()')
    # 提取主演列表
    soup = BeautifulSoup(response.text, 'lxml')
    actors = soup.select('a[rel="v:starring"]')
    # 提取所有演员的文本和链接
    actor_list = []
    for actor in actors:
        actor_list.append(actor.get_text(strip=True))
    # 提取类型列表
    genres = tree.xpath('//span[@property="v:genre"]/text()')
     # 提取国家/地区
    country = tree.xpath('//span[contains(text(), "制片国家/地区")]/following::text()[1]')[0].strip()
    # 提取语言
    language = tree.xpath('//span[contains(text(), "语言")]/following::text()[1]')[0].strip()
     # 提取上映日期
    release_dates = tree.xpath('//span[@property="v:initialReleaseDate"]/text()')
     # 提取剧情简介
    synopsis = soup.find('span', property='v:summary')
    if synopsis:
        synopsis = synopsis.text.strip()
    # 创建并返回MovieInfo对象
    movie_info = MovieInfo(movie_id=movie_id,cover_url=cover_url,title=title,douban_rating=rating,directors=directors,writers=writers,
                           actors=actor_list,genres=genres,country=country,language=language,release_date=release_dates,synopsis=synopsis)
    return movie_info
def worker():
    """工作线程函数"""
    i=1
    while True:
        movie_url = data_queue.get()
        if movie_url is None:  # 结束信号
            break
            
        movie_info = get_movie_info(movie_url)
        if movie_info:
            with lock:  # 使用锁保证线程安全
                # 将数据保存到CSV
                df = pd.DataFrame([{
                    'movie_id': movie_info.movie_id,
                    'cover_url': movie_info.cover_url,
                    'title': movie_info.title,
                    'douban_rating': movie_info.douban_rating,
                    'directors': ', '.join(movie_info.directors),
                    'writers': ', '.join(movie_info.writers),
                    'actors': ', '.join(movie_info.actors),
                    'genres': ', '.join(movie_info.genres),
                    'country': movie_info.country,
                    'language': movie_info.language,
                    'release_date': ', '.join(movie_info.release_date),
                    'synopsis': movie_info.synopsis
                }])
                # 追加模式写入CSV
                if not os.path.exists('课程实训\\top250.csv'):
                    df.to_csv('课程实训\\top250.csv', index=False, encoding='utf-8-sig')
                else:
                    df.to_csv('课程实训\\top250.csv', mode='a', header=False, index=False, encoding='utf-8-sig')
                
                print(f"已处理{i}: {movie_info.title}")
                i+=1
        
        data_queue.task_done()

def save_movie_info(movie_urls, num_threads=2):
    # 创建工作线程
    threads = []
    for _ in range(num_threads):
        t = threading.Thread(target=worker)
        t.start()
        threads.append(t)
    # 将任务放入队列
    for url in movie_urls:
        data_queue.put(url)
    # 等待所有任务完成
    data_queue.join()
    # 发送结束信号
    for _ in range(num_threads):
        data_queue.put(None)
    # 等待所有线程结束
    for t in threads:
        t.join()
for page in range(0, 250, 25):
    # 获取列表页
    # sleep(random.uniform(1, 3))  # 随机延迟1~3秒
    url = f'https://movie.douban.com/top250?start={page}'
    response = requests.get(url, headers= headers) # 发送GET请求
    response.encoding = 'utf-8'  # 设置编码为UTF-8，避免乱码
    soup = BeautifulSoup(response.text, 'html.parser')  # 使用BeautifulSoup解析HTML
    tree = etree.HTML(response.text)
    movie_url=(tree.xpath('//div[@class="hd"]/a/@href'))
    movie_urls+=movie_url
save_movie_info(movie_urls, num_threads=3)