from lxml import etree
import requests
import csv
import time
import random

def _extract_text(node):
    """Return every non-empty, stripped text fragment beneath *node*."""
    return [text.strip() for text in node.itertext() if text.strip()]


def _parse_movie_page(tree):
    """Extract movie fields from a parsed detail-page tree.

    Returns the movie-info dict, or None when the page does not contain
    the expected markup (e.g. Douban served a login/anti-bot page).
    """
    rank = tree.xpath('//*[@id="content"]/div[1]/span[1]/text()')
    name = tree.xpath('//span[@property="v:itemreviewed"]/text()')
    year = tree.xpath('//*[@id="content"]/h1/span[2]/text()')
    info_nodes = tree.xpath('//*[@id="info"]')
    grade_nodes = tree.xpath('//*[@id="interest_sectl"]/div[1]')
    comment_nodes = tree.xpath('//*[@id="content"]/div[2]/div[1]/div[3]')

    # Indexing [0] on an empty result list would raise IndexError; bail out
    # instead so the caller can skip this page gracefully.
    if not (info_nodes and grade_nodes and comment_nodes):
        return None

    detail_info = _extract_text(info_nodes[0])
    detail_grade = _extract_text(grade_nodes[0])
    detail_comment = _extract_text(comment_nodes[0])

    print(f"排名: {rank}")
    print(f"电影名: {name}")
    print(f'上映年份: {year}')
    print(' '.join(detail_info))
    print(' '.join(detail_grade))
    print(' '.join(detail_comment))

    return {
        '排名': rank[0] if rank else '',
        '电影名': name[0] if name else '',
        '上映年份': year[0].strip('()') if year else '',
        '基本人员信息': ' '.join(detail_info),
        '评分信息': ' '.join(detail_grade),
        '简介': ' '.join(detail_comment),
    }


def get_page(url):
    """Fetch one Douban movie detail page and extract its key information.

    Parameters
    ----------
    url : str
        Absolute URL of a movie detail page on movie.douban.com.

    Returns
    -------
    dict | None
        Mapping with keys 排名 / 电影名 / 上映年份 / 基本人员信息 /
        评分信息 / 简介, or None when the request fails or the page
        lacks the expected markup.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    try:
        # timeout prevents a single stalled request from hanging the crawl
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()
    except requests.RequestException as e:
        print(f"请求失败: {url} ({e})")
        return None

    resp.encoding = 'utf-8'
    tree = etree.HTML(resp.text)

    movie_info = _parse_movie_page(tree)
    if movie_info is None:
        print(f"页面结构不符合预期, 跳过: {url}")

    # Random pause between requests — basic politeness / anti-ban measure.
    time.sleep(random.randint(2, 4))
    return movie_info

def get_detail_page_urls(page_num=10):
    """Collect movie detail-page URLs from the Douban Top 250 list pages.

    Parameters
    ----------
    page_num : int
        Number of 25-item list pages to fetch (10 covers the full Top 250).

    Returns
    -------
    list[str]
        Detail-page URLs in list order; list pages that fail to load are
        skipped rather than aborting the whole collection.
    """
    detail_urls = []
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    for page in range(page_num):
        start = page * 25  # each list page shows 25 movies
        url = f'https://movie.douban.com/top250?start={start}&filter='
        try:
            # timeout prevents a stalled request from hanging the crawl
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
        except requests.RequestException as e:
            # skip this page but keep collecting the rest
            print(f"获取列表页失败: {url} ({e})")
            continue
        html = etree.HTML(response.text)
        urls = html.xpath('//div[@class="hd"]/a/@href')
        detail_urls.extend(urls)
        print(f"已获取第{page + 1}页的{len(urls)}个电影详情页链接")
        # polite pause between list-page requests
        time.sleep(random.randint(1, 3))
    return detail_urls

def save_to_csv(movies, filename='douban_top250_details.csv'):
    """Write the scraped movie dicts to a CSV file.

    Parameters
    ----------
    movies : list[dict]
        Rows to write; the keys of the first dict define the CSV header.
    filename : str
        Output path; written with a UTF-8 BOM (utf-8-sig) so Excel opens
        the Chinese text correctly.
    """
    # Guard: movies[0] below would raise IndexError on an empty crawl.
    if not movies:
        print("没有可保存的电影信息")
        return
    fieldnames = movies[0].keys()
    with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(movies)
    # Fixed: the message previously printed the literal placeholder
    # "(unknown)" instead of the actual output filename.
    print(f"已将{len(movies)}部电影信息保存到{filename}")

if __name__ == "__main__":
    # Stage 1: gather the 250 detail-page links from the list pages.
    print("开始获取电影详情页URL")
    links = get_detail_page_urls(page_num=10)

    # Stage 2: visit each detail page and keep the successfully parsed ones.
    collected = []
    for index, link in enumerate(links, 1):
        print(f"正在爬取第{index}部电影: {link}")
        details = get_page(link)
        if details:
            collected.append(details)

    # Stage 3: persist everything to disk.
    print(f"爬取完成，共获取{len(collected)}部电影的详细信息")
    save_to_csv(collected)
