# -*- coding: UTF-8 -*-
"""
@Project , trainee
@File    , Movie_detailed_data.py
@IDE     , PyCharm
@Author  , 2607750505@qq.com
@Date    , 2025/6/16 10:04
"""
"""爬取电影详细信息"""
import json
import re
import pymysql
import requests
from lxml import etree
from bs4 import BeautifulSoup
import random
import time


# 数据库连接配置
def get_conn():
    """Open a connection to the local ``movie_system`` MySQL database.

    Returns:
        tuple: ``(connection, cursor)`` from pymysql, ready for queries.
    """
    connection = pymysql.connect(
        host='localhost',
        user='root',
        password='123456',
        database='movie_system',
        charset='utf8mb4',
    )
    cursor = connection.cursor()
    return connection, cursor


# Shared HTTP request headers: a desktop Chrome User-Agent so the target
# site serves the normal HTML pages instead of blocking the scraper.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
}


def spider_main(spiderTarget, page):
    """Fetch one page of the movie-list API and scrape details for each movie.

    Args:
        spiderTarget: URL of the list API; expected to return JSON with a
            'data' list of movie summaries.
        page: Offset value sent as the 'start' query parameter.

    Returns:
        list[dict]: One dict per successfully scraped movie. All keys used by
        the DB insert are always present (missing detail fields are None).
        Returns an empty list on request failure or unexpected errors.
    """
    # Random delay to avoid hitting the server too frequently.
    time.sleep(random.uniform(2, 5))

    try:
        # Request the movie list page.
        params = {'start': page}
        movies_all_res = requests.get(spiderTarget, params=params, headers=headers, timeout=10)
        print(f'请求API的状态码: {movies_all_res.status_code}')

        if movies_all_res.status_code != 200:
            print(f'请求失败: {movies_all_res.status_code}')
            return []

        # Parse the movie list out of the JSON payload.
        movies_info = movies_all_res.json().get('data', [])
        if not movies_info:
            print('没有获取到电影数据')
            return []

        result = []

        # Visit each movie's detail page. Each movie is wrapped in its own
        # try/except so one bad page doesn't discard the whole batch.
        for movie_info in movies_info:
            try:
                result_data = {
                    # Basic fields from the list API.
                    'directors': ','.join(movie_info.get('directors', [])),
                    'rate': movie_info.get('rate', ''),
                    'title': movie_info.get('title', ''),
                    'detailLink': movie_info.get('url', ''),
                    'casts': ','.join(movie_info.get('casts', [])),
                    'cover': movie_info.get('cover', ''),
                    # Detail-page fields pre-set to None so the DB insert in
                    # main() never hits a KeyError when the info block or a
                    # release date is missing from the detail page.
                    'country': None,
                    'lang': None,
                    'time': None,
                }

                # Request the movie detail page (with its own polite delay).
                time.sleep(random.uniform(2, 5))
                detail_res = requests.get(result_data['detailLink'], headers=headers, timeout=10)
                print(f'请求详情页的状态码: {detail_res.status_code}')

                if detail_res.status_code != 200:
                    continue

                # Parse the detail page with both lxml (xpath) and BeautifulSoup.
                xpath_html = etree.HTML(detail_res.text)
                soup = BeautifulSoup(detail_res.text, 'lxml')

                # Release year, e.g. "(2024)" → "2024".
                year_elem = xpath_html.xpath('//span[@class="year"]/text()')
                result_data['year'] = year_elem[0].strip('()') if year_elem else None

                # Genres joined with commas.
                types = xpath_html.xpath('//span[@property="v:genre"]/text()')
                result_data['types'] = ','.join(types) if types else None

                # Country / language / release date live in the "info" div.
                info_div = soup.find('div', id='info')
                if info_div:
                    info_text = str(info_div)
                    country = re.findall(r'<span class="pl">制片国家/地区:</span>(.*?)<br/>', info_text)
                    result_data['country'] = re.sub(r'\s+', '', country[0]).replace('/', ',') if country else None

                    lang = re.findall(r'<span class="pl">语言:</span>(.*?)<br/>', info_text)
                    result_data['lang'] = re.sub(r'\s+', '', lang[0]).replace('/', ',') if lang else None

                    up_time = soup.find_all('span', property="v:initialReleaseDate")
                    if up_time:
                        time_str = up_time[0].get_text()
                        time_match = re.findall(r'\d*-\d*-\d*', time_str)
                        result_data['time'] = time_match[0] if time_match else None

                # Runtime in minutes; guard against runtime text with no digits
                # (the original indexed findall()[0] unconditionally, which
                # raised IndexError on such pages). Falls back to a random
                # placeholder when the page omits a usable runtime.
                runtime_elem = soup.find('span', property="v:runtime")
                runtime_digits = re.findall(r'\d+', runtime_elem.get_text()) if runtime_elem else []
                result_data['movieTime'] = runtime_digits[0] if runtime_digits else str(random.randint(30, 180))

                # Number of rating votes.
                comment_len = xpath_html.xpath('//span[@property="v:votes"]/text()')
                result_data['commentLen'] = comment_len[0] if comment_len else '0'

                # Per-star rating percentages.
                stars = xpath_html.xpath('//span[@class="rating_per"]/text()')
                result_data['star'] = ','.join(stars) if stars else ''

                # Plot summary with all whitespace stripped out.
                summary = soup.find('span', property="v:summary")
                result_data['summary'] = re.sub(r'\s+', '', summary.get_text().strip()) if summary else ''

                # Related picture URLs joined with commas.
                img_list = xpath_html.xpath('//div[@id="related-pic"]/ul/li/a/img/@src')
                result_data['imgList'] = ','.join(img_list) if img_list else ''

                result.append(result_data)

            except Exception as e:
                # Log and skip this movie; keep the rest of the page.
                print(f'处理单部电影时出错: {e}')
                continue

        return result

    except Exception as e:
        print(f'爬取过程中发生错误: {e}')
        return []


def main(limit=10):
    """Scrape up to *limit* movies and persist new ones to the database.

    Pages through the list API 20 movies at a time, skips titles that are
    already stored (matched on title + year), and commits once per page.

    Args:
        limit: Maximum number of movies to save (default 10).
    """
    conn, cursor = get_conn()
    spider_target = 'https://movie.douban.com/j/new_search_subjects'

    try:
        # Number of pages needed: 20 movies per page, rounded up.
        pages = (limit + 19) // 20

        for page in range(pages):
            # How many movies to keep from this page (last page may be short).
            actual_limit = min(20, limit - page * 20)

            print(f'开始爬取第{page + 1}页，预计爬取{actual_limit}条电影')
            data = spider_main(spider_target, page * 20)

            if not data:
                print(f'第{page + 1}页没有数据')
                continue

            # Persist each movie, skipping duplicates.
            for movie in data[:actual_limit]:
                # Duplicate check keyed on title + year (parameterized query).
                check_sql = "SELECT id FROM movies WHERE title = %s AND year = %s"
                cursor.execute(check_sql, (movie.get('title'), movie.get('year')))

                if cursor.fetchone():
                    print(f"电影已存在: {movie.get('title')}")
                    continue

                insert_sql = """
                    INSERT INTO movies (
                        directors, rate, title, casts, cover, year, types, country, lang, 
                        time, movieTime, commentLen, star, summary, imgList, detailLink
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """

                # Use .get() so a movie dict missing a detail field inserts
                # NULL instead of raising KeyError and rolling back the page.
                params = tuple(movie.get(key) for key in (
                    'directors', 'rate', 'title', 'casts', 'cover', 'year',
                    'types', 'country', 'lang', 'time', 'movieTime',
                    'commentLen', 'star', 'summary', 'imgList', 'detailLink',
                ))

                cursor.execute(insert_sql, params)

            # Commit once per page so a later failure doesn't lose this page.
            conn.commit()
            print(f'第{page + 1}页数据保存完成')

    except Exception as e:
        print(f'保存数据时发生错误: {e}')
        conn.rollback()
    finally:
        cursor.close()
        conn.close()


if __name__ == '__main__':
    main(10)  # scrape 10 movies by default