# -*- coding: UTF-8 -*-
"""
@Project , trainee
@File    , Movie_review_data.py
@IDE     , PyCharm
@Author  , 2607750505@qq.com
@Date    , 2025/6/16 10:12
"""
import re
import pymysql
import requests
import time
import random
from lxml import etree
import socket

# -*- coding: UTF-8 -*-
"""爬取电影评论数据"""
import re
import pymysql
import requests
import time
import random
from lxml import etree


def get_conn():
    """Open a connection to the local ``movie_system`` MySQL database.

    :return: a ``(connection, cursor)`` pair; the caller is responsible
        for closing both when done.
    """
    connection = pymysql.connect(
        host='localhost',
        user='root',
        password='123456',
        database='movie_system',
        charset='utf8mb4',
    )
    cursor = connection.cursor()
    return connection, cursor


# Shared HTTP request headers: a desktop Chrome user agent plus typical
# browser Accept headers so Douban serves the normal HTML page rather than
# blocking the request as an obvious bot.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Connection': 'keep-alive'
}


def _extract_douban_id(detail_link):
    """Return the first run of digits in *detail_link* (the Douban subject id).

    Returns '' when the link is empty or contains no digits.  The original
    code indexed ``re.findall(...)[0]`` directly and raised ``IndexError``
    on a digit-less link, bypassing the friendly error message.
    """
    if not detail_link:
        return ''
    digits = re.findall(r'\d+', detail_link)
    return digits[0] if digits else ''


def _clean_review_text(raw):
    """Normalize a raw review snippet before storing it.

    Kept byte-identical to the original transformation chain so the
    duplicate check still matches rows saved by earlier runs.
    NOTE(review): replacing '  ' with '' deletes the whitespace itself and
    can glue adjacent words together -- confirm intended before changing.
    """
    return raw.strip().replace('\n', '').replace('(', '').replace(')', '').replace('  ', '')


def spider_main(movie_id, review_limit=20):
    """Scrape up to *review_limit* Douban reviews for one movie and store them.

    Looks up the movie's Douban detail link in the ``movies`` table, fetches
    review pages (20 reviews per page), skips reviews already present in the
    ``comments`` table, and commits once per page.

    :param movie_id: primary key of the movie in the local ``movies`` table
    :param review_limit: maximum number of new reviews to save (default 20)
    :return: True if at least one new review was saved, False otherwise
    """
    conn, cursor = get_conn()

    try:
        # Look up the Douban detail link and the display title.
        cursor.execute("SELECT detailLink, title FROM movies WHERE id = %s", (movie_id,))
        movie_data = cursor.fetchone()

        if not movie_data:
            print(f"未找到电影ID: {movie_id}")
            return False

        detail_link, movie_name = movie_data
        douban_id = _extract_douban_id(detail_link)

        if not douban_id:
            print(f"无法从链接中提取豆瓣ID: {detail_link}")
            return False

        # 20 reviews per page; round up.
        pages = (review_limit + 19) // 20

        total_saved = 0

        for page in range(pages):
            url = f'https://movie.douban.com/subject/{douban_id}/reviews?start={page * 20}'
            print(f"正在爬取评论页 {page + 1}/{pages}: {url}")

            # Random delay to avoid triggering Douban's rate limiting.
            time.sleep(random.uniform(3, 8))

            # A network failure on one page should not abort the whole run;
            # the original let the exception propagate to the outer handler,
            # which rolled back and returned False.  Treat it like a bad
            # HTTP status and move on to the next page.
            try:
                response = requests.get(url, headers=headers, timeout=15)
            except requests.RequestException as e:
                print(f"请求失败: {e}")
                continue

            if response.status_code != 200:
                print(f"请求失败: {response.status_code}")
                continue

            xpath_html = etree.HTML(response.text)
            # The double space inside class="review-list  " matches Douban's
            # actual markup -- do not "fix" it.
            review_divs = xpath_html.xpath('//div[@class="review-list  "]/div')

            if not review_divs:
                print(f"在第{page + 1}页未找到评论")
                continue

            for review_div in review_divs:
                content_elems = review_div.xpath('.//div[@class="short-content"]/text()')

                if not content_elems:
                    continue

                content = _clean_review_text(content_elems[0])

                if not content:
                    continue

                # Skip reviews already stored for this movie.
                check_sql = "SELECT id FROM comments WHERE movie_id = %s AND commentContent = %s"
                cursor.execute(check_sql, (movie_id, content))

                if cursor.fetchone():
                    print("评论已存在，跳过")
                    continue

                insert_sql = "INSERT INTO comments (movie_id, movieName, commentContent) VALUES (%s, %s, %s)"
                cursor.execute(insert_sql, (movie_id, movie_name, content))
                total_saved += 1

                if total_saved >= review_limit:
                    break

            # Commit after every page so a later failure loses at most one page.
            conn.commit()

            if total_saved >= review_limit:
                break

        print(f"成功为电影 {movie_name} ({movie_id}) 爬取 {total_saved} 条评论")
        return total_saved > 0

    except Exception as e:
        print(f"爬取评论时发生错误: {e}")
        conn.rollback()
        return False
    finally:
        cursor.close()
        conn.close()


if __name__ == '__main__':
    # Manual smoke test -- e.g. spider_main(1, 10) scrapes ten reviews for
    # the movie whose local database id is 1.
    pass