import json
import csv
import requests
from lxml import etree
import re
from pymysql import *
import random

from spiders.spider import USER_AGENTS

# Shared database connection and cursor used by query() below.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file before sharing this script.
conn = connect(host='localhost', port=3306, user='root', passwd='123890qwe', database='dbfilm')
cursor = conn.cursor()

def query(sql, params, type='no_select'):
    """Execute *sql* on the shared connection with *params*.

    When *type* is 'no_select' the statement is committed and a success
    message string is returned; otherwise all fetched rows are returned.
    On any error the transaction is rolled back and None is returned.
    """
    try:
        cursor.execute(sql, tuple(params))
        if type == 'no_select':
            conn.commit()
            return 'sql执行成功'
        rows = cursor.fetchall()
        conn.commit()
        return rows
    except Exception as exc:
        print(f"数据库操作出错: {exc}")
        # Roll back the failed transaction so the connection stays usable.
        conn.rollback()
        return None

# Debug fetch at import time: dumps the whole movies table to stdout.
# NOTE(review): this runs as a side effect whenever the module is imported —
# consider moving it under the __main__ guard or removing it.
allData = query('select * from movies', [], 'select')
print(allData)

def getAllData():
    """Fetch all movie rows and append the first (up to) 100 movie IDs
    to ./top100MovieId.csv, one ID per line.

    The ID is expected in column index 1 of each row; rows with fewer
    than 2 columns are reported and skipped.

    Returns:
        The full result set from the movies table, or [] when the
        query returns nothing (or fails).
    """
    allData = query('select * from movies', [], 'select')
    if not allData:
        print("未从数据库获取到电影数据")
        return []

    # NOTE(review): append mode means repeated runs duplicate IDs in the
    # CSV — confirm whether 'w' (overwrite) is the intended behavior.
    with open('./top100MovieId.csv', 'a', newline='', encoding='utf-8') as f:
        # Slicing caps the output at 100 rows without manual index math;
        # enumerate keeps the row index for the skip message.
        for i, row in enumerate(allData[:100]):
            if len(row) >= 2:
                f.write(str(row[1]) + '\n')
            else:
                print(f"数据不完整，跳过第{i}条记录: {row}")
    return allData

def spider_main():
    """Crawl Douban review pages for the movie IDs listed in
    ./top100MovieId.csv and insert each review into the comments table
    via query().

    Note: only the first review page per movie is actually fetched — the
    `break` at the bottom of the page loop short-circuits the 2-page
    loop (delete it to crawl both pages).
    """
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python). Compile once, outside the loop.
    id_pattern = re.compile(r'\d+')
    # Every header except User-Agent is invariant across requests, so the
    # constant part (including the large cookie) is built once up front.
    base_headers = {
        "Cookie": 'bid=PqAVbM5QlYI; ll="118360"; _vwo_uuid_v2=DD2A1C0F897BE8419CC81A2F0BF9CB78B|c314ed5c401d7eca61e8912d7f470972; _pk_id.100001.8cb4=528102be93b62978.1754745091.; ct=y; dbcl2="290508954:3ElXQMu14fE"; push_noty_num=0; push_doumail_num=0; __utmv=30149280.29050; __yadk_uid=1aPyDcWIo0DBfvY1jJp4eVRbsGqtaoUv; ck=zM3U; _pk_ref.100001.8cb4=%5B%22%22%2C%22%22%2C1754832971%2C%22https%3A%2F%2Fcn.bing.com%2F%22%5D; _pk_ses.100001.8cb4=1; ap_v=0,6.0; frodotk_db="5b6ff09b61eaa456124fd1d225bfed02"; _TDID_CK=1754832971386; 6333762c95037d16=Ub1wdAoApmRya4VOEleF%2BYv8vmegkN0krKZNVZLxCJshvyGlW7cd3JRSzI4wSCMAirr0EdnpxWaCpufoUuDTj%2FQl0A6BIT6Cu%2BzMLvxIQF2orvaRiZ6bT3c6245y1vp0MRrb16eqLUq2S5EaacEqxKZV55M7jn1Xruj2wTBHlUriW7YFgk878SYJjSuOLqn3n8Gl8A0tmWZdL0zK4SAb3K9EKb4Pc%2BDQHEUSD5lGjZ1KacXsCIn%2BUFt%2BaRG5SXz7Nl1CW6ImaBYAPL8Kc%2BXxA2kuzMkQisGjSzlxZpexbdgLXu5l4ncD4w%3D%3D; __utma=30149280.1074194882.1754495139.1754747498.1754832975.10; __utmc=30149280; __utmz=30149280.1754832975.10.4.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmb=30149280.2.10.1754832975',
        "Referer": "https://movie.douban.com/",
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive"
    }
    try:
        with open('./top100MovieId.csv', 'r') as f:
            for line in f:  # iterate lazily instead of f.readlines()
                # Extract the numeric movie ID from the CSV line.
                id_match = id_pattern.findall(line)
                if not id_match:
                    print(f"无法从行中提取电影ID: {line}")
                    continue
                mId = id_match[0]
                for j in range(2):  # up to 2 pages of reviews, 20 per page
                    base_url = f'https://movie.douban.com/subject/{mId}/reviews?start={j * 20}'
                    # Rotate the User-Agent per request; everything else is constant.
                    headers = {"User-Agent": random.choice(USER_AGENTS), **base_headers}
                    try:
                        resp = requests.get(base_url, headers=headers, timeout=10)
                        resp.raise_for_status()  # surface HTTP-level failures
                    except requests.exceptions.RequestException as e:
                        print(f"请求失败: {e}，URL: {base_url}")
                        continue
                    xpathHtml = etree.HTML(resp.text)
                    # The movie title sits in the page-header breadcrumb link.
                    name_elements = xpathHtml.xpath('//*[@id="content"]/div/div[2]/div[1]/div[2]/a/text()')
                    if name_elements:
                        # Drop the 2-character prefix of the link text when long
                        # enough (presumably a label before the title — TODO confirm).
                        movieName = name_elements[0][2:] if len(name_elements[0]) >= 2 else name_elements[0]
                        print(f"成功获取电影名: {movieName}")
                    else:
                        print(f"无法获取电影名，XPath 结果为空。URL: {base_url}")
                        print("网页内容预览:", resp.text[:500])
                        continue  # no title means the page layout is unexpected; skip it
                    # One child div per review inside the review-list container.
                    divs = xpathHtml.xpath('//div[contains(@class, "review-list")]/div')
                    print(f"找到 {len(divs)} 条评论")
                    for div in divs:
                        # Gather every text node of the short-content block.
                        content_parts = div.xpath('.//div[contains(@class, "short-content")]//text()')
                        if content_parts:
                            comment = ''.join(content_parts).strip()
                            if comment:
                                # Parameterized insert — values are never
                                # interpolated into the SQL string.
                                query("insert into comments(movieName,commentContent)values(%s,%s)",
                                      [movieName, comment])
                                print(f"插入评论: {comment[:20]}...")
                            else:
                                print("评论内容为空，跳过插入")
                        else:
                            print("未提取到评论内容，跳过插入")
                    # Only page 1 is crawled; remove this break to fetch both pages.
                    break
    except Exception as e:
        # Top-level boundary: report and swallow so one bad movie/page
        # doesn't kill the whole crawl session.
        print(f"程序执行出错: {e}")

if __name__ == '__main__':
    # getAllData()  # enable as needed to (re)build top100MovieId.csv
    spider_main()