import requests
import json
import time
import random
from fake_useragent import UserAgent

# Request headers sent with every API call.
ua = UserAgent()
headers = {
    # Randomized desktop User-Agent (picked once per process) to look less like a bot.
    'User-Agent': ua.random,
    'Referer': 'https://maoyan.com/films/',
    'Accept': 'application/json',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive'
}

def get_movie_comments(movie_id, offset=0, limit=15):
    """
    Fetch one page of comments for a movie from the Maoyan mobile API.

    :param movie_id: numeric Maoyan movie id
    :param offset: pagination offset
    :param limit: page size (number of comments per request)
    :return: list of raw comment dicts (regular 'cmts' merged with hot
             'hcmts'); [] on any request/parse failure
    """
    url = f'https://m.maoyan.com/mmdb/comments/movie/{movie_id}.json'
    params = {
        'offset': offset,
        'limit': limit,
        'startTime': int(time.time() * 1000)  # current timestamp in ms
    }

    try:
        # timeout= keeps a stalled connection from hanging the scraper
        # forever (the original call had no timeout at all).
        response = requests.get(url, headers=headers, params=params, timeout=10)
        response.raise_for_status()
        data = response.json()

        # Merge regular comments ('cmts') and hot comments ('hcmts').
        comments = []
        comments.extend(data.get('cmts') or [])
        comments.extend(data.get('hcmts') or [])
        # 'comments' is always a list here; no need for a truthiness fallback.
        return comments
    except Exception as e:
        # Best-effort scraping: log and return an empty page so the
        # caller's pagination loop terminates cleanly.
        print(f"获取评论失败: {e}")
        return []

def parse_comment(comment):
    """
    Normalize one raw API comment dict into a flat record.

    :param comment: raw comment dict as returned by the Maoyan API
    :return: dict with a fixed key set; missing fields become None,
             except city (falls back to '未知') and tags (falls back to [])
    """
    # The API may omit 'tagList' entirely, or include it with 'fixed'
    # missing or explicitly None.  The original `.get('fixed', [])` only
    # covered the *missing* case and crashed on an explicit None, so
    # guard both levels with `or`.
    tag_list = comment.get('tagList') or {}
    fixed_tags = tag_list.get('fixed') or []
    return {
        'user_id': comment.get('userId'),
        'nickname': comment.get('nickName'),
        'avatar': comment.get('avatarurl'),
        'content': comment.get('content'),
        'score': comment.get('score'),
        'time': comment.get('startTime'),
        'approve': comment.get('approve'),
        'reply': comment.get('reply'),
        'city': comment.get('cityName', '未知'),
        'tags': [tag['name'] for tag in fixed_tags]
    }

def get_all_comments(movie_id, max_count=100):
    """
    Collect up to *max_count* unique comments for a movie.

    Pages through the API, de-duplicating by comment content, and stops
    when the target count is reached, a page comes back empty, or the
    API starts repeating the previous page.

    :param movie_id: numeric Maoyan movie id
    :param max_count: maximum number of comments to return
    :return: list of parsed comment dicts (see parse_comment)
    """
    all_comments = []
    seen_contents = set()  # O(1) duplicate check instead of rescanning the list
    offset = 0
    limit = 15  # comments fetched per request
    last_comment_time = None  # timestamp of the previous page's last comment

    while len(all_comments) < max_count:
        comments = get_movie_comments(movie_id, offset, limit)
        if not comments:
            break

        # If the first comment repeats the previous page's last timestamp,
        # the API has started cycling — stop paging.
        # (.get avoids a KeyError on malformed comment dicts.)
        if last_comment_time and comments[0].get('startTime') == last_comment_time:
            break

        for comment in comments:
            parsed_comment = parse_comment(comment)
            content = parsed_comment['content']
            if content not in seen_contents:
                seen_contents.add(content)
                all_comments.append(parsed_comment)

            if len(all_comments) >= max_count:
                break

        last_comment_time = comments[-1].get('startTime')
        offset += limit

        print(f"已获取 {len(all_comments)} 条评论")

        if len(all_comments) >= max_count:
            break  # target reached — skip the pointless final delay
        time.sleep(random.uniform(1, 2))  # throttle requests to avoid being blocked

    return all_comments[:max_count]

def save_comments_to_file(comments, filename='talk_text.txt'):
    """
    Save the comment list to *filename* as pretty-printed JSON.

    :param comments: list of parsed comment dicts
    :param filename: output path, defaults to 'talk_text.txt'
    """
    with open(filename, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese text human-readable on disk.
        json.dump(comments, f, ensure_ascii=False, indent=2)
    # Report the actual destination (the original printed a literal
    # "(unknown)" placeholder instead of the filename).
    print(f"评论数据已保存到 {filename}")

def main():
    """Entry point: scrape comments for a sample movie, save them, and echo a preview."""
    # Example: the movie "F1" on Maoyan (movie id 1454962).
    movie_id = 1454962
    max_comments = 100  # how many comments to collect

    print(f"开始爬取电影ID为 {movie_id} 的评论...")
    comments = get_all_comments(movie_id, max_comments)

    # Guard clause: bail out early when nothing was fetched.
    if not comments:
        print("未能获取到任何评论")
        return

    save_comments_to_file(comments, f'talk_text_{movie_id}.txt')

    # Echo the first five comments as a quick sanity check.
    print("\n前5条评论示例:")
    for idx, item in enumerate(comments[:5], 1):
        print(f"{idx}. {item['nickname']} (来自: {item['city']}, 评分: {item['score']}):")
        print(f"   {item['content']}")
        print(f"   时间: {item['time']} 点赞: {item['approve']} 回复: {item['reply']}")
        print(f"   标签: {', '.join(item['tags'])}")
        print()

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()