import os
import csv
import logging
from googleapiclient.discovery import build
import pandas as pd


# Logging configuration: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# YouTube Data API credentials.
# SECURITY: an API key was hard-coded here (and is therefore exposed in version
# control — it should be rotated). Prefer supplying the key via the
# YOUTUBE_API_KEY environment variable; the literal is kept only as a
# backward-compatible fallback.
API_KEY = os.environ.get("YOUTUBE_API_KEY", "AIzaSyCBB9KZyljtWBnPcc2PZrbtwPgO3FcW6RA")
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"


# Optionally route HTTP(S) traffic through a local proxy
# (works with Clash, V2Ray and similar local proxy tools).
USE_PROXY = True
if USE_PROXY:
    os.environ["HTTP_PROXY"] = "http://127.0.0.1:7890"
    os.environ["HTTPS_PROXY"] = "http://127.0.0.1:7890"


def get_video_comments_count(video_id):
    """Fetch the comment count of one video.

    Returns 0 when the video is missing, has no comment count,
    or when the API call fails (the error is logged).
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=API_KEY)
    try:
        # A single statistics-only lookup keeps the quota cost minimal.
        response = youtube.videos().list(part="statistics", id=video_id).execute()
        items = response['items']
        if items:
            return int(items[0]['statistics'].get('commentCount', 0))
        return 0
    except Exception as e:
        logging.error(f"获取视频 {video_id} 评论数时出错: {str(e)}")
        return 0

def search_videos_by_keyword(keyword, max_results=50):
    """Search YouTube for videos matching *keyword*, ordered by view count.

    Returns a list of video IDs (capped at 50, the API's per-page maximum);
    an empty list is returned on API error (the error is logged).
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=API_KEY)
    try:
        response = youtube.search().list(
            q=keyword,
            part="id,snippet",
            maxResults=min(max_results, 50),
            type="video",
            order="viewCount",
        ).execute()
        return [entry['id']['videoId'] for entry in response.get('items', [])]
    except Exception as e:
        logging.error(f"搜索视频时出错: {str(e)}")
        return []

def main():
    """Search YouTube by keyword, collect per-video comment counts, and
    export the results (ID, comment count, title, URL) to an Excel file."""
    # User-configurable parameters.
    SEARCH_KEYWORD = "New energy RV"  # keyword/category to search for
    MAX_RESULTS = 100                 # total number of videos to collect
    OUTPUT_FILE = "RV_comments_data.xlsx"

    # Build the API client once and reuse it for every request
    # (the original rebuilt it inside both loops on each iteration).
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=API_KEY)

    # Collect up to MAX_RESULTS video IDs, following pagination tokens.
    all_video_ids = []
    next_page_token = None
    while len(all_video_ids) < MAX_RESULTS:
        try:
            search_response = youtube.search().list(
                q=SEARCH_KEYWORD,
                part="id,snippet",
                maxResults=min(50, MAX_RESULTS - len(all_video_ids)),
                type="video",
                order="viewCount",
                pageToken=next_page_token
            ).execute()
        except Exception as e:
            logging.error(f"分页搜索时出错: {str(e)}")
            break

        all_video_ids.extend(
            item['id']['videoId'] for item in search_response.get('items', [])
        )
        next_page_token = search_response.get('nextPageToken')
        if not next_page_token:
            break  # no more result pages

    # Fetch title and comment count together: one videos().list call with
    # part="snippet,statistics" per video, instead of the original's two
    # separate API calls (halves the quota cost per video).
    data = []
    for vid in all_video_ids[:MAX_RESULTS]:
        try:
            video_response = youtube.videos().list(
                part="snippet,statistics", id=vid
            ).execute()
            items = video_response.get('items')
            if items:
                title = items[0]['snippet']['title']
                comment_count = int(items[0]['statistics'].get('commentCount', 0))
            else:
                title = "N/A"
                comment_count = 0
            video_url = f"https://www.youtube.com/watch?v={vid}"
            data.append([vid, comment_count, title, video_url])
            logging.info(f"视频 {title[:30]}... 评论数: {comment_count}")
        except Exception as e:
            logging.error(f"获取视频 {vid} 信息时出错: {str(e)}")

    # BUGFIX: the original wrapped this section in
    # `with open(OUTPUT_FILE, 'w', ...)` — a leftover from a CSV version that
    # truncated the .xlsx path as a text file and held it open while
    # df.to_excel wrote the same path. pandas manages the file itself.
    df = pd.DataFrame(data, columns=['视频ID', '评论数', '视频标题', '视频链接'])
    df.to_excel(OUTPUT_FILE, index=False, engine='openpyxl')

if __name__ == "__main__":
    main()