import os
import time
import googleapiclient.discovery
import googleapiclient.errors
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from langdetect import detect, DetectorFactory

# Make langdetect deterministic across runs (its detector is randomized by default).
DetectorFactory.seed = 0

# Replace with your own YouTube Data API key.
API_KEY = "XXXXXXXXXXXXXXXXXXXXXXXX"

# Replace with the channel_id of the creator you want to inspect (placeholder value).
CHANNEL_ID = "XXXXXXXXXXXXXXXXXXXXXXX"

# Proxy settings (if needed); exported via environment variables so the
# HTTP client used by googleapiclient picks them up.
proxies = {
    'http': 'http://127.0.0.1:7890',
    'https': 'http://127.0.0.1:7890',
}
os.environ['http_proxy'] = proxies['http']
os.environ['https_proxy'] = proxies['https']


# Build the YouTube Data API v3 client.
youtube = googleapiclient.discovery.build("youtube", "v3", developerKey=API_KEY)

def get_video_data(api_client, channel_id):
    """Return stats for every upload of a channel.

    Parameters
    ----------
    api_client : googleapiclient resource for the YouTube Data API v3.
    channel_id : str, the channel to inspect.

    Returns
    -------
    dict mapping videoId -> {'title': str, 'views': int, 'likes': int}.

    Raises
    ------
    ValueError if the channel id does not resolve to any channel
    (the original code died with an opaque IndexError/KeyError here).
    """
    resp = api_client.channels().list(part="contentDetails", id=channel_id).execute()
    items = resp.get('items')
    if not items:
        raise ValueError(f"No channel found for id {channel_id!r}")
    uploads_playlist = items[0]['contentDetails']['relatedPlaylists']['uploads']

    # Walk the uploads playlist page by page (50 items is the API page max).
    video_data = {}
    token = None
    while True:
        pl_resp = api_client.playlistItems().list(
            part="snippet",
            playlistId=uploads_playlist,
            maxResults=50,
            pageToken=token
        ).execute()
        for item in pl_resp.get('items', []):
            vid = item['snippet']['resourceId']['videoId']
            title = item['snippet']['title']
            video_data[vid] = {'title': title, 'views': 0, 'likes': 0}
        token = pl_resp.get('nextPageToken')
        if not token:
            break

    # videos().list accepts at most 50 ids per call, so fetch stats in batches.
    vid_list = list(video_data.keys())
    for i in range(0, len(vid_list), 50):
        batch = vid_list[i:i+50]
        stat_resp = api_client.videos().list(part="statistics", id=','.join(batch)).execute()
        for item in stat_resp.get('items', []):
            vid = item['id']
            if vid not in video_data:  # defensive: ignore ids we never asked for
                continue
            stats = item['statistics']
            # viewCount/likeCount may be absent (e.g. likes hidden); default to 0.
            video_data[vid]['views'] = int(stats.get('viewCount', 0))
            video_data[vid]['likes'] = int(stats.get('likeCount', 0))

    return video_data


def fetch_top_comments(api_client, video_id, max_comments=200):
    """Return up to max_comments English top-level comments for a video.

    The commentThreads.list endpoint caps maxResults at 100 per request,
    so the original single call with maxResults=200 was rejected by the
    API.  This version paginates with pageToken and clamps the per-page
    size, so max_comments values above 100 actually work.

    Comments whose language cannot be detected or is not English are
    skipped.
    """
    comments = []
    token = None
    while len(comments) < max_comments:
        resp = api_client.commentThreads().list(
            part="snippet",
            videoId=video_id,
            order="relevance",
            textFormat="plainText",
            # API limit: 1..100 per page.
            maxResults=min(100, max_comments - len(comments)),
            pageToken=token
        ).execute()

        for item in resp.get('items', []):
            text = item['snippet']['topLevelComment']['snippet']['textDisplay']
            try:
                if detect(text) == 'en':
                    comments.append(text)
            except Exception:
                # langdetect raises on empty/undetectable text; skip it.
                # (Narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue
            if len(comments) >= max_comments:
                break

        token = resp.get('nextPageToken')
        if not token:
            break
    return comments

if __name__ == '__main__':

    data = get_video_data(youtube, CHANNEL_ID)

    # Top-10 videos by view count.
    sorted_videos = sorted(data.items(), key=lambda x: x[1]['views'], reverse=True)[:10]
    top10 = [{'id': vid, 'title': v['title'], 'views': v['views'], 'likes': v['likes']}
             for vid, v in sorted_videos]

    # Fetch each video's comments ONCE, both printing them and collecting
    # them for the frequency analysis.  (The original ran a second,
    # identical fetch loop afterwards, doubling API quota usage.)
    all_comments = []
    for video in top10:
        vid = video['id']
        title = video['title']
        print(f"\n=== Comments for video: {title} (ID: {vid}) ===")
        comments = fetch_top_comments(youtube, vid, max_comments=200)
        if not comments:
            print("No English comments found.")
            continue
        for idx, comment in enumerate(comments, start=1):
            print(f"{idx}. {comment}")
        all_comments.extend(comments)
        time.sleep(1)  # avoid hitting API rate limits

    # CountVectorizer raises an opaque ValueError on an empty corpus;
    # fail with a clear message instead.
    if not all_comments:
        raise SystemExit("No English comments collected; nothing to analyze.")

    # Bag-of-words frequency count over all collected comments.
    vectorizer = CountVectorizer(stop_words='english')
    X = vectorizer.fit_transform(all_comments)
    counts = X.toarray().sum(axis=0)
    words = vectorizer.get_feature_names_out()

    freq_df = pd.DataFrame({'word': words, 'count': counts})
    top_words = freq_df.sort_values(by='count', ascending=False).head(200)

    # Widen pandas display so the full table prints without truncation.
    pd.set_option('display.max_rows', 2000)
    pd.set_option('display.max_colwidth', None)
    pd.set_option('display.width', None)

    print("\nTop 200 high-frequency words:")
    print(top_words.to_string(index=False))

    # Horizontal bar chart of the 20 most frequent words (reversed so the
    # most frequent word appears at the top).
    plt.figure(figsize=(12, 6))
    plt.barh(
        y=top_words['word'].head(20)[::-1],
        width=top_words['count'].head(20)[::-1]
    )
    plt.xlabel('Count')
    plt.title('Top 20 frequent words in comments')
    plt.tight_layout()
    plt.show()