import pandas as pd
import re
from playwright.sync_api import sync_playwright
import os

# Regex that extracts the video id from a thumbnail URL such as
# https://i.ytimg.com/vi/<VIDEO_ID>/hqdefault.jpg
pattern = re.compile(r'vi/([^/]+)')

# Resolve input/output paths relative to this script's directory so the
# script works regardless of the current working directory.
dir_path = os.path.dirname(__file__)
input_file = os.path.join(dir_path, 'youtube_channel_301_400.xlsx')
output_file = os.path.join(dir_path, 'video_ids.csv')

# Load the channel list from the first sheet of the workbook.
# Assumes a column named 'Channel' holding channel-page URLs — TODO confirm.
df = pd.read_excel(input_file, sheet_name=0)

# Resume support: collect channel URLs already present in the output CSV
# so a re-run skips them instead of scraping everything again.
if os.path.exists(output_file):
    processed_df = pd.read_csv(output_file)
    if 'channel' in processed_df.columns:
        processed_channels = set(processed_df['channel'].dropna().tolist())
    else:
        processed_channels = set()
else:
    processed_channels = set()

# Keep only channels not yet scraped. dropna() prevents NaN cells from the
# Excel column reaching page.goto() later (the progress print below already
# counted with dropna(), so the numbers now agree with the work list).
urls_to_process = [url for url in df['Channel'].dropna() if url not in processed_channels]
print(f"""一共 {len(df['Channel'].dropna().tolist())} 个频道，已获取 {len(processed_channels)} 个频道，还需获取 {len(urls_to_process)} 个频道""")
# NOTE(review): a debug `exit()` used to sit here and aborted the script
# before any scraping ran; it has been removed.

def run(playwright, urls):
    """Scrape each channel page in *urls* and append its video ids to the CSV.

    For every URL: open a page, scroll until the lazily-loaded thumbnail
    count stops growing, extract the video id from each thumbnail's ``src``,
    and persist that channel's rows via ``save_results``.

    Args:
        playwright: An active ``sync_playwright`` context.
        urls: Iterable of channel-page URLs still to be scraped.
    """
    # Thumbnail <img> elements whose src embeds the video id.
    # Loop-invariant, so hoisted out of the per-URL loop.
    xpath_locator = '//*[@id="thumbnail"]/yt-image/img[contains(@src, "https://i.ytimg.com/vi/")]'

    browser = playwright.chromium.launch(headless=True)
    try:
        for url in urls:
            page = browser.new_page()
            try:
                page.goto(url)
                page.wait_for_load_state('networkidle')

                # Scroll in large steps until the number of thumbnails stops
                # increasing, i.e. the infinite-scroll page is exhausted.
                y = 1000
                previous_element_count = 0
                while True:
                    page.evaluate(f"window.scrollTo(0, {y})")
                    y += 10000
                    page.wait_for_timeout(2 * 1000)

                    try:
                        page.wait_for_selector(xpath_locator, timeout=5000)
                    except Exception as e:
                        # Selector never appeared (empty channel / layout
                        # change): stop scrolling, keep whatever we have.
                        print(f"等待选择器时出错: {e}")
                        break

                    current_element_count = len(page.query_selector_all(xpath_locator))
                    if current_element_count == previous_element_count:
                        break  # no growth -> reached the bottom
                    previous_element_count = current_element_count

                # Extract the video id from each thumbnail's src attribute.
                video_ids = []
                for img in page.query_selector_all(xpath_locator):
                    try:
                        src = img.get_attribute('src')
                        match = pattern.search(src)
                        if match:
                            video_ids.append(match.group(1))
                    except Exception as e:
                        print(f"解析图片链接时出错: {e}")

                # Persist only THIS channel's rows. The previous version
                # accumulated every channel in one list and re-appended the
                # whole list each iteration, duplicating earlier channels'
                # rows in the CSV.
                save_results(
                    [{'channel': url, 'video_id': video_id} for video_id in video_ids],
                    output_file,
                )
            finally:
                # Close the page so long runs don't leak one page per channel.
                page.close()
    finally:
        browser.close()

def save_results(results, output_file):
    """Append the scraped rows to *output_file* as CSV.

    Creates the file with a header row on the first write; subsequent calls
    append without repeating the header. Uses utf-8-sig so the file opens
    cleanly in Excel.

    Args:
        results: List of dicts with 'channel' and 'video_id' keys.
        output_file: Path of the target CSV file.
    """
    frame = pd.DataFrame(results)
    already_exists = os.path.exists(output_file)
    frame.to_csv(
        output_file,
        mode='a' if already_exists else 'w',
        index=False,
        encoding='utf-8-sig',
        header=not already_exists,
    )
    print(f"结果已保存到 {output_file}, 共 {len(results)} 个视频id")

# Launch Playwright and scrape all channels that are not yet in the CSV.
with sync_playwright() as pw:
    run(pw, urls_to_process)

print(f"结果已保存到 {output_file}")