import requests
from lxml import html
import time
from datetime import datetime, timedelta
import csv
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

class YouTubeShortSpider:
    """Scrape YouTube Shorts video links for a given hashtag using headless Chrome.

    Lifecycle: construction starts a Chrome driver; `get_shorts_by_tag` scrolls
    the hashtag page collecting links; `save_to_csv` writes them out; `__del__`
    quits the driver and reports total elapsed time.
    """

    def __init__(self, use_proxy=False, proxy_url=None):
        """Start a headless Chrome WebDriver.

        Args:
            use_proxy: when True and `proxy_url` is given, route traffic
                through the proxy.
            proxy_url: proxy server URL, e.g. "socks5://127.0.0.1:7890".
        """
        self.start_time = time.time()  # overall run start; reported in __del__

        # Configure Chrome for headless, container-friendly operation.
        chrome_options = Options()
        if use_proxy and proxy_url:
            chrome_options.add_argument(f'--proxy-server={proxy_url}')
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-software-rasterizer')
        chrome_options.add_argument('--disable-webgl')
        chrome_options.add_argument('--ignore-gpu-blacklist')
        # Suppress noisy ChromeDriver logging on the console.
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])

        self.driver = webdriver.Chrome(options=chrome_options)

    def format_time_duration(self, seconds):
        """Format an elapsed duration (in seconds) as a human-readable string.

        Args:
            seconds: elapsed time in seconds (may be fractional; truncated).

        Returns:
            A string like "1小时2分钟3秒"; zero components are omitted,
            but "0秒" is returned for a zero duration.
        """
        # Work from the total second count directly: the previous
        # timedelta-based version read `.seconds`, which drops whole days
        # and under-reported any duration of 24 hours or more.
        total_seconds = int(seconds)
        hours, remainder = divmod(total_seconds, 3600)
        minutes, secs = divmod(remainder, 60)

        time_parts = []
        if hours > 0:
            time_parts.append(f"{hours}小时")
        if minutes > 0:
            time_parts.append(f"{minutes}分钟")
        if secs > 0 or not time_parts:
            time_parts.append(f"{secs}秒")

        return "".join(time_parts)

    def get_shorts_by_tag(self, tag, num_videos):
        """Collect up to `num_videos` Shorts URLs from a hashtag page.

        Scrolls the page repeatedly to trigger lazy loading, parsing the DOM
        with lxml after each scroll. Stops when enough links are collected,
        when 5 consecutive scrolls yield nothing new, or after 50 attempts.

        Args:
            tag: hashtag text without the leading '#'.
            num_videos: maximum number of links to collect.

        Returns:
            A list of at most `num_videos` absolute Shorts URLs
            (unordered, since they pass through a set).
        """
        phase_start_time = time.time()  # phase timer for progress reporting

        page_url = f'https://www.youtube.com/hashtag/{tag}/shorts'
        self.driver.get(page_url)
        video_urls = set()  # set to deduplicate links across scrolls

        max_attempts = 50  # hard cap on scroll iterations
        no_new_content_count = 0  # consecutive scrolls with no new links
        previous_count = 0  # link count after the previous scroll

        for attempt in range(max_attempts):
            # Scroll to the bottom so YouTube lazy-loads more results.
            self.driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
            time.sleep(2)  # crude wait for content to load

            # Parse the current DOM snapshot with lxml.
            page_source = self.driver.page_source
            tree = html.fromstring(page_source)

            # XPath targeting the Shorts lockup anchors on the hashtag page.
            # NOTE(review): tied to YouTube's current markup — breaks silently
            # if the element names change.
            new_urls = tree.xpath('//*[@id="content"]/ytm-shorts-lockup-view-model-v2/ytm-shorts-lockup-view-model/a/@href')

            for href in new_urls:
                # Skip empty hrefs and anything that is not a Shorts link.
                if not href or '/shorts/' not in href:
                    continue

                # Normalize relative hrefs to absolute URLs.
                if not href.startswith('http'):
                    href = 'https://www.youtube.com' + href

                video_urls.add(href)
                if len(video_urls) >= num_videos:
                    break

            current_count = len(video_urls)
            print(f"已获取 {current_count}/{num_videos} 个视频链接")

            # Track stagnation: bail out after 5 scrolls with nothing new.
            if current_count == previous_count:
                no_new_content_count += 1
                print(f"连续 {no_new_content_count} 次未获取到新内容")
                if no_new_content_count >= 5:
                    print("长时间未获取到新内容，停止爬取")
                    break
            else:
                no_new_content_count = 0  # progress made; reset the counter

            previous_count = current_count

            # Enough links collected — stop scrolling.
            if len(video_urls) >= num_videos:
                break

            # Report hitting the scroll cap (loop ends naturally after this).
            if attempt == max_attempts - 1:
                print(f"已达到最大尝试次数 {max_attempts}，停止爬取")

        phase_duration = time.time() - phase_start_time
        print(f"\n爬取视频链接耗时: {self.format_time_duration(phase_duration)}")

        return list(video_urls)[:num_videos]

    def save_to_csv(self, tag, video_urls):
        """Write the collected URLs to a timestamped CSV file.

        Args:
            tag: hashtag used in the output filename.
            video_urls: iterable of URL strings, one per CSV row.

        Errors while writing are caught and reported, not raised.
        """
        phase_start_time = time.time()  # phase timer for progress reporting

        timestamp = time.strftime("%Y%m%d%H%M%S")
        filename = f"shorts_{tag}_{timestamp}.csv"

        try:
            # utf-8-sig so Excel detects the encoding of the Chinese header.
            with open(filename, 'w', newline='', encoding='utf-8-sig') as f:
                writer = csv.writer(f)
                writer.writerow(['视频链接'])  # header row

                for url in video_urls:
                    writer.writerow([url])

            phase_duration = time.time() - phase_start_time
            print(f"保存文件耗时: {self.format_time_duration(phase_duration)}")
            # Fix: previously printed a literal placeholder instead of the
            # actual output filename.
            print(f"\n结果已保存到CSV文件: {filename}")

        except Exception as e:
            print(f"保存CSV文件时出错: {str(e)}")

    def __del__(self):
        """Quit the browser and report total elapsed time."""
        if hasattr(self, 'driver'):
            self.driver.quit()

        # Guard: if __init__ failed (e.g. ChromeDriver missing) before
        # start_time was assigned, don't raise a secondary error here.
        if hasattr(self, 'start_time'):
            total_duration = time.time() - self.start_time
            print(f"\n程序总耗时: {self.format_time_duration(total_duration)}")

def main():
    """Interactive entry point: prompt for a hashtag and a count, scrape, save."""
    print(f"程序开始执行时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

    # Proxy configuration is hard-coded here (not read from the environment,
    # despite what the old comment claimed) — flip USE_PROXY to enable it.
    USE_PROXY = False
    PROXY_URL = "socks5://127.0.0.1:7890"  # edit this if a proxy is needed

    spider = YouTubeShortSpider(
        use_proxy=USE_PROXY,
        proxy_url=PROXY_URL if USE_PROXY else None
    )

    # Gather user input; validate the count instead of crashing with a
    # traceback on non-numeric or non-positive values.
    tag = input("请输入要搜索的标签(不需要输入#): ").strip()
    try:
        num_videos = int(input("请输入要获取的视频数量: ").strip())
    except ValueError:
        print("视频数量必须是整数")
        return
    if num_videos <= 0:
        print("视频数量必须是正整数")
        return

    print(f"\n标签页面链接: https://www.youtube.com/hashtag/{tag}/shorts")
    print(f"\n正在获取 #{tag} 标签下的短视频链接...")

    # Scrape the links for the requested hashtag.
    video_urls = spider.get_shorts_by_tag(tag, num_videos)

    if not video_urls:
        print("未找到相关视频")
        return

    # Persist the results to a timestamped CSV.
    spider.save_to_csv(tag, video_urls)

# Run the interactive scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()