import sys
import os
import logging
import time
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import requests

# Add the project root directory to the Python path so `src` is importable
# when this script is run directly from its own directory.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.crawler import MusicCrawler

# Configure logging: timestamped INFO-level messages on the root logger.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def main():
    """Discover video-related category links on the crawl target's homepage.

    Flow:
      1. Fetch the homepage (Selenium first, plain ``requests`` as fallback).
      2. Save the first 2000 characters of the page for offline debugging.
      3. Look for video links — first inside the known ``.ilingku_fl``
         category container, then across every anchor on the page.
      4. Visit the first discovered video link and log a rough sketch of
         its list structure (ul/li/img/a) to guide selector design.

    Returns None; all output goes to the logger and two sample HTML files.
    """
    crawler = MusicCrawler()

    logger.info("解析首页查找视频分类链接")

    # Try Selenium first: the site may render its navigation via JavaScript.
    homepage_content = None
    try:
        logger.info("尝试使用Selenium获取首页...")
        homepage_content = crawler.get_page(crawler.base_url, use_selenium=True)
    except Exception as e:
        logger.warning(f"Selenium获取首页失败: {e}")

    # Fall back to a plain HTTP GET if Selenium failed or returned nothing.
    if not homepage_content:
        try:
            logger.info("尝试使用requests获取首页...")
            response = requests.get(crawler.base_url, timeout=30)
            response.raise_for_status()
            # Let requests guess the charset; many Chinese sites serve GBK.
            response.encoding = response.apparent_encoding
            homepage_content = response.text
        except Exception as e:
            logger.error(f"requests获取首页失败: {e}")
            return

    logger.info("成功获取首页内容")

    # Persist a snippet of the page for manual inspection.
    with open("homepage_sample.html", "w", encoding="utf-8") as f:
        f.write(homepage_content[:2000])

    logger.info("已保存页面前2000个字符到 homepage_sample.html")

    try:
        soup = BeautifulSoup(homepage_content, 'html.parser')

        logger.info("查找包含'video'或'视频'关键词的链接")

        # BUG FIX: initialize before both search passes. Previously this
        # name was only bound inside the `if category_container:` branch,
        # so a missing container raised NameError at the `if not
        # video_categories:` check below.
        video_categories = []

        # First pass: the category container selector that worked before.
        category_container = soup.select_one('.ilingku_fl')

        if category_container:
            logger.info("找到分类容器，查找视频相关分类")
            for link in category_container.find_all('a'):
                href = link.get('href', '')
                text = link.get_text().strip()

                # Keep links whose URL or label suggests video content.
                if 'video' in href.lower() or '视频' in text:
                    full_url = urljoin(crawler.base_url, href)
                    video_categories.append((text, full_url))
                    logger.info(f"找到视频分类: {text} - {full_url}")

        # Second pass: if the container yielded nothing, scan every anchor.
        if not video_categories:
            logger.info("在全页面查找视频相关链接")
            seen_urls = set()  # O(1) duplicate check instead of scanning the list

            for link in soup.find_all('a'):
                href = link.get('href', '')
                text = link.get_text().strip()
                title = link.get('title', '')

                # Broader keyword match: href, visible text, or title attr.
                if ('video' in href.lower() or '视频' in text or 'video' in title.lower() or 'mv' in href.lower() or 'MV' in text):
                    full_url = urljoin(crawler.base_url, href)
                    # Avoid adding the same resolved URL twice.
                    if full_url not in seen_urls:
                        seen_urls.add(full_url)
                        video_categories.append((text, full_url))
                        logger.info(f"找到视频相关链接: {text} - {full_url}")

        # If any video links were found, probe the first one.
        if video_categories:
            logger.info(f"总共找到 {len(video_categories)} 个视频相关链接")

            first_video_text, first_video_url = video_categories[0]
            logger.info(f"\n测试访问第一个视频链接: {first_video_text} - {first_video_url}")

            video_page_content = crawler.get_page(first_video_url, use_selenium=True)

            if video_page_content:
                # Save a snippet of the video page for inspection.
                with open("first_video_page_sample.html", "w", encoding="utf-8") as f:
                    f.write(video_page_content[:1000])

                logger.info("已保存第一个视频页面的前1000个字符到 first_video_page_sample.html")

                # Parse only the head of the page for a quick structure check.
                soup_video = BeautifulSoup(video_page_content[:5000], 'html.parser')

                logger.info("检查页面中是否存在视频列表结构")

                page_title = soup_video.title.get_text() if soup_video.title else ""
                logger.info(f"页面标题: {page_title}")

                # Count ul elements — candidate containers for video lists.
                all_ul = soup_video.find_all('ul')
                logger.info(f"页面中找到 {len(all_ul)} 个ul元素")

                # Sketch the first 3 ul elements: li counts and whether any
                # li carries the img+a combination typical of a video card.
                for i, ul in enumerate(all_ul[:3]):
                    li_items = ul.find_all('li')
                    logger.info(f"ul {i+1}: 包含 {len(li_items)} 个li元素")

                    for li in li_items[:2]:
                        if li.find('img') and li.find('a'):
                            logger.info(f"  找到包含图片和链接的li元素")
                            break
        else:
            logger.warning("未找到任何视频相关的分类或链接")

    except Exception as e:
        logger.error(f"查找视频分类失败: {e}")
        import traceback
        traceback.print_exc()

# Script entry point: only run the crawl when executed directly.
if __name__ == "__main__":
    main()