import sys
import os
import logging
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Logging setup: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Base URL of the target music site.
base_url = "https://music.2t58.com"

# Candidate paths where a video/MV listing page might live; probed in order.
video_page_candidates = [
    base_url + path
    for path in (
        "/video/list.html",
        "/mv/list.html",
        "/list/video.html",
        "/list/mv.html",
        "/video.html",
        "/mv.html",
    )
]

def get_page_content(url):
    """Fetch a page over HTTP(S) and return its decoded text.

    SSL certificate verification is deliberately disabled (``verify=False``)
    to work around the site's certificate mismatch; this trades away
    transport security and will emit an InsecureRequestWarning.

    Args:
        url: Absolute URL to fetch.

    Returns:
        The page text (re-decoded with the apparent encoding) on success,
        or ``None`` if the request fails or returns an HTTP error status.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    try:
        # Lazy %-style args avoid f-string work when the level is disabled.
        logger.info("尝试获取页面: %s", url)
        response = requests.get(url, headers=headers, timeout=30, verify=False)
        response.raise_for_status()
        # Site pages may lack a charset header; trust the sniffed encoding.
        response.encoding = response.apparent_encoding
        logger.info("成功获取页面: %s", url)
        return response.text
    except requests.RequestException as e:
        # Narrowed from bare Exception: only request-layer failures are
        # expected here (timeouts, connection errors, HTTP error statuses).
        logger.error("获取页面失败: %s, 错误: %s", url, e)
        return None

def find_video_links_in_homepage():
    """Scan the site homepage for links that look video/MV related.

    Returns:
        A list of ``(link_text, absolute_url)`` tuples, de-duplicated by
        URL; an empty list if the homepage cannot be fetched or parsed.

    Side effects:
        Writes the first 2000 characters of the homepage HTML to
        ``simple_homepage_sample.html`` for debugging.
    """
    content = get_page_content(base_url)
    if not content:
        return []

    # Save a snippet of the raw HTML for offline inspection.
    with open("simple_homepage_sample.html", "w", encoding="utf-8") as f:
        f.write(content[:2000])
    logger.info("已保存页面前2000个字符到 simple_homepage_sample.html")

    try:
        soup = BeautifulSoup(content, 'html.parser')
        video_categories = []
        # Set-based dedup: the original re-scanned the whole result list
        # for every link (accidental O(n^2)) and shadowed the `url` name.
        seen_urls = set()
        all_links = soup.find_all('a')
        logger.info(f"在首页找到 {len(all_links)} 个链接")

        for link in all_links:
            href = link.get('href', '')
            text = link.get_text().strip()
            title = link.get('title', '')

            # Keep links whose href/text/title mention video or MV keywords.
            if ('video' in href.lower() or '视频' in text
                    or 'video' in title.lower() or 'mv' in href.lower()
                    or 'MV' in text):
                full_url = urljoin(base_url, href)
                if full_url not in seen_urls:
                    seen_urls.add(full_url)
                    video_categories.append((text, full_url))
                    logger.info(f"找到视频相关链接: {text} - {full_url}")

        return video_categories
    except Exception as e:
        # Parsing is best-effort; any bs4 failure degrades to "no links".
        logger.error(f"解析首页失败: {e}")
        return []

def check_candidate_video_pages():
    """Probe the predefined candidate URLs for working video pages.

    Returns:
        The subset of ``video_page_candidates`` whose pages loaded and
        contain video/MV keywords.

    Side effects:
        Saves the first 1000 characters of each matching page to
        ``video_page_N.html`` (N = running count of matches).
    """
    valid_video_pages = []

    for url in video_page_candidates:
        content = get_page_content(url)
        if not content:
            continue

        # Crude relevance check: does the page mention video/MV at all?
        if '视频' in content or 'MV' in content.upper() or 'video' in content.lower():
            logger.info(f"找到有效的视频页面: {url}")
            valid_video_pages.append(url)

            # Save a snippet of the page for offline inspection.
            filename = f"video_page_{len(valid_video_pages)}.html"
            with open(filename, "w", encoding="utf-8") as f:
                f.write(content[:1000])

            # Bug fix: the original logged the literal placeholder
            # "(unknown)" instead of interpolating the saved filename.
            logger.info(f"已保存页面到 {filename}")

    return valid_video_pages

def main():
    """Drive the search: try candidate URLs first, then fall back to the homepage."""
    logger.info("开始查找视频相关页面...")

    # Step 1: probe the predefined candidate video pages.
    valid_video_pages = check_candidate_video_pages()
    if valid_video_pages:
        logger.info(f"\n成功找到 {len(valid_video_pages)} 个有效的视频页面:")
        for i, page in enumerate(valid_video_pages):
            logger.info(f"{i+1}. {page}")
        return

    logger.info("\n未找到预定义的视频页面，尝试从首页查找...")

    # Step 2: fall back to scanning the homepage for video links.
    video_categories = find_video_links_in_homepage()
    if not video_categories:
        logger.warning("\n未找到任何视频相关页面或链接")
        logger.info("\n请尝试手动检查网站结构，找到正确的视频页面URL")
        return

    logger.info(f"\n从首页找到 {len(video_categories)} 个视频相关链接:")
    for i, (text, url) in enumerate(video_categories):
        logger.info(f"{i+1}. {text} - {url}")

# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()