# 音乐网站爬虫核心模块

import os
import sys
import re  # 添加re模块导入
# 添加Selenium相关导入
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import requests
from bs4 import BeautifulSoup
import time
import random
from urllib.parse import urljoin
from datetime import datetime
from config import settings
from utils import setup_logger, get_random_user_agent, retry_on_error, write_log_to_file, get_song_id_from_url, parse_page_number
from db_manager import DBManager
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.core.utils import ChromeType

logger = setup_logger('crawler')


class MusicCrawler:
    """音乐网站爬虫类"""
    def __init__(self):
        """Initialize crawler state: settings, DB handle, HTTP session and Selenium placeholders."""
        # Static configuration pulled from project settings.
        self.base_url = settings.BASE_URL
        self.headers = settings.HEADERS.copy()
        self.timeout = settings.TIMEOUT
        self.delay = settings.DELAY

        # Selenium tuning: explicit-wait timeout and headless toggle.
        self.wait_timeout = 10
        self.headless = True

        # Persistence layer.
        self.db_manager = DBManager()

        # Shared requests session carrying the configured default headers.
        session = requests.Session()
        session.headers.update(self.headers)
        self.session = session

        # URLs already fetched during this run, used to skip duplicates.
        self.crawled_urls = set()

        # Lazily-created Selenium driver and its WebDriverWait helper.
        self.driver = None
        self.wait = None

    def set_random_user_agent(self):
        """Rotate the shared session's User-Agent header to a fresh random value."""
        ua = get_random_user_agent()
        self.session.headers['User-Agent'] = ua
        
    def setup_driver(self):
        """Create and configure a headless-capable Chrome WebDriver.

        Tries, in order: letting Selenium locate a ChromeDriver on its own,
        then a set of well-known ChromeDriver install paths.  On success the
        driver and a WebDriverWait are stored on self and True is returned;
        on any failure the error is logged and False is returned (this method
        never raises to the caller).
        """
        try:
            chrome_options = Options()
            if self.headless:
                chrome_options.add_argument("--headless")
            chrome_options.add_argument("--no-sandbox")
            chrome_options.add_argument("--disable-dev-shm-usage")
            chrome_options.add_argument("--disable-gpu")
            chrome_options.add_argument("--window-size=1920,1080")
            # Reduce the obvious automation fingerprints.
            chrome_options.add_argument("--disable-blink-features=AutomationControlled")
            chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
            chrome_options.add_experimental_option('useAutomationExtension', False)

            # Ignore SSL errors and relax HTTPS restrictions for the target site.
            chrome_options.add_argument("--ignore-certificate-errors")
            chrome_options.add_argument("--allow-insecure-localhost")
            chrome_options.add_argument("--unsafely-treat-insecure-origin-as-secure=http://music.2t58.com")
            chrome_options.add_argument("--disable-web-security")

            # Prefer Chrome for Testing when installed (macOS app paths).
            chrome_binary = None
            if os.path.exists("/Applications/Google Chrome for Testing.app/Contents/MacOS/Google Chrome for Testing"):
                chrome_binary = "/Applications/Google Chrome for Testing.app/Contents/MacOS/Google Chrome for Testing"
                logger.info("使用Chrome for Testing")
                # Chrome for Testing generally pairs better with recent drivers.
                chrome_options.add_argument("--remote-debugging-port=9222")
            elif os.path.exists("/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"):
                chrome_binary = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
                logger.info("使用Google Chrome")

            if chrome_binary:
                chrome_options.binary_location = chrome_binary

            driver = None

            # Attempt 1: let Selenium locate/manage a ChromeDriver itself.
            try:
                logger.info("尝试使用系统ChromeDriver...")
                driver = webdriver.Chrome(options=chrome_options)
                logger.info("使用系统ChromeDriver成功")
            except Exception as e2:
                logger.warning(f"系统ChromeDriver方式失败: {e2}")

                # Attempt 2: probe well-known ChromeDriver install paths.
                try:
                    logger.info("尝试指定ChromeDriver路径...")
                    possible_paths = [
                        "/usr/local/bin/chromedriver",
                        "/usr/bin/chromedriver",
                        os.path.expanduser("~/chromedriver"),
                    ]

                    for path in possible_paths:
                        if os.path.exists(path) and os.access(path, os.X_OK):
                            logger.info(f"找到可用的ChromeDriver: {path}")
                            service = Service(path)
                            driver = webdriver.Chrome(service=service, options=chrome_options)
                            logger.info("使用指定路径的ChromeDriver成功")
                            break
                    else:
                        # for/else: no candidate path was executable.
                        raise Exception("未找到可用的ChromeDriver")

                except Exception as e3:
                    # BUG FIX: this branch previously logged an undefined name
                    # `e` (its binding lived in commented-out code), raising a
                    # NameError that masked the real failure details.
                    logger.error("所有ChromeDriver启动方式都失败")
                    logger.error(f"错误1: {e2}")
                    logger.error(f"错误2: {e3}")
                    raise Exception("无法启动ChromeDriver")

            if driver:
                self.driver = driver
                # Explicit-wait helper bound to the configured timeout.
                self.wait = WebDriverWait(self.driver, self.wait_timeout)

                # Hide navigator.webdriver from basic bot-detection scripts.
                self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

                logger.info("Selenium WebDriver设置完成")
                return True
            else:
                raise Exception("ChromeDriver启动失败")

        except Exception as e:
            logger.error(f"WebDriver设置失败: {e}")
            return False

    @retry_on_error()
    def get_page(self, url, use_selenium=False):
        """Fetch a page and return its HTML text, or None on failure.

        Args:
            url: absolute URL to fetch.
            use_selenium: when True, render the page with a temporary headless
                Chrome (for JavaScript-generated content) instead of requests.

        BUG FIX: the URL is now recorded in self.crawled_urls only AFTER a
        successful fetch.  Previously it was added before fetching, so when a
        fetch failed every @retry_on_error retry hit the duplicate check and
        returned None immediately, defeating the retry decorator.
        """
        # Skip URLs that were already fetched successfully in this run.
        if url in self.crawled_urls:
            logger.info(f"URL已爬取过，跳过: {url}")
            return None

        try:
            # Rotate the User-Agent on every request.
            self.set_random_user_agent()

            logger.info(f"正在获取页面: {url}")

            if use_selenium:
                # JavaScript-rendered pages: drive a short-lived browser.
                page_source = None
                try:
                    if self.setup_driver():
                        try:
                            self.driver.get(url)
                            time.sleep(self.delay * 2)  # give JavaScript time to run
                            page_source = self.driver.page_source
                        finally:
                            # Always release the browser, even on errors.
                            if self.driver:
                                self.driver.quit()
                                self.driver = None
                                self.wait = None
                    else:
                        logger.error("WebDriver设置失败，无法获取页面")
                except Exception as e:
                    logger.error(f"使用Selenium获取页面失败: {e}")
                    if self.driver:
                        self.driver.quit()
                        self.driver = None
                        self.wait = None

                if page_source is not None:
                    # Mark as crawled only once we actually obtained content.
                    self.crawled_urls.add(url)
                return page_source
            else:
                # Static pages: plain requests via the shared session.
                response = self.session.get(url, timeout=self.timeout)
                response.raise_for_status()
                response.encoding = response.apparent_encoding  # pick the real encoding
                self.crawled_urls.add(url)
                return response.text
        except Exception as e:
            logger.error(f"获取页面失败: {url}, 错误: {e}")
            write_log_to_file(f"获取页面失败: {url}, 错误: {e}", "ERROR")
            return None

    def parse_homepage(self, force_reload=False):
        """Parse the homepage into category links and hot recommendations.

        Args:
            force_reload: when True, first remove the homepage URL from the
                already-crawled set so get_page() will fetch it again.

        Returns:
            dict with keys 'categories' (list of {'name', 'url'}) and
            'hot_recommendations' (list of {'title', 'url'}), or None when
            the page could not be fetched or parsed.
        """
        url = self.base_url

        # Force a re-fetch by forgetting that this URL was already crawled.
        if force_reload and url in self.crawled_urls:
            self.crawled_urls.remove(url)

        # Use Selenium because the site may render content via JavaScript.
        page_content = self.get_page(url, use_selenium=True)

        if not page_content:
            return None

        try:
            soup = BeautifulSoup(page_content, 'html.parser')

            # Collected category links.
            categories = []

            # Per the observed structure of music.2t58.com/list/new.html,
            # category links live in <li> tags under div.ilingku_fl.
            category_container = soup.select_one('div.ilingku_fl')
            if category_container:
                logger.info("找到div.ilingku_fl元素")
                category_elements = category_container.select('li a')
                logger.info(f"在div.ilingku_fl下找到 {len(category_elements)} 个分类链接")

                for element in category_elements:
                    category_name = element.get_text().strip()
                    href = element.get('href', '')
                    category_url = urljoin(self.base_url, href)

                    # Normalize the parent's class attribute to a list; it is
                    # only used for the debug log below.
                    parent_class = element.parent.get('class', [])
                    if isinstance(parent_class, str):
                        parent_class = [parent_class]

                    # Keep filtering minimal: only drop empty names/hrefs.
                    if category_name and href:
                        logger.debug(f"处理分类: {category_name}, URL: {href}, 父元素class: {parent_class}")

                        # Mobile-only categories are intentionally kept (they
                        # proved useful in testing); only the link pointing at
                        # the current page itself is skipped.
                        if category_name != '网络最新榜':
                            categories.append({
                                'name': category_name,
                                'url': category_url
                            })
                            logger.debug(f"添加分类: {category_name}")
            else:
                logger.info("未找到div.ilingku_fl元素，尝试备用选择器")
                # Fallback selectors for alternative page layouts.
                category_elements = soup.select('div.nav a') or soup.select('ul.category li a')

                for element in category_elements:
                    category_name = element.get_text().strip()
                    category_url = urljoin(self.base_url, element.get('href', ''))

                    # Keep only non-empty, same-site links.
                    if category_name and category_url.startswith(self.base_url):
                        categories.append({
                            'name': category_name,
                            'url': category_url
                        })

            # Collected hot-recommendation links.
            hot_recommendations = []

            # NOTE(review): these selectors look speculative — confirm they
            # match the live site's markup for the hot-songs section.
            hot_song_elements = soup.select('div.hot-songs li a') or soup.select('div.recommend li a')

            for element in hot_song_elements:
                song_title = element.get_text().strip()
                song_url = urljoin(self.base_url, element.get('href', ''))

                # Keep only non-empty, same-site links.
                if song_title and song_url.startswith(self.base_url):
                    hot_recommendations.append({
                        'title': song_title,
                        'url': song_url
                    })

            return {
                'categories': categories,
                'hot_recommendations': hot_recommendations
            }
        except Exception as e:
            logger.error(f"解析首页失败: {e}")
            return None

    def parse_category_page(self, url):
        """Parse one category listing page into song dicts plus a next-page URL.

        For every song row this also fetches and parses its detail page via
        parse_song_detail() to resolve the real media URL — i.e. one extra
        Selenium round-trip per song.

        Args:
            url: category page URL.

        Returns:
            (songs, next_page_url): songs is a list of song-data dicts (all
            with status 'pending'); next_page_url is the absolute URL of the
            next listing page or None when there is none.  Returns ([], None)
            when the page cannot be fetched or parsed.
        """
        page_content = self.get_page(url)

        if not page_content:
            return [], None

        try:
            soup = BeautifulSoup(page_content, 'html.parser')

            # Collected song records for this page.
            songs = []

            # Song rows; selectors match the observed site structure.
            song_elements = soup.select('.play_list li .name') or soup.select('.ilingkuplay_list li .name')

            for element in song_elements:
                try:
                    # Expected row markup:
                    # <div class="name"><a href="/song/....html" target="_mp3"
                    #   title="Artist - Title MP3 download">Artist - Title</a></div>
                    a_element = element.select_one('a')

                    if not a_element:
                        continue

                    # The anchor text is "artist - title"; split on the FIRST
                    # " - " only, so titles containing " - " stay intact.
                    full_text = a_element.get_text().strip()
                    if ' - ' in full_text:
                        artist, title = full_text.split(' - ', 1)
                    else:
                        artist = '未知歌手'
                        title = full_text

                    # URL of the song's detail page (not the media file itself).
                    song_page_url = urljoin(self.base_url, a_element.get('href', ''))

                    # Resolve the real (JavaScript-loaded) media URL from the
                    # detail page.
                    logger.info(f"正在解析歌曲详情页: {song_page_url}")
                    song_detail = self.parse_song_detail(song_page_url)

                    if song_detail:
                        # Prefer the richer data from the detail page.
                        download_url = song_detail.get('url', '')

                        song_id = song_detail.get('song_id', '')
                        album = song_detail.get('album', '')
                        lyrics = song_detail.get('lyrics', '')
                        cover_url = song_detail.get('cover_url', '')
                    else:
                        # Detail parse failed: fall back to list-page info only.
                        song_id = get_song_id_from_url(song_page_url)
                        if not song_id:
                            song_id = f"{title}_{artist}".replace(' ', '_').lower()

                        download_url = ''
                        album = ''
                        lyrics = ''
                        cover_url = ''

                    song_data = {
                        'song_id': song_id,
                        'title': title,
                        'artist': artist,
                        'album': album,
                        'url': download_url,  # resolved media URL
                        'source_url': song_page_url,  # song detail-page URL
                        'lyrics': lyrics,
                        'cover_url': cover_url,
                        'status': 'pending'
                    }

                    songs.append(song_data)
                except Exception as e:
                    # One malformed row must not abort the whole page.
                    logger.warning(f"解析歌曲信息失败: {e}")
                    continue

            # Locate the next-page link.
            next_page_url = None

            # Preferred: the pager anchor whose text is exactly "下一页".
            next_page_element = None
            for a in soup.select('.page a'):
                if a.get_text().strip() == '下一页':
                    next_page_element = a
                    break

            # Fall back to legacy pager classes.
            if not next_page_element:
                next_page_element = soup.select_one('a.next-page') or soup.select_one('a.npage')

            if next_page_element:
                next_page_url = urljoin(self.base_url, next_page_element.get('href', ''))

                # Only follow links that stay on this site.
                if not next_page_url.startswith(self.base_url):
                    next_page_url = None

            return songs, next_page_url
        except Exception as e:
            logger.error(f"解析分类页面失败: {url}, 错误: {e}")
            return [], None

    def parse_song_detail(self, url):
        """Parse a song detail page (JavaScript-rendered) into a song dict.

        Extracts title/artist, album, duration, the real media URL, lyrics
        and cover image.

        Args:
            url: song detail-page URL.

        Returns:
            song-data dict with status 'pending', or None when the page
            cannot be fetched or parsed.
        """
        # Detail pages load the player via JavaScript, so render with Selenium.
        page_content = self.get_page(url, use_selenium=True)

        if not page_content:
            return None

        try:
            soup = BeautifulSoup(page_content, 'html.parser')

            # 1. Title and artist: shown as "artist - title" in div.djname h1.
            title_artist_text = ''
            title_artist_element = soup.select_one('div.djname h1')
            if title_artist_element:
                title_artist_text = title_artist_element.get_text().strip()
                # Strip the trailing "refresh" button caption if present.
                if '刷新' in title_artist_text:
                    title_artist_text = title_artist_text.split('刷新')[0].strip()

            # Split on the first " - " only, preserving dashes inside titles.
            if ' - ' in title_artist_text:
                artist, title = title_artist_text.split(' - ', 1)
            else:
                title = title_artist_text or '未知歌曲'
                artist = '未知歌手'

            # BUG FIX: this diagnostic used to go through a bare print();
            # route it through the module logger at DEBUG level instead.
            logger.debug(f"解析歌曲详情页: {url},title_artist_element {title_artist_element},title_artist_text {title_artist_text}, 标题: {title}, 歌手: {artist}")

            # 2. Album: the first lyric line sometimes carries "(album)".
            album = '未知专辑'
            lrc_list = soup.select_one('ul#lrc_list')
            if lrc_list:
                first_lyric_line = lrc_list.find('li')
                if first_lyric_line and first_lyric_line.get_text().strip():
                    first_line_text = first_lyric_line.get_text().strip()
                    if '(' in first_line_text and ')' in first_line_text:
                        album_match = re.search(r'\((.*?)\)', first_line_text)
                        if album_match:
                            album = album_match.group(1)

            # 3. Duration from the jPlayer widget, when rendered.
            duration = soup.select_one('span.jp-duration').get_text().strip() if soup.select_one('span.jp-duration') else ''

            # 4. Media URL, in decreasing order of preference:
            #    <audio id="jp_audio_0"> -> any <audio> -> <source> ->
            #    mp3/m4a/ogg URL embedded in a <script> -> legacy download anchor.
            download_url = ''
            audio_element = soup.select_one('audio#jp_audio_0')
            if audio_element and audio_element.get('src'):
                download_url = audio_element.get('src')
            else:
                audio_element = soup.select_one('audio')
                if audio_element and audio_element.get('src'):
                    download_url = audio_element.get('src')
                else:
                    source_element = soup.select_one('source')
                    if source_element and source_element.get('src'):
                        download_url = source_element.get('src')
                    else:
                        script_elements = soup.find_all('script')
                        for script in script_elements:
                            script_text = script.string
                            if script_text:
                                # Look for a direct media-file URL in the script body.
                                match = re.search(r'https?://[^"\']+\.(mp3|m4a|ogg)', script_text)
                                if match:
                                    download_url = match.group()
                                    break
                        else:
                            # for/else: no script contained a media URL; try
                            # the old-style download anchors.
                            download_element = soup.select_one('a.download-btn') or soup.select_one('a.down-mp3')
                            download_url = urljoin(self.base_url, download_element.get('href', '')) if download_element else ''

            # Make relative media URLs absolute.
            if download_url and not download_url.startswith('http'):
                download_url = urljoin(self.base_url, download_url)

            # 5. Lyrics: every <li> in ul#lrc_list, minus writer/composer credits.
            lyrics = ''
            if lrc_list:
                lyric_lines = []
                for li in lrc_list.find_all('li'):
                    line_text = li.get_text().strip()
                    if line_text and not line_text.startswith('词：') and not line_text.startswith('曲：') and not line_text.startswith('编曲：'):
                        lyric_lines.append(line_text)
                lyrics = '\n'.join(lyric_lines)
            else:
                # Fallback for alternative layouts.
                lyrics_element = soup.select_one('div.lyrics') or soup.select_one('div.lyric-content')
                if lyrics_element:
                    lyrics = lyrics_element.get_text().strip()

            # 6. Cover image, preferring img#mcover.
            cover_url = ''
            cover_element = soup.select_one('img#mcover')
            if cover_element and cover_element.get('src'):
                cover_url = urljoin(self.base_url, cover_element.get('src'))
            else:
                cover_element = soup.select_one('img.cover') or soup.select_one('div.album-cover img')
                if cover_element and cover_element.get('src'):
                    cover_url = urljoin(self.base_url, cover_element.get('src'))

            # Song id from the page URL, then the media URL, then a slug
            # derived from title/artist as a last resort.
            song_id = get_song_id_from_url(url) or get_song_id_from_url(download_url)

            if not song_id:
                song_id = f"{title}_{artist}".replace(' ', '_').lower()

            song_data = {
                'song_id': song_id,
                'title': title,
                'artist': artist,
                'album': album,
                'duration': duration,
                'url': download_url,
                'source_url': url,
                'lyrics': lyrics,
                'cover_url': cover_url,
                'status': 'pending'
            }
            logger.info(f"成功解析歌曲: {title} - {artist}")
            return song_data
        except Exception as e:
            logger.error(f"解析歌曲详情失败: {url}, 错误: {e}")
            return None

    def crawl_media_category(self, category_url, media_type='audio', max_pages=10):
        """Walk a media category's listing pages and store basic item records.

        Args:
            category_url: URL of the first listing page.
            media_type: 'audio' uses parse_category_page/insert_song; any
                other value uses parse_video_list/insert_video.
            max_pages: hard cap on listing pages to walk.

        Returns:
            Number of NEW items inserted (existing items are skipped and not
            counted).  A crawl record with status 'success'/'failed' is
            always written to the database.
        """
        if not category_url:
            logger.error(f"{media_type}分类URL为空")
            return 0

        logger.info(f"开始爬取{media_type}分类: {category_url}")
        write_log_to_file(f"开始爬取{media_type}分类: {category_url}", "INFO")

        current_url = category_url
        page_count = 0
        total_media = 0

        start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        try:
            while current_url and page_count < max_pages:
                # Stop once pagination loops back onto an already-crawled page.
                # (A second, identical membership check used to follow this
                # break; it was unreachable dead code and has been removed.)
                if current_url in self.crawled_urls:
                    logger.info(f"URL已爬取过，跳过: {current_url}")
                    break

                # Pick the parser and DB insert matching the media type.
                if media_type == 'audio':
                    media_items, next_page_url = self.parse_category_page(current_url)
                    insert_method = self.db_manager.insert_song
                else:
                    media_items, next_page_url = self.parse_video_list(current_url)
                    insert_method = self.db_manager.insert_video

                if media_items:
                    for media in media_items:
                        # Ensure the media_type field is populated.
                        if 'media_type' not in media:
                            media['media_type'] = media_type

                        # Insert only items not yet in the database.
                        if media_type == 'audio':
                            exists = self.db_manager.check_song_exists(media.get('song_id'))
                        else:
                            exists = self.db_manager.check_video_exists(media.get('video_id'))

                        if not exists:
                            insert_method(media)
                            total_media += 1
                        else:
                            logger.info(f"{media_type}已存在，跳过")

                    logger.info(f"已爬取第 {page_count + 1} 页，本页 {len(media_items)} 个{media_type}，总计 {total_media} 个{media_type}")

                # Advance to the next listing page.
                current_url = next_page_url
                page_count += 1

                # Randomized politeness delay to avoid getting banned.
                time.sleep(self.delay + random.uniform(0, self.delay))

            # Record the successful crawl.
            end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            self.db_manager.insert_crawl_record(
                category_url,
                'success',
                start_time,
                end_time,
                total_media
            )

            logger.info(f"{media_type}分类爬取完成: {category_url}, 共爬取 {page_count} 页，{total_media} 个{media_type}")
            write_log_to_file(f"{media_type}分类爬取完成: {category_url}, 共爬取 {page_count} 页，{total_media} 个{media_type}", "INFO")

            return total_media
        except Exception as e:
            logger.error(f"{media_type}分类爬取失败: {category_url}, 错误: {e}")
            write_log_to_file(f"{media_type}分类爬取失败: {category_url}, 错误: {e}", "ERROR")

            # Record the failed crawl, keeping the partial item count.
            end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            self.db_manager.insert_crawl_record(
                category_url,
                'failed',
                start_time,
                end_time,
                total_media,
                str(e)
            )

            return total_media
    
    def crawl_category(self, category_url, max_pages=10):
        """Legacy wrapper kept for old callers: crawl an audio category."""
        # Delegates straight to the generic media crawler with type 'audio'.
        return self.crawl_media_category(category_url, media_type='audio', max_pages=max_pages)

    def crawl_all_categories(self, max_pages=5):
        """Crawl every category discovered on the homepage.

        Args:
            max_pages: page cap passed through to each category crawl.

        Returns:
            Total number of songs collected across all categories (0 when
            the homepage/category list cannot be obtained).
        """
        logger.info("开始爬取所有分类")
        write_log_to_file("开始爬取所有分类", "INFO")

        # Discover the category list from the homepage.
        homepage_data = self.parse_homepage()

        if not homepage_data or not homepage_data.get('categories'):
            logger.error("未能获取分类信息")
            return 0

        total_songs = 0

        for category in homepage_data['categories']:
            category_url = category.get('url')
            category_name = category.get('name')

            # Skip entries without a usable URL.
            if not category_url:
                continue

            logger.info(f"开始爬取分类: {category_name}")
            total_songs += self.crawl_category(category_url, max_pages)

            # Pause between categories to stay polite.
            time.sleep(self.delay * 2)

        logger.info(f"所有分类爬取完成，共爬取 {total_songs} 首歌曲")
        write_log_to_file(f"所有分类爬取完成，共爬取 {total_songs} 首歌曲", "INFO")

        return total_songs

    def search_songs(self, keyword, max_pages=3):
        """Search the site for songs and store new results in the database.

        Args:
            keyword: search term; URL-encoded into the site's search URL.
            max_pages: maximum number of result pages to walk.

        Returns:
            Number of NEW songs inserted.  CONSISTENCY FIX: previously this
            method inserted every parsed song unconditionally; it now skips
            songs already in the database via check_song_exists, matching
            crawl_media_category's behavior.
        """
        logger.info(f"开始搜索歌曲: {keyword}")
        write_log_to_file(f"开始搜索歌曲: {keyword}", "INFO")

        # NOTE(review): the search endpoint is assumed to be
        # {base_url}search.php?keyword=... — verify against the live site.
        search_url = f"{self.base_url}search.php?keyword={requests.utils.quote(keyword)}"

        current_url = search_url
        page_count = 0
        total_songs = 0

        start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        try:
            while current_url and page_count < max_pages:
                # Result pages share the category-listing layout.
                songs, next_page_url = self.parse_category_page(current_url)

                if songs:
                    for song in songs:
                        # Skip songs already stored (mirrors crawl_media_category).
                        if self.db_manager.check_song_exists(song.get('song_id')):
                            logger.info("歌曲已存在，跳过")
                            continue
                        self.db_manager.insert_song(song)
                        total_songs += 1

                    logger.info(f"搜索结果第 {page_count + 1} 页，本页 {len(songs)} 首歌曲，累计 {total_songs} 首歌曲")

                # Advance to the next result page.
                current_url = next_page_url
                page_count += 1

                # Randomized politeness delay.
                time.sleep(self.delay + random.uniform(0, self.delay))

            # Record the successful search crawl.
            end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            self.db_manager.insert_crawl_record(
                search_url,
                'success',
                start_time,
                end_time,
                total_songs
            )

            logger.info(f"搜索完成: {keyword}, 共爬取 {page_count} 页，{total_songs} 首歌曲")
            write_log_to_file(f"搜索完成: {keyword}, 共爬取 {page_count} 页，{total_songs} 首歌曲", "INFO")

            return total_songs
        except Exception as e:
            logger.error(f"搜索失败: {keyword}, 错误: {e}")
            write_log_to_file(f"搜索失败: {keyword}, 错误: {e}", "ERROR")

            # Record the failed search crawl, keeping the partial count.
            end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            self.db_manager.insert_crawl_record(
                search_url,
                'failed',
                start_time,
                end_time,
                total_songs,
                str(e)
            )

            return total_songs

    def parse_video_list(self, url):
        """解析视频列表页面，获取视频信息列表和下一页URL"""
        logger.info(f"正在解析视频列表页面: {url}")
        
        # 使用Selenium获取页面内容
        page_content = self.get_page(url, use_selenium=True)
        
        if not page_content:
            logger.error(f"无法获取视频列表页面: {url}")
            return [], None
        
        try:
            soup = BeautifulSoup(page_content, 'html.parser')
            
            # 解析视频列表
            videos = []
            next_page_url = None  # 初始化next_page_url变量
            
            # 使用用户提供的视频列表容器选择器
            video_list_container = soup.select_one('.video_list')
            
            if video_list_container:
                # 查找视频项列表
                video_items = video_list_container.select('ul li')
                
                for item in video_items:
                    try:
                        # 提取视频图片
                        img_element = item.select_one('.pic img')
                        image_url = img_element.get('src') if img_element else ''
                        if image_url and not image_url.startswith('http'):
                            image_url = urljoin(self.base_url, image_url)
                        
                        # 提取视频链接和标题
                        name_a_element = item.select_one('.name a')
                        if not name_a_element:
                            continue
                        
                        video_page_url = urljoin(self.base_url, name_a_element.get('href', ''))
                        full_title = name_a_element.get_text().strip()
                        
                        # 解析标题和歌手（格式：标题 - 歌手）
                        if ' - ' in full_title:
                            title, artist = full_title.split(' - ', 1)
                        else:
                            title = full_title
                            artist = '未知歌手'
                        
                        # 解析视频详情页面，获取真正的媒体链接
                        video_detail = self.parse_video_detail(video_page_url)
                        
                        if video_detail:
                            # 提取媒体链接
                            download_url = video_detail.get('url', '')
                            
                            # 提取其他信息
                            video_id = video_detail.get('video_id', '')
                            duration = video_detail.get('duration', '')
                            cover_url = video_detail.get('cover_url', image_url)  # 使用图片URL作为封面
                        else:
                            # 如果解析详情页失败，使用基本信息
                            video_id = get_song_id_from_url(video_page_url)
                            if not video_id:
                                video_id = f"{title}_{artist}".replace(' ', '_').lower()
                            
                            download_url = ''
                            duration = ''
                            cover_url = image_url
                        
                        video_data = {
                            'video_id': video_id,
                            'title': title,
                            'artist': artist,
                            'url': download_url,
                            'source_url': video_page_url,
                            'cover_url': cover_url,
                            'duration': duration,
                            'media_type': 'video',
                            'status': 'pending'
                        }
                        
                        videos.append(video_data)
                    except Exception as e:
                        logger.warning(f"解析视频信息失败: {e}")
                        continue
                
                # 解析下一页URL
                next_page_element = None
                for a in soup.select('a'):
                    if a.get_text().strip() == '下一页':
                        next_page_element = a
                        break
                
                # 如果没有找到，尝试旧的方式作为备用
                if not next_page_element:
                    next_page_element = soup.select_one('a.next-page') or soup.select_one('a.npage')
                
                if next_page_element:
                    next_page_url = urljoin(self.base_url, next_page_element.get('href', ''))
                    
                    # 验证URL是否有效
                    if not next_page_url.startswith(self.base_url):
                        next_page_url = None
            else:
                logger.warning(f"未找到视频列表容器 .video_list: {url}")
                
                # 尝试使用音频歌曲列表的选择器作为备选
                song_elements = soup.select('.play_list li .name') or soup.select('.ilingkuplay_list li .name')
                
                if song_elements:
                    logger.info(f"找到 {len(song_elements)} 个可能的视频元素，使用歌曲列表解析方式")
                    
                    for element in song_elements:
                        try:
                            a_element = element.select_one('a')
                            
                            if not a_element:
                                continue
                            
                            full_text = a_element.get_text().strip()
                            if ' - ' in full_text:
                                artist, title = full_text.split(' - ', 1)
                            else:
                                artist = '未知歌手'
                                title = full_text
                            
                            video_page_url = urljoin(self.base_url, a_element.get('href', ''))
                            
                            # 标记为视频类型
                            video_data = {
                                'video_id': get_song_id_from_url(video_page_url) or f"{title}_{artist}".replace(' ', '_').lower(),
                                'title': title,
                                'artist': artist,
                                'url': '',
                                'source_url': video_page_url,
                                'cover_url': '',
                                'duration': '',
                                'media_type': 'video',
                                'status': 'pending'
                            }
                            
                            videos.append(video_data)
                        except Exception as e:
                            logger.warning(f"解析视频信息失败: {e}")
                            continue
                
                # 尝试在备选解析方式中也查找下一页
                next_page_element = None
                for a in soup.select('a'):
                    if a.get_text().strip() == '下一页':
                        next_page_element = a
                        break
                
                # 如果没有找到，尝试旧的方式作为备用
                if not next_page_element:
                    next_page_element = soup.select_one('a.next-page') or soup.select_one('a.npage')
                
                if next_page_element:
                    next_page_url = urljoin(self.base_url, next_page_element.get('href', ''))
                    
                    # 验证URL是否有效
                    if not next_page_url.startswith(self.base_url):
                        next_page_url = None
            
            return videos, next_page_url
        except Exception as e:
            logger.error(f"解析视频列表页面失败: {url}, 错误: {e}")
            return [], None
    
    def parse_video_detail(self, url):
        """Parse a video detail page and extract full video metadata.

        Args:
            url: Absolute URL of the video detail page.

        Returns:
            dict with keys ``video_id``, ``title``, ``artist``, ``url``
            (direct media/download URL, may be empty), ``source_url``,
            ``cover_url``, ``duration`` and ``media_type``; or ``None``
            when the page cannot be fetched or parsed.
        """
        logger.info(f"正在解析视频详情页: {url}")
        
        # The detail page is rendered by JavaScript, so fetch it via Selenium.
        page_content = self.get_page(url, use_selenium=True)
        
        if not page_content:
            return None
        
        try:
            soup = BeautifulSoup(page_content, 'html.parser')
            
            # 1. Title/artist header (observed formats: "artist - title"
            #    or "title - artist"; see parsing note below).
            title_artist_text = ''
            title_artist_element = soup.select_one('div.djname h1')
            
            if title_artist_element:
                title_artist_text = title_artist_element.get_text().strip()
                # Strip the trailing "refresh" button caption if it leaked
                # into the heading text.
                if '刷新' in title_artist_text:
                    title_artist_text = title_artist_text.split('刷新')[0].strip()
            
            title = '未知视频'
            artist = '未知歌手'
            
            if ' - ' in title_artist_text:
                if title_artist_text.count(' - ') == 1:
                    # Assumes the "title - artist" layout — TODO confirm
                    # against live pages; some may use "artist - title".
                    title, artist = title_artist_text.split(' - ', 1)
                else:
                    # Ambiguous (multiple separators): keep the raw string.
                    title = title_artist_text
            else:
                title = title_artist_text or '未知视频'
            
            logger.info(f"解析视频详情页: {url}, 标题: {title}, 歌手: {artist}")
            
            # 2. Duration, as displayed by the jPlayer widget (if present).
            duration_element = soup.select_one('span.jp-duration')
            duration = duration_element.get_text().strip() if duration_element else ''
            
            # 3. Media URL, tried in order of reliability:
            #    <video src> -> <source src> -> inline scripts -> download anchors.
            download_url = ''
            video_element = soup.select_one('video')
            
            if video_element and video_element.get('src'):
                download_url = video_element.get('src')
            else:
                source_element = soup.select_one('source')
                if source_element and source_element.get('src'):
                    download_url = source_element.get('src')
                else:
                    # Scan inline <script> bodies for a direct media URL.
                    for script in soup.find_all('script'):
                        script_text = script.string
                        if script_text:
                            match = re.search(r'https?://[^"\']+\.(mp4|avi|mov|flv|wmv|mkv)', script_text)
                            if match:
                                download_url = match.group()
                                break
                    else:
                        # No script hit: fall back to download-style anchors.
                        download_element = (soup.select_one('a.download-btn')
                                            or soup.select_one('a.down-video')
                                            or soup.select_one('a.down-mp3'))
                        if download_element:
                            download_url = urljoin(self.base_url, download_element.get('href', ''))
                        
                        # Site-specific: look for plug/down.php style links.
                        if not download_url:
                            down_php_links = soup.select('a[href*="plug/down.php"]')
                            if down_php_links:
                                download_url = urljoin(self.base_url, down_php_links[0].get('href', ''))
                                logger.info(f"找到plug/down.php链接: {download_url}")
                            else:
                                # Last resort: any anchor that smells like a download.
                                for link in soup.find_all('a', href=True):
                                    href = link.get('href', '')
                                    if 'down.php' in href or 'download' in href.lower():
                                        download_url = urljoin(self.base_url, href)
                                        logger.info(f"找到潜在下载链接: {download_url}")
                                        break
            
            # Normalize relative media URLs to absolute ones.
            if download_url and not download_url.startswith('http'):
                download_url = urljoin(self.base_url, download_url)
            
            # Resolve redirecting down.php links to the real media address.
            if download_url and 'plug/down.php' in download_url:
                logger.info(f"检测到重定向下载链接: {download_url}")
                try:
                    # HEAD with redirects disabled so we can read Location
                    # directly without downloading the file.
                    # NOTE(review): verify=False skips TLS validation — kept
                    # from the original behavior, worth revisiting.
                    with requests.head(download_url, allow_redirects=False, verify=False, timeout=10) as response:
                        # Bug fix: accept any redirect status, not only 302 —
                        # servers may answer 301/303/307/308 as well.
                        if response.status_code in (301, 302, 303, 307, 308) and 'Location' in response.headers:
                            # Bug fix: Location may be relative (RFC 9110);
                            # resolve it against the request URL.
                            real_download_url = urljoin(download_url, response.headers['Location'])
                            logger.info(f"获取到真实视频地址: {real_download_url}")
                            download_url = real_download_url
                except Exception as e:
                    logger.warning(f"获取重定向地址失败: {e}")
            
            # 4. Cover image.
            cover_url = ''
            cover_element = soup.select_one('img#mcover') or soup.select_one('img.cover')
            if cover_element and cover_element.get('src'):
                cover_url = urljoin(self.base_url, cover_element.get('src'))
            
            # Stable video id: prefer ids embedded in the URLs, otherwise
            # derive one from title/artist.
            video_id = get_song_id_from_url(url) or get_song_id_from_url(download_url)
            
            if not video_id:
                video_id = f"{title}_{artist}".replace(' ', '_').lower()
            
            video_data = {
                'video_id': video_id,
                'title': title,
                'artist': artist,
                'url': download_url,
                'source_url': url,
                'cover_url': cover_url,
                'duration': duration,
                'media_type': 'video'
            }
            
            logger.info(f"成功解析视频: {title} - {artist}")
            return video_data
        except Exception as e:
            logger.error(f"解析视频详情失败: {url}, 错误: {e}")
            return None
    
    def crawl_video_category(self, category_url, max_pages=10):
        """Crawl every video in a category page (legacy entry point).

        Kept for backward compatibility; simply delegates to the unified
        crawl_media_category with the 'video' media type.
        """
        return self.crawl_media_category(category_url, 'video', max_pages)
        
    def process_pending_media(self, media_type='all', limit=100):
        """Work through database records whose status is 'pending'.

        Args:
            media_type: 'audio', 'video', or 'all' (audio first, then video).
            limit: Maximum number of records to process overall.

        Returns:
            int: number of records successfully processed (0 on failure).
        """
        logger.info(f"开始处理状态为pending的{media_type}媒体对象，限制: {limit}")
        
        processed_count = 0
        
        try:
            def _drain(pending_items, kind, stop_at_limit=False):
                # Process each fetched record; optionally stop once the
                # overall limit is reached (used for the 'all' video pass).
                nonlocal processed_count
                for record in pending_items:
                    if self._process_single_media(record, kind):
                        processed_count += 1
                        if stop_at_limit and processed_count >= limit:
                            return
            
            if media_type == 'all':
                # Audio first, then fill the remaining quota with videos.
                _drain(self.db_manager.get_pending_songs(limit), 'audio')
                _drain(self.db_manager.get_pending_videos(limit - processed_count),
                       'video', stop_at_limit=True)
            elif media_type == 'audio':
                _drain(self.db_manager.get_pending_songs(limit), 'audio')
            else:
                _drain(self.db_manager.get_pending_videos(limit), 'video')
            
            logger.info(f"处理完成，共处理 {processed_count} 个{media_type}媒体对象")
            return processed_count
        except Exception as e:
            logger.error(f"处理媒体对象失败: {e}")
            return processed_count
            
    def _process_single_media(self, media, media_type):
        """Fetch detail data for one pending record and persist the result.

        Args:
            media: dict-like record holding at least 'source_url' and the
                id field ('song_id' for audio, 'video_id' for video).
            media_type: 'audio' or 'video'.

        Returns:
            bool: True when the record was updated successfully.
        """
        try:
            page_url = media.get('source_url')
            if not page_url:
                logger.warning(f"{media_type}对象缺少source_url")
                return False
            
            logger.info(f"正在处理{media_type}对象: {media.get('title')}，URL: {page_url}")
            
            # Pick the parser, DB writer, and id field per media type.
            is_audio = media_type == 'audio'
            if is_audio:
                detail = self.parse_song_detail(page_url)
                save = self.db_manager.update_song
                id_field = 'song_id'
            else:
                detail = self.parse_video_detail(page_url)
                save = self.db_manager.update_video
                id_field = 'video_id'
            
            if not detail:
                # Detail fetch failed: flag the record so it is not retried
                # as pending forever.
                if is_audio:
                    self.db_manager.update_song_status(media['song_id'], 'failed')
                else:
                    self.db_manager.update_video_status(media['video_id'], 'failed')
                logger.warning(f"获取{media_type}详情失败: {media.get('title')}")
                return False
            
            # New detail fields win over the stale pending record.
            merged = {**media, **detail, 'status': 'completed'}
            save(merged[id_field], merged)
            logger.info(f"成功更新{media_type}对象: {merged.get('title')}")
            return True
        except Exception as e:
            logger.error(f"处理{media_type}对象时发生错误: {e}")
            # Unexpected failure: record the error state and message.
            if media_type == 'audio':
                self.db_manager.update_song_status(media.get('song_id'), 'error', str(e))
            else:
                self.db_manager.update_video_status(media.get('video_id'), 'error', str(e))
            return False
    
    def close(self):
        """Shut down the crawler and release held resources (DB connection)."""
        db = self.db_manager
        if db:
            db.close()
        logger.info("爬虫已关闭")