#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
音乐网站爬虫主程序
"""

import os
import sys
# 将项目根目录添加到Python搜索路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import argparse
import time
from datetime import datetime
from config import settings
from crawler import MusicCrawler
from downloader import MusicDownloader
from db_manager import DBManager
from utils import setup_logger, write_log_to_file, format_file_size

logger = setup_logger('main')


def print_banner():
    """Print the program's startup banner to stdout."""
    print(r"""
    =================================================================
             音乐网站爬虫 - 支持音乐信息爬取和下载
    =================================================================
    """)


def parse_arguments():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace: parsed arguments. ``command`` holds the chosen
        sub-command ('crawl', 'download' or 'status'), or None when no
        sub-command was given.
    """
    parser = argparse.ArgumentParser(description='音乐网站爬虫工具')

    # One sub-parser per command; the chosen name lands in args.command.
    subparsers = parser.add_subparsers(dest='command', help='可用命令')

    # 'crawl' sub-command: scrape song metadata.
    crawl_parser = subparsers.add_parser('crawl', help='爬取音乐信息')
    crawl_parser.add_argument('--all', action='store_true', help='爬取所有分类')
    crawl_parser.add_argument('--category', type=str, help='爬取指定分类URL')
    crawl_parser.add_argument('--search', type=str, help='搜索歌曲')
    crawl_parser.add_argument('--max-pages', type=int, default=5, help='最大爬取页数')

    # 'download' sub-command: download songs previously crawled.
    download_parser = subparsers.add_parser('download', help='下载音乐')
    download_parser.add_argument('--all', action='store_true', help='下载所有待下载的歌曲')
    download_parser.add_argument('--count', type=int, default=100, help='下载歌曲数量')
    # BUG FIX: the original used action='store_true' together with
    # default=True, which made --artist-dir a no-op and left no way to
    # turn artist grouping off.  Keep --artist-dir and the default of True
    # for backward compatibility, and add --no-artist-dir to disable it.
    download_parser.add_argument('--artist-dir', dest='artist_dir',
                                 action='store_true', default=True,
                                 help='按歌手分类保存')
    download_parser.add_argument('--no-artist-dir', dest='artist_dir',
                                 action='store_false',
                                 help='不按歌手分类保存')
    download_parser.add_argument('--album-dir', action='store_true', help='按专辑分类保存')
    download_parser.add_argument('--concurrent', type=int, help='并发下载数')

    # 'status' sub-command takes no options of its own.
    subparsers.add_parser('status', help='查看爬虫状态')

    # -h/--help is added automatically by argparse.

    return parser.parse_args()


def crawl_music(args):
    """Run the 'crawl' sub-command.

    Dispatches on the mutually-exclusive options --all / --category /
    --search (checked in that order) and always releases the crawler's
    resources when finished.
    """
    crawler = MusicCrawler()

    try:
        if args.all:
            # Crawl every known category.
            logger.info("开始爬取所有音乐分类...")
            write_log_to_file("开始爬取所有音乐分类", "INFO")
            count = crawler.crawl_all_categories(max_pages=args.max_pages)
            logger.info(f"爬取完成，共获取 {count} 首歌曲信息")
            write_log_to_file(f"爬取完成，共获取 {count} 首歌曲信息", "INFO")
        elif args.category:
            # Crawl one category; relative paths get the base URL prefixed.
            url = args.category
            if not url.startswith('http'):
                url = f"{settings.BASE_URL}{url}"

            logger.info(f"开始爬取分类: {url}")
            write_log_to_file(f"开始爬取分类: {url}", "INFO")
            count = crawler.crawl_category(url, max_pages=args.max_pages)
            logger.info(f"分类爬取完成，共获取 {count} 首歌曲信息")
            write_log_to_file(f"分类爬取完成，共获取 {count} 首歌曲信息", "INFO")
        elif args.search:
            # Keyword search.
            logger.info(f"开始搜索歌曲: {args.search}")
            write_log_to_file(f"开始搜索歌曲: {args.search}", "INFO")
            count = crawler.search_songs(args.search, max_pages=args.max_pages)
            logger.info(f"搜索完成，共获取 {count} 首歌曲信息")
            write_log_to_file(f"搜索完成，共获取 {count} 首歌曲信息", "INFO")
        else:
            logger.error("请指定爬取方式: --all, --category 或 --search")
            print("使用帮助: python main.py crawl -h")
    finally:
        crawler.close()


def download_music(args):
    """Run the 'download' sub-command.

    Fetches pending songs from the database, downloads them in batch, then
    records each song's outcome ('downloaded' or 'failed') back to the
    database.  The database connection is always closed on exit.
    """
    db = DBManager()
    dl = MusicDownloader()

    try:
        logger.info("正在获取待下载的歌曲...")
        pending = db.get_pending_songs(limit=args.count)

        # Nothing to do — bail out early.
        if not pending:
            logger.info("没有待下载的歌曲")
            return

        logger.info(f"找到 {len(pending)} 首待下载的歌曲")
        write_log_to_file(f"开始下载 {len(pending)} 首歌曲", "INFO")

        ok, bad = dl.batch_download(
            pending,
            artist_dir=args.artist_dir,
            album_dir=args.album_dir,
            max_workers=args.concurrent
        )

        # Persist the outcome of every attempt.
        for song in ok:
            db.update_song_status(
                song['song_id'],
                'downloaded',
                file_path=song.get('file_path'),
                file_size=song.get('file_size')
            )
        for song in bad:
            db.update_song_status(song['song_id'], 'failed')

        # Summarize the run.
        size = sum(s.get('file_size', 0) for s in ok)

        logger.info(f"下载完成: 成功 {len(ok)} 首, 失败 {len(bad)} 首")
        logger.info(f"下载总大小: {format_file_size(size)}")

        write_log_to_file(
            f"下载完成: 成功 {len(ok)} 首, 失败 {len(bad)} 首, 总大小: {format_file_size(size)}", 
            "INFO"
        )

    finally:
        db.close()


def show_status():
    """Show crawl/download statistics from the database.

    Prints the total song count, a count per status ('pending',
    'downloaded', 'failed') and the combined size of downloaded files.
    Supports the 'sqlite' and 'mongodb' backends of DBManager; any other
    backend is reported as an error instead of crashing.
    """
    db_manager = DBManager()
    
    try:
        if db_manager.db_type == 'sqlite':
            cursor = db_manager.db.cursor()
            try:
                # One COUNT per status bucket, plus the grand total.
                cursor.execute("SELECT COUNT(*) FROM songs")
                total = cursor.fetchone()[0]
                
                cursor.execute("SELECT COUNT(*) FROM songs WHERE status = 'pending'")
                pending = cursor.fetchone()[0]
                
                cursor.execute("SELECT COUNT(*) FROM songs WHERE status = 'downloaded'")
                downloaded = cursor.fetchone()[0]
                
                cursor.execute("SELECT COUNT(*) FROM songs WHERE status = 'failed'")
                failed = cursor.fetchone()[0]
                
                # SUM(...) is NULL (fetched as None) when no rows match.
                cursor.execute("SELECT SUM(file_size) FROM songs WHERE status = 'downloaded'")
                total_size = cursor.fetchone()[0] or 0
            finally:
                # BUG FIX: the cursor was never closed before.
                cursor.close()
            
        elif db_manager.db_type == 'mongodb':
            songs_collection = db_manager.db['songs']
            
            total = songs_collection.count_documents({})
            pending = songs_collection.count_documents({'status': 'pending'})
            downloaded = songs_collection.count_documents({'status': 'downloaded'})
            failed = songs_collection.count_documents({'status': 'failed'})
            
            # Project only file_size; treat missing values as 0.
            total_size = 0
            for song in songs_collection.find({'status': 'downloaded'}, {'file_size': 1}):
                total_size += song.get('file_size', 0)
        else:
            # BUG FIX: an unrecognized db_type previously fell through to
            # the print block below and raised NameError on the undefined
            # statistics variables.
            logger.error(f"不支持的数据库类型: {db_manager.db_type}")
            return
        
        # Print the collected statistics.
        print("\n===== 爬虫状态 =====")
        print(f"总歌曲数: {total}")
        print(f"待下载: {pending}")
        print(f"已下载: {downloaded}")
        print(f"下载失败: {failed}")
        print(f"已下载大小: {format_file_size(total_size)}")
        print("==================\n")
        
    finally:
        db_manager.close()


def main():
    """Program entry point: show the banner, parse the CLI and dispatch
    to the handler for the selected sub-command."""
    print_banner()

    args = parse_arguments()

    # No sub-command given at all.
    if not args.command:
        print("请指定命令，使用 -h 查看帮助")
        return

    # Dispatch table: sub-command name -> handler taking the parsed args.
    handlers = {
        'crawl': crawl_music,
        'download': download_music,
        'status': lambda _args: show_status(),
    }

    handler = handlers.get(args.command)
    if handler is None:
        print(f"未知命令: {args.command}")
        print("使用帮助: python main.py -h")
        return

    handler(args)


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to stop the program; log and exit quietly.
        logger.info("程序被用户中断")
        write_log_to_file("程序被用户中断", "INFO")
    except Exception as e:
        # IMPROVEMENT: logger.exception also records the traceback, which
        # the previous logger.error(message-only) call discarded.
        logger.exception(f"程序运行出错: {e}")
        write_log_to_file(f"程序运行出错: {e}", "ERROR")
        sys.exit(1)