#!/usr/bin/env python3
"""
网站文件下载脚本
从配置文件读取网站和文件信息，并行爬取和下载文件
支持两种模式：
1. static - 静态HTML爬取模式（适合静态网站）
2. browser - 浏览器模式（适合需要JavaScript渲染的动态网站，需要Playwright）
"""

import json
import os
import asyncio
import aiohttp
from pathlib import Path
from typing import Dict, List, Optional
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup
import fnmatch
import argparse


class FileDownloader:
    """Crawl configured websites and download files that match glob patterns.

    The JSON config maps a website URL to per-site options:
      * ``mode``: ``"static"`` (default; plain HTML crawl via aiohttp) or
        ``"browser"`` (Playwright/Chromium for JavaScript-rendered pages).
      * ``files``: filename glob patterns selecting which links to download.
      * ``click_elements``: (browser mode) element texts to click in order to
        trigger downloads.
      * ``rename``: target filenames applied positionally to the downloads.
    """

    def __init__(self, config_path: str = "config/repos.json", show_progress: bool = True, debug: bool = False):
        self.config_path = config_path
        self.show_progress = show_progress
        self.debug = debug
        self.config = self._load_config()
        # Playwright resources are created lazily — only if some site
        # actually uses browser mode (see init_playwright).
        self.playwright = None
        self.browser = None

    def _load_config(self) -> Dict:
        """Load and return the JSON configuration file."""
        with open(self.config_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    async def init_playwright(self):
        """Start Playwright and launch a headless Chromium (idempotent)."""
        if self.playwright is None:
            try:
                from playwright.async_api import async_playwright
                self.playwright = await async_playwright().start()
                self.browser = await self.playwright.chromium.launch(headless=True)
                print("✓ 浏览器模式已初始化")
            except ImportError:
                # Playwright is an optional dependency; tell the user how to
                # install it, then propagate the error.
                print("❌ 错误: 浏览器模式需要安装 Playwright")
                print("请运行: pip install playwright && playwright install chromium")
                raise

    async def close_playwright(self):
        """Release the browser and Playwright driver if they were started."""
        if self.browser:
            await self.browser.close()
        if self.playwright:
            await self.playwright.stop()

    async def fetch_page(self, session: aiohttp.ClientSession, url: str) -> str:
        """Fetch a web page and return its body as text (static mode).

        Raises on HTTP error status so an error page is not silently
        treated as crawlable content.
        """
        async with session.get(url) as response:
            response.raise_for_status()
            return await response.text()

    def extract_download_links(self, html_content: str, base_url: str) -> List[str]:
        """Extract every href from ``<a>``/``<link>`` tags as an absolute URL."""
        soup = BeautifulSoup(html_content, 'html.parser')
        links = []

        for tag in soup.find_all(['a', 'link']):
            href = tag.get('href')
            if href:
                # Resolve relative links against the page URL.
                links.append(urljoin(base_url, href))

        return links

    def match_files(self, links: List[str], patterns: List[str]) -> List[str]:
        """Return the links whose basename matches any of the glob patterns."""
        matched = []
        for link in links:
            # Match against the last path segment (the filename), ignoring
            # query strings and fragments.
            filename = os.path.basename(urlparse(link).path)

            for pattern in patterns:
                if fnmatch.fnmatch(filename, pattern):
                    matched.append(link)
                    break

        return matched

    def format_size(self, size_bytes: float) -> str:
        """Format a byte count as a human-readable string (B/KB/MB/GB/TB)."""
        for unit in ['B', 'KB', 'MB', 'GB']:
            if size_bytes < 1024.0:
                return f"{size_bytes:.2f} {unit}"
            size_bytes /= 1024.0
        return f"{size_bytes:.2f} TB"

    async def download_file(
        self,
        session: aiohttp.ClientSession,
        url: str,
        filename: str
    ) -> bool:
        """Stream ``url`` into ``filename``, optionally drawing a progress bar.

        Returns True on success, False on any HTTP or I/O error.
        """
        try:
            async with session.get(url) as response:
                if response.status != 200:
                    print(f"下载失败: {filename} (状态码: {response.status})")
                    return False

                # Content-Length may be absent; 0 disables the progress bar.
                total_size = int(response.headers.get('content-length', 0))

                if self.show_progress:
                    print(f"开始下载: {filename} ({self.format_size(total_size)})")
                else:
                    print(f"下载中: {filename} ({self.format_size(total_size)})")

                downloaded = 0
                chunk_size = 8192

                with open(filename, 'wb') as f:
                    async for chunk in response.content.iter_chunked(chunk_size):
                        f.write(chunk)
                        downloaded += len(chunk)

                        if self.show_progress and total_size > 0:
                            progress = (downloaded / total_size) * 100
                            bar_length = 50
                            filled_length = int(bar_length * downloaded // total_size)
                            bar = '█' * filled_length + '-' * (bar_length - filled_length)
                            print(f'\r{filename}: |{bar}| {progress:.1f}% {self.format_size(downloaded)}/{self.format_size(total_size)}', end='', flush=True)

                if self.show_progress:
                    print()  # terminate the \r progress line

                print(f"下载完毕: {filename}")
                return True

        except Exception as e:
            print(f"下载出错: {filename} - {str(e)}")
            return False

    async def download_file_from_path(self, file_path: str, target_name: str) -> bool:
        """Copy a local file to ``target_name``; returns True on success."""
        try:
            if os.path.exists(file_path):
                import shutil
                shutil.copy2(file_path, target_name)
                file_size = os.path.getsize(target_name)
                print(f"下载完毕: {target_name} ({self.format_size(file_size)})")
                return True
            else:
                print(f"文件不存在: {file_path}")
                return False
        except Exception as e:
            print(f"复制文件出错: {target_name} - {str(e)}")
            return False

    async def process_static_mode(
        self,
        session: aiohttp.ClientSession,
        website_url: str,
        config: Dict
    ) -> List[str]:
        """Crawl a static HTML page; return the links matching the config patterns."""
        print(f"使用静态HTML模式")

        # Fetch the page and collect every href.
        html_content = await self.fetch_page(session, website_url)
        all_links = self.extract_download_links(html_content, website_url)
        print(f"找到 {len(all_links)} 个链接")

        # Debug mode: show every link that was found.
        if self.debug:
            print("\n找到的所有链接:")
            for i, link in enumerate(all_links, 1):
                print(f"  {i}. {link}")
            print()

        # Keep only the links whose filename matches a configured pattern.
        file_patterns = config.get('files', [])
        matched_links = self.match_files(all_links, file_patterns)
        print(f"匹配到 {len(matched_links)} 个文件")

        return matched_links

    async def process_browser_mode(
        self,
        website_url: str,
        config: Dict
    ) -> List[Dict]:
        """Process a site with Playwright (supports JavaScript rendering).

        Returns a list of dicts (``filename``/``filepath``/``size``) for each
        file downloaded through a click interaction. Pattern-matched links are
        navigated to directly and are NOT included in the returned list.
        """
        print(f"使用浏览器模式")

        # Launch the shared browser on first use.
        if self.browser is None:
            await self.init_playwright()

        page = await self.browser.new_page()

        # Files are saved into the current working directory.
        download_path = os.path.abspath(".")

        downloads = []

        try:
            print(f"正在访问: {website_url}")
            await page.goto(website_url, wait_until="networkidle", timeout=60000)

            click_elements = config.get('click_elements', [])

            if click_elements:
                print(f"需要点击 {len(click_elements)} 个元素")

                for element_text in click_elements:
                    try:
                        print(f"正在点击: {element_text}")

                        # The click is expected to fire a download event.
                        async with page.expect_download() as download_info:
                            try:
                                await page.get_by_text(element_text).click(timeout=5000)
                            except Exception:
                                # Fall back to a text locator if the exact
                                # text match fails.
                                await page.locator(f"text={element_text}").first.click(timeout=5000)

                        download = await download_info.value
                        filename = download.suggested_filename
                        filepath = os.path.join(download_path, filename)

                        print(f"开始下载: {filename}")

                        await download.save_as(filepath)

                        file_size = os.path.getsize(filepath) if os.path.exists(filepath) else 0
                        downloads.append({
                            'filename': filename,
                            'filepath': filepath,
                            'size': file_size
                        })

                        print(f"下载完毕: {filename} ({self.format_size(file_size)})")

                    except Exception as e:
                        print(f"点击元素失败: {element_text} - {str(e)}")

            # Pattern-based downloads: collect every rendered anchor href and
            # navigate to the matching ones so the browser handles them.
            file_patterns = config.get('files', [])
            if file_patterns:
                links = await page.eval_on_selector_all('a[href]',
                    '(elements) => elements.map(e => e.href)')

                if self.debug:
                    print(f"\n浏览器模式找到 {len(links)} 个链接:")
                    for i, link in enumerate(links, 1):
                        print(f"  {i}. {link}")
                    print()

                matched_links = self.match_files(links, file_patterns)

                for link in matched_links:
                    try:
                        # Give the navigation-triggered download a moment to
                        # start before moving on.
                        await page.goto(link, timeout=30000)
                        await asyncio.sleep(2)
                    except Exception as e:
                        print(f"下载链接失败: {link} - {str(e)}")

        finally:
            await page.close()

        return downloads

    async def process_website(
        self,
        session: aiohttp.ClientSession,
        website_url: str,
        config: Dict
    ) -> None:
        """Run the download pipeline for a single website, never raising."""
        print(f"\n开始处理网站: {website_url}")

        try:
            mode = config.get('mode', 'static')

            if mode == 'browser':
                # Browser mode: files were already saved by Playwright;
                # apply positional renames afterwards.
                downloads = await self.process_browser_mode(website_url, config)

                rename_list = config.get('rename', [])
                for idx, download_info in enumerate(downloads):
                    if rename_list and idx < len(rename_list):
                        old_path = download_info['filepath']
                        new_name = rename_list[idx]
                        if old_path != new_name and os.path.exists(old_path):
                            os.rename(old_path, new_name)
                            print(f"✓ 重命名: {download_info['filename']} -> {new_name}")
                    else:
                        print(f"✓ 完成: {download_info['filename']}")

            else:
                # Static mode: crawl, match, then download sequentially.
                matched_links = await self.process_static_mode(session, website_url, config)

                if not matched_links:
                    print(f"警告: 未找到匹配的文件")
                    return

                rename_list = config.get('rename', [])

                for idx, link in enumerate(matched_links):
                    parsed_url = urlparse(link)
                    original_filename = os.path.basename(parsed_url.path)

                    # Positional rename takes priority over the server name.
                    if rename_list and idx < len(rename_list):
                        final_filename = rename_list[idx]
                    else:
                        final_filename = original_filename

                    success = await self.download_file(session, link, final_filename)

                    if success:
                        print(f"✓ 成功: {final_filename}")
                    else:
                        print(f"✗ 失败: {final_filename}")

        except Exception as e:
            # One failing site must not abort the others (they run in
            # parallel under asyncio.gather).
            print(f"处理网站出错: {website_url} - {str(e)}")
            import traceback
            if self.debug:
                traceback.print_exc()

    async def run(self) -> None:
        """Process every configured website concurrently, then clean up."""
        print("=" * 60)
        print("网站文件下载脚本")
        print("=" * 60)

        try:
            # One shared HTTP session for all static-mode downloads.
            timeout = aiohttp.ClientTimeout(total=3600)  # 1-hour overall timeout
            async with aiohttp.ClientSession(timeout=timeout) as session:
                tasks = []
                for website_url, config in self.config.items():
                    task = self.process_website(session, website_url, config)
                    tasks.append(task)

                # Each task handles its own errors, so gather won't abort early.
                await asyncio.gather(*tasks)

        finally:
            # Always release Playwright resources, even on failure.
            await self.close_playwright()

        print("\n" + "=" * 60)
        print("所有下载任务完成")
        print("=" * 60)


def main():
    """CLI entry point: parse the command line and run the downloader."""
    # Prefer the Docker-mounted config path when it is present.
    cfg_default = 'config/repos.json'
    if os.path.exists('/downloads/config/repos.json'):
        cfg_default = '/downloads/config/repos.json'

    arg_parser = argparse.ArgumentParser(description='网站文件下载脚本')
    arg_parser.add_argument(
        '--config',
        default=cfg_default,
        help='配置文件路径 (默认: config/repos.json 或 /downloads/config/repos.json)'
    )
    arg_parser.add_argument(
        '--no-progress',
        action='store_true',
        help='不显示进度条'
    )
    arg_parser.add_argument(
        '--debug',
        action='store_true',
        help='调试模式，显示详细信息'
    )

    opts = arg_parser.parse_args()

    # Build the downloader from the parsed options and drive it to completion.
    downloader = FileDownloader(
        config_path=opts.config,
        show_progress=not opts.no_progress,
        debug=opts.debug
    )
    asyncio.run(downloader.run())


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
