# 版本: 0.5.0.20240803
# 作者: 陈振玺
# 功能: 文件下载工具，遵循爬虫最佳实践

"""File downloader module with web scraping best practices."""

import argparse
import os
import sys
import time
from pathlib import Path
from typing import Optional
from urllib.error import HTTPError, URLError
from urllib.parse import unquote, urlparse
from urllib.request import Request, urlopen


class FileDownloader:
    """File downloader that follows web-scraping best practices.

    Sends browser-like request headers, streams the response to disk in
    chunks, and retries transient failures with a configurable delay.
    """

    def __init__(self, delay: float = 1.0, timeout: int = 30, retries: int = 3):
        """Initialize the downloader.

        Args:
            delay: Pause between retry attempts, in seconds.
            timeout: Per-request timeout, in seconds.
            retries: Maximum number of download attempts.
        """
        self.delay = delay
        self.timeout = timeout
        self.retries = retries

        # Browser-like headers so servers treat us as a regular client.
        # NOTE: 'Accept-Encoding: gzip, deflate, br' is deliberately NOT sent.
        # urllib does not transparently decompress responses, so advertising a
        # compressed encoding would cause the raw compressed bytes to be
        # written to disk, corrupting the saved file.
        self.headers = {
            'User-Agent': (
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                'AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/91.0.4472.124 Safari/537.36'
            ),
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }

    def _get_filename_from_url(self, url: str) -> str:
        """Derive a filename from *url*.

        Falls back to a generic name when the URL path has no usable
        basename or no extension (e.g. REST API endpoints); URLs mentioning
        'fasta' get a .fasta default.

        Args:
            url: The download URL.

        Returns:
            A URL-decoded filename.
        """
        parsed_url = urlparse(url)
        filename = os.path.basename(parsed_url.path)

        # No basename, or basename without an extension: use a default.
        if not filename or '.' not in filename:
            if 'fasta' in url.lower():
                filename = 'downloaded_sequence.fasta'
            else:
                filename = 'downloaded_file.txt'

        # Decode percent-escapes (e.g. %20 -> space).
        return unquote(filename)

    def _get_filename_from_headers(self, response) -> Optional[str]:
        """Extract a filename from the Content-Disposition response header.

        Args:
            response: HTTP response object (anything whose ``headers``
                mapping supports ``.get``).

        Returns:
            The URL-decoded filename, or None when the header is missing or
            carries no filename.
        """
        content_disposition = response.headers.get('Content-Disposition')
        if content_disposition and 'filename=' in content_disposition:
            # Keep only the filename value: drop any trailing parameters
            # such as '; size=123' (previously they were kept by mistake),
            # then remove surrounding quotes.
            filename = content_disposition.split('filename=')[1].split(';')[0]
            filename = filename.strip().strip('"\'')
            if filename:
                return unquote(filename)
        return None

    def download(self, url: str, output_path: str) -> bool:
        """Download *url* to *output_path*, retrying on transient errors.

        Args:
            url: The URL to download.
            output_path: Destination file path; parent directories are
                created as needed.

        Returns:
            True on success, False when every attempt failed.
        """
        print(f"开始下载: {url}")

        # Make sure the destination directory exists.
        output_dir = os.path.dirname(output_path)
        if output_dir:
            Path(output_dir).mkdir(parents=True, exist_ok=True)

        for attempt in range(self.retries):
            try:
                request = Request(url, headers=self.headers)

                # urlopen() raises HTTPError for any non-2xx status, so no
                # explicit status check is needed on the response.
                with urlopen(request, timeout=self.timeout) as response:
                    content_length = response.headers.get('Content-Length')
                    total_size = int(content_length) if content_length else None

                    print(f"保存到: {output_path}")
                    if total_size:
                        print(f"文件大小: {total_size:,} 字节")

                    # Stream to disk in chunks so large files never have to
                    # fit in memory.
                    downloaded_size = 0
                    chunk_size = 8192

                    with open(output_path, 'wb') as f:
                        while True:
                            chunk = response.read(chunk_size)
                            if not chunk:
                                break

                            f.write(chunk)
                            downloaded_size += len(chunk)

                            # In-place progress line (overwritten via \r).
                            if total_size:
                                progress = (downloaded_size / total_size) * 100
                                print(f"\r下载进度: {progress:.1f}% ({downloaded_size:,}/{total_size:,} 字节)", end='', flush=True)
                            else:
                                print(f"\r已下载: {downloaded_size:,} 字节", end='', flush=True)

                    print(f"\n下载完成: {output_path}")
                    return True

            except (URLError, HTTPError, OSError) as e:
                print(f"\n第 {attempt + 1} 次尝试失败: {e}")
                if attempt < self.retries - 1:
                    # Single back-off sleep per failed attempt; the original
                    # slept twice (here and again at the end of the loop).
                    print(f"等待 {self.delay} 秒后重试...")
                    time.sleep(self.delay)
                else:
                    print("所有重试都失败了")
                    return False

        return False


def main():
    """Command-line entry point: parse arguments and run the download.

    Exits the process with status 0 on success and 1 on failure.
    """
    parser = argparse.ArgumentParser(
        description='文件下载工具 - 遵循爬虫最佳实践',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""示例:
  download -i https://www.ebi.ac.uk/ena/browser/api/fasta/LT903945?download=true -o sequence.fasta
  download -i https://example.com/file.txt -o /path/to/output.txt
        """
    )

    parser.add_argument(
        '-i', '--input', '--url',
        required=True,
        help='要下载的文件URL'
    )

    parser.add_argument(
        '-o', '--output',
        required=True,
        help='输出文件路径（包含文件名）'
    )

    parser.add_argument(
        '--delay',
        type=float,
        default=1.0,
        help='请求间隔时间（秒），默认1.0'
    )

    parser.add_argument(
        '--timeout',
        type=int,
        default=30,
        help='请求超时时间（秒），默认30'
    )

    parser.add_argument(
        '--retries',
        type=int,
        default=3,
        help='重试次数，默认3'
    )

    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s 1.0.0'
    )

    args = parser.parse_args()

    # Build the downloader from the CLI options.
    downloader = FileDownloader(
        delay=args.delay,
        timeout=args.timeout,
        retries=args.retries
    )

    # Run the download and map the result to a process exit status.
    success = downloader.download(args.input, args.output)

    if success:
        print("下载成功！")
        # sys.exit() instead of the interactive-only exit() builtin, which is
        # injected by the site module and not guaranteed to exist.
        sys.exit(0)
    else:
        print("下载失败！")
        sys.exit(1)


# Run the CLI entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()