import asyncio
import hashlib
import os
import re
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional

import aiofiles
import aiohttp

from src.core.progress_manager import ProgressManager
from src.infrastructure.config import ConfigurationManager
from src.infrastructure.log_decorators import log_operation

class DownloadUtils:
    """Concurrent HTTP downloader.

    Splits a file into byte ranges, fetches them in parallel into a single
    pre-created temp file, then moves it into place. Supports best-effort
    resume based on the temp file's current size (approximate when several
    chunks were in flight at interruption — TODO confirm acceptable).
    """

    def __init__(self, config: Dict[str, Any], progress_manager: ProgressManager):
        # NOTE: `config` is accepted for interface compatibility but values
        # are read from ConfigurationManager, as in the original.
        self.chunk_size = ConfigurationManager.get_value('download.chunk_size', 1024 * 1024)  # minimum bytes per range
        self.max_threads = ConfigurationManager.get_value('download.max_threads', 5)          # max concurrent ranges
        self.timeout = ConfigurationManager.get_value('download.timeout', 30)                 # session total timeout (s)
        self.retry_times = ConfigurationManager.get_value('download.retry_times', 3)          # attempts per chunk

        temp_path = ConfigurationManager.get_value('storage.temp_path', './downloads/temp')
        self.temp_path = Path(temp_path)
        self.temp_path.mkdir(parents=True, exist_ok=True)

        self.progress_manager = progress_manager

    @staticmethod
    def _stable_name(url: str) -> str:
        """Deterministic file-name stem for *url*.

        BUG FIX: the original used built-in hash(), which is randomized per
        process for str — resume could never match across restarts.
        """
        return hashlib.sha256(url.encode('utf-8')).hexdigest()

    @log_operation("download_utils")
    async def download_file(self, url: str, save_path: str, task_id: str) -> str:
        """Download *url* to *save_path* using concurrent ranged requests.

        Args:
            url: Source URL; server must answer HEAD and honour Range.
            save_path: Final destination path of the completed file.
            task_id: Identifier for progress reporting.

        Returns:
            str: *save_path* on success.

        Raises:
            Exception: on HEAD failure, missing content-length, or a chunk
                exhausting its retries; the task is marked failed first.
        """
        temp_file = self.temp_path / f"{self._stable_name(url)}.download"
        resume_pos = temp_file.stat().st_size if temp_file.exists() else 0

        # Cumulative progress accounting. BUG FIX: the original passed a sync
        # lambda that _download_chunk awaited (TypeError on the first chunk),
        # reported per-read sizes instead of a running total, and passed the
        # literal string 'start' as an extra argument (apparent typo).
        downloaded = resume_pos

        async def report(nbytes: int) -> None:
            nonlocal downloaded
            downloaded += nbytes  # coroutines run on one loop; no lock needed
            self.progress_manager.update_progress(task_id, downloaded)

        try:
            # BUG FIX: self.timeout was read from config but never applied.
            timeout = aiohttp.ClientTimeout(total=self.timeout)
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.head(url) as response:
                    if response.status != 200:
                        raise Exception(f"Failed to get file size: {response.status}")

                    file_size = int(response.headers.get('content-length', 0))
                    self.progress_manager.create_task(task_id, file_size)

                if file_size <= 0:
                    # Without a size we cannot build ranges. (The original
                    # fell into the resume branch and crashed renaming a
                    # temp file that did not exist.)
                    raise Exception("Server did not report content-length")

                if resume_pos >= file_size:
                    # Fully downloaded on a previous run; just move it.
                    os.rename(temp_file, save_path)
                    self.progress_manager.complete_task(task_id)
                    return save_path

                # 'r+b' (positional writes) requires an existing file.
                temp_file.touch(exist_ok=True)

                # Slice the remaining bytes into ranges of at least chunk_size.
                chunk_size = max(file_size // self.max_threads, self.chunk_size)
                ranges = [
                    (start, min(start + chunk_size, file_size))
                    for start in range(resume_pos, file_size, chunk_size)
                ]

                # Fetch all ranges concurrently.
                await asyncio.gather(*(
                    self._download_chunk(session, url, temp_file, start, end, report)
                    for start, end in ranges
                ))

                # Download complete — move the file into place.
                os.rename(temp_file, save_path)
                self.progress_manager.complete_task(task_id)
                return save_path

        except Exception as e:
            self.progress_manager.fail_task(task_id, str(e))
            raise

    async def _download_chunk(self, session: aiohttp.ClientSession,
                              url: str, temp_file: Path,
                              start: int, end: int,
                              progress_callback=None) -> None:
        """Fetch bytes [start, end) of *url* into *temp_file* at offset *start*.

        *progress_callback*, if given, is an async callable awaited with the
        number of bytes written per read. Retries up to self.retry_times.
        """
        # BUG FIX: HTTP Range is inclusive on both ends while *end* is
        # exclusive here — the original fetched one overlapping extra byte.
        headers = {'Range': f'bytes={start}-{end - 1}'}

        for attempt in range(self.retry_times):
            try:
                async with session.get(url, headers=headers) as response:
                    if response.status != 206:  # Partial Content expected
                        # BUG FIX: the original silently returned None after
                        # exhausting retries on a non-206 response, leaving
                        # the chunk missing.
                        raise Exception(f"Unexpected status {response.status} for range request")
                    # BUG FIX: 'ab' ignores seek() — appends always go to the
                    # end of file — so concurrent chunks corrupted the output;
                    # 'r+b' honours positional writes.
                    async with aiofiles.open(temp_file, 'r+b') as f:
                        await f.seek(start)
                        async for chunk in response.content.iter_chunked(8192):
                            await f.write(chunk)
                            if progress_callback:
                                await progress_callback(len(chunk))
                return
            except Exception as e:
                if attempt == self.retry_times - 1:
                    raise Exception(f"Failed to download chunk: {e}") from e
                await asyncio.sleep(1)  # back off before retrying

    @log_operation("download_utils")
    async def download_media(self, urls: list, save_dir: str,
                             media_type: str) -> list:
        """Download *urls* concurrently into save_dir/media_type.

        Returns the saved file paths in the order of *urls*.
        """
        save_path = Path(save_dir) / media_type
        save_path.mkdir(parents=True, exist_ok=True)

        async def download_one(index: int, url: str) -> str:
            # NOTE(review): Path(url).suffix may include a query string for
            # URLs like ...jpg?sig=x — confirm callers pass clean URLs.
            file_name = f"{self._stable_name(url)}{Path(url).suffix}"
            # BUG FIX: the original omitted the required task_id argument,
            # so every call raised TypeError.
            return await self.download_file(
                url,
                str(save_path / file_name),
                f"{media_type}_{index}",
            )

        tasks = [download_one(i, url) for i, url in enumerate(urls)]
        return await asyncio.gather(*tasks)

def get_save_directory(base_path: str, platform: str, author: str, title: Optional[str] = None) -> str:
    """Build (and create) the directory a download should be saved into.

    Layout: ``base_path/platform/YYYYMMDD/author/title``. Characters that are
    illegal in file names are replaced with underscores; the author segment is
    capped at 30 characters, the title segment at 50. A missing/empty author
    falls back to a default label, and a missing title falls back to the
    current time (HHMMSS) so repeated runs get distinct directories.

    Args:
        base_path: Root directory for all saved content.
        platform: Platform name, used verbatim as a path segment.
        author: Author name; sanitized before use.
        title: Optional article title; sanitized before use.

    Returns:
        str: Full path of the directory, guaranteed to exist on return.
    """
    illegal_chars = r'[\\/:*?"<>|]'
    now = datetime.now()

    # Sanitize and cap the author segment; fall back when nothing remains.
    author_part = re.sub(illegal_chars, '_', author)[:30] or '未知作者'

    # Title segment: sanitized title, or a time stamp when no title given.
    if title:
        title_part = re.sub(illegal_chars, '_', title)[:50]
    else:
        title_part = now.strftime('%H%M%S')

    target_dir = os.path.join(
        base_path,
        platform,
        now.strftime('%Y%m%d'),
        author_part,
        title_part,
    )
    os.makedirs(target_dir, exist_ok=True)
    return target_dir
