"""
文件存储服务
"""
import uuid
from typing import AsyncIterator, List, Dict, Any
from pathlib import Path
import asyncio
from axiom_boot.logging.setup import get_logger

from sqlalchemy import select
from sqlalchemy.orm import selectinload

from axiom_boot.database import BaseService, transactional
from axiom_boot.di import service, autowired
from axiom_boot.filesystem import FileSystemService
from axiom_boot.conf.manager import Settings
from axiom_boot.core.exceptions import NotFoundException, BusinessException
from axiom_boot.api.models import PaginationParams, PaginatedResponse

from src.storage.models.sys_file_storage import SysFileStorage
from src.storage.mapper.sys_file_storage_mapper import SysFileStorageMapper
from src.storage.models.api_models import (
    FolderCreateDTO, FolderTreeNodeVO, ItemsMoveDTO, ItemRenameDTO, FileStorageVO,
    StorageFilterParams
)

logger = get_logger(__name__)


@service()
class StorageService(BaseService[SysFileStorage, SysFileStorageMapper]):
    """
    Business logic for file-storage management.

    - The file/folder hierarchy is modelled as an adjacency list (``parent_id``).
    - Physical file content (local or S3) is handled via ``FileSystemService``.
    - Database metadata is handled via ``SysFileStorageMapper``.
    """
    # Injected by the DI container.
    fs_service: FileSystemService = autowired()
    settings: Settings = autowired()

    def __init__(self, mapper: SysFileStorageMapper = autowired()):
        super().__init__(mapper)

    async def get_folder_tree(self, filters: StorageFilterParams) -> list[FolderTreeNodeVO]:
        """
        Build the complete folder tree for the bucket given in ``filters``.

        Args:
            filters: Only ``filters.bucket`` is used; every folder in that
                bucket is loaded with a single query.

        Returns:
            Root-level folder nodes (``parent_id is None``) with their
            ``children`` lists populated.
        """
        all_folders = await self.find_by_filters(
            bucket=filters.bucket,
            is_dir=1
        )

        # Index every node by id (stringified) so children can be attached
        # to their parents in a single pass.
        folder_map = {str(folder.id): FolderTreeNodeVO.model_validate(folder) for folder in all_folders}
        root_nodes = []

        for folder in all_folders:
            node = folder_map[str(folder.id)]
            if folder.parent_id is None:
                root_nodes.append(node)
            elif str(folder.parent_id) in folder_map:
                folder_map[str(folder.parent_id)].children.append(node)

        return root_nodes

    async def list_items_in_folder(
        self,
        filters: StorageFilterParams,
        pagination: PaginationParams
    ) -> PaginatedResponse[FileStorageVO]:
        """
        List files and sub-folders under a folder (paginated).

        When ``filters.parent_id`` is set, ALL descendants — not only direct
        children — become candidates before name/type filtering is applied.
        When it is unset, only root-level items are listed.
        """
        query_filters = {"bucket": filters.bucket}
        if filters.name:
            query_filters["name__like"] = f"%{filters.name}%"

        if filters.is_dir is not None:
            query_filters["is_dir"] = filters.is_dir

        if filters.parent_id is not None:
            # NOTE(review): this matches every descendant of parent_id, not
            # just direct children — presumably intentional to support
            # recursive name search; confirm against the API contract.
            all_child_ids = await self._get_all_descendant_ids(filters.parent_id)
            if not all_child_ids:
                return PaginatedResponse.create(items=[], total=0, page=pagination.page, page_size=pagination.page_size)
            query_filters["id__in"] = all_child_ids
        else:
            query_filters["parent_id__is"] = None

        # (fix) the page() call was previously duplicated in both branches.
        paginated_db_items = await self.page(pagination=pagination, filters=query_filters)

        vo_items = [FileStorageVO.model_validate(item) for item in paginated_db_items.items]

        return PaginatedResponse[FileStorageVO](
            items=vo_items,
            total=paginated_db_items.total,
            page=paginated_db_items.page,
            page_size=paginated_db_items.page_size,
        )

    @transactional
    async def create_folder(self, dto: FolderCreateDTO) -> SysFileStorage:
        """
        Create a new folder.

        Raises:
            BusinessException: If the parent folder is missing/invalid, or a
                sibling folder with the same name already exists.
        """
        if dto.parent_id:
            parent_folder = await self.find_one_by_filters(id=dto.parent_id, is_dir=1)
            if not parent_folder:
                raise BusinessException("指定的父文件夹不存在或无效")

        # Enforce name uniqueness among sibling folders in the same bucket.
        existing = await self.find_one_by_filters(
            parent_id=dto.parent_id,
            name=dto.name,
            bucket=dto.bucket,
            is_dir=1
        )
        if existing:
            raise BusinessException(f"文件夹 '{dto.name}' 已存在")

        folder = SysFileStorage(
            parent_id=dto.parent_id,
            bucket=dto.bucket,
            name=dto.name,
            is_dir=1,
        )
        return await self.save(folder)

    @transactional
    async def upload_file(
        self,
        filename: str,
        mime_type: str,
        size_bytes: int,
        stream: AsyncIterator[bytes],
        bucket: str,
        parent_id: int | None,
    ) -> SysFileStorage:
        """
        Handle a single file upload: write the physical file, then persist
        its metadata row.

        Raises:
            BusinessException: If ``parent_id`` does not reference an
                existing folder.
        """
        file_suffix = Path(filename).suffix

        # Build a storage key that mirrors the logical folder structure; the
        # uuid avoids collisions between files sharing the same name.
        if parent_id:
            parent_folder = await self.find_one_by_filters(id=parent_id, is_dir=1)
            if not parent_folder:
                raise BusinessException("指定的父文件夹不存在或无效")

            folder_path = await self._build_folder_path_by_id(parent_id)
            storage_key = f"{folder_path}/{uuid.uuid4()}{file_suffix}"
        else:
            storage_key = f"{uuid.uuid4()}{file_suffix}"

        provider = self.settings.filesystem.provider

        # NOTE(review): the physical write happens before the DB insert, so a
        # failed insert leaves an orphaned object in storage — a cleanup job
        # may be needed; confirm.
        await self.fs_service.save(
            stream=stream,
            path=storage_key,
            bucket_name=bucket,
            ContentType=mime_type,
        )

        # (fix) removed a duplicated parent-folder re-validation that used to
        # run here; the parent was already validated above, before the write.
        # Session handling is covered by @transactional — no manual management.
        file_meta = SysFileStorage(
            parent_id=parent_id,
            bucket=bucket,
            name=filename,
            is_dir=0,
            storage_key=storage_key,
            provider=provider,
            mime_type=mime_type,
            size_bytes=size_bytes,
        )
        return await self.save(file_meta)

    async def batch_upload_files(self, files_data: List[Dict[str, Any]], bucket: str,
                                parent_id: int | None = None) -> List[SysFileStorage]:
        """
        Upload multiple files, best-effort: a failure on one file is logged
        and the remaining files are still processed.

        Each upload runs in its own transaction via ``upload_file``.

        Args:
            files_data: One dict per file with keys:
                - filename: original file name
                - mime_type: MIME type
                - size_bytes: file size in bytes
                - stream: async byte stream of the content
            bucket: Target bucket name.
            parent_id: Optional parent folder id.

        Returns:
            The metadata records of the files that uploaded successfully.
        """
        uploaded_files = []

        for file_data in files_data:
            try:
                uploaded_file = await self.upload_file(
                    filename=file_data["filename"],
                    mime_type=file_data["mime_type"],
                    size_bytes=file_data["size_bytes"],
                    stream=file_data["stream"],
                    bucket=bucket,
                    parent_id=parent_id
                )
                uploaded_files.append(uploaded_file)
            except Exception as e:
                # Best-effort semantics: log the failure, keep going.
                logger.error(f"文件 {file_data.get('filename', 'unknown')} 上传失败: {e}")
                continue

        return uploaded_files

    async def _build_folder_path_by_id(self, folder_id: int) -> str:
        """
        Build the full '/'-joined folder path for a folder id by walking up
        the ``parent_id`` chain.

        Args:
            folder_id: Folder id to start from.

        Returns:
            The path from the topmost reachable ancestor down to the folder.
        """
        path_parts = []
        current_id = folder_id
        # (fix) guard against parent_id cycles in bad data, which would
        # previously spin this loop forever.
        seen: set[int] = set()

        while current_id and current_id not in seen:
            seen.add(current_id)
            folder = await self.find_one_by_filters(id=current_id, is_dir=1)
            if not folder:
                break
            path_parts.append(folder.name)
            current_id = folder.parent_id

        # The walk collects names child-first; reverse for root-first order.
        path_parts.reverse()
        return "/".join(path_parts)

    @transactional
    async def rename_item(self, item_id: int, dto: ItemRenameDTO):
        """
        Rename a file or folder.

        Raises:
            BusinessException: If another sibling of the same kind already
                uses the new name.
        """
        item = await self.find_by_pk_or_fail(item_id)

        existing = await self.find_one_by_filters(
            parent_id=item.parent_id,
            name=dto.new_name,
            bucket=item.bucket,
            is_dir=item.is_dir
        )
        # Renaming an item to its own current name is a no-op, not a conflict.
        if existing and existing.id != item.id:
            raise BusinessException(f"名称 '{dto.new_name}' 已被占用")

        await self.update_by_pk(item_id, {"name": dto.new_name})

    @transactional
    async def move_items(self, dto: ItemsMoveDTO):
        """
        Move one or more files/folders under a new parent folder.

        Raises:
            BusinessException: If the target folder is invalid, the move
                would place a folder inside itself or one of its own
                descendants, or a same-name item exists at the destination.
        """
        if dto.target_parent_id:
            target_folder = await self.find_one_by_filters(id=dto.target_parent_id, is_dir=1)
            if not target_folder:
                raise BusinessException("目标文件夹不存在或无效")

        items_to_move = await self.find_by_filters(id__in=dto.item_ids)
        if not items_to_move:
            return

        # Validate every item first; only mutate once all checks pass.
        for item in items_to_move:
            if item.id == dto.target_parent_id:
                raise BusinessException("不能将文件夹移动到其自身内部")

            # (fix) also reject moving a folder into one of its own
            # descendants — that would create a cycle in the tree.
            if item.is_dir and dto.target_parent_id:
                descendant_ids = await self._get_all_descendant_ids(item.id)
                if dto.target_parent_id in descendant_ids:
                    raise BusinessException("不能将文件夹移动到其自身内部")

            existing = await self.find_one_by_filters(
                parent_id=dto.target_parent_id,
                name=item.name,
                is_dir=item.is_dir,
                bucket=item.bucket
            )
            if existing:
                raise BusinessException(f"目标文件夹中已存在同名项目 '{item.name}'")

        for item in items_to_move:
            item.parent_id = dto.target_parent_id

        await self.batch_save(items_to_move)

    @transactional
    async def delete_item(self, item_id: int):
        """
        Delete a file, or a folder together with all of its descendants.

        Physical objects are removed from storage first, then the metadata
        rows are deleted.
        """
        item = await self.find_by_pk_or_fail(item_id)

        if item.is_dir:
            all_child_ids = await self._get_all_descendant_ids(item.id)
            all_ids_to_delete = [item.id] + all_child_ids

            # Only real files carry physical content; folders are
            # metadata-only rows.
            files_to_delete_physically = await self.find_by_filters(
                id__in=all_ids_to_delete,
                is_dir=0
            )

            # Delete the physical objects concurrently.
            # NOTE(review): this runs before the transaction commits; a
            # rollback would leave metadata pointing at deleted objects.
            delete_tasks = [
                self.fs_service.delete(path=f.storage_key, bucket_name=f.bucket)
                for f in files_to_delete_physically if f.storage_key
            ]
            if delete_tasks:
                await asyncio.gather(*delete_tasks)

            # NOTE(review): the folder branch soft-deletes rows while the
            # file branch below hard-deletes — confirm this asymmetry is
            # intentional before changing it.
            await self.soft_delete_by_pks(all_ids_to_delete)
        else:
            if item.storage_key:
                await self.fs_service.delete(path=item.storage_key, bucket_name=item.bucket)
            await self.delete_by_pk(item_id)

    async def _get_all_descendant_ids(self, folder_id: int) -> list[int]:
        """
        Recursively collect the ids of every descendant (files and folders)
        of the given folder.

        Note: issues one query per visited folder (N+1); acceptable for
        shallow trees.
        """
        all_ids = []
        children = await self.find_by_filters(parent_id=folder_id)
        for child in children:
            all_ids.append(child.id)
            if child.is_dir:
                all_ids.extend(await self._get_all_descendant_ids(child.id))
        return all_ids

    async def get_local_file_stream(self, file_id: int) -> tuple[AsyncIterator[bytes] | None, str | None]:
        """
        Return a byte stream and MIME type for a locally-stored file.

        Returns:
            ``(stream, mime_type)``, or ``(None, None)`` when the file is
            missing, not stored locally, or has no storage key.
        """
        file_meta = await self.find_by_pk(file_id)
        if not file_meta or file_meta.provider != "local" or not file_meta.storage_key:
            return None, None

        file_stream = await self.fs_service.read(
            path=file_meta.storage_key,
            bucket_name=file_meta.bucket
        )
        return file_stream, file_meta.mime_type

    async def is_bucket_empty(self, bucket_name: str) -> bool:
        """
        Return ``True`` when the bucket contains no metadata records.
        """
        count = await self.count(bucket=bucket_name)
        return count == 0
