"""
File Storage Service
文件存储服务 - 文件上传、下载、存储管理，支持分片上传和断点续传
"""

import os
import uuid
import hashlib
import mimetypes
from typing import Optional, Dict, Any, List, BinaryIO
from datetime import datetime, timedelta
from pathlib import Path
import asyncio
import aiofiles
import structlog

from ..core.config import settings
from ..core.exceptions import (
    FileUploadError,
    FileDownloadError,
    FileValidationError,
    FileSizeExceededError,
    FileNotFoundError,
    ValidationError,
)

# Configure the structured logger for this module
logger = structlog.get_logger(__name__)


class FileUploadSession:
    """Tracks the state of a single chunked (resumable) file upload."""

    def __init__(
        self,
        session_id: str,
        file_name: str,
        file_size: int,
        chunk_size: int = 1024 * 1024,  # default chunk size: 1MB
        total_chunks: int = 0
    ):
        self.session_id = session_id
        self.file_name = file_name
        self.file_size = file_size
        self.chunk_size = chunk_size
        # When the caller does not supply a chunk count, derive it from the size.
        self.total_chunks = total_chunks or self._calculate_total_chunks()
        self.uploaded_chunks: set = set()
        self.created_at = datetime.utcnow()
        self.updated_at = datetime.utcnow()

    def _calculate_total_chunks(self) -> int:
        """Number of chunks needed to cover the whole file (ceiling division)."""
        return -(-self.file_size // self.chunk_size)

    def get_chunk_info(self, chunk_index: int) -> Dict[str, Any]:
        """Describe the byte range covered by the chunk at *chunk_index*."""
        begin = chunk_index * self.chunk_size
        # The last chunk may be shorter than chunk_size.
        end = min(begin + self.chunk_size, self.file_size)
        return {
            "index": chunk_index,
            "start_byte": begin,
            "end_byte": end,
            "size": end - begin,
        }

    def is_chunk_uploaded(self, chunk_index: int) -> bool:
        """True when the chunk has already been received."""
        return chunk_index in self.uploaded_chunks

    def mark_chunk_uploaded(self, chunk_index: int) -> None:
        """Record a successfully received chunk and refresh the activity timestamp."""
        self.uploaded_chunks.add(chunk_index)
        self.updated_at = datetime.utcnow()

    def is_complete(self) -> bool:
        """True once every expected chunk has been uploaded."""
        return len(self.uploaded_chunks) == self.total_chunks

    def get_progress(self) -> float:
        """Upload progress as a percentage in [0, 100]."""
        if not self.total_chunks:
            return 0.0
        return (len(self.uploaded_chunks) / self.total_chunks) * 100


class FileStorageService:
    """文件存储服务类"""

    def __init__(self):
        # In-memory registry of active chunked-upload sessions, keyed by session id.
        self.upload_sessions: Dict[str, FileUploadSession] = {}
        cfg = settings.file_storage
        self.base_upload_path = Path(cfg.upload_dir)
        self.base_content_path = Path(cfg.content_storage_path)
        # The configured limit is in MB; keep the byte value around.
        self.max_file_size = cfg.max_upload_size_mb * 1024 * 1024
        self.allowed_extensions = self._parse_allowed_extensions()

        # Make sure both storage roots exist before any file operation runs.
        for root in (self.base_upload_path, self.base_content_path):
            root.mkdir(parents=True, exist_ok=True)

    def _parse_allowed_extensions(self) -> set:
        """Build the set of permitted (lower-cased, dot-free) file extensions."""
        raw = settings.file_storage.allowed_extensions
        # Comma-separated list in config; drop blanks and normalize case.
        stripped = (part.strip() for part in raw.split(","))
        return {ext.lower() for ext in stripped if ext}

    def _generate_file_path(
        self,
        file_name: str,
        content_type: str,
        user_id: uuid.UUID
    ) -> Path:
        """生成文件存储路径"""
        # 获取文件扩展名
        file_ext = Path(file_name).suffix.lower()
        
        # 生成唯一文件名
        unique_name = f"{uuid.uuid4()}{file_ext}"
        
        # 按日期和用户组织目录结构
        date_path = datetime.utcnow().strftime("%Y/%m/%d")
        user_path = str(user_id)
        
        # 根据内容类型选择基础路径
        if content_type == "upload":
            base_path = self.base_upload_path / user_path / date_path
        else:
            base_path = self.base_content_path / content_type / user_path / date_path
        
        base_path.mkdir(parents=True, exist_ok=True)
        
        return base_path / unique_name

    def _calculate_file_hash(self, file_path: str) -> str:
        """计算文件哈希值 - 用于文件完整性检查，非安全用途"""
        hash_md5 = hashlib.md5(usedforsecurity=False)
        with open(file_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def _get_mime_type(self, file_name: str) -> str:
        """获取MIME类型"""
        mime_type, _ = mimetypes.guess_type(file_name)
        return mime_type or "application/octet-stream"

    def _validate_file_extension(self, file_name: str) -> bool:
        """验证文件扩展名"""
        file_ext = Path(file_name).suffix.lower().lstrip(".")
        return file_ext in self.allowed_extensions

    def _validate_file_size(self, file_size: int) -> None:
        """验证文件大小"""
        if file_size > self.max_file_size:
            raise FileSizeExceededError(
                f"文件大小超出限制: {file_size / (1024*1024):.1f}MB > {self.max_file_size / (1024*1024):.1f}MB"
            )

    async def validate_file_content(self, file_path: Path, expected_type: str = None) -> bool:
        """
        Validate file content by size and magic-byte signature.

        Args:
            file_path: Path of the file to inspect
            expected_type: Expected coarse type ("image", "video" or "audio");
                when None only existence and emptiness are checked

        Returns:
            bool: True when validation passes; False when the file is missing

        Raises:
            FileValidationError: empty file, signature mismatch, or I/O failure
        """
        try:
            # A missing file is reported via the return value, not an exception.
            if not file_path.exists():
                return False

            if file_path.stat().st_size == 0:
                raise FileValidationError("文件为空")

            # Read the leading bytes and compare against well-known signatures.
            async with aiofiles.open(file_path, 'rb') as f:
                header = await f.read(16)

                if expected_type:
                    if expected_type == "image" and not header.startswith((b'\xFF\xD8\xFF', b'\x89PNG', b'GIF')):
                        raise FileValidationError("不是有效的图片文件")
                    elif expected_type == "video" and not header.startswith((b'\x00\x00\x00', b'ftyp', b'FLV')):
                        raise FileValidationError("不是有效的视频文件")
                    elif expected_type == "audio" and not header.startswith((b'ID3', b'RIFF', b'OggS')):
                        raise FileValidationError("不是有效的音频文件")

            return True

        except FileValidationError as e:
            # Fix: propagate validation failures unchanged instead of re-wrapping
            # them, which previously duplicated the error-message prefix.
            logger.error("文件内容验证失败", file_path=str(file_path), error=str(e))
            raise
        except Exception as e:
            logger.error("文件内容验证失败", file_path=str(file_path), error=str(e))
            raise FileValidationError(f"文件内容验证失败: {str(e)}")

    async def upload_file(
        self,
        file_data: BinaryIO,
        file_name: str,
        content_type: str,
        user_id: uuid.UUID,
        validate_content: bool = True
    ) -> Dict[str, Any]:
        """
        Upload a file to local storage.

        Args:
            file_data: Binary stream holding the file contents
            file_name: Original file name (used for extension and MIME checks)
            content_type: Logical content type (image, video, audio, upload)
            user_id: Owner's user id (used in the storage path)
            validate_content: Whether to run magic-byte content validation

        Returns:
            Dict[str, Any]: upload result (path, size, hash, MIME type)

        Raises:
            FileValidationError: extension or content validation failed
            FileSizeExceededError: file exceeds the configured size limit
            FileUploadError: any other failure while writing the file
        """
        logger.info("上传文件", 
                   file_name=file_name, 
                   content_type=content_type, 
                   user_id=str(user_id))

        file_path: Optional[Path] = None
        try:
            if not self._validate_file_extension(file_name):
                raise FileValidationError(f"不支持的文件类型: {file_name}")

            file_path = self._generate_file_path(file_name, content_type, user_id)

            # Fix: enforce the size limit while streaming, so an oversized
            # upload is aborted early instead of being fully written to disk
            # and only then rejected.
            total_size = 0
            async with aiofiles.open(file_path, 'wb') as f:
                while chunk := file_data.read(8192):
                    total_size += len(chunk)
                    self._validate_file_size(total_size)
                    await f.write(chunk)

            if validate_content:
                await self.validate_file_content(file_path, content_type)

            file_hash = self._calculate_file_hash(file_path)
            mime_type = self._get_mime_type(file_name)

            logger.info("文件上传成功", 
                       file_name=file_name, 
                       file_path=str(file_path),
                       file_size=total_size)

            return {
                "success": True,
                "file_path": str(file_path),
                "file_name": file_name,
                "file_size": total_size,
                "file_hash": file_hash,
                "mime_type": mime_type,
                "relative_path": str(file_path.relative_to(self.base_upload_path if content_type == "upload" else self.base_content_path))
            }

        except Exception as e:
            # Best-effort cleanup of the partially written file.
            if file_path is not None and file_path.exists():
                try:
                    file_path.unlink()
                except Exception as cleanup_error:
                    logger.warning("临时文件清理失败", file_path=str(file_path), error=str(cleanup_error))

            logger.error("文件上传失败", file_name=file_name, error=str(e))
            # Fix: preserve the documented exception types instead of
            # collapsing every failure into FileUploadError.
            if isinstance(e, (FileValidationError, FileSizeExceededError)):
                raise
            raise FileUploadError(f"文件上传失败: {str(e)}")

    async def start_chunked_upload(
        self,
        file_name: str,
        file_size: int,
        content_type: str,
        user_id: uuid.UUID,
        chunk_size: int = 1024 * 1024  # 1MB
    ) -> Dict[str, Any]:
        """
        Start a chunked (resumable) upload session.

        Args:
            file_name: Original file name
            file_size: Total file size in bytes
            content_type: Logical content type
            user_id: Owner's user id
            chunk_size: Size of each chunk in bytes

        Returns:
            Dict[str, Any]: session info (id, chunk size/count, uploaded chunks)

        Raises:
            FileValidationError: name/size/extension validation failed
            FileSizeExceededError: file exceeds the configured size limit
            FileUploadError: any other failure while creating the session
        """
        logger.info("开始分片上传", 
                   file_name=file_name, 
                   file_size=file_size, 
                   user_id=str(user_id))

        try:
            if not file_name or file_size <= 0:
                raise FileValidationError("文件名和文件大小不能为空")

            # Fix: the boolean result of _validate_file_extension was previously
            # ignored, so disallowed extensions slipped through chunked uploads.
            if not self._validate_file_extension(file_name):
                raise FileValidationError(f"不支持的文件类型: {file_name}")
            self._validate_file_size(file_size)

            session_id = str(uuid.uuid4())
            upload_session = FileUploadSession(
                session_id=session_id,
                file_name=file_name,
                file_size=file_size,
                chunk_size=chunk_size
            )

            self.upload_sessions[session_id] = upload_session

            return {
                "session_id": session_id,
                "chunk_size": chunk_size,
                "total_chunks": upload_session.total_chunks,
                "uploaded_chunks": list(upload_session.uploaded_chunks)
            }

        except Exception as e:
            logger.error("开始分片上传失败", file_name=file_name, error=str(e))
            # Fix: keep the documented exception types instead of wrapping
            # validation failures into FileUploadError.
            if isinstance(e, (FileValidationError, FileSizeExceededError)):
                raise
            raise FileUploadError(f"开始分片上传失败: {str(e)}")

    async def upload_chunk(
        self,
        session_id: str,
        chunk_index: int,
        chunk_data: BinaryIO
    ) -> Dict[str, Any]:
        """
        Upload a single chunk of a chunked upload session.

        Args:
            session_id: Id returned by start_chunked_upload
            chunk_index: Zero-based index of the chunk being sent
            chunk_data: Binary stream with the chunk contents

        Returns:
            Dict[str, Any]: per-chunk result including overall progress

        Raises:
            FileUploadError: unknown session, invalid index, or write failure
        """
        logger.info("上传分片", session_id=session_id, chunk_index=chunk_index)

        try:
            session = self.upload_sessions.get(session_id)
            if not session:
                raise FileUploadError("上传会话不存在或已过期")

            if chunk_index < 0 or chunk_index >= session.total_chunks:
                raise FileValidationError(f"无效的分片索引: {chunk_index}")

            # Idempotent: re-sending an already stored chunk is a cheap no-op,
            # which is what makes resumed uploads work.
            if session.is_chunk_uploaded(chunk_index):
                return {
                    "success": True,
                    "chunk_index": chunk_index,
                    "already_uploaded": True,
                    "progress": session.get_progress()
                }

            chunk_file_path = self._get_chunk_file_path(session_id, chunk_index)
            chunk_file_path.parent.mkdir(parents=True, exist_ok=True)

            # Stream the chunk to its temp file in 8KB pieces.
            chunk_size = 0
            async with aiofiles.open(chunk_file_path, 'wb') as f:
                while data := chunk_data.read(8192):
                    await f.write(data)
                    chunk_size += len(data)

            # Size mismatches are logged but tolerated here; the merged file
            # is validated again when the upload completes.
            chunk_info = session.get_chunk_info(chunk_index)
            if chunk_size != chunk_info["size"]:
                logger.warning(f"分片大小不匹配: 期望 {chunk_info['size']}, 实际 {chunk_size}")

            session.mark_chunk_uploaded(chunk_index)

            return {
                "success": True,
                "chunk_index": chunk_index,
                "chunk_size": chunk_size,
                "progress": session.get_progress()
            }

        except FileUploadError:
            # Fix: do not re-wrap an already descriptive FileUploadError
            # (previously the message prefix was duplicated).
            logger.error("上传分片失败", session_id=session_id, chunk_index=chunk_index)
            raise
        except Exception as e:
            logger.error("上传分片失败", session_id=session_id, chunk_index=chunk_index, error=str(e))
            raise FileUploadError(f"上传分片失败: {str(e)}")

    async def complete_chunked_upload(
        self,
        session_id: str,
        content_type: str,
        user_id: uuid.UUID
    ) -> Dict[str, Any]:
        """
        Finish a chunked upload: merge chunks, validate, and clean up.

        Args:
            session_id: Id returned by start_chunked_upload
            content_type: Logical content type used for the final path
            user_id: Owner's user id (used in the storage path)

        Returns:
            Dict[str, Any]: final file info (path, size, hash, MIME type)

        Raises:
            FileUploadError: unknown session, missing chunks, or merge failure
        """
        logger.info("完成分片上传", session_id=session_id, user_id=str(user_id))

        try:
            session = self.upload_sessions.get(session_id)
            if not session:
                raise FileUploadError("上传会话不存在或已过期")

            if not session.is_complete():
                missing_chunks = set(range(session.total_chunks)) - session.uploaded_chunks
                raise FileUploadError(f"还有 {len(missing_chunks)} 个分片未上传")

            # Fix: _merge_chunks is a coroutine and was previously called
            # without await, so the merge never ran and a coroutine object
            # leaked into the validation step.
            final_file_path = await self._merge_chunks(session, content_type, user_id)

            await self.validate_file_content(final_file_path)

            file_hash = self._calculate_file_hash(final_file_path)
            mime_type = self._get_mime_type(session.file_name)

            # Remove the temp chunk files and drop the in-memory session.
            await self._cleanup_chunks(session_id)
            del self.upload_sessions[session_id]

            logger.info("分片上传完成", 
                       session_id=session_id, 
                       file_path=str(final_file_path))

            return {
                "success": True,
                "file_path": str(final_file_path),
                "file_name": session.file_name,
                "file_size": session.file_size,
                "file_hash": file_hash,
                "mime_type": mime_type
            }

        except FileUploadError:
            # Fix: re-raise without re-wrapping to avoid duplicated prefixes.
            logger.error("完成分片上传失败", session_id=session_id)
            raise
        except Exception as e:
            logger.error("完成分片上传失败", session_id=session_id, error=str(e))
            raise FileUploadError(f"完成分片上传失败: {str(e)}")

    async def download_file(self, file_path: str) -> Optional[Dict[str, Any]]:
        """
        Resolve a stored file for download.

        Args:
            file_path: Path of the stored file

        Returns:
            Optional[Dict[str, Any]]: file metadata plus a path handle

        Raises:
            FileNotFoundError: the path does not point at an existing file
            FileDownloadError: any other failure while resolving the file
        """
        logger.info("下载文件", file_path=file_path)

        try:
            target = Path(file_path)

            if not (target.exists() and target.is_file()):
                raise FileNotFoundError(f"文件不存在: {file_path}")

            size = target.stat().st_size
            name = target.name

            logger.info("文件下载成功", file_path=file_path, file_size=size)

            return {
                "file_path": str(target),
                "file_name": name,
                "file_size": size,
                "mime_type": self._get_mime_type(name),
                "file_stream": target
            }

        except FileNotFoundError:
            raise
        except Exception as e:
            logger.error("文件下载失败", file_path=file_path, error=str(e))
            raise FileDownloadError(f"文件下载失败: {str(e)}")

    async def delete_file(self, file_path: str) -> bool:
        """
        Delete a stored file.

        Args:
            file_path: Path of the file to remove

        Returns:
            bool: True when the file was removed, False otherwise
        """
        logger.info("删除文件", file_path=file_path)

        try:
            target = Path(file_path)
            if not target.exists():
                logger.warning("文件不存在，无需删除", file_path=file_path)
                return False

            target.unlink()
            logger.info("文件删除成功", file_path=file_path)
            return True

        except Exception as e:
            logger.error("文件删除失败", file_path=file_path, error=str(e))
            return False

    def get_file_info(self, file_path: str) -> Optional[Dict[str, Any]]:
        """
        获取文件信息

        Args:
            file_path: 文件路径

        Returns:
            Optional[Dict[str, Any]]: 文件信息，文件不存在返回None
        """
        try:
            file_path_obj = Path(file_path)
            
            if not file_path_obj.exists() or not file_path_obj.is_file():
                return None

            stat = file_path_obj.stat()
            
            return {
                "file_path": str(file_path_obj),
                "file_name": file_path_obj.name,
                "file_size": stat.st_size,
                "created_at": datetime.fromtimestamp(stat.st_ctime),
                "modified_at": datetime.fromtimestamp(stat.st_mtime),
                "mime_type": self._get_mime_type(file_path_obj.name),
                "is_readable": os.access(file_path_obj, os.R_OK),
                "is_writable": os.access(file_path_obj, os.W_OK)
            }

        except Exception as e:
            logger.error("获取文件信息失败", file_path=file_path, error=str(e))
            return None

    def cleanup_expired_files(self, max_age_hours: int = 24) -> int:
        """
        Remove files older than *max_age_hours* from both storage roots.

        Args:
            max_age_hours: Maximum age, in hours, before a file is purged

        Returns:
            int: number of files removed
        """
        cutoff = datetime.utcnow() - timedelta(hours=max_age_hours)
        removed = 0

        try:
            for root in (self.base_upload_path, self.base_content_path):
                if not root.exists():
                    continue
                for candidate in root.rglob("*"):
                    if not candidate.is_file():
                        continue
                    modified = datetime.fromtimestamp(candidate.stat().st_mtime)
                    if modified >= cutoff:
                        continue
                    # Deletion is best-effort: one locked/busy file must not
                    # stop the sweep.
                    try:
                        candidate.unlink()
                        removed += 1
                        logger.debug("清理过期文件", file_path=str(candidate))
                    except Exception as e:
                        logger.warning("清理文件失败", file_path=str(candidate), error=str(e))

            logger.info("清理过期文件完成", cleaned_count=removed, max_age_hours=max_age_hours)
            return removed

        except Exception as e:
            logger.error("清理过期文件失败", error=str(e))
            return 0

    def get_storage_statistics(self) -> Dict[str, Any]:
        """
        获取存储统计信息

        Returns:
            Dict[str, Any]: 存储统计信息
        """
        try:
            upload_stats = self._get_directory_stats(self.base_upload_path)
            content_stats = self._get_directory_stats(self.base_content_path)
            
            return {
                "upload_directory": {
                    "path": str(self.base_upload_path),
                    "total_size": upload_stats["total_size"],
                    "file_count": upload_stats["file_count"],
                    "directory_count": upload_stats["directory_count"]
                },
                "content_directory": {
                    "path": str(self.base_content_path),
                    "total_size": content_stats["total_size"],
                    "file_count": content_stats["file_count"],
                    "directory_count": content_stats["directory_count"]
                },
                "total_size": upload_stats["total_size"] + content_stats["total_size"],
                "total_files": upload_stats["file_count"] + content_stats["file_count"]
            }

        except Exception as e:
            logger.error("获取存储统计失败", error=str(e))
            return {}

    # Private helper methods

    def _get_chunk_file_path(self, session_id: str, chunk_index: int) -> Path:
        """获取分片文件路径 - 使用安全的临时目录"""
        import tempfile
        temp_dir = Path(tempfile.gettempdir()) / "media_create_uploads" / session_id
        temp_dir.mkdir(parents=True, exist_ok=True, mode=0o700)  # 设置安全权限
        return temp_dir / f"chunk_{chunk_index:06d}"

    async def _merge_chunks(
        self,
        session: FileUploadSession,
        content_type: str,
        user_id: uuid.UUID
    ) -> Path:
        """Concatenate all uploaded chunks, in index order, into the final file."""
        destination = self._generate_file_path(session.file_name, content_type, user_id)
        destination.parent.mkdir(parents=True, exist_ok=True)

        async with aiofiles.open(destination, 'wb') as output:
            for index in range(session.total_chunks):
                chunk_path = self._get_chunk_file_path(session.session_id, index)

                if not chunk_path.exists():
                    raise FileUploadError(f"分片 {index} 不存在")

                # Copy each chunk through an 8KB buffer to bound memory use.
                async with aiofiles.open(chunk_path, 'rb') as source:
                    while data := await source.read(8192):
                        await output.write(data)

        return destination

    async def _cleanup_chunks(self, session_id: str) -> None:
        """清理分片文件"""
        try:
            chunk_dir = Path(tempfile.gettempdir()) / "media_create_uploads" / session_id
            if chunk_dir.exists():
                for chunk_file in chunk_dir.glob("chunk_*"):
                    try:
                        chunk_file.unlink()
                    except Exception as file_error:
                        logger.debug("分片文件清理失败", chunk_file=str(chunk_file), error=str(file_error))
                try:
                    chunk_dir.rmdir()
                except Exception as dir_error:
                    logger.debug("分片目录清理失败", chunk_dir=str(chunk_dir), error=str(dir_error))
        except Exception as e:
            logger.warning("清理分片文件失败", session_id=session_id, error=str(e))

    def _get_directory_stats(self, directory: Path) -> Dict[str, Any]:
        """获取目录统计信息"""
        total_size = 0
        file_count = 0
        directory_count = 0

        try:
            if directory.exists():
                for item in directory.rglob("*"):
                    if item.is_file():
                        total_size += item.stat().st_size
                        file_count += 1
                    elif item.is_dir():
                        directory_count += 1
        except Exception as e:
            logger.error("获取目录统计失败", directory=str(directory), error=str(e))

        return {
            "total_size": total_size,
            "file_count": file_count,
            "directory_count": directory_count
        }