"""
大文件上传和处理服务
支持分片上传、断点续传、文件验证和处理
"""
import json
import os
import hashlib
import shutil
from typing import Dict, List, Optional, Any, BinaryIO
from pathlib import Path
from datetime import datetime
import asyncio
from fastapi import UploadFile, HTTPException
import aiofiles

from app.core.config import settings
from app.core.logging import get_logger

logger = get_logger(__name__)


class FileUploadService:
    """File upload service.

    Handles one-shot uploads as well as chunked (resumable) uploads:
    ``chunked_upload_init`` -> ``upload_chunk`` (xN) ->
    ``complete_chunked_upload``, with ``get_upload_status`` for polling.
    """

    def __init__(self):
        self.upload_dir = Path(settings.UPLOAD_DIR)
        self.temp_dir = self.upload_dir / "temp"            # chunked-upload staging
        self.processed_dir = self.upload_dir / "processed"  # processed output
        self.max_file_size = settings.MAX_FILE_SIZE
        self.allowed_types = settings.ALLOWED_FILE_TYPES

        # Create the required directories up front.
        self._ensure_directories()

    def _ensure_directories(self):
        """Ensure the upload, temp and processed directories exist."""
        for directory in (self.upload_dir, self.temp_dir, self.processed_dir):
            directory.mkdir(parents=True, exist_ok=True)

    async def validate_file(self, file: UploadFile) -> Dict[str, Any]:
        """Validate an uploaded file's extension and size.

        Returns a dict with ``valid`` plus either an ``error`` message or
        the file's size, MD5 hash and extension.  The file position is
        rewound to 0 on success so callers can re-read the content.
        """
        try:
            # Extension check: text after the last dot, lower-cased.
            file_extension = file.filename.split('.')[-1].lower() if '.' in file.filename else ''
            if file_extension not in self.allowed_types:
                return {
                    "valid": False,
                    "error": f"File type {file_extension} not allowed. Allowed types: {self.allowed_types}"
                }

            # Size check by reading the whole body.  NOTE(review): this
            # buffers the entire file in memory; acceptable while
            # MAX_FILE_SIZE stays modest.
            content = await file.read()
            file_size = len(content)

            if file_size > self.max_file_size:
                return {
                    "valid": False,
                    "error": f"File size {file_size} exceeds maximum allowed size {self.max_file_size}"
                }

            # Rewind so callers can read the content again.
            await file.seek(0)

            # MD5 is used as a fast, non-cryptographic integrity checksum.
            file_hash = hashlib.md5(content).hexdigest()

            return {
                "valid": True,
                "file_size": file_size,
                "file_hash": file_hash,
                "file_extension": file_extension
            }

        except Exception as e:
            logger.error(f"File validation error: {e}")
            return {"valid": False, "error": str(e)}

    async def save_file(self, file: UploadFile, user_id: int, custom_name: Optional[str] = None) -> Dict[str, Any]:
        """Validate an uploaded file and persist it to the upload directory.

        The stored name is prefixed with the owning user's id; without a
        ``custom_name`` a timestamp plus the original stem is used.
        """
        try:
            validation = await self.validate_file(file)
            if not validation["valid"]:
                raise HTTPException(status_code=400, detail=validation["error"])

            # Build the target file name.
            if custom_name:
                filename = f"{user_id}_{custom_name}"
            else:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                original_name = Path(file.filename).stem
                filename = f"{user_id}_{timestamp}_{original_name}"

            # BUG FIX: the computed `filename` was previously dropped and a
            # literal "(unknown)" placeholder was written to disk instead.
            file_extension = validation["file_extension"]
            full_filename = f"{filename}.{file_extension}"

            # Persist the file (validate_file rewound the stream to 0).
            file_path = self.upload_dir / full_filename

            async with aiofiles.open(file_path, "wb") as f:
                content = await file.read()
                await f.write(content)

            # Collect metadata describing the stored file.
            file_info = {
                "filename": full_filename,
                "original_name": file.filename,
                "file_path": str(file_path),
                "file_size": validation["file_size"],
                "file_hash": validation["file_hash"],
                "user_id": user_id,
                "uploaded_at": datetime.now().isoformat(),
                "file_extension": file_extension
            }

            logger.info(f"File uploaded successfully: {file_info}")

            return {
                "success": True,
                "file_info": file_info,
                "message": "File uploaded successfully"
            }

        except Exception as e:
            logger.error(f"File upload error: {e}")
            return {"success": False, "error": str(e)}

    async def chunked_upload_init(self, filename: str, total_size: int, user_id: int) -> Dict[str, Any]:
        """Initialise a chunked-upload session and return its upload id."""
        try:
            # Reject oversize files before any disk work.
            if total_size > self.max_file_size:
                raise HTTPException(
                    status_code=400,
                    detail=f"File size {total_size} exceeds maximum allowed size {self.max_file_size}"
                )

            # BUG FIX: the id previously hashed a literal "(unknown)"
            # placeholder instead of the actual filename.
            upload_id = hashlib.md5(f"{filename}_{user_id}_{datetime.now()}".encode()).hexdigest()

            # Per-session staging directory for the chunks.
            upload_temp_dir = self.temp_dir / upload_id
            upload_temp_dir.mkdir(parents=True, exist_ok=True)

            upload_info = {
                "upload_id": upload_id,
                "filename": filename,
                "total_size": total_size,
                "user_id": user_id,
                "chunk_count": 0,
                "uploaded_size": 0,
                "status": "uploading",
                "created_at": datetime.now().isoformat(),
                "chunks_received": [],
                "temp_dir": str(upload_temp_dir)
            }

            # Persist session metadata next to the chunks so the session
            # survives process restarts (resume support).
            info_file = upload_temp_dir / "upload_info.json"
            async with aiofiles.open(info_file, "w") as f:
                await f.write(json.dumps(upload_info, indent=2))

            return {
                "success": True,
                "upload_id": upload_id,
                "message": "Chunked upload initialized"
            }

        except Exception as e:
            logger.error(f"Chunked upload init error: {e}")
            return {"success": False, "error": str(e)}

    async def upload_chunk(
        self, 
        upload_id: str, 
        chunk_number: int, 
        chunk_data: bytes, 
        user_id: int
    ) -> Dict[str, Any]:
        """Store one chunk of an active upload session.

        Re-sending an already-received chunk overwrites it on disk but is
        not double-counted in the progress bookkeeping.
        """
        try:
            # Validate the session exists.
            upload_temp_dir = self.temp_dir / upload_id
            if not upload_temp_dir.exists():
                raise HTTPException(status_code=404, detail="Upload session not found")

            # Load session metadata.
            info_file = upload_temp_dir / "upload_info.json"
            async with aiofiles.open(info_file, "r") as f:
                upload_info = json.loads(await f.read())

            # Only the session owner may add chunks.
            if upload_info["user_id"] != user_id:
                raise HTTPException(status_code=403, detail="Unauthorized access to upload session")

            # Fixed-width chunk names keep lexical and numeric order aligned.
            chunk_filename = f"chunk_{chunk_number:06d}"
            chunk_path = upload_temp_dir / chunk_filename

            async with aiofiles.open(chunk_path, "wb") as f:
                await f.write(chunk_data)

            # BUG FIX: retried/duplicate chunks previously inflated
            # chunk_count and uploaded_size; count each number once.
            if chunk_number not in upload_info["chunks_received"]:
                upload_info["chunk_count"] += 1
                upload_info["uploaded_size"] += len(chunk_data)
                upload_info["chunks_received"].append(chunk_number)

            # Persist the updated bookkeeping.
            async with aiofiles.open(info_file, "w") as f:
                await f.write(json.dumps(upload_info, indent=2))

            # Guard against division by zero for zero-byte uploads.
            total_size = upload_info["total_size"]
            progress = (upload_info["uploaded_size"] / total_size) * 100 if total_size else 100.0

            return {
                "success": True,
                "upload_id": upload_id,
                "chunk_number": chunk_number,
                "chunk_size": len(chunk_data),
                "uploaded_size": upload_info["uploaded_size"],
                "total_size": upload_info["total_size"],
                "progress": progress
            }

        except Exception as e:
            logger.error(f"Chunk upload error: {e}")
            return {"success": False, "error": str(e)}

    async def complete_chunked_upload(self, upload_id: str, user_id: int) -> Dict[str, Any]:
        """Merge all received chunks into the final file and verify it."""
        try:
            upload_temp_dir = self.temp_dir / upload_id
            if not upload_temp_dir.exists():
                raise HTTPException(status_code=404, detail="Upload session not found")

            # Load session metadata.
            info_file = upload_temp_dir / "upload_info.json"
            async with aiofiles.open(info_file, "r") as f:
                upload_info = json.loads(await f.read())

            # Only the session owner may finalise the upload.
            if upload_info["user_id"] != user_id:
                raise HTTPException(status_code=403, detail="Unauthorized access to upload session")

            # SECURITY: take only the base name so a crafted filename with
            # path separators (e.g. "../x") cannot escape the upload dir.
            filename = Path(upload_info["filename"]).name
            final_path = self.upload_dir / filename

            # Concatenate the chunks in ascending chunk-number order.
            chunks_received = sorted(upload_info["chunks_received"])

            async with aiofiles.open(final_path, "wb") as final_file:
                for chunk_num in chunks_received:
                    chunk_path = upload_temp_dir / f"chunk_{chunk_num:06d}"
                    if chunk_path.exists():
                        async with aiofiles.open(chunk_path, "rb") as chunk_file:
                            chunk_data = await chunk_file.read()
                            await final_file.write(chunk_data)

            # A size mismatch means chunks are missing or corrupted.
            file_size = final_path.stat().st_size
            if file_size != upload_info["total_size"]:
                raise HTTPException(
                    status_code=400, 
                    detail=f"File size mismatch: expected {upload_info['total_size']}, got {file_size}"
                )

            # Checksum of the merged file (non-cryptographic integrity tag).
            async with aiofiles.open(final_path, "rb") as f:
                content = await f.read()
                file_hash = hashlib.md5(content).hexdigest()

            # Mark the session completed.
            upload_info["status"] = "completed"
            upload_info["final_path"] = str(final_path)
            upload_info["file_hash"] = file_hash
            upload_info["completed_at"] = datetime.now().isoformat()

            # Persist final session state.
            async with aiofiles.open(info_file, "w") as f:
                await f.write(json.dumps(upload_info, indent=2))

            # Temp-dir cleanup intentionally left to a maintenance job
            # (keeping it allows the status endpoint to keep answering).
            # shutil.rmtree(upload_temp_dir)

            return {
                "success": True,
                "upload_id": upload_id,
                "filename": filename,
                "file_path": str(final_path),
                "file_size": file_size,
                "file_hash": file_hash,
                "message": "File upload completed successfully"
            }

        except Exception as e:
            logger.error(f"Chunked upload completion error: {e}")
            return {"success": False, "error": str(e)}

    async def get_upload_status(self, upload_id: str, user_id: int) -> Dict[str, Any]:
        """Report progress and state for a chunked-upload session."""
        try:
            upload_temp_dir = self.temp_dir / upload_id
            if not upload_temp_dir.exists():
                return {"status": "not_found"}

            info_file = upload_temp_dir / "upload_info.json"
            if not info_file.exists():
                return {"status": "invalid"}

            async with aiofiles.open(info_file, "r") as f:
                upload_info = json.loads(await f.read())

            # Only the session owner may query the session.
            if upload_info["user_id"] != user_id:
                return {"status": "unauthorized"}

            # Guard against division by zero for zero-byte uploads.
            total_size = upload_info["total_size"]
            progress = (upload_info["uploaded_size"] / total_size) * 100 if total_size else 100.0

            return {
                "status": upload_info["status"],
                "upload_id": upload_id,
                "filename": upload_info["filename"],
                "total_size": upload_info["total_size"],
                "uploaded_size": upload_info["uploaded_size"],
                "chunk_count": upload_info["chunk_count"],
                "chunks_received": upload_info["chunks_received"],
                "progress": progress,
                "created_at": upload_info["created_at"]
            }

        except Exception as e:
            logger.error(f"Get upload status error: {e}")
            return {"status": "error", "error": str(e)}

class FileProcessor:
    """Post-upload processor for image, text and PDF files.

    Processed artifacts are written to ``<UPLOAD_DIR>/processed`` with a
    ``processed_`` filename prefix.
    """

    def __init__(self):
        # Output directory for processed artifacts.
        self.processed_dir = Path(settings.UPLOAD_DIR) / "processed"
        self.processed_dir.mkdir(parents=True, exist_ok=True)

    async def process_image(self, file_path: str, operations: List[str]) -> Dict[str, Any]:
        """Apply the requested operations to an image file.

        Supported operations: "resize" (fixed 800x600), "compress"
        (currently handled only via the quality setting on save) and
        "convert" (force RGB and re-encode as JPEG).
        """
        try:
            # Imported lazily so the module loads without Pillow installed.
            from PIL import Image

            path = Path(file_path)
            if not path.exists():
                raise HTTPException(status_code=404, detail="File not found")

            with Image.open(path) as img:
                original_format = img.format

                # Apply operations in the order given by the caller.
                for operation in operations:
                    if operation == "resize":
                        img = img.resize((800, 600))  # fixed default size
                    elif operation == "compress":
                        # Placeholder: actual compression comes from the
                        # quality= argument on save below.
                        img = img.copy()
                    elif operation == "convert":
                        # Force RGB so the image can be written as JPEG.
                        if img.mode != "RGB":
                            img = img.convert("RGB")
                        original_format = "JPEG"

                # Write the result under the processed directory.
                processed_filename = f"processed_{path.stem}.{original_format.lower()}"
                processed_path = self.processed_dir / processed_filename

                img.save(processed_path, format=original_format, quality=85)

            return {
                "success": True,
                "original_path": file_path,
                "processed_path": str(processed_path),
                "operations": operations,
                "message": "Image processed successfully"
            }

        except Exception as e:
            logger.error(f"Image processing error: {e}")
            return {"success": False, "error": str(e)}

    async def process_text_file(self, file_path: str, operations: List[str]) -> Dict[str, Any]:
        """Apply the requested operations to a UTF-8 text file.

        Supported operations: "compress" (collapse whitespace runs) and
        "analyze" (word/char/line counts, returned under "analysis").
        """
        try:
            path = Path(file_path)
            if not path.exists():
                raise HTTPException(status_code=404, detail="File not found")

            # Read the whole file as UTF-8 text.
            async with aiofiles.open(path, "r", encoding="utf-8") as f:
                content = await f.read()

            processed_content = content
            analysis: Optional[Dict[str, int]] = None

            for operation in operations:
                if operation == "compress":
                    # Naive text "compression": collapse all whitespace runs
                    # into single spaces.
                    processed_content = ' '.join(processed_content.split())
                elif operation == "analyze":
                    # Counts reflect the content as transformed so far.
                    analysis = {
                        "word_count": len(processed_content.split()),
                        "char_count": len(processed_content),
                        "line_count": processed_content.count('\n') + 1
                    }

            # Write the (possibly transformed) text out.
            processed_filename = f"processed_{path.stem}.txt"
            processed_path = self.processed_dir / processed_filename

            async with aiofiles.open(processed_path, "w", encoding="utf-8") as f:
                await f.write(processed_content)

            result = {
                "success": True,
                "original_path": file_path,
                "processed_path": str(processed_path),
                "operations": operations,
                "message": "Text file processed successfully"
            }

            # BUG FIX: presence was previously detected with a fragile
            # `"analysis" in locals()` check; use an explicit sentinel.
            if analysis is not None:
                result["analysis"] = analysis

            return result

        except Exception as e:
            logger.error(f"Text file processing error: {e}")
            return {"success": False, "error": str(e)}

    async def process_pdf_file(self, file_path: str, operations: List[str]) -> Dict[str, Any]:
        """Process a PDF file (placeholder implementation).

        A real implementation would use a library such as PyPDF2 or
        pdfplumber to extract text; this stub only writes a marker file.
        """
        try:
            # Example: extract text content to a sibling .txt file.
            processed_filename = f"processed_{Path(file_path).stem}.txt"
            processed_path = self.processed_dir / processed_filename

            # Placeholder output until a PDF library is wired in.
            async with aiofiles.open(processed_path, "w", encoding="utf-8") as f:
                await f.write("PDF processing result would be here")

            return {
                "success": True,
                "original_path": file_path,
                "processed_path": str(processed_path),
                "operations": operations,
                "message": "PDF file processed (placeholder)"
            }

        except Exception as e:
            logger.error(f"PDF processing error: {e}")
            return {"success": False, "error": str(e)}


# Module-level singleton service instances, created at import time.
# NOTE(review): FileUploadService.__init__ creates directories as a side
# effect, so importing this module touches the filesystem.
file_upload_service = FileUploadService()
file_processor = FileProcessor()