from sqlalchemy.orm import Session
from app.database.models import UploadTask, UploadChunk, User, File
from app.utils.minio_client import minio_client
from datetime import datetime, timedelta
import uuid
import logging
import math
from typing import Dict, Any, List, Optional, Tuple
from fastapi import HTTPException
import json
from sqlalchemy.exc import SQLAlchemyError
from botocore.exceptions import ClientError
from pathlib import Path
import os
import tempfile

# 配置日志记录器
logger = logging.getLogger(__name__)

def create_upload_task(
    db: Session,
    user_id: int,
    file_name: str,
    file_size: int,
    chunk_size: int,
    mime_type: Optional[str] = None,
    directory_id: int = 0
) -> Dict[str, Any]:
    """
    Create a new chunked-upload task.

    Initializes a MinIO multipart upload, persists an UploadTask row and
    pre-creates one UploadChunk row per expected chunk.

    Args:
        db: Database session.
        user_id: Owner of the upload.
        file_name: Original file name.
        file_size: Total file size in bytes (>= 0).
        chunk_size: Size of each chunk in bytes (must be > 0).
        mime_type: MIME type; derived from the file extension when omitted.
        directory_id: Target directory ID, 0 means root.

    Returns:
        Dict with task_id, upload_id, chunk_size, total_chunks, expires_at.

    Raises:
        HTTPException: 400 for invalid sizes, 500 on storage/database failure.
    """
    # Validate sizes up-front: chunk_size == 0 would otherwise raise
    # ZeroDivisionError below, and negative values make no sense.
    if chunk_size <= 0:
        raise HTTPException(status_code=400, detail="分片大小必须大于0")
    if file_size < 0:
        raise HTTPException(status_code=400, detail="文件大小不能为负数")

    # Force at least one chunk so that a zero-byte file still has a part
    # to upload and the multipart completion has a non-empty part list.
    total_chunks = max(1, math.ceil(file_size / chunk_size))

    # Task ID doubles as part of the temporary object key.
    task_id = str(uuid.uuid4())

    # Tasks expire 7 days after creation (see clean_expired_tasks).
    expires_at = datetime.now() + timedelta(days=7)

    bucket_name = minio_client.bucket_name
    object_name = f"temp/{user_id}/{task_id}/{file_name}"
    upload_id: Optional[str] = None

    def _abort_pending_upload() -> None:
        # Best-effort cleanup: without this, a DB failure after the
        # multipart upload was created would orphan it in MinIO.
        if upload_id:
            try:
                minio_client.s3_client.abort_multipart_upload(
                    Bucket=bucket_name,
                    Key=object_name,
                    UploadId=upload_id
                )
            except Exception as cleanup_err:
                logger.warning(f"取消MinIO上传失败: {str(cleanup_err)}")

    try:
        logger.info(f"初始化MinIO分片上传: bucket={bucket_name}, object={object_name}")

        # Fall back to an extension-based MIME type when none is supplied.
        if mime_type is None:
            file_ext = os.path.splitext(file_name)[1].lower()
            mime_type = get_default_mime_type(file_ext)
            logger.debug(f"未提供MIME类型，根据扩展名'{file_ext}'设置为默认值: {mime_type}")

        # Start the S3 multipart upload.
        result = minio_client.s3_client.create_multipart_upload(
            Bucket=bucket_name,
            Key=object_name,
            ContentType=mime_type
        )

        upload_id = result['UploadId']
        logger.debug(f"获取到MinIO上传ID: {upload_id}")

        # Persist the task record.
        upload_task = UploadTask(
            id=task_id,
            user_id=user_id,
            file_name=file_name,
            file_size=file_size,
            chunk_size=chunk_size,
            total_chunks=total_chunks,
            uploaded_chunks=0,
            mime_type=mime_type,
            status="pending",
            upload_id=upload_id,
            expires_at=expires_at,
            directory_id=directory_id
        )
        db.add(upload_task)

        # Pre-create every chunk record; the last chunk may be smaller
        # than chunk_size (and is 0 bytes for an empty file).
        for chunk_number in range(1, total_chunks + 1):
            remaining = file_size - (chunk_number - 1) * chunk_size
            chunk = UploadChunk(
                task_id=task_id,
                chunk_number=chunk_number,
                chunk_size=max(0, min(chunk_size, remaining)),
                status="pending"
            )
            db.add(chunk)

        db.commit()

        return {
            "task_id": task_id,
            "upload_id": upload_id,
            "chunk_size": chunk_size,
            "total_chunks": total_chunks,
            "expires_at": expires_at
        }

    except SQLAlchemyError as e:
        db.rollback()
        _abort_pending_upload()
        logger.error(f"数据库错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"创建上传任务失败: {str(e)}")
    except ClientError as e:
        db.rollback()
        _abort_pending_upload()
        logger.error(f"MinIO错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"初始化MinIO上传失败: {str(e)}")
    except Exception as e:
        db.rollback()
        _abort_pending_upload()
        logger.error(f"创建上传任务失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"创建上传任务失败: {str(e)}")
    

def upload_chunk(
    db: Session,
    user_id: int,
    task_id: str,
    chunk_number: int,
    chunk_data: bytes
) -> Dict[str, Any]:
    """
    Upload a single file chunk as one part of the multipart upload.

    Re-uploading an already-uploaded chunk is idempotent: the stored ETag
    is returned without contacting storage again.

    Args:
        db: Database session.
        user_id: Owner of the upload task.
        task_id: Upload task ID.
        chunk_number: 1-based part number.
        chunk_data: Raw chunk bytes.

    Returns:
        Dict with task_id, chunk_number, etag and progress counters.

    Raises:
        HTTPException: 404 when the task/chunk is missing, 400 when the
            task is completed, failed or expired, 500 on storage/DB errors.
    """
    try:
        # Look up the task, scoped to the owner.
        task = db.query(UploadTask).filter(
            UploadTask.id == task_id,
            UploadTask.user_id == user_id
        ).first()

        if not task:
            raise HTTPException(status_code=404, detail="上传任务不存在")

        if task.status == "completed":
            raise HTTPException(status_code=400, detail="上传任务已完成")

        if task.status == "failed":
            raise HTTPException(status_code=400, detail="上传任务已失败")

        # Reject chunks for expired tasks: clean_expired_tasks will abort
        # the MinIO side of these, so accepting data would be wasted work.
        if task.expires_at is not None and task.expires_at < datetime.now():
            raise HTTPException(status_code=400, detail="上传任务已过期")

        # Look up the pre-created chunk record.
        chunk = db.query(UploadChunk).filter(
            UploadChunk.task_id == task_id,
            UploadChunk.chunk_number == chunk_number
        ).first()

        if not chunk:
            raise HTTPException(status_code=404, detail=f"分片{chunk_number}不存在")

        if chunk.status == "uploaded":
            # Chunk already uploaded: return the recorded result as-is.
            return {
                "task_id": task_id,
                "chunk_number": chunk_number,
                "etag": chunk.etag,
                "uploaded_chunks": task.uploaded_chunks,
                "total_chunks": task.total_chunks
            }

        # Upload the part to MinIO.
        bucket_name = minio_client.bucket_name
        object_name = f"temp/{user_id}/{task_id}/{task.file_name}"

        logger.debug(f"上传分片: task_id={task_id}, chunk={chunk_number}, size={len(chunk_data)}")

        response = minio_client.s3_client.upload_part(
            Bucket=bucket_name,
            Key=object_name,
            PartNumber=chunk_number,
            UploadId=task.upload_id,
            Body=chunk_data
        )

        # S3 wraps the ETag in literal quotes; store it bare.
        etag = response["ETag"].strip('"')

        # Record the chunk as uploaded.
        chunk.status = "uploaded"
        chunk.etag = etag
        chunk.upload_time = datetime.now()

        # NOTE(review): this read-modify-write increment can undercount
        # under parallel chunk uploads; complete_upload reconciles the
        # counter against the chunk table before merging.
        task.uploaded_chunks += 1
        task.status = "uploading"

        db.commit()

        return {
            "task_id": task_id,
            "chunk_number": chunk_number,
            "etag": etag,
            "uploaded_chunks": task.uploaded_chunks,
            "total_chunks": task.total_chunks
        }

    except HTTPException:
        raise
    except SQLAlchemyError as e:
        db.rollback()
        logger.error(f"数据库错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"上传分片失败: {str(e)}")
    except ClientError as e:
        db.rollback()
        logger.error(f"MinIO错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"上传到MinIO失败: {str(e)}")
    except Exception as e:
        db.rollback()
        logger.error(f"上传分片失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"上传分片失败: {str(e)}")


def complete_upload(
    db: Session,
    user_id: int,
    task_id: str
) -> Dict[str, Any]:
    """
    Complete a chunked upload: merge all parts, move the object to its
    final storage path and create the File record.

    Args:
        db: Database session.
        user_id: Owner of the upload task.
        task_id: Upload task ID.

    Returns:
        Dict describing the created file (id, paths, size, hash, ...).

    Raises:
        HTTPException: 404 when the task is missing, 400 when chunks are
            incomplete or the task is already completed/failed, 500 on
            storage/database errors.
    """
    try:
        # Look up the task, scoped to the owner.
        task = db.query(UploadTask).filter(
            UploadTask.id == task_id,
            UploadTask.user_id == user_id
        ).first()

        if not task:
            raise HTTPException(status_code=404, detail="上传任务不存在")

        if task.status == "completed":
            raise HTTPException(status_code=400, detail="上传任务已完成")

        if task.status == "failed":
            raise HTTPException(status_code=400, detail="上传任务已失败")

        logger.info(f"检查上传进度: task_id={task_id}, uploaded_chunks={task.uploaded_chunks}, total_chunks={task.total_chunks}")

        if task.uploaded_chunks < task.total_chunks:
            # The counter may lag behind reality (see upload_chunk), so
            # reconcile against the chunk table before rejecting.
            uploaded_count = db.query(UploadChunk).filter(
                UploadChunk.task_id == task_id,
                UploadChunk.status == "uploaded"
            ).count()

            logger.info(f"数据库中实际已上传分片数: {uploaded_count}")

            if uploaded_count < task.total_chunks:
                missing_chunks = []
                chunks = db.query(UploadChunk).filter(
                    UploadChunk.task_id == task_id,
                    UploadChunk.status != "uploaded"
                ).all()
                for chunk in chunks:
                    missing_chunks.append(chunk.chunk_number)

                raise HTTPException(
                    status_code=400, 
                    detail=f"还有分片未上传完成。已上传: {uploaded_count}/{task.total_chunks}, 缺失分片: {missing_chunks}"
                )
            else:
                # Counter was stale; fix it from the real chunk count.
                task.uploaded_chunks = uploaded_count
                db.commit()

        # Collect all chunks in part-number order for the merge call.
        chunks = db.query(UploadChunk).filter(
            UploadChunk.task_id == task_id
        ).order_by(UploadChunk.chunk_number).all()

        multipart_upload = []
        for chunk in chunks:
            if chunk.status != "uploaded" or not chunk.etag:
                raise HTTPException(status_code=400, detail=f"分片{chunk.chunk_number}未完成上传")

            multipart_upload.append({
                'PartNumber': chunk.chunk_number,
                'ETag': chunk.etag
            })

        logger.info(f"所有分片验证通过，开始合并文件: task_id={task_id}")

        # Merge the parts into the temporary object.
        bucket_name = minio_client.bucket_name
        temp_object_name = f"temp/{user_id}/{task_id}/{task.file_name}"

        result = minio_client.s3_client.complete_multipart_upload(
            Bucket=bucket_name,
            Key=temp_object_name,
            UploadId=task.upload_id,
            MultipartUpload={'Parts': multipart_upload}
        )

        # Determine type/extension the same way the regular upload path does.
        from app.utils.file_utils import detect_file_type, get_file_extension, get_mime_type
        from app.services.file_service import calculate_file_hash, build_storage_path, get_file_create_time

        file_type = detect_file_type(task.file_name, task.mime_type)
        extension = get_file_extension(task.file_name)

        # Hashing requires the merged bytes, so download to a temp file.
        temp_file = None
        temp_path = None
        try:
            temp_file = tempfile.NamedTemporaryFile(delete=False)
            temp_path = temp_file.name
            temp_file.close()  # close immediately; we only need the path

            minio_client.s3_client.download_file(
                Bucket=bucket_name,
                Key=temp_object_name,
                Filename=temp_path
            )

            # Compute the content hash (reads the whole file into memory).
            with open(temp_path, 'rb') as f:
                content = f.read()
                file_hash = calculate_file_hash(content)

            # Original creation time, used in the storage path layout.
            create_time = get_file_create_time(temp_path, task.file_name)

            # Build the final storage path, consistent with regular uploads.
            target_object_name = build_storage_path(
                user_id=user_id,
                file_type=file_type,
                create_time=create_time,
                original_filename=task.file_name,
                file_hash=file_hash,
                directory_id=task.directory_id
            )

            # Move the merged object to its final location.
            # NOTE(review): copy_object is limited to 5GB on S3-compatible
            # stores — confirm expected maximum file sizes.
            minio_client.s3_client.copy_object(
                Bucket=bucket_name,
                CopySource={"Bucket": bucket_name, "Key": temp_object_name},
                Key=target_object_name
            )

            # Remove the temporary object.
            minio_client.s3_client.delete_object(
                Bucket=bucket_name,
                Key=temp_object_name
            )

            # Mark the task complete and create the File record in a
            # single transaction, so a failure cannot leave a completed
            # task with no corresponding File row.
            task.status = "completed"
            file_info = File(
                user_id=user_id,
                file_name=task.file_name,
                file_path=target_object_name,
                file_size=task.file_size,
                file_type=file_type,
                extension=extension,
                mime_type=task.mime_type,
                upload_time=datetime.now(),
                create_time=create_time,
                hash=file_hash,
                is_deleted=False,
                directory_id=task.directory_id
            )

            db.add(file_info)
            db.commit()
            db.refresh(file_info)

            # Final stored file name (last component of the storage path).
            filename = os.path.basename(file_info.file_path)

            return {
                "file_id": file_info.id,
                "filename": filename,
                "original_filename": task.file_name,
                "storage_path": file_info.file_path,
                "file_size": file_info.file_size,
                "content_type": file_info.mime_type,
                "file_type": file_info.file_type,
                "create_time": file_info.create_time,
                "hash": file_info.hash,
                "is_duplicate": False
            }
        finally:
            # Always remove the local temp file.
            if temp_path and os.path.exists(temp_path):
                try:
                    os.unlink(temp_path)
                except Exception as e:
                    logger.warning(f"清理临时文件失败: {str(e)}")

    except HTTPException:
        raise
    except SQLAlchemyError as e:
        db.rollback()
        logger.error(f"数据库错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"完成上传失败: {str(e)}")
    except ClientError as e:
        db.rollback()
        logger.error(f"MinIO错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"MinIO操作失败: {str(e)}")
    except Exception as e:
        db.rollback()
        logger.error(f"完成上传失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"完成上传失败: {str(e)}")


def get_upload_status(
    db: Session,
    user_id: int,
    task_id: str
) -> Dict[str, Any]:
    """
    Report the current state of an upload task.

    Args:
        db: Database session.
        user_id: Owner of the upload task.
        task_id: Upload task ID.

    Returns:
        Dict with task metadata, per-chunk statuses and overall progress
        as a percentage.

    Raises:
        HTTPException: 404 when the task does not exist, 500 otherwise.
    """
    try:
        task = (
            db.query(UploadTask)
            .filter(UploadTask.id == task_id, UploadTask.user_id == user_id)
            .first()
        )
        if task is None:
            raise HTTPException(status_code=404, detail="上传任务不存在")

        all_chunks = (
            db.query(UploadChunk)
            .filter(UploadChunk.task_id == task_id)
            .all()
        )

        # Per-chunk status summary.
        chunks_info = [
            {"chunk_number": c.chunk_number, "status": c.status}
            for c in all_chunks
        ]

        # Completion percentage; guard against a zero chunk total.
        if task.total_chunks > 0:
            progress = (task.uploaded_chunks / task.total_chunks) * 100
        else:
            progress = 0

        return {
            "task_id": task.id,
            "file_name": task.file_name,
            "file_size": task.file_size,
            "status": task.status,
            "uploaded_chunks": task.uploaded_chunks,
            "total_chunks": task.total_chunks,
            "progress": progress,
            "chunks": chunks_info,
            "create_time": task.create_time,
            "update_time": task.update_time,
            "expires_at": task.expires_at
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取上传状态失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取上传状态失败: {str(e)}")


def abort_upload(
    db: Session,
    user_id: int,
    task_id: str
) -> Dict[str, Any]:
    """
    Cancel an in-flight upload task.

    Aborts the MinIO multipart upload (best effort) and marks the task
    as failed.

    Args:
        db: Database session.
        user_id: Owner of the upload task.
        task_id: Upload task ID.

    Returns:
        Dict with the task ID and a confirmation message.

    Raises:
        HTTPException: 404 when the task does not exist, 400 when it has
            already completed, 500 otherwise.
    """
    try:
        task = (
            db.query(UploadTask)
            .filter(UploadTask.id == task_id, UploadTask.user_id == user_id)
            .first()
        )
        if task is None:
            raise HTTPException(status_code=404, detail="上传任务不存在")
        if task.status == "completed":
            raise HTTPException(status_code=400, detail="上传任务已完成，无法取消")

        # Abort the storage-side multipart upload; failure here is
        # non-fatal since the DB state is what callers observe.
        target_key = f"temp/{user_id}/{task_id}/{task.file_name}"
        try:
            minio_client.s3_client.abort_multipart_upload(
                Bucket=minio_client.bucket_name,
                Key=target_key,
                UploadId=task.upload_id
            )
        except Exception as e:
            logger.warning(f"取消MinIO上传失败: {str(e)}")

        # Record the cancellation.
        task.status = "failed"
        db.commit()

        return {"task_id": task_id, "message": "上传任务已取消"}

    except HTTPException:
        raise
    except Exception as e:
        db.rollback()
        logger.error(f"取消上传任务失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"取消上传任务失败: {str(e)}")


def clean_expired_tasks(db: Session) -> Dict[str, Any]:
    """
    Mark expired pending/uploading tasks as failed and abort their MinIO
    multipart uploads (best effort).

    Args:
        db: Database session.

    Returns:
        Dict with the number of cleaned tasks and a summary message.

    Raises:
        HTTPException: 500 when the cleanup transaction fails.
    """
    try:
        cutoff = datetime.now()

        # Only in-flight tasks can expire; completed/failed are ignored.
        stale_tasks = (
            db.query(UploadTask)
            .filter(
                UploadTask.expires_at < cutoff,
                UploadTask.status.in_(["pending", "uploading"]),
            )
            .all()
        )

        bucket = minio_client.bucket_name
        cleaned = 0
        for stale in stale_tasks:
            # Abort the storage-side upload; failures are non-fatal.
            key = f"temp/{stale.user_id}/{stale.id}/{stale.file_name}"
            try:
                minio_client.s3_client.abort_multipart_upload(
                    Bucket=bucket,
                    Key=key,
                    UploadId=stale.upload_id
                )
            except Exception as e:
                logger.warning(f"取消MinIO上传失败: {str(e)}")

            stale.status = "failed"
            cleaned += 1

        db.commit()

        return {
            "cleaned_tasks": cleaned,
            "message": f"已清理{cleaned}个过期上传任务"
        }

    except Exception as e:
        db.rollback()
        logger.error(f"清理过期任务失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"清理过期任务失败: {str(e)}")


# 工具函数
def detect_file_type_from_name_mime(file_name: str, mime_type: Optional[str] = None) -> str:
    """
    Detect the logical file type from a file name and optional MIME type.

    Thin wrapper delegating to ``app.utils.file_utils.detect_file_type``.
    """
    from app.utils.file_utils import detect_file_type
    detected = detect_file_type(file_name, mime_type)
    return detected

# Extension -> MIME type lookup table, built once at import time instead
# of being reconstructed on every call.
_EXTENSION_MIME_TYPES: Dict[str, str] = {
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.png': 'image/png',
    '.gif': 'image/gif',
    '.bmp': 'image/bmp',
    '.webp': 'image/webp',
    '.svg': 'image/svg+xml',
    '.mp4': 'video/mp4',
    '.avi': 'video/x-msvideo',
    '.mov': 'video/quicktime',
    '.wmv': 'video/x-ms-wmv',
    '.flv': 'video/x-flv',
    '.mkv': 'video/x-matroska',
    '.webm': 'video/webm',
    '.mp3': 'audio/mpeg',
    '.wav': 'audio/wav',
    '.ogg': 'audio/ogg',
    '.m4a': 'audio/mp4',
    '.flac': 'audio/flac',
    '.pdf': 'application/pdf',
    '.doc': 'application/msword',
    '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    '.xls': 'application/vnd.ms-excel',
    '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    '.ppt': 'application/vnd.ms-powerpoint',
    '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
    '.txt': 'text/plain',
    '.csv': 'text/csv',
    '.zip': 'application/zip',
    '.rar': 'application/x-rar-compressed',
    '.7z': 'application/x-7z-compressed',
    '.tar': 'application/x-tar',
    '.gz': 'application/gzip',
}


def get_default_mime_type(extension: str) -> str:
    """Return the default MIME type for a file extension.

    Args:
        extension: File extension, with or without the leading dot;
            matching is case-insensitive.

    Returns:
        str: The mapped MIME type, or 'application/octet-stream' when the
        extension is unknown or empty.
    """
    ext = extension.lower()
    # Tolerate callers that pass "png" instead of ".png".
    if ext and not ext.startswith('.'):
        ext = '.' + ext
    return _EXTENSION_MIME_TYPES.get(ext, 'application/octet-stream')