# app/services/file_service.py
from app import db
from flask import current_app,request
import os
import shutil
import uuid
from datetime import datetime
from app.models import FileUploadTask, FileChunk, File


class FileService:
    """Service layer for chunked file uploads, file listing and deletion.

    Upload flow: create_upload_task() -> save_chunk() per chunk ->
    complete_upload() merges the chunks and creates the File record.
    The File row (and its file_id) only exists once the upload completes.
    """

    @staticmethod
    def create_upload_task(uploader_id, filename, size, mime_type, total_chunks):
        """Create a chunked-upload task and return its task_id (UUID string).

        file_id is deliberately left NULL here: the File record is only
        created in complete_upload(), after every chunk has arrived.
        """
        task_id = str(uuid.uuid4())

        task = FileUploadTask(
            task_id=task_id,
            file_id=None,  # assigned later by complete_upload()
            uploader_id=uploader_id,
            original_name=filename,
            mime_type=mime_type,
            total_size=size,
            total_chunks=total_chunks
        )

        db.session.add(task)
        db.session.commit()
        return task_id

    @staticmethod
    def save_chunk(task_id, chunk_index, chunk_data, uploader_id):
        """Persist one uploaded chunk under UPLOAD_FOLDER/temp/<task_id>/.

        chunk_data is a Werkzeug FileStorage-like object (must support
        .save(path)).  Returns {'uploaded_chunks': int, 'status': str}.

        Raises:
            ValueError: unknown task_id.
            PermissionError: uploader_id does not own the task.
        """
        task = FileUploadTask.query.get(task_id)
        if not task:
            raise ValueError('Invalid task ID')

        if task.uploader_id != uploader_id:
            raise PermissionError('Unauthorized access')

        # Create the per-task temp directory if it does not exist yet.
        upload_dir = os.path.join(current_app.config['UPLOAD_FOLDER'], 'temp', task_id)
        os.makedirs(upload_dir, exist_ok=True)

        # Save the chunk payload; a retry of the same index overwrites the file.
        chunk_path = os.path.join(upload_dir, f"{chunk_index}.part")
        chunk_data.save(chunk_path)

        # Bug fix: a retried chunk_index must not insert a second FileChunk row
        # or bump uploaded_chunks again — otherwise the count can exceed
        # total_chunks (flagging the task 'completed' too early) while
        # complete_upload() later fails its chunk-count check.
        chunk = FileChunk.query.filter_by(task_id=task_id, chunk_index=chunk_index).first()
        if chunk:
            # Re-upload of an existing chunk: refresh its metadata only.
            chunk.chunk_size = os.path.getsize(chunk_path)
            chunk.stored_path = chunk_path
        else:
            chunk = FileChunk(
                id=str(uuid.uuid4()),
                task_id=task_id,
                chunk_index=chunk_index,
                chunk_size=os.path.getsize(chunk_path),
                stored_path=chunk_path
            )
            db.session.add(chunk)
            task.uploaded_chunks += 1

        # All chunks received -> the task is ready for complete_upload().
        if task.uploaded_chunks == task.total_chunks:
            task.status = 'completed'

        db.session.commit()

        return {
            'uploaded_chunks': task.uploaded_chunks,
            'status': task.status
        }

    @staticmethod
    def complete_upload(task_id: str, uploader_id: str, channel_id: str, direct_chat_id: str):
        """Merge all chunks into the final file and create the File record.

        The file_id is generated only here, then written back to the task
        before task and chunk rows are deleted.  Returns the new File.

        Raises:
            ValueError: unknown task, task not yet completed, or missing chunks.
            PermissionError: uploader_id does not own the task.
        """
        task = FileUploadTask.query.get(task_id)
        if not task:
            raise ValueError('Invalid task ID')

        if task.uploader_id != uploader_id:
            raise PermissionError('Unauthorized access')

        if task.status != 'completed':
            raise ValueError('Upload not completed')

        upload_dir = os.path.join(current_app.config['UPLOAD_FOLDER'], 'temp', task_id)
        chunks = (FileChunk.query.filter_by(task_id=task_id)
                  .order_by(FileChunk.chunk_index)
                  .all())

        # Defensive re-check: the DB must hold exactly total_chunks rows.
        if len(chunks) != task.total_chunks:
            raise ValueError('Missing chunks')

        # Build the destination path; the stored name embeds the new file_id
        # plus the original extension.
        file_id = str(uuid.uuid4())
        file_ext = os.path.splitext(task.original_name)[1]
        stored_name = f"{file_id}{file_ext}"
        stored_path = os.path.join(current_app.config['UPLOAD_FOLDER'], stored_name)
        os.makedirs(os.path.dirname(stored_path), exist_ok=True)

        # Concatenate chunks in index order, streaming instead of loading
        # each whole chunk into memory.
        with open(stored_path, 'wb') as out:
            for chunk in chunks:
                with open(chunk.stored_path, 'rb') as cf:
                    shutil.copyfileobj(cf, out)

        base_url = current_app.config['BASE_URL']
        file_url = f"{base_url}/api/files/uploads/{stored_name}"

        # Collapsed the three near-identical File(...) branches into one
        # constructor call.  As before, a channel association takes precedence
        # over a direct chat; with neither, no association is set.
        association = {}
        if channel_id:
            association['channel_id'] = channel_id
        elif direct_chat_id:
            association['direct_chat_id'] = direct_chat_id

        file = File(
            file_id=file_id,
            uploader_id=uploader_id,
            original_name=task.original_name,
            stored_name=stored_name,
            mime_type=task.mime_type,
            size=task.total_size,
            file_url=file_url,
            **association
        )
        db.session.add(file)

        # Link the task to the final file (the task row is removed just below,
        # but this keeps the FK consistent within the transaction).
        task.file_id = file_id

        # Remove bookkeeping rows now that the merged file exists.
        db.session.query(FileChunk).filter_by(task_id=task_id).delete()
        db.session.delete(task)
        db.session.commit()

        # Best-effort cleanup of the temp chunk files; leftovers are harmless.
        try:
            shutil.rmtree(upload_dir)
        except OSError as e:
            current_app.logger.error(f"Error cleaning upload dir: {e}")

        return file

    @staticmethod
    def delete_file(file_id, user_id):
        """Delete a file's stored bytes and its database record.

        Only the uploader may delete their own file.  Returns True on success.

        Raises ValueError when the file is missing or the caller is not the
        uploader.  NOTE(review): the permission case raises ValueError here
        while the upload methods raise PermissionError — kept as-is for
        backward compatibility with callers catching ValueError.
        """
        file = File.query.get(file_id)
        if not file:
            raise ValueError('未找到文件')
        if file.uploader_id != user_id:
            raise ValueError('无权操作')

        file_path = os.path.join(current_app.config['UPLOAD_FOLDER'], file.stored_name)

        # Remove the physical file first; if that fails the DB record is kept
        # so the deletion can be retried.
        try:
            if os.path.exists(file_path):
                os.remove(file_path)
                current_app.logger.info(f"文件删除成功: {file_path}")
        except OSError as e:
            current_app.logger.error(f"删除失败: {e}")
            raise

        db.session.delete(file)
        db.session.commit()

        return True

    @staticmethod
    def get_files_by_type(user_id, mime_pattern):
        """Return the user's files whose MIME type matches a SQL LIKE pattern.

        mime_pattern uses SQL LIKE syntax, e.g. 'image/%' or 'application/pdf'.
        Each entry is a summary dict including a coarse 'type' classification.
        """
        files = File.query.filter(
            File.uploader_id == user_id,
            File.mime_type.like(mime_pattern)
        ).all()

        return [
            {
                'file_id': f.file_id,
                'file_name': f.original_name,
                'file_url': f.file_url,
                'mime_type': f.mime_type,
                'size': f.size,
                'created_at': f.created_at.isoformat(),
                'type': FileService.get_file_type(f.mime_type)
            } for f in files
        ]

    @staticmethod
    def get_file_type(mime_type):
        """Map a MIME type to a coarse category string.

        Returns one of: 'image', 'video', 'audio', 'pdf', 'document',
        'spreadsheet', 'presentation', 'text', 'other'.
        """
        if mime_type.startswith('image/'):
            return 'image'
        if mime_type.startswith('video/'):
            return 'video'
        if mime_type.startswith('audio/'):
            return 'audio'
        if mime_type == 'application/pdf':
            return 'pdf'
        if mime_type in ['application/msword',
                         'application/vnd.openxmlformats-officedocument.wordprocessingml.document']:
            return 'document'
        if 'spreadsheet' in mime_type:
            return 'spreadsheet'
        if 'presentation' in mime_type:
            return 'presentation'
        if mime_type.startswith('text/'):
            return 'text'
        return 'other'

    @staticmethod
    def get_user_files(user_id: str, pageCur: int = 1, pageSize: int = 20):
        """Return one page of the user's files, newest first.

        Bug fix: this method was defined twice in the class with different
        signatures; the earlier list-returning variant was dead code (silently
        shadowed by this one) and has been removed.  pageCur/pageSize now have
        defaults so a single-argument call (which previously raised TypeError)
        returns the first page.

        Returns {'total', 'pageSize', 'pageCur', 'fileList'} where fileList
        holds full-record dicts (created_at is left as a datetime, as before).
        """
        total = File.query.filter_by(uploader_id=user_id).count()
        files = (File.query.filter_by(uploader_id=user_id)
                 .order_by(File.created_at.desc())
                 .offset((pageCur - 1) * pageSize)
                 .limit(pageSize)
                 .all())

        file_list = [
            {
                'file_id': f.file_id,
                'uploader_id': f.uploader_id,
                'channel_id': f.channel_id,
                'direct_chat_id': f.direct_chat_id,
                'original_name': f.original_name,
                'stored_name': f.stored_name,
                'mime_type': f.mime_type,
                'size': f.size,
                'file_url': f.file_url,
                'created_at': f.created_at
            } for f in files
        ]

        return {
            'total': total,
            'pageSize': pageSize,
            'pageCur': pageCur,
            'fileList': file_list
        }