package com.docmgmt.mvp.service;

import com.docmgmt.mvp.dto.FileUploadResponse;
import com.docmgmt.mvp.dto.KbSyncMessage;
import com.docmgmt.mvp.entity.FileEntity;
// import com.docmgmt.mvp.entity.FileOperationLogEntity; // TODO: Create this entity
import com.docmgmt.mvp.entity.UserEntity;
import com.docmgmt.mvp.exception.BusinessException;
import com.docmgmt.mvp.mapper.FileMapper;
// import com.docmgmt.mvp.mapper.FileOperationLogMapper; // TODO: Create this mapper
import com.docmgmt.mvp.mapper.UserMapper;
import io.minio.MinioClient;
import io.minio.PutObjectArgs;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import java.io.*;
import java.nio.file.*;
import java.security.MessageDigest;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

@Slf4j
@Service
@RequiredArgsConstructor
public class FileUploadService {

    private final MinioClient minioClient;
    private final FileMapper fileMapper;
    private final UserMapper userMapper;
    private final RabbitTemplate rabbitTemplate;
    // private final FileOperationLogMapper fileOperationLogMapper; // TODO: Create this mapper

    /** MQ destination for knowledge-base sync notifications (shared by both upload paths). */
    private static final String SYNC_EXCHANGE = "ragflow.sync.exchange";
    private static final String SYNC_ROUTING_KEY = "file.uploaded";

    @Value("${minio.bucket-name:doc-storage}")
    private String bucketName;

    @Value("${file.max-size:524288000}")
    private Long maxFileSize;

    @Value("${file.chunk-size:5242880}") // 5MB per chunk
    private Long chunkSize;

    @Value("${file.upload-temp-dir:/tmp/chunked-uploads}")
    private String uploadTempDir;

    // Chunked-upload session cache (uploadId -> ChunkUploadSession).
    // NOTE(review): sessions are never expired; createdAt is stored but unused.
    // Abandoned uploads leak map entries and temp dirs — a scheduled sweep is needed (TODO).
    private final Map<String, ChunkUploadSession> chunkSessions = new ConcurrentHashMap<>();

    /**
     * Uploads a single (non-chunked) file: validates size/quota, de-duplicates by MD5,
     * stores the bytes in MinIO, persists a FileEntity row, updates the user's storage
     * usage, and notifies the RAGFlow sync queue.
     *
     * @param file         the multipart payload
     * @param userId       uploading user (becomes uploadedBy and ownerId)
     * @param folderId     target folder; null means root (stored as 0)
     * @param privacyLevel privacy level; null defaults to "shared"
     * @return response carrying the file id; isDuplicate=true when an identical MD5 already exists
     * @throws BusinessException on validation failure, quota overflow, or MinIO error
     */
    @Transactional(rollbackFor = Exception.class)
    public FileUploadResponse upload(MultipartFile file, Long userId, Long folderId, String privacyLevel) {
        log.info("File upload: name={}, size={}, user={}", file.getOriginalFilename(), file.getSize(), userId);

        // Validate file (emptiness, max size, storage quota)
        validateFile(file, userId);

        // Content-level dedupe: an existing row with the same MD5 short-circuits the upload.
        // NOTE(review): dedupe ignores owner/privacy — callers get the original uploader's
        // file id; confirm this cross-user sharing is intended.
        String md5Hash = calculateMD5(file);
        FileEntity existingFile = fileMapper.selectByMd5(md5Hash);
        if (existingFile != null) {
            return FileUploadResponse.builder().fileId(existingFile.getId()).isDuplicate(true).build();
        }

        // Upload to MinIO under a date-partitioned, UUID-based object name.
        String objectName = generateObjectName(file.getOriginalFilename());
        try (InputStream inputStream = file.getInputStream()) {
            minioClient.putObject(PutObjectArgs.builder().bucket(bucketName).object(objectName)
                .stream(inputStream, file.getSize(), -1).contentType(file.getContentType()).build());
        } catch (Exception e) {
            throw new BusinessException("MinIO upload failed: " + e.getMessage());
        }

        // Persist metadata row (single source of truth for field population: buildFileEntity).
        FileEntity fileEntity = buildFileEntity(file.getOriginalFilename(), file.getSize(),
                file.getContentType(), objectName, md5Hash, folderId, userId, privacyLevel);
        fileMapper.insert(fileEntity);

        // Atomically bump the user's storage usage (avoids read-modify-write races).
        updateStorageUsedAtomic(userId, file.getSize());

        // Audit log (currently debug-only until the log entity/mapper exist).
        logFileOperation(userId, fileEntity.getId(), "UPLOAD",
            String.format("上传文件: %s (%.2fMB)", file.getOriginalFilename(), file.getSize() / 1024.0 / 1024.0));

        // Notify the KB sync consumer.
        rabbitTemplate.convertAndSend(SYNC_EXCHANGE, SYNC_ROUTING_KEY,
            new KbSyncMessage(fileEntity.getId(), "UPLOAD"));

        log.info("文件上传成功: fileId={}, fileName={}", fileEntity.getId(), fileEntity.getFileName());

        return FileUploadResponse.builder().fileId(fileEntity.getId()).isDuplicate(false).build();
    }

    /**
     * Validates a multipart upload: rejects empty files, files over the configured
     * maximum, and uploads that would exceed the user's storage quota.
     */
    private void validateFile(MultipartFile file, Long userId) {
        if (file.isEmpty()) throw new BusinessException("File is empty");
        if (file.getSize() > maxFileSize) throw new BusinessException("File size exceeds limit");
        checkStorageQuota(userId, file.getSize());
    }

    /**
     * Null-safe quota check shared by the simple and chunked upload paths.
     * FIX: the previous inline checks NPE'd when the user row was missing or when
     * selectStorageUsedByUser returned null (a user with no files yet).
     *
     * @param additionalSize bytes about to be added to the user's usage
     * @throws BusinessException when the user is unknown or the quota would be exceeded
     */
    private void checkStorageQuota(Long userId, long additionalSize) {
        UserEntity user = userMapper.selectById(userId);
        if (user == null) {
            throw new BusinessException("User not found: " + userId);
        }
        Long storageUsed = fileMapper.selectStorageUsedByUser(userId);
        long used = storageUsed != null ? storageUsed : 0L;
        if (used + additionalSize > user.getStorageQuota()) {
            throw new BusinessException("Storage quota exceeded");
        }
    }

    /**
     * Builds a fully-populated FileEntity for persistence. Centralizing this fixes the
     * previous inconsistency where the chunked path forgot filePath, fileExtension,
     * ownerId, ragflowSyncStatus, and the folderId root default.
     */
    private FileEntity buildFileEntity(String fileName, Long fileSize, String mimeType,
                                       String objectName, String md5Hash,
                                       Long folderId, Long userId, String privacyLevel) {
        FileEntity fileEntity = new FileEntity();
        fileEntity.setFileName(fileName);
        fileEntity.setFileSize(fileSize);
        fileEntity.setMimeType(mimeType);
        fileEntity.setStoragePath(objectName);
        fileEntity.setMd5Hash(md5Hash);
        fileEntity.setFolderId(folderId != null ? folderId : 0L); // 0 = root folder
        fileEntity.setFilePath(folderId != null
                ? "/" + folderId + "/" + fileName
                : "/" + fileName);
        fileEntity.setFileExtension(getFileExtension(fileName));
        fileEntity.setUploadedBy(userId);
        fileEntity.setOwnerId(userId); // owner is the uploading user
        fileEntity.setPrivacyLevel(privacyLevel != null ? privacyLevel : "shared");
        fileEntity.setRagflowDocId(null);
        fileEntity.setRagflowSyncStatus("pending"); // initial sync state
        return fileEntity;
    }

    /**
     * Streams the multipart payload through an MD5 digest and returns the
     * lowercase hex string. Used only for content de-duplication, not security.
     */
    private String calculateMD5(MultipartFile file) {
        try (InputStream is = file.getInputStream()) {
            MessageDigest md = MessageDigest.getInstance("MD5");
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = is.read(buffer)) != -1) {
                md.update(buffer, 0, bytesRead);
            }
            return toHex(md.digest());
        } catch (Exception e) {
            // FIX: previously the cause was dropped entirely, making failures undiagnosable.
            throw new BusinessException("MD5 calculation failed: " + e.getMessage());
        }
    }

    /** Renders a digest as lowercase hex (shared by both MD5 helpers). */
    private static String toHex(byte[] digest) {
        StringBuilder sb = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            sb.append(String.format("%02x", b));
        }
        return sb.toString();
    }

    /**
     * Produces a collision-free object key of the form yyyy/MM/dd/&lt;uuid&gt;.&lt;ext&gt;,
     * date-partitioned so the bucket stays browsable.
     */
    private String generateObjectName(String filename) {
        String extension = getFileExtension(filename);
        String uuid = UUID.randomUUID().toString().replace("-", "");
        return LocalDateTime.now().format(java.time.format.DateTimeFormatter.ofPattern("yyyy/MM/dd"))
            + "/" + uuid + "." + extension;
    }

    /**
     * Returns the lowercase extension (without the dot), or "" when the name is
     * null or has no dot.
     */
    private String getFileExtension(String filename) {
        if (filename == null || !filename.contains(".")) {
            return "";
        }
        return filename.substring(filename.lastIndexOf('.') + 1).toLowerCase();
    }

    /**
     * Records a file operation for auditing (added in v3.0).
     * TODO: Implement when FileOperationLogEntity and FileOperationLogMapper are created.
     */
    private void logFileOperation(Long userId, Long fileId, String operation, String details) {
        // Temporarily disabled until FileOperationLogEntity and FileOperationLogMapper are created
        log.debug("文件操作: userId={}, fileId={}, operation={}, details={}", userId, fileId, operation, details);
        /*
        try {
            FileOperationLogEntity log = new FileOperationLogEntity();
            log.setUserId(userId);
            log.setFileId(fileId);
            log.setOperation(operation);
            log.setOperationDetails(details);
            log.setOperationTime(LocalDateTime.now());

            fileOperationLogMapper.insert(log);
        } catch (Exception e) {
            // A logging failure must never break the business flow.
            log.error("记录文件操作日志失败: userId={}, fileId={}, operation={}", userId, fileId, operation, e);
        }
        */
    }

    /**
     * Atomically adds deltaSize to the user's storage usage via a single UPDATE,
     * avoiding lost updates under concurrent uploads. A zero row count is logged
     * but deliberately non-fatal.
     */
    private void updateStorageUsedAtomic(Long userId, Long deltaSize) {
        int updated = userMapper.updateStorageUsedAtomic(userId, deltaSize);
        if (updated == 0) {
            log.warn("更新用户存储使用量失败: userId={}, deltaSize={}", userId, deltaSize);
        }
    }

    // ==================== Chunked upload (added in v3.0) ====================

    /**
     * Initializes a chunked-upload session: validates size and quota up front,
     * registers an in-memory session, and creates the per-upload temp directory.
     *
     * @param userId      uploading user
     * @param fileName    original file name
     * @param fileSize    total file size in bytes
     * @param totalChunks expected number of chunks
     * @return the opaque upload session id to pass to uploadChunk/mergeChunks
     * @throws BusinessException when the size limit or quota is exceeded, or the temp dir cannot be created
     */
    public String initChunkUpload(Long userId, String fileName, Long fileSize, Integer totalChunks) {
        log.info("初始化分片上传: userId={}, fileName={}, fileSize={}, totalChunks={}",
                userId, fileName, fileSize, totalChunks);

        // FIX: chunked uploads previously bypassed the max-size limit enforced in upload().
        if (fileSize == null || fileSize <= 0 || totalChunks == null || totalChunks <= 0) {
            throw new BusinessException("分片上传参数无效");
        }
        if (fileSize > maxFileSize) {
            throw new BusinessException("File size exceeds limit");
        }
        checkStorageQuota(userId, fileSize);

        // Register the session before any I/O so uploadChunk can find it.
        String uploadId = UUID.randomUUID().toString().replace("-", "");
        ChunkUploadSession session = new ChunkUploadSession();
        session.setUserId(userId);
        session.setFileName(fileName);
        session.setFileSize(fileSize);
        session.setTotalChunks(totalChunks);
        session.setUploadedChunks(new HashSet<>());
        session.setCreatedAt(LocalDateTime.now());

        chunkSessions.put(uploadId, session);

        // Create the per-session temp directory that will receive chunk files.
        Path tempDir = Paths.get(uploadTempDir, uploadId);
        try {
            Files.createDirectories(tempDir);
        } catch (IOException e) {
            chunkSessions.remove(uploadId); // don't leave a dangling session behind
            throw new BusinessException("创建临时目录失败: " + e.getMessage());
        }

        log.info("分片上传会话已创建: uploadId={}", uploadId);
        return uploadId;
    }

    /**
     * Stores one chunk of an active session to the session's temp directory.
     * Chunks may arrive in any order and may be re-sent (REPLACE_EXISTING).
     *
     * @param uploadId   session id returned by initChunkUpload
     * @param chunkIndex zero-based chunk index, must be in [0, totalChunks)
     * @param chunkFile  the chunk payload
     * @throws BusinessException when the session is unknown, the index is out of range, or the write fails
     */
    public void uploadChunk(String uploadId, Integer chunkIndex, MultipartFile chunkFile) {
        log.debug("上传分片: uploadId={}, chunkIndex={}, size={}", uploadId, chunkIndex, chunkFile.getSize());

        ChunkUploadSession session = chunkSessions.get(uploadId);
        if (session == null) {
            throw new BusinessException("上传会话不存在或已过期");
        }

        // FIX: previously any index was accepted; an out-of-range index created an orphan
        // chunk file and corrupted the completion count used by mergeChunks.
        if (chunkIndex == null || chunkIndex < 0 || chunkIndex >= session.getTotalChunks()) {
            throw new BusinessException("分片索引无效: " + chunkIndex);
        }

        // Persist the chunk; uploadId is server-generated and chunkIndex is range-checked,
        // so the path cannot escape the temp dir.
        Path chunkPath = Paths.get(uploadTempDir, uploadId, String.format("chunk_%d", chunkIndex));
        try (InputStream inputStream = chunkFile.getInputStream()) {
            Files.copy(inputStream, chunkPath, StandardCopyOption.REPLACE_EXISTING);
            session.getUploadedChunks().add(chunkIndex);
            log.debug("分片已保存: uploadId={}, chunkIndex={}, path={}", uploadId, chunkIndex, chunkPath);
        } catch (IOException e) {
            throw new BusinessException("保存分片失败: " + e.getMessage());
        }
    }

    /**
     * Merges all chunks of a session into one file and completes the upload:
     * MD5 dedupe, MinIO put, DB insert, quota update, audit log, MQ notify,
     * and temp-file cleanup. Mirrors the single-shot upload() flow.
     *
     * @param uploadId     session id returned by initChunkUpload
     * @param userId       must match the user who initialized the session
     * @param folderId     target folder; null means root
     * @param privacyLevel privacy level; null defaults to "shared"
     * @return response carrying the file id; isDuplicate=true on MD5 match
     * @throws BusinessException when the session is invalid/incomplete or any step fails
     */
    @Transactional(rollbackFor = Exception.class)
    public FileUploadResponse mergeChunks(String uploadId, Long userId, Long folderId, String privacyLevel) {
        log.info("合并分片: uploadId={}", uploadId);

        ChunkUploadSession session = chunkSessions.get(uploadId);
        if (session == null) {
            throw new BusinessException("上传会话不存在或已过期");
        }

        // FIX: enforce session ownership — previously any user who learned an uploadId
        // could complete another user's upload. Same message as "not found" so the
        // response does not leak session existence.
        if (!Objects.equals(session.getUserId(), userId)) {
            throw new BusinessException("上传会话不存在或已过期");
        }

        // All chunks must be present before merging.
        if (session.getUploadedChunks().size() != session.getTotalChunks()) {
            throw new BusinessException(String.format("分片未全部上传: %d/%d",
                    session.getUploadedChunks().size(), session.getTotalChunks()));
        }

        Path tempDir = Paths.get(uploadTempDir, uploadId);
        Path mergedFile = Paths.get(uploadTempDir, uploadId + "_merged");

        try {
            // Concatenate chunks in index order into a single temp file.
            try (OutputStream outputStream = Files.newOutputStream(mergedFile)) {
                for (int i = 0; i < session.getTotalChunks(); i++) {
                    Path chunkPath = Paths.get(uploadTempDir, uploadId, String.format("chunk_%d", i));
                    Files.copy(chunkPath, outputStream);
                }
            }

            // Content-level dedupe against existing files.
            String md5Hash = calculateMD5FromFile(mergedFile);
            FileEntity existingFile = fileMapper.selectByMd5(md5Hash);
            if (existingFile != null) {
                cleanupChunkSession(uploadId, tempDir, mergedFile);
                return FileUploadResponse.builder().fileId(existingFile.getId()).isDuplicate(true).build();
            }

            // Upload the merged file to MinIO.
            // FIX: contentType is now set, matching the single-shot upload path.
            String objectName = generateObjectName(session.getFileName());
            String mimeType = detectContentType(session.getFileName());
            try (InputStream inputStream = Files.newInputStream(mergedFile)) {
                minioClient.putObject(PutObjectArgs.builder()
                        .bucket(bucketName)
                        .object(objectName)
                        .stream(inputStream, session.getFileSize(), -1)
                        .contentType(mimeType)
                        .build());
            }

            // Persist metadata via the shared builder.
            // FIX: previously this path omitted filePath, fileExtension, ownerId,
            // ragflowSyncStatus and the folderId root default.
            FileEntity fileEntity = buildFileEntity(session.getFileName(), session.getFileSize(),
                    mimeType, objectName, md5Hash, folderId, userId, privacyLevel);
            fileMapper.insert(fileEntity);

            // Atomically bump the user's storage usage.
            updateStorageUsedAtomic(userId, session.getFileSize());

            // Audit log.
            logFileOperation(userId, fileEntity.getId(), "UPLOAD",
                    String.format("分片上传文件: %s (%.2fMB, %d个分片)",
                            session.getFileName(), session.getFileSize() / 1024.0 / 1024.0, session.getTotalChunks()));

            // Notify the KB sync consumer.
            rabbitTemplate.convertAndSend(SYNC_EXCHANGE, SYNC_ROUTING_KEY,
                    new KbSyncMessage(fileEntity.getId(), "UPLOAD"));

            // Remove temp files and the in-memory session.
            cleanupChunkSession(uploadId, tempDir, mergedFile);

            log.info("分片上传完成: uploadId={}, fileId={}", uploadId, fileEntity.getId());

            return FileUploadResponse.builder().fileId(fileEntity.getId()).isDuplicate(false).build();

        } catch (Exception e) {
            log.error("合并分片失败: uploadId={}", uploadId, e);
            cleanupChunkSession(uploadId, tempDir, mergedFile);
            throw new BusinessException("合并分片失败: " + e.getMessage());
        }
    }

    /**
     * Removes a session's temp directory (deepest-first), the merged file, and the
     * in-memory session entry. Failures are logged, never propagated — cleanup must
     * not mask the business outcome.
     */
    private void cleanupChunkSession(String uploadId, Path tempDir, Path mergedFile) {
        try {
            if (Files.exists(tempDir)) {
                // Reverse order deletes children before their parent directory.
                Files.walk(tempDir)
                        .sorted(Comparator.reverseOrder())
                        .forEach(path -> {
                            try {
                                Files.deleteIfExists(path);
                            } catch (IOException e) {
                                log.warn("删除临时文件失败: {}", path, e);
                            }
                        });
            }

            Files.deleteIfExists(mergedFile);
            chunkSessions.remove(uploadId);

        } catch (IOException e) {
            log.warn("清理临时文件失败: uploadId={}", uploadId, e);
        }
    }

    /**
     * Computes the lowercase-hex MD5 of a file on disk (used for the merged chunk file).
     */
    private String calculateMD5FromFile(Path filePath) throws Exception {
        try (InputStream is = Files.newInputStream(filePath)) {
            MessageDigest md = MessageDigest.getInstance("MD5");
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = is.read(buffer)) != -1) {
                md.update(buffer, 0, bytesRead);
            }
            return toHex(md.digest());
        }
    }

    /**
     * Best-effort Content-Type detection from the file name; falls back to
     * application/octet-stream on any failure or unknown type.
     */
    private String detectContentType(String fileName) {
        try {
            Path path = Paths.get(fileName);
            String contentType = Files.probeContentType(path);
            return contentType != null ? contentType : "application/octet-stream";
        } catch (Exception e) {
            return "application/octet-stream";
        }
    }

    /**
     * In-memory state of one chunked-upload session.
     * Mutable holder; uploadedChunks is a plain HashSet mutated from uploadChunk —
     * NOTE(review): concurrent chunk uploads for the same session would race on it.
     */
    public static class ChunkUploadSession {
        private Long userId;              // session owner
        private String fileName;          // original file name
        private Long fileSize;            // declared total size in bytes
        private Integer totalChunks;      // expected chunk count
        private Set<Integer> uploadedChunks; // indices received so far
        private LocalDateTime createdAt;  // session creation time (expiry TODO)

        // ==================== Getters and setters ====================

        public Long getUserId() {
            return userId;
        }

        public void setUserId(Long userId) {
            this.userId = userId;
        }

        public String getFileName() {
            return fileName;
        }

        public void setFileName(String fileName) {
            this.fileName = fileName;
        }

        public Long getFileSize() {
            return fileSize;
        }

        public void setFileSize(Long fileSize) {
            this.fileSize = fileSize;
        }

        public Integer getTotalChunks() {
            return totalChunks;
        }

        public void setTotalChunks(Integer totalChunks) {
            this.totalChunks = totalChunks;
        }

        public Set<Integer> getUploadedChunks() {
            return uploadedChunks;
        }

        public void setUploadedChunks(Set<Integer> uploadedChunks) {
            this.uploadedChunks = uploadedChunks;
        }

        public LocalDateTime getCreatedAt() {
            return createdAt;
        }

        public void setCreatedAt(LocalDateTime createdAt) {
            this.createdAt = createdAt;
        }
    }
}