package com.app.project.service.impl;

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.RandomUtil;
import com.app.project.common.ErrorCode;
import com.app.project.constant.FileConstant;
import com.app.project.exception.BusinessException;
import com.app.project.exception.ThrowUtils;
import com.app.project.manager.CosManager;
import com.app.project.model.dto.file.ChunkUploadRequest;
import com.app.project.model.dto.file.MergeChunkRequest;
import com.app.project.service.BigFileUploadService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.HashOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import javax.annotation.Resource;
import java.io.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

@Service
@Slf4j
public class BigFileUploadServiceImpl implements BigFileUploadService {

    @Resource
    private CosManager cosManager;

    @Resource
    private RedisTemplate<String, Object> redisTemplate;

    // 文件状态的 Redis key
    private static final String FILE_STATUS_KEY_PREFIX = "file_status:";

    /**
     * Returns the upload status of a file for the given user.
     * <p>
     * Status is one of:
     * <ul>
     *   <li>{@code completed} — file merged; {@code url} holds the final location</li>
     *   <li>{@code pending}   — nothing uploaded yet</li>
     *   <li>{@code uploading} — {@code uploadedChunks} lists the chunk indexes already stored</li>
     * </ul>
     *
     * @param fileHash content hash identifying the file
     * @param userId   owner of the upload session
     * @return a map with a "status" entry plus "url" or "uploadedChunks" as described above
     */
    @Override
    public Map<String, Object> checkFileStatus(String fileHash, Long userId) {
        // Redis key holding this file's upload state for this user
        String statusKey = FILE_STATUS_KEY_PREFIX + userId + ":" + fileHash;
        Map<String, Object> result = new HashMap<>();

        // A completed file stores "completed"=true and "url" in the hash
        Boolean isCompleted = (Boolean) redisTemplate.opsForHash().get(statusKey, "completed");
        if (Boolean.TRUE.equals(isCompleted)) {
            result.put("status", "completed");
            result.put("url", redisTemplate.opsForHash().get(statusKey, "url"));
            return result;
        }

        // While uploading, the hash fields are the chunk indexes
        Set<Object> uploadedChunks = redisTemplate.opsForHash().keys(statusKey);
        if (uploadedChunks.isEmpty()) {
            // Nothing uploaded yet
            result.put("status", "pending");
            result.put("uploadedChunks", new Integer[0]);
            return result;
        }

        // Defensively skip any non-numeric fields (e.g. "completed"/"url" left by a
        // concurrent merge) — the previous version threw NumberFormatException on them.
        // Sorting gives callers a deterministic, ascending index list.
        List<Integer> uploadedChunkIndexes = uploadedChunks.stream()
                .map(Object::toString)
                .filter(s -> !s.isEmpty() && s.chars().allMatch(Character::isDigit))
                .map(Integer::valueOf)
                .sorted()
                .collect(Collectors.toList());

        result.put("uploadedChunks", uploadedChunkIndexes);
        result.put("status", "uploading");
        return result;
    }

    /**
     * Uploads a single chunk to COS.
     * <p>
     * The chunk is staged in a local temp file, pushed to COS at
     * {@code /chunks/<biz>/<userId>/<fileHash>_<index>}, and its path is recorded in
     * Redis (hash field = chunk index) with a 24h TTL.
     *
     * @param chunkUploadRequest chunk metadata (biz, file hash, chunk index)
     * @param chunkFile          the chunk payload
     * @param userId             owner of the upload session
     * @return true once the chunk is stored and recorded
     * @throws BusinessException if the chunk cannot be staged or uploaded
     */
    @Override
    public Boolean uploadChunk(ChunkUploadRequest chunkUploadRequest, MultipartFile chunkFile, Long userId) {
        // Redis key holding this file's upload state for this user
        String statusKey = FILE_STATUS_KEY_PREFIX + userId + ":" + chunkUploadRequest.getFileHash();
        // COS path for this chunk
        String chunkPath = String.format("/chunks/%s/%s/%s_%d", chunkUploadRequest.getBiz(), userId, chunkUploadRequest.getFileHash(), chunkUploadRequest.getChunkIndex());

        File tempFile = null;
        try {
            // Stage the upload in a temp file for the COS SDK
            tempFile = File.createTempFile("chunk_", null);
            chunkFile.transferTo(tempFile);

            cosManager.putObject(chunkPath, tempFile);

            // Record the chunk path in Redis (hash field = chunk index)
            redisTemplate.opsForHash().put(statusKey, chunkUploadRequest.getChunkIndex().toString(), chunkPath);
            redisTemplate.expire(statusKey, 24, TimeUnit.HOURS);

            // Success is the upload itself — previously the method returned the temp-file
            // delete result, reporting failure for a successful upload
            return true;

        } catch (IOException e) {
            log.error("上传切片失败: {}", e.getMessage(), e);
            throw new BusinessException(ErrorCode.SYSTEM_ERROR, "分片上传失败");
        } finally {
            // Always reclaim the temp file, including on the exception paths
            // (previously it leaked whenever putObject or transferTo threw)
            if (tempFile != null && tempFile.exists() && !tempFile.delete()) {
                log.warn("临时文件删除失败: {}", tempFile.getPath());
            }
        }
    }

    /**
     * Merges previously uploaded COS chunks into a single final object.
     * <p>
     * Pipeline: a download pool fetches chunks concurrently (producer), a consumer
     * re-orders them and streams the bytes through a pipe straight into a COS upload,
     * so the whole file is never buffered in memory at once. On success the chunk
     * objects are deleted asynchronously and the Redis state is replaced by a
     * "completed" record with a 30-day TTL.
     *
     * @param mergeChunkRequest file hash, name, biz tag and expected chunk count
     * @param userId            owner of the upload session
     * @return public URL of the merged file
     * @throws BusinessException if chunks are missing or the merge/upload fails
     */
    @Override
    public String mergeChunks(MergeChunkRequest mergeChunkRequest, Long userId) {
        String statusKey = FILE_STATUS_KEY_PREFIX + userId + ":" + mergeChunkRequest.getFileHash();
        int totalChunks = mergeChunkRequest.getTotalChunks();

        // Chunk paths recorded by uploadChunk (hash field = chunk index, value = COS path)
        Map<Object, Object> chunks = redisTemplate.opsForHash().entries(statusKey);

        // Every chunk must be present before merging
        if (chunks.size() != totalChunks) {
            throw new BusinessException(ErrorCode.OPERATION_ERROR, "文件缺失，请重新上传");
        }

        // Resolve the COS path of each chunk, in order
        String[] chunkPaths = new String[totalChunks];
        for (int i = 0; i < totalChunks; i++) {
            String path = (String) chunks.get(String.valueOf(i));
            ThrowUtils.throwIf(path == null, ErrorCode.OPERATION_ERROR, "切片" + i + "不存在");
            chunkPaths[i] = path;
        }

        String uuid = IdUtil.fastSimpleUUID();
        String fileName = uuid + "-" + mergeChunkRequest.getFileName();
        String finalPath = String.format("/%s/%s/%s", mergeChunkRequest.getBiz(), userId, fileName);

        // Download pool; clamp to >= 1 so totalChunks == 0 cannot crash pool creation
        ExecutorService downloadExecutor = Executors.newFixedThreadPool(Math.min(16, Math.max(1, totalChunks)));
        // Three coordinator threads: download producer, merge consumer, streaming upload.
        // The upload MUST NOT share downloadExecutor: if every download thread is busy
        // while the pipe buffer fills, a queued upload task could never start and the
        // whole pipeline would deadlock.
        ExecutorService processExecutor = Executors.newFixedThreadPool(3);

        try {
            // Bounded queue caps memory use; producers retry while it is full
            BlockingQueue<ChunkData> chunkQueue = new LinkedBlockingQueue<>(8);
            AtomicInteger downloadCounter = new AtomicInteger(0);
            AtomicReference<Exception> downloadException = new AtomicReference<>();

            // Producer: fan out chunk downloads and enqueue the results
            Future<Void> downloadFuture = processExecutor.submit(() -> {
                List<Future<Void>> downloadTasks = new ArrayList<>();

                for (int i = 0; i < totalChunks; i++) {
                    final int chunkIndex = i;
                    final String chunkPath = chunkPaths[i];

                    downloadTasks.add(downloadExecutor.submit(() -> {
                        try {
                            ByteArrayOutputStream baos = new ByteArrayOutputStream();
                            cosManager.getObjectToStream(chunkPath, baos);

                            ChunkData chunkData = new ChunkData(chunkIndex, baos.toByteArray());

                            // Timed offer loop: applies back-pressure while still
                            // noticing a failure from a sibling download
                            while (!chunkQueue.offer(chunkData, 100, TimeUnit.MILLISECONDS)) {
                                if (downloadException.get() != null) {
                                    return null;
                                }
                            }

                            downloadCounter.incrementAndGet();
                            return null;
                        } catch (Exception e) {
                            downloadException.set(e);
                            throw new RuntimeException("下载分片失败: " + chunkPath, e);
                        }
                    }));
                }

                // Surface the first download failure, if any
                for (Future<Void> task : downloadTasks) {
                    task.get();
                }

                // Blocking put: a plain offer() could silently drop the end marker on a
                // full queue, leaving the consumer spinning until its 180s timeout
                chunkQueue.put(new ChunkData(-1, null));
                return null;
            });

            // Consumer: re-order chunks and stream them into the COS upload
            Future<String> mergeFuture = processExecutor.submit(() -> {
                try (PipedOutputStream pos = new PipedOutputStream();
                     PipedInputStream pis = new PipedInputStream(pos, 16 * 1024 * 1024)) { // 16MB pipe buffer

                    // Upload runs on its own processExecutor slot (see pool sizing above)
                    Future<Void> uploadFuture = processExecutor.submit(() -> {
                        try {
                            cosManager.putObjectFromStream(finalPath, pis);
                            return null;
                        } catch (Exception e) {
                            throw new RuntimeException("上传失败", e);
                        }
                    });

                    // Chunks may arrive out of order; park them here until their turn
                    Map<Integer, byte[]> chunkBuffer = new HashMap<>();
                    int expectedChunkIndex = 0;

                    try (BufferedOutputStream bos = new BufferedOutputStream(pos, 256 * 1024)) { // 256KB write buffer
                        while (true) {
                            ChunkData chunkData = chunkQueue.poll(10, TimeUnit.SECONDS);

                            if (chunkData == null) {
                                if (downloadException.get() != null) {
                                    throw downloadException.get();
                                }
                                continue; // keep waiting for slow downloads
                            }

                            // End-of-stream marker from the producer
                            if (chunkData.index == -1) {
                                break;
                            }

                            chunkBuffer.put(chunkData.index, chunkData.data);

                            // Drain every run of consecutive chunks, in order
                            while (chunkBuffer.containsKey(expectedChunkIndex)) {
                                byte[] data = chunkBuffer.remove(expectedChunkIndex);
                                bos.write(data);
                                expectedChunkIndex++;

                                // Periodic flush keeps the pipe (and the upload) fed
                                if (expectedChunkIndex % 4 == 0) {
                                    bos.flush();
                                }
                            }
                        }

                        bos.flush();
                    }

                    uploadFuture.get(120, TimeUnit.SECONDS);
                    return FileConstant.COS_HOST + finalPath;

                } catch (Exception e) {
                    throw new RuntimeException("合并上传失败", e);
                }
            });

            // Wait for the whole pipeline to finish
            downloadFuture.get(180, TimeUnit.SECONDS);
            String fileUrl = mergeFuture.get(180, TimeUnit.SECONDS);

            // Best-effort async cleanup of the chunk objects; must not block the response.
            // Submitted before shutdown(), so the pool still executes it.
            CompletableFuture.runAsync(() -> {
                try {
                    cosManager.deleteObjects(Arrays.asList(chunkPaths));
                } catch (Exception e) {
                    log.warn("清理分片文件失败", e);
                }
            }, downloadExecutor);

            // Replace the chunk bookkeeping with the completed status
            redisTemplate.delete(statusKey);
            Map<String, Object> newStatus = new HashMap<>();
            newStatus.put("completed", true);
            newStatus.put("url", fileUrl);
            redisTemplate.opsForHash().putAll(statusKey, newStatus);
            redisTemplate.expire(statusKey, 24 * 30, TimeUnit.HOURS);

            return fileUrl;

        } catch (Exception e) {
            log.error("合并文件失败: {}", e.getMessage(), e);
            // NOTE(review): dropping the bookkeeping orphans the chunk objects still in
            // COS and forces the client to re-upload everything — confirm this is intended
            redisTemplate.delete(statusKey);
            // Best-effort removal of a possibly partial final object
            CompletableFuture.runAsync(() -> {
                try {
                    cosManager.deleteObject(finalPath);
                } catch (Exception ignored) {
                    // nothing useful to do if cleanup fails
                }
            });
            throw new BusinessException(ErrorCode.SYSTEM_ERROR, "合并文件失败");
        } finally {
            // Tear both pools down, forcing termination if they linger
            downloadExecutor.shutdown();
            processExecutor.shutdown();
            try {
                if (!downloadExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
                    downloadExecutor.shutdownNow();
                }
                if (!processExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
                    processExecutor.shutdownNow();
                }
            } catch (InterruptedException e) {
                downloadExecutor.shutdownNow();
                processExecutor.shutdownNow();
                Thread.currentThread().interrupt();
            }
        }
    }

    /**
     * Immutable carrier pairing a chunk's index with its raw bytes, passed from the
     * download producer to the merge consumer via the blocking queue.
     * An index of -1 (with null data) is the end-of-stream marker.
     */
    private static class ChunkData {
        // Zero-based chunk position; -1 marks the end of the queue
        final int index;
        // Chunk payload; null only for the end-of-stream marker
        final byte[] data;

        ChunkData(int index, byte[] data) {
            this.index = index;
            this.data = data;
        }
    }


    /**
     * Uploads a single chunk to local disk storage.
     * <p>
     * The chunk is written to {@code <chunksRoot>/<biz>/<userId>/<fileHash>/chunk_<index>}
     * and its path is recorded in Redis (hash field = chunk index) with a 24h TTL.
     *
     * @param chunkUploadRequest chunk metadata (biz, file hash, chunk index)
     * @param chunkFile          the chunk payload
     * @param userId             owner of the upload session
     * @return true once the chunk is stored and recorded
     * @throws BusinessException on invalid path segments or storage failure
     */
    @Override
    public Boolean uploadChunkLocal(ChunkUploadRequest chunkUploadRequest, MultipartFile chunkFile, Long userId) {
        try {
            // Redis key holding this file's upload state for this user
            String statusKey = FILE_STATUS_KEY_PREFIX + userId + ":" + chunkUploadRequest.getFileHash();

            // biz and fileHash are client-supplied and used as path segments:
            // reject separators and ".." to prevent writing outside the chunks root
            validatePathSegment(chunkUploadRequest.getBiz());
            validatePathSegment(chunkUploadRequest.getFileHash());

            // Local layout: <chunksRoot>/<biz>/<userId>/<fileHash>/
            String chunksBaseDir = getChunksStoragePath();
            String chunkDir = String.format("%s/%s/%s/%s",
                    chunksBaseDir,
                    chunkUploadRequest.getBiz(),
                    userId,
                    chunkUploadRequest.getFileHash());

            File chunkDirFile = new File(chunkDir);
            // mkdirs() may race with a concurrent chunk upload, so re-check existence
            // instead of trusting its boolean result alone (previously ignored entirely)
            if (!chunkDirFile.exists() && !chunkDirFile.mkdirs() && !chunkDirFile.exists()) {
                throw new BusinessException(ErrorCode.SYSTEM_ERROR, "创建切片目录失败");
            }

            // Persist the chunk directly at its final local location
            String chunkFileName = String.format("chunk_%d", chunkUploadRequest.getChunkIndex());
            String chunkPath = chunkDir + File.separator + chunkFileName;
            chunkFile.transferTo(new File(chunkPath));

            // Record the local chunk path in Redis (hash field = chunk index)
            redisTemplate.opsForHash().put(statusKey, chunkUploadRequest.getChunkIndex().toString(), chunkPath);
            redisTemplate.expire(statusKey, 24, TimeUnit.HOURS);

            return true;

        } catch (IOException e) {
            log.error("上传切片失败: {}", e.getMessage(), e);
            throw new BusinessException(ErrorCode.SYSTEM_ERROR, "分片上传失败");
        }
    }

    /**
     * Rejects path segments that could escape the chunks storage root
     * (empty, containing "..", or containing a path separator).
     */
    private static void validatePathSegment(String segment) {
        if (segment == null || segment.isEmpty()
                || segment.contains("..")
                || segment.contains("/")
                || segment.contains("\\")) {
            throw new BusinessException(ErrorCode.OPERATION_ERROR, "非法的路径参数");
        }
    }

    /**
     * Merges locally stored chunks and streams the result to COS.
     * <p>
     * Chunk files are read sequentially on the calling thread and piped into an
     * asynchronous COS upload, so the merged file is never fully buffered in memory.
     * On success the local chunk files and their empty parent directories are cleaned
     * up asynchronously, and the Redis state is replaced by a "completed" record.
     *
     * @param mergeChunkRequest file hash, name, biz tag and expected chunk count
     * @param userId            owner of the upload session
     * @return public URL of the merged file
     * @throws BusinessException if chunks are missing or the merge/upload fails
     */
    public String mergeChunksLocal(MergeChunkRequest mergeChunkRequest, Long userId) {
        String statusKey = FILE_STATUS_KEY_PREFIX + userId + ":" + mergeChunkRequest.getFileHash();
        int totalChunks = mergeChunkRequest.getTotalChunks();

        // Chunk paths recorded by uploadChunkLocal (hash field = chunk index)
        Map<Object, Object> chunks = redisTemplate.opsForHash().entries(statusKey);

        // Every chunk must be present before merging
        if (chunks.size() != totalChunks) {
            throw new BusinessException(ErrorCode.OPERATION_ERROR, "文件缺失，请重新上传");
        }

        // Resolve and verify every local chunk file, in order
        File[] chunkFiles = new File[totalChunks];
        for (int i = 0; i < totalChunks; i++) {
            String chunkPath = (String) chunks.get(String.valueOf(i));
            ThrowUtils.throwIf(chunkPath == null, ErrorCode.OPERATION_ERROR, "切片" + i + "不存在");

            File chunkFile = new File(chunkPath);
            ThrowUtils.throwIf(!chunkFile.exists(), ErrorCode.OPERATION_ERROR, "切片文件" + i + "不存在");
            chunkFiles[i] = chunkFile;
        }

        String uuid = IdUtil.fastSimpleUUID();
        String fileName = uuid + "-" + mergeChunkRequest.getFileName();
        String finalPath = String.format("/%s/%s/%s", mergeChunkRequest.getBiz(), userId, fileName);

        ExecutorService executor = Executors.newFixedThreadPool(2);

        try {
            // Stream local chunks through a pipe directly into the COS upload
            try (PipedOutputStream pos = new PipedOutputStream();
                 PipedInputStream pis = new PipedInputStream(pos, 64 * 1024 * 1024)) { // 64MB pipe buffer

                // Upload side of the pipe runs asynchronously
                Future<Void> uploadFuture = executor.submit(() -> {
                    try {
                        cosManager.putObjectFromStream(finalPath, pis);
                        return null;
                    } catch (Exception e) {
                        throw new RuntimeException("上传合并文件失败", e);
                    }
                });

                // Caller thread reads chunks sequentially and feeds the pipe
                try (BufferedOutputStream bos = new BufferedOutputStream(pos, 2 * 1024 * 1024)) { // 2MB write buffer
                    byte[] buffer = new byte[2 * 1024 * 1024]; // 2MB read buffer

                    for (File chunkFile : chunkFiles) {
                        try (BufferedInputStream bis = new BufferedInputStream(
                                new FileInputStream(chunkFile), 2 * 1024 * 1024)) {

                            int bytesRead;
                            while ((bytesRead = bis.read(buffer)) != -1) {
                                bos.write(buffer, 0, bytesRead);
                            }
                        }
                    }
                }

                // Wait for the upload to drain the pipe
                uploadFuture.get(180, TimeUnit.SECONDS);
            }

            String fileUrl = FileConstant.COS_HOST + finalPath;

            // Best-effort async cleanup of local chunk files and empty parent dirs
            CompletableFuture.runAsync(() -> {
                try {
                    for (File chunkFile : chunkFiles) {
                        if (chunkFile.exists() && !chunkFile.delete()) {
                            log.warn("删除切片文件失败: {}", chunkFile.getPath());
                        }
                    }

                    // Climb upwards deleting now-empty directories, stopping at the
                    // chunks root. File.list() can return null on an I/O error, so it
                    // is null-checked before use (the previous version could NPE here).
                    File dir = chunkFiles[0].getParentFile();
                    while (dir != null && !"chunks".equals(dir.getName())) {
                        String[] children = dir.list();
                        if (children == null || children.length > 0) {
                            break; // not empty (or unreadable): stop climbing
                        }
                        File toDelete = dir;
                        dir = dir.getParentFile();
                        toDelete.delete();
                    }

                } catch (Exception e) {
                    log.warn("清理本地切片文件失败", e);
                }
            });

            // Replace the chunk bookkeeping with the completed status
            redisTemplate.delete(statusKey);
            Map<String, Object> newStatus = new HashMap<>();
            newStatus.put("completed", true);
            newStatus.put("url", fileUrl);
            redisTemplate.opsForHash().putAll(statusKey, newStatus);
            redisTemplate.expire(statusKey, 24 * 30, TimeUnit.HOURS);

            return fileUrl;

        } catch (Exception e) {
            log.error("合并文件失败: {}", e.getMessage(), e);

            redisTemplate.delete(statusKey);
            // Best-effort removal of a possibly partial final object
            CompletableFuture.runAsync(() -> {
                try {
                    cosManager.deleteObject(finalPath);
                } catch (Exception ignored) {
                    // nothing useful to do if cleanup fails
                }
            });

            throw new BusinessException(ErrorCode.SYSTEM_ERROR, "合并文件失败");
        } finally {
            // Tear the upload executor down, forcing termination if it lingers
            executor.shutdown();
            try {
                if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    executor.shutdownNow();
                }
            } catch (InterruptedException e) {
                executor.shutdownNow();
                Thread.currentThread().interrupt();
            }
        }
    }


    /**
     * Returns the root directory for locally stored chunks
     * ({@code <working-dir>/chunks}), creating it on first use.
     * An alternative would be the system temp dir ({@code java.io.tmpdir}).
     */
    private String getChunksStoragePath() {
        File chunksDir = new File(System.getProperty("user.dir"), "chunks");
        if (!chunksDir.exists()) {
            chunksDir.mkdirs();
        }
        return chunksDir.getPath();
    }

    /**
     * Hourly scheduled job that removes locally stored chunk files older than
     * 25 hours — one hour beyond the 24h Redis bookkeeping TTL, so live sessions
     * are never swept.
     */
    @Scheduled(fixedRate = 3600000) // every hour
    public void cleanExpiredChunks() {
        try {
            File chunksRoot = new File(getChunksStoragePath());
            if (chunksRoot.exists()) {
                long cutoff = System.currentTimeMillis() - 25L * 60 * 60 * 1000;
                cleanDirectoryRecursively(chunksRoot, cutoff);
            }
        } catch (Exception e) {
            log.warn("清理过期切片失败", e);
        }
    }

    /**
     * Recursively deletes files under {@code dir} whose last-modified time is before
     * {@code expireTimestamp}, pruning subdirectories that become empty.
     *
     * @param dir             directory to sweep; non-directories are ignored
     * @param expireTimestamp files modified before this epoch-millis value are removed
     */
    private void cleanDirectoryRecursively(File dir, long expireTimestamp) {
        if (!dir.exists() || !dir.isDirectory()) {
            return;
        }

        File[] files = dir.listFiles();
        if (files == null) {
            // listFiles() returns null on an I/O error — nothing we can sweep
            return;
        }

        for (File file : files) {
            if (file.isDirectory()) {
                cleanDirectoryRecursively(file, expireTimestamp);
                // list() can also return null on I/O error; the previous version
                // dereferenced it unchecked and could NPE here
                String[] remaining = file.list();
                if (remaining != null && remaining.length == 0) {
                    file.delete();
                }
            } else if (file.lastModified() < expireTimestamp) {
                file.delete();
            }
        }
    }


}
