package com.liaoyifan.core.util;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Stream;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.redisson.api.RMap;
import org.redisson.api.RSet;
import org.springframework.util.Assert;
import org.springframework.util.DigestUtils;
import org.springframework.web.multipart.MultipartFile;

@SuppressWarnings("unused")
@NoArgsConstructor(access = AccessLevel.PRIVATE)
@Log4j2
public class Upload {

    // Redis key prefixes
    private static final String UPLOADED_RATE_LIMIT_PREFIX = "core_upload:rate_limit:";
    private static final String UPLOADED_CHUNKS_PREFIX = "core_upload:chunks:";
    private static final String UPLOADED_MAPPING_KEY = "core_upload:mapping";
    // Default per-client upload rate limit (KB/s)
    private static final long LIMIT_KB = 2048;
    // Temporary directory holding uploaded chunks (one sub-directory per fileHash)
    private static final Path TEMP_DIR;
    // Final storage directory for merged files
    private static final Path TARGET_DIR;
    // Current user's home directory (base of both directories above)
    private static final String USER_HOME;

    static {
        USER_HOME = System.getProperty("user.home");
        TEMP_DIR = Paths.get(getDefaultTempDir()).normalize();
        TARGET_DIR = Paths.get(getDefaultTargetDir()).normalize();
        createDirIfNotExists(TEMP_DIR);
        createDirIfNotExists(TARGET_DIR);
    }

    /**
     * Computes the MD5 hash of a file, used as its unique fileKey.
     *
     * <p>Streams the content rather than calling {@code file.getBytes()} so large uploads are not
     * buffered entirely in memory.
     *
     * @param file file to hash
     * @return hex-encoded MD5 digest of the file content
     * @throws IOException if the file content cannot be read
     */
    public static String hash(MultipartFile file) throws IOException {
        try (var in = file.getInputStream()) {
            return DigestUtils.md5DigestAsHex(in);
        }
    }

    /**
     * Returns whether a previously merged file exists for the given hash (both the Redis mapping
     * entry and the file on disk must be present).
     */
    public static boolean existsFile(String fileHash) {
        return getPathByHash(fileHash) != null;
    }

    /**
     * Lists the chunk indices already uploaded for a file.
     *
     * @param fileHash file hash (unique identifier)
     * @return uploaded chunk indices in ascending order; empty list if none
     */
    public static List<Integer> existsChunks(String fileHash) {
        Assert.hasText(fileHash, "fileHash cannot be empty");
        RSet<Integer> chunksSet = getUploadedChunks(fileHash);
        return chunksSet != null && !chunksSet.isEmpty()
                ? chunksSet.readAll().stream().sorted().toList()
                : Collections.emptyList();
    }

    /**
     * Saves one chunk to the temporary directory using the default rate limit.
     *
     * @param clientKey client identifier (used for rate limiting)
     * @param file chunk file
     * @param fileHash file hash (unique identifier)
     * @param chunkIndex zero-based chunk index
     * @return path where the chunk was stored
     * @throws IOException if the chunk cannot be written
     */
    public static String start(
            String clientKey, MultipartFile file, String fileHash, int chunkIndex)
            throws IOException {
        return start(clientKey, file, fileHash, chunkIndex, LIMIT_KB);
    }

    /**
     * Saves one chunk to the temporary directory.
     *
     * @param clientKey client identifier (used for rate limiting)
     * @param file chunk file
     * @param fileHash file hash (unique identifier)
     * @param chunkIndex zero-based chunk index
     * @param rateLimit total rate limit per client (KB/s); non-positive disables limiting
     * @return path where the chunk was stored
     * @throws IOException if the chunk cannot be written
     * @throws SecurityException if {@code fileHash} attempts path traversal
     */
    public static String start(
            String clientKey, MultipartFile file, String fileHash, int chunkIndex, long rateLimit)
            throws IOException {
        Assert.hasText(clientKey, "clientKey cannot be empty");
        Assert.notNull(file, "chunk file cannot be null");
        Assert.hasText(fileHash, "fileHash cannot be empty");
        Assert.isTrue(chunkIndex >= 0, "chunkIndex must be non-negative");

        // Enforce the per-client upload rate limit before touching the disk.
        checkRateLimit(clientKey, file.getSize(), rateLimit);

        // Chunk storage directory: tempDir/fileHash (validated against path traversal).
        Path chunkDir = resolveChunkDir(fileHash);
        createDirIfNotExists(chunkDir);

        // Each chunk is stored under its index as the file name.
        Path chunkPath = chunkDir.resolve(String.valueOf(chunkIndex));
        file.transferTo(chunkPath.toFile());

        // Record the chunk index in Redis; expire stale upload sessions after 30 days.
        RSet<Integer> rSet = getUploadedChunks(fileHash);
        rSet.add(chunkIndex);
        rSet.expire(Duration.ofDays(30));

        return chunkPath.toString();
    }

    /**
     * Merges all uploaded chunks into the final file, appending a UUID to the file name.
     *
     * @param fileHash file hash (unique identifier)
     * @param fileName original file name
     * @param totalChunks expected number of chunks (used to verify completeness)
     * @return absolute path of the merged file
     * @throws IOException if reading a chunk or writing the merged file fails
     * @throws IllegalStateException if any chunk in [0, totalChunks) is missing
     * @throws SecurityException if the resolved target path escapes the storage directory
     */
    public static String merge(String fileHash, String fileName, int totalChunks)
            throws IOException {
        Assert.hasText(fileHash, "fileHash cannot be empty");
        Assert.hasText(fileName, "fileName cannot be empty");
        Assert.isTrue(totalChunks > 0, "totalChunks must be greater than 0");
        // File name with UUID suffix to avoid collisions between identical original names.
        String fileNameWithUUID = FileName.addUUID(fileName);
        // 1. Security check: reject path traversal in the supplied file name.
        Path targetPath = TARGET_DIR.resolve(fileNameWithUUID).normalize();
        if (!targetPath.startsWith(TARGET_DIR)) {
            throw new SecurityException("Invalid file path: " + fileNameWithUUID);
        }
        // 2. Verify chunk completeness by count first (cheap fast-fail).
        RSet<Integer> uploadedChunksSet = getUploadedChunks(fileHash);
        int uploadedCount = uploadedChunksSet.size();
        if (uploadedCount != totalChunks) {
            throw new IllegalStateException(
                    "Missing chunks, cannot merge. Uploaded: "
                            + uploadedCount
                            + ", Total: "
                            + totalChunks);
        }
        // 3. Sorted chunk indices, checked for contiguity: the indices must be exactly
        //    0..totalChunks-1. A bare count comparison is not enough — e.g. {0, 1, 3} has
        //    size 3 but chunk 2 is missing and the merge would silently produce a bad file.
        List<Integer> sortedChunkIndices =
                uploadedChunksSet.readAll().stream().sorted().toList();
        for (int i = 0; i < totalChunks; i++) {
            if (sortedChunkIndices.get(i) != i) {
                throw new IllegalStateException(
                        "Missing chunk " + i + ", cannot merge. Total: " + totalChunks);
            }
        }
        // 4. Write into a temporary file first so a failed merge never corrupts the target.
        Path tempOutput = targetPath.resolveSibling(targetPath.getFileName() + ".tmp");
        createDirIfNotExists(tempOutput.getParent());
        Path chunkDir = resolveChunkDir(fileHash);
        try (BufferedOutputStream out =
                new BufferedOutputStream(Files.newOutputStream(tempOutput))) {
            for (int chunkIndex : sortedChunkIndices) {
                Path chunkPath = chunkDir.resolve(String.valueOf(chunkIndex));
                try (BufferedInputStream in =
                        new BufferedInputStream(Files.newInputStream(chunkPath))) {
                    in.transferTo(out);
                }
            }
        } catch (IOException e) {
            Files.deleteIfExists(tempOutput);
            cleanup(fileHash);
            throw e;
        }
        // 5. Move the temporary file into place, replacing any previous target.
        Files.move(tempOutput, targetPath, StandardCopyOption.REPLACE_EXISTING);
        log.info(
                "Successfully merged {} chunks for file {} to {}",
                totalChunks,
                fileHash,
                targetPath);
        // 6. Drop temporary chunks and Redis bookkeeping, then record the hash -> path mapping.
        cleanup(fileHash);
        String absoluteFile = targetPath.toString();
        RMap<String, String> store = getUploadedMapping();
        store.put(fileHash, absoluteFile);
        return absoluteFile;
    }

    /**
     * Returns the merged file for a hash.
     *
     * @throws FileNotFoundException if no valid file is recorded for the hash
     */
    public static File getFileByHash(String fileHash) throws IOException {
        Path path = getPathByHash(fileHash);
        if (path == null) {
            throw new FileNotFoundException(fileHash);
        }
        return path.toFile();
    }

    /**
     * Returns a stored file by name from the target directory.
     *
     * @throws FileNotFoundException if the file does not exist or is not a regular file
     * @throws SecurityException if the name attempts to escape the storage directory
     */
    public static File getFileByName(String fileName) throws IOException {
        Assert.hasText(fileName, "fileName cannot be empty");
        Path filePath = TARGET_DIR.resolve(fileName).normalize();
        // Security check: without this, a name such as "../../etc/passwd" could read
        // arbitrary files outside the storage directory.
        if (!filePath.startsWith(TARGET_DIR)) {
            throw new SecurityException("Invalid file path: " + fileName);
        }
        if (Files.exists(filePath) && Files.isRegularFile(filePath)) {
            return filePath.toFile(); // file is valid
        }
        throw new FileNotFoundException(fileName);
    }

    /**
     * Removes the temporary chunk directory and the Redis chunk-index set for a file.
     *
     * @param fileHash file hash (unique identifier)
     */
    public static void cleanup(String fileHash) {
        Assert.hasText(fileHash, "fileHash cannot be empty");
        Path chunkDir = resolveChunkDir(fileHash);
        try {
            deleteDirectory(chunkDir);
            getUploadedChunks(fileHash).delete();
            log.info("Cleaned up temporary files for {}", fileHash);
        } catch (IOException e) {
            // Best effort: leftover Redis entries still expire via the 30-day TTL.
            log.error("Failed to clean up temporary files for {}", fileHash, e);
        }
    }

    /**
     * Looks up the stored path for a hash and verifies the file still exists on disk.
     *
     * @return the valid file path, or {@code null} if unknown or the file was removed
     */
    private static Path getPathByHash(String fileHash) {
        Assert.hasText(fileHash, "fileHash cannot be empty");
        // 1. Look up the hash -> path mapping in Redis.
        String storePath = getUploadedMapping().get(fileHash);
        if (storePath == null) {
            return null;
        }
        // 2. Guard against stale mappings whose file has been deleted from disk.
        Path filePath = Paths.get(storePath).normalize();
        if (Files.exists(filePath) && Files.isRegularFile(filePath)) {
            return filePath; // file is valid
        }
        return null;
    }

    /**
     * Checks whether a client upload exceeds the rate limit, blocking until a permit is
     * available or the 60-second wait elapses.
     *
     * @param clientKey client identifier
     * @param bytes bytes about to be uploaded
     * @param rateLimit maximum KB allowed within the limiter window; non-positive disables
     * @throws IllegalStateException if no permit is obtained within 60 seconds
     */
    private static void checkRateLimit(String clientKey, long bytes, long rateLimit) {
        if (rateLimit <= 0 || bytes <= 0) {
            return;
        }
        Assert.hasText(clientKey, "clientKey cannot be blank");
        if (!Redis.tryAcquire(
                UPLOADED_RATE_LIMIT_PREFIX + clientKey, rateLimit * 1024, 1, bytes, 60)) {
            // IllegalStateException is a RuntimeException, so existing callers are unaffected.
            throw new IllegalStateException("上传速率异常，等60s后仍未获得许可（本次" + bytes + "字节）");
        }
    }

    /** Resolves tempDir/fileHash, rejecting hashes that attempt path traversal. */
    private static Path resolveChunkDir(String fileHash) {
        Path chunkDir = TEMP_DIR.resolve(fileHash).normalize();
        if (!chunkDir.startsWith(TEMP_DIR)) {
            throw new SecurityException("Invalid fileHash: " + fileHash);
        }
        return chunkDir;
    }

    /** Recursively deletes a directory (NIO API); per-file failures are logged, not thrown. */
    private static void deleteDirectory(Path dir) throws IOException {
        if (!Files.exists(dir)) {
            return;
        }
        try (Stream<Path> stream = Files.walk(dir)) {
            // Reverse order deletes children before their parent directories.
            stream.sorted(Comparator.reverseOrder())
                    .forEach(
                            path -> {
                                try {
                                    Files.deleteIfExists(path);
                                } catch (IOException e) {
                                    log.warn("Failed to delete file: {}", path, e);
                                }
                            });
        }
    }

    /** Redis set of uploaded chunk indices for a file hash. */
    private static RSet<Integer> getUploadedChunks(String fileHash) {
        return Redis.getSet(UPLOADED_CHUNKS_PREFIX + fileHash);
    }

    /** Redis map of fileHash -> absolute path of the merged file. */
    private static RMap<String, String> getUploadedMapping() {
        return Redis.getMap(UPLOADED_MAPPING_KEY);
    }

    /** Default temporary chunk directory under the user's home. */
    private static String getDefaultTempDir() {
        return Paths.get(USER_HOME, "core_upload", "temp").toString();
    }

    /** Default target file directory under the user's home. */
    private static String getDefaultTargetDir() {
        return Paths.get(USER_HOME, "core_upload", "target").toString();
    }

    /** Creates the directory (and any missing parents) if it does not already exist. */
    public static void createDirIfNotExists(Path dirPath) {
        try {
            if (!Files.exists(dirPath)) {
                Files.createDirectories(dirPath);
                log.info("Created directory: {}", dirPath);
            }
        } catch (IOException e) {
            throw new RuntimeException("Failed to create directory: " + dirPath, e);
        }
    }
}
