package com.ksyun.campus.metaserver.services.impl;

import com.ksyun.campus.metaserver.config.MinfsProperties;
import com.ksyun.campus.metaserver.domain.*;
import com.ksyun.campus.metaserver.services.IMetaService;
import com.ksyun.campus.metaserver.services.MetadataSyncService;
import com.ksyun.campus.metaserver.services.ReplicaAllocationMonitor;
import com.ksyun.campus.metaserver.storage.MetadataStore;
import com.ksyun.campus.metaserver.zk.ZooKeeperService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

/**
 * MetaServer 核心服务实现类
 * 负责实现文件系统的核心操作，包括目录和文件的创建、删除、查询等功能
 * 以及块分配、写入提交等I/O操作
 */
@Slf4j
@Service
public class MetaServiceImpl implements IMetaService {
    
    /**
     * Metadata store used to persist all file-system metadata:
     * inodes, directory entries, block records, the path cache and
     * the cluster configuration.
     */
    @Autowired
    private MetadataStore metadataStore;
    
    /**
     * ZooKeeper service used for distributed coordination and for
     * discovering registered DataServers.
     */
    @Autowired
    private ZooKeeperService zooKeeperService;
    
    /**
     * MinFS configuration properties (replication factor, block size,
     * heartbeat timeout, ...).
     */
    @Autowired
    private MinfsProperties properties;
    
    /**
     * Monitor that records per-block replica allocation statistics.
     */
    @Autowired
    private ReplicaAllocationMonitor allocationMonitor;
    
    /**
     * Metadata sync service used to replicate metadata changes from the
     * primary to standby MetaServer nodes.
     */
    @Autowired
    private MetadataSyncService syncService;
    
    /**
     * Reserved fileId of the file-system root directory.
     */
    private static final String ROOT_FILE_ID = "root";
    
    /**
     * Initializes the file system: creates the root directory inode and
     * seeds the cluster configuration from the configured properties.
     * Idempotent — a no-op once the root inode already exists.
     */
    @Override
    public void initializeFileSystem() throws Exception {
        // Nothing to do when the root directory is already present.
        if (metadataStore.getInode(ROOT_FILE_ID).isPresent()) {
            log.info("File system already initialized");
            return;
        }

        // Build and persist the root directory inode.
        Inode rootDir = Inode.builder()
                .fileId(ROOT_FILE_ID)
                .type(FileType.DIR)
                .name("/")
                .parentId(null)
                .ctime(LocalDateTime.now())
                .mtime(LocalDateTime.now())
                .size(0)
                .replication(0)
                .owner("root")
                .group("root")
                .permissions(755)
                .build();

        metadataStore.putInode(rootDir);
        metadataStore.putPathCache("/", ROOT_FILE_ID);

        // Seed the cluster configuration from application properties.
        ClusterConfig clusterConfig = ClusterConfig.defaultConfig();
        clusterConfig.setReplicationFactor(properties.getCluster().getReplicationFactor());
        clusterConfig.setBlockSizeBytes(properties.getCluster().getBlockSize());
        metadataStore.putClusterConfig(clusterConfig);

        log.info("File system initialized with root directory");
    }
    
    /**
     * Creates a directory and any missing parents (like {@code mkdir -p}).
     * Intermediate parents are created with permissions 755.
     *
     * @param path directory path
     * @param permissions permissions for the final directory
     * @param owner owner of created directories
     * @param group group of created directories
     * @return the inode of the (existing or newly created) directory
     * @throws IllegalArgumentException if the path exists but is a regular file
     */
    @Override
    public Inode mkdirs(String path, int permissions, String owner, String group) throws Exception {
        log.debug("Creating directories: {}", path);

        // The root directory always exists.
        if ("/".equals(path)) {
            return metadataStore.getInode(ROOT_FILE_ID).orElseThrow();
        }

        // Reuse an existing directory; reject an existing regular file.
        Optional<String> resolved = resolvePath(path);
        if (resolved.isPresent()) {
            Inode existing = metadataStore.getInode(resolved.get()).orElseThrow();
            if (!existing.isDirectory()) {
                throw new IllegalArgumentException("Path already exists as file: " + path);
            }
            return existing;
        }

        // Ensure the parent chain exists first (recursive step).
        Inode parent = mkdirs(getParentPath(path), 755, owner, group);

        String dirName = getFileName(path);
        String fileId = generateFileId();

        Inode dirInode = Inode.builder()
                .fileId(fileId)
                .type(FileType.DIR)
                .name(dirName)
                .parentId(parent.getFileId())
                .ctime(LocalDateTime.now())
                .mtime(LocalDateTime.now())
                .size(0)
                .replication(0)
                .owner(owner)
                .group(group)
                .permissions(permissions)
                .build();

        // Persist inode, directory entry and path-cache entry together.
        metadataStore.putInode(dirInode);
        metadataStore.putDentry(parent.getFileId(), dirName, fileId);
        metadataStore.putPathCache(path, fileId);

        // Replicate the change to standby nodes.
        publishDirectoryCreateEvent(path, dirInode);

        log.info("Created directory: {} with fileId: {}", path, fileId);
        return dirInode;
    }
    
    /**
     * Creates a new empty file, creating missing parent directories first.
     *
     * @param path file path
     * @param replication desired replica count for the file's blocks
     * @param permissions file permissions
     * @param owner owner
     * @param group group
     * @return the inode of the newly created file
     * @throws IllegalArgumentException if the path already exists
     */
    @Override
    public Inode createFile(String path, int replication, int permissions, String owner, String group) throws Exception {
        log.debug("Creating file: {}", path);

        // Any existing entry (file or directory) at this path is an error.
        if (resolvePath(path).isPresent()) {
            throw new IllegalArgumentException("Path already exists: " + path);
        }

        // Make sure the parent directory chain exists.
        Inode parent = mkdirs(getParentPath(path), 755, owner, group);

        String fileName = getFileName(path);
        String fileId = generateFileId();

        Inode fileInode = Inode.builder()
                .fileId(fileId)
                .type(FileType.FILE)
                .name(fileName)
                .parentId(parent.getFileId())
                .ctime(LocalDateTime.now())
                .mtime(LocalDateTime.now())
                .size(0)
                .replication(replication)
                .owner(owner)
                .group(group)
                .permissions(permissions)
                .build();

        // Persist inode, directory entry and path-cache entry together.
        metadataStore.putInode(fileInode);
        metadataStore.putDentry(parent.getFileId(), fileName, fileId);
        metadataStore.putPathCache(path, fileId);

        // Replicate the change to standby nodes.
        publishFileCreateEvent(path, fileInode);

        log.info("Created file: {} with fileId: {}", path, fileId);
        return fileInode;
    }
    
    /**
     * Deletes a file or directory.
     *
     * @param path path to delete (must not be "/")
     * @param recursive whether a non-empty directory may be deleted recursively
     * @throws IllegalArgumentException if the path is "/" or does not exist
     */
    @Override
    public void delete(String path, boolean recursive) throws Exception {
        log.debug("Deleting path: {}, recursive: {}", path, recursive);

        // The root directory can never be removed.
        if ("/".equals(path)) {
            throw new IllegalArgumentException("Cannot delete root directory");
        }

        String fileId = resolvePath(path)
                .orElseThrow(() -> new IllegalArgumentException("Path not found: " + path));
        Inode target = metadataStore.getInode(fileId).orElseThrow();

        // Directories and files follow different deletion paths.
        if (target.isDirectory()) {
            deleteDirectoryWithSync(target, recursive, path);
        } else {
            deleteFileWithSync(target, path);
        }

        // Evict the now-dangling path-cache entry.
        metadataStore.deletePathCache(path);

        log.info("Deleted: {}", path);
    }

    /**
     * Returns the status of a file or directory.
     *
     * @param path path to inspect
     * @return the corresponding StatInfo
     * @throws IllegalArgumentException if the path does not exist
     */
    @Override
    public StatInfo getStatus(String path) throws Exception {
        String fileId = resolvePath(path)
                .orElseThrow(() -> new IllegalArgumentException("Path not found: " + path));
        Inode inode = metadataStore.getInode(fileId).orElseThrow();
        return convertToStatInfo(path, inode);
    }
    
    /**
     * Lists the entries of a directory.
     *
     * @param path directory path
     * @return StatInfo for each child entry
     * @throws IllegalArgumentException if the path is missing or not a directory
     */
    @Override
    public List<StatInfo> listStatus(String path) throws Exception {
        String dirId = resolvePath(path)
                .orElseThrow(() -> new IllegalArgumentException("Path not found: " + path));

        Inode dirInode = metadataStore.getInode(dirId).orElseThrow();
        if (!dirInode.isDirectory()) {
            throw new IllegalArgumentException("Path is not a directory: " + path);
        }

        // Normalize the prefix so child paths never contain "//".
        String prefix = path.endsWith("/") ? path : path + "/";

        // Stat every child entry of the directory.
        List<StatInfo> result = new ArrayList<>();
        for (String childName : metadataStore.listDentries(dirInode.getFileId())) {
            Optional<String> childId = metadataStore.getDentry(dirInode.getFileId(), childName);
            if (childId.isPresent()) {
                Inode childInode = metadataStore.getInode(childId.get()).orElseThrow();
                result.add(convertToStatInfo(prefix + childName, childInode));
            }
        }
        return result;
    }
    
    /**
     * Returns the block descriptors recorded for a file.
     *
     * @param fileId internal file ID
     * @return the file's blocks (as stored; may be empty)
     */
    @Override
    public List<BlockInfo> getFileBlocks(String fileId) throws Exception {
        // Pure delegation — the store holds the authoritative block records.
        return metadataStore.getFileBlocks(fileId);
    }
    
    /**
     * Allocates new blocks for a file and assigns their replicas to DataServers.
     * New blocks are appended after any previously allocated blocks.
     *
     * @param fileId file ID (must refer to a FILE inode)
     * @param bytes number of bytes the caller intends to write
     * @return the newly allocated block descriptors
     * @throws IllegalArgumentException if fileId does not refer to a regular file
     */
    @Override
    public List<BlockInfo> allocateBlocks(String fileId, long bytes) throws Exception {
        Inode fileInode = metadataStore.getInode(fileId).orElseThrow();
        if (!fileInode.isFile()) {
            throw new IllegalArgumentException("Not a file: " + fileId);
        }
        
        // Block size comes from the cluster configuration.
        ClusterConfig config = metadataStore.getClusterConfig().orElse(ClusterConfig.defaultConfig());
        long blockSize = config.getBlockSizeBytes();
        // Ceiling division in pure long arithmetic. The previous
        // (int) Math.ceil((double) bytes / blockSize) loses precision once
        // bytes exceeds 2^53 (double mantissa); integer math never does.
        int numBlocks = (int) ((bytes + blockSize - 1) / blockSize);
        
        List<BlockInfo> allocatedBlocks = new ArrayList<>();
        List<BlockInfo> existingBlocks = metadataStore.getFileBlocks(fileId);
        int startIdx = existingBlocks.size();  // append after existing blocks
        
        for (int i = 0; i < numBlocks; i++) {
            int blkIdx = startIdx + i;
            // The last block may be shorter than a full block.
            long blkSize = Math.min(blockSize, bytes - (i * blockSize));
            
            // Choose DataServers for this block's replicas.
            List<DataServerInfo> selectedDs = selectDataServersForBlock(fileId, blkIdx);
            List<BlockInfo.ReplicaInfo> replicas = selectedDs.stream()
                    .map(ds -> BlockInfo.ReplicaInfo.builder()
                            .dsId(ds.getDsId())
                            .host(ds.getHost())
                            .port(ds.getPort())
                            .role(BlockInfo.ReplicaInfo.ReplicaRole.REPLICA)
                            .build())
                    .collect(Collectors.toList());
            // The first replica acts as the primary.
            if (!replicas.isEmpty()) {
                replicas.get(0).setRole(BlockInfo.ReplicaInfo.ReplicaRole.PRIMARY);
            }
            
            BlockInfo blockInfo = BlockInfo.builder()
                    .fileId(fileId)
                    .blkIdx(blkIdx)
                    .size(blkSize)
                    .replicas(replicas)
                    .build();
            
            metadataStore.putBlock(fileId, blkIdx, blockInfo);
            allocatedBlocks.add(blockInfo);
            
            // Record allocation statistics for monitoring.
            allocationMonitor.recordAllocation(fileId, blkIdx, selectedDs);
        }
        
        // Publish a sync event so standby MetaServers replicate the allocation.
        if (syncService != null) {
            Map<String, Object> eventData = new HashMap<>();
            eventData.put("fileId", fileId);
            eventData.put("bytes", bytes);
            eventData.put("numBlocks", numBlocks);
            eventData.put("startIdx", startIdx);
            eventData.put("blocks", allocatedBlocks.stream().map(block -> {
                Map<String, Object> blockData = new HashMap<>();
                blockData.put("blkIdx", block.getBlkIdx());
                blockData.put("size", block.getSize());
                // Defensive: a block's replica list may be null.
                List<BlockInfo.ReplicaInfo> replicas = block.getReplicas();
                if (replicas == null) {
                    replicas = new ArrayList<>();
                }
                blockData.put("replicas", replicas.stream().map(replica -> {
                    Map<String, Object> replicaData = new HashMap<>();
                    replicaData.put("dsId", replica.getDsId());
                    replicaData.put("host", replica.getHost());
                    replicaData.put("port", replica.getPort());
                    replicaData.put("role", replica.getRole().toString());
                    return replicaData;
                }).collect(Collectors.toList()));
                return blockData;
            }).collect(Collectors.toList()));
            
            syncService.publishSyncEvent(
                MetadataSyncService.SyncEventType.BLOCK_ALLOCATE,
                fileId, // the fileId doubles as the event path
                eventData
            );
            
            log.debug("发布块分配同步事件: {} blocks for file {}", numBlocks, fileId);
        }

        log.info("Allocated {} blocks for file {}", numBlocks, fileId);
        return allocatedBlocks;
    }
    
    /**
     * Commits a completed write: updates the file's size/mtime/checksum and
     * persists the final block sizes while preserving the replica placement
     * chosen at allocation time.
     *
     * @param fileId file ID
     * @param size final file size in bytes
     * @param blocks blocks written by the client (their sizes are authoritative)
     * @param checksum optional whole-file checksum; ignored when null
     */
    @Override
    public void commitWrite(String fileId, long size, List<BlockInfo> blocks, String checksum) throws Exception {
        Inode fileInode = metadataStore.getInode(fileId).orElseThrow();
        
        // Update inode metadata first.
        fileInode.setSize(size);
        fileInode.setMtime(LocalDateTime.now());
        if (checksum != null) {
            fileInode.setChecksum(checksum);
        }
        metadataStore.putInode(fileInode);
        
        // Persist the block sizes, keeping existing replica information.
        for (BlockInfo block : blocks) {
            Optional<BlockInfo> existingBlock = metadataStore.getBlock(fileId, block.getBlkIdx());
            if (existingBlock.isPresent()) {
                BlockInfo existing = existingBlock.get();
                // Keep the stored replicas; never persist a null list.
                List<BlockInfo.ReplicaInfo> replicas = existing.getReplicas();
                if (replicas == null) {
                    log.warn("Block {}:{} has null replicas, using empty list", fileId, block.getBlkIdx());
                    replicas = new ArrayList<>();
                }
                
                BlockInfo updatedBlock = BlockInfo.builder()
                        .fileId(existing.getFileId())
                        .blkIdx(existing.getBlkIdx())
                        .size(block.getSize())  // new size from the client
                        .replicas(replicas)     // preserved placement (non-null)
                        .build();
                metadataStore.putBlock(fileId, block.getBlkIdx(), updatedBlock);
            } else {
                // Should not happen (blocks are allocated before commit);
                // store the incoming block as a fallback, with a non-null replica list.
                if (block.getReplicas() == null) {
                    block = BlockInfo.builder()
                            .fileId(block.getFileId())
                            .blkIdx(block.getBlkIdx())
                            .size(block.getSize())
                            .replicas(new ArrayList<>())
                            .build();
                }
                metadataStore.putBlock(fileId, block.getBlkIdx(), block);
            }
        }
        
        // Publish a sync event so standby MetaServers replicate the commit.
        if (syncService != null) {
            Map<String, Object> eventData = new HashMap<>();
            eventData.put("fileId", fileId);
            eventData.put("size", size);
            eventData.put("checksum", checksum);
            eventData.put("mtime", fileInode.getMtime().toString());
            eventData.put("blocks", serializeBlocksForSync(blocks));
            
            syncService.publishSyncEvent(
                MetadataSyncService.SyncEventType.BLOCK_COMMIT,
                fileId, // the fileId doubles as the event path
                eventData
            );
            
            log.debug("发布块提交同步事件: fileId={}, size={}", fileId, size);
        }
        
        log.info("Committed write for file {} with size {}", fileId, size);
    }
    
    /**
     * Serializes block descriptors into the plain-map form carried by sync
     * events. Null replica lists are serialized as empty lists.
     */
    private List<Map<String, Object>> serializeBlocksForSync(List<BlockInfo> blocks) {
        return blocks.stream().map(block -> {
            Map<String, Object> blockData = new HashMap<>();
            blockData.put("blkIdx", block.getBlkIdx());
            blockData.put("size", block.getSize());
            List<BlockInfo.ReplicaInfo> replicas = block.getReplicas();
            if (replicas == null) {
                replicas = new ArrayList<>();
            }
            blockData.put("replicas", replicas.stream().map(replica -> {
                Map<String, Object> replicaData = new HashMap<>();
                replicaData.put("dsId", replica.getDsId());
                replicaData.put("host", replica.getHost());
                replicaData.put("port", replica.getPort());
                replicaData.put("role", replica.getRole().toString());
                return replicaData;
            }).collect(Collectors.toList()));
            return blockData;
        }).collect(Collectors.toList());
    }
    
    /**
     * Resolves a logical path (e.g. "/user/test/data.txt") to the system's
     * internal fileId. Users address files by path, while all internal
     * metadata (inodes, blocks) is keyed by fileId.
     *
     * @param path logical path to resolve
     * @return the internal fileId for the path
     * @throws IllegalArgumentException if the path does not exist
     */
    @Override
    public String resolvePathToFileId(String path) throws Exception {
        return resolvePath(path)
                .orElseThrow(() -> new IllegalArgumentException("Path not found: " + path));
    }

    /**
     * Shared round-robin counter backing the weighted replica-selection
     * strategy; AtomicInteger keeps concurrent increments thread-safe.
     */
    private final AtomicInteger roundRobinCounter = new AtomicInteger(0);
    
    /**
     * Chooses the DataServers that will host the replicas of one block.
     * Only healthy, ACTIVE servers are considered; the effective replication
     * factor is capped at the number of such servers.
     *
     * @param fileId file ID (used for logging)
     * @param blkIdx block index (used for logging)
     * @return the selected DataServers
     * @throws RuntimeException when no healthy DataServer is available
     */
    private List<DataServerInfo> selectDataServersForBlock(String fileId, int blkIdx) {
        List<DataServerInfo> registered = zooKeeperService.getCurrentDataServers();
        log.info("Total registered DataServers: {}", registered.size());
        
        // Keep only servers that are both heartbeat-healthy and ACTIVE.
        List<DataServerInfo> availableDs = new ArrayList<>();
        for (DataServerInfo ds : registered) {
            boolean healthy = ds.isHealthy(properties.getHeartbeat().getTimeoutSeconds());
            boolean active = ds.getStatus() == DataServerInfo.DataServerStatus.ACTIVE;
            log.debug("DataServer {}: healthy={}, active={}, status={}", 
                ds.getDsId(), healthy, active, ds.getStatus());
            if (healthy && active) {
                availableDs.add(ds);
            }
        }
        
        log.info("Available healthy DataServers for block {}:{}: {}", 
            fileId, blkIdx, availableDs.size());
        
        // Without any usable server, allocation cannot proceed.
        if (availableDs.isEmpty()) {
            log.error("No healthy DataServers available! All DataServers: {}", 
                registered.stream().map(ds -> String.format("%s(healthy=%s,status=%s)", 
                    ds.getDsId(), ds.isHealthy(properties.getHeartbeat().getTimeoutSeconds()), ds.getStatus()))
                    .collect(Collectors.toList()));
            throw new RuntimeException("No healthy DataServers available");
        }
        
        // Never ask for more replicas than there are servers.
        int replicationFactor = Math.min(properties.getCluster().getReplicationFactor(), availableDs.size());
        log.info("Replication factor for block {}:{}: {} (configured: {}, available DataServers: {})", 
            fileId, blkIdx, replicationFactor, properties.getCluster().getReplicationFactor(), availableDs.size());
        
        // Delegate placement to the round-robin + capacity-weight strategy.
        return selectDataServersWithRoundRobinAndCapacity(availableDs, replicationFactor, fileId, blkIdx);
    }
    
    /**
     * Selects DataServers for one block using a hybrid strategy:
     * capacity-weighted ordering plus round-robin, guaranteeing that no
     * DataServer ever holds two replicas of the same block.
     *
     * @param availableDs healthy candidate DataServers
     * @param replicationFactor number of replicas to place
     * @param fileId file ID (for logging only)
     * @param blkIdx block index (for logging only)
     * @return the chosen DataServers (may be fewer than replicationFactor)
     * @throws RuntimeException if a duplicate DataServer is ever selected
     */
    private List<DataServerInfo> selectDataServersWithRoundRobinAndCapacity(
            List<DataServerInfo> availableDs, int replicationFactor, String fileId, int blkIdx) {
        
        List<DataServerInfo> selectedDs = new ArrayList<>();
        Set<String> selectedDsIds = new HashSet<>();
        
        // Step 1: order servers by remaining-capacity weight (larger first).
        List<WeightedDataServer> weightedServers = calculateCapacityWeights(availableDs);
        
        // Step 2: pick one distinct server per replica.
        for (int replica = 0; replica < replicationFactor; replica++) {
            DataServerInfo selected = selectNextDataServerWithRoundRobin(
                    weightedServers, selectedDsIds, replica);
            
            if (selected != null) {
                selectedDs.add(selected);
                selectedDsIds.add(selected.getDsId());
                
                // SLF4J only substitutes "{}" placeholders — a "{:.1f}" token is
                // printed verbatim — so the usage percentage is pre-formatted.
                log.debug("Selected DataServer {} for replica {} of block {}:{}, " +
                        "available capacity: {}MB, usage: {}%",
                        selected.getDsId(), replica, fileId, blkIdx,
                        selected.getAvailableBytes() / (1024 * 1024),
                        String.format("%.1f", selected.getUsageRatio() * 100));
            }
        }
        
        // Warn when fewer replicas than requested could be placed.
        if (selectedDs.size() < replicationFactor) {
            log.warn("Could only select {} DataServers out of required {} for block {}:{}", 
                    selectedDs.size(), replicationFactor, fileId, blkIdx);
        }
        
        // Invariant check: a DataServer must never hold two replicas of one block.
        Set<String> uniqueIds = selectedDs.stream()
                .map(DataServerInfo::getDsId)
                .collect(Collectors.toSet());
        
        if (uniqueIds.size() != selectedDs.size()) {
            log.error("Duplicate DataServer selected for block {}:{}, this should not happen!", 
                    fileId, blkIdx);
            throw new RuntimeException("Duplicate DataServer selected for replicas");
        }
        
        // Report the number of replicas actually placed (may be < replicationFactor).
        log.info("Successfully allocated {} replicas for block {}:{} on DataServers: {}", 
                selectedDs.size(), fileId, blkIdx, 
                selectedDs.stream().map(DataServerInfo::getDsId).collect(Collectors.toList()));
        
        return selectedDs;
    }
    
    /**
     * Computes a capacity-based weight for each DataServer: the larger the
     * remaining capacity, the higher the weight. Weights are floored at 0.1
     * so no server is ever starved; when the cluster is completely full,
     * every server gets an equal weight of 1.0.
     *
     * @param dataServers candidate servers
     * @return servers wrapped with weights, sorted by weight descending
     */
    private List<WeightedDataServer> calculateCapacityWeights(List<DataServerInfo> dataServers) {
        // Total free capacity across all candidates.
        long totalFree = 0L;
        for (DataServerInfo ds : dataServers) {
            totalFree += ds.getAvailableBytes();
        }
        
        List<WeightedDataServer> weighted = new ArrayList<>(dataServers.size());
        for (DataServerInfo ds : dataServers) {
            // Full cluster: equal weights; otherwise proportional with a 0.1 floor.
            double weight = (totalFree == 0)
                    ? 1.0
                    : Math.max((double) ds.getAvailableBytes() / totalFree, 0.1);
            weighted.add(new WeightedDataServer(ds, weight));
        }
        
        // Highest weight first.
        weighted.sort(Comparator.comparingDouble((WeightedDataServer ws) -> ws.weight).reversed());
        
        return weighted;
    }
    
    /**
     * Picks the next DataServer for a replica, combining capacity weighting
     * with a shared round-robin counter.
     *
     * @param weightedServers capacity-ordered candidates
     * @param excludeIds ids of servers already holding a replica of this block
     * @param replicaIndex 0-based index of the replica being placed
     * @return the chosen server, or null when every candidate is excluded
     */
    private DataServerInfo selectNextDataServerWithRoundRobin(
            List<WeightedDataServer> weightedServers, Set<String> excludeIds, int replicaIndex) {
        
        // Drop servers that already hold a replica of this block.
        List<WeightedDataServer> candidates = weightedServers.stream()
                .filter(ws -> !excludeIds.contains(ws.dataServer.getDsId()))
                .collect(Collectors.toList());
        
        if (candidates.isEmpty()) {
            return null;
        }
        
        // First replica: take the highest-weighted (most free capacity) server.
        if (replicaIndex == 0) {
            return candidates.get(0).dataServer;
        }
        
        // Subsequent replicas: weighted round-robin — heavier servers appear
        // more often in the expanded list.
        List<DataServerInfo> weightedList = createWeightedRoundRobinList(candidates);
        
        if (weightedList.isEmpty()) {
            // Fallback: plain round-robin. Math.floorMod keeps the index
            // non-negative even after the counter overflows past
            // Integer.MAX_VALUE (a plain % would yield a negative index and
            // an ArrayIndexOutOfBoundsException).
            int index = Math.floorMod(roundRobinCounter.getAndIncrement(), candidates.size());
            return candidates.get(index).dataServer;
        }
        
        int index = Math.floorMod(roundRobinCounter.getAndIncrement(), weightedList.size());
        return weightedList.get(index);
    }
    
    /**
     * Expands the candidates into a weighted round-robin list: each server
     * appears roughly (weight * 100) times (at least once), then the list is
     * shuffled to avoid positional bias.
     *
     * @param candidates weighted candidate servers
     * @return shuffled expanded list for round-robin selection
     */
    private List<DataServerInfo> createWeightedRoundRobinList(List<WeightedDataServer> candidates) {
        List<DataServerInfo> expanded = new ArrayList<>();
        
        for (WeightedDataServer entry : candidates) {
            // Convert the fractional weight into an integer slot count.
            int slots = Math.max(1, (int) (entry.weight * 100));
            for (int slot = 0; slot < slots; slot++) {
                expanded.add(entry.dataServer);
            }
        }
        
        // Randomize ordering so equally weighted servers rotate fairly.
        Collections.shuffle(expanded);
        return expanded;
    }
    
    /**
     * Immutable pairing of a DataServer with its capacity-based selection
     * weight; used internally by the replica-placement strategy.
     */
    private static class WeightedDataServer {
        final DataServerInfo dataServer;
        final double weight;
        
        WeightedDataServer(DataServerInfo dataServer, double weight) {
            this.dataServer = dataServer;
            this.weight = weight;
        }
    }
    
    /**
     * Resolves a path to its fileId, consulting the path cache first and
     * falling back to a component-by-component walk from the root. Stale
     * cache entries (pointing at deleted inodes) are evicted; successful
     * walks are cached.
     *
     * @param path logical path
     * @return the fileId, or empty when the path does not exist
     */
    private Optional<String> resolvePath(String path) throws Exception {
        // Fast path: path cache.
        Optional<String> cachedId = metadataStore.getPathCache(path);
        if (cachedId.isPresent()) {
            if (metadataStore.getInode(cachedId.get()).isPresent()) {
                return cachedId;
            }
            // Stale entry (inode gone) — evict and fall through to a full walk.
            metadataStore.deletePathCache(path);
        }
        
        // Walk each path component from the root.
        String current = ROOT_FILE_ID;
        for (String segment : path.split("/")) {
            if (segment.isEmpty()) {
                continue;
            }
            Optional<String> next = metadataStore.getDentry(current, segment);
            if (!next.isPresent()) {
                return Optional.empty();
            }
            current = next.get();
        }
        
        // Remember the successful resolution.
        metadataStore.putPathCache(path, current);
        return Optional.of(current);
    }
    
    /**
     * Converts an inode into the externally visible StatInfo form. For
     * regular files, the distinct set of DataServers holding any of the
     * file's block replicas is attached as ReplicaData; failures while
     * gathering replica data are logged and leave replicaData null.
     *
     * @param path logical path of the entry
     * @param inode the inode to convert
     * @return the populated StatInfo
     */
    private StatInfo convertToStatInfo(String path, Inode inode) {
        StatInfo.StatInfoBuilder builder = StatInfo.builder()
                .path(path)
                .type(inode.getType())
                .size(inode.getSize())
                .mtime(inode.getMtime())
                .replication(inode.getReplication())
                .owner(inode.getOwner())
                .group(inode.getGroup())
                .permissions(inode.getPermissions());
        
        if (inode.getType() == FileType.FILE) {
            try {
                // Collect the distinct DataServer ids across all block replicas.
                Set<String> dataServerIds = new HashSet<>();
                for (BlockInfo block : metadataStore.getFileBlocks(inode.getFileId())) {
                    List<BlockInfo.ReplicaInfo> replicas = block.getReplicas();
                    if (replicas == null) {
                        continue;
                    }
                    for (BlockInfo.ReplicaInfo replica : replicas) {
                        dataServerIds.add(replica.getDsId());
                    }
                }
                
                // Wrap each DataServer id in the ReplicaData transport form.
                List<ReplicaData> replicaDataList = new ArrayList<>();
                for (String dsId : dataServerIds) {
                    ReplicaData replicaData = new ReplicaData();
                    replicaData.dsNode = dsId;
                    replicaDataList.add(replicaData);
                }
                builder.replicaData(replicaDataList);
                
            } catch (Exception e) {
                // Best effort: replica data is supplementary, keep the rest of the stat.
                log.warn("Failed to get replica data for file {}: {}", inode.getFileId(), e.getMessage());
                builder.replicaData(null);
            }
        }
        
        return builder.build();
    }
    
    /**
     * Returns the parent path of a path; the parent of "/" is "/" itself.
     *
     * @param path logical path
     * @return the parent path
     */
    private String getParentPath(String path) {
        if ("/".equals(path)) {
            return "/";
        }
        int cut = path.lastIndexOf('/');
        // A top-level entry ("/name") has the root as its parent.
        if (cut == 0) {
            return "/";
        }
        return path.substring(0, cut);
    }
    
    /**
     * Returns the final component of a path (everything after the last '/').
     *
     * @param path logical path
     * @return the entry name
     */
    private String getFileName(String path) {
        return path.substring(path.lastIndexOf('/') + 1);
    }
    
    /**
     * Generates a unique 32-character file ID (a random UUID with the
     * dashes stripped).
     *
     * @return a fresh file ID
     */
    private String generateFileId() {
        String raw = UUID.randomUUID().toString();
        return raw.replace("-", "");
    }
    
    /**
     * Deletes a directory (optionally recursively), removing children
     * bottom-up and publishing a sync event for every removed entry.
     *
     * @param dirInode directory inode to delete
     * @param recursive whether a non-empty directory may be deleted recursively
     * @param dirPath logical path of the directory (used for events and cache eviction)
     * @throws IllegalArgumentException if the directory is non-empty and recursive is false
     */
    private void deleteDirectoryWithSync(Inode dirInode, boolean recursive, String dirPath) throws Exception {
        // List the directory's entries.
        List<String> children = metadataStore.listDentries(dirInode.getFileId());
        
        // Refuse to delete a non-empty directory unless recursion was requested.
        if (!children.isEmpty() && !recursive) {
            throw new IllegalArgumentException("Directory not empty and recursive=false");
        }
        
        // Delete children first (bottom-up), publishing a sync event per entry.
        for (String childName : children) {
            Optional<String> childFileId = metadataStore.getDentry(dirInode.getFileId(), childName);
            if (childFileId.isPresent()) {
                Inode childInode = metadataStore.getInode(childFileId.get()).orElseThrow();
                String childPath = dirPath.endsWith("/") ? dirPath + childName : dirPath + "/" + childName;
                
                if (childInode.isDirectory()) {
                    // Recurse into subdirectories (always recursive below this point).
                    deleteDirectoryWithSync(childInode, true, childPath);
                } else {
                    // Delete a child file (blocks, inode, dentry, sync event).
                    deleteFileWithSync(childInode, childPath);
                }
                
                // Evict the child's path-cache entry.
                metadataStore.deletePathCache(childPath);
                
                // Remove the child's directory entry.
                // NOTE(review): the recursive calls above already delete the
                // child's dentry via its parentId, so this call appears
                // redundant — presumably deleteDentry is idempotent; confirm
                // against MetadataStore.
                metadataStore.deleteDentry(dirInode.getFileId(), childName);
            }
        }
        
        // Finally remove this directory's inode and its entry in the parent.
        metadataStore.deleteInode(dirInode.getFileId());
        if (dirInode.getParentId() != null) {
            metadataStore.deleteDentry(dirInode.getParentId(), dirInode.getName());
        }
        
        // Publish the delete event (the directory is empty at this point).
        publishEmptyDirectoryDeleteEvent(dirPath, dirInode);
    }
    
    /**
     * Deletes a file's metadata (block records, inode, directory entry) and
     * publishes a FILE_DELETE sync event so standby nodes can replay the removal.
     *
     * @param fileInode inode of the file being removed
     * @param filePath  full path of the file, used for the sync event
     */
    private void deleteFileWithSync(Inode fileInode, String filePath) throws Exception {
        String fileId = fileInode.getFileId();

        // Drop every block record that belongs to this file.
        for (BlockInfo blockInfo : metadataStore.getFileBlocks(fileId)) {
            metadataStore.deleteBlock(fileId, blockInfo.getBlkIdx());
            // TODO: schedule physical block deletion on the DataServers
        }

        // Remove the inode itself, then unlink it from its parent directory.
        metadataStore.deleteInode(fileId);
        String parentId = fileInode.getParentId();
        if (parentId != null) {
            metadataStore.deleteDentry(parentId, fileInode.getName());
        }

        // Notify replicas that the file is gone.
        publishFileDeleteEvent(filePath, fileInode);
    }
    

    

    
    /**
     * Publishes a DIRECTORY_CREATE sync event carrying the new directory's
     * full inode attributes. Failures are logged and swallowed so that sync
     * problems never break the create operation itself.
     */
    private void publishDirectoryCreateEvent(String path, Inode dirInode) {
        try {
            Map<String, Object> payload = new HashMap<>();
            payload.put("fileId", dirInode.getFileId());
            payload.put("name", dirInode.getName());
            payload.put("parentId", dirInode.getParentId());
            payload.put("permissions", dirInode.getPermissions());
            payload.put("owner", dirInode.getOwner());
            payload.put("group", dirInode.getGroup());
            payload.put("ctime", dirInode.getCtime().toString());
            payload.put("mtime", dirInode.getMtime().toString());

            syncService.publishSyncEvent(MetadataSyncService.SyncEventType.DIRECTORY_CREATE, path, payload);

            log.debug("发布目录创建同步事件: {}", path);
        } catch (Exception e) {
            // Best-effort: sync failures must not fail the caller.
            log.error("发布目录创建同步事件失败: {}", path, e);
        }
    }
    
    /**
     * Publishes a FILE_CREATE sync event carrying the new file's inode
     * attributes (including replication factor and size). Failures are logged
     * and swallowed so that sync problems never break the create itself.
     */
    private void publishFileCreateEvent(String path, Inode fileInode) {
        try {
            Map<String, Object> payload = new HashMap<>();
            payload.put("fileId", fileInode.getFileId());
            payload.put("name", fileInode.getName());
            payload.put("parentId", fileInode.getParentId());
            payload.put("permissions", fileInode.getPermissions());
            payload.put("owner", fileInode.getOwner());
            payload.put("group", fileInode.getGroup());
            payload.put("replication", fileInode.getReplication());
            payload.put("size", fileInode.getSize());
            payload.put("ctime", fileInode.getCtime().toString());
            payload.put("mtime", fileInode.getMtime().toString());

            syncService.publishSyncEvent(MetadataSyncService.SyncEventType.FILE_CREATE, path, payload);

            log.debug("发布文件创建同步事件: {}", path);
        } catch (Exception e) {
            // Best-effort: sync failures must not fail the caller.
            log.error("发布文件创建同步事件失败: {}", path, e);
        }
    }
    
    /**
     * Publishes a FILE_DELETE sync event identifying the removed file by id,
     * name and parent. Failures are logged and swallowed so sync problems
     * never break the delete operation itself.
     */
    private void publishFileDeleteEvent(String path, Inode fileInode) {
        try {
            Map<String, Object> payload = new HashMap<>();
            payload.put("fileId", fileInode.getFileId());
            payload.put("name", fileInode.getName());
            payload.put("parentId", fileInode.getParentId());

            syncService.publishSyncEvent(MetadataSyncService.SyncEventType.FILE_DELETE, path, payload);

            log.debug("发布文件删除同步事件: {}", path);
        } catch (Exception e) {
            // Best-effort: sync failures must not fail the caller.
            log.error("发布文件删除同步事件失败: {}", path, e);
        }
    }
    
    /**
     * Publishes a DIRECTORY_DELETE sync event for a directory that has already
     * been emptied, so the replay side never needs to recurse
     * ({@code recursive} is always {@code false} in the payload). Failures are
     * logged and swallowed so sync problems never break the delete itself.
     */
    private void publishEmptyDirectoryDeleteEvent(String path, Inode dirInode) {
        try {
            Map<String, Object> payload = new HashMap<>();
            payload.put("fileId", dirInode.getFileId());
            payload.put("name", dirInode.getName());
            payload.put("parentId", dirInode.getParentId());
            // The directory is empty at this point, so no recursion is needed.
            payload.put("recursive", false);

            syncService.publishSyncEvent(MetadataSyncService.SyncEventType.DIRECTORY_DELETE, path, payload);

            log.debug("发布空目录删除同步事件: {}", path);
        } catch (Exception e) {
            // Best-effort: sync failures must not fail the caller.
            log.error("发布空目录删除同步事件失败: {}", path, e);
        }
    }
    

}
