package com.nanohadoop.hdfs;

import com.nanohadoop.hdfs.utils.FileUtils;
import com.nanohadoop.hdfs.utils.NetworkUtils;
import com.nanohadoop.hdfs.NameNode.BlockLocation;
import com.nanohadoop.hdfs.NameNode.DataNodeInfo;
import com.nanohadoop.utils.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.util.List;

/**
 * HDFS client providing file operations against a NameNode / DataNode cluster.
 *
 * <p>On write, a local file is split into blocks of {@code blockSize} bytes and
 * each block is pushed to every replica location assigned by the NameNode. On
 * read, each block is fetched from the first reachable replica, in order.
 *
 * <p>NOTE(review): instances share no mutable state beyond {@code networkUtils};
 * thread-safety depends on whether {@code NetworkUtils} is thread-safe — confirm.
 */
public class HDFSClient {
    private static final Logger log = LoggerFactory.getLogger(HDFSClient.class);

    private final NetworkUtils networkUtils;
    private final String nameNodeHost;
    private final int nameNodePort;
    // Block size in bytes; read from "dfs.block.size", default 64 MiB.
    private final long blockSize;

    public HDFSClient(String nameNodeHost, int nameNodePort) {
        this.nameNodeHost = nameNodeHost;
        this.nameNodePort = nameNodePort;
        this.networkUtils = new NetworkUtils();
        this.blockSize = Configuration.getInstance().getLong("dfs.block.size", 64 * 1024 * 1024);
    }

    /**
     * Registers a new (empty) file entry on the NameNode.
     *
     * @return true if the NameNode accepted the CREATE request
     */
    public boolean createFile(String path) throws IOException {
        return networkUtils.requestNameNode(nameNodeHost, nameNodePort, "CREATE", path);
    }

    /**
     * Creates a file and writes the given string content to it.
     *
     * @param path    HDFS path to create
     * @param content content to upload
     * @return false if the NameNode rejected the create request
     * @throws IOException on staging or upload failure
     */
    public boolean createFile(String path, String content) throws IOException {
        if (!createFile(path)) {
            return false;
        }

        // Stage the content in a local temp file, then upload it as blocks.
        File tempFile = File.createTempFile("hdfs", "tmp");
        try {
            FileUtils.writeStringToFile(tempFile, content);
            write(path, tempFile);
        } finally {
            // Always remove the staging file, even if the upload failed.
            if (!tempFile.delete()) {
                log.warn("Failed to delete temporary file: {}", tempFile);
            }
        }
        return true;
    }

    /**
     * Returns true if the file exists on the NameNode.
     * Convenience alias for {@link #exists(String)}.
     */
    public boolean fileExists(String path) throws IOException {
        return exists(path);
    }

    /**
     * Reads the whole file and returns its content as a single string.
     */
    public String readFile(String path) throws IOException {
        File tempFile = File.createTempFile("hdfs", "tmp");
        try {
            read(path, tempFile);
            return FileUtils.readFileToString(tempFile);
        } finally {
            // Remove the staging file even if the read failed.
            if (!tempFile.delete()) {
                log.warn("Failed to delete temporary file: {}", tempFile);
            }
        }
    }

    /**
     * Reads the whole file and returns its content as a list of lines.
     */
    public List<String> readFileToList(String path) throws IOException {
        File tempFile = File.createTempFile("hdfs", "tmp");
        try {
            read(path, tempFile);
            return FileUtils.readFileToList(tempFile);
        } finally {
            if (!tempFile.delete()) {
                log.warn("Failed to delete temporary file: {}", tempFile);
            }
        }
    }

    /**
     * Uploads a local file to an existing HDFS path. The file is split into
     * blocks of {@code blockSize} bytes; for each block the NameNode supplies
     * the replica locations and the block is written to every one of them.
     *
     * @param path      HDFS path; must already exist (see {@link #createFile(String)})
     * @param localFile local source file
     * @throws IOException if the path does not exist, no DataNode is available,
     *                     or a network/IO failure occurs
     */
    public void write(String path, File localFile) throws IOException {
        log.debug("Writing file to HDFS: {}, size={}", path, localFile.length());
        if (!exists(path)) {
            throw new IOException("File does not exist: " + path);
        }

        // Split the local file into block-sized temporary files.
        List<File> blocks = FileUtils.splitFile(localFile, blockSize);
        log.debug("Split file into {} blocks", blocks.size());

        try {
            for (int i = 0; i < blocks.size(); i++) {
                File block = blocks.get(i);
                // NOTE(review): timestamp-derived ids can collide across concurrent
                // writers; assumes the NameNode detects/rejects duplicates — confirm.
                long blockId = System.currentTimeMillis() + i;
                log.debug("Processing block {}, size={}, blockId={}", i, block.length(), blockId);

                List<DataNodeInfo> locations = networkUtils.requestBlockLocations(
                    nameNodeHost, nameNodePort, path, blockId, block.length());

                if (locations.isEmpty()) {
                    throw new IOException("No DataNode available");
                }

                log.debug("Got {} locations for block {}", locations.size(), blockId);

                // Write the block to every assigned replica.
                byte[] blockData = FileUtils.readFile(block.getPath());
                for (DataNodeInfo datanode : locations) {
                    log.debug("Writing block {} to DataNode: {}", blockId, datanode.getId());
                    networkUtils.writeBlock(datanode.getId(), blockId, blockData);
                }
            }
        } finally {
            // Clean up temporary block files even if an upload failed mid-way.
            for (File block : blocks) {
                if (!block.delete()) {
                    log.warn("Failed to delete temporary block file: {}", block);
                }
            }
        }
    }

    /**
     * Downloads an HDFS file into a local file. For each block, replicas are
     * tried in the order the NameNode returned them; the first successful read
     * wins. Fails only when every replica of some block is unreachable.
     *
     * @param path      HDFS path to read
     * @param localFile destination file (overwritten)
     * @throws IOException if the file is unknown/empty or some block could not
     *                     be read from any DataNode
     */
    public void read(String path, File localFile) throws IOException {
        log.debug("Reading file from HDFS: {}", path);
        List<BlockLocation> blockLocations = networkUtils.getFileBlockLocations(
            nameNodeHost, nameNodePort, path);

        log.debug("Found {} blocks for file: {}", blockLocations.size(), path);
        if (blockLocations.isEmpty()) {
            throw new IOException("File not found or empty: " + path);
        }

        try (FileOutputStream fos = new FileOutputStream(localFile)) {
            for (BlockLocation location : blockLocations) {
                log.debug("Processing block: {}", location.getBlockId());
                List<DataNodeInfo> datanodes = location.getLocations();
                log.debug("Block {} has {} replicas", location.getBlockId(), datanodes.size());

                boolean succeeded = false;
                for (DataNodeInfo datanode : datanodes) {
                    try {
                        log.debug("Trying to read block {} from DataNode: {}",
                            location.getBlockId(), datanode.getId());
                        byte[] blockData = networkUtils.readBlock(
                            datanode.getId(), location.getBlockId());
                        log.debug("Read {} bytes from block {}",
                            blockData.length, location.getBlockId());
                        fos.write(blockData);
                        succeeded = true;
                        break;
                    } catch (IOException e) {
                        // Replica failed; fall through to the next one.
                        log.debug("Failed to read from DataNode {}: {}",
                            datanode.getId(), e.getMessage());
                    }
                }

                if (!succeeded) {
                    throw new IOException("Failed to read block " + location.getBlockId() +
                        " from any DataNode");
                }
            }
        }
    }

    /**
     * Deletes the file entry on the NameNode.
     *
     * @return true if the NameNode accepted the DELETE request
     */
    public boolean delete(String path) throws IOException {
        return networkUtils.requestNameNode(nameNodeHost, nameNodePort, "DELETE", path);
    }

    /**
     * Asks the NameNode whether the given path exists.
     */
    public boolean exists(String path) throws IOException {
        return networkUtils.requestNameNode(nameNodeHost, nameNodePort, "EXISTS", path);
    }

    /**
     * Fetches metadata for a single path from the NameNode.
     */
    public FileStatus getFileStatus(String path) throws IOException {
        return networkUtils.getFileStatus(nameNodeHost, nameNodePort, path);
    }

    /**
     * Lists the entries under the given path on the NameNode.
     */
    public List<FileStatus> listFiles(String path) throws IOException {
        return networkUtils.listFiles(nameNodeHost, nameNodePort, path);
    }

    /**
     * Immutable file metadata as reported by the NameNode.
     */
    public static class FileStatus implements Serializable {
        private static final long serialVersionUID = 1L;

        private final String path;
        private final long length;
        private final boolean isDirectory;
        private final int replication;
        private final long blockSize;

        public FileStatus(String path, long length, boolean isDirectory,
                         int replication, long blockSize) {
            this.path = path;
            this.length = length;
            this.isDirectory = isDirectory;
            this.replication = replication;
            this.blockSize = blockSize;
        }

        public String getPath() { return path; }
        public long getLength() { return length; }
        public boolean isDirectory() { return isDirectory; }
        public int getReplication() { return replication; }
        public long getBlockSize() { return blockSize; }
    }

    /**
     * Demo entry point: writes a local file to HDFS, reads it back, and logs
     * the round-tripped content. Expects a NameNode on localhost:9000.
     */
    public static void main(String[] args) {
        HDFSClient client = new HDFSClient("localhost", 9000);
        try {
            // Example: write a file
            File inputFile = new File("local-example.txt");
            FileUtils.writeStringToFile(inputFile, "Hello, Mini Hadoop!");
            client.write("/example.txt", inputFile);

            // Example: read it back
            File outputFile = new File("local-output.txt");
            client.read("/example.txt", outputFile);
            log.debug("File content: {}", FileUtils.readFileToString(outputFile));
        } catch (IOException e) {
            // Log with the full stack trace instead of printStackTrace().
            log.error("HDFS demo failed", e);
        }
    }
}