package com.gzy.fisherybackend.service.impl;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import com.gzy.fisherybackend.service.HdfsService;

import lombok.extern.slf4j.Slf4j;

/**
 * HDFS文件服务实现类
 */
@Slf4j
@Service
public class HdfsServiceImpl implements HdfsService {

    /** Buffer size used when streaming an upload to HDFS. */
    private static final int COPY_BUFFER_SIZE = 4096;

    private Configuration configuration;
    private FileSystem fileSystem;

    /** HDFS NameNode URI, e.g. {@code hdfs://host:9000}. */
    @Value("${hdfs.uri}")
    private String hdfsUri;

    /** User name HDFS operations are performed as. */
    @Value("${hdfs.user}")
    private String hdfsUser;

    /** Base HDFS directory under which uploaded files are stored. */
    @Value("${hdfs.path}")
    private String uploadPath;

    /**
     * Initializes the HDFS client once Spring has injected the {@code hdfs.*} properties.
     *
     * @throws URISyntaxException   if {@code hdfs.uri} is not a valid URI
     * @throws IOException          if the file system cannot be obtained
     * @throws InterruptedException if connecting as {@code hdfsUser} is interrupted
     */
    @PostConstruct
    public void init() throws URISyntaxException, IOException, InterruptedException {
        log.info("初始化HDFS: {}", hdfsUri);
        // hadoop.home.dir is already set in FisheryBackendApplication; no need to set it here.

        configuration = new Configuration();
        // Force the pure-Java FileSystem implementations so native libraries are not required.
        configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        configuration.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");

        // Disable the FileSystem instance cache so this bean owns its own instance.
        // NOTE(review): this does NOT disable native-library checks, contrary to the
        // original comment; it only bypasses FileSystem.get() caching.
        configuration.setBoolean("fs.hdfs.impl.disable.cache", true);
        // Default file system URI used when paths are not fully qualified.
        configuration.set("fs.defaultFS", hdfsUri);

        // RPC buffer / connection-retry tuning.
        configuration.setInt("io.file.buffer.size", 65536);
        configuration.setInt("ipc.client.connection.maxidletime", 3000);
        configuration.setInt("ipc.client.connect.max.retries", 100);
        configuration.setInt("ipc.client.connect.timeout", 10000);

        fileSystem = FileSystem.get(new URI(hdfsUri), configuration, hdfsUser);
        log.info("HDFS初始化成功，URI: {}", hdfsUri);
    }

    /**
     * Closes the HDFS client on application shutdown. Required because the
     * FileSystem cache is disabled above, so nothing else will close this
     * instance and its RPC connections would otherwise leak.
     */
    @PreDestroy
    public void destroy() {
        if (fileSystem != null) {
            try {
                fileSystem.close();
            } catch (IOException e) {
                // Best-effort cleanup during shutdown; log and continue.
                log.warn("关闭HDFS连接失败", e);
            }
        }
    }

    /**
     * Uploads a multipart file into the configured HDFS directory, overwriting
     * any existing file with the same name. The client-supplied filename is
     * reduced to its base name to prevent path traversal outside {@code uploadPath}.
     *
     * @param file the uploaded file; its original filename must be non-blank
     * @return the full HDFS path the file was written to
     * @throws IOException if the filename is missing/illegal or the write fails
     */
    @Override
    public String uploadFile(MultipartFile file) throws IOException {
        // Make sure the target directory exists before writing.
        ensureDirectoryExists(uploadPath);

        String fileName = sanitizeFileName(file.getOriginalFilename());
        // Path(parent, child) joins correctly whether or not uploadPath ends with '/'
        // (the original string concatenation produced "/uploadfile" without a slash).
        Path path = new Path(uploadPath, fileName);
        String hdfsFilePath = path.toString();

        try (InputStream inputStream = new BufferedInputStream(file.getInputStream());
             FSDataOutputStream outputStream = fileSystem.create(path, true)) {

            byte[] buffer = new byte[COPY_BUFFER_SIZE];
            int bytesRead;
            while ((bytesRead = inputStream.read(buffer)) != -1) {
                outputStream.write(buffer, 0, bytesRead);
            }
            outputStream.flush();
            log.info("文件上传成功: {}", hdfsFilePath);
            return hdfsFilePath;
        } catch (IOException e) {
            log.error("文件上传失败", e);
            throw e;
        }
    }

    /**
     * Reads an entire HDFS file into memory.
     *
     * @param hdfsFilePath full HDFS path, as returned by {@link #uploadFile}
     * @return the file's bytes
     * @throws IOException if the file does not exist, is too large to fit in a
     *                     byte array, or the read fails
     */
    @Override
    public byte[] downloadFile(String hdfsFilePath) throws IOException {
        Path path = new Path(hdfsFilePath);
        if (!fileSystem.exists(path)) {
            throw new IOException("文件不存在: " + hdfsFilePath);
        }

        long fileLength = fileSystem.getFileStatus(path).getLen();
        // byte[] is int-indexed: a plain (int) cast would silently overflow for
        // files >= 2GB, so fail loudly instead.
        if (fileLength > Integer.MAX_VALUE) {
            throw new IOException("文件过大，无法读入内存: " + hdfsFilePath);
        }

        try (FSDataInputStream inputStream = fileSystem.open(path)) {
            byte[] buffer = new byte[(int) fileLength];
            inputStream.readFully(0, buffer);
            log.info("文件下载成功: {}", hdfsFilePath);
            return buffer;
        } catch (IOException e) {
            log.error("文件下载失败", e);
            throw e;
        }
    }

    /**
     * Deletes a single HDFS file (non-recursive).
     *
     * @param hdfsFilePath full HDFS path of the file to delete
     * @return {@code true} if the file existed and was deleted, {@code false} otherwise
     * @throws IOException if the existence check or deletion fails
     */
    @Override
    public boolean deleteFile(String hdfsFilePath) throws IOException {
        Path path = new Path(hdfsFilePath);
        if (fileSystem.exists(path)) {
            boolean result = fileSystem.delete(path, false);
            if (result) {
                log.info("文件删除成功: {}", hdfsFilePath);
            } else {
                log.warn("文件删除失败: {}", hdfsFilePath);
            }
            return result;
        }
        return false;
    }

    /**
     * Ensures the given HDFS directory exists, creating it (and parents) if needed.
     */
    private void ensureDirectoryExists(String directory) throws IOException {
        Path path = new Path(directory);
        if (!fileSystem.exists(path)) {
            fileSystem.mkdirs(path);
            log.info("创建目录: {}", directory);
        }
    }

    /**
     * Strips any directory components from a client-supplied filename, guarding
     * against path traversal (e.g. "../../etc/passwd" or "..\\..\\x" from
     * Windows clients). Fails if no usable base name remains.
     *
     * @param originalName the raw original filename from the multipart request
     * @return the bare file name with all path separators removed
     * @throws IOException if the name is null, blank, or reduces to nothing
     */
    private static String sanitizeFileName(String originalName) throws IOException {
        if (originalName == null || originalName.trim().isEmpty()) {
            throw new IOException("文件名为空");
        }
        int cut = Math.max(originalName.lastIndexOf('/'), originalName.lastIndexOf('\\'));
        String name = originalName.substring(cut + 1).trim();
        if (name.isEmpty()) {
            throw new IOException("非法文件名: " + originalName);
        }
        return name;
    }
}