package com.experiment.aicommunityback.service.Impl;

import com.experiment.aicommunityback.service.HdfsSrevice;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.ServletOutputStream;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;

@Service
public class HdfsServiecImp implements HdfsSrevice {

    /** HDFS namenode URL, e.g. {@code hdfs://host:8020}, injected from configuration. */
    @Value("${hadoop.namenode}")
    private String url;

    /**
     * Creates a new HDFS {@link FileSystem} handle for the configured namenode.
     * <p>Callers own the returned instance and must {@code close()} it.
     *
     * @return a connected {@link FileSystem} (user "root")
     * @throws IOException          if the namenode URL is malformed or the connection fails
     * @throws InterruptedException if the connecting thread is interrupted
     */
    public FileSystem getFileSystem() throws IOException, InterruptedException {
        final URI uri;
        try {
            uri = new URI(url);
        } catch (URISyntaxException e) {
            // A malformed namenode URL is unrecoverable: surface it instead of
            // passing a null URI into FileSystem.get (which would NPE later).
            throw new IOException("URI生成错误：" + e.getMessage(), e);
        }
        Configuration configuration = new Configuration();
        // Tolerate datanode failures on small clusters: never try to replace a
        // failed datanode in the write pipeline.
        configuration.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        configuration.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        String user = "root";
        return FileSystem.get(uri, configuration, user);
    }


    /**
     * Recursively deletes the given HDFS path.
     *
     * @param path the HDFS path to delete
     * @return {@code true} if the delete succeeded, {@code false} on any failure
     */
    public boolean deleteFile(Path path) {
        FileSystem fs = null;
        try {
            fs = getFileSystem();
        } catch (IOException | InterruptedException e) {
            System.out.println("fs生成异常:" + e.getMessage());
            e.printStackTrace();
            return false; // cannot obtain a FileSystem — nothing to delete with
        }

        boolean result = false;
        try {
            // true => recursive delete (directories and their contents)
            result = fs.delete(path, true);
        } catch (IOException ioe) {
            System.out.println("删除文件异常:" + ioe.getMessage());
            ioe.printStackTrace();
        } finally {
            try {
                fs.close();
            } catch (IOException ioe) {
                System.out.println("关闭文件系统异常:" + ioe.getMessage());
                ioe.printStackTrace();
            }
        }
        return result;
    }


    /**
     * Uploads an HTTP multipart file to the given HDFS path, overwriting any
     * existing file at that location.
     *
     * @param file     the uploaded file whose stream is copied to HDFS
     * @param hdfsPath the destination path in HDFS
     * @return {@code true} only if the copy completed without error
     */
    public boolean uploadFile(MultipartFile file, Path hdfsPath) {
        FileSystem fs = null;
        try {
            fs = getFileSystem();
        } catch (IOException | InterruptedException e) {
            System.out.println("fs生成异常:" + e.getMessage());
            e.printStackTrace();
            return false; // cannot obtain a FileSystem — abort
        }

        // BUG FIX: the original returned true even when the copy threw
        // IOException; track success explicitly instead.
        boolean success = false;
        try (
                InputStream inputStream = file.getInputStream(); // request upload stream
                FSDataOutputStream outputStream = fs.create(hdfsPath) // HDFS output stream
        ) {
            // Copy the upload stream to HDFS in fixed-size chunks.
            byte[] buffer = new byte[1024];
            int bytesRead;
            while ((bytesRead = inputStream.read(buffer)) != -1) {
                outputStream.write(buffer, 0, bytesRead);
            }
            // Make sure all buffered data reaches the datanodes.
            outputStream.flush();
            success = true;
        } catch (IOException ioException) {
            System.out.println("上传出错: " + ioException.getMessage());
            ioException.printStackTrace();
        } finally {
            // Close the FileSystem handle we own.
            try {
                fs.close();
            } catch (IOException e) {
                System.out.println("关闭FileSystem出错: " + e.getMessage());
                e.printStackTrace();
            }
        }
        if (success) {
            System.out.println("文件上传成功");
        }
        return success;
    }

    /**
     * Reads an image (or any file) from HDFS into a byte array.
     *
     * @param hdfsPath the HDFS path of the image
     * @return the file's bytes, or an empty array if the path does not exist
     *         or any error occurs
     */
    public byte[] getImageOfByte(Path hdfsPath) {
        FileSystem fs = null;
        try {
            fs = getFileSystem();
            // BUG FIX: the original condition was inverted — it returned an
            // empty array whenever the file DID exist. Bail out only when the
            // path is missing.
            if (!fs.exists(hdfsPath)) {
                System.out.println("路径错误");
                try {
                    fs.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                return new byte[0];
            }
        } catch (IOException e) {
            System.out.println("IO异常" + e.getMessage());
            e.printStackTrace();
            return new byte[0];
        } catch (InterruptedException e) {
            System.out.println("fs生成异常:" + e.getMessage());
            e.printStackTrace();
            return new byte[0];
        }

        InputStream in = null;
        byte[] imageBytes = new byte[0]; // default: empty on any failure
        try {
            // BUG FIX: in.available() does not report the full file length and
            // a single read() need not fill the buffer; size the array from
            // the file status and loop until it is completely filled.
            long length = fs.getFileStatus(hdfsPath).getLen();
            imageBytes = new byte[(int) length];
            in = fs.open(hdfsPath);
            int offset = 0;
            while (offset < imageBytes.length) {
                int n = in.read(imageBytes, offset, imageBytes.length - offset);
                if (n < 0) {
                    break; // premature EOF — keep what we got
                }
                offset += n;
            }
        } catch (IOException e) {
            System.out.println("获取图片过程出错");
            e.printStackTrace();
        } finally {
            if (in != null) {
                try {
                    in.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            try {
                fs.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return imageBytes;
    }
}
