package com.chenjj.bigdata.hbase.client;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

/**
 * Singleton utility wrapping a shared Hadoop {@link FileSystem} handle.
 * <p>
 * Obtain the instance via {@link #getInstance()}, or call {@link #setInstance(Configuration)}
 * once, before first use, to run against an explicit configuration. The underlying
 * HDFS connection is released by a JVM shutdown hook.
 */
public class HdfsClientUtil {

    private static final Logger logger = LoggerFactory.getLogger(HdfsClientUtil.class);

    // volatile is required for the double-checked locking in getInstance()/setInstance().
    private static volatile HdfsClientUtil instance;

    // Configuration used to build the FileSystem; replaceable exactly once via setInstance().
    private static Configuration _conf = new Configuration();

    // Shared FileSystem handle; closed by the shutdown hook through release().
    private FileSystem fs;

    private HdfsClientUtil() {
        try {
            fs = FileSystem.get(_conf);
        } catch (IOException e) {
            logger.error("FileSystem init failed", e);
            // Fail fast: the original swallowed this, leaving fs == null and every
            // later call failing with an uninformative NullPointerException.
            throw new RuntimeException("FileSystem init failed", e);
        }
        // Release the HDFS connection when the JVM exits.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                logger.info("HdfsClientUtil hook run, instance:{}", instance);
                if (instance != null) {
                    instance.release();
                }
                logger.info("HdfsClientUtil hook run success, instance:{}", instance);
            }
        });
    }

    /**
     * Returns the process-wide HdfsClientUtil. If {@link #setInstance(Configuration)}
     * was called first, the client runs against that configuration; otherwise it uses
     * the default {@link Configuration}.
     *
     * @return the singleton client
     */
    public static HdfsClientUtil getInstance() {
        if (instance == null) {
            synchronized (HdfsClientUtil.class) {
                if (instance == null) {
                    instance = new HdfsClientUtil();
                }
            }
        }
        return instance;
    }

    /**
     * Initializes the singleton with an explicit runtime configuration.
     * Must be called before the first {@link #getInstance()}; once initialized the
     * configuration may not be changed, for thread safety.
     *
     * @param conf the Hadoop configuration to use
     * @throws RuntimeException if the singleton has already been initialized
     */
    public static void setInstance(Configuration conf) {
        if (instance == null) {
            synchronized (HdfsClientUtil.class) {
                if (instance == null) {
                    _conf = conf;
                    instance = new HdfsClientUtil();
                }
            }
        } else {
            throw new RuntimeException("HdfsClientUtil已经被初始化，为了线程安全，请不要在运行时修改实例");
        }
    }


    /**
     * Uploads a local file to HDFS.
     *
     * @param src local source path
     * @param dst HDFS destination path
     * @throws RuntimeException if the copy fails (consistent with open/create)
     */
    public void put(String src, String dst) {
        try {
            fs.copyFromLocalFile(new Path(src), new Path(dst));
        } catch (IOException e) {
            // Rethrow instead of printStackTrace() so callers can detect the failure.
            throw new RuntimeException("put " + src + " -> " + dst + " failed", e);
        }
    }


    /**
     * Downloads an HDFS file to the local file system.
     *
     * @param src HDFS source path
     * @param dst local destination path
     * @throws RuntimeException if the copy fails
     */
    public void get(String src, String dst) {
        try {
            fs.copyToLocalFile(new Path(src), new Path(dst));
        } catch (IOException e) {
            throw new RuntimeException("get " + src + " -> " + dst + " failed", e);
        }
    }

    /**
     * Recursively deletes a file or directory.
     *
     * @param path the HDFS path to delete
     * @throws RuntimeException if the delete fails
     */
    public void rm(String path) {
        try {
            fs.delete(new Path(path), true);
        } catch (IOException e) {
            throw new RuntimeException("rm " + path + " failed", e);
        }
    }


    /**
     * Compresses an HDFS file using org.apache.hadoop.io.compress.GzipCodec.
     *
     * @param src source file, e.g. /user/hadoop/aa.txt
     * @param dst compressed target, e.g. /user/hadoop/text.gz
     * @throws HdfsClientException if codec setup, reading or writing fails
     */
    public void compress(String src, String dst) throws HdfsClientException {
        try {
            Class<?> codecClass = Class.forName("org.apache.hadoop.io.compress.GzipCodec");
            CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, fs.getConf());
            // try-with-resources closes all three streams even on failure;
            // closing the CompressionOutputStream flushes the gzip trailer.
            try (FSDataInputStream in = fs.open(new Path(src));
                 FSDataOutputStream outputStream = fs.create(new Path(dst));
                 CompressionOutputStream out = codec.createOutputStream(outputStream)) {
                IOUtils.copyBytes(in, out, fs.getConf());
            }
        } catch (Exception e) {
            // Catch Exception, not Throwable: JVM Errors should propagate unwrapped.
            throw new HdfsClientException(e);
        }
    }

    /**
     * Decompresses a file, choosing the codec from the source file's extension.
     *
     * @param src compressed source, e.g. /user/hadoop/text.gz
     * @param dst decompressed target, e.g. /user/hadoop/text
     * @throws HdfsClientException if no codec matches src, or reading/writing fails
     */
    public void uncompress(String src, String dst) throws HdfsClientException {
        Path inputPath = new Path(src);
        CompressionCodecFactory factory = new CompressionCodecFactory(fs.getConf());
        CompressionCodec codec = factory.getCodec(inputPath);
        if (codec == null) {
            throw new HdfsClientException("no codec found for " + src);
        }
        // Reuse fs.getConf() instead of allocating a fresh Configuration just for copyBytes.
        try (InputStream in = codec.createInputStream(fs.open(inputPath));
             OutputStream out = fs.create(new Path(dst))) {
            IOUtils.copyBytes(in, out, fs.getConf());
        } catch (IOException e) {
            throw new HdfsClientException(e);
        }
    }

    /**
     * Opens an HDFS file for reading. The caller is responsible for closing the stream.
     *
     * @param filePath the HDFS path to open
     * @return an input stream over the file
     * @throws RuntimeException if the open fails
     */
    public InputStream open(String filePath) {
        try {
            return fs.open(new Path(filePath));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates an HDFS file, overwriting any existing file at that path.
     * The caller is responsible for closing the stream.
     *
     * @param filePath the HDFS path to create
     * @return an output stream to the new file
     * @throws RuntimeException if the create fails
     */
    public OutputStream create(String filePath) {
        try {
            return fs.create(new Path(filePath), true);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Closes the HDFS connection and discards the singleton.
     * For thread safety this is only invoked by the JVM shutdown hook.
     */
    private void release() {
        if (fs != null) {
            try {
                fs.close();
                fs = null;
                instance = null;
                logger.info("HdfsClientUtil release success.");
            } catch (IOException e) {
                logger.error("HdfsClientUtil release failed", e);
            }
        }
    }
}
