package cn.getech.data.development.utils;

import cn.getech.data.development.config.properties.BdpJobConfig;
import cn.getech.data.development.config.properties.BdpModelConfig;
import cn.getech.data.development.constant.DataDevelopmentBizExceptionEnum;
import cn.getech.data.development.dto.FileModelDto;
import cn.getech.data.intelligence.common.exception.RRException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.servlet.http.HttpServletResponse;

import java.io.*;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.UUID;

/**
 * hdfs工具类
 */
public class HdfsUtil {
    private static final Logger logger = LoggerFactory.getLogger(HdfsUtil.class);

    /** HA client bound to the configured nameservice; public because some callers use it directly. */
    public DistributedFileSystem dfs;
    /** Client configuration, also reused by the {@code FileSystem.get(...)}-based helpers below. */
    public Configuration conf;

    /** Job configuration this client was built from; only set by the {@link BdpJobConfig} constructor. */
    public BdpJobConfig bdpJobConfig;

    /**
     * Builds an HDFS client from the job configuration bean.
     *
     * @param bdpJobConfig supplies namespace, comma-separated NameNode RPC addresses and RPC port
     * @throws Exception if the client cannot be initialized
     */
    public HdfsUtil(BdpJobConfig bdpJobConfig) throws Exception {
        this.bdpJobConfig = bdpJobConfig;
        initClient(bdpJobConfig.getNamespace(), bdpJobConfig.getNamenodestr(),
                "hdfs://" + bdpJobConfig.getNamespace() + ":" + bdpJobConfig.getNameport());
    }

    /**
     * Builds an HDFS client from raw connection parameters.
     *
     * @param namespace   HDFS nameservice id
     * @param namenodestr comma-separated NameNode RPC addresses ("host1:port,host2:port")
     * @param nameport    NameNode RPC port used in the connection URL
     * @throws Exception if the client cannot be initialized
     */
    public HdfsUtil(String namespace, String namenodestr, Integer nameport) throws Exception {
        initClient(namespace, namenodestr, "hdfs://" + namespace + ":" + nameport);
    }

    /**
     * Builds an HDFS client from the model configuration bean.
     *
     * @param bdpModelConfig supplies namespace, comma-separated NameNode RPC addresses and RPC port
     * @throws Exception if the client cannot be initialized
     */
    public HdfsUtil(BdpModelConfig bdpModelConfig) throws Exception {
        initClient(bdpModelConfig.getNamespace(), bdpModelConfig.getNamenodestr(),
                "hdfs://" + bdpModelConfig.getNamespace() + ":" + bdpModelConfig.getNameport());
    }

    /**
     * Shared constructor body: assembles the HA client {@link Configuration} in code
     * (no *-site.xml is loaded because the Configuration is created with
     * {@code new Configuration(false)}) and connects the {@link DistributedFileSystem}.
     *
     * @param ns          nameservice id
     * @param namenodestr comma-separated addresses of the two NameNodes (nn1, nn2)
     * @param hdfsRpcUrl  full connection URL, e.g. {@code hdfs://ns:8020}
     * @throws Exception if {@link DistributedFileSystem#initialize} fails
     */
    private void initClient(String ns, String namenodestr, String hdfsRpcUrl) throws Exception {
        conf = new Configuration(false);
        String[] nameNodesAddr = namenodestr.split(",");
        String[] nameNodes = {"nn1", "nn2"};
        conf.set("fs.defaultFS", "hdfs://" + ns);
        conf.set("dfs.nameservices", ns);
        conf.set("dfs.ha.namenodes." + ns, nameNodes[0] + "," + nameNodes[1]);
        conf.set("dfs.namenode.rpc-address." + ns + "." + nameNodes[0], nameNodesAddr[0]);
        conf.set("dfs.namenode.rpc-address." + ns + "." + nameNodes[1], nameNodesAddr[1]);
        conf.set("dfs.client.failover.proxy.provider." + ns,
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        conf.set("dfs.client.use.datanode.hostname", "true");
        conf.set("dfs.socket.timeout", "6000000");
        // umask 000 so created paths stay world-accessible (avoids permission
        // problems when other users read the data through Hive)
        conf.set("fs.permissions.umask-mode", "000");
        dfs = new DistributedFileSystem();
        dfs.initialize(URI.create(hdfsRpcUrl), conf);
    }

    /**
     * Writes {@code fileContent} to {@code dst}, replacing any existing file and
     * creating missing parent directories.
     *
     * @param fileContent bytes to write
     * @param dst         absolute destination path
     * @return true on success, false if any I/O operation failed (error is logged)
     */
    public boolean writeFile(byte[] fileContent, String dst) {
        Path dstPath = new Path(dst);
        try {
            if (dfs.exists(dstPath)) {
                dfs.delete(dstPath, true);
            }
            if (!dfs.exists(dstPath.getParent())) {
                dfs.mkdirs(dstPath.getParent());
            }
            // try-with-resources so the stream is closed even when write() throws
            try (FSDataOutputStream outputStream = dfs.create(dstPath)) {
                outputStream.write(fileContent);
            }
        } catch (IOException ie) {
            logger.error("Failed to write file to {}", dst, ie);
            return false;
        }
        return true;
    }

    /**
     * Appends the content of one HDFS file to another.
     *
     * <p>NOTE(review): despite the parameter names, this reads from {@code dst} and
     * appends into {@code srcPath} — the orientation looks inverted but callers
     * depend on it, so it is preserved as-is.</p>
     *
     * @param dst     path of the file whose content is read
     * @param srcPath path of the file that is appended to
     * @return true on success, false if any I/O operation failed (error is logged)
     */
    public boolean writeAppendFile(String dst, String srcPath) {
        Path readPath = new Path(dst);
        Path appendPath = new Path(srcPath);
        // try-with-resources closes both streams even if append()/copyBytes() throws;
        // copyBytes therefore no longer needs to close them itself (close=false).
        try (FSDataInputStream inputStream = dfs.open(readPath);
             FSDataOutputStream outputStream = dfs.append(appendPath)) {
            IOUtils.copyBytes(inputStream, outputStream, 4096, false);
        } catch (IOException ie) {
            logger.error("Failed to append content of {} into {}", dst, srcPath, ie);
            return false;
        }
        return true;
    }

    /**
     * Writes {@code fileContent} under {@code dst} (optionally inside a partition
     * sub-path) as a uniquely named file.
     *
     * @param fileContent bytes to write
     * @param dst         base destination directory
     * @param partitions  optional partition sub-path appended to {@code dst};
     *                    assumed to end with "/" — TODO confirm, otherwise the UUID
     *                    is fused onto the partition name (preserved behavior)
     * @param fileName    suffix appended after a random UUID to form the file name
     * @param writeType   2 = append (existing directory kept), anything else = overwrite
     *                    (existing directory deleted first)
     * @return true on success, false if any I/O operation failed (error is logged)
     */
    public boolean writeFile(byte[] fileContent, String dst, String partitions, String fileName, Integer writeType) {
        String rodownDataDst = dst + "/" + UUID.randomUUID() + fileName;
        if (StringUtils.isNotEmpty(partitions)) {
            rodownDataDst = dst + "/" + partitions + UUID.randomUUID() + fileName;
            dst = dst + "/" + partitions;
        }
        Path dstPath = new Path(dst);
        try {
            if (!Objects.equals(2, writeType)) {
                // overwrite mode: drop the whole target directory first
                if (dfs.exists(dstPath)) {
                    dfs.delete(dstPath, true);
                }
            }
            dstPath = new Path(rodownDataDst);
            if (!dfs.exists(dstPath.getParent())) {
                Path pPath = dstPath.getParent();
                // mkdirs(path, permission) is masked by the umask, so set 777
                // explicitly afterwards — prevents permission problems for other
                // users reading the data through Hive
                dfs.mkdirs(pPath);
                dfs.setPermission(pPath, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
            }
            // try-with-resources so the stream is closed even when write() throws
            try (FSDataOutputStream outputStream = dfs.create(dstPath)) {
                outputStream.write(fileContent);
            }
        } catch (IOException ie) {
            logger.error("Failed to write file to {}", rodownDataDst, ie);
            return false;
        }
        return true;
    }

    /**
     * Opens an HDFS file as a buffered stream.
     *
     * @param hdfsUrl  absolute HDFS URL of the file
     * @param response unused; kept for interface compatibility with existing callers
     * @return a buffered stream over the file, or null if opening failed
     *         (the caller is responsible for closing it)
     * @throws RRException if the file does not exist
     */
    public BufferedInputStream readFile(String hdfsUrl, HttpServletResponse response) {
        Path path = new Path(hdfsUrl);
        BufferedInputStream bis = null;
        try {
            FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
            if (fs.exists(path)) {
                bis = new BufferedInputStream(fs.open(path));
            } else {
                throw new RRException(DataDevelopmentBizExceptionEnum.MODEL_NOT_FOUND_IN_HDFS.getMessage());
            }
        } catch (IOException | IllegalArgumentException e) {
            logger.error("文件读取错误{}", hdfsUrl, e);
        }
        return bis;
    }

    /**
     * Opens an HDFS file.
     *
     * @param hdfsUrl absolute HDFS URL of the file
     * @return the open input stream, or null if opening failed
     *         (the caller is responsible for closing it)
     * @throws RRException if the file does not exist
     */
    public FSDataInputStream readFile(String hdfsUrl) {
        FSDataInputStream fis = null;
        Path path = new Path(hdfsUrl);
        try {
            FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
            if (fs.exists(path)) {
                fis = fs.open(path);
            } else {
                throw new RRException(DataDevelopmentBizExceptionEnum.MODEL_NOT_FOUND_IN_HDFS.getMessage());
            }
        } catch (IOException | IllegalArgumentException e) {
            logger.error("Failed to open {}", hdfsUrl, e);
        }
        return fis;
    }

    /**
     * Opens the files directly under {@code hdfsUrl} whose names are listed in
     * {@code fileNames}.
     *
     * @param hdfsUrl   absolute HDFS URL of the directory
     * @param fileNames names of the files to open
     * @return one {@link FileModelDto} (name + open stream) per matched file;
     *         the caller is responsible for closing the streams.
     *         Note: a missing directory raises an RRException that is swallowed by
     *         the broad catch below, yielding an empty list (preserved behavior).
     */
    public List<FileModelDto> readFile(String hdfsUrl, List<String> fileNames) {
        Path path = new Path(hdfsUrl);
        List<FileModelDto> restList = new ArrayList<>();
        try {
            FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
            if (fs.exists(path)) {
                for (FileStatus fileStatus : fs.listStatus(path)) {
                    // only open streams for requested files (the original opened
                    // every file and leaked the unmatched streams)
                    if (fileStatus.isFile() && fileNames.contains(fileStatus.getPath().getName())) {
                        FileModelDto fileModelDto = new FileModelDto();
                        fileModelDto.setFileName(fileStatus.getPath().getName());
                        fileModelDto.setFileInputstream(fs.open(fileStatus.getPath()));
                        restList.add(fileModelDto);
                    }
                }
                return restList;
            } else {
                throw new RRException(DataDevelopmentBizExceptionEnum.MODEL_NOT_FOUND_IN_HDFS.getMessage());
            }
        } catch (Exception e) {
            logger.error("Failed to list/open files under {}", hdfsUrl, e);
        }
        return restList;
    }

    /**
     * Lists the names of the sub-directories of {@code dir} (used to derive table
     * names from a database directory).
     *
     * @param dir absolute directory path
     * @return the names of the immediate sub-directories; empty if {@code dir} does not exist
     * @throws IOException if the FileSystem cannot be obtained or listed
     */
    public List<String> dbTableNameList(String dir) throws IOException {
        // NOTE(review): FileSystem.get(conf) may return a process-wide cached
        // instance; closing it here mirrors the original behavior — verify no
        // other code shares it.
        FileSystem fs = FileSystem.get(conf);
        List<String> tableNameList = new ArrayList<>();
        try {
            if (fs.exists(new Path(dir))) {
                for (FileStatus stat : fs.listStatus(new Path(dir))) {
                    if (stat.isDirectory()) {
                        tableNameList.add(stat.getPath().getName());
                    } else {
                        logger.debug("Skipping non-directory entry {}", stat.getPath());
                    }
                }
            }
        } finally {
            // close unconditionally (the original leaked fs when dir was missing)
            fs.close();
        }
        return tableNameList;
    }

    /**
     * Recursively deletes {@code dst} if it exists.
     *
     * @param dst absolute path
     * @return true on success (including "already absent"), false on I/O failure
     */
    public boolean delete(String dst) {
        Path dstPath = new Path(dst);
        try {
            if (dfs.exists(dstPath)) {
                dfs.delete(dstPath, true);
            }
        } catch (IOException ie) {
            logger.error("Failed to delete {}", dst, ie);
            return false;
        }
        return true;
    }

    /**
     * Creates the directory {@code dst} (with default permissions) if it does not exist.
     *
     * @param dst absolute path
     * @return true on success (including "already present"), false on I/O failure
     */
    public boolean mkdir(String dst) {
        Path dstPath = new Path(dst);
        try {
            if (!dfs.exists(dstPath)) {
                dfs.mkdirs(dstPath, FsPermission.getDefault());
            }
        } catch (IOException ie) {
            logger.error("Failed to create directory {}", dst, ie);
            return false;
        }
        return true;
    }

    /**
     * Renames {@code source} to {@code target} when the source exists.
     *
     * @param source absolute source path
     * @param target absolute target path
     * @return true unless an IOException occurred; note that a false result from
     *         {@code dfs.rename} itself is ignored (preserved behavior)
     */
    public boolean rename(String source, String target) {
        Path sourcePath = new Path(source);
        Path targetPath = new Path(target);
        try {
            if (dfs.exists(sourcePath)) {
                dfs.rename(sourcePath, targetPath);
            }
        } catch (IOException ie) {
            logger.error("Failed to rename {} to {}", source, target, ie);
            return false;
        }
        return true;
    }

    /** Closes the underlying {@link DistributedFileSystem}; failures are logged only. */
    public void close() {
        try {
            dfs.close();
        } catch (IOException e) {
            logger.error("Failed to close HDFS client", e);
        }
    }

    /**
     * Reads a UTF-8 text file into a String (each line terminated with '\n').
     *
     * @param hdfsUrl absolute HDFS URL of the file
     * @return the file content, or "" on any failure.
     *         Note: the RRException thrown for a missing file is swallowed by the
     *         broad catch below, so a missing file also yields "" (preserved behavior).
     */
    public String readFile2(String hdfsUrl) {
        Path path = new Path(hdfsUrl);
        StringBuilder content = new StringBuilder();
        try {
            FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
            if (fs.exists(path)) {
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        content.append(line).append('\n');
                    }
                }
            } else {
                throw new RRException("获取hdfs上的数据异常！url:" + hdfsUrl);
            }
        } catch (IOException ex) {
            logger.error("Failed to read {}", hdfsUrl, ex);
        } catch (Exception e) {
            logger.error("Failed to read {}", hdfsUrl, e);
        }
        return content.toString();
    }

    /**
     * Reads a UTF-8 text file into a String (each line terminated with '\n'),
     * closing the FileSystem when done.
     *
     * @param hdfsUrl absolute HDFS URL of the file
     * @return the file content; "" if the file is missing or reading failed
     */
    public String readFileData(String hdfsUrl) {
        Path path = new Path(hdfsUrl);
        StringBuilder content = new StringBuilder();
        FileSystem fs = null;
        try {
            fs = FileSystem.get(URI.create(hdfsUrl), conf);
            if (fs.exists(path)) {
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        content.append(line).append('\n');
                    }
                }
            } else {
                logger.warn("hdfs文件{}不存在", hdfsUrl);
            }
        } catch (Exception e) {
            logger.error("Failed to read {}", hdfsUrl, e);
        } finally {
            // close unconditionally (the original leaked fs on exceptions)
            if (fs != null) {
                try {
                    fs.close();
                } catch (IOException ignored) {
                    // best-effort close
                }
            }
        }
        return content.toString();
    }

    /**
     * Returns the total length in bytes of the content under {@code dir}.
     *
     * @param dir absolute path of a table or database directory
     * @return total size in bytes, or null if {@code dir} does not exist
     * @throws IOException if the FileSystem cannot be queried
     */
    public Long getTableOrDbSize(String dir) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        try {
            Path path = new Path(dir);
            if (fs.exists(path)) {
                return fs.getContentSummary(path).getLength();
            }
            return null;
        } finally {
            // close unconditionally (the original leaked fs when dir was missing)
            fs.close();
        }
    }

    /**
     * Returns the number of files under {@code dir}.
     *
     * @param dir absolute path of a table or database directory
     * @return file count, or null if {@code dir} does not exist
     * @throws IOException if the FileSystem cannot be queried
     */
    public Long getFileNum(String dir) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        try {
            Path path = new Path(dir);
            if (fs.exists(path)) {
                return fs.getContentSummary(path).getFileCount();
            }
            return null;
        } finally {
            // close unconditionally (the original leaked fs when dir was missing)
            fs.close();
        }
    }
}
