package org.jst4me.hadoop.util;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.jst4me.common.StringUtil;
import org.jst4me.common.exceptions.ArgumentException;
import org.jst4me.hadoop.util.exceptions.DirectoryExistException;
import org.jst4me.hadoop.util.exceptions.NotInitException;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class HDFSAccessInterfaceImpl implements HDFSAccessInterface {

    /**
     * Matches server paths of the form {@code hdfs://<host>:<port>}, capturing host (group 1)
     * and port (group 2). Compiled once instead of on every {@link #init(String)} call.
     */
    private static final java.util.regex.Pattern SERVER_PATH_PATTERN =
            java.util.regex.Pattern.compile("^hdfs://(.*):(\\d*)$");

    /** Hadoop configuration assembled from the classpath *-site.xml resources. */
    private Configuration conf = null;

    /** Lazily (re)opened file-system handle; null until the constructor or init() succeeds. */
    private FileSystem hdfs = null;

    /**
     * Builds a {@link Configuration} from the classpath resources core-site.xml, hdfs-site.xml
     * and yarn-site.xml, then opens the default {@link FileSystem}.
     *
     * @throws IOException if the default file system cannot be created
     */
    public HDFSAccessInterfaceImpl() throws IOException {
        conf = new Configuration();
        conf.addResource("core-site.xml");
        conf.addResource("hdfs-site.xml");
        conf.addResource("yarn-site.xml");
        // Allow the write pipeline to continue on datanode failure; NEVER replace the failed
        // node — needed for append() to work on small clusters with few datanodes.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        //conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
        //conf.set("fs.file.impl", LocalFileSystem.class.getName());
        // No catch-and-rethrow here: the original try/catch simply rethrew the IOException.
        hdfs = FileSystem.get(conf);
    }

    /**
     * Points this instance at an explicit namenode given as {@code hdfs://host:port} and
     * reopens the file-system handle against it.
     *
     * @param serverPath namenode URI of the form {@code hdfs://host:port}
     * @throws ArgumentException if {@code serverPath} is null, empty, or not a valid HDFS URI
     * @throws IOException       if the file system cannot be opened
     */
    @Override
    public void init(String serverPath) throws ArgumentException, IOException {
        if (StringUtil.isNullOrEmpty(serverPath)) {
            throw new ArgumentException("serverPath is null or empty!");
        }
        java.util.regex.Matcher matcher = SERVER_PATH_PATTERN.matcher(serverPath);
        if (!matcher.matches()) {
            throw new ArgumentException("serverPath is not valid");
        }
        String server = matcher.group(1);
        String port = matcher.group(2);
        conf.set("fs.defaultFS", "hdfs://" + server + ":" + port);
        // Keep the append-friendly pipeline settings in sync with the constructor.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        this.hdfs = FileSystem.get(conf);
    }

    /**
     * Opens the given HDFS file for reading.
     *
     * @param hdfsFilePath absolute path of the file on HDFS
     * @return an open stream over the file; the caller is responsible for closing it
     * @throws NotInitException  if the file system has not been initialized
     * @throws ArgumentException if {@code hdfsFilePath} is null or empty
     * @throws IOException       if the file does not exist or cannot be opened
     */
    @Override
    public InputStream downToInputStream(String hdfsFilePath)
            throws NotInitException, ArgumentException, IOException {
        if (StringUtil.isNullOrEmpty(hdfsFilePath)) {
            throw new ArgumentException("hdfsFilePath can not be null or empty!");
        }
        checkInit();
        Path path = new Path(hdfsFilePath);
        if (!hdfs.exists(path)) {
            throw new IOException("file not on hadoop file system!");
        }
        return hdfs.open(path);
    }

    /**
     * Appends the contents of a local file to an existing file on HDFS.
     *
     * @param localFilePath path of the local source file
     * @param hdfsFilePath  path of the existing HDFS target file
     * @throws NotInitException  if the file system has not been initialized
     * @throws ArgumentException if either path is null/empty, the local file does not exist,
     *                           or the HDFS target file does not exist
     * @throws IOException       if the copy fails
     */
    @Override
    public void append(String localFilePath, String hdfsFilePath)
            throws NotInitException, ArgumentException, IOException {
        if (StringUtil.isNullOrEmpty(localFilePath, true)) {
            throw new ArgumentException("localFilePath can not be null or empty!");
        }
        // Validate the target path too (the original only checked the source path).
        if (StringUtil.isNullOrEmpty(hdfsFilePath, true)) {
            throw new ArgumentException("hdfsFilePath can not be null or empty!");
        }
        checkInit();
        File localFile = new File(localFilePath);
        if (!localFile.exists()) {
            throw new ArgumentException("local file is not exists");
        }
        Path path = new Path(hdfsFilePath);
        if (!this.hdfs.exists(path)) {
            throw new ArgumentException("file is not on hadoop file system!");
        }
        // try-with-resources: both streams are closed even if the copy throws
        // (the original leaked them on failure).
        try (InputStream inputStream = new FileInputStream(localFile);
             FSDataOutputStream fsDataOutputStream = hdfs.append(path)) {
            IOUtils.copy(inputStream, fsDataOutputStream);
        }
    }

    /**
     * Closes the underlying file-system handle.
     *
     * @throws NotInitException if the file system has not been initialized
     * @throws IOException      if closing fails
     */
    @Override
    public void close() throws NotInitException, IOException {
        checkInit();
        this.hdfs.close();
    }

    /**
     * Copies a local file to HDFS.
     *
     * @param localFilePath local source file path
     * @param hdfsFilePath  HDFS destination path
     * @param delSrc        whether to delete the local source after copying
     * @param override      whether to overwrite an existing destination file
     * @throws IOException       if the copy fails
     * @throws NotInitException  if the file system has not been initialized
     * @throws ArgumentException if either path is null/empty or the local file does not exist
     */
    @Override
    public void copyFileToHDFS(String localFilePath, String hdfsFilePath, boolean delSrc, boolean override)
            throws IOException, NotInitException, ArgumentException {
        if (StringUtil.isNullOrEmpty(localFilePath, true) || StringUtil.isNullOrEmpty(hdfsFilePath, true)) {
            throw new ArgumentException("localFilePath or hdfsFilePath can not be null or empty!");
        }
        File localFile = new File(localFilePath);
        if (!localFile.exists()) {
            throw new ArgumentException("localFile is not exists!");
        }
        this.checkInit();
        this.hdfs.copyFromLocalFile(delSrc, override, new Path(localFilePath), new Path(hdfsFilePath));
    }

    /**
     * Returns the underlying {@link FileSystem} handle.
     *
     * @throws NotInitException if the file system has not been initialized
     */
    @Override
    public FileSystem getFileSystem() throws NotInitException {
        // Reuse the shared guard instead of duplicating the null check.
        checkInit();
        return this.hdfs;
    }

    /**
     * Creates a directory on HDFS.
     *
     * @param filePathOnHDFS directory path to create
     * @throws IOException             if creation fails (including when mkdirs returns false)
     * @throws NotInitException        if the file system has not been initialized
     * @throws ArgumentException       if {@code filePathOnHDFS} is null or empty
     * @throws DirectoryExistException if the path already exists
     */
    @Override
    public void makeDir(String filePathOnHDFS)
            throws IOException, NotInitException, ArgumentException, DirectoryExistException {
        if (StringUtil.isNullOrEmpty(filePathOnHDFS, true)) {
            throw new ArgumentException("filePathOnHDFS can not be null or empty!");
        }
        checkInit();
        Path path = new Path(filePathOnHDFS);
        if (hdfs.exists(path)) {
            throw new DirectoryExistException();
        }
        // The original ignored the boolean result; surface a silent failure as an exception.
        if (!hdfs.mkdirs(path)) {
            throw new IOException("failed to create directory on hadoop file system: " + filePathOnHDFS);
        }
    }

    /** Throws {@link NotInitException} if no file-system handle has been opened yet. */
    private void checkInit() throws NotInitException {
        if (this.hdfs == null) {
            throw new NotInitException("hdfs not init!");
        }
    }
}
