package cas.ihep.fs.hdfs;

import cas.ihep.fs.UnifiedBlock;
import cas.ihep.fs.UnifiedFile;
import cas.ihep.fs.impl.UnifiedFileSystemImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Time;

import java.io.Closeable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/**
 * Base abstraction over a single HDFS block, exposing its on-disk path on
 * this host through a {@link ClientDatanodeProtocol} proxy to the local
 * datanode. Concrete subclasses provide read-only and writable variants.
 */
abstract class HdfsBlock implements UnifiedBlock,Closeable{
    // Owning file; supplies the DFSClient and Hadoop configuration.
    HdfsFile parent;
    // The located block this handle wraps (id, size, token, replica locations).
    LocatedBlock block;
    // RPC proxy to the datanode on this host, or null when the block has no
    // replica on the local machine (see getLocalDatanodeProxy).
    ClientDatanodeProtocol datanode;
    // Cached absolute path of the block file on local disk; resolved lazily
    // by the subclasses' absolutePath() implementations.
    String blockPath;
    HdfsBlock(HdfsFile file) {
        parent=file;
        blockPath=null;
    }
    // Binds this wrapper to a concrete block and opens the local-datanode
    // proxy; may leave datanode null when no replica is local.
    void init(LocatedBlock blk)throws IOException{
        block=blk;
        datanode=getLocalDatanodeProxy(parent.hdfsFileSystem.dfsClient,parent.hdfsFileSystem.hdpConf,block);
    }

    /**
     * Releases the RPC proxy to the local datanode, if one was opened.
     * Idempotent: later calls find {@code datanode} null and do nothing.
     */
    public void close()throws IOException{
        if (datanode == null) {
            return;
        }
        try {
            RPC.stopProxy(datanode);
        } finally {
            // Null out even if stopProxy fails, so close() stays idempotent.
            datanode = null;
        }
    }

    /** No-op in the base class; subclasses do not override it here. */
    public void cancel(){}

    /** Returns the numeric HDFS block id of the wrapped block. */
    public long id(){
        return block.getBlock().getBlockId();
    }

    /** Returns the block length in bytes as reported by the namenode. */
    public long size(){
        return block.getBlockSize();
    }

    /**
     * Read-only handle for an HDFS block replica stored on this host's
     * datanode.
     */
    static class HdfsReadableBlock extends HdfsBlock{

        HdfsReadableBlock(HdfsFile file,LocatedBlock block) throws IOException {
            super(file);
            init(block);
        }

        /**
         * Resolves (and caches) the absolute on-disk path of the block file
         * by asking the local datanode. Returns null when there is no local
         * replica (no datanode proxy).
         */
        @Override
        public String absolutePath() throws IOException {
            if (blockPath != null || datanode == null) {
                return blockPath;
            }
            BlockLocalPathInfo pathInfo =
                    datanode.getBlockLocalPathInfo(block.getBlock(), block.getBlockToken());
            blockPath = pathInfo.getBlockPath();
            return blockPath;
        }

        /** Read-only handle. */
        @Override
        public int permission() {
            return UnifiedFile.PERMISSION_READ;
        }
    }

    /**
     * Writable handle for a freshly allocated HDFS block. Allocation goes
     * through the non-standard "4Hep" namenode/datanode RPCs discovered by
     * reflection in the static initializer below; construction fails when the
     * running Hadoop build does not expose them (canWrite == false).
     */
    static class HdfsWritableBlock extends HdfsBlock{
        HdfsWritableBlock(HdfsFile file) throws IOException{
            super(file);
            if(!canWrite){
                throw new IOException("Unsupported operation");
            }
            // Ask the namenode for a new block, then open the local proxy.
            LocatedBlock blk=addBlock(file.path().getPath(),file.fileStatus.getFileId());
            init(blk);
        }

        /**
         * Allocates a new block for {@code src} via the reflective
         * {@code addBlock4Hep} namenode RPC, retrying on
         * {@link NotReplicatedYetException} with exponential back-off
         * (mirrors DFSOutputStream's locateFollowingBlock logic).
         *
         * @param src    file path on HDFS
         * @param fileId namenode file id of {@code src}
         * @return the newly allocated block with its replica locations
         * @throws IOException on any non-retriable namenode failure, or once
         *                     the retry budget is exhausted
         */
        private LocatedBlock addBlock(String src,long fileId)  throws IOException {
            DFSClient dfsClient=parent.hdfsFileSystem.dfsClient;
            DFSClient.Conf dConf=dfsClient.getConf();
            try {
                int retries = DFSClientConfAdapter.nBlockWriteLocateFollowingRetry(dConf);
                ClientProtocol namenode= dfsClient.getNamenode();
                long sleeptime = 400;
                long localstart = Time.now();
                while (true) {
                    try {
                        return (LocatedBlock) addBlock4Hep.invoke(namenode,src, DFSClientAdapter.clientName(dfsClient), fileId);
                    }catch (InvocationTargetException ite){
                        Throwable target = ite.getTargetException();
                        if(target instanceof RemoteException){
                            RemoteException e=(RemoteException)target;
                            IOException ue =
                                    e.unwrapRemoteException(FileNotFoundException.class,
                                            AccessControlException.class,
                                            NSQuotaExceededException.class,
                                            DSQuotaExceededException.class,
                                            UnresolvedPathException.class);
                            if (ue != e) {
                                throw ue; // no need to retry these exceptions
                            }

                            if (NotReplicatedYetException.class.getName().
                                    equals(e.getClassName())) {
                                if (retries == 0) {
                                    throw e;
                                }
                                --retries;
                                DFSClient.LOG.info("Exception while adding a block", e);
                                if (Time.now() - localstart > 5000) {
                                    DFSClient.LOG.info("Waiting for replication for "
                                            + (Time.now() - localstart) / 1000
                                            + " seconds");
                                }
                                try {
                                    DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
                                            + " retries left " + retries);
                                    Thread.sleep(sleeptime);
                                    sleeptime <<=1;
                                } catch (InterruptedException ie) {
                                    // Restore the interrupt so callers up the
                                    // stack can observe the cancellation.
                                    Thread.currentThread().interrupt();
                                    DFSClient.LOG.warn("Caught exception ", ie);
                                }
                            } else {
                                throw e;
                            }
                        } else if (target instanceof IOException) {
                            // BUGFIX: previously any non-RemoteException target
                            // was silently swallowed and the while(true) loop
                            // re-invoked the RPC forever.
                            throw (IOException) target;
                        } else {
                            throw new IOException(target);
                        }
                    }
                }
            }catch (IllegalAccessException iae){
                // addBlock4Hep was looked up with getMethod, so this signals a
                // broken reflection setup rather than an HDFS failure.
                throw new IOException(iae);
            }
        }
        /**
         * Lazily creates (via the reflective {@code createTmp4Hep} datanode
         * RPC) and caches the on-disk path this new block will be written to.
         * Returns null when there is no local datanode proxy.
         */
        @Override
        public String absolutePath() throws IOException {
            if (blockPath != null || datanode == null) {
                return blockPath;
            }
            try {
                // First storage type of the block picks the tmp directory.
                Object[] storageTypes = (Object[]) getStorageTypes.invoke(block);
                String tmpDir = (String) createTmp4Hep.invoke(datanode, storageTypes[0], block.getBlock());
                StringBuilder path = new StringBuilder(tmpDir);
                if (path.charAt(path.length() - 1) != '/') {
                    path.append('/');
                }
                blockPath = path.append(block.getBlock().getBlockName()).toString();
                return blockPath;
            } catch (IllegalAccessException e) {
                throw new IOException(e);
            } catch (InvocationTargetException e) {
                Throwable cause = e.getTargetException();
                if (cause instanceof IOException) {
                    throw (IOException) cause;
                }
                throw new IOException(cause);
            }
        }

        /**
         * Computes chunked checksums for the finished block file, writes them
         * to the sibling ".meta" file next to the block, and records the final
         * byte count on {@code extendedBlock}.
         *
         * NOTE(review): relies on blockPath having been resolved by a prior
         * absolutePath() call — new File(null) would throw NPE here; confirm
         * the caller contract.
         */
        void calculatefileCheckSums(ExtendedBlock extendedBlock) throws IOException {
            File srcFile = new File(blockPath);
            File metaFile = new File(blockPath + "_" + extendedBlock.getGenerationStamp() + ".meta");
            HdfsFileCheckSum sum = new HdfsFileCheckSum(super.parent.hdfsFileSystem.createChecksum());
            sum.calculateChunkedSums(srcFile, metaFile);
            extendedBlock.setNumBytes(srcFile.length());
        }

        /**
         * Polls {@code ClientProtocol.complete} until the namenode reports the
         * file closed, backing off exponentially between attempts (mirrors
         * DFSOutputStream's completeFile logic).
         *
         * @param last   the last block of the file, with its final length set
         * @param src    file path on HDFS
         * @param fileId namenode file id of {@code src}
         * @throws IOException when the client stops running, the HDFS timeout
         *                     elapses, or the retry budget is exhausted
         */
        private void completeBlock(ExtendedBlock last, String src, long fileId) throws IOException {
            long localstart = Time.now();
            long localTimeout = 400;
            boolean fileComplete;
            DFSClient dfsClient=parent.hdfsFileSystem.dfsClient;
            DFSClient.Conf dConf=dfsClient.getConf();
            int hdfsTimeout=DFSClientConfAdapter.hdfsTimeout(dConf);
            int retries = DFSClientConfAdapter.nBlockWriteLocateFollowingRetry(dConf);
            ClientProtocol namenode=dfsClient.getNamenode();
            String cName=DFSClientAdapter.clientName(dfsClient);
            boolean cRunning=DFSClientAdapter.clientRunning(dfsClient);
            do{
                fileComplete =
                        namenode.complete(src, cName, last, fileId);
                if (!fileComplete) {
                    if (!cRunning ||
                            (hdfsTimeout > 0 && localstart + hdfsTimeout < Time.now())) {
                        String msg = "Unable to close file because dfsclient " +
                                " was unable to contact the HDFS servers." +
                                " clientRunning " + cRunning +
                                " hdfsTimeout " + hdfsTimeout;
                        DFSClient.LOG.info(msg);
                        throw new IOException(msg);
                    }
                    try {
                        if (retries == 0) {
                            throw new IOException("Unable to close file because the last block"
                                    + " does not have enough number of replicas.");
                        }
                        retries--;
                        Thread.sleep(localTimeout);
                        localTimeout <<=1;
                        if (Time.now() - localstart > 5000) {
                            DFSClient.LOG.info("Could not complete " + src + " retrying...");
                        }
                    } catch (InterruptedException ie) {
                        // BUGFIX: restore the interrupt status instead of
                        // silently swallowing it, so callers can cancel.
                        Thread.currentThread().interrupt();
                        DFSClient.LOG.warn("Caught exception ", ie);
                    }
                }
            }while (!fileComplete);
        }

        /**
         * Finalizes the block: writes its checksum ".meta" file, tells the
         * local datanode to finish the replica (reflective {@code finish}
         * RPC), asks the namenode to complete the file, and — in all cases —
         * releases the datanode proxy.
         */
        public void close()throws IOException{
            try {
                ExtendedBlock extendedBlock = block.getBlock();
                calculatefileCheckSums(extendedBlock);
                try {
                    finish.invoke(datanode, extendedBlock);
                } catch (IllegalAccessException e) {
                    throw new IOException(e);
                } catch (InvocationTargetException e) {
                    Throwable target = e.getTargetException();
                    if (target instanceof IOException) {
                        throw (IOException) target;
                    }
                    throw new IOException(target);
                }
                completeBlock(extendedBlock, parent.path().getPath(), parent.fileStatus.getFileId());
            } finally {
                // BUGFIX: this override never called super.close(), so the
                // datanode RPC proxy leaked — on every exit path. Release it.
                super.close();
            }
        }

        /** Write handle. */
        @Override
        public int permission() {
            return UnifiedFile.PERMISSION_WRITE;
        }

        // True only when the running Hadoop build exposes the non-standard
        // "4Hep" write RPCs probed for in the static initializer below.
        private static final boolean canWrite;
        // Reflective handles: datanode createTmp4Hep/finish, namenode
        // addBlock4Hep, and LocatedBlock.getStorageTypes; any of them may be
        // null when canWrite is false.
        private static final Method createTmp4Hep,finish,addBlock4Hep,getStorageTypes;
        static{
            Class<ClientDatanodeProtocol> cdpKlass=ClientDatanodeProtocol.class;
            Class<ClientProtocol> cpKlass=ClientProtocol.class;
            boolean writable=false;
            Method mCreateTmp4Hep=null,mFinish=null,mAddBlock4Hep=null,mGetStorageTypes=null;
            try {
                // StorageType is looked up by name because its package moved
                // between Hadoop versions; absence disables writing entirely.
                Class<?> klass=Class.forName("org.apache.hadoop.hdfs.StorageType");
                mCreateTmp4Hep=cdpKlass.getMethod("createTmp4Hep",klass, ExtendedBlock.class);
                mAddBlock4Hep=cpKlass.getMethod("addBlock4Hep",String.class,String.class,long.class);
                mFinish=cdpKlass.getMethod("finish",ExtendedBlock.class);
                mGetStorageTypes=LocatedBlock.class.getMethod("getStorageTypes");
                writable=true;
            } catch (NoSuchMethodException|ClassNotFoundException ignored) {
                // Intentionally swallowed: a missing class/method just means
                // this Hadoop build cannot write blocks (canWrite stays false).
            }
            canWrite=writable;
            createTmp4Hep=mCreateTmp4Hep;
            finish=mFinish;
            addBlock4Hep=mAddBlock4Hep;
            getStorageTypes=mGetStorageTypes;
        }
    }

    /**
     * Opens a {@link ClientDatanodeProtocol} proxy to the datanode co-located
     * with this host, or returns null when none of the block's replicas lives
     * on the local machine.
     */
    private static ClientDatanodeProtocol getLocalDatanodeProxy(DFSClient dfsClient, Configuration conf, LocatedBlock blk)throws IOException{
        DatanodeInfo localNode = null;
        for (DatanodeInfo candidate : blk.getLocations()) {
            if (UnifiedFileSystemImpl.localhostIP.equals(candidate.getIpAddr())) {
                localNode = candidate;
                break;
            }
        }
        if (localNode == null) {
            return null;
        }
        DFSClient.Conf clientConf = dfsClient.getConf();
        return DFSUtil.createClientDatanodeProtocolProxy(
                localNode,
                conf,
                DFSClientConfAdapter.socketTimeout(clientConf),
                DFSClientConfAdapter.connectToDnViaHostname(clientConf),
                blk);
    }
}
