package cas.ihep.fs.hdfs;

import cas.ihep.fs.UnifiedBlock;
import cas.ihep.fs.UnifiedFile;
import cas.ihep.fs.UnifiedFileSystem;
import com.google.common.io.Closer;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.namenode.RetryStartFileException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Objects;

/**
 * Base adapter that exposes an HDFS file as a {@link UnifiedFile}.
 * Read access is provided by {@link HdfsFileIn}, write access by
 * {@link HdfsFileOut}. Per-block resources are funneled through a single
 * {@link Closer} so one {@link #close()} releases everything.
 */
abstract class HdfsFile implements UnifiedFile,Closeable{

    /** Owning file system; supplies the DFSClient and raw FileSystem handles. */
    HdfsFileSystem hdfsFileSystem;
    /** HDFS path of this file; fixed for the lifetime of the object. */
    private final Path hdfsPath;
    /** Status reported by the NameNode (length, file id, ...). */
    HdfsFileStatus fileStatus;
    /** Blocks handed out so far, in file order. */
    List<UnifiedBlock> blocks;

    /** Collects registered block resources; nulled out after close(). */
    Closer closer;

    HdfsFile(HdfsFileSystem fs, Path path) {
        hdfsFileSystem = fs;
        hdfsPath = path;
        blocks = new ArrayList<>();
        closer = Closer.create();
    }

    /**
     * Closes every resource registered with {@link #closer}. Idempotent:
     * the closer is nulled out (even when closing throws) so repeat calls
     * are no-ops.
     *
     * @throws IOException if any registered resource fails to close
     */
    public void close() throws IOException {
        if (closer != null) {
            try {
                closer.close();
            } finally {
                closer = null;
            }
        }
    }

    /** No local cache to release for HDFS-backed files. */
    public void free() {
    }

    /** Durability is handled by HDFS itself; nothing to flush here. */
    public void persist() {
    }

    /**
     * Closes this file and then removes it (recursively) from HDFS.
     *
     * @throws IOException if closing fails or HDFS reports the delete did
     *         not succeed (the original code silently dropped the boolean
     *         result of {@code delete})
     */
    public void delete() throws IOException {
        close();
        if (!hdfsFileSystem.hdfs.delete(hdfsPath, true)) {
            throw new IOException("Failed to delete " + hdfsPath);
        }
    }

    @Override
    public UnifiedBlock block(int index) {
        return blocks.get(index);
    }

    /** @return this file's location as a URI */
    public URI path() {
        return hdfsPath.toUri();
    }

    /** @return file length in bytes, as reported by the NameNode */
    public long length() {
        return fileStatus.getLen();
    }

    /**
     * Read-only view of an existing HDFS file. Resolves the file status and
     * all located blocks eagerly at construction time.
     */
    static class HdfsFileIn extends HdfsFile {
        HdfsFileIn(HdfsFileSystem fs, Path path) throws IOException {
            super(fs, path);
            String lPath = path.toUri().getPath();
            HdfsFileStatus status = fs.dfsClient.getFileInfo(lPath);
            // FIX: getFileInfo returns null when the path does not exist;
            // fail with a clear FileNotFoundException instead of an NPE on
            // getLen() below.
            if (status == null) {
                throw new FileNotFoundException("File does not exist: " + lPath);
            }
            super.fileStatus = status;
            LocatedBlocks located = fs.dfsClient.getLocatedBlocks(lPath, 0, status.getLen());
            for (LocatedBlock blk : located.getLocatedBlocks()) {
                super.blocks.add(super.closer.register(new HdfsBlock.HdfsReadableBlock(this, blk)));
            }
        }

        @Override
        public int permission() {
            return PERMISSION_READ;
        }

        /** Read-side files expose their blocks by index only; appending is not supported. */
        @Override
        public UnifiedBlock next() throws IOException {
            throw new IOException("Unsupported operation");
        }
    }

    /**
     * Write-side view: creates a new HDFS file via the NameNode protocol and
     * manages its lease. Blocks are appended one at a time through {@link #next()}.
     */
    static class HdfsFileOut extends HdfsFile {
        HdfsFileOut(HdfsFileSystem fs, Path path, UnifiedFileSystem.CreateOptions options) throws IOException {
            super(fs, path);
            super.fileStatus = createFile(path.toUri().getPath(),
                    FsPermission.createImmutable(options.permission()),
                    options.overwrite() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
                            : EnumSet.of(CreateFlag.CREATE),
                    options.createParent(), options.replication(), options.blockSize());
        }

        /** Max retries for RetryStartFileException (encryption-zone races). */
        private static final int CREATE_RETRY_COUNT = 10;
        private static final CryptoProtocolVersion[] SUPPORTED_CRYPTO_VERSIONS =
                CryptoProtocolVersion.supported();

        /**
         * Creates {@code src} on the NameNode and begins its file lease.
         * Retries up to {@link #CREATE_RETRY_COUNT} times when the NameNode
         * signals {@link RetryStartFileException}; every other wrapped remote
         * failure is unwrapped and rethrown.
         *
         * @param src          absolute HDFS path to create
         * @param masked       permission to apply
         * @param flag         create/overwrite flags
         * @param createParent whether to create missing parent directories
         * @param replication  replication factor
         * @param blockSize    block size in bytes
         * @return the status of the newly created file (never null)
         * @throws IOException on unrecoverable NameNode failure or retry exhaustion
         */
        HdfsFileStatus createFile(String src, FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
                                  short replication, long blockSize) throws IOException {
            HdfsFileStatus stat;
            int retryCount = CREATE_RETRY_COUNT;
            ClientProtocol namenode = hdfsFileSystem.dfsClient.getNamenode();
            while (true) {
                try {
                    stat = namenode.create(src, masked, DFSClientAdapter.clientName(hdfsFileSystem.dfsClient),
                            new EnumSetWritable<>(flag), createParent, replication,
                            blockSize, SUPPORTED_CRYPTO_VERSIONS);
                    break;
                } catch (RemoteException re) {
                    IOException e = re.unwrapRemoteException(
                            AccessControlException.class,
                            DSQuotaExceededException.class,
                            FileAlreadyExistsException.class,
                            FileNotFoundException.class,
                            ParentNotDirectoryException.class,
                            NSQuotaExceededException.class,
                            RetryStartFileException.class,
                            SafeModeException.class,
                            UnresolvedPathException.class,
                            SnapshotAccessControlException.class,
                            UnknownCryptoProtocolVersionException.class);
                    if (e instanceof RetryStartFileException) {
                        if (retryCount > 0) {
                            retryCount--;
                        } else {
                            throw new IOException("Too many retries because of encryption" +
                                    " zone operations", e);
                        }
                    } else {
                        throw e;
                    }
                }
            }
            Objects.requireNonNull(stat, "HdfsFileStatus should not be null!");
            DFSClientAdapter.beginFileLease(hdfsFileSystem.dfsClient, stat.getFileId());
            return stat;
        }

        /**
         * Closes registered block resources, then releases the file lease.
         * The lease is ended even when closing the blocks throws.
         */
        @Override
        public void close() throws IOException {
            try {
                super.close();
            } finally {
                // FIX: fileStatus is null if createFile() failed during
                // construction; guard so close() does not NPE in that case
                // (and does not re-end the lease on repeat close).
                if (fileStatus != null) {
                    HdfsFileStatus stat = fileStatus;
                    fileStatus = null;
                    DFSClientAdapter.endFileLease(hdfsFileSystem.dfsClient, stat.getFileId());
                }
            }
        }

        @Override
        public int permission() {
            return PERMISSION_WRITE;
        }

        /** Allocates the next writable block, registering it for cleanup on close. */
        @Override
        public UnifiedBlock next() throws IOException {
            UnifiedBlock block = closer.register(new HdfsBlock.HdfsWritableBlock(this));
            blocks.add(block);
            return block;
        }
    }
}
