package cas.ihep.fs.hdfs;

import cas.ihep.fs.UnifiedAbstractFileSystem;
import cas.ihep.fs.UnifiedFile;
import cas.ihep.fs.UnifiedFileSystem;
import com.google.common.io.Closer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.util.DataChecksum;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;

public class HdfsFileSystem implements UnifiedAbstractFileSystem {

    // Fix: logger was previously created with HdfsBlock.class, misattributing log output.
    private static final Log LOG = LogFactory.getLog(HdfsFileSystem.class);

    /** Underlying Hadoop file system; registered with {@link #closer}. */
    FileSystem hdfs;
    /** Low-level DFS client — borrowed from {@link #hdfs} when it is a DistributedFileSystem,
     *  otherwise created here and owned (registered with {@link #closer}). */
    DFSClient dfsClient;
    /** Hadoop configuration obtained from the unified-FS configuration. */
    Configuration hdpConf;
    /** Checksum type and bytes-per-checksum derived from {@link #hdpConf}; see {@link #createChecksum()}. */
    private Options.ChecksumOpt defaultChecksumOpt;
    /** Owns all closeable resources opened by this instance; set to null after {@link #close()}. */
    private Closer closer;

    /**
     * Creates an HDFS-backed unified file system.
     *
     * @param uConf unified configuration wrapping a Hadoop {@link Configuration}
     * @throws IOException if the file system cannot be opened, or if {@code fs.defaultFS}
     *                     is not a valid URI (previously swallowed, leaving {@link #dfsClient}
     *                     null and causing NPEs on later calls)
     */
    public HdfsFileSystem(UnifiedFileSystem.Configuration uConf) throws IOException {
        hdpConf = uConf.hadoopConf();
        closer = Closer.create();
        defaultChecksumOpt = getChecksumOptFromConf(hdpConf);
        boolean initialized = false;
        try {
            hdfs = closer.register(FileSystem.get(hdpConf));
            if (hdfs instanceof DistributedFileSystem) {
                // Reuse the client embedded in the DFS instance; its lifetime is tied to hdfs.
                dfsClient = ((DistributedFileSystem) hdfs).getClient();
            } else {
                dfsClient = closer.register(new DFSClient(new URI(hdpConf.get("fs.defaultFS")), hdpConf));
            }
            initialized = true;
        } catch (URISyntaxException e) {
            // Fix: was an empty catch; now surfaced with its cause preserved.
            throw new IOException("Invalid fs.defaultFS URI: " + hdpConf.get("fs.defaultFS"), e);
        } finally {
            if (!initialized) {
                // Fix: release any already-opened resources on a failed construction.
                try {
                    closer.close();
                } catch (IOException suppressed) {
                    LOG.warn("Secondary failure while cleaning up after failed init", suppressed);
                }
            }
        }
    }

    /**
     * Opens an existing file for reading.
     *
     * @param path    file location
     * @param options open options (currently not consulted here — TODO confirm intended)
     * @return a unified read handle backed by HDFS
     */
    @Override
    public UnifiedFile open(URI path, UnifiedFileSystem.OpenOptions options) throws IOException {
        return new HdfsFile.HdfsFileIn(this, new Path(path));
    }

    /**
     * Creates a new file for writing.
     *
     * @param path    file location
     * @param options creation options (permission, overwrite, buffer/replication/block size)
     * @return a unified write handle backed by HDFS
     */
    @Override
    public UnifiedFile create(URI path, UnifiedFileSystem.CreateOptions options) throws IOException {
        return new HdfsFile.HdfsFileOut(this, new Path(path), options);
    }

    /**
     * Opens a raw input stream on an existing file.
     * NOTE(review): {@code options} is ignored (e.g. bufferSize) — confirm this is intended.
     */
    @Override
    public InputStream openStream(URI path, UnifiedFileSystem.OpenOptions options) throws IOException {
        return hdfs.open(new Path(path));
    }

    /**
     * Opens a raw output stream, creating the file.
     * Honors permission, overwrite flag, buffer size, replication and block size from
     * {@code options}; no progress callback is supplied.
     */
    @Override
    public OutputStream createStream(URI path, UnifiedFileSystem.CreateOptions options) throws IOException {
        EnumSet<CreateFlag> flags = options.overwrite()
                ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
                : EnumSet.of(CreateFlag.CREATE);
        return hdfs.create(new Path(path), new FsPermission(options.permission()), flags,
                options.bufferSize(), options.replication(), options.blockSize(), null);
    }

    /** Not supported for HDFS; always throws. */
    @Override
    public void persist(URI path) throws IOException {
        throw new IOException("Unsupported operation");
    }

    /** Not supported for HDFS; always throws. */
    @Override
    public void persist(UnifiedFile file) throws IOException {
        throw new IOException("Unsupported operation");
    }

    /** Not supported for HDFS; always throws. */
    @Override
    public void free(URI path) throws IOException {
        throw new IOException("Unsupported operation");
    }

    /** Not supported for HDFS; always throws. */
    @Override
    public void free(UnifiedFile file) throws IOException {
        throw new IOException("Unsupported operation");
    }

    /** Deletes {@code path} recursively via the DFS client. */
    @Override
    public void delete(URI path) throws IOException {
        dfsClient.delete(path.getPath(), true);
    }

    /**
     * Deletes the file recursively. Only {@link HdfsFile} instances are supported;
     * any other UnifiedFile implementation results in an IOException.
     */
    @Override
    public void delete(UnifiedFile file) throws IOException {
        if (file instanceof HdfsFile) {
            dfsClient.delete(file.path().getPath(), true);
            return;
        }
        throw new IOException("Unsupported operation");
    }

    /**
     * Creates a directory (and any missing parents). The requested permission is adjusted so
     * that every rwx triplet with read or write access also gets the execute (search) bit —
     * a directory that can be listed or written must also be traversable.
     */
    @Override
    public void mkdir(URI path, UnifiedFileSystem.CreateOptions options) throws IOException {
        short perm = addSearchBits(options.permission());
        hdfs.mkdirs(new Path(path), new FsPermission(perm));
    }

    /**
     * For each of the owner/group/other permission triplets, sets the execute bit (1) whenever
     * the read (4) or write (2) bit is present. Bits above the low nine permission bits are
     * preserved unchanged (the {@code 0xfe00} mask).
     */
    private static short addSearchBits(short perm) {
        short result = (short) (perm & 0xfe00);
        for (int shift = 0; shift <= 6; shift += 3) {
            int triplet = (perm >> shift) & 7;
            if ((triplet & 6) != 0) {
                triplet |= 1;
            }
            result |= triplet << shift;
        }
        return result;
    }

    /** Closes all owned resources exactly once; safe to call repeatedly. */
    @Override
    public void close() throws IOException {
        if (closer != null) {
            try {
                closer.close();
            } finally {
                closer = null; // ensure idempotence even if close() threw
            }
        }
    }

    /**
     * Reads the checksum type from {@code dfs.checksum.type}, defaulting to CRC32C.
     * An unrecognized value is logged and replaced by the CRC32C default.
     */
    private DataChecksum.Type getChecksumType(Configuration conf) {
        String checksum = conf.get("dfs.checksum.type", "CRC32C");
        try {
            return DataChecksum.Type.valueOf(checksum);
        } catch (IllegalArgumentException e) {
            LOG.warn("Bad checksum type: " + checksum + ". Using default CRC32C");
            return DataChecksum.Type.valueOf("CRC32C");
        }
    }

    /** Builds the default checksum options (type + {@code dfs.bytes-per-checksum}, default 512). */
    private Options.ChecksumOpt getChecksumOptFromConf(Configuration conf) {
        DataChecksum.Type type = getChecksumType(conf);
        int bytesPerChecksum = conf.getInt("dfs.bytes-per-checksum", 512);
        return new Options.ChecksumOpt(type, bytesPerChecksum);
    }

    /**
     * Creates a fresh {@link DataChecksum} from the configured defaults.
     *
     * @throws HadoopIllegalArgumentException if the resolved options yield no checksum
     */
    DataChecksum createChecksum() {
        Options.ChecksumOpt myOpt = Options.ChecksumOpt.processChecksumOpt(defaultChecksumOpt, null);
        DataChecksum dataChecksum = DataChecksum.newDataChecksum(myOpt.getChecksumType(), myOpt.getBytesPerChecksum());
        if (dataChecksum == null) {
            throw new HadoopIllegalArgumentException(
                    "Invalid checksum type: userOpt=null, default=" + defaultChecksumOpt + ", effective=null");
        }
        return dataChecksum;
    }

    /** @return the underlying Hadoop {@link FileSystem} */
    public FileSystem getHdfs() {
        return hdfs;
    }

    /** @return the low-level {@link DFSClient} */
    public DFSClient getDfsClient() {
        return dfsClient;
    }
}
