package com.sjgs.gis.fs.store;

import com.sjgs.common.utils.DirUtils;
import com.sjgs.common.utils.FileNameUtils;
import com.sjgs.gis.common.FileSystemFactory;
import com.sjgs.gis.domain.Chunk;
import com.sjgs.gis.domain.DataFile;
import com.sjgs.gis.domain.enumeration.ChunkStatus;
import com.sjgs.gis.domain.enumeration.DataFileType;
import com.sjgs.gis.errors.HDFSIOException;
import com.sjgs.gis.errors.InternalServerErrorException;
import com.sjgs.gis.errors.InvalidChunkException;
import com.sjgs.gis.errors.LocalFileIOException;
import com.sjgs.gis.fs.ChunkStore;
import com.sjgs.gis.fs.FSStore;
import com.sjgs.gis.fs.HdfsResource;
import com.sjgs.gis.service.ChunkService;
import com.sjgs.gis.utils.RegionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.ResourceRegion;
import org.springframework.http.HttpHeaders;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

import java.io.*;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;

/**
 * @author jxw
 * @apiNote 大于1G文件后端存储服务
 */
@Service
@ConditionalOnProperty(value = "filesystem.hdfs.enabled", havingValue = "true")
public class HDFSStore implements FSStore,ChunkStore {

    private static final Log logger = LogFactory.getLog(HDFSStore.class);

    // NameNode endpoint prefixed onto every path this store touches.
    @Value("${filesystem.hdfs.host:hdfs://localhost:9009}")
    private String hdfsHost;
    // Local staging root where upload chunks live before mergeChunks() runs.
    @Value("${filesystem.rootDir:/data}")
    private String rootDir;
    // NOTE(review): checkRetry and writeRetry are injected but never read in
    // this class — presumably consumed elsewhere or dead config; TODO confirm.
    @Value("${filesystem.checkRetry:2}")
    private int checkRetry;
    @Value("${filesystem.writeRetry:5}")
    private int writeRetry;
    // Default codec class name; only referenced by commented-out code below.
    private final String DEFAULT_COMPRESS_CODEC = "org.apache.hadoop.io.compress.DeflateCodec";

    @Autowired
    ChunkService chkservice;

    // Registers this store with the static FileSystemFactory on construction.
    // NOTE(review): this runs before Spring injects the @Value fields above and
    // calls an overridable public method from the constructor — safe only while
    // registryFS() touches static state, as it does today.
    public HDFSStore() {
        this.registryFS();
    }

    /**
     * Registers this implementation class with the global {@code FileSystemFactory}
     * under the {@code HDFS} data-file type key, so callers can resolve the
     * HDFS-backed store by type name.
     */
    @Override
    public void registryFS() {
        logger.info("registry hdfs to store providers");
        FileSystemFactory.registry(DataFileType.HDFS.name(), HDFSStore.class);
    }

    /**
     * Creates a directory (including missing parents) on HDFS.
     *
     * @param dir absolute directory path such as {@code /tmp/testdir}; blank
     *            paths and the bare root ("/" or "\\") are rejected
     * @return true when the directory exists after the call, false on rejected
     *         input or I/O failure
     */
    public boolean mkdir(String dir) {
        // BUG FIX: the original compared with == (reference identity), which is
        // never true for strings read from config/requests; use equals().
        if (StringUtils.isBlank(dir) || "\\".equals(dir) || "/".equals(dir)) {
            return false;
        }
        String url = hdfsHost + dir;
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("fs.hdfs.impl.disable.cache", "true");
        // try-with-resources: the original leaked the FileSystem handle.
        try (FileSystem hdfs = FileSystem.get(URI.create(url), conf)) {
            // Propagate mkdirs' own result instead of unconditionally true.
            return hdfs.mkdirs(new Path(url));
        } catch (IOException e) {
            logger.error("mkdir failed for " + url + ": " + e.getMessage(), e);
        }
        return false;
    }

    /**
     * Removes a directory from HDFS.
     *
     * <p>Not implemented: always returns {@code false} and performs no I/O.
     * Callers needing deletion should use {@link #remove(String)} instead.
     */
    @Override
    public boolean rmdir(String dir) {
        return false;
    }

    /**
     * Lists the immediate children (regular files, directories and symlinks) of
     * an HDFS directory; does not recurse into nested objects.
     *
     * @param dir a folder path such as {@code /tmp/testdir}
     * @return fully-qualified child paths; empty list on blank input or I/O error
     *         (partial results are returned if listing succeeded before a failure)
     */
    @Override
    public List<String> getFiles(String dir) {
        List<String> names = new ArrayList<>();
        if (StringUtils.isBlank(dir)) {
            return names;
        }
        String url = hdfsHost + dir;
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("fs.hdfs.impl.disable.cache", "true");
        // try-with-resources replaces the manual close-in-finally of the original.
        try (FileSystem hdfs = FileSystem.get(URI.create(url), conf)) {
            for (FileStatus stat : hdfs.listStatus(new Path(url))) {
                // The original had three identical branches for file/dir/symlink;
                // they all collected the path string, so one check suffices.
                if (stat.isFile() || stat.isDirectory() || stat.isSymlink()) {
                    names.add(stat.getPath().toString());
                }
            }
        } catch (IOException e) {
            logger.error("listStatus failed for " + url + ": " + e.getMessage(), e);
        }
        return names;
    }

    /**
     * Uploads a local file to HDFS via {@code copyFromLocalFile}.
     *
     * @param sourcePath path of the local source file
     * @param targetPath HDFS destination path relative to the configured host
     * @return true on success, false on blank arguments or I/O failure
     */
    @Override
    public boolean pushFile(String sourcePath, String targetPath) {
        if (StringUtils.isBlank(sourcePath) || StringUtils.isBlank(targetPath)) {
            return false;
        }
        String url = hdfsHost + targetPath;
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("fs.hdfs.impl.disable.cache", "true");
        // try-with-resources guarantees the FileSystem handle is released.
        try (FileSystem hdfs = FileSystem.get(URI.create(url), conf)) {
            hdfs.copyFromLocalFile(new Path(sourcePath), new Path(url));
            return true;
        } catch (IOException e) {
            // keep the stack trace; the original logged only the message
            logger.error("pushFile failed for " + url + ": " + e.getMessage(), e);
        }
        return false;
    }

    /**
     * Creates (or overwrites) a file on HDFS containing the given text,
     * encoded as UTF-8.
     *
     * @param fsFile  HDFS file path relative to the configured host
     * @param content text to write; must not be null (empty string is allowed)
     * @return true when the file was written, false on invalid input or I/O error
     */
    @Override
    public boolean createFile(String fsFile, String content) {
        if (StringUtils.isBlank(fsFile) || null == content) {
            return false;
        }
        String url = hdfsHost + fsFile;
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("fs.hdfs.impl.disable.cache", "true");
        // try-with-resources closes the stream even when write() throws; the
        // original leaked the FSDataOutputStream on any exception.
        try (FileSystem hdfs = FileSystem.get(URI.create(url), conf);
             FSDataOutputStream os = hdfs.create(new Path(url))) {
            os.write(content.getBytes(StandardCharsets.UTF_8));
            return true;
        } catch (IOException e) {
            logger.error("createFile failed for " + url + ": " + e.getMessage(), e);
        }
        return false;
    }

    /**
     * Recursively deletes a file or directory on HDFS.
     *
     * @param path HDFS path relative to the configured host
     * @return true when the path was deleted, false on blank input or when
     *         HDFS reports nothing was removed
     * @throws HDFSIOException when the delete call fails with an I/O error
     */
    @Override
    public boolean remove(String path) {
        if (StringUtils.isBlank(path)) {
            return false;
        }
        String url = hdfsHost + path;
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("fs.hdfs.impl.disable.cache", "true");
        // try-with-resources replaces the manual close-in-finally of the original.
        try (FileSystem hdfs = FileSystem.get(URI.create(url), conf)) {
            // recursive=true: directories are removed together with their contents
            return hdfs.delete(new Path(url), true);
        } catch (IOException e) {
            throw new HDFSIOException(e.getMessage());
        }
    }

    /**
     * Renames a file or directory on HDFS.
     *
     * <p>Not implemented: always returns {@code false} and performs no I/O.
     */
    @Override
    public boolean rename(String oldName, String newName) {
        return false;
    }

    /**
     * Merges the locally staged upload chunks of {@code datafile} into a single
     * file on HDFS, writing them in ascending chunk-index order.
     *
     * <p>Emits the resulting HDFS URL when all {@code chunkcount} chunks were
     * found and the byte total matches the recorded file size; emits the empty
     * string when chunks are missing/incomplete.
     *
     * @param datafile   metadata of the uploaded file (key, dir, size)
     * @param chunkcount expected number of finished chunks
     * @return Mono emitting the HDFS URL on success, or "" on incomplete merge
     * @throws InvalidChunkException   when the local chunk directory is absent
     * @throws LocalFileIOException    when reading a staged chunk fails
     * @throws HDFSIOException         on HDFS connection/write errors
     */
    @Override
    public Mono<String> mergeChunks(DataFile datafile, Long chunkcount) {
        // Target HDFS path and the local directory holding the staged chunks.
        String hdfsfile = DirUtils.getDir(datafile.getDir()) + FileNameUtils.getFileName(datafile.getFkey());
        String chunkDir = DirUtils.getFileDir(rootDir, datafile.getDir(), datafile.getFkey());
        if (Files.notExists(Paths.get(chunkDir))) {
            throw new InvalidChunkException();
        }
        Configuration conf = new Configuration();
        // Append support is required: each chunk after the first is appended.
        conf.setBoolean("dfs.support.append", true);
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("fs.hdfs.impl.disable.cache", "true");
        //CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        //CompressionCodec codec = factory.getCodecByClassName(DEFAULT_COMPRESS_CODEC);
        //HDFS system rpc client
        return chkservice.findAllByFkeyAndStatus(datafile.getFkey(), ChunkStatus.FINISHED.ordinal())
                .sort(Comparator.comparing(Chunk::getIndex))
                .collectList()
                .flatMap(chunks -> {
                    // Bail out with "" unless every expected chunk is FINISHED.
                    if (chunkcount != chunks.size()) {
                        return Mono.just("");
                    }
                    FileSystem hdfs = null;
                    long offset = 0L;
                    String hdfsurl = hdfsHost + hdfsfile;
                    try {
                        hdfs = FileSystem.get(new URI(hdfsurl), conf, "root");
                        // First chunk: create/truncate the target file.
                        FSDataOutputStream outputStream = hdfs.create(new Path(hdfsurl), true);
                        for (Chunk chunk : chunks) {
                            String chunkPath = chunkDir + "/" + chunk.getIndex();
                            // A missing chunk aborts the loop; the size check
                            // below then fails and "" is returned.
                            if (Files.notExists(Paths.get(chunkPath)))
                                break;
                            // Subsequent chunks: reopen the file in append mode,
                            // since copyBytes closed the stream last iteration.
                            if (offset > 0) {
                                outputStream = hdfs.append(new Path(hdfsurl), 4096);
                            }
                            File chunkFile = new File(chunkPath);
                            // NOTE(review): chunkPath is a *local* staging path but
                            // is opened through the HDFS client — this relies on how
                            // the path resolves against the default FS; TODO confirm.
                            FSDataInputStream input = hdfs.open(new Path(chunkPath));
                            // copyBytes with close=true closes both streams, so the
                            // two closeStream calls below are defensive no-ops.
                            IOUtils.copyBytes(input, outputStream, 4096, true);
                            IOUtils.closeStream(input);
                            IOUtils.closeStream(outputStream);
                            offset += chunkFile.length();
                        }
                        // Success only when the merged byte count matches exactly.
                        if (offset == datafile.getFsize()) {
                            return Mono.just(hdfsurl);
                        }
                    } catch (IOException e) {
                        throw new LocalFileIOException("read local chunk error:" + datafile.getFkey());
                    } catch (InterruptedException e) {
                        throw new HDFSIOException("hdfs error:" + hdfsurl);
                    } catch (URISyntaxException e) {
                        throw new HDFSIOException("hdfs error:" + hdfsurl);
                    } finally {
                        try {
                            if (hdfs != null)
                                hdfs.close();
                        } catch (IOException e) {
                            logger.error(e.getMessage());
                        }
                    }
                    return Mono.just("");
                });
    }

    /**
     * Copies a local file into HDFS by streaming its bytes into a newly
     * created HDFS file ({@code create()} overwrites; use append semantics
     * elsewhere to extend an existing file).
     *
     * @param sourcePath local file to read
     * @param targetPath HDFS destination path relative to the configured host
     * @return true when the copy completed, false on blank arguments
     * @throws HDFSIOException when reading or writing fails
     */
    @Override
    public boolean copyFile(String sourcePath, String targetPath) {
        if (StringUtils.isBlank(sourcePath) || StringUtils.isBlank(targetPath)) {
            return false;
        }
        String srcUrl = hdfsHost + sourcePath;
        String destUrl = hdfsHost + targetPath;
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("fs.hdfs.impl.disable.cache", "true");
        FileSystem hdfs = null;
        // try-with-resources guarantees the local stream is closed on any path.
        try (InputStream in = new BufferedInputStream(new FileInputStream(sourcePath))) {
            // BUG FIX: the original discarded the FileSystem returned here and
            // then dereferenced the still-null 'hdfs' on the next line (NPE).
            hdfs = FileSystem.get(URI.create(srcUrl), conf);
            FSDataOutputStream out = hdfs.create(new Path(destUrl));
            // close=true: copyBytes closes both streams when the copy finishes.
            IOUtils.copyBytes(in, out, 4096, true);
            return true;
        } catch (IOException e) {
            throw new HDFSIOException(e.getMessage());
        } finally {
            try {
                if (hdfs != null)
                    hdfs.close();
            } catch (IOException e) {
                logger.error(e.getMessage());
            }
        }
    }

    /**
     * Wraps the HDFS object referenced by the given DataFile's URL in a Spring
     * {@link Resource}, created lazily when the returned Mono is subscribed.
     *
     * @param df data file whose {@code url} points at the HDFS object
     * @return Mono emitting an {@code HdfsResource}, or erroring with
     *         {@code InternalServerErrorException} / {@code HDFSIOException}
     */
    public Mono<Resource> getResourceByFkey(DataFile df) {
        String url = df.getUrl();
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("dfs.client.datanode-restart.timeout", "60");
        conf.set("dfs.bytes-per-checksum", "1024");
        conf.set("dfs.client.socket-timeout", "90000");
        conf.set("dfs.client.cached.conn.retry", "1");
        conf.set("fs.hdfs.impl.disable.cache", "true");
        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        return Mono.<Resource>create(monoSink -> {
            try {
                long start = System.currentTimeMillis();
                FileSystem hdfs = FileSystem.get(URI.create(url), conf);
                HdfsResource resource = new HdfsResource(url, hdfs, factory);
                monoSink.success(resource);
                // log message previously said "localfs" — this is the HDFS store
                logger.info("hdfs getResourceByFkey cost time :" + (System.currentTimeMillis() - start) + "ms");
            } catch (MalformedURLException e) {
                // BUG FIX: signal through the sink; an exception thrown out of a
                // Mono.create callback (or after monoSink.error) is dropped by
                // Reactor instead of reaching the subscriber.
                monoSink.error(new InternalServerErrorException("url is invalid"));
            } catch (IOException e) {
                monoSink.error(new HDFSIOException(url));
            }
        });
    }

    /**
     * Resolves the byte range requested by the HTTP {@code Range} header
     * against the supplied resource, once it becomes available.
     *
     * @param resourceMono the resource to slice, emitted asynchronously
     * @param headers      request headers carrying the Range specification
     * @return Mono emitting the matching {@link ResourceRegion}
     */
    public Mono<ResourceRegion> getRegion(Mono<Resource> resourceMono, HttpHeaders headers) {
        return resourceMono.map(res -> RegionUtils.getResourceRegion(res, headers));
    }
}
