package com.sjgs.gis.fs.store;

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.GridFSDownloadStream;
import com.mongodb.client.gridfs.GridFSUploadStream;
import com.mongodb.client.gridfs.model.GridFSFile;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;
import com.sjgs.common.Constants;
import com.sjgs.common.utils.DirUtils;
import com.sjgs.common.utils.FileNameUtils;
import com.sjgs.gis.common.FileSystemFactory;
import com.sjgs.gis.domain.Chunk;
import com.sjgs.gis.domain.DataFile;
import com.sjgs.gis.domain.enumeration.ChunkStatus;
import com.sjgs.gis.domain.enumeration.DataFileType;
import com.sjgs.gis.fs.ChunkStore;
import com.sjgs.gis.fs.FSStore;
import com.sjgs.gis.service.ChunkService;
import com.sjgs.gis.errors.GridFSIOException;
import com.sjgs.gis.errors.InvalidChunkException;
import com.sjgs.gis.errors.LocalFileIOException;
import com.sjgs.gis.utils.RegionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.bson.Document;
import org.bson.types.ObjectId;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.ResourceRegion;
// NOTE: SimpleMongoClientDbFactory is deprecated in newer Spring Data MongoDB;
// switch to SimpleMongoClientDatabaseFactory when the Spring dependency is upgraded.
import org.springframework.data.mongodb.core.SimpleMongoClientDbFactory;
import org.springframework.data.mongodb.core.convert.MappingMongoConverter;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.data.mongodb.core.query.Query;
import org.springframework.data.mongodb.gridfs.GridFsOperations;
import org.springframework.data.mongodb.gridfs.GridFsResource;
import org.springframework.data.mongodb.gridfs.GridFsTemplate;
import org.springframework.http.HttpHeaders;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

import java.io.*;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;

import static com.sjgs.common.Constants.CHUNK_SIZE_LOW;

/**
 * GridFS-backed store for medium-size files (roughly 4M~1G).
 *
 * <p>Uploaded chunks are first staged on the local filesystem under {@code rootDir};
 * {@link #mergeChunks(DataFile, Long)} concatenates the finished chunks into a single
 * GridFS object and returns its ObjectId. The store registers itself with
 * {@link FileSystemFactory} under {@link DataFileType#GRIDFS}.
 *
 * <p>Only enabled when a {@link MongoClient} bean exists and
 * {@code filesystem.gridfs.enabled=true}.
 *
 * @author jxw
 */
@Service
@ConditionalOnBean(MongoClient.class)
@ConditionalOnProperty(value = "filesystem.gridfs.enabled", havingValue = "true")
public class GridFSStore implements FSStore, ChunkStore {

    private static final Log logger = LogFactory.getLog(GridFSStore.class);

    @Value("${filesystem.rootDir:/data}")
    private String rootDir;
    // NOTE(review): checkRetry/writeRetry are injected but not referenced in this
    // class — presumably used by a collaborator or planned retry logic; confirm.
    @Value("${filesystem.checkRetry:2}")
    private int checkRetry;
    @Value("${filesystem.writeRetry:5}")
    private int writeRetry;
    //TODO allow database/bucket to be chosen per call instead of fixed defaults
    @Value("${spring.data.mongodb.grid-fs-database:fs}")
    private String DEFAULT_DATABASE;
    @Value("${filesystem.gridfs.bucket:gis}")
    private String DEFAULT_BUCKET;

    @Autowired
    ChunkService chkservice;

    @Autowired
    GridFsOperations mongofsGridFsOperations;

    @Autowired
    GridFSBucket mongofsGridFSBucket;

    private final SimpleMongoClientDbFactory mongofsSimpleMongoClientDbFactory;
    private final MappingMongoConverter mongofsMappingMongoConverter;
    private final MongoClient mongofsMongoClient;

    public GridFSStore(SimpleMongoClientDbFactory mongofsSimpleMongoClientDbFactory, MappingMongoConverter mongofsMappingMongoConverter, MongoClient mongofsMongoClient) {
        this.mongofsSimpleMongoClientDbFactory = mongofsSimpleMongoClientDbFactory;
        this.mongofsMappingMongoConverter = mongofsMappingMongoConverter;
        this.mongofsMongoClient = mongofsMongoClient;
        // NOTE: calling an overridable method from a constructor is fragile;
        // registryFS() only touches the static factory, so it is safe today.
        this.registryFS();
    }

    /**
     * Returns the default {@link GridFsOperations} for the configured bucket, or a
     * fresh {@link GridFsTemplate} for any other bucket name.
     *
     * @param bucketName bucket name; {@code null} selects the default bucket
     */
    public GridFsOperations getGridFsOperations(String bucketName) {
        // Value equality, not reference identity — the previous '==' comparison
        // only matched when the exact same String instance was passed in.
        if (bucketName == null || bucketName.equals(DEFAULT_BUCKET)) {
            return mongofsGridFsOperations;
        }
        return new GridFsTemplate(mongofsSimpleMongoClientDbFactory, mongofsMappingMongoConverter, bucketName);
    }

    /**
     * Resolves a {@link MongoDatabase} by name, falling back to the configured
     * default database when {@code databaseName} is {@code null}.
     */
    public MongoDatabase getMongoDatabase(String databaseName) {
        if (databaseName == null || databaseName.equals(DEFAULT_DATABASE)) {
            return mongofsMongoClient.getDatabase(DEFAULT_DATABASE);
        }
        return mongofsMongoClient.getDatabase(databaseName);
    }

    /**
     * Resolves a {@link GridFSBucket}; the injected default bucket is reused when
     * {@code bucketName} is {@code null} or equals the configured default.
     */
    public GridFSBucket getBucket(String databaseName, String bucketName) {
        MongoDatabase database = getMongoDatabase(databaseName);
        if (bucketName == null || bucketName.equals(DEFAULT_BUCKET)) {
            return mongofsGridFSBucket;
        }
        return GridFSBuckets.create(database, bucketName);
    }

    /** Registers this store with the global {@link FileSystemFactory}. */
    @Override
    public void registryFS() {
        logger.info("registry gridfs to store providers");
        FileSystemFactory.registry(DataFileType.GRIDFS.name(), GridFSStore.class);
    }

    /**
     * Splits a dir spec of the form {@code database.bucket} at the first dot;
     * anything without a usable dot falls back to the configured defaults.
     *
     * <p>The previous check ({@code split("\\.").length > 2}) required THREE
     * segments, which made the two-part "database.bucket" form unreachable.
     */
    private String[] resolveDatabaseAndBucket(String dir) {
        String path = DirUtils.dir2Path(dir);
        int dot = path.indexOf('.');
        if (dot > 0 && dot < path.length() - 1) {
            return new String[]{path.substring(0, dot), path.substring(dot + 1)};
        }
        return new String[]{DEFAULT_DATABASE, DEFAULT_BUCKET};
    }

    /**
     * "Creates a directory" by ensuring the corresponding GridFS bucket exists.
     *
     * @param dir directory spec, optionally {@code database.bucket}
     * @return {@code true} on success, {@code false} on any failure (logged)
     */
    @Override
    public boolean mkdir(String dir) {
        try {
            String[] target = resolveDatabaseAndBucket(dir);
            GridFSBuckets.create(getMongoDatabase(target[0]), target[1]);
            return true;
        } catch (Exception e) {
            // Was silently swallowed before; log so failures are diagnosable.
            logger.warn("gridfs mkdir failed for dir: " + dir, e);
            return false;
        }
    }

    /**
     * Drops the GridFS bucket corresponding to {@code dir}.
     *
     * @return {@code true} on success, {@code false} on any failure (logged)
     */
    @Override
    public boolean rmdir(String dir) {
        try {
            String[] target = resolveDatabaseAndBucket(dir);
            GridFSBuckets.create(getMongoDatabase(target[0]), target[1]).drop();
            return true;
        } catch (Exception e) {
            logger.warn("gridfs rmdir failed for dir: " + dir, e);
            return false;
        }
    }

    // --- Unimplemented FSStore operations; kept as no-ops returning the
    // --- original sentinel values so existing callers are unaffected.

    /** Not supported for GridFS; always returns {@code false}. */
    @Override
    public boolean createFile(String path, String content) {
        return false;
    }

    /** Not supported for GridFS; always returns {@code false}. */
    @Override
    public boolean remove(String path) {
        return false;
    }

    /** Not supported for GridFS; always returns {@code false}. */
    @Override
    public boolean rename(String oldName, String newName) {
        return false;
    }

    /** Not supported for GridFS; always returns {@code false}. */
    @Override
    public boolean pushFile(String sourcePath, String targetPath) {
        return false;
    }

    /** Not supported; returns {@code null} (TODO: prefer an empty list). */
    @Override
    public List<String> getFiles(String dir) {
        return null;
    }

    /** Not supported for GridFS; always returns {@code false}. */
    @Override
    public boolean copyFile(String sourcePath, String targetPath) {
        return false;
    }

    /**
     * Merges the locally staged, FINISHED chunks of {@code datafile} into a single
     * GridFS object.
     *
     * @param datafile   file descriptor; its fkey locates the local chunk directory
     * @param chunkcount expected number of finished chunks
     * @return the new GridFS ObjectId as hex, or {@code ""} when the chunk set is
     *         incomplete or the merged size does not match {@code datafile.getFsize()}
     * @throws InvalidChunkException when the local chunk directory does not exist
     * @throws LocalFileIOException  when a staged chunk cannot be read
     * @throws GridFSIOException     on any other failure while setting up the upload
     */
    @Override
    public Mono<String> mergeChunks(DataFile datafile, Long chunkcount) {
        String chunkDir = DirUtils.getFileDir(rootDir, datafile.getDir(), datafile.getFkey());
        if (Files.notExists(Paths.get(chunkDir))) {
            throw new InvalidChunkException();
        }
        try {
            // Files above CHUNK_SIZE_LOW * 1024 use the larger GridFS chunk size.
            int chunkSize = (int) Constants.CHUNK_SIZE_LOW;
            if (datafile.getFsize() > Constants.CHUNK_SIZE_LOW * 1024) {
                chunkSize = (int) Constants.CHUNK_SIZE_HIGH;
            }
            GridFSUploadOptions options = new GridFSUploadOptions()
                    .chunkSizeBytes(chunkSize)
                    .metadata(new Document("fkey", datafile.getFkey())
                            .append("filename", FileNameUtils.getFileName(datafile.getFkey()))
                            .append("hash", FileNameUtils.getFileHash(datafile.getFkey())));
            //TODO allow database/bucket to be chosen per call
            GridFSUploadStream out = getBucket(DEFAULT_DATABASE, DEFAULT_BUCKET).openUploadStream(datafile.getFkey(), options);
            return chkservice.findAllByFkeyAndStatus(datafile.getFkey(), ChunkStatus.FINISHED.ordinal())
                    .sort(Comparator.comparing(Chunk::getIndex))
                    .collectList()
                    .flatMap(chunks -> appendChunks(datafile, chunkcount, chunkDir, out, chunks));
        } catch (Exception e) {
            throw new GridFSIOException(datafile.getFkey());
        }
    }

    /**
     * Streams the staged chunk files into {@code out} in index order.
     *
     * <p>On any incomplete/mismatched state the upload is {@code abort()}ed so no
     * partial GridFS file is committed (the old code leaked the stream on a count
     * mismatch and committed partial data via {@code close()} in a finally block).
     */
    private Mono<String> appendChunks(DataFile datafile, Long chunkcount, String chunkDir, GridFSUploadStream out, List<Chunk> chunks) {
        if (chunkcount == null || chunkcount != chunks.size()) {
            out.abort(); // discard the pending upload instead of leaking it
            return Mono.just("");
        }
        long offset = 0L;
        try {
            for (Chunk chunk : chunks) {
                String chunkPath = chunkDir + "/" + chunk.getIndex();
                if (Files.notExists(Paths.get(chunkPath))) {
                    break; // missing chunk → size check below fails → abort
                }
                File chunkFile = new File(chunkPath);
                // try-with-resources: the old code never closed this stream
                try (InputStream in = new BufferedInputStream(new FileInputStream(chunkFile))) {
                    org.apache.commons.io.IOUtils.copy(in, out, 4096);
                }
                offset += chunkFile.length();
            }
            if (offset == datafile.getFsize()) {
                String objectId = out.getObjectId().toHexString();
                out.close(); // commit the completed GridFS file
                return Mono.just(objectId);
            }
            out.abort(); // size mismatch: do not commit partial data
            return Mono.just("");
        } catch (IOException e) {
            out.abort();
            throw new LocalFileIOException("read local chunk error:" + datafile.getFkey());
        }
    }

    /**
     * Resolves a {@link GridFsResource} for the GridFS ObjectId stored in
     * {@code df.getUrl()}.
     *
     * @return a Mono emitting the resource, or a {@link GridFSIOException} error
     */
    public Mono<Resource> getResourceByFkey(DataFile df) {
        String url = df.getUrl();

        return Mono.<Resource>create(monoSink -> {
            try {
                long start = System.currentTimeMillis(); // primitive: avoid Long boxing
                //TODO allow database/bucket to be chosen per call
                GridFSFile gridFSFile = getGridFsOperations(DEFAULT_BUCKET).findOne(Query.query(Criteria.where("_id").is(url)));
                GridFSDownloadStream in = getBucket(DEFAULT_DATABASE, DEFAULT_BUCKET).openDownloadStream(new ObjectId(url));
                monoSink.success(new GridFsResource(gridFSFile, in));
                logger.info("gridfs getResourceByFkey cost time :" + (System.currentTimeMillis() - start) + "ms");
            } catch (Exception e) {
                monoSink.error(new GridFSIOException(url));
            }
        });
    }

    /**
     * Maps a resource to the {@link ResourceRegion} requested by the HTTP Range
     * headers (used for partial/streaming responses).
     */
    public Mono<ResourceRegion> getRegion(Mono<Resource> resourceMono, HttpHeaders headers) {
        return resourceMono.map(resource -> RegionUtils.getResourceRegion(resource, headers));
    }

}
