package com.zoro.barn.commons.mongodb.dao;

import com.mongodb.MongoClientSettings;
import com.mongodb.MongoCredential;
import com.mongodb.ServerAddress;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.GridFSFindIterable;
import com.mongodb.client.gridfs.model.GridFSFile;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;
import com.zoro.barn.commons.mongodb.property.BarnMongoProperties;
import org.apache.commons.lang3.StringUtils;
import org.bson.Document;
import org.bson.types.ObjectId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

/**
 * GridFS-backed file storage DAO on top of the MongoDB Java driver.
 *
 * <p>Supports single-host and cluster connection setups and exposes
 * upload / download / delete / duplicate-lookup operations over GridFS.
 * Instances hold a pooled {@link MongoClient}; call {@link #close()} when
 * the DAO is no longer needed.
 *
 * @author zhaoxingwu
 */
public class MongoDao {

    private static final Logger LOGGER = LoggerFactory.getLogger(MongoDao.class);

    /** Metadata key under which a file's SHA digest is stored. */
    private static final String SHA_KEY = "sha";

    /** Shared driver client; thread-safe, created once per DAO instance. */
    private final MongoClient client;

    /** Cache of database handles keyed by database name. */
    private final Map<String, MongoDatabase> databaseMap = new ConcurrentHashMap<>();

    /**
     * Builds the MongoDB client from the given properties.
     *
     * @param property connection properties (hosts/ports, timeouts, pool sizes, credentials)
     * @throws IllegalArgumentException if cluster mode is enabled but no hosts are
     *                                  configured, or host and port counts differ
     */
    public MongoDao(BarnMongoProperties property) {
        List<ServerAddress> addresses = resolveAddresses(property);

        MongoClientSettings.Builder b = MongoClientSettings.builder()
                .applyToClusterSettings(builder ->
                        builder.serverSelectionTimeout(property.getSelectionTimeout(), TimeUnit.MILLISECONDS)
                                .hosts(addresses))
                // In cluster mode the configured host may even point at a secondary
                // node; the driver still routes writes to the primary, so no special
                // handling is needed here.
                .applyToSocketSettings(builder ->
                        builder.readTimeout(property.getReadTimeout(), TimeUnit.MILLISECONDS)
                                .connectTimeout(property.getConnectionTimeout(), TimeUnit.MILLISECONDS))
                .applyToConnectionPoolSettings(builder -> {
                    // Clamp pool bounds: min defaults to 5, max is never below min.
                    int minSize = property.getMinSize();
                    if (minSize <= 0) {
                        minSize = 5;
                    }
                    int maxSize = property.getMaxSize();
                    if (maxSize < minSize) {
                        maxSize = minSize;
                    }
                    builder.maxWaitTime(property.getMaxWait(), TimeUnit.MILLISECONDS)
                            .maxSize(maxSize)
                            .minSize(minSize);
                });
        if (property.getUserName() != null) {
            // A missing password is treated as an empty password, not as an error.
            char[] password = property.getPassword() == null
                    ? new char[0]
                    : property.getPassword().toCharArray();
            b.credential(MongoCredential.createCredential(property.getUserName(), property.getAuthDb(), password));
        }
        client = MongoClients.create(b.build());
    }

    /**
     * Resolves the server address list from the configuration, in either
     * cluster (multi-host) or single-host mode.
     *
     * @throws IllegalArgumentException if cluster mode is enabled but no hosts
     *                                  are configured
     */
    private static List<ServerAddress> resolveAddresses(BarnMongoProperties property) {
        List<ServerAddress> addresses = new ArrayList<>();
        if (property.isCluster()) {
            LOGGER.info("cluster mode, hosts:{},ports:{}", property.getHosts(), property.getPorts());
            if (StringUtils.isBlank(property.getHosts())) {
                throw new IllegalArgumentException(
                        "mongo config isCluster is true, but no hosts were specified; expected format: first.host,second.host");
            }
            String[] hosts = property.getHosts().split(BarnMongoProperties.HOST_SP);
            // A separators-only value (e.g. ",,") splits to an empty array.
            if (hosts.length == 0) {
                throw new IllegalArgumentException(
                        "mongo config isCluster is true, but no hosts were specified; expected format: first.host,second.host");
            }
            int[] ports = resolvePorts(property, hosts.length);
            for (int i = 0; i < hosts.length; i++) {
                addresses.add(new ServerAddress(hosts[i], ports[i]));
            }
        } else {
            LOGGER.info("single mode, host:{},port:{}", property.getHost(), property.getPort());
            addresses.add(new ServerAddress(property.getHost(), property.getPort()));
        }
        return addresses;
    }

    /**
     * Parses the configured port list into one port per host, falling back to
     * {@code BarnMongoProperties.DEFAULT_PORT} when no ports are configured.
     *
     * @param hostCount number of configured hosts
     * @throws IllegalArgumentException if the number of configured ports does
     *                                  not match the number of hosts
     * @throws NumberFormatException    if a configured port is not an integer
     */
    private static int[] resolvePorts(BarnMongoProperties property, int hostCount) {
        int[] ports = new int[hostCount];
        if (StringUtils.isBlank(property.getPorts())) {
            Arrays.fill(ports, BarnMongoProperties.DEFAULT_PORT);
            return ports;
        }
        String[] portsStr = property.getPorts().split(BarnMongoProperties.HOST_SP);
        // A separators-only value splits to an empty array; treat it as
        // "no ports configured" and use the default port everywhere,
        // matching the original behavior.
        if (portsStr.length == 0) {
            Arrays.fill(ports, BarnMongoProperties.DEFAULT_PORT);
            return ports;
        }
        if (portsStr.length != hostCount) {
            throw new IllegalArgumentException("mongo config specified " + hostCount
                    + " hosts but a different number of ports (" + portsStr.length + ")");
        }
        for (int i = 0; i < portsStr.length; i++) {
            ports[i] = Integer.parseInt(portsStr[i]);
        }
        return ports;
    }

    /**
     * Returns a cached {@link MongoDatabase} handle, creating it on first use.
     *
     * @param dbName database name, must not be blank
     * @return database handle
     * @throws NullPointerException if {@code dbName} is blank
     */
    private MongoDatabase getDatabase(String dbName) {
        if (StringUtils.isBlank(dbName)) {
            throw new NullPointerException("database name can't be null");
        }
        // computeIfAbsent is atomic on ConcurrentHashMap, so at most one
        // handle is ever created per database name (the old check-then-put
        // could create duplicates under concurrency).
        return this.databaseMap.computeIfAbsent(dbName, this.client::getDatabase);
    }

    /** Returns the default GridFS bucket of the given database. */
    private GridFSBucket getGridFSBucket(String dbName) {
        return GridFSBuckets.create(getDatabase(dbName));
    }

    /**
     * Uploads the stream contents to GridFS.
     *
     * <p>The caller's {@code meta} map is copied, never mutated; when
     * {@code sha} is non-blank it is stored under the {@code "sha"} metadata key.
     *
     * @param db       database name
     * @param is       file contents; the caller remains responsible for closing it
     * @param fileName file name to store
     * @param meta     optional user metadata, may be {@code null}
     * @param sha      optional SHA digest of the contents, may be blank
     * @return file id, stored in mongodb
     */
    public String upload(String db, InputStream is, String fileName, Map<String, Object> meta, String sha) {
        // Defensive copy: the previous implementation wrote the sha key back
        // into the caller's map, which was a surprising side effect.
        Map<String, Object> metadata = meta == null ? new HashMap<>() : new HashMap<>(meta);
        if (StringUtils.isNotBlank(sha)) {
            metadata.put(SHA_KEY, sha);
        }
        GridFSUploadOptions options = new GridFSUploadOptions().metadata(new Document(metadata));
        ObjectId id = getGridFSBucket(db).uploadFromStream(fileName, is, options);
        return id.toString();
    }

    /**
     * Deletes a file stored in mongo.
     *
     * @param dbName database name
     * @param id     file id as returned by {@link #upload}
     */
    public void deleteById(String dbName, String id) {
        getGridFSBucket(dbName).delete(new ObjectId(id));
    }

    /**
     * Downloads a file fully into memory.
     *
     * @param dbName database name
     * @param id     file id as returned by {@link #upload}
     * @return stream holding the complete file contents
     */
    public ByteArrayOutputStream download(String dbName, String id) {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        getGridFSBucket(dbName).downloadToStream(new ObjectId(id), os);
        return os;
    }

    /**
     * Finds the first file whose metadata matches the given sha and extra
     * parameters. Every key (including the sha key) is prefixed with
     * {@code "metadata."} before querying.
     *
     * @param dbName database name
     * @param sha    sha digest to match
     * @param meta   additional metadata filters, may be {@code null}
     * @return id of the first matching file, or {@code null} when none matches
     */
    public String findSameFile(String dbName, String sha, Map<String, Object> meta) {
        Map<String, Object> parameter = new HashMap<>();
        if (meta != null && !meta.isEmpty()) {
            for (Map.Entry<String, Object> entry : meta.entrySet()) {
                parameter.put("metadata." + entry.getKey(), entry.getValue());
            }
        }
        parameter.put("metadata." + SHA_KEY, sha);

        GridFSFile file = getGridFSBucket(dbName).find(new Document(parameter)).first();
        return file == null ? null : file.getObjectId().toString();
    }

    /**
     * Closes the underlying MongoDB client, releasing its connection pool.
     * The DAO must not be used after calling this. (Previously the client
     * could never be released.)
     */
    public void close() {
        client.close();
    }
}
