/*
 * Copyright 2020-2025 the original author or authors.
 * You cannot use this file unless authorized by the author.
 */
package org.ipig.commons.helper;


import org.apache.commons.lang3.StringUtils;
import org.ipig.commons.conf.mongo.MongoConf;
import org.ipig.constants.SymbolCnst;

import java.util.HashMap;
import java.util.Map;

/**
 * SparkMongoHelper
 * com.mongodb.spark.config.MongoInputConfig
 * com.mongodb.spark.config.MongoOutputConfig
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @version $Id: SparkMongoHelper.java 3 2020-01-21 09:36:33Z ts <comchnts@163.com> $
 * @since 1.0
 */
public final class SparkMongoHelper {

    /**
     * Read-preference option key (mirrors {@code ReadConfig.readPreferenceNameProperty()}).
     */
    public static final String KEY_READ_PREFERENCE_NAME = "readPreference.name";
    /**
     * Write-concern option key ({@code writeConcern.w}).
     */
    public static final String KEY_WRITE_CONCERN = "writeConcern.w";
    /**
     * MongoDB connection-string scheme prefix.
     */
    public static final String KEY_MONGODB = "mongodb://";
    /**
     * Collection-name option key (mirrors {@code ReadConfig.collectionNameProperty()}).
     */
    public static final String KEY_COLLECTION = "collection";
    /**
     * Database-name option key (mirrors {@code ReadConfig.databaseNameProperty()}).
     */
    public static final String KEY_DATABASE = "database";
    /**
     * Connection-URI option key (mirrors {@code MongoSharedConfig.mongoURIProperty()}).
     */
    public static final String KEY_URI = "uri";
    /**
     * Partitioner option key (mirrors {@code ReadConfig.partitionerProperty()}).
     */
    public static final String KEY_PARTITIONER = "partitioner";

    /**
     * MongoDB aggregation-pipeline option key.
     */
    public static final String PIPELINE = "pipeline";
    /**
     * Spark data-source short name for the MongoDB connector.
     */
    public static final String COM_MONGODB_SPARK_SQL = "com.mongodb.spark.sql";

    /**
     * Partitioner option controlling the target partition size in MB.
     */
    private static final String SPARK_MONGODB_INPUT_PARTITIONER_OPTIONS_PARTITION_SIZE_MB = "spark.mongodb.input.partitionerOptions.partitionSizeMB";

    /**
     * Default partition size (in MB) passed to the connector's partitioner.
     */
    private static final String DEFAULT_PARTITION_SIZE_MB = "64";

    /**
     * Read preference: prefer secondary members, fall back to primary.
     */
    public static final String READ_PREFERENCE_SECONDARY_PREFERRED = "secondaryPreferred";

    /**
     * Read preference: prefer the primary member, fall back to secondaries.
     */
    public static final String READ_PREFERENCE_PRIMARY_PREFERRED = "primaryPreferred";

    /**
     * Static utility class — not instantiable.
     */
    private SparkMongoHelper() {
        throw new AssertionError("No SparkMongoHelper instances for you!");
    }

    /**
     * Builds the Spark-MongoDB connector parameter map for the given configuration.
     * Delegates to {@link #getSparkMongoConnectorParams(MongoConf, String)}.
     *
     * @param conf           MongoDB connection configuration (address, credentials, database)
     * @param collectionName name of the collection to read
     * @return connector options map ({@code uri}, {@code database}, {@code collection}, ...)
     */
    public static Map<String, String> getConnectorParams(MongoConf conf, String collectionName) {
        return getSparkMongoConnectorParams(conf, collectionName);
    }

    /**
     * Builds the Spark-MongoDB connector parameter map for the given configuration.
     * <p>
     * The URI is assembled as {@code mongodb://user:password@address/db.collection}.
     * NOTE(review): the user name and password are concatenated verbatim — credentials
     * containing reserved URI characters (e.g. {@code @}, {@code :}, {@code /}) would
     * produce a malformed connection string; confirm upstream guarantees or percent-encode.
     *
     * @param conf           MongoDB connection configuration (address, credentials, database)
     * @param collectionName name of the collection to read
     * @return connector options map ({@code uri}, {@code database}, {@code collection},
     *         partition size, and a {@code secondaryPreferred} read preference)
     */
    public static Map<String, String> getSparkMongoConnectorParams(MongoConf conf, String collectionName) {
        Map<String, String> map = new HashMap<>();
        String uri = new StringBuilder()
                .append(KEY_MONGODB).append(conf.getUserName()).append(SymbolCnst.COLON).append(conf.getPassword())
                .append(SymbolCnst.AT).append(conf.getAddress()).append(SymbolCnst.SLASH).append(conf.getDbName())
                .append(SymbolCnst.DOT).append(collectionName).toString();
        // Newer connector versions fixed the incomplete-load bug, so the custom
        // partitioner (MongoSparkSamplePartitioner) is no longer registered here.
        map.put(SPARK_MONGODB_INPUT_PARTITIONER_OPTIONS_PARTITION_SIZE_MB, DEFAULT_PARTITION_SIZE_MB);
        map.put(KEY_URI, uri);
        map.put(KEY_DATABASE, conf.getDbName());
        map.put(KEY_COLLECTION, collectionName);
        // Read from secondaries to keep analytical load off the primary.
        map.put(KEY_READ_PREFERENCE_NAME, READ_PREFERENCE_SECONDARY_PREFERRED);
        return map;
    }

    /**
     * Builds a connector parameter map from explicit values.
     *
     * @param uri            full MongoDB connection URI
     * @param collectionName name of the collection to read
     * @param dbBase         database name
     * @param readPreference read preference name; when blank,
     *                       {@link #READ_PREFERENCE_PRIMARY_PREFERRED} is used
     * @return connector options map ({@code uri}, {@code database}, {@code collection},
     *         read preference)
     */
    public static Map<String, String> getConnectorParams(String uri, String collectionName, String dbBase,
                                                         String readPreference) {
        Map<String, String> map = new HashMap<>();
        map.put(KEY_COLLECTION, collectionName);
        map.put(KEY_URI, uri);
        map.put(KEY_DATABASE, dbBase);
        map.put(KEY_READ_PREFERENCE_NAME,
                StringUtils.isBlank(readPreference) ? READ_PREFERENCE_PRIMARY_PREFERRED : readPreference);
        return map;
    }
}
