package dacp.etl.kafka.hdfs.connect.utils;

import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;

import com.google.common.collect.Lists;

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig;

/**
 * Extended HDFS sink connector configuration.
 *
 * <p>Adds custom settings on top of Confluent's {@link HdfsSinkConnectorConfig}:
 * file-naming strategy, record pre-processing (column splitting, datetime
 * conversion, character deletion), temp-file write concurrency, and
 * instance/task identifiers. All options are registered once in the static
 * initializer via the shared {@code ConfigDef} returned by {@code getConfig()}.
 */
public class CommonSinkConnectorConfig extends HdfsSinkConnectorConfig {

	// ---- Connector group: file naming -------------------------------------

	/** Fully qualified class name used to generate HDFS file names. */
	public static final String NAMES_CLASS_CONFIG = "names.class";
	private static final String NAMES_CLASS_DOC = "Name of the class used to generate HDFS file names";
	public static final String NAMES_CLASS_DEFAULT = "dacp.etl.kafka.hdfs.connect.names.DefaultNames";
	private static final String NAMES_CLASS_DISPLAY = "Names Class";

	/** Format template for HDFS file names; supports {hour_id}, {partition%..d}, {random%..d} tokens. */
	public static final String HDFS_FILENAME_FORMAT = "hdfs.name.fmt";
	private static final String HDFS_FILENAME_FORMAT_DOC = "HDFS file name format";
	public static final String HDFS_FILENAME_FORMAT_DEFAULT = "H110001{hour_id}{partition%02d}{random%09d}";
	private static final String HDFS_FILENAME_FORMAT_DISPLAY = "HDFS File Name Format";

	// ---- Process group: record pre-processing -----------------------------

	private static final String PROCESS_GROUP = "Process";

	// NOTE: constant name keeps the historical "SPLILT" typo for source
	// compatibility with existing callers; the config key itself is correct.
	public static final String PS_SPLILT_COLS = "split.cols";
	private static final String PS_SPLILT_COLS_DISPLAY = "The indexes of columns to split";

	/** Column indexes whose values are converted to datetime strings. */
	public static final String PS_TODATETIME_COLS = "to.datetime.cols";
	private static final String PS_TODATETIME_COLS_DISPLAY = "The indexes of columns to convert to datetime";

	/** Target pattern for the datetime conversion. */
	public static final String PS_TODATETIME_FMT = "to.datetime.fmt";
	private static final String PS_TODATETIME_FMT_DISPLAY = "The format used for datetime conversion";
	private static final String PS_TODATETIME_FMT_DEFAULT = "yyyyMMddHHmmss";

	/** Column indexes from which a character is removed. */
	public static final String PS_DELETE_CHAR_COLS = "del.char.cols";
	private static final String PS_DELETE_CHAR_COLS_DISPLAY = "The indexes of columns to delete a character from";

	/** The character removed from the columns listed in {@link #PS_DELETE_CHAR_COLS}. */
	public static final String PS_DELETE_CHAR = "del.char";
	private static final String PS_DELETE_CHAR_DISPLAY = "The character to delete";
	private static final String PS_DELETE_CHAR_DEFAULT = "f";

	/** Delimiter used when splitting an incoming record value into fields. */
	public static final String PS_RECORD_VAL_SPLIT_CHAR = "record.split.char";
	private static final String PS_RECORD_VAL_SPLIT_CHAR_DISPLAY = "The character used to split a record";
	private static final String PS_RECORD_VAL_SPLIT_CHAR_DEFAULT = "|";

	/** Delimiter used when joining processed fields back into a record value. */
	public static final String PS_RECORD_VAL_JOIN_CHAR = "record.join.char";
	private static final String PS_RECORD_VAL_JOIN_CHAR_DISPLAY = "The character used to join a record";
	private static final String PS_RECORD_VAL_JOIN_CHAR_DEFAULT = "|";

	// ---- Concurrency group: temp-file write mode --------------------------

	private static final String CONCURRENCY_GROUP = "Concurrency";

	/** Temp-file write concurrency mode; valid values are "partition" or "task". */
	public static final String TMP_FILE_WRITE_CONCURRENCY_MODE = "tmpfile.write.concurrency.mode";
	private static final String TMP_FILE_WRITE_CONCURRENCY_MODE_DISPLAY = "tmpfile.write.concurrency.mode, choose partition or task";
	private static final String TMP_FILE_WRITE_CONCURRENCY_MODE_DEFAULT = "partition";

	// ---- OnlyId group: instance/task identifiers --------------------------

	private static final String ONLYID_GROUP = "OnlyId";

	/** Connector instance identifier (free-form string, empty by default). */
	public static final String INST_ID = "inst.id";
	private static final String INST_ID_DISPLAY = "inst.id";
	private static final String INST_ID_DEFAULT = "";

	/** Task identifier; has no default, so it must be supplied explicitly. */
	public static final String TASK_ID = "task.id";
	private static final String TASK_ID_DISPLAY = "task.id";

	static {
		// Register all custom options on the ConfigDef shared with the parent class.
		ConfigDef config = getConfig();

		// Connector group: continue the parent's ordering at slots 12 and 13.
		config.define(NAMES_CLASS_CONFIG, Type.STRING, NAMES_CLASS_DEFAULT, Importance.MEDIUM, NAMES_CLASS_DOC,
				CONNECTOR_GROUP, 12, Width.MEDIUM, NAMES_CLASS_DISPLAY)
		.define(HDFS_FILENAME_FORMAT, Type.STRING,
						HDFS_FILENAME_FORMAT_DEFAULT, Importance.MEDIUM, HDFS_FILENAME_FORMAT_DOC, CONNECTOR_GROUP, 13,
						Width.MEDIUM, HDFS_FILENAME_FORMAT_DISPLAY);

		// Process group. The DISPLAY string doubles as the doc text for these options.
		int orderInGroup = 1;
		config.define(PS_SPLILT_COLS, Type.LIST, Lists.newArrayList(), Importance.HIGH, PS_SPLILT_COLS_DISPLAY, PROCESS_GROUP, orderInGroup++, Width.MEDIUM, PS_SPLILT_COLS_DISPLAY)
		.define(PS_RECORD_VAL_SPLIT_CHAR, Type.STRING, PS_RECORD_VAL_SPLIT_CHAR_DEFAULT, Importance.HIGH, PS_RECORD_VAL_SPLIT_CHAR_DISPLAY, PROCESS_GROUP, orderInGroup++, Width.MEDIUM, PS_RECORD_VAL_SPLIT_CHAR_DISPLAY)
		.define(PS_RECORD_VAL_JOIN_CHAR, Type.STRING, PS_RECORD_VAL_JOIN_CHAR_DEFAULT, Importance.HIGH, PS_RECORD_VAL_JOIN_CHAR_DISPLAY, PROCESS_GROUP, orderInGroup++, Width.MEDIUM, PS_RECORD_VAL_JOIN_CHAR_DISPLAY);

		config.define(PS_TODATETIME_COLS, Type.LIST, Lists.newArrayList(), Importance.MEDIUM, PS_TODATETIME_COLS_DISPLAY, PROCESS_GROUP, orderInGroup++, Width.MEDIUM, PS_TODATETIME_COLS_DISPLAY)
		.define(PS_TODATETIME_FMT, Type.STRING, PS_TODATETIME_FMT_DEFAULT, Importance.MEDIUM, PS_TODATETIME_FMT_DISPLAY, PROCESS_GROUP, orderInGroup++, Width.MEDIUM, PS_TODATETIME_FMT_DISPLAY)
		.define(PS_DELETE_CHAR_COLS, Type.LIST, Lists.newArrayList(), Importance.MEDIUM, PS_DELETE_CHAR_COLS_DISPLAY, PROCESS_GROUP, orderInGroup++, Width.MEDIUM, PS_DELETE_CHAR_COLS_DISPLAY)
		.define(PS_DELETE_CHAR, Type.STRING, PS_DELETE_CHAR_DEFAULT, Importance.MEDIUM, PS_DELETE_CHAR_DISPLAY, PROCESS_GROUP, orderInGroup++, Width.MEDIUM, PS_DELETE_CHAR_DISPLAY);

		// Concurrency group.
		orderInGroup = 1;
		config.define(TMP_FILE_WRITE_CONCURRENCY_MODE, Type.STRING, TMP_FILE_WRITE_CONCURRENCY_MODE_DEFAULT, Importance.MEDIUM, TMP_FILE_WRITE_CONCURRENCY_MODE_DISPLAY, CONCURRENCY_GROUP, orderInGroup++, Width.MEDIUM, TMP_FILE_WRITE_CONCURRENCY_MODE_DISPLAY);

		// OnlyId group. TASK_ID is defined via the no-default overload, making it required.
		orderInGroup = 1;
		config.define(INST_ID, Type.STRING, INST_ID_DEFAULT, Importance.HIGH, INST_ID_DISPLAY, ONLYID_GROUP, orderInGroup++, Width.MEDIUM, INST_ID_DISPLAY)
		.define(TASK_ID, Type.INT, Importance.HIGH, TASK_ID_DISPLAY, ONLYID_GROUP, orderInGroup++, Width.MEDIUM, TASK_ID_DISPLAY);
	}

	/**
	 * Creates the configuration from raw connector properties.
	 *
	 * @param props connector property map; parsed and validated by the parent class
	 */
	public CommonSinkConnectorConfig(Map<String, String> props) {
		super(props);
	}
}
