package cn.ac.iie.ulss.indexer.runenvs;

import cn.ac.iie.common.util.config.ConfigureUtil;
import cn.ac.iie.common.util.misc.MiscTools;
import cn.ac.iie.ulss.indexer.metastore.MetastoreClientPool;
import cn.ac.iie.ulss.indexer.worker.IndexControler;
import cn.ac.iie.ulss.indexer.worker.RocketDataPuller;
import cn.ac.iie.ulss.indexer.writer.LuceneConfig;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.I0Itec.zkclient.ZkClient;
import org.apache.avro.Protocol;
import org.apache.avro.Schema;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.log4j.Logger;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;

/**
 * Global configuration and shared-state holder for the indexer.
 *
 * <p>All settings are exposed as public static fields, populated once by
 * {@link #init()} from {@code config.properties}. Fields are grouped by
 * subsystem (Lucene, RocketMQ, metastore, ZooKeeper, ...). The field names
 * form the class's external interface and must not be renamed.
 *
 * <p>NOTE(review): this class is not thread-safe during initialization;
 * {@link #init()} is expected to run once at startup before any reader —
 * confirm against the bootstrap code.
 */
public class GlobalParas {

    public static Logger log = Logger.getLogger(GlobalParas.class.getName());
    public static ConfigureUtil cfg = null;
    public static Protocol docProtocol = null;
    public static Protocol docsProtocol = null;
    public static Schema utl_docschema = null;
    public static int hlwthreadpoolSize = 120;
    public static int submitTimeout = 1;
    public static int writeMinNumOnce = 10000;
    /*
     * for lucene
     */
    public static final Object mergeControlLock = new Object();
    public static int luceneMaxThreadStates = 8;
    public static int luceneWriterMaxParallelNum = 6; // max concurrent lucene writers, to keep parallelism from wasting resources
    public static int commitgab = 200;
    public static AtomicInteger closeDelay = new AtomicInteger(15); //that is 15 seconds
    public static String mergePolicy = "";
    public static int maxParallelMergeFileNum = 3;
    public static AtomicInteger currentMergeFileNum = new AtomicInteger(0);
    public static ConcurrentHashMap<String, String> mergeDevice = new ConcurrentHashMap<String, String>(50, 0.8F);
    public static ConcurrentHashMap<String, LuceneConfig> luceneConfigMap = new ConcurrentHashMap<String, LuceneConfig>(20, 0.8F);
    public static ConcurrentHashMap<String, List<String>> dbName2tableName = new ConcurrentHashMap<String, List<String>>();
    public static String hugesizeHashVals = "0,";
    /*
     * for rocket
     */
    public static List<Long> truncateFileId = new ArrayList<Long>();
    public static String rocketNameServer = "";
    public static String rocketConsumeGroupPrefix = "";
    public static String rocketProducerGroupPrefix = "";
    public static String errorDataTopicSuffix = "";
    public static int rocketMinConsumeThread = 4;
    public static int rocketMaxConsumeThread = 8;
    public static int rocketPullDatabufferSize = 500;
    public static int rocketConsumeMessageBatchMaxSize = 32;
    public static int rocketPullBatchSize = 32;
    public static int consumeConcurrentlyMaxSpan = 65535;
    public static int pullThresholdForQueue = 10;
    public static int rocketConsumesetMaxMessageSizeMB = 8;
    public static int clientCallbackExecutorThreads = 40;
    public static int compressMsgBodyOverHowmuchInKB = 512;
    public static String error_file_mq = "error_file_mq";
    public static String rocketConsumTopics = "";
    public static ConcurrentHashMap<String, String> consumeTopicsMap = new ConcurrentHashMap<String, String>();
    public static HashMap<String, String> topicConfig = new HashMap<String, String>();
    /*
     */
    public static int dispatcherThreadNum = 4;
    /*
     *  for index control
     */
    public static ConcurrentHashMap<Long, Long> id2PreCreateMap = new ConcurrentHashMap<Long, Long>(); // file_id -> pre-create entry (comment in original: file_id to createIndex mapping)
    public static ConcurrentHashMap<Long, IndexControler> id2Createindex = new ConcurrentHashMap<Long, IndexControler>(); // file_id -> IndexControler mapping
    public static ConcurrentHashMap<Long, Long> removedFileId = new ConcurrentHashMap<Long, Long>(); // files that were already deleted because they are problematic
    public static ConcurrentHashMap<Long, Long> diskErrorFileId = new ConcurrentHashMap<Long, Long>(); // corrupted files that cannot be written (e.g. disk write failure); once writing fails it never recovers
    /*
     */
    public static ConcurrentHashMap<Long, Long> metaORdeviceErrorFileId = new ConcurrentHashMap<Long, Long>(); // metadata lookup errors, e.g. a file meant for node 1's queue was placed on node 2's queue; this can happen
    public static ConcurrentHashMap<Long, Long> delayErrorFileId = new ConcurrentHashMap<Long, Long>(); // files that failed their delayed close
    /*
     * for metastore,metastore client pool
     */
    public static String metaStoreCientUrl = "";
    public static int metaStoreCientPort = 0;
    public static int clientPoolSize = 6;
    public static int readbufSize = 20;
    public static MetastoreClientPool clientPool = null;
    /*
     *
     */
    public static int maxDelaySeconds = 200;

    /*
     * for lucene  metapool
     */
    public static ConcurrentHashMap<String, ArrayBlockingQueue<ConcurrentHashMap<String, LinkedHashMap<String, Field>>>> tablename2table2idx2LuceneFieldMap = new ConcurrentHashMap<String, ArrayBlockingQueue<ConcurrentHashMap<String, LinkedHashMap<String, Field>>>>();
    public static ConcurrentHashMap<String, ArrayBlockingQueue<ConcurrentHashMap<String, LinkedHashMap<String, List<FieldSchema>>>>> tablename2table2idx2colMap = new ConcurrentHashMap<String, ArrayBlockingQueue<ConcurrentHashMap<String, LinkedHashMap<String, List<FieldSchema>>>>>();
    public static ConcurrentHashMap<String, ArrayBlockingQueue<ConcurrentHashMap<String, LinkedHashMap<String, FieldType>>>> tablename2table2idx2LuceneFieldTypeMap = new ConcurrentHashMap<String, ArrayBlockingQueue<ConcurrentHashMap<String, LinkedHashMap<String, FieldType>>>>();

    /*
     * for metastore
     */
    public static String metaStoreCient = "";
    /*
     * for meta
     */
    public static ConcurrentHashMap<String, List<Index>> tablename2indexs = new ConcurrentHashMap<String, List<Index>>(10, 0.8F);
    public static ConcurrentHashMap<String, LinkedHashMap<String, List<FieldSchema>>> table2idx2colMap = new ConcurrentHashMap<String, LinkedHashMap<String, List<FieldSchema>>>(50, 0.8F);
    public static ConcurrentHashMap<String, Schema> schemaname2Schema = new ConcurrentHashMap<String, Schema>(20, 0.8F);
    public static ConcurrentHashMap<String, Table> tablename2Table = new ConcurrentHashMap<String, Table>(10, 0.8F); // db+table name -> Table object
    public static ConcurrentHashMap<String, LinkedHashMap<String, Field>> table2idx2LuceneFieldMap = new ConcurrentHashMap<String, LinkedHashMap<String, Field>>(50, 0.8F);
    public static ConcurrentHashMap<String, String> tablename2Schemaname = new ConcurrentHashMap<String, String>(10, 0.8F); // table name -> schema name mapping
    public static ConcurrentHashMap<String, String> tablename2Mqname = new ConcurrentHashMap<String, String>(10, 0.8F); // table name -> (original) message queue name mapping
    public static ConcurrentHashMap<String, LinkedHashMap<String, FieldType>> table2idx2LuceneFieldTypeMap = new ConcurrentHashMap<String, LinkedHashMap<String, FieldType>>(50, 0.8F);
    /*
     * feature switches
     */
    public static boolean isTestMode = false;
    public static boolean isSendMetric = true;
    public static int metricBuffersize = 100;
    public static int metricMsgDispatchWorkerNum = 2;
    /*
     */
    public static int compressMsgBodyOverHowmuch = 1;//int KB
    public static int maxMessageSize = 1024 * 8;
    public static int producerClientCallbackExecutorThreads = 20;
    public static int sendMsgTimeout = 8000;
    public static int retryTimesWhenSendFailed = 8;
    /*
     * for db
     */
    /*
     * for  misc
     */
    public static String unclosedFilePath = "";
    public static String cachePath = "";
    public static int intervalWriterKey = -1;
    public static int duplicateNum = 2;
    public static String hostName = "";
    public static int httpServerPool = 50;
    public static AtomicBoolean isShouldExit = new AtomicBoolean(false);
    public static AtomicBoolean isUpdaterShouldExit = new AtomicBoolean(false);
    /*
     * for sys
     */
    public static String ip = "";
    /*
     * for  avro  schema
     */
    public static String docs_protocal = "";
    public static Schema docsSchema = null;
    public static String docsSchemaContent = null;
    public static String docs_schema_name = "doc_schema_name";
    public static String docs = "docs";
    public static String docs_set = "doc_set";
    public static String user_desc = "user_desc";
    public static ConcurrentHashMap<String, String> schemaname2schemaContent = new ConcurrentHashMap<String, String>(20, 0.8F);

    /*
     * for zk
     */
    public static String zkUrls = "";
    public static String mszkUrl = "";
    public static ZkClient zkClient = null;
    public static int updateStausMilis = 60000;
    /*
     * for   statVolume
     */
    public static String staticsMq = null;
    public static String statVolumeSchemaContent = null;
    public static Schema statVolumeSchema = null;
    /*
     */
    public static int initMetaInfoThreadnum = 4;
    /*
     * for some ?
     */
    public static int writeInnerpoolBatchDrainSize = 15;
    public static int writeIndexThreadNum = 4; // number of index-writing threads per data source, default is 4
    /*
     * 
     */
    public static ConcurrentHashMap<String, AtomicLong> consumeTopic = new ConcurrentHashMap<String, AtomicLong>();
    public static ConcurrentHashMap<String, AtomicLong> getdataStatics = new ConcurrentHashMap<String, AtomicLong>();
    public static RocketDataPuller datapuller = null;
    /*
     */
    public static int badfileinfoReserveSeconds = 3600;
    public static int badfileinfoCheckGab = 60;
    /*
     */
    public static int indexerBufferFullSleepMilis = 600;
    /*
     *  for thread pool
     */

    /**
     * Loads {@code config.properties} and populates every configurable field.
     *
     * <p>Must be called once at startup before any other code reads this
     * class. On a config-load failure it logs and returns, leaving {@code cfg}
     * (and everything derived from it) null.
     * NOTE(review): the log message says "the program will exit" but this
     * method only returns — the caller is responsible for actually exiting;
     * confirm the bootstrap checks {@code cfg != null}.
     */
    public static void init() {
        try {
            cfg = new ConfigureUtil("config.properties");
        } catch (Exception e) {
            log.error(e + ",the program will exit", e);
            return; // cfg stays null; all subsequent config reads are impossible
        }
        GlobalParas.hostName = MiscTools.getHostName();
        GlobalParas.ip = MiscTools.getIpAddress();

        /*
         * for zk and metastore
         */
        GlobalParas.zkUrls = cfg.getProperty("zkUrl");
        GlobalParas.mszkUrl = cfg.getProperty("mszkUrl");
        GlobalParas.zkClient = new ZkClient(GlobalParas.zkUrls);
        GlobalParas.updateStausMilis = cfg.getIntProperty("updateStausMilis");
        GlobalParas.metaStoreCient = cfg.getProperty("metaStoreCient");
        // metaStoreCient is "host:port"; split once instead of twice
        String[] metaStoreParts = GlobalParas.metaStoreCient.split("[:]");
        GlobalParas.metaStoreCientUrl = metaStoreParts[0];
        GlobalParas.metaStoreCientPort = Integer.parseInt(metaStoreParts[1]);
        GlobalParas.clientPoolSize = Integer.parseInt(cfg.getProperty("clientPoolSize"));
        GlobalParas.cachePath = cfg.getProperty("persiCachePath");
        GlobalParas.unclosedFilePath = cfg.getProperty("unclosedFilePath");

        /*
         * for  fast write data
         */
        GlobalParas.writeIndexThreadNum = cfg.getIntProperty("writeNumPerRead");
        GlobalParas.writeInnerpoolBatchDrainSize = cfg.getIntProperty("writeInnerpoolBatchDrainSize");
        GlobalParas.writeMinNumOnce = cfg.getIntProperty("writeMinNumOnce");
        GlobalParas.readbufSize = cfg.getIntProperty("readBufNum");
        GlobalParas.submitTimeout = cfg.getIntProperty("submitTimeout");
        /*
         * for http
         */
        GlobalParas.httpServerPool = cfg.getIntProperty("httpServerPoolNum");
        GlobalParas.maxDelaySeconds = cfg.getIntProperty("maxDelayTime");
        GlobalParas.commitgab = cfg.getIntProperty("commitgab");
        /*
         * for Threadpool
         */
        GlobalParas.hlwthreadpoolSize = Integer.parseInt(cfg.getProperty("hlwrzthreadpoolSize"));
        GlobalParas.luceneWriterMaxParallelNum = Integer.parseInt(cfg.getProperty("luceneWriterMaxParaThread"));
        GlobalParas.initMetaInfoThreadnum = cfg.getIntProperty("initMetaInfoThreadnum");
        GlobalParas.clientPool = new MetastoreClientPool(GlobalParas.clientPoolSize, GlobalParas.metaStoreCientUrl, GlobalParas.metaStoreCientPort);
        /*
         * feature switches
         */
        GlobalParas.isTestMode = cfg.getBooleanProperty("isTestMode");
        GlobalParas.isSendMetric = cfg.getBooleanProperty("isSendMetric");
        GlobalParas.staticsMq = cfg.getProperty("staticsMq");
        GlobalParas.metricBuffersize = cfg.getIntProperty("metricBuffersize");
        GlobalParas.metricMsgDispatchWorkerNum = cfg.getIntProperty("metricMsgDispatchWorkerNum");

        /*
         * for lucene
         */
        GlobalParas.mergePolicy = cfg.getProperty("mergePolicy");
        GlobalParas.luceneMaxThreadStates = Integer.parseInt(cfg.getProperty("luceneMaxThreadStates"));
        GlobalParas.maxParallelMergeFileNum = Integer.parseInt(cfg.getProperty("maxParallelMergeFileNum"));
        GlobalParas.luceneConfigMap = initDataWriterConfig();

        /*
         * for rocket
         */
        GlobalParas.rocketNameServer = cfg.getProperty("rocketNameServer");
        GlobalParas.rocketConsumeGroupPrefix = cfg.getProperty("rocketConsumeGroupPrefix");
        GlobalParas.rocketProducerGroupPrefix = cfg.getProperty("rocketProducerGroupPrefix");
        GlobalParas.rocketMinConsumeThread = cfg.getIntProperty("rocketMinConsumeThread");
        GlobalParas.rocketMaxConsumeThread = cfg.getIntProperty("rocketMaxConsumeThread");
        GlobalParas.rocketConsumeMessageBatchMaxSize = cfg.getIntProperty("rocketConsumeMessageBatchMaxSize");
        GlobalParas.rocketPullBatchSize = cfg.getIntProperty("rocketPullBatchSize");
        GlobalParas.consumeConcurrentlyMaxSpan = cfg.getIntProperty("consumeConcurrentlyMaxSpan");
        GlobalParas.error_file_mq = cfg.getProperty("error_file_mq");
        GlobalParas.clientCallbackExecutorThreads = cfg.getIntProperty("clientCallbackExecutorThreads");
        GlobalParas.compressMsgBodyOverHowmuchInKB = cfg.getIntProperty("compressMsgBodyOverHowmuchInKB");
        GlobalParas.retryTimesWhenSendFailed = cfg.getIntProperty("retryTimesWhenSendFailed");
        GlobalParas.sendMsgTimeout = cfg.getIntProperty("sendMsgTimeout");

        GlobalParas.rocketPullDatabufferSize = cfg.getIntProperty("rocketDatabufferSize");

        GlobalParas.rocketConsumTopics = cfg.getProperty("rocketConsumTopics");
        // register each consume topic and a per-host/topic pull counter
        for (String s : GlobalParas.rocketConsumTopics.split("[,]")) {
            GlobalParas.consumeTopicsMap.put(s, s);
            GlobalParas.getdataStatics.put(GlobalParas.hostName + "_" + s, new AtomicLong());
        }
        GlobalParas.topicConfig = cfg.getRegxProperty("rocketConsumTopicsConfig");
        log.info("the topics config is " + GlobalParas.topicConfig);
        GlobalParas.pullThresholdForQueue = cfg.getIntProperty("pullThresholdForQueue");
        GlobalParas.badfileinfoCheckGab = cfg.getIntProperty("badfileinfoCheckGab");
        GlobalParas.badfileinfoReserveSeconds = cfg.getIntProperty("badfileinfoReserveSeconds");
        GlobalParas.indexerBufferFullSleepMilis = cfg.getIntProperty("indexerBufferFullSleepMilis");

    }

    /**
     * Parses the {@code dataWriter_config} property into a {@link LuceneConfig}
     * and registers it in {@link #luceneConfigMap}, which is also returned.
     *
     * <p>Expected format is 13 pipe-separated fields:
     * {@code tableName|bufferRamMB|bufferDocs|minMergeMB|maxMergeMB|mergeFactor|
     * maxMergeMBForce|normalForceMergeMB|normalForceSegNum|abnormalForceMergeMB|
     * abnormalForceSegNum|maxMergeDocs|maxDocsSingleFile}.
     * NOTE(review): a malformed property throws ArrayIndexOutOfBoundsException
     * or NumberFormatException out of init() — confirm that is the desired
     * fail-fast behavior.
     */
    private static ConcurrentHashMap<String, LuceneConfig> initDataWriterConfig() {
        String luceneConfig = cfg.getProperty("dataWriter_config");
        // split once instead of re-splitting the same string for every field
        String[] parts = luceneConfig.split("[|]");
        LuceneConfig config = new LuceneConfig();
        config.setTableName(parts[0]);
        config.setLuceneBufferRam(Integer.parseInt(parts[1]));
        config.setLuceneBufferDocs(Integer.parseInt(parts[2]));
        config.setMergePolicyMinMergeMB(Integer.parseInt(parts[3]));
        config.setMergePolicyMaxMergeMB(Integer.parseInt(parts[4]));
        config.setMergePolicyMergeFactor(Integer.parseInt(parts[5]));
        config.setMaxMergeMB_Forcemerge(Integer.parseInt(parts[6]));
        config.setNormalForceMergeMB(Integer.parseInt(parts[7]));
        config.setNormalForceMerge_segmemt_num(Integer.parseInt(parts[8]));
        config.setAbnormalForceMergeMB(Integer.parseInt(parts[9]));
        config.setAbnormalForceMerge_segmemt_num(Integer.parseInt(parts[10]));
        config.setMergePolicyMaxMergeDocs(Integer.parseInt(parts[11]));
        config.setMaxdocs_singlefile(Integer.parseInt(parts[12]));
        luceneConfigMap.put(config.getTableName(), config);
        return luceneConfigMap;
    }
}
