package cn.ac.iie.ulss.dataredistribution.server;

import cn.ac.iie.ulss.dataredistribution.commons.GlobalVariables;
import cn.ac.iie.ulss.dataredistribution.commons.RuntimeEnv;
import cn.ac.iie.ulss.dataredistribution.config.Configuration;
import cn.ac.iie.ulss.dataredistribution.handler.CountThread;
import cn.ac.iie.ulss.dataredistribution.handler.DataAccepter;
import cn.ac.iie.ulss.dataredistribution.handler.GetErrorFile;
import cn.ac.iie.ulss.dataredistribution.handler.GetMessageFromMetaStore;
import cn.ac.iie.ulss.dataredistribution.handler.PrintEnvironment;
import cn.ac.iie.ulss.dataredistribution.handler.TransmitThread;
import cn.ac.iie.ulss.dataredistribution.tools.DataProducer;
import cn.ac.iie.ulss.dataredistribution.tools.HNode;
import cn.ac.iie.ulss.dataredistribution.tools.INode;
import cn.ac.iie.ulss.dataredistribution.tools.MetaStoreClientPool;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;

/**
 *
 * @author: evan
 * @date: 2014-10-15
 */
public class StartDataRedistributionServer {

    static final String CONFIGURATIONFILENAME = "data_redistribution.properties";
    // SimpleDateFormat is not thread-safe; this one is only touched from the
    // single shutdown-hook thread, so sharing it statically is acceptable here.
    static SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd:HH");
    static String mqTopicName = null;
    static Map<String, ConcurrentLinkedQueue[]> topicToDataPool = null;
    static Map<String, DataProducer> topicToProducer = null;
    static int transmitthreadsize = 0;
    static DataAccepter accepter = null;
    static Integer datapoolcount = null;
    static ConcurrentHashMap<String, AtomicLong> tableToSendThreadCount = null;
    static GetErrorFile errorfileconsumer = null;
    static ConcurrentHashMap<String, AtomicLong> tableToAcceptCount = null;
    static ConcurrentHashMap<String, AtomicLong> tableToCount = null;
    static ConcurrentHashMap<String, ConcurrentHashMap<HNode, ConcurrentHashMap<INode, ConcurrentLinkedQueue>>> tableToMSGStation = null;
    static Logger logger = null;

    static {
        // Configure log4j before the first Logger is created.
        PropertyConfigurator.configure("log4j.properties");
        logger = Logger.getLogger(StartDataRedistributionServer.class.getName());
    }

    /**
     * Entry point: initializes the runtime environment, then starts every
     * background service of the redistribution server. Any startup failure is
     * logged with its stack trace.
     */
    public static void main(String[] arg) {
        logger.info("intializing data_redistribution client...");
        try {
            init();
            run();
        } catch (Exception ex) {
            // Was logger.info(ex): wrong level and it dropped the stack trace.
            logger.error("failed to start data_redistribution server", ex);
        }
    }

    /**
     * Initializes the environment and the global variables, then connects to
     * the metastore. Must run before {@link #run()}.
     *
     * @throws Exception if the configuration file cannot be read or the
     *         runtime environment cannot be initialized
     */
    private static void init() throws Exception {
        getInfoFromConfigFile();   // load the configuration file
        setGlobalVariables();      // publish the global variables
        getGlobalVariables();      // cache the global variables in static fields
        initMetaStoreClient();     // build the metastore connection pool
    }

    /**
     * Loads {@link #CONFIGURATIONFILENAME} and initializes {@link RuntimeEnv}
     * from it.
     *
     * @throws Exception if the file cannot be read or RuntimeEnv rejects it
     */
    private static void getInfoFromConfigFile() throws Exception {
        logger.info("getting configuration from configuration file " + CONFIGURATIONFILENAME);
        Configuration conf = Configuration.getConfiguration(CONFIGURATIONFILENAME);
        if (conf == null) {
            throw new Exception("reading " + CONFIGURATIONFILENAME + " is failed.");
        }

        logger.info("initializng runtime enviroment...");
        if (!RuntimeEnv.initialize(conf)) {
            throw new Exception("initializng runtime enviroment is failed");
        }
    }

    /** Publishes the shared global variables via {@link GlobalVariables}. */
    private static void setGlobalVariables() {
        logger.info("initializng Global Variables...");
        GlobalVariables.initialize();
    }

    /**
     * Copies the parameters published in {@link RuntimeEnv} into the static
     * fields of this class so the rest of startup can use them directly.
     */
    private static void getGlobalVariables() {
        mqTopicName = (String) RuntimeEnv.getParam(RuntimeEnv.MQ_TOPIC_NAME);
        datapoolcount = (Integer) RuntimeEnv.getParam(RuntimeEnv.DATA_POOL_COUNT);
        topicToDataPool = (Map<String, ConcurrentLinkedQueue[]>) RuntimeEnv.getParam(GlobalVariables.TOPIC_TO_DATAPOOL);
        transmitthreadsize = ((Integer) RuntimeEnv.getParam(RuntimeEnv.TRANSMIT_THREAD_SIZE));
        topicToProducer = (Map<String, DataProducer>) RuntimeEnv.getParam(GlobalVariables.TOPIC_TO_PRODUCER);
        accepter = (DataAccepter) RuntimeEnv.getParam(GlobalVariables.CONSUMER);
        errorfileconsumer = (GetErrorFile) RuntimeEnv.getParam(GlobalVariables.ERRORFILECONSUMER);
        tableToSendThreadCount = (ConcurrentHashMap<String, AtomicLong>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_SENDTHREAD_COUNT);
        tableToAcceptCount = (ConcurrentHashMap<String, AtomicLong>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_ACCEPTCOUNT);
        tableToCount = (ConcurrentHashMap<String, AtomicLong>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_SENDCOUNT);
        tableToMSGStation = (ConcurrentHashMap<String, ConcurrentHashMap<HNode, ConcurrentHashMap<INode, ConcurrentLinkedQueue>>>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_MSGSTATION);
    }

    /**
     * Builds the Hive metastore client pool from the configured URI and pool
     * size and publishes it in {@link RuntimeEnv}.
     */
    private static void initMetaStoreClient() {
        String metaStoreClientString = (String) RuntimeEnv.getParam(RuntimeEnv.METASTORE_CLIENT);
        int metaStoreClientPoolSize = (Integer) RuntimeEnv.getParam(RuntimeEnv.METASTORE_CLIENT_POOL_SIZE);
        HiveConf hc = new HiveConf();
        hc.set("hive.metastore.uris", "thrift://" + metaStoreClientString);
        MetaStoreClientPool newmscp = new MetaStoreClientPool(metaStoreClientPoolSize, hc);
        RuntimeEnv.addParam(GlobalVariables.METASTORE_CLIENT_POOL, newmscp);
    }

    /**
     * Starts every service of the server, in dependency order.
     *
     * @throws FileNotFoundException if a schema file is missing
     * @throws IOException if a schema file cannot be read
     * @throws InterruptedException declared for interface compatibility
     */
    private static void run() throws FileNotFoundException, IOException, InterruptedException {
        getSchema();               // load the data schemas
        putVariablesOfTopic();     // set up the data pools / transmit threads per topic
        initProducer();            // start the RocketMQ producers
        initErrorFileConsumer();   // start the metaq consumer for file info from the persistence module
        startAccept();             // start the RocketMQ consumer
        getMessageFromMetaStore(); // start consuming partition-rule change messages
        printStatistics();         // start the statistics-printing daemon
        printEnvironment();        // start the runtime-status-printing daemon
        doShutDownWork();          // register the shutdown hook
    }

    /**
     * Loads the "docs" and "doc" Avro schemas from the working directory and
     * publishes them (lower-cased) in {@link RuntimeEnv}.
     *
     * @throws FileNotFoundException if either schema file is missing
     * @throws IOException if reading fails
     */
    private static void getSchema() throws FileNotFoundException, IOException {
        String docsSchema = readFileToLowerString("./docs.json");
        RuntimeEnv.addParam(GlobalVariables.DOCS_SCHEMA_CONTENT, docsSchema);
        logger.info("get the schema docs is " + docsSchema);

        String docSchema = readFileToLowerString("./doc.json");
        RuntimeEnv.addParam(GlobalVariables.DOC_SCHEMA_CONTENT, docSchema);
        logger.info("get the schema doc is " + docSchema);
    }

    /**
     * Reads a whole file and returns its content lower-cased.
     * Uses try-with-resources so the stream is closed even when a read fails
     * (the original closed the streams only on the success path), and decodes
     * explicitly as UTF-8 instead of the platform-default charset.
     *
     * @param path path of the file to read
     * @return the file content, lower-cased
     * @throws IOException if the file is missing or cannot be read
     */
    private static String readFileToLowerString(String path) throws IOException {
        byte[] buffer = new byte[1024];
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (FileInputStream in = new FileInputStream(new File(path))) {
            int read;
            while ((read = in.read(buffer, 0, buffer.length)) > 0) {
                out.write(buffer, 0, read);
            }
        }
        return new String(out.toByteArray(), StandardCharsets.UTF_8).toLowerCase();
    }

    /**
     * For every configured topic, creates its array of data pools and starts
     * {@code transmitthreadsize} {@link TransmitThread}s per pool.
     */
    private static void putVariablesOfTopic() {
        String[] topics = mqTopicName.split("\\|");
        if (topics.length == 0) {
            logger.info("there is no topic to consumer!");
            System.exit(0);
        } else {
            for (String topic : topics) {
                ConcurrentLinkedQueue[] dataPool = new ConcurrentLinkedQueue[datapoolcount];
                for (int i = 0; i < dataPool.length; i++) {
                    ConcurrentLinkedQueue clq = new ConcurrentLinkedQueue();
                    dataPool[i] = clq;
                    for (int j = 0; j < transmitthreadsize; j++) {
                        int number = transmitthreadsize * i + j;
                        TransmitThread dtm = new TransmitThread(dataPool[i], topic);
                        Thread tdtm = new Thread(dtm);
                        // Was "TransmitThread－" with a full-width U+FF0D dash
                        // (an IME artifact); use the ASCII '-' consistently.
                        tdtm.setName("TransmitThread-" + topic + "-" + number);
                        tdtm.start();
                    }
                }
                topicToDataPool.put(topic, dataPool);
            }
        }
    }

    /** Creates and initializes one {@link DataProducer} per configured topic. */
    private static void initProducer() {
        String[] topics = mqTopicName.split("\\|");
        if (topics.length == 0) {
            logger.info("there is no topic to consumer!");
            System.exit(0);
        } else {
            for (String topic : topics) {
                DataProducer dp = new DataProducer(topic);
                dp.init();
                topicToProducer.put(topic, dp);
            }
        }
    }

    /** Starts the consumer that receives error-file info from the persistence module. */
    private static void initErrorFileConsumer() {
        errorfileconsumer.init();
        errorfileconsumer.pullDataFromQ();
    }

    /** Starts the RocketMQ data accepter. */
    private static void startAccept() {
        accepter.init();
        accepter.pullDataFromQ();
    }

    /** Starts the thread that pulls rule-change messages from the metastore. */
    private static void getMessageFromMetaStore() {
        GetMessageFromMetaStore gmfms = new GetMessageFromMetaStore(); // getting the change message from the metastore
        Thread tgmfms = new Thread(gmfms);
        tgmfms.setName("GetMessageFromMetaStore");
        tgmfms.start();
    }

    /** Starts the daemon thread that periodically prints accept/send counters. */
    private static void printStatistics() {
        CountThread act = new CountThread();
        Thread tact = new Thread(act);
        tact.setName("AcceptCountThread");
        tact.start();
    }

    /** Starts the daemon thread that periodically prints the runtime status. */
    private static void printEnvironment() {
        PrintEnvironment pe = new PrintEnvironment();
        Thread tpe = new Thread(pe);
        tpe.setName("PrintEnvironment");
        tpe.start();
    }

    /**
     * Registers a shutdown hook that drains every in-flight queue before the
     * JVM exits: stop accepting, wait for the data pools, the useless-data
     * store and the per-table message stations to empty, wait for the send
     * threads to finish, then shut the producers down and log final counters.
     */
    private static void doShutDownWork() {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                accepter.shutdown();

                // Wait until every data pool of every topic is drained.
                for (String topic : topicToDataPool.keySet()) {
                    ConcurrentLinkedQueue[] dataPool = topicToDataPool.get(topic);
                    for (int i = 0; i < dataPool.length; i++) {
                        while (!dataPool[i].isEmpty()) {
                            try {
                                Thread.sleep(2000);
                            } catch (Exception ex) {
                                // best-effort wait during shutdown; keep polling
                            }
                        }
                        logger.info("the dataPool for " + topic + " is empty!");
                    }
                }

                // Wait until the useless-data queues are drained.
                ConcurrentHashMap<String, ConcurrentLinkedQueue> uselessDataStore = (ConcurrentHashMap<String, ConcurrentLinkedQueue>) RuntimeEnv.getParam(GlobalVariables.USELESS_DATA_STORE);
                for (String topic : uselessDataStore.keySet()) {
                    ConcurrentLinkedQueue clq = uselessDataStore.get(topic);
                    if (clq != null) {
                        while (!clq.isEmpty()) {
                            logger.info("the queue of uselessDataStore for " + topic + " is not empty");
                            try {
                                Thread.sleep(2000);
                            } catch (Exception ex) {
                                // best-effort wait during shutdown; keep polling
                            }
                        }
                    }
                    logger.info("the uselessData for " + topic + " is empty!");
                }

                // Wait until every message station queue is drained.
                for (String table : tableToMSGStation.keySet()) {
                    ConcurrentHashMap<HNode, ConcurrentHashMap<INode, ConcurrentLinkedQueue>> chm = tableToMSGStation.get(table);
                    for (HNode rn : chm.keySet()) {
                        ConcurrentHashMap<INode, ConcurrentLinkedQueue> chm2 = chm.get(rn);
                        for (INode sn : chm2.keySet()) {
                            while (!chm2.get(sn).isEmpty()) {
                                try {
                                    Thread.sleep(2000);
                                } catch (Exception ex) {
                                    // best-effort wait during shutdown; keep polling
                                }
                            }
                        }
                    }
                }

                // Wait until every table's send threads have finished.
                for (String table : tableToSendThreadCount.keySet()) {
                    AtomicLong threadSize = tableToSendThreadCount.get(table);
                    while (threadSize.longValue() > 0) {
                        try {
                            Thread.sleep(2000);
                        } catch (Exception ex) {
                            // best-effort wait during shutdown; keep polling
                        }
                    }
                    logger.info("the sendThreadCount for " + table + " is empty!");
                }

                errorfileconsumer.shutdown();

                for (String topic : topicToProducer.keySet()) {
                    topicToProducer.get(topic).shutdown();
                }

                // Log the final per-topic / per-rule counters for this hour.
                Date date = new Date();
                String time = dateFormat.format(date);
                for (String topic : tableToAcceptCount.keySet()) {
                    logger.info(time + " this hour final accept " + tableToAcceptCount.get(topic) + " messages from " + topic);
                }

                for (String rule : tableToCount.keySet()) {
                    logger.info(time + " this hour final send " + tableToCount.get(rule) + " messages for " + rule);
                }
            }
        });
    }
}
