package cn.ac.iie.ulss.dataredistribution.handler;

import cn.ac.iie.ulss.dataredistribution.commons.GlobalVariables;
import cn.ac.iie.ulss.dataredistribution.commons.RuntimeEnv;
import cn.ac.iie.ulss.dataredistribution.consistenthashing.DynamicAllocate;
import cn.ac.iie.ulss.dataredistribution.consistenthashing.NodeLocator;
import cn.ac.iie.ulss.dataredistribution.tools.HNode;
import cn.ac.iie.ulss.dataredistribution.tools.MetaStoreClientPool;
import cn.ac.iie.ulss.dataredistribution.tools.RData;
import cn.ac.iie.ulss.dataredistribution.tools.Rule;
import cn.ac.iie.ulss.dataredistribution.tools.INode;
import java.io.ByteArrayInputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.avro.Protocol;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.tools.PartitionFactory;
import org.apache.log4j.PropertyConfigurator;
import org.apache.thrift.TException;

/**
 *
 * @author: evan
 * @date: 2014-10-15
 */
/**
 * Splits Avro-encoded "docs" blocks received for a single topic into individual
 * records and feeds them into this emitter's slot of the shared data pool.
 *
 * <p>On first sight of a table the emitter registers its routing metadata:
 * it fetches the partition rule from the Hive metastore, builds the per-node
 * message stations, and starts the sender / file-creation threads.
 *
 * <p>Thread-safety: all mutation goes through the {@code synchronized}
 * {@link #emit(byte[])} entry point, so the scratch fields below
 * ({@code docsin}, {@code docsdecoder}, {@code docsGr}, ...) are only ever
 * touched by one thread at a time. The shared {@code version} counter is
 * additionally guarded by its own monitor because it is shared across
 * emitter instances.
 *
 * @author evan
 * @date 2014-10-15
 */
public class BlockEmitter {

    // index of the dataPool slot this emitter writes to
    int filenumber = 0;
    // shared pool of per-file record queues, indexed by filenumber
    ConcurrentLinkedQueue[] dataPool = null;
    // topic this emitter consumes
    String topic = null;
    // Avro protocol text for the "docs" envelope schema
    String docsSchemaContent = null;
    // soft cap on the total number of buffered records across the pool
    int dataPoolSize = 20000;
    Protocol protocol = null;
    Schema docsschema = null;
    DatumReader<GenericRecord> docsreader = null;
    DatumWriter<GenericRecord> write = null;
    DataFileWriter<GenericRecord> dataFileWriter = null;
    ConcurrentHashMap<String, AtomicLong> tableToAcceptCount = null;
    // shared generation counter used to coordinate back-pressure between emitters
    AtomicLong version = null;
    // last value of version observed by this emitter
    long localversion = 0;
    // scratch state reused across emit() calls (safe: emit is synchronized)
    ByteArrayInputStream docsin = null;
    BinaryDecoder docsdecoder = null;
    GenericRecord docsGr = null;
    GenericArray msgSet = null;
    Iterator<ByteBuffer> msgitor = null;
    // true while this emitter may keep accepting messages without re-checking the pool
    AtomicBoolean getMoreMessage = new AtomicBoolean(false);
    ConcurrentHashMap<String, Map<Utf8, Utf8>> tableToDocDesc = null;
    ConcurrentHashMap<String, String> keyWordToType = null;
    // 1 = persist undecodable packages via StoreUselessDataThread
    int uselessfile = 1;
    byte[] onedata = null;
    int datasenderThreadSize = 2;
    ConcurrentHashMap<String, String> tableToTopic = null;
    ConcurrentHashMap<String, AtomicLong> tableToSendCount = null;
    ConcurrentHashMap<String, AtomicLong> tableToSendThreadCount = null;
    ConcurrentHashMap<String, Rule> tableToRule = null;
    ConcurrentHashMap<String, ArrayList<HNode>> tableToNodes = null;
    ConcurrentHashMap<String, ConcurrentHashMap<HNode, ConcurrentHashMap<INode, ConcurrentLinkedQueue>>> tableToMSGStation = null;
    static org.apache.log4j.Logger logger = null;

    static {
        PropertyConfigurator.configure("log4j.properties");
        logger = org.apache.log4j.Logger.getLogger(BlockEmitter.class.getName());
    }

    /**
     * @param dataPool   shared array of record queues; this emitter writes to dataPool[filenumber]
     * @param topic      topic whose blocks this emitter handles
     * @param filenumber index of this emitter's slot in dataPool
     * @param version    shared generation counter used for cross-emitter back-pressure
     */
    public BlockEmitter(ConcurrentLinkedQueue[] dataPool, String topic, int filenumber, AtomicLong version) {
        this.dataPool = dataPool;
        this.topic = topic;
        this.filenumber = filenumber;
        this.version = version;
    }

    /**
     * Loads schemas, counters and routing maps from the shared runtime
     * environment. Must be called once before {@link #emit(byte[])}.
     */
    public void init() {
        docsSchemaContent = (String) RuntimeEnv.getParam(GlobalVariables.DOCS_SCHEMA_CONTENT);
        dataPoolSize = (Integer) RuntimeEnv.getParam(RuntimeEnv.ACCEPT_SIZE);
        protocol = Protocol.parse(docsSchemaContent);
        docsschema = protocol.getType(GlobalVariables.DOCS);
        docsreader = new GenericDatumReader<GenericRecord>(docsschema);
        write = new GenericDatumWriter<GenericRecord>(docsschema);
        dataFileWriter = new DataFileWriter<GenericRecord>(write);
        tableToAcceptCount = (ConcurrentHashMap<String, AtomicLong>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_ACCEPTCOUNT);
        uselessfile = (Integer) RuntimeEnv.getParam(RuntimeEnv.USELESS_FILE);
        tableToDocDesc = (ConcurrentHashMap<String, Map<Utf8, Utf8>>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_DOC_DESC);
        keyWordToType = (ConcurrentHashMap<String, String>) RuntimeEnv.getParam(GlobalVariables.KEYWORD_TO_TYPE);
        tableToTopic = (ConcurrentHashMap<String, String>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_TOPIC);
        tableToSendCount = (ConcurrentHashMap<String, AtomicLong>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_SENDCOUNT);
        tableToSendThreadCount = (ConcurrentHashMap<String, AtomicLong>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_SENDTHREAD_COUNT);
        tableToRule = (ConcurrentHashMap<String, Rule>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_RULE);
        tableToNodes = (ConcurrentHashMap<String, ArrayList<HNode>>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_NODES);
        datasenderThreadSize = (Integer) RuntimeEnv.getParam(RuntimeEnv.DATASENDER_THREAD_SIZE);
        tableToMSGStation = (ConcurrentHashMap<String, ConcurrentHashMap<HNode, ConcurrentHashMap<INode, ConcurrentLinkedQueue>>>) RuntimeEnv.getParam(GlobalVariables.TABLE_TO_MSGSTATION);
    }

    /**
     * Accepts one Avro-encoded message block, splits it into records, and
     * re-arms the back-pressure check when the pool fills up or the shared
     * version moves.
     *
     * @param message serialized "docs" block; a null message is only logged
     */
    synchronized void emit(byte[] message) {
        // first message after a pause: wait (if needed) until the pool has drained
        if (getMoreMessage.compareAndSet(false, true)) {
            checkEnvironment();
        }
        if (message != null) {
            addCount(message);
            // force a fresh environment check on the next call when the pool is
            // full or another emitter bumped the shared version
            if (dataPoolCount() >= dataPoolSize || localversion < version.longValue()) {
                getMoreMessage.compareAndSet(true, false);
            }
        } else {
            logger.info("one message is null for " + topic);
        }
    }

    /**
     * Synchronizes this emitter with the shared version counter. If another
     * emitter already advanced it, just catch up; otherwise block until the
     * pool has drained below half capacity, then advance the counter so the
     * other emitters catch up too.
     */
    private synchronized void checkEnvironment() {
        synchronized (version) {
            if (localversion < version.longValue()) {
                localversion = version.longValue();
            } else {
                int i = 0;
                while (dataPoolCount() > dataPoolSize / 2) {
                    try {
                        Thread.sleep(100);
                    } catch (Exception ex) {
                        // deliberately swallowed: the sleep is only a throttle;
                        // restoring the interrupt flag here would turn this
                        // drain-wait into a busy loop
                    }
                    i++;
                    if (i % 40 == 0) {
                        // log roughly every 4 seconds while waiting
                        logger.info("check the environment for " + topic + " " + filenumber);
                    }
                }
                version.incrementAndGet();
                localversion = version.longValue();
            }
        }
    }

    /**
     * Decodes one "docs" block, lazily registers the table's routing metadata
     * on first sight, enqueues every contained record into this emitter's pool
     * slot, and bumps the table's accept counter.
     *
     * @param msg serialized Avro "docs" block
     */
    private void addCount(byte[] msg) {
        docsin = new ByteArrayInputStream(msg);
        docsdecoder = DecoderFactory.get().binaryDecoder(docsin, null);
        try {
            docsGr = docsreader.read(null, docsdecoder);
        } catch (Exception ex) {
            // undecodable package: optionally park it for later inspection
            logger.info("split the data package from the topic " + topic + " in the dataPool wrong " + ex, ex);
            if (uselessfile == 1) {
                storeUselessData(topic, msg);
            }
            return;
        }

        String table = ((Utf8) docsGr.get(GlobalVariables.DOC_SCHEMA_NAME)).toString();
        tableToTopic.put(table, topic);

        Map<Utf8, Utf8> docDesc = (Map<Utf8, Utf8>) docsGr.get(GlobalVariables.DOC_DESC);
        tableToDocDesc.put(table, docDesc);

        // index the column descriptors by "<table><column>" for quick type lookup
        for (Utf8 u : docDesc.keySet()) {
            keyWordToType.put(table + u.toString(), docDesc.get(u).toString());
        }

        // double-checked registration: only the first thread to see a table
        // builds its rule, message station and worker threads
        if (!tableToMSGStation.containsKey(table)) {
            synchronized (tableToMSGStation) {
                if (!tableToMSGStation.containsKey(table)) {
                    tableToAcceptCount.put(table, new AtomicLong(0L));
                    tableToSendCount.put(table, new AtomicLong(0L));
                    tableToSendThreadCount.put(table, new AtomicLong(0L));
                    getRuleFromMeta(table);
                    setMSGStationForRule(table);
                    initDataSender(table);
                    CreateFileThread cft = new CreateFileThread(table);
                    Thread tcft = new Thread(cft);
                    tcft.setName("CreateFileThread-" + table);
                    tcft.start();
                }
            }
        }

        msgSet = (GenericData.Array<GenericRecord>) docsGr.get(GlobalVariables.DOC_SET);
        msgitor = msgSet.iterator();
        while (msgitor.hasNext()) {
            onedata = ((ByteBuffer) msgitor.next()).array();
            dataPool[filenumber].offer(new RData(table, onedata));
        }

        AtomicLong acceptCount = tableToAcceptCount.get(table);
        acceptCount.addAndGet(msgSet.size());
    }

    /**
     * Places an undecodable data package into the per-topic useless-data store,
     * starting a {@link StoreUselessDataThread} for the topic on first use.
     */
    private void storeUselessData(String topic, byte[] data) {
        ConcurrentHashMap<String, ConcurrentLinkedQueue> uselessDataStore = (ConcurrentHashMap<String, ConcurrentLinkedQueue>) RuntimeEnv.getParam(GlobalVariables.USELESS_DATA_STORE);
        synchronized (RuntimeEnv.getParam(GlobalVariables.SYN_STORE_USELESSDATA)) {
            if (uselessDataStore.containsKey(topic)) {
                ConcurrentLinkedQueue clq = uselessDataStore.get(topic);
                clq.offer(data);
            } else {
                ConcurrentLinkedQueue sdQueue = new ConcurrentLinkedQueue();
                sdQueue.offer(data);
                uselessDataStore.put(topic, sdQueue);
                StoreUselessDataThread sudt = new StoreUselessDataThread(sdQueue, topic);
                Thread tsudt = new Thread(sudt);
                tsudt.setName("StoreUselessDataThread-" + topic);
                tsudt.start();
                logger.info("start a StoreUselessDataThread for " + topic);
            }
        }
    }

    /** @return total number of records currently buffered across all pool slots */
    private long dataPoolCount() {
        long count = 0;
        for (int i = 0; i < dataPool.length; i++) {
            count += dataPool[i].size();
        }
        return count;
    }

    /**
     * Fetches the partition definition of table {@code t} (format
     * "db.table") from the Hive metastore and turns it into a redistribution
     * {@link Rule}. Only a level-1 interval partition combined with a level-2
     * hash partition is supported; anything else is logged and retried after a
     * 10-second sleep, forever, until a usable definition appears.
     */
    private void getRuleFromMeta(String t) {
        logger.info("getting the rule for " + t + " from the metaStore");
        Rule rule = null;
        ArrayList<HNode> nodes = new ArrayList<HNode>();
        NodeLocator nodelocator = null;
        String partType = null;
        String keywords = null;
        MetaStoreClientPool mscp = (MetaStoreClientPool) RuntimeEnv.getParam(GlobalVariables.METASTORE_CLIENT_POOL);
        retry:
        while (true) {
            MetaStoreClientPool.MetaStoreClient cli = mscp.getClient();
            IMetaStoreClient icli = cli.getHiveClient();
            try {
                Table table = icli.getTable(t.split("\\.")[0], t.split("\\.")[1]);
                cli.release();
                List<FieldSchema> allSplitKeys = table.getFileSplitKeys();
                List<FieldSchema> splitKeys = new ArrayList<FieldSchema>();

                // keep only the split keys that belong to the newest version
                // (primitive long avoids the boxed comparison the old Long local did)
                long maxVersion = 0L;
                for (FieldSchema fs : allSplitKeys) {
                    if (fs.getVersion() >= maxVersion) {
                        maxVersion = fs.getVersion();
                    }
                }

                for (FieldSchema fs : allSplitKeys) {
                    if (fs.getVersion() == maxVersion) {
                        splitKeys.add(fs);
                    }
                }
                logger.info("the fieldSchema " + maxVersion + " has " + splitKeys.size() + " splitKeys");

                if (splitKeys.isEmpty()) {
                    logger.info("There is no splitkeys in the table " + t);
                    Thread.sleep(10000);
                    continue;
                }

                String split_name_l1 = "";
                String split_name_l2 = "";
                String part_type_l1 = "";
                String part_type_l2 = "";
                int l1_part_num = 0;
                int l2_part_num = 0;

                List<PartitionFactory.PartitionInfo> pis = PartitionFactory.PartitionInfo.getPartitionInfo(splitKeys);

                if (pis.size() != 2) {
                    logger.error(t + " this system only support the two levelpartitions");
                    Thread.sleep(10000);
                    continue;
                }

                StringBuilder tmps = new StringBuilder();
                tmps.append(t.split("\\.")[0]);
                tmps.append("|");
                tmps.append(t.split("\\.")[1]);
                String unit = null;
                String interval = null;
                for (PartitionFactory.PartitionInfo pinfo : pis) {
                    // partition level: currently supported schemes are level-1
                    // interval, level-1 hash, and level-1 interval + level-2 hash
                    if (pinfo.getP_level() == 1) {
                        split_name_l1 = pinfo.getP_col();            // column the level-1 partition uses
                        part_type_l1 = pinfo.getP_type().getName();  // partition method of this level: hash or interval
                        l1_part_num = pinfo.getP_num();              // partition count; hash with n partitions yields feature values 0..n-1

                        if ("interval".equalsIgnoreCase(part_type_l1)) {
                            if (pis.get(0).getArgs().size() < 2) {
                                logger.error("get the table's partition unit and interval error");
                                Thread.sleep(10000);
                                // BUGFIX: retry the whole metastore fetch. The old plain
                                // "continue" only skipped to the next partition level and
                                // could let a half-built rule through to tableToRule.
                                continue retry;
                            } else {
                                // units: Y year, M month, W week, D day, H hour, M minute.
                                // Only H/D/W are supported: month/year lengths vary, so an
                                // exact partition is impossible; minutes are too fine-grained.
                                List<String> paras = pinfo.getArgs();
                                unit = paras.get(0);
                                interval = paras.get(1);
                                tmps.append("|interval|");
                                tmps.append(unit);
                                tmps.append("|");
                                tmps.append(interval);
                            }
                        } else {
                            logger.error(t + " this system only support the interval for the first partition");
                            Thread.sleep(10000);
                            continue retry; // BUGFIX: see above
                        }
                    }

                    if (pinfo.getP_level() == 2) {
                        split_name_l2 = pinfo.getP_col();
                        part_type_l2 = pinfo.getP_type().getName();
                        l2_part_num = pinfo.getP_num();
                        if ("hash".equalsIgnoreCase(part_type_l2)) {
                            tmps.append("|hash|");
                            tmps.append(maxVersion);
                            partType = tmps.toString();
                        } else {
                            logger.error(t + " this system only support the hash for the second partition");
                            Thread.sleep(10000);
                            continue retry; // BUGFIX: see above
                        }
                    }
                }
                keywords = split_name_l1 + "|" + split_name_l2;
                // one HNode per level-2 hash bucket
                nodes = new ArrayList<HNode>();
                for (int i = 0; i < l2_part_num; i++) {
                    HNode node = new HNode("" + i);
                    node.setHashNum(l2_part_num);
                    node.setKeywords(keywords);
                    node.setPartType(partType);
                    nodes.add(node);
                }
                tableToNodes.put(t, nodes);
                DynamicAllocate dynamicallocate = new DynamicAllocate();
                dynamicallocate.setNodes(nodes);
                nodelocator = dynamicallocate.getHashCodeNodeLocator();
                String urlEnd = (String) RuntimeEnv.getParam(RuntimeEnv.URL_END);
                logger.info(t + " the partitionRules is " + partType + " " + keywords);
                rule = new Rule(t, urlEnd, nodes, keywords, nodelocator, partType);
                tableToRule.put(t, rule);
                break;
            } catch (TException tex) {
                // the metastore connection is broken: discard this client and
                // ask the pool to create a replacement
                logger.error("the metastore is bad " + tex, tex);
                try {
                    icli.close();
                } catch (Exception ignored) {
                    // best effort: the client is already unusable
                }
                mscp.addMetaStoreClient();
            } catch (Exception ex) {
                // NOTE(review): if this fires after the cli.release() above,
                // release() runs twice — assumed idempotent; confirm in
                // MetaStoreClientPool.
                logger.error(ex, ex);
                cli.release();
            }
        }
    }

    /**
     * Creates the (initially empty) per-HNode message station map for a table,
     * based on the nodes of its freshly-built rule.
     */
    private void setMSGStationForRule(String table) {
        Rule rule = tableToRule.get(table);
        ConcurrentHashMap<HNode, ConcurrentHashMap<INode, ConcurrentLinkedQueue>> msgTransferStation = new ConcurrentHashMap<HNode, ConcurrentHashMap<INode, ConcurrentLinkedQueue>>();
        ArrayList nodeurls = rule.getNodes();
        for (Iterator itit = nodeurls.iterator(); itit.hasNext();) {
            HNode node = (HNode) itit.next();
            ConcurrentHashMap<INode, ConcurrentLinkedQueue> chm = new ConcurrentHashMap<INode, ConcurrentLinkedQueue>();
            msgTransferStation.put(node, chm);
        }
        tableToMSGStation.put(table, msgTransferStation);
    }

    /** Starts {@code datasenderThreadSize} sender threads for the given table. */
    private void initDataSender(String table) {
        for (int i = 0; i < datasenderThreadSize; i++) {
            DataSenderThread dst = new DataSenderThread(table);
            Thread tdst = new Thread(dst);
            tdst.setName("DataSenderThread-" + table);
            tdst.start();
        }
    }
}
