/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package cn.ac.iie.ulss.indexer.worker;

import cn.ac.iie.store.hbase.hbaseop.HbaseUtilOp;
import cn.ac.iie.ulss.indexer.writer.LuceneConfig;
import cn.ac.iie.ulss.indexer.mqproducer.RocketMQProducer;
import cn.ac.iie.ulss.indexer.metric.DataIndexerMetricReportor;
import cn.ac.iie.ulss.indexer.metastore.MetastoreWrapper;
import cn.ac.iie.ulss.indexer.runenvs.Constants;
import cn.ac.iie.ulss.indexer.runenvs.DefaultConstants;
import cn.ac.iie.ulss.indexer.runenvs.GlobalParas;
import cn.ac.iie.ulss.indexer.writer.DataSourceConfig;
import cn.ac.iie.ulss.indexer.writer.LuceneFileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.exception.ZkNodeExistsException;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hive.metastore.api.SFile;
import org.apache.hadoop.hive.metastore.api.SFileLocation;
import org.apache.log4j.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;

public class IndexControler implements Runnable {

    public static Logger log = Logger.getLogger(IndexControler.class.getName());
    //public AtomicInteger tableChangeFlag = null;
    public String dbName;
    public String tbName;
    public String tableType;
    public String timeLable;
    public AtomicInteger tableChangeFlag = new AtomicInteger(0);
    public AtomicBoolean truncateFlag = new AtomicBoolean(true);
    public Long file_id;
    public DataSourceConfig dc;
    public AtomicBoolean preEnd;
    public AtomicBoolean isEnd;
    public boolean isOver = false;
    public AtomicBoolean isAllOver;
    public AtomicBoolean isShouldNewRaw;
    public ArrayBlockingQueue inbuffer;
    public LuceneFileWriter normalLucWriter;
    private IndexWriter iw = null;
    private List<SFile> Listsf = null; //实际上有且只有一个文件
    AtomicBoolean isDiskBad;
    AtomicBoolean isFileStatusBad;
    AtomicLong willWriteNum;
    AtomicLong failWriteNum;
    ExecutorService threadPool;
    private LuceneConfig luceneConfig = null;
    private boolean isOuterIndexEnable = false;
    private HashSet<String> outerIndexFieldsSet = new HashSet<String>();
    private ConcurrentHashMap<String, HashSet<Integer>> numberSets = new ConcurrentHashMap<String, HashSet<Integer>>();
    private ArrayList<String> hbaseKeyColumns = null;
    private ArrayList<String> hbaseStoreColumns = null;

    /**
     * Builds the controller for a single lucene file.
     *
     * @param dsc          per-table data source configuration (db/table name, time label)
     * @param buf          input queue of avro {@code GenericRecord} batches to be indexed
     * @param norLucWriter holder of the {@code IndexWriter} (and SFile metadata) for this file
     * @param f_id         metastore id of the file being written
     */
    public IndexControler(DataSourceConfig dsc, ArrayBlockingQueue buf, LuceneFileWriter norLucWriter, long f_id) {
        dc = dsc;
        dbName = dsc.getDbName();
        tbName = dsc.getTbName();        
        timeLable = dsc.getTimeLable();
        // start in the "normal" phase; TABLE_CHAGING_TIME is set externally during schema changes
        tableChangeFlag.getAndSet(Constants.TABLE_NORMAL_TIME);
        truncateFlag.set(true);
        preEnd = new AtomicBoolean(false);
        isEnd = new AtomicBoolean(false);
        isAllOver = new AtomicBoolean(false);
        isShouldNewRaw = new AtomicBoolean(true);
        normalLucWriter = norLucWriter;
        inbuffer = buf;
        file_id = f_id;
        // index 0 is the only writer slot used by this controller
        iw = this.normalLucWriter.getWriterMap().get(0);
        /*
         * fix me
         */
        if (!GlobalParas.isTestMode) {
            // SFile metadata is only available when talking to a real metastore
            Listsf = this.normalLucWriter.getSfMap().get(0);
        }
        isDiskBad = new AtomicBoolean(false);
        isFileStatusBad = new AtomicBoolean(false);
        /*
         */
        willWriteNum = new AtomicLong(0);
        failWriteNum = new AtomicLong(0);

        // fall back to the default lucene config when no table-specific one exists
        if ((luceneConfig = GlobalParas.luceneConfigMap.get(this.tbName)) == null) {
            luceneConfig = GlobalParas.luceneConfigMap.get(Constants.writer_default_config);
        }
        tableType = GlobalParas.tableTypes.get(this.dbName + DefaultConstants.tableFullnNameDelimiter + this.tbName);
        hbaseKeyColumns = GlobalParas.hbaseKeyColumns.get(this.dbName + DefaultConstants.tableFullnNameDelimiter + this.tbName);
        hbaseStoreColumns = GlobalParas.hbaseStoreColumns.get(this.dbName + DefaultConstants.tableFullnNameDelimiter + this.tbName);
    }

    /**
     * Main control loop for one lucene index file.
     *
     * Repeatedly: drains records from {@code inbuffer} into pooled
     * {@link LuceneDataWriter} tasks (bounded parallelism), periodically commits
     * the {@link IndexWriter} and flushes outer-index sets to HBase, and detects
     * idleness / shutdown to end the file. On exit it does a final commit,
     * force-merges under a global per-device scheduler, closes the writer, and
     * reports the file's status to the metastore.
     */
    @Override
    public void run() {
        int tone_up = 2;
        int sleepMilis = 40;           // poll interval of the control loop, in ms
        int printGab = 1000;           // log throttling interval, in ms
        int printCount = 0;

        // pool of reusable writer tasks, sized to twice the max parallelism
        ArrayBlockingQueue<LucenePoolWriter> writerPool = new ArrayBlockingQueue<LucenePoolWriter>(GlobalParas.luceneWriterMaxParallelNum * tone_up);
        ArrayList<Future> executeResultPool = new ArrayList<Future>(GlobalParas.luceneWriterMaxParallelNum * tone_up);
        Iterator<Future> it = null;
        Future tmpFuture = null;
        Date currentTime = new Date();
        Date commitTime = new Date();

        int currentParallelNum = 0;
        this.isOver = false;
        boolean isWaitingShutSignal = true;
        int bufferEmptyCount = 0;      // consecutive polls that found the input buffer empty
        long currentDocNum = 0;
        long oldDocNum = 0;
        long lastWriteTime = System.currentTimeMillis();
        long tmp = System.currentTimeMillis();
        long begin = System.currentTimeMillis();

        // pre-populate the writer pool
        for (int i = 0; i < GlobalParas.luceneWriterMaxParallelNum * tone_up; i++) {
            LucenePoolWriter pw = new LucenePoolWriter();
            pw.dataWriter = new LuceneDataWriter();
            pw.result = null;
            try {
                writerPool.put(pw);
            } catch (Exception ex) {
                log.error(ex, ex);
            }
        }

        GenericRecord tmpGenericRecord = null;
        GenericArray docSet = null;
        int bufferSizeInfact = 0;
        LuceneDataWriter dataWriter = null;
        LucenePoolWriter pw = null;
        Future future = null;
        boolean isRecordToomany = false;
        int sendBadInfoTimes = 0;
        while (true) {
            if (isOver) {
                log.info("the index control thread stop done");
                break;
            }
            try {
                Thread.sleep(sleepMilis);
            } catch (InterruptedException ex) {
                // NOTE(review): interrupt status is swallowed here, not re-asserted
            }
            if (this.inbuffer.isEmpty()) {
                bufferEmptyCount++;
            } else {
                bufferEmptyCount = 0;
            }

            if (!GlobalParas.isShouldExit.get()) {
                // slightly before the hard idle timeout, mark the file as pre-ended for safety
                if (bufferEmptyCount >= (1000 / sleepMilis) * (GlobalParas.maxDelaySeconds - GlobalParas.maxDelaySeconds / 50)) {
                    if (!preEnd.get()) {
                        log.info("set the file pre end to true for safe");
                        preEnd.set(true);
                    }
                }
                // the buffer has been empty for maxDelaySeconds worth of polls (e.g. ~300s),
                // i.e. no data arrived in that window, so the file is closed automatically
                if (bufferEmptyCount >= (1000 / sleepMilis) * GlobalParas.maxDelaySeconds) {
                    isEnd.set(true);
                }
            } else {
                // process-wide shutdown requested: restart the idle count and wait the
                // (shorter) close delay before ending the file
                if (isWaitingShutSignal) {
                    bufferEmptyCount = 0;
                    isWaitingShutSignal = false;
                    log.info("receive the kill signal,will do something ...");
                }
                if (bufferEmptyCount >= (1000 / sleepMilis) * GlobalParas.closeDelay.get()) {
                    isEnd.set(true);
                    log.info("the file will close,will set the write thread close");
                }
            }

            it = executeResultPool.iterator();
            /*
             * Compute the current parallelism by reaping finished futures. Parallelism per
             * file is capped: the index writer peaks in efficiency around 8 concurrent
             * writers, beyond which extra threads are wasted.
             */
            while (it.hasNext()) {

                tmpFuture = it.next();
                if (tmpFuture.isDone()) {
                    it.remove();
                }
            }
            currentParallelNum = executeResultPool.size();
            // once all in-flight writes drained during a table change, acknowledge it
            if (currentParallelNum == 0 && this.tableChangeFlag.get() == Constants.TABLE_CHAGING_TIME) {
                this.tableChangeFlag.getAndSet(Constants.TABLE_CHAGED_TIME);
                try {
                    Thread.sleep(1000L);
                } catch (InterruptedException ex) {

                }
            }
            printCount++;
            if (printCount % (printGab / sleepMilis) == 0) {
                if (currentParallelNum >= 4) {
                    log.info("before dispatch new task,the parallel num is " + currentParallelNum + ",the data pool size is " + this.inbuffer.size());
                }
            }

            // dispatch a new write task when data is pending and parallelism allows
            if (!this.inbuffer.isEmpty() && currentParallelNum < GlobalParas.luceneWriterMaxParallelNum) {
                bufferEmptyCount = 0;
                try {
                    pw = null;
                    future = null;
                    pw = writerPool.take();
                    future = pw.result;
                } catch (Exception ex) {
                    log.error(ex, ex);
                }
                // only reuse a pooled writer whose previous submission (if any) has finished
                if (future == null || (future != null && future.isDone())) {
                    dataWriter = pw.dataWriter;
                    if (dataWriter != null) {
                        dataWriter.dbName = this.dbName;
                        dataWriter.tableName = this.tbName;
                        dataWriter.file_id = this.file_id;
                        dataWriter.isDiskBad = this.isDiskBad;
                        dataWriter.willwriteDocNum = this.willWriteNum;
                        dataWriter.failwriteDocNum = this.failWriteNum;
                        dataWriter.writer = this.normalLucWriter.getWriterMap().get(0);
                        dataWriter.isOuterIndexEable = this.isOuterIndexEnable;
                        dataWriter.outerIndexFieldsSet = this.outerIndexFieldsSet;
                        dataWriter.numberSets = this.numberSets;
                        dataWriter.hbaseKeyColumns = this.hbaseKeyColumns;
                        dataWriter.hbaseStoreColumns = this.hbaseStoreColumns;
                        dataWriter.tableType = this.tableType;
                        bufferSizeInfact = 0;
                        tmp = System.currentTimeMillis();
                        // accumulate records until the batch is big enough or the submit timeout fires
                        while (true) {
                            // bufferSizeInfact = 0; (bfs change: accumulate across records, do not reset here)
                            tmpGenericRecord = null;
                            // only consume input while the table is in its normal phase
                            if (this.tableChangeFlag.get() == Constants.TABLE_NORMAL_TIME) {
                                try {
                                    tmpGenericRecord = (GenericRecord) this.inbuffer.poll(50, TimeUnit.MILLISECONDS);
                                } catch (Exception ex) {
                                    log.error(ex, ex);
                                }
                            }

                            if (tmpGenericRecord != null) {
                                dataWriter.innerDataBuf.add(tmpGenericRecord);
                                // each record carries an array of docs; count docs, not records
                                bufferSizeInfact += ((GenericData.Array<GenericRecord>) tmpGenericRecord.get(GlobalParas.docs_set)).size();
                            }
                            if ((bufferSizeInfact >= GlobalParas.writeMinNumOnce || (System.currentTimeMillis() - tmp) >= 1000 * GlobalParas.submitTimeout) && truncateFlag.get() == true) {
                                log.info("will submit one task ...");
                                Future f = threadPool.submit(dataWriter);
                                pw.result = f;
                                executeResultPool.add(f);
                                currentParallelNum++;
                                lastWriteTime = System.currentTimeMillis();
                                break;
                            }
                        }
                    } else {
                        log.warn("null command,it is wrong ...");
                    }
                }
                try {

                    writerPool.put(pw);
                } catch (Exception e) {
                    log.error(e, e);
                }
            }
            if (printCount % (printGab / sleepMilis) == 0) {
                if (currentParallelNum >= 4) {
                    log.info("after dispatch new task,the parallel num is " + currentParallelNum + ",the data pool size is " + this.inbuffer.size());
                }
                printCount = 0;
            }

            currentTime = new Date();
            // periodic commit cycle: flush outer-index sets to HBase, then commit lucene
            if (currentTime.getTime() - commitTime.getTime() >= GlobalParas.commitgab * 1000) {
                for (String fieldName : this.numberSets.keySet()) {
                    HashSet<Integer> tmpHashSet = this.numberSets.get(fieldName);
                    if (tmpHashSet.size() >= 1) {
                        int inserBatchSize = tmpHashSet.size();
                        String rowkey = this.dbName + DefaultConstants.tableFullnNameDelimiter + this.tbName + ";"
                                + System.currentTimeMillis() + ";" + this.file_id + ";" + fieldName;
                        this.safeFlushOuterIndex(GlobalParas.outerindexHbaseTable, rowkey, tmpHashSet);
                        log.info("insert " + inserBatchSize + " outer index to hbase use milis " + (System.currentTimeMillis() - currentTime.getTime()));
                    }
                }
                try {
                    commitTime.setTime(currentTime.getTime());
                    log.info("now do commit for " + this.dbName + "." + this.tbName + ",ram size in KB " + iw.ramSizeInBytes() / 1024);
                    begin = System.currentTimeMillis();
                    if (iw.numDocs() != 0) {
                        iw.commit();
                    }
                    /*
                     * After every successful commit, reset isDiskBad to false. If the commit
                     * fails it throws, so this.isDiskBad.set(false) is skipped — which turns
                     * this into a periodic detect-and-recover mechanism that lets file writing
                     * recover from an abnormal disk state.
                     */
                    this.isDiskBad.set(false);

                    currentDocNum = iw.maxDoc();
                    log.info("done commit for " + this.dbName + "." + this.tbName + ",increase doc num " + (currentDocNum - oldDocNum) + ",use " + (System.currentTimeMillis() - begin) + " ms,ram size in KB " + iw.ramSizeInBytes() / 1024);
                    oldDocNum = currentDocNum;
                    // file holds too many docs: report it once and flag it bad so a new file is opened
                    if (currentDocNum >= this.luceneConfig.getMaxdocs_singlefile()) {
                        if (sendBadInfoTimes < 1) {
                            String information = file_id + "|" + GlobalParas.tablename2Mqname.get(this.dbName + "." + this.tbName) + "|||" + GlobalParas.hostName;
                            log.info("there are too many docs in the file,will open a new file,doc num is : " + currentDocNum + ",the information is " + information);
                            RocketMQProducer.sendMessage(GlobalParas.error_file_mq, information.getBytes(), -1);
                            MetastoreWrapper.setFileBad(file_id);
                            sendBadInfoTimes++;
                        }
                        isRecordToomany = true;
                    }

                    // NOTE(review): oldDocNum was just set to currentDocNum above, so this
                    // metric delta is always 0 — looks like a bug, confirm intended value
                    if (GlobalParas.isSendMetric) {
                        DataIndexerMetricReportor.sendMetric(GlobalParas.ip + "_" + System.nanoTime(), this.tbName, this.dbName, (currentDocNum - oldDocNum), "normal", "out", "", "DP");
                    }
                } catch (CorruptIndexException ex) {
                    log.error(ex, ex);
                    if (GlobalParas.isSendMetric) {
                        DataIndexerMetricReportor.sendMetric(GlobalParas.ip + "_" + System.nanoTime(), this.tbName, this.dbName, this.willWriteNum.get(), "abnormal", "out", ex.toString(), "DP");
                        this.willWriteNum.set(0l);
                    }
                } catch (IOException ex) {
                    log.error(ex, ex);
                    if (GlobalParas.isSendMetric) {
                        DataIndexerMetricReportor.sendMetric(GlobalParas.ip + "_" + System.nanoTime(), this.tbName, this.dbName, this.willWriteNum.get(), "abnormal", "out", ex.toString(), "DP");
                        this.willWriteNum.set(0l);
                    }
                } catch (Exception ex) {
                    log.error(ex, ex);
                    if (GlobalParas.isSendMetric) {
                        DataIndexerMetricReportor.sendMetric(GlobalParas.ip + "_" + System.nanoTime(), this.tbName, this.dbName, this.willWriteNum.get(), "abnormal", "out", ex.toString(), "DP");
                        this.willWriteNum.set(0l);
                    }
                }
            }
            if (!isEnd.get()) {
                continue;
            } else {
                log.info("will clear the data and flush to the disk,then commit,close ... ");
                isOver = true;
            }
        }

        // ---- end-of-file phase: final flush, commit, force-merge, close ----
        long file_legth = 0l;
        long doc_num = 0l;
        long bgbg = 0;
        log.info("the normal index writer begin to commit,the last time,wait ...");
        String device = "";
        try {
            log.info("the normal index writer " + iw.toString() + " commit the last time");
            begin = System.currentTimeMillis();
            bgbg = begin;
            // flush any remaining outer-index sets before the final commit
            for (String fieldName : this.numberSets.keySet()) {
                HashSet<Integer> tmpHashSet = this.numberSets.get(fieldName);
                int inserBatchSize = tmpHashSet.size();
                String rowkey = this.dbName + DefaultConstants.tableFullnNameDelimiter + this.tbName + ";"
                        + System.currentTimeMillis() + ";" + this.file_id + ";" + fieldName;
                this.safeFlushOuterIndex(GlobalParas.outerindexHbaseTable, rowkey, tmpHashSet);
                log.info("insert " + inserBatchSize + " outer index to hbase use milis " + (System.currentTimeMillis() - currentTime.getTime()));
            }
            if (iw.numDocs() != 0) {
                iw.commit();
            }
            doc_num = iw.maxDoc();
            log.info("at last indexwriter commit,doc num is " + doc_num + ",use " + (System.currentTimeMillis() - begin) + " ms,ram size in KB " + iw.ramSizeInBytes() / 1024);
            begin = System.currentTimeMillis();
            // sum the on-disk size of all segment files to decide the merge strategy
            try {
                String[] ss = iw.getDirectory().listAll();
                if (ss != null) {
                    for (int i = 0; i < ss.length; i++) {
                        file_legth += iw.getDirectory().fileLength(ss[i]);
                    }
                }
            } catch (Exception e) {
                log.error(e, e);
            }
            log.info("caculate the file size use " + (System.currentTimeMillis() - begin) + " ms,file size in MB : " + file_legth / 1000000);

            if (file_legth / 1000000 <= this.luceneConfig.getAbnormalForceMergeMB()) {
                // find the storage device of this file's local replica
                SFile file = Listsf.get(0);
                try {
                    for (SFileLocation lo : file.getLocations()) {
                        if (GlobalParas.hostName.equalsIgnoreCase(lo.getNode_name())) {
                            device = lo.getDevid();
                            break;
                        }
                    }
                } catch (Exception e) {
                    log.error(e, e);
                }
                /* begin of merge scheduler */
                while (true) {
                    synchronized (GlobalParas.mergeControlLock) {
                        // merge parallelism must stay below the configured limit, and
                        // a device may run at most one merge at a time
                        if ((GlobalParas.currentMergeFileNum.get() < GlobalParas.maxParallelMergeFileNum)) {
                            if (!GlobalParas.mergeDevice.contains(device)) {
                                GlobalParas.currentMergeFileNum.addAndGet(1);
                                log.info("put device " + device + " to the device map，will begin a new merge");
                                GlobalParas.mergeDevice.put(device, device);
                                break;
                            } else {
                                log.info("device " + device + " is in merge state ，will wait ... ");
                            }
                        } else {
                            log.warn("now forceMerge file num is " + GlobalParas.currentMergeFileNum.get() + ",will wait ...");
                        }
                    }
                    try {
                        Thread.sleep(1000 * 30);
                    } catch (Exception e) {
                        log.error(e, e);
                    }
                }
                /* end of merge scheduler */

                begin = System.currentTimeMillis();
                if (file_legth / 1000000 >= this.luceneConfig.getNormalForceMergeMB()) {   // abnormal size, but still within a controllable range
                    log.info("will begin abnormal forceMerge,now forceMerge file num is " + GlobalParas.currentMergeFileNum.get());
                    try {
                        iw.forceMerge(this.luceneConfig.getAbnormalForceMerge_segmemt_num(), true);
                    } catch (Exception e) {
                        log.error(e, e);
                    } finally {
                        GlobalParas.currentMergeFileNum.decrementAndGet();
                    }
                } else {  // normal size
                    log.info("will begin normal forceMerge,now forceMerge file num is " + GlobalParas.currentMergeFileNum.get() + ",force merge it to " + this.luceneConfig.getNormalForceMerge_segmemt_num());
                    try {
                        iw.forceMerge(this.luceneConfig.getNormalForceMerge_segmemt_num(), true);
                    } catch (Exception e) {
                        log.error(e, e);
                    } finally {
                        GlobalParas.currentMergeFileNum.decrementAndGet();
                    }
                }
                log.info("at last indexwriter forceMerge done,doc num in is " + (doc_num = iw.maxDoc()) + ",use " + (System.currentTimeMillis() - begin) + " ms ");
            } else {  // abnormally large — out of control, must not be merged
                log.warn("the file is huge,so will not merge,size in MB : " + file_legth / 1000000);
            }

            log.info("at last indexwriter commit、forceMerge all done, doc num in it " + (doc_num = iw.maxDoc()) + ",use time in total is " + (System.currentTimeMillis() - bgbg) + " ms ");

            begin = System.currentTimeMillis();
            iw.close(true);
            log.info("close indexwriter use  --> " + (System.currentTimeMillis() - begin) + " ms ");

            log.info("at last data size in buffer is 0 ? --> " + this.inbuffer.size());
            if (GlobalParas.isSendMetric) {
                DataIndexerMetricReportor.sendMetric(GlobalParas.ip + "_" + System.nanoTime(), this.tbName, this.dbName, (currentDocNum - oldDocNum), "normal", "out", "", "DP");
            }
        } catch (Exception e) {
            log.error(e, e);
            if (GlobalParas.isSendMetric) {
                DataIndexerMetricReportor.sendMetric(GlobalParas.ip + "_" + System.nanoTime(), this.tbName, this.dbName, this.willWriteNum.get(), "abnormal", "out", e.toString(), "DP");
                this.willWriteNum.set(0l);
            }
        } finally {
            // release the device slot claimed by the merge scheduler (no-op for "")
            GlobalParas.mergeDevice.remove(device);
        }

        // when closing the file, record its status in the corresponding metadata object
        if (!GlobalParas.isTestMode) {
            if (isFileStatusBad.get() || isRecordToomany) {
                Listsf.get(0).setLoad_status(1);
            }
            /*
             *  fix me
             */
            // NOTE(review): "t_ybrz".contains(tbName) matches any SUBSTRING of
            // "t_ybrz" (e.g. "ybrz"); confirm whether equals() was intended
            int rep_nr = 2;
            if ("t_ybrz".contains(this.tbName)) {
                rep_nr = 1;
            }

            // replica count
            if (MetastoreWrapper.closeFile(Listsf.get(0), doc_num, file_legth, rep_nr)) {
                // MetastoreWrapper.retryCloseUnclosedFile(GlobalParas.unclosedFilePath);
                log.info("close the file " + this.file_id + " succeed");
            } else {
                MetastoreWrapper.writeFileInfo(GlobalParas.unclosedFilePath, file_id, doc_num, file_legth);// record the unclosed file in a separate file
                log.warn("close current file:" + this.file_id + " " + doc_num + " " + file_legth + "fail,there is no need to close the unclosed file");
            }
        }
        /*
         * ---- print info about the files currently being written, for online analysis ----
         */
        String tb = "";
        int number = 0;
        HashMap<String, Integer> tb2FileMap = new HashMap<String, Integer>();
        for (IndexControler c : GlobalParas.id2Createindex.values()) {
            tb = c.dbName + "." + c.tbName;
            if (!tb2FileMap.containsKey(tb)) {
                number = 1;
            } else {
                number = tb2FileMap.get(tb) + 1;
            }
            tb2FileMap.put(tb, number);
        }
        log.info("close lucene sfile done，now write file number is " + GlobalParas.id2Createindex.size() + "," + tb2FileMap.toString());
        /*
         */
        isAllOver.set(true);
    }

    /**
     * Flushes one field's outer-index number set to HBase under the writer lock,
     * then empties that set so the next commit cycle does not re-insert it.
     *
     * @param table     target HBase table for the outer index
     * @param rowKey    row key ("db:tb;timestamp;fileId;fieldName")
     * @param numberSet the pending numbers for this field
     */
    private void safeFlushOuterIndex(HTable table, String rowKey, HashSet<Integer> numberSet) {
        synchronized (this.iw) {
            HbaseUtilOp.hbaseSimplePutData(table, rowKey, numberSet);
            // Bug fix: this previously called this.numberSets.clear(), which wiped the
            // pending sets of ALL fields while both call sites iterate
            // numberSets.keySet() field by field — silently dropping the un-flushed
            // outer-index entries of every remaining field (and risking an NPE when
            // the caller's numberSets.get(fieldName) returns null mid-iteration).
            // Only the set that was just written may be cleared.
            numberSet.clear();
        }
    }

    /**
     * Ad-hoc manual test: ensures a persistent ZooKeeper node exists.
     * Connects to a hard-coded test ZK ensemble; not used by the indexer itself.
     */
    public static void main(String[] args) {
        ZkClient zc = new ZkClient("192.168.120.221:3181");
        String path = "/dfsaafs/dsfa";
        try {
            if (!zc.exists(path)) {
                try {
                    // createParents=true so intermediate nodes are created as needed
                    zc.createPersistent(path, true);
                } catch (final ZkNodeExistsException e) {
                    // benign race: another client created the node first
                    e.printStackTrace();
                } catch (final Exception e) {
                    e.printStackTrace();
                }
            }
        } catch (Exception ex) {
            // was an empty catch — connectivity failures were silently swallowed;
            // surface them so the manual test is actually diagnosable
            ex.printStackTrace();
        } finally {
            // release the ZK session/connection instead of leaking it
            zc.close();
        }
    }
}
//
//
//
//if (this.inbuffer.size() >= Indexer.writeInnerpoolSize || System.currentTimeMillis() - lastWriteTime >= 1000 * 3) {
//if (!this.inbuffer.isEmpty() && currentParallelNum < Indexer.luceneWriterMaxParallelNum) {  //并发数不超过限制，并且输入缓冲区中不为空
/*
 * 
 this.inbuffer.drainTo(dataWriter.innerDataBuf, Indexer.writeInnerpoolBatchDrainSize);
 for (GenericRecord ms : dataWriter.innerDataBuf) {
 docSet = (GenericData.Array<GenericRecord>) ms.get(Indexer.docs_set);
 bufferSizeInfact += docSet.size();
 docSet = null;
 * 
 * 
 }
 */
