package cn.genmer.test.security.machinelearning.deeplearning4j.text;

import org.deeplearning4j.models.sequencevectors.interfaces.SequenceIterator;
import org.deeplearning4j.models.word2vec.VocabWord;
import org.deeplearning4j.models.word2vec.wordstore.VocabCache;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.dataset.api.DataSetPreProcessor;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.factory.Nd4j;

import java.util.ArrayList;
import java.util.List;

/**
 * A {@link DataSetIterator} that pairs two word sequences into masked, variable-length
 * training examples for a binary sequence classifier.
 *
 * <p>For each example:
 * <ul>
 *   <li><b>Features</b>: shape {@code [miniBatch, 1, maxLength]} — the vocabulary index of
 *       each word from {@code iter1}, right-padded with zeros.</li>
 *   <li><b>Labels</b>: shape {@code [miniBatch, 2, maxLength]} — a one-hot class label
 *       ([1,0] positive, [0,1] negative) placed only at the final real time step; the class
 *       index is taken from the first word of the corresponding {@code iter2} sequence.</li>
 *   <li><b>Masks</b>: 1.0 where a real word/label exists, 0.0 over padding.</li>
 * </ul>
 *
 * <p>Not thread-safe. {@code maxLength} is recomputed per mini-batch and exposed via
 * {@link #getMaxLen()}.
 */
public class SegmentIterator implements DataSetIterator {

    private static final long serialVersionUID = -649505930720554358L;

    /** Number of output classes (binary classification). */
    private static final int NUM_CLASSES = 2;

    private int batchSize;
    private final int vocabSize;
    private int maxLength;                              // max sentence length of the most recent batch
    private final SequenceIterator<VocabWord> iter1;    // feature sentences
    private final SequenceIterator<VocabWord> iter2;    // label sequences; first word's index = class
    private final VocabCache<VocabWord> vocabCache1;
    private final VocabCache<VocabWord> vocabCache2;
    private DataSetPreProcessor preProcessor;

    // NOTE(review): never set anywhere in this class — presumably toggled externally via
    // reflection or intended for a future test-set mode; kept for compatibility. TODO confirm.
    private boolean toTestSet;

    /**
     * @param batchSize   number of examples per mini-batch
     * @param vocabSize   vocabulary size of the feature corpus
     * @param iter1       iterator over feature sentences
     * @param iter2       iterator over label sequences (first word carries the class index)
     * @param vocabCache1 vocabulary for {@code iter1}
     * @param vocabCache2 vocabulary for {@code iter2}
     */
    public SegmentIterator(int batchSize, int vocabSize, SequenceIterator<VocabWord> iter1, SequenceIterator<VocabWord> iter2,
                           VocabCache<VocabWord> vocabCache1, VocabCache<VocabWord> vocabCache2) {
        this.batchSize = batchSize;
        this.vocabSize = vocabSize;
        this.iter1 = iter1;
        this.iter2 = iter2;
        this.vocabCache1 = vocabCache1;
        this.vocabCache2 = vocabCache2;
    }

    public VocabCache<VocabWord> getVocabCache1() {
        return vocabCache1;
    }

    public VocabCache<VocabWord> getVocabCache2() {
        return vocabCache2;
    }

    /** @return the maximum sentence length observed in the most recently built batch */
    public int getMaxLen() {
        return maxLength;
    }

    /**
     * Builds the next mini-batch of up to {@code num} examples.
     *
     * <p>BUGFIX: the original ignored {@code num} outside the {@code toTestSet} path,
     * violating the {@link DataSetIterator#next(int)} contract; {@code num} is now honored
     * (backward compatible — {@link #next()} passes {@code batchSize}).
     */
    @Override
    public DataSet next(int num) {

        if (toTestSet) {
            reset();
            batchSize = num;
        }

        // ---- Collect up to `num` feature sentences (inner list = one sentence) ----
        List<List<VocabWord>> featureSentences = new ArrayList<>(num);
        for (int i = 0; i < num && iter1.hasMoreSequences(); i++) {
            featureSentences.add(iter1.nextSequence().getElements());
        }

        // ---- Collect up to `num` label sequences ----
        List<List<VocabWord>> labelSequences = new ArrayList<>(num);
        for (int i = 0; i < num && iter2.hasMoreSequences(); i++) {
            labelSequences.add(iter2.nextSequence().getElements());
        }

        // Ensure features and labels pair up even if one iterator ran out first.
        int numExamples = Math.min(featureSentences.size(), labelSequences.size());

        // Longest sentence across both sides determines the padded time dimension.
        int in1Length = 0;
        int in2Length = 0;
        for (int i = 0; i < numExamples; i++) {
            in1Length = Math.max(in1Length, featureSentences.get(i).size());
        }
        for (int i = 0; i < numExamples; i++) {
            in2Length = Math.max(in2Length, labelSequences.get(i).size());
        }
        maxLength = Math.max(in1Length, in2Length);

        // Features: [numExamples, 1, maxLength] — each time step holds the word's vocab index,
        // e.g. 283.0 128.0 ... 10.0 0.0 0.0 ... 0.0 (zero-padded tail).
        INDArray features = Nd4j.create(numExamples, 1, maxLength);
        // Labels: [numExamples, NUM_CLASSES, maxLength].
        INDArray labels = Nd4j.create(numExamples, NUM_CLASSES, maxLength);
        INDArray featuresMask = Nd4j.zeros(numExamples, maxLength);
        INDArray labelsMask = Nd4j.zeros(numExamples, maxLength);

        int[] featureIdx = new int[3];  // [example, channel(=0), timeStep]
        int[] maskIdx = new int[2];     // [example, timeStep]
        for (int i = 0; i < numExamples; i++) {
            List<VocabWord> sentence = featureSentences.get(i);
            featureIdx[0] = i;
            maskIdx[0] = i;

            int t = 0;  // time step within the sentence
            for (VocabWord vw : sentence) {
                featureIdx[2] = t;
                features.putScalar(featureIdx, vw.getIndex());
                maskIdx[1] = t;
                // Word present (not padding) at this example/time step.
                featuresMask.putScalar(maskIdx, 1.0);
                ++t;
            }

            // Class index comes from the first word of the paired label sequence.
            int classIdx = labelSequences.get(i).get(0).getIndex();
            int lastStep = sentence.size() - 1;
            // One-hot label at the final real time step: [0,1] negative, [1,0] positive.
            labels.putScalar(new int[]{i, classIdx, lastStep}, 1.0);
            // Output exists only at the final time step for this example.
            labelsMask.putScalar(new int[]{i, lastStep}, 1.0);
        }

        DataSet ds = new DataSet(features, labels, featuresMask, labelsMask);
        // BUGFIX: apply a configured preprocessor, per the DataSetIterator contract.
        if (preProcessor != null) {
            preProcessor.preProcess(ds);
        }
        return ds;
    }

    /** Features have a single channel (the raw word index) per time step. */
    @Override
    public int inputColumns() {
        // BUGFIX: was a 0 stub; features are [batch, 1, time].
        return 1;
    }

    /** Two output classes (binary classification). */
    @Override
    public int totalOutcomes() {
        // BUGFIX: was a 0 stub; labels are [batch, 2, time].
        return NUM_CLASSES;
    }

    @Override
    public boolean resetSupported() {
        // BUGFIX: reset() is implemented below, so this must report true.
        return true;
    }

    @Override
    public boolean asyncSupported() {
        return false;
    }

    @Override
    public void reset() {
        iter1.reset();
        iter2.reset();
    }

    @Override
    public int batch() {
        // BUGFIX: was a 0 stub; report the configured mini-batch size.
        return batchSize;
    }

    @Override
    public void setPreProcessor(DataSetPreProcessor preProcessor) {
        // BUGFIX: was silently dropped; store it so next() can apply it.
        this.preProcessor = preProcessor;
    }

    @Override
    public DataSetPreProcessor getPreProcessor() {
        return preProcessor;
    }

    @Override
    public List<String> getLabels() {
        // No human-readable label names are available for this corpus.
        return null;
    }

    @Override
    public boolean hasNext() {
        // A batch needs at least one sequence from each side.
        return iter1.hasMoreSequences() && iter2.hasMoreSequences();
    }

    @Override
    public DataSet next() {
        return next(batchSize);
    }

    // Remaining inherited methods requiring no custom behavior are intentionally stubbed above.
}