package cn.edu.bjtu.core;

import cn.edu.bjtu.core.api.SparkRuntimeNeuronNetwork;
import org.deeplearning4j.iterator.CnnSentenceDataSetIterator;
import org.deeplearning4j.iterator.LabeledSentenceProvider;
import org.deeplearning4j.iterator.provider.CollectionLabeledSentenceProvider;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.api.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

import java.io.File;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

/**
 * Created by alex on 17/6/16.
 */
public class NetworkModel implements Serializable,SparkRuntimeNeuronNetwork{
    /** Maps raw network output vectors to human-readable categories; assigned in {@link #load()}. */
    TextCategorizationManager labelManager = null;
    /** Underlying network implementation, selected by {@link SparkConfiguration#TYPE} in the constructor. */
    protected NeuronNetwork net =  null;
    /**
     * Tokenizer used to split documents for the CNN sentence iterator.
     * NOTE(review): never assigned anywhere in this class — presumably injected by a
     * subclass or caller; confirm, otherwise {@link #predictDocument(String)} builds the
     * iterator with a null tokenizer factory.
     */
    protected TokenizerFactory tf = null;
    /** Pre-trained word2vec vectors, loaded from the classpath in {@link #load()}. */
    WordVectors w2v = null;

    /**
     * Creates the model wrapper, instantiating the network implementation configured by
     * {@link SparkConfiguration#TYPE}.
     *
     * @throws RuntimeException if the configured network type is not supported
     */
    public NetworkModel(){
        switch (SparkConfiguration.TYPE){
            case ComputationGraph:
                net = new SparkComputationGraph();
                break;
            default:
                throw new RuntimeException("Not Support");
        }

    }

    /**
     * Loads the word2vec model, the trained network weights and the category label
     * metadata. Must be called before {@link #predictDocument(String)}.
     *
     * @throws RuntimeException wrapping any failure during loading (resource resolution,
     *         deserialization, network restore)
     */
    public void load(){
        try {
            File f   =  new File(this.getClass().getResource(SparkConfiguration.W2V).toURI());
            // NOTE: resolving a classpath resource to a File may fail inside a Spark
            // executor (the resource can live inside a jar) — verify in the cluster env.
            w2v = WordVectorSerializer.readWord2VecModel(f);
            net.load();
            TextCategorizationManager.get().restore();
            labelManager = TextCategorizationManager.get();
        }catch (Exception e){
            throw new RuntimeException(e.getMessage(),e);
        }
    }

    /** Forwards raw feature arrays through the underlying network. */
    @Override
    public INDArray[] output(INDArray... input) {
        return net.output(input);
    }

    /**
     * Runs the network on a single document and returns its ranked classification result.
     * Requires {@link #load()} to have been called first.
     *
     * @param doc the raw document text to classify
     * @return the classification pairs (label + score) for the document
     * @throws IllegalStateException if the document yields no batches (e.g. empty or
     *         untokenizable input), so the network produced no output
     */
    @Override
    public TextCategorizationManager.ClassificationPair[] predictDocument(String doc) {

        List<String> docs = new ArrayList<String>(1);
        docs.add(doc);
        List<String> labels = new ArrayList<>(1);
        // FIXME: placeholder label — the iterator requires one label per sentence but it
        // is ignored at inference time; revisit if label validation is ever added.
        labels.add("1");
        LabeledSentenceProvider lsp = new CollectionLabeledSentenceProvider(docs,labels);
        INDArray[] res = null;
        // BUGFIX: previously passed null word vectors; use the model loaded in load(),
        // otherwise the w2v field is loaded but never used and the iterator has no embeddings.
        DataSetIterator dsi = getCNNDataSet(tf,lsp,w2v,SparkConfiguration.SEN_LEN,32);
        while(dsi.hasNext()){
            DataSet ds = dsi.next();
            res = net.output(ds.getFeatures());
        }
        if (res == null) {
            // Guard against an opaque NullPointerException on res[0] below.
            throw new IllegalStateException("No network output produced for document");
        }
        INDArray labelVal = res[0];
        return TextCategorizationManager.get().getDesc(labelVal);

    }

    /**
     * Builds a CNN sentence {@link DataSetIterator} over the given sentence provider.
     *
     * @param tf     tokenizer factory used to split sentences into tokens
     * @param sp     provider of (sentence, label) pairs
     * @param w2v    word vectors used to embed tokens
     * @param senLen maximum sentence length (longer sentences are truncated)
     * @param batch  minibatch size
     * @return the configured iterator (unnormalized word vectors, per original behavior)
     */
    private DataSetIterator getCNNDataSet(TokenizerFactory tf, LabeledSentenceProvider sp, WordVectors w2v,int senLen,int batch){
        return new CnnSentenceDataSetIterator.Builder()
                .tokenizerFactory(tf)
                .sentenceProvider(sp)
                .wordVectors(w2v)
                .minibatchSize(batch)
                .maxSentenceLength(senLen)
                .useNormalizedWordVectors(false)
                .build();
    }



}
