package com.rizzo.back.helper;

import com.aliasi.chunk.AbstractCharLmRescoringChunker;
import com.aliasi.chunk.Chunk;
import com.aliasi.hmm.HiddenMarkovModel;
import com.aliasi.hmm.HmmDecoder;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;
import com.aliasi.util.AbstractExternalizable;
import com.rizzo.back.util.domain.SentenceData;
import org.apache.commons.lang.StringUtils;
import org.apache.lucene.index.memory.AnalyzerUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.io.ClassPathResource;
import org.springframework.core.io.Resource;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.*;

/**
 * Helper that wraps LingPipe models for sentence detection, part-of-speech
 * tagging and named-entity markup, plus a Lucene-based sentence splitter.
 *
 * <p>Inject the model file names, the {@link TokenizerFactory} and the
 * {@link SentenceModel}, then call {@link #init()} once before using any of
 * the extraction methods. Initialization is not thread-safe.</p>
 */
public class SentenceHelper {

    private static final Logger LOGGER = LoggerFactory.getLogger(SentenceHelper.class);

    // Classpath locations of the serialized LingPipe model files.
    private String markovModelFileName;
    private String entityModelFileName;

    private TokenizerFactory tokenizerFactory;
    private SentenceModel sentenceModel;
    private HmmDecoder decoder;
    private AbstractCharLmRescoringChunker chunker;

    /**
     * Loads the hidden Markov (POS) model into {@link #decoder} and the
     * named-entity model into {@link #chunker}.
     *
     * @throws IOException            if either model file is unreadable or deserialization fails
     * @throws ClassNotFoundException if a serialized model class is not on the classpath
     * @throws IllegalStateException  if either model file does not exist
     */
    public void init() throws IOException, ClassNotFoundException {
        File markovFile = toReadableFile(new ClassPathResource(this.markovModelFileName),
                "Hidden Markov Model File");
        File entityFile = toReadableFile(new ClassPathResource(this.entityModelFileName),
                "Entity Model File");
        // Configure the decoder with the HMM file. Close the underlying
        // FileInputStream in finally so it cannot leak even when the
        // ObjectInputStream constructor itself throws on a corrupt stream
        // header (the previous revision leaked the stream in that case).
        FileInputStream fileInputStream = new FileInputStream(markovFile);
        try {
            ObjectInputStream objectInputStream = new ObjectInputStream(fileInputStream);
            HiddenMarkovModel hmm = (HiddenMarkovModel) objectInputStream.readObject();
            this.decoder = new HmmDecoder(hmm);
        } finally {
            fileInputStream.close();
        }
        // Configure the model-based chunker with the entity file.
        this.chunker = (AbstractCharLmRescoringChunker) AbstractExternalizable.readObject(entityFile);
    }

    /**
     * Resolves a classpath resource to an existing, readable file.
     *
     * @param resource    classpath resource pointing at a model file
     * @param description human-readable file description used in error messages
     * @return the resolved file, guaranteed to exist and be readable
     * @throws IOException           if the file exists but is not readable
     * @throws IllegalStateException if the file does not exist
     */
    private File toReadableFile(Resource resource, String description) throws IOException {
        File file = resource.getFile();
        if (!file.exists()) {
            throw new IllegalStateException(description + " not found! " + file);
        }
        if (!file.canRead()) {
            throw new IOException(description + " is not readable! " + file);
        }
        return file;
    }

    /**
     * Splits text into sentences using Lucene's memory-index analyzer utility.
     *
     * @param information text to split
     * @param limit       sentence limit, passed straight through to
     *                    {@code AnalyzerUtil.getSentences}
     * @return the detected sentences
     */
    public String[] getLuceneSentences(String information, int limit) {
        return AnalyzerUtil.getSentences(information, limit);
    }

    /**
     * Splits text into sentences with the configured LingPipe tokenizer
     * factory and sentence model.
     *
     * @param information text to split
     * @return the detected sentences, or an empty list when the model finds
     *         no sentence boundary
     */
    public List<String> getLingPipeSentences(String information) {
        List<String> sentences = new ArrayList<String>();
        List<String> tokenList = new ArrayList<String>();
        List<String> whiteList = new ArrayList<String>();
        Tokenizer tokenizer = tokenizerFactory.tokenizer(information.toCharArray(), 0, information.length());
        tokenizer.tokenize(tokenList, whiteList);
        String[] tokens = tokenList.toArray(new String[tokenList.size()]);
        String[] whites = whiteList.toArray(new String[whiteList.size()]);
        // boundaryIndices returns, per detected sentence, the index of its
        // final token (LingPipe SentenceModel contract).
        int[] sentenceBoundaries = this.sentenceModel.boundaryIndices(tokens, whites);
        int sentenceStart = 0;
        for (int boundary : sentenceBoundaries) {
            StringBuilder sentence = new StringBuilder();
            // whites holds one more element than tokens, so whites[j + 1] is
            // the whitespace that follows token j.
            for (int j = sentenceStart; j <= boundary; j++) {
                sentence.append(tokens[j]).append(whites[j + 1]);
            }
            sentences.add(sentence.toString());
            sentenceStart = boundary + 1;
        }
        return sentences;
    }

    /**
     * Tokenizes a sentence and tags each token with its most likely
     * part-of-speech tag (first-best HMM decoding).
     *
     * @param sentence raw sentence text
     * @return a {@link SentenceData} carrying the original sentence, its
     *         tokens and the matching POS tags
     */
    public SentenceData extractPosInfoFromSentence(String sentence) {
        SentenceData sentenceData = new SentenceData();
        sentenceData.setInitialSentence(sentence);
        char[] chars = sentence.toCharArray();
        Tokenizer tokenizer = this.tokenizerFactory.tokenizer(chars, 0, chars.length);
        String[] tokens = tokenizer.tokenize();
        sentenceData.setTokens(tokens);
        sentenceData.setTags(decoder.firstBest(tokens));
        return sentenceData;
    }

    /**
     * Marks up named entities in a sentence with XML-style tags, e.g.
     * {@code "met <Person> John </Person> today"}.
     *
     * <p>The chunker proposes up to sqrt(sentence length) candidate chunks;
     * candidates are accepted greedily in descending order of surface length,
     * discarding any candidate that overlaps an already accepted one.</p>
     *
     * <p>Fixes over the previous revision: the characters immediately before
     * and after each entity are no longer dropped from the output (chunk
     * offsets are begin-inclusive/end-exclusive), and a sentence with no
     * accepted entity no longer throws {@code ArrayIndexOutOfBoundsException}
     * — it is returned unchanged.</p>
     *
     * @param sentenceData holder whose initial sentence is scanned
     * @return the sentence with accepted entities wrapped in type tags
     */
    public String extractEntitiesFromSentence(SentenceData sentenceData) {
        String sentence = sentenceData.getInitialSentence();
        int maxNBest = (int) Math.sqrt((double) sentence.length());
        char[] cs = sentence.toCharArray();
        Iterator it = chunker.nBestChunks(cs, 0, cs.length, maxNBest);
        // Candidate surface forms plus, keyed by surface form, their offsets
        // and types. A duplicate surface form overwrites earlier offsets,
        // matching the behavior of the previous hash-based implementation.
        List<String> candidates = new ArrayList<String>();
        Map<String, Integer> startByEntity = new HashMap<String, Integer>();
        Map<String, Integer> endByEntity = new HashMap<String, Integer>();
        Map<String, String> typeByEntity = new HashMap<String, String>();
        // Guard on candidates.size() so a chunker yielding more than maxNBest
        // chunks cannot exceed the candidate budget (the old fixed-size array
        // would have overflowed).
        while (it.hasNext() && candidates.size() < maxNBest) {
            Chunk chunk = (Chunk) it.next();
            String entity = sentence.substring(chunk.start(), chunk.end());
            startByEntity.put(entity, Integer.valueOf(chunk.start()));
            endByEntity.put(entity, Integer.valueOf(chunk.end()));
            typeByEntity.put(entity, chunk.type());
            candidates.add(entity);
        }
        // Prefer longer entities when candidates overlap.
        Collections.sort(candidates, new StringLenComparator());
        // Flags marking sentence characters already claimed by an entity.
        BitSet occupied = new BitSet(sentence.length());
        List<Integer> startLocations = new ArrayList<Integer>();
        List<Integer> endLocations = new ArrayList<Integer>();
        for (String candidate : candidates) {
            Integer start = startByEntity.get(candidate);
            if (start == null) {
                continue;
            }
            int end = endByEntity.get(candidate).intValue();
            // Reject the candidate if any of its characters is already taken.
            boolean overlap = false;
            for (int j = start.intValue(); j < end; j++) {
                if (occupied.get(j)) {
                    overlap = true;
                    break;
                }
            }
            if (overlap) {
                continue;
            }
            occupied.set(start.intValue(), end);
            startLocations.add(start);
            endLocations.add(Integer.valueOf(end));
        }
        // Accepted entities never overlap, so sorting starts and ends
        // independently keeps each start paired with its own end.
        Collections.sort(startLocations);
        Collections.sort(endLocations);
        // Rebuild the sentence left to right, wrapping each accepted entity.
        StringBuilder xmlOutput = new StringBuilder();
        int currentLoc = 0;
        for (int i = 0; i < startLocations.size(); i++) {
            int start = startLocations.get(i).intValue();
            int end = endLocations.get(i).intValue();
            if (currentLoc < start) {
                // Copy untagged text up to the entity; the old code stopped
                // at start - 1 and silently dropped one character.
                xmlOutput.append(sentence.substring(currentLoc, start));
            }
            String entity = sentence.substring(start, end);
            String etype = StringUtils.capitalize(typeByEntity.get(entity));
            xmlOutput.append(" <").append(etype).append("> ")
                    .append(entity)
                    .append(" </").append(etype).append("> ");
            // Chunk ends are exclusive, so plain text resumes at 'end'; the
            // old 'end + 1' skipped one character after every entity.
            currentLoc = end;
        }
        if (currentLoc < sentence.length()) {
            xmlOutput.append(sentence.substring(currentLoc));
        }
        return xmlOutput.toString();
    }

    /**
     * Orders strings by descending length; any comparison involving
     * {@code null} yields 0 (treated as equal), matching previous behavior.
     */
    static class StringLenComparator implements Comparator<String> {
        public int compare(String o1, String o2) {
            if (o1 == null || o2 == null) {
                return 0;
            }
            // String lengths are non-negative, so this cannot overflow.
            return o2.length() - o1.length();
        }
    }

    /** @param markovModelFileName classpath location of the serialized HMM (POS) model */
    public void setMarkovModelFileName(String markovModelFileName) {
        this.markovModelFileName = markovModelFileName;
    }

    /** @param entityModelFileName classpath location of the serialized entity chunker model */
    public void setEntityModelFileName(String entityModelFileName) {
        this.entityModelFileName = entityModelFileName;
    }

    /** @param sentenceModel LingPipe sentence model used by {@link #getLingPipeSentences} */
    public void setSentenceModel(SentenceModel sentenceModel) {
        this.sentenceModel = sentenceModel;
    }

    /** @param tokenizerFactory LingPipe tokenizer factory used by all extraction methods */
    public void setTokenizerFactory(TokenizerFactory tokenizerFactory) {
        this.tokenizerFactory = tokenizerFactory;
    }
}
