
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package co.edu.unal.bioingenium.kbmed.knowledge.mapping.impl;

import co.edu.unal.bioingenium.kbmed.knowledge.mapping.api.Mapping;
import co.edu.unal.bioingenium.kbmed.config.Configuration;
import co.edu.unal.bioingenium.kbmed.knowledge.ontology.OntologyMetaData;
import co.edu.unal.bioingenium.kbmed.knowledge.ontology.dao.KnowledgeSourceDAO;
import co.edu.unal.bioingenium.kbmed.knowledge.ontology.dao.KnowledgeSourceDAOFactory;
import co.edu.unal.bioingenium.kbmed.knowledge.ontology.vo.Concept;
import co.edu.unal.bioingenium.kbmed.knowledge.ontology.vo.Descriptor;
import co.edu.unal.bioingenium.kbmed.nlptools.sentencedetector.SentenceDetector;
import co.edu.unal.bioingenium.kbmed.text.filter.FilterPipe;
import co.edu.unal.bioingenium.kbmed.text.index.inverted.io.InvertedIndexIO;
import co.edu.unal.bioingenium.kbmed.knowledge.mapping.vo.ConceptIdentified;
import co.edu.unal.bioingenium.kbmed.text.index.structures.InvertedIndexStrToStr;
import co.edu.unal.bioingenium.kbmed.text.representation.WordList;
import co.edu.unal.bioingenium.kbmed.text.representation.io.WordListIO;
import co.edu.unal.bioingenium.kbmed.util.sout.OutUtil;
import co.edu.unal.bioingenium.kbmed.util.time.TimeUtil;
import ds.tree.RadixTree;
import ds.tree.io.PrefixIndexIO;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.logging.Level;
import java.util.logging.Logger;
import uk.ac.shef.wit.simmetrics.similaritymetrics.AbstractStringMetric;
import uk.ac.shef.wit.simmetrics.similaritymetrics.JaccardSimilarity;

/**
 * Soft (approximate) mapping of free text onto ontology concepts.
 *
 * <p>The input text is split into sentences; for each sentence a candidate
 * descriptor set is built via a prefix index over the ontology vocabulary and
 * a Jaccard string-similarity filter, and each candidate is then scored by
 * how well its term tokens align with the sentence tokens.
 *
 * <p>Lifecycle: construct with the ontology metadata, call {@link #init()}
 * once to load dictionaries, indexes and NLP models, then call
 * {@link #doMapping(String)} per input text.
 *
 * @author Alejandro Riveros Cruz
 */
public class SoftMapping implements Mapping {

    private OntologyMetaData ontologyMetaData;
    private FilterPipe tokenPipe;     // filters descriptor-term tokens (stop words kept)
    private FilterPipe sentencePipe;  // filters sentence tokens (numbers removed, stop words kept)
    private SentenceDetector sentenceDetector;
    // Index structures (loaded by init()).
    private RadixTree<Integer> descriptorsPrefixIndex;   // word prefix -> position in the sorted word list
    private InvertedIndexStrToStr wordByDescriptorIndex; // word -> descriptor ids containing it
    private WordList wordList;                           // sorted ontology vocabulary
    // Mapping structures (loaded from the knowledge source by init()).
    private Map<String, Descriptor> descriptorByDescriptorId;
    private Map<String, Concept> conceptByConceptId;
    // String similarity metric and the minimum similarity accepted as a token match.
    private AbstractStringMetric stringMetric;
    private final double STRING_METRIC_THRESHOLD = 0.9;

    /**
     * Creates a soft mapper for the given knowledge source. {@link #init()}
     * must be called before any mapping method is used.
     *
     * @param ontologyMetaData metadata of the ontology/knowledge source to map against
     */
    public SoftMapping(OntologyMetaData ontologyMetaData) {
        this.ontologyMetaData = ontologyMetaData;
        stringMetric = new JaccardSimilarity();
    }

    /**
     * Loads every resource needed for mapping: the descriptor and concept
     * dictionaries from the knowledge source, the word-by-descriptor inverted
     * index, the descriptor-word prefix index, the sorted word list and the
     * sentence-detector model.
     *
     * <p>NOTE(review): failures are only logged, leaving the instance
     * partially initialized — subsequent mapping calls would then fail with
     * NullPointerException. Confirm callers check initialization success.
     */
    @Override
    public void init() {
        try {
            TimeUtil.init();
            KnowledgeSourceDAO knowledgeSourceDAO = KnowledgeSourceDAOFactory.getKnowledgeSourceDAO(ontologyMetaData);
            System.out.print("Load descriptor by descriptorId from knowledge source...");
            descriptorByDescriptorId = knowledgeSourceDAO.getDescriptorsByDescriptorId(ontologyMetaData.getConceptStatusActive(), ontologyMetaData.getDescriptorStatusActive());
            System.out.print("OK\n");
            System.out.print("Load concept by conceptId from knowledge source...");
            conceptByConceptId = knowledgeSourceDAO.getConceptbyConceptId(ontologyMetaData.getConceptStatusActive());
            System.out.print("OK\n");
            knowledgeSourceDAO.disconnect();
            System.out.println("Total time " + TimeUtil.timeInSeconds() + " secs.\n");
            // Sentence tokens drop numbers; descriptor-term tokens do not.
            sentencePipe = new FilterPipe();
            sentencePipe.setRemoveNumbers(true);
            sentencePipe.setRemoveStopWords(false);
            tokenPipe = new FilterPipe();
            tokenPipe.setRemoveStopWords(false);
            System.out.print("Load word by descriptor index...");
            wordByDescriptorIndex = InvertedIndexIO.loadStrToStrInvertedIndex(Configuration.KNOWLEDGE_SOURCE_NAME + "WordByDescriptor.idx");
            System.out.print("OK\n");
            System.out.print("Load descriptors prefix index...");
            descriptorsPrefixIndex = (RadixTree<Integer>) PrefixIndexIO.load(Configuration.KNOWLEDGE_SOURCE_NAME + "DescriptorsWordsPrefix.idx");
            System.out.print("OK\n");
            System.out.print("Load word list...");
            wordList = WordListIO.loadWordList(Configuration.KNOWLEDGE_SOURCE_NAME + "WordByConcept.lst");
            System.out.print("OK\n");
            System.out.print("Load NLP models...");
            sentenceDetector = new SentenceDetector(Configuration.LANGUAGE + "SD.bin.gz");
            System.out.print("OK\n");
        } catch (ClassNotFoundException ex) {
            Logger.getLogger(SoftMapping.class.getName()).log(Level.SEVERE, null, ex);
        } catch (IOException ex) {
            Logger.getLogger(SoftMapping.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Splits {@code text} into sentences and, for each sentence, builds the
     * candidate descriptor set, scores it, sorts the results and prints every
     * concept whose score exceeds 0.6 (at most 101 per sentence).
     *
     * <p>NOTE(review): only the concepts of the LAST sentence are returned —
     * {@code identifiedConcepts} is overwritten on every iteration — and the
     * result is {@code null} when no sentence is detected. Confirm callers
     * expect this.
     *
     * @param text free text to map
     * @return identified concepts of the last sentence, sorted by score, or
     *         {@code null} if the text contains no sentence
     */
    @Override
    public List<ConceptIdentified> doMapping(String text) {
        Set<String> candidateDescriptors;
        List<ConceptIdentified> identifiedConcepts = null;
        String[] sentences = sentenceDetector.getSentences(text);
        int k;
        for (String sentence : sentences) {
            k = 0;
            System.out.println("\n\n\n***********************************************************");
            System.out.println("SENTENCE:\n" + sentence);
            candidateDescriptors = buildCandidateDescriptorSet(sentence);
            System.out.println("Total candidates: " + candidateDescriptors.size());
            System.out.println("***********************************************************\n");
            identifiedConcepts = evaluateCandidates(sentence, candidateDescriptors);
            Collections.sort(identifiedConcepts);
            System.out.println("Identified concepts:");
            for (ConceptIdentified conceptIdentified : identifiedConcepts) {
                // The list is sorted, so the first concept below the report
                // threshold ends the listing for this sentence.
                if (conceptIdentified.getScore() > 0.6) {
                    System.out.println("***********************************************************");
                    System.out.println("ConceptId: " + conceptIdentified.getConceptId());
                    System.out.println("Category: " + conceptIdentified.getCategory());
                    System.out.println("Term: " + conceptIdentified.getDescription());
                    System.out.println("Score: " + conceptIdentified.getScore());
                    OutUtil.printArray(conceptIdentified.getDetailedScore());
                    k++;
                    if (k > 100) {
                        break; // cap the per-sentence report
                    }
                } else {
                    break;
                }
            }
            if (k == 0) {
                System.out.println("No concepts for this sentence!!!");
            }
        }
        return identifiedConcepts;
    }

    /**
     * Scores every candidate descriptor against the sentence and keeps, per
     * concept, its best-scoring descriptor. The score is
     * {@code 0.5 * simMean + 0.5 * cohesiveness}; term coverage is computed
     * but currently disabled (weight 0). Finally, concepts that do not hold
     * the maximum score at any sentence-token position are discarded.
     *
     * @param sentence             the sentence being mapped
     * @param candidateDescriptors descriptor ids produced by
     *                             {@link #buildCandidateDescriptorSet(String)}
     * @return the surviving identified concepts (unsorted)
     */
    @Override
    public List<ConceptIdentified> evaluateCandidates(String sentence, Set<String> candidateDescriptors) {
        Map<String, ConceptIdentified> conceptIdentifiedsMap = new HashMap<String, ConceptIdentified>();
        List<ConceptIdentified> conceptIdentifieds = new ArrayList<ConceptIdentified>();
        List<String> sentenceTokens = sentencePipe.filter(sentence);
        int sentenceNumToken = sentenceTokens.size();
        for (String descriptorId : candidateDescriptors) {
            String conceptId = descriptorByDescriptorId.get(descriptorId).getConceptID();
            String category = conceptByConceptId.get(conceptId).getCategory();
            String term = descriptorByDescriptorId.get(descriptorId).getTerm();
            List<String> termTokens = tokenPipe.filter(term);
            int termNumTokens = termTokens.size();
            if (termNumTokens == 0) {
                continue; // nothing left of the term after filtering
            }
            // Similarity matrix: term token i vs sentence token j; entries at
            // or below the threshold stay 0.
            double[][] simMatrix = new double[termNumTokens][sentenceNumToken];
            for (int i = 0; i < termNumTokens; i++) {
                for (int j = 0; j < sentenceNumToken; j++) {
                    double similarity = stringMetric.getSimilarity(termTokens.get(i), sentenceTokens.get(j));
                    if (similarity > STRING_METRIC_THRESHOLD) {
                        simMatrix[i][j] = similarity;
                    }
                }
            }
            // Best sentence position per term token; Integer.MIN_VALUE marks
            // "no match anywhere in the sentence".
            int[] maximumIndex = new int[termNumTokens];
            for (int i = 0; i < termNumTokens; i++) {
                maximumIndex[i] = 0;
                for (int j = 1; j < sentenceNumToken; j++) {
                    if (simMatrix[i][maximumIndex[i]] < simMatrix[i][j]) {
                        maximumIndex[i] = j;
                    }
                }
                if (simMatrix[i][maximumIndex[i]] == 0) {
                    maximumIndex[i] = Integer.MIN_VALUE;
                }
            }
            // Mean of the per-row maxima over ALL term tokens (unmatched
            // tokens contribute 0 to the sum but still count in the divisor).
            double simMean = 0;
            for (int i = 0; i < termNumTokens; i++) {
                if (maximumIndex[i] >= 0) {
                    simMean += simMatrix[i][maximumIndex[i]];
                }
            }
            simMean = simMean / (double) termNumTokens;
            // Coverage: fraction of term tokens matched by the sentence.
            int termMatchingCount = 0;
            for (int i = 0; i < termNumTokens; i++) {
                if (maximumIndex[i] >= 0) {
                    termMatchingCount++;
                }
            }
            double termCoverage = (double) termMatchingCount / (double) termNumTokens;
            termCoverage = 0; // coverage is deliberately disabled (weight 0 below)
            // Cohesiveness: proportion of term tokens matched at adjacent
            // sentence positions, penalized by the number of runs.
            // TODO (from original author): fix this algorithm. NOTE(review):
            // Integer.MIN_VALUE sentinels can overflow the subtraction below,
            // so unmatched tokens may be counted as "connected" — confirm
            // intended behavior before changing the scoring.
            int connectedComponentsCount = 1;
            int connectedComponentsLength = 0;
            if (termNumTokens > 1) {
                for (int i = 1; i < termNumTokens; i++) {
                    int tempLength = 1;
                    while (i < termTokens.size() && (Math.abs(maximumIndex[i] - maximumIndex[i - 1])) < 2) {
                        tempLength++;
                        i++;
                    }
                    if (tempLength > 1) {
                        connectedComponentsCount++;
                        connectedComponentsLength += tempLength;
                    }
                }
            } else {
                if (maximumIndex[0] >= 0) {
                    connectedComponentsLength = 1;
                }
            }
            double cohesiveness = ((double) connectedComponentsLength / (double) termTokens.size()) / (double) connectedComponentsCount;
            double score = 0.5 * simMean + 0 * termCoverage + 0.5 * cohesiveness;
            // Keep only the best-scoring descriptor per concept.
            ConceptIdentified previous = conceptIdentifiedsMap.get(conceptId);
            if (previous == null || previous.getScore() < score) {
                ConceptIdentified conceptIdentified = new ConceptIdentified();
                conceptIdentified.setConceptId(conceptId);
                conceptIdentified.setDescription(term);
                conceptIdentified.setCategory(category);
                conceptIdentified.setScore(score);
                conceptIdentified.setDetailedScore(simMatrix, maximumIndex);
                conceptIdentifiedsMap.put(conceptId, conceptIdentified);
            }
        }
        conceptIdentifieds.addAll(conceptIdentifiedsMap.values());
        // For each sentence-token position, the best score among concepts
        // whose detailed score covers that position.
        double[] maxScoresInSentence = new double[sentenceTokens.size()];
        for (int i = 0; i < conceptIdentifieds.size(); i++) {
            for (int j = 0; j < sentenceTokens.size(); j++) {
                if (conceptIdentifieds.get(i).getDetailedScore()[j] > 0) {
                    if (conceptIdentifieds.get(i).getScore() >= maxScoresInSentence[j]) {
                        maxScoresInSentence[j] = conceptIdentifieds.get(i).getScore();
                    }
                }
            }
        }
        // Drop every concept that is not the best at any position. Iterate
        // backwards so that removal never skips the element shifted into the
        // freed slot (the original forward removal did, letting some losing
        // concepts survive).
        for (int i = conceptIdentifieds.size() - 1; i >= 0; i--) {
            boolean still = false;
            for (int j = 0; j < sentenceTokens.size(); j++) {
                if (conceptIdentifieds.get(i).getDetailedScore()[j] == maxScoresInSentence[j]) {
                    still = true;
                }
            }
            if (!still) {
                conceptIdentifieds.remove(i);
            }
        }
        return conceptIdentifieds;
    }

    /**
     * Builds the set of descriptor ids sharing at least one vocabulary word
     * that approximately matches a token of {@code text}. For each filtered
     * token, its first four characters (or the whole token when shorter) are
     * looked up in the prefix index; the sorted word list is then scanned in
     * both directions while entries keep that prefix, and each word whose
     * similarity to the token reaches {@link #STRING_METRIC_THRESHOLD}
     * contributes its descriptors.
     *
     * @param text the sentence to collect candidates for
     * @return descriptor ids to be scored by
     *         {@link #evaluateCandidates(String, Set)}
     */
    @Override
    public Set<String> buildCandidateDescriptorSet(String text) {
        Set<String> descriptorIds = new HashSet<String>();
        List<String> sortedWordList = wordList.getSortedWordList();
        String[] tokens = text.split(" ");
        for (int i = 0; i < tokens.length; i++) {
            String token = tokenPipe.singleFilter(tokens[i]);
            try {
                if (token != null) {
                    String prefix = token.length() > 4 ? token.substring(0, 4) : token;
                    int chosenIndex = descriptorsPrefixIndex.find(prefix);
                    int lowerBound = chosenIndex;
                    int upperBound = chosenIndex + 1;
                    // Scan downwards from the index hit while the prefix holds.
                    while (sortedWordList.get(lowerBound).startsWith(prefix)) {
                        if (stringMetric.getSimilarity(token, sortedWordList.get(lowerBound)) >= STRING_METRIC_THRESHOLD) {
                            descriptorIds.addAll(wordByDescriptorIndex.getElement(sortedWordList.get(lowerBound)));
                        }
                        lowerBound--;
                    }
                    // Scan upwards likewise.
                    while (sortedWordList.get(upperBound).startsWith(prefix)) {
                        if (stringMetric.getSimilarity(token, sortedWordList.get(upperBound)) >= STRING_METRIC_THRESHOLD) {
                            descriptorIds.addAll(wordByDescriptorIndex.getElement(sortedWordList.get(upperBound)));
                        }
                        upperBound++;
                    }
                }
            } catch (NullPointerException ex) {
                // find() yields null (unboxing NPE) when the prefix is absent
                // from the ontology index; such tokens are deliberately
                // skipped. NOTE(review): bound under/overrun would throw
                // IndexOutOfBoundsException, which is NOT caught here —
                // presumably the word list carries sentinel entries; confirm.
            }
        }
        return descriptorIds;
    }

    /**
     * Identical to {@link #buildCandidateDescriptorSet(String)}; the former
     * duplicated body now delegates. Kept for backward compatibility with
     * existing callers.
     *
     * @param text the sentence to collect candidates for
     * @return descriptor ids to be scored
     */
    public Set<String> buildCandidateDescriptorSet2(String text) {
        return buildCandidateDescriptorSet(text);
    }
}
