package co.edu.unal.bioingenium.kbmed.retrieval.knowledge.process;

import co.edu.unal.bioingenium.kbmed.features.CollectionFeatures;
import co.edu.unal.bioingenium.kbmed.knowledge.similarity.NeighborgSimilarityService;
import co.edu.unal.bioingenium.kbmed.knowledge.similarity.SemanticSimilartyService;
import co.edu.unal.bioingenium.kbmed.knowledge.similarity.SemanticSimilartyService.SemanticSimilarityFunction;
import co.edu.unal.bioingenium.kbmed.retrieval.data.RetrievalData;
import co.edu.unal.bioingenium.kbmed.retrieval.knowledge.similarity.CosineExtendend;
import co.edu.unal.bioingenium.kbmed.text.index.structures.InvertedIndexIntToInt;
import co.edu.unal.bioingenium.kbmed.text.representation.vo.Corpus;
import co.edu.unal.bioingenium.kbmed.retrieval.vo.RelevantDocument;
import gnu.trove.map.TIntIntMap;
import gnu.trove.map.TObjectIntMap;
import gnu.trove.map.hash.TIntIntHashMap;
import gnu.trove.map.hash.TObjectIntHashMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 *
 * @author Ing. Alejandro Riveros Cruz
 */
public class RankByConcepts {

    /** Class-wide logger; resolved once instead of on every log call. */
    private static final Logger LOGGER = Logger.getLogger(RankByConcepts.class.getName());

    /**
     * Extended cosine similarity that folds a concept-to-concept similarity
     * matrix into the document/query comparison.
     */
    private final CosineExtendend cosineExtendend;
    /**
     * Service that resolves neighbor (related) descriptor ids, used for
     * query expansion.
     */
    private final NeighborgSimilarityService neighborgSimilarityService;
    /**
     * Service that computes pairwise semantic similarities between
     * descriptor ids.
     */
    private final SemanticSimilartyService semanticSimilartyService;

    /**
     * Creates a ranker wired to the singleton similarity services.
     */
    public RankByConcepts() {
        cosineExtendend = new CosineExtendend();
        neighborgSimilarityService = NeighborgSimilarityService.getInstance();
        semanticSimilartyService = SemanticSimilartyService.getInstance();
    }

    /**
     * Ranks corpus documents against the query carried in {@code retrievalData}
     * using concept (descriptor) level similarity: first a candidate document
     * subset is built from the inverted index, then each candidate is scored
     * with an extended cosine similarity, and the sorted results are stored
     * back into {@code retrievalData}.
     *
     * @param retrievalData carries the query document; candidates and results
     *        are written into it as a side effect
     * @param corpus the document collection being searched
     * @param conceptByDocument inverted index mapping concept ids to the ids
     *        of the documents containing them
     * @param features per-document feature vectors for the collection
     * @return the same {@code retrievalData} instance, with results populated
     */
    public RetrievalData retrieveByConcepts(RetrievalData retrievalData, Corpus corpus, InvertedIndexIntToInt conceptByDocument, CollectionFeatures features) {
        // Build the candidate document subset using the inverted index
        LOGGER.log(Level.INFO, "Building the candidate document set.");
        buildCandidatesSetByConcepts(retrievalData, corpus, conceptByDocument);
        // Score the candidate subset using the chosen similarity function
        LOGGER.log(Level.INFO, "Evaluating the candidate document set.");
        evaluateCandidatesByConcepts(retrievalData, corpus, features);
        return retrievalData;
    }

    /**
     * Collects the candidate document set for the query: every descriptor of
     * the query plus its neighbor (expanded) descriptors is looked up in the
     * inverted index, and all posting-list documents are accumulated into
     * {@code retrievalData}'s candidate document set.
     *
     * @param retrievalData source of the query document and owner of the
     *        candidate/expanded descriptor sets and candidate document set,
     *        all of which are mutated here
     * @param corpus provides the descriptor-string to corpus-index mapping
     * @param conceptByDocumentIndex inverted index from concept id to
     *        containing-document ids
     */
    private void buildCandidatesSetByConcepts(RetrievalData retrievalData, Corpus corpus, InvertedIndexIntToInt conceptByDocumentIndex) {
        // Mapping from descriptor string to its integer index in the corpus
        TObjectIntMap<String> invertedDescriptorList = corpus.getInvertedSortedWordList();
        // Live references: additions below mutate retrievalData directly
        Set<String> candidateDescriptorSet = retrievalData.getCandidateDescriptorSet();
        Set<String> expandedDescriptorSet = retrievalData.getExpandedDescriptorSet();
        // For each descriptor in the query: keep it, and expand it with its neighbors
        for (String descriptorId : retrievalData.getQueryDocument().getWordList().keySet()) {
            candidateDescriptorSet.add(descriptorId);
            expandedDescriptorSet.addAll(neighborgSimilarityService.getSimilarDescriptorsIds(descriptorId));
        }
        // Accumulate the posting lists of all candidate and expanded descriptors.
        // NOTE(review): TObjectIntMap.get returns the no-entry value (0 by default)
        // for descriptors absent from the corpus, which would fetch posting list 0
        // instead of skipping — confirm all descriptors are guaranteed to be indexed.
        Set<Integer> candidateDocumentsSet = retrievalData.getCandidateDocumentsSet();
        for (String descriptorId : candidateDescriptorSet) {
            candidateDocumentsSet.addAll(conceptByDocumentIndex.getElement(invertedDescriptorList.get(descriptorId)));
        }
        for (String descriptorId : expandedDescriptorSet) {
            candidateDocumentsSet.addAll(conceptByDocumentIndex.getElement(invertedDescriptorList.get(descriptorId)));
        }
    }

    /**
     * Scores every candidate document against the query with an extended
     * cosine similarity that uses a Jaccard-based semantic similarity matrix
     * over the candidate + expanded descriptors, then stores the sorted
     * {@link RelevantDocument} list into {@code retrievalData}.
     *
     * @param retrievalData source of the candidate sets and query feature
     *        vector; receives the sorted result list
     * @param corpus provides descriptor indices and the sorted document list
     * @param collectionFeatures per-document feature vectors
     */
    private void evaluateCandidatesByConcepts(RetrievalData retrievalData, Corpus corpus, CollectionFeatures collectionFeatures) {
        List<RelevantDocument> scoredCandidates = new ArrayList<RelevantDocument>();
        // Union of the query's own descriptors and the expanded ones — these
        // define the dimensions of the semantic similarity matrix.
        Set<String> candidateDescriptorsIdsSet = new HashSet<String>();
        candidateDescriptorsIdsSet.addAll(retrievalData.getCandidateDescriptorSet());
        candidateDescriptorsIdsSet.addAll(retrievalData.getExpandedDescriptorSet());
        // NOTE(review): a leftover loop that called getWords() on every candidate
        // document and discarded the result (plus two unused Trove maps and a
        // commented-out block adding document words to the descriptor set) was
        // removed here as dead code — confirm getWords() had no intended
        // side effect such as cache warming.
        // Fix an iteration order for the descriptors so matrix rows/columns line up
        List<String> candidateDescriptorIds = new ArrayList<String>(candidateDescriptorsIdsSet);
        TObjectIntMap<String> invertedDescriptorList = corpus.getInvertedSortedWordList();
        // Map each descriptor's corpus index to its row/column in the similarity matrix
        TIntIntMap corpusIndexMatrixIndexMap = new TIntIntHashMap();
        for (int i = 0; i < candidateDescriptorIds.size(); i++) {
            corpusIndexMatrixIndexMap.put(invertedDescriptorList.get(candidateDescriptorIds.get(i)), i);
        }
        // Pairwise Jaccard semantic similarity between all candidate descriptors
        float[][] similarityMatrix = semanticSimilartyService.getSemanticSimilaritysByDescriptorIds(candidateDescriptorIds, SemanticSimilarityFunction.JACCARD);
        // Score each candidate document with the extended cosine similarity
        float score;
        for (Integer documentId : retrievalData.getCandidateDocumentsSet()) {
            score = cosineExtendend.getValue(collectionFeatures.getFeatureVector(documentId), retrievalData.getQueryDocument().getFeatureVector(), corpusIndexMatrixIndexMap, similarityMatrix);
            scoredCandidates.add(new RelevantDocument(corpus.getSortedDocumentList().get(documentId), score));
        }
        // Sort by relevance (RelevantDocument's natural ordering) and publish
        Collections.sort(scoredCandidates);
        retrievalData.setResults(scoredCandidates);
    }
}
