package co.edu.unal.bioingenium.kbmed.retrieval.knowledge.process;

import co.edu.unal.bioingenium.kbmed.config.Configuration;
import co.edu.unal.bioingenium.kbmed.data.PhysicalDocument;
import co.edu.unal.bioingenium.kbmed.data.loader.DocumentLoader;
import co.edu.unal.bioingenium.kbmed.data.loader.DocumentLoaderFactory;
import co.edu.unal.bioingenium.kbmed.features.CollectionFeatures;
import co.edu.unal.bioingenium.kbmed.features.CompactFeatureVector;
import co.edu.unal.bioingenium.kbmed.knowledge.KnowledgeSourceMetaData;
import co.edu.unal.bioingenium.kbmed.knowledge.mapping.api.Mapping;
import co.edu.unal.bioingenium.kbmed.knowledge.mapping.impl.SoftMapping;
import co.edu.unal.bioingenium.kbmed.knowledge.vo.DescriptorData;
import co.edu.unal.bioingenium.kbmed.text.index.structures.InvertedIndexIntToInt;
import co.edu.unal.bioingenium.kbmed.text.representation.vo.Corpus;
import co.edu.unal.bioingenium.kbmed.text.representation.vo.Document;
import co.edu.unal.bioingenium.kbmed.retrieval.weighting.TFIDF;
import co.edu.unal.bioingenium.kbmed.util.io.FileManager;
import gnu.trove.map.TObjectIntMap;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 *
 * @author Ing. Alejandro Riveros Cruz
 */
public class PrepareByConcepts {

    /**
     * Feature weighting scheme applied by {@link #extractFeatures(Corpus)};
     * one of the {@code Configuration.FEATURE_*} constants, resolved from
     * {@link Configuration#FEATURE_TYPE} in the constructor.
     * NOTE(review): this field is static and mutable, and
     * {@link #extractFeatures(Corpus, int)} overwrites it as a side effect —
     * concurrent use of several instances will interfere. Kept public static
     * for API compatibility.
     */
    public static int FEATURE_TYPE = Configuration.FEATURE_TF_IDF;
    /**
     * Concept mapper that translates raw document text into ontology
     * descriptors. Must be initialized via {@link #initKnowledgeSource()}
     * before any corpus-building method is called; otherwise
     * {@code processConcepts} will fail with a {@code NullPointerException}.
     */
    private Mapping mapping;

    /**
     * Creates the processor and resolves the configured feature type from
     * {@link Configuration#FEATURE_TYPE}. An unrecognized configuration value
     * leaves the TF-IDF default in place.
     */
    public PrepareByConcepts() {
        if (Configuration.FEATURE_TYPE.equals("frequency")) {
            FEATURE_TYPE = Configuration.FEATURE_FREQUENCY;
        } else if (Configuration.FEATURE_TYPE.equals("relativefrequency")) {
            FEATURE_TYPE = Configuration.FEATURE_RELATIVE_FREQUENCY;
        } else if (Configuration.FEATURE_TYPE.equals("TFIDF")) {
            FEATURE_TYPE = Configuration.FEATURE_TF_IDF;
        }
    }

    /**
     * Initializes the concept-mapping knowledge source (non-verbose
     * {@link SoftMapping}). Must be called before building any corpus.
     */
    public void initKnowledgeSource() {
        mapping = new SoftMapping();
        mapping.init();
        mapping.setVerbose(false);
    }

    /**
     * Builds a concept corpus from every file found (recursively) under the
     * given path.
     *
     * @param corpusName  identifier for the resulting corpus
     * @param corpusPath  root directory to scan for documents
     * @param pruneBellow concepts with fewer global occurrences are dropped
     *                    (0 together with {@code pruneAbove == 0} disables pruning)
     * @param pruneAbove  concepts with more global occurrences are dropped
     * @return the populated corpus, or {@code null} if listing the files failed
     */
    public Corpus buildConceptCorpus(String corpusName, String corpusPath, int pruneBellow, int pruneAbove) {
        try {
            corpusPath = FileManager.toStandardPath(corpusPath);
            System.out.print("Getting file list...");
            List<String> fileList = FileManager.getRecursivelyFileList(corpusPath);
            System.out.print("Success\n");
            return buildConceptCorpus(corpusName, corpusPath, fileList, pruneBellow, pruneAbove);
        } catch (IOException ex) {
            Logger.getLogger(PrepareByConcepts.class.getName()).log(Level.SEVERE, null, ex);
        }
        return null;
    }

    /**
     * Builds a concept corpus from an explicit list of files. Files whose
     * loader cannot parse them are reported and skipped.
     *
     * @param corpusName  identifier for the resulting corpus
     * @param corpusPath  base directory the file names are resolved against
     * @param fileList    files to parse
     * @param pruneBellow concepts with fewer global occurrences are dropped
     *                    (0 together with {@code pruneAbove == 0} disables pruning)
     * @param pruneAbove  concepts with more global occurrences are dropped
     * @return the populated corpus
     */
    public Corpus buildConceptCorpus(String corpusName, String corpusPath, List<String> fileList, int pruneBellow, int pruneAbove) {
        Corpus corpus = new Corpus(corpusName);
        PhysicalDocument physicalDocument;
        DocumentLoader documentLoader;
        int count = 1;
        for (String fileName : fileList) {
            System.out.print((count++) + "/" + fileList.size() + " Parsing " + fileName + "...");
            documentLoader = DocumentLoaderFactory.getDocumentLoader(fileName);
            physicalDocument = documentLoader.parseDocument(corpusPath, fileName);
            if (physicalDocument != null) {
                processConcepts(corpus, physicalDocument.getName(), physicalDocument.getContent());
                System.out.print("Success\n");
            } else {
                // Loader could not parse this file; skip it and continue.
                System.out.print("Failed\n");
            }
        }
        if (pruneBellow != 0 || pruneAbove != 0) {
            pruneWords(corpus, pruneBellow, pruneAbove);
        }
        return corpus;
    }

    /**
     * Builds a concept corpus from in-memory documents.
     *
     * @param corpusName       identifier for the resulting corpus
     * @param corpusData       map of document name to document content
     * @param ontologyMetaData currently unused; kept only for signature
     *                         compatibility with existing callers
     * @param pruneBellow      concepts with fewer global occurrences are dropped
     *                         (0 together with {@code pruneAbove == 0} disables pruning)
     * @param pruneAbove       concepts with more global occurrences are dropped
     * @return the populated corpus
     */
    public Corpus buildConceptCorpus(String corpusName, Map<String, String> corpusData, KnowledgeSourceMetaData ontologyMetaData, int pruneBellow, int pruneAbove) {
        Corpus corpus = new Corpus(corpusName);
        int count = 1;
        // Iterate entries instead of keySet()+get() to avoid a second lookup.
        for (Map.Entry<String, String> entry : corpusData.entrySet()) {
            System.out.print((count++) + "/" + corpusData.size() + " Parsing " + entry.getKey() + "...");
            processConcepts(corpus, entry.getKey(), entry.getValue());
            System.out.print("Success\n");
        }
        if (pruneBellow != 0 || pruneAbove != 0) {
            pruneWords(corpus, pruneBellow, pruneAbove);
        }
        return corpus;
    }

    /**
     * Maps a document's text to ontology concepts and registers each concept
     * occurrence in both the corpus-wide counters and the per-document counts.
     *
     * @param corpus          corpus accumulating global statistics
     * @param documentName    name of the document being added
     * @param documentContent raw text to run through the concept mapper
     */
    private void processConcepts(Corpus corpus, String documentName, String documentContent) {
        Document document = new Document(documentName);
        Map<String, List<DescriptorData>> mapResults = mapping.doMapping(documentContent);
        // The sentence keys are not needed here — only the mapped descriptors.
        for (List<DescriptorData> descriptors : mapResults.values()) {
            for (DescriptorData descriptorData : descriptors) {
                // Add global occurrence.
                corpus.addWordOccurrence(descriptorData.getDescriptorId());
                if (!document.containsWord(descriptorData.getDescriptorId())) {
                    // First time this concept appears in this document:
                    // bump the document-frequency counter.
                    corpus.addDocumentOccurence(descriptorData.getDescriptorId());
                }
                // Add per-document occurrence details.
                document.addWordOccurrence(descriptorData.getDescriptorId());
            }
        }
        corpus.addDocument(document);
    }

    /**
     * Prunes with fixed defaults: concepts seen fewer than 5 times or in more
     * than 90% of documents.
     * NOTE(review): never called from within this class — dead code unless
     * invoked reflectively; kept for compatibility.
     *
     * @param corpus corpus to prune in place
     */
    private void pruneConcepts(Corpus corpus) {
        int totalDocuments = corpus.getTotalDocuments();
        pruneWords(corpus, 5, (int) (totalDocuments * 0.9));

    }

    /**
     * Removes from the corpus every concept whose global occurrence count lies
     * outside {@code [pruneBellow, pruneAbove]}, then drops any document left
     * with no concepts at all.
     *
     * @param corpus      corpus to prune in place
     * @param pruneBellow concepts occurring fewer times are removed
     * @param pruneAbove  concepts occurring more times are removed
     */
    private void pruneWords(Corpus corpus, int pruneBellow, int pruneAbove) {
        Logger.getLogger(PrepareByConcepts.class.getName()).log(Level.INFO, "Pruning irrelevant word and documents");
        Set<String> itemsToRemove = new HashSet<String>();
        // Collect every concept whose occurrence count is outside the kept range.
        for (String word : corpus.getWordCount().keySet()) {
            int occurrences = corpus.getWordOcurrences(word);
            if (occurrences < pruneBellow || occurrences > pruneAbove) {
                itemsToRemove.add(word);
            }
        }
        // Remove each pruned concept from every document, then from the corpus.
        for (String word : itemsToRemove) {
            for (String documentName : corpus.getDocumentCollection().keySet()) {
                corpus.getDocument(documentName).removeWord(word);
            }
            corpus.removeWord(word);
        }
        itemsToRemove.clear();
        // Collect documents that became empty after concept removal.
        for (String documentName : corpus.getDocumentCollection().keySet()) {
            if (corpus.getDocument(documentName).getTotalWords() == 0) {
                itemsToRemove.add(documentName);
            }
        }
        // Drop the empty documents (done after iteration to avoid
        // modifying the collection while iterating it).
        for (String documentName : itemsToRemove) {
            corpus.removeDocument(documentName);
        }
    }

    /**
     * Extracts collection features using an explicit weighting scheme.
     * NOTE(review): overwrites the public static {@link #FEATURE_TYPE} as a
     * lasting side effect — subsequent calls to {@link #extractFeatures(Corpus)}
     * (from any instance) will use this scheme. Preserved for compatibility.
     *
     * @param corpus      corpus to extract features from
     * @param featureType one of the {@code Configuration.FEATURE_*} constants
     * @return the per-document feature vectors
     */
    public CollectionFeatures extractFeatures(Corpus corpus, int featureType) {
        FEATURE_TYPE = featureType;
        return extractFeatures(corpus);
    }

    /**
     * Extracts one {@link CompactFeatureVector} per document, weighting each
     * concept by the scheme currently selected in {@link #FEATURE_TYPE}:
     * raw frequency, relative frequency, or TF-IDF. An unrecognized scheme
     * yields an empty {@link CollectionFeatures}.
     *
     * @param corpus corpus whose documents are vectorized
     * @return the per-document feature vectors, indexed by the corpus's
     *         sorted document order; vector components indexed by the
     *         corpus's sorted word order
     */
    public CollectionFeatures extractFeatures(Corpus corpus) {
        Logger.getLogger(PrepareByConcepts.class.getName()).log(Level.INFO, "Extract collection features");
        CollectionFeatures collectionFeatures = new CollectionFeatures(corpus.getName());
        TObjectIntMap<String> invertedWordList = corpus.getInvertedSortedWordList();
        TObjectIntMap<String> invertedDocumentList = corpus.getInvertedSortedDocumentList();
        Document actualDocument;
        CompactFeatureVector featureVector;
        float value;
        switch (FEATURE_TYPE) {
            case Configuration.FEATURE_FREQUENCY:
                for (String documentName : corpus.getDocumentCollection().keySet()) {
                    featureVector = new CompactFeatureVector();
                    actualDocument = corpus.getDocument(documentName);
                    for (String word : actualDocument.getWordList().keySet()) {
                        // Raw in-document count.
                        value = (float) actualDocument.getWordList().get(word);
                        featureVector.setValue(invertedWordList.get(word), value);
                    }
                    collectionFeatures.setFeatureVector(invertedDocumentList.get(documentName), featureVector);
                }
                break;
            case Configuration.FEATURE_RELATIVE_FREQUENCY:
                for (String documentName : corpus.getDocumentCollection().keySet()) {
                    featureVector = new CompactFeatureVector();
                    actualDocument = corpus.getDocument(documentName);
                    for (String word : actualDocument.getWordList().keySet()) {
                        // In-document count normalized by document length.
                        value = (float) actualDocument.getWordList().get(word) / (float) actualDocument.getTotalWords();
                        featureVector.setValue(invertedWordList.get(word), value);
                    }
                    collectionFeatures.setFeatureVector(invertedDocumentList.get(documentName), featureVector);
                }
                break;
            case Configuration.FEATURE_TF_IDF:
                for (String documentName : corpus.getDocumentCollection().keySet()) {
                    featureVector = new CompactFeatureVector();
                    actualDocument = corpus.getDocument(documentName);
                    for (String word : actualDocument.getWordList().keySet()) {
                        // Term frequency scaled by inverse document frequency.
                        value = TFIDF.getTFIDF(actualDocument.getWordOccurrences(word), actualDocument.getTotalWords(), corpus.getTotalDocuments(), corpus.getDocumentOcurrences(word));
                        featureVector.setValue(invertedWordList.get(word), value);
                    }
                    collectionFeatures.setFeatureVector(invertedDocumentList.get(documentName), featureVector);
                }
                break;
        }

        return collectionFeatures;
    }

    /**
     * Builds an inverted index mapping each concept id to the ids of the
     * documents it occurs in, using the corpus's sorted word/document orders.
     *
     * @param corpus corpus to index
     * @return concept-to-document inverted index
     */
    public InvertedIndexIntToInt buildConceptByDocumentIndex(Corpus corpus) {
        Logger.getLogger(PrepareByConcepts.class.getName()).log(Level.INFO, "Build word by document index");
        TObjectIntMap<String> invertedWordList = corpus.getInvertedSortedWordList();
        TObjectIntMap<String> invertedDocumentList = corpus.getInvertedSortedDocumentList();
        InvertedIndexIntToInt invertedIndex = new InvertedIndexIntToInt();
        Document document;
        for (String documentName : corpus.getDocumentCollection().keySet()) {
            document = corpus.getDocument(documentName);
            for (String word : document.getWordList().keySet()) {
                invertedIndex.addElement(invertedWordList.get(word), invertedDocumentList.get(documentName));
            }
        }

        return invertedIndex;
    }
}
