package co.edu.unal.bioingenium.kbmed.retrieval.text.process;

import co.edu.unal.bioingenium.kbmed.config.Configuration;
import co.edu.unal.bioingenium.kbmed.data.PhysicalDocument;
import co.edu.unal.bioingenium.kbmed.data.loader.DocumentLoader;
import co.edu.unal.bioingenium.kbmed.data.loader.DocumentLoaderFactory;
import co.edu.unal.bioingenium.kbmed.features.CollectionFeatures;
import co.edu.unal.bioingenium.kbmed.features.CompactFeatureVector;
import co.edu.unal.bioingenium.kbmed.retrieval.weighting.TFIDF;
import co.edu.unal.bioingenium.kbmed.text.filter.FilterPipe;
import co.edu.unal.bioingenium.kbmed.text.index.structures.InvertedIndexIntToInt;
import co.edu.unal.bioingenium.kbmed.text.index.structures.InvertedIndexStrToInt;
import co.edu.unal.bioingenium.kbmed.text.representation.vo.Corpus;
import co.edu.unal.bioingenium.kbmed.text.representation.vo.Document;
import co.edu.unal.bioingenium.kbmed.util.io.FileManager;
import gnu.trove.map.TObjectIntMap;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Pattern;

/**
 *
 * @author Alejandro Riveros Cruz
 */
public class PrepareByWords {

    /**
     * 
     */
    public static int FEATURE_TYPE = Configuration.FEATURE_TF_IDF;
    /**
     * 
     */
    private FilterPipe filterPipe;

    /**
     * 
     */
    public PrepareByWords() {
        filterPipe = new FilterPipe();
        if (Configuration.FEATURE_TYPE.equals("frequency")) {
            FEATURE_TYPE = Configuration.FEATURE_FREQUENCY;
        } else if (Configuration.FEATURE_TYPE.equals("relativefrequency")) {
            FEATURE_TYPE = Configuration.FEATURE_RELATIVE_FREQUENCY;
        } else if (Configuration.FEATURE_TYPE.equals("TFIDF")) {
            FEATURE_TYPE = Configuration.FEATURE_TF_IDF;
        }
    }

    /**
     * 
     * @param corpusName
     * @param corpusPath
     * @param fileList
     * @param pruneBellow
     * @param pruneAbove
     * @return
     */
    public Corpus buildCorpus(String corpusName, String corpusPath, List<String> fileList, int pruneBellow, int pruneAbove) {
        //TODO better corpus generation, the building process must be incremental based on the changes or the aparison of new documents in the collections
        Corpus corpus = new Corpus(corpusName);
        //Initializing varibles
        PhysicalDocument physicalDocument;
        DocumentLoader documentLoader;
        int count = 1;
        for (String fileName : fileList) {
            System.out.print((count++) + "/" + fileList.size() + " Parsing " + fileName + "...");
            documentLoader = DocumentLoaderFactory.getDocumentLoader(fileName);
            physicalDocument = documentLoader.parseDocument(corpusPath, fileName);
            if (physicalDocument != null) {
                processWords(corpus, physicalDocument.getName(), physicalDocument.getContent());
                System.out.print("Success\n");
            } else {
                System.out.print("Failed\n");
            }
        }
        if (pruneBellow != 0 || pruneAbove != 0) {
            pruneWords(corpus, pruneBellow, pruneAbove);
        }
        return corpus;
    }

    /**
     * 
     * @param corpusName
     * @param corpusData
     * @param pruneBellow
     * @param pruneAbove
     * @return
     */
    public Corpus buildCorpus(String corpusName, Map<String, String> corpusData, int pruneBellow, int pruneAbove) {
        Corpus corpus = new Corpus(corpusName);
        int count = 1;
        for (String documentName : corpusData.keySet()) {
            System.out.print((count++) + "/" + corpusData.size() + " Parsing " + documentName + "...");
            processWords(corpus, documentName, corpusData.get(documentName));
            System.out.print("Success\n");
        }
        if (pruneBellow != 0 || pruneAbove != 0) {
            pruneWords(corpus, pruneBellow, pruneAbove);
        }
        return corpus;
    }

    /**
     * 
     * @param corpusName
     * @param corpusPath
     * @param pruneBellow 
     * @param pruneAbove 
     * @return
     */
    public Corpus buildCorpus(String corpusName, String corpusPath, int pruneBellow, int pruneAbove) {
        try {
            corpusPath = FileManager.toStandardPath(corpusPath);
            System.out.print("Getting file list...");
            List<String> fileList = FileManager.getRecursivelyFileList(corpusPath);
            System.out.print("Success\n");
            return buildCorpus(corpusName, corpusPath, fileList, pruneBellow, pruneAbove);
        } catch (IOException ex) {
            Logger.getLogger(PrepareByWords.class.getName()).log(Level.SEVERE, null, ex);
        }
        return null;
    }

    /**
     * 
     * @param corpus
     * @param documentName
     * @param documentContent
     */
    private void processWords(Corpus corpus, String documentName, String documentContent) {
        Document document = new Document(documentName);
        List<String> words = filterPipe.filter(documentContent);
        for (String word : words) {
            //Adding global occurrence 
            corpus.addWordOccurrence(word);
            if (!document.containsWord(word)) {
                //Add in-document occurrente
                corpus.addDocumentOccurence(word);
            }
            //Add document details
            document.addWordOccurrence(word);
        }
        corpus.addDocument(document);
    }

    private void pruneWords(Corpus corpus) {
        int totalDocuments = corpus.getTotalDocuments();
        pruneWords(corpus, 5, (int) (totalDocuments * 0.9));

    }

    /**
     * 
     * @param corpus
     * @param minOcurrences
     * @param maxOcurrences
     */
    private void pruneWords(Corpus corpus, int pruneBellow, int pruneAbove) {
        Logger.getLogger(PrepareByWords.class.getName()).log(Level.INFO, "Pruning irrelevant words and documents");
        Set<String> itemsToRemove = new HashSet<String>();
        // For each word in the word list
        for (String word : corpus.getWordCount().keySet()) {
            //If word occurrence are in the prune range
            if (corpus.getWordOcurrences(word) < pruneBellow || corpus.getWordOcurrences(word) > pruneAbove) {
                itemsToRemove.add(word);
            }
        }
        //TODO fast implementaion using the inverted index
        //For each word to remove
        for (String word : itemsToRemove) {
            // For each document in corpus
            for (String documentName : corpus.getDocumentCollection().keySet()) {
                corpus.getDocument(documentName).removeWord(word);
            }
            corpus.removeWord(word);
        }
        itemsToRemove.clear();
        //Remove irrelevant documents
        for (String documentName : corpus.getDocumentCollection().keySet()) {
            if (corpus.getDocument(documentName).getTotalWords() == 0) {
                itemsToRemove.add(documentName);
            }
        }
        //For each document to remove
        for (String documentName : itemsToRemove) {
            corpus.removeDocument(documentName);
        }
    }

    /**
     * 
     * @param corpus
     * @param featureType
     * @return
     */
    public CollectionFeatures extractFeatures(Corpus corpus, int featureType) {
        FEATURE_TYPE = featureType;
        return extractFeatures(corpus);
    }

    /**
     *
     * @param corpus 
     * @return 
     */
    public CollectionFeatures extractFeatures(Corpus corpus) {
        Logger.getLogger(PrepareByWords.class.getName()).log(Level.INFO, "Extract collection features");
        CollectionFeatures collectionFeatures = new CollectionFeatures(corpus.getName());
        TObjectIntMap<String> invertedWordList = corpus.getInvertedSortedWordList();
        TObjectIntMap<String> invertedDocumentList = corpus.getInvertedSortedDocumentList();
        Document actualDocument;
        CompactFeatureVector featureVector;
        float value;
        switch (FEATURE_TYPE) {
            case Configuration.FEATURE_FREQUENCY:
                for (String documentName : corpus.getDocumentCollection().keySet()) {
                    featureVector = new CompactFeatureVector();
                    actualDocument = corpus.getDocument(documentName);
                    for (String word : actualDocument.getWordList().keySet()) {
                        value = (float) actualDocument.getWordList().get(word);
                        featureVector.setValue(invertedWordList.get(word), value);
                    }
                    collectionFeatures.setFeatureVector(invertedDocumentList.get(documentName), featureVector);
                }
                break;
            case Configuration.FEATURE_RELATIVE_FREQUENCY:
                for (String documentName : corpus.getDocumentCollection().keySet()) {
                    featureVector = new CompactFeatureVector();
                    actualDocument = corpus.getDocument(documentName);
                    for (String word : actualDocument.getWordList().keySet()) {
                        value = (float) actualDocument.getWordList().get(word) / (float) actualDocument.getTotalWords();
                        featureVector.setValue(invertedWordList.get(word), value);
                    }
                    collectionFeatures.setFeatureVector(invertedDocumentList.get(documentName), featureVector);
                }
                break;
            case Configuration.FEATURE_TF_IDF:
                for (String documentName : corpus.getDocumentCollection().keySet()) {
                    featureVector = new CompactFeatureVector();
                    actualDocument = corpus.getDocument(documentName);
                    for (String word : actualDocument.getWordList().keySet()) {
                        value = TFIDF.getTFIDF(actualDocument.getWordOccurrences(word), actualDocument.getTotalWords(), corpus.getTotalDocuments(), corpus.getDocumentOcurrences(word));
                        featureVector.setValue(invertedWordList.get(word), value);
                    }
                    collectionFeatures.setFeatureVector(invertedDocumentList.get(documentName), featureVector);
                }
                break;
        }
        return collectionFeatures;
    }

    /**
     * 
     * @param corpus
     * @return
     */
    public InvertedIndexIntToInt buildWordByDocumentIndex(Corpus corpus) {
        Logger.getLogger(PrepareByWords.class.getName()).log(Level.INFO, "Build word by document index");
        TObjectIntMap<String> invertedWordList = corpus.getInvertedSortedWordList();
        TObjectIntMap<String> invertedDocumentList = corpus.getInvertedSortedDocumentList();
        InvertedIndexIntToInt invertedIndex = new InvertedIndexIntToInt();
        Document document;
        for (String documentName : corpus.getDocumentCollection().keySet()) {
            document = corpus.getDocument(documentName);
            for (String word : document.getWordList().keySet()) {
                invertedIndex.addElement(invertedWordList.get(word), invertedDocumentList.get(documentName));
            }
        }
        return invertedIndex;
    }

    /**
     * 
     * @param corpus
     * @return
     */
    public InvertedIndexStrToInt buildWordByDocumentNameIndex(Corpus corpus) {
        Logger.getLogger(PrepareByWords.class.getName()).log(Level.INFO, "Build word by document name index");
        InvertedIndexStrToInt wordByNameIndex = new InvertedIndexStrToInt();
        List<String> documentNames = corpus.getSortedDocumentList();
        List<String> words;
        for (int idx = 0; idx < documentNames.size(); idx++) {
            words = filterPipe.filter(documentNames.get(idx));
            for (String word : words) {
                wordByNameIndex.addElement(word, idx);
            }
        }
        return wordByNameIndex;
    }

    /**
     * 
     * @param corpus
     * @return
     */
    public InvertedIndexStrToInt buildParentDirectoryByDocumentIndex(Corpus corpus) {
        Logger.getLogger(PrepareByWords.class.getName()).log(Level.INFO, "Build word by document meta index");
        InvertedIndexStrToInt metaByDocumentIndex = new InvertedIndexStrToInt();
        List<String> documentNames = corpus.getSortedDocumentList();
        String[] dirs;
        for (int idx = 0; idx < documentNames.size(); idx++) {
            dirs = documentNames.get(idx).split(File.separator);
            for (int i = 0; i < dirs.length - 1; i++) {
                metaByDocumentIndex.addElement(dirs[i].toLowerCase().intern(), idx);
            }
        }
        return metaByDocumentIndex;
    }
}
