package de.dopichaj.labrador.search.lucene;


import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryNotificationInfo;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryUsage;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import javax.management.ListenerNotFoundException;
import javax.management.Notification;
import javax.management.NotificationBroadcaster;
import javax.management.NotificationListener;

import org.apache.commons.lang.Validate;
import org.apache.commons.lang.time.StopWatch;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Searcher;

import de.dopichaj.labrador.index.Index;
import de.dopichaj.labrador.index.TagFactory;
import de.dopichaj.labrador.index.lucene.FragmentIDMap;
import de.dopichaj.labrador.index.lucene.LuceneIndex;
import de.dopichaj.labrador.index.meta.HitFactory;
import de.dopichaj.labrador.search.NEXISearcher;
import de.dopichaj.labrador.search.QueryCategory;
import de.dopichaj.labrador.search.QueryParseException;
import de.dopichaj.labrador.search.SearchException;
import de.dopichaj.labrador.search.erg.ElementRelationshipGraph;
import de.dopichaj.labrador.search.erg.ErgScoreUpdater;
import de.dopichaj.labrador.search.hit.CategorizedHit;
import de.dopichaj.labrador.search.hit.DocHitMap;
import de.dopichaj.labrador.search.hit.Hit;
import de.dopichaj.labrador.search.hit.HitCategoryScoreMap;


/**
 * Does most of the work of the XML retrieval.
 * <p>
 * Runs one Lucene search per query category, prunes low-scoring documents,
 * and merges the per-category scores into hit scores using the element
 * relationship graph (ERG).
 */
public class LuceneNEXISearcher implements NEXISearcher {

    /**
     * A logger for this class
     */
    private static final Logger log =
        Logger.getLogger(LuceneNEXISearcher.class);

    /** The Lucene searcher the category queries are run against. */
    private final Searcher searcher;

    /**
     * The ERG used to bias element scores by their relationships.
     */
    private final ElementRelationshipGraph erg;

    /** Analyzer used to turn NEXI queries into Lucene queries. */
    private final Analyzer analyzer;

    /** Maps Lucene document IDs to fragment IDs. */
    private final FragmentIDMap idMap;
    /** Resolves fragment IDs to hit meta-data, grouped by document. */
    private final HitFactory hitFactory;
    /** Tag factory of the index; further changes are disallowed in the constructor. */
    private final TagFactory tagFactory;

    /**
     * The number of documents to keep.
     */
    private final int wantedDocCount;

    /**
     * Initializes the XMLSearchProcessor.
     * 
     * @param erg
     *            The element relationship graph to use.
     * @param index
     *            The index supplying the hit and tag factories.
     * @param luceneIndex
     *            The Lucene-backed index (analyzer, searcher, fragment IDs).
     * @param wantedDocCount
     *            Maximum number of matching documents to keep; must be
     *            positive.
     * @throws IOException
     *             if the fragment ID map cannot be read from the index.
     */
    public LuceneNEXISearcher(ElementRelationshipGraph erg, Index index,
        LuceneIndex luceneIndex, int wantedDocCount) throws IOException {

        Validate.notNull(erg);
        Validate.notNull(index);
        // luceneIndex was previously dereferenced without being validated
        Validate.notNull(luceneIndex);
        Validate.isTrue(wantedDocCount > 0);

        this.erg = erg;
        this.wantedDocCount = wantedDocCount;
        this.analyzer = luceneIndex.getAnalyzer();
        this.searcher = luceneIndex.getSearcher();
        this.hitFactory = index.getHitFactory();

        // the tag set is fixed from now on; searching must not extend it
        tagFactory = index.getTagFactory();
        tagFactory.disallowFurtherChanges();

        log.info("Getting Lucene ID document map");
        idMap = luceneIndex.getFragmentIDMap();
        log.info("Finished constructing XMLSearchProcessor");
    }

    /**
     * Convenience constructor that keeps all matching documents, i.e. uses
     * a wanted document count of {@link Integer#MAX_VALUE}.
     * 
     * @throws IOException
     *             if the fragment ID map cannot be read from the index.
     */
    public LuceneNEXISearcher(ElementRelationshipGraph erg, final Index index,
        final LuceneIndex luceneIndex)
        throws IOException {

        this(erg, index, luceneIndex, Integer.MAX_VALUE);
    }

    /**
     * Releases the underlying Lucene searcher. I/O failures during shutdown
     * are logged rather than propagated.
     */
    public void close() {
        try {
            this.searcher.close();
        } catch (final IOException e) {
            log.error("Error closing searcher", e);
        }
    }

    /**
     * Parses the given NEXI query and runs the full search for it.
     * 
     * @param nexi the NEXI query string; must not be null.
     * @return the results, grouped by document.
     * @throws SearchException if the search backend fails.
     * @throws QueryParseException if the NEXI query cannot be parsed.
     */
    public DocHitMap nexiSearch(String nexi)
        throws SearchException, QueryParseException {

        Validate.notNull(nexi);
        return search(new LuceneQuery(analyzer, nexi));
    }

    /**
     * Performs most of the search process, including similarity calculation and
     * merging.
     * 
     * @param queries the parsed per-category Lucene queries.
     * @return A linked list with all results already biased by the categories.
     * @throws SearchException
     *             if an error occurs in the search backend
     */
    private DocHitMap search(LuceneQuery queries) throws SearchException {

        try {
            return luceneSearch(queries);
        } catch (IOException e) {
            // message was garbled by a bad search-and-replace
            // ("Lucene de.dopichaj.labrador.search")
            throw new SearchException("Error executing Lucene search", e);
        } catch (InterruptedException e) {
            // preserve the interrupt status for callers further up the stack
            Thread.currentThread().interrupt();
            throw new SearchException("Error merging results", e);
        }
    }

    /**
     * A hit collector that stores only one copy of each hit.
     */
    private static class HitCollector
        extends org.apache.lucene.search.HitCollector
        implements NotificationListener {
        
        private final QueryCategory category;
        private final HitCategoryScoreMap scores;
        private final FragmentIDMap map;
        private int hitCount = 0;
        private boolean collect = true;

        public HitCollector(QueryCategory category, HitCategoryScoreMap scores,
            FragmentIDMap map) {
            this.category = category;
            this.scores = scores;
            this.map = map;
            
            setupMemoryListener();
        }

        private void setupMemoryListener() {
            MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
            ((NotificationBroadcaster)mem).addNotificationListener(this, null,
                null);
            for (MemoryPoolMXBean memPool : ManagementFactory.getMemoryPoolMXBeans()) {
                if (memPool.isUsageThresholdSupported()) {
                    MemoryUsage memUsage = memPool.getUsage();
                    long max = memUsage.getMax();
                    memPool.setUsageThreshold((long)(max * 0.95));
                }
            }
        }
        
        public void close() {
            MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
            try {
                ((NotificationBroadcaster)mem).removeNotificationListener(this);
            } catch (ListenerNotFoundException e) {
                log.error("Listener not found");
            }
        }

        @Override
        public void collect(int docId, float score) {

            hitCount++;
            if (collect) {
                scores.setScore(category, map.getFragmentID(docId), score);
            }
        }

        public void handleNotification(Notification notification,
            Object handback) {
            
            if (notification.getType().equals(
                MemoryNotificationInfo.MEMORY_THRESHOLD_EXCEEDED)) {
                
                log.warn("Low memory (" + hitCount +
                    " hits already collected in '" + category + "')");
                collect = false;
            }
        }
    }

    /**
     * Perform the actual search operation: one Lucene search per query
     * category, pruning of low-scoring documents, then parallel per-document
     * merging of the category scores via the ERG.
     * 
     * @param queries the parsed per-category queries.
     * @return the results, grouped by document.
     * @throws IOException if the Lucene search fails.
     * @throws InterruptedException if the merge phase is interrupted.
     */
    private DocHitMap luceneSearch(LuceneQuery queries)
        throws IOException, InterruptedException {

        final DocHitMap results = new DocHitMap();
        final HitCategoryScoreMap scores =
            new HitCategoryScoreMap(queries.getCategories());
        final StopWatch totalTime = new StopWatch();
        totalTime.start();

        // perform the searches in all categories
        for (QueryCategory category : queries.getCategories()) {
            // the category query is mandatory, the target query only boosts
            final BooleanQuery query = new BooleanQuery();
            query.add(queries.getQuery(category), BooleanClause.Occur.MUST);
            query.add(queries.getTargetQuery(), BooleanClause.Occur.SHOULD);

            if (log.isInfoEnabled()) {
                log.info("Searching for \"" + query.toString("contents")
                    + "\" in category " + category);
            }

            final HitCollector collector =
                new HitCollector(category, scores, idMap);
            try {
                searcher.search(query, null, collector);
            } finally {
                // always deregister the collector's memory listener, even
                // when the search throws
                collector.close();
            }
        }

        totalTime.split();
        // log message was garbled by a bad search-and-replace
        log.info("Finished Lucene search " + totalTime.getSplitTime());
        scores.close();
        log.info("Closed scores");

        // find the documents
        final List<Integer> fragmentIds = scores.getHits();
        final Map<File, Collection<Integer>> docs =
            hitFactory.groupByDocument(fragmentIds);
        log.info(docs.size() + " matching documents, " + fragmentIds.size()
            + " hits");

        removeLowScoringDocuments(scores, docs);

        // retrieve the meta-information from the index: one merge task per
        // document, executed on a small thread pool
        final ExecutorService executor = Executors.newFixedThreadPool(2);
        final ErgScoreUpdater updater =
            new ErgScoreUpdater(erg, queries.getCategories());
        for (Map.Entry<File, Collection<Integer>> entry : docs.entrySet()) {

            // ... merge the category scores to one hit score
            executor.execute(
                new ErgUpdateThread(entry.getKey(), results, scores,
                    entry.getValue(), updater, hitFactory));
        }

        executor.shutdown();
        if (!executor.awaitTermination(600, TimeUnit.SECONDS)) {
            // previously the timeout was silently ignored and partial
            // results were returned without any hint
            log.error("Result merging did not finish within 600 seconds; "
                + "results may be incomplete");
        }

        totalTime.stop();
        log.info("Finished search (took " + totalTime + ")");

        return results;
    }

    /**
     * Prunes {@code docs} in place so that (up to ties at the cut-off score)
     * only the {@code wantedDocCount} best documents remain, judging every
     * document by the highest score of any of its hits.
     * 
     * @param scores the per-hit category scores.
     * @param docs the document-to-fragment-IDs map to prune in place.
     */
    private void removeLowScoringDocuments(final HitCategoryScoreMap scores,
        final Map<File, Collection<Integer>> docs) {

        final int oldSize = docs.size();
        if (oldSize > wantedDocCount) {

            // determine each document's best hit score; keyed by file rather
            // than by the fragment-ID collection, so two documents with
            // content-equal ID lists cannot collide in the map
            final Map<File, Float> highScores = new HashMap<File, Float>();
            final float[] docScores = new float[oldSize];
            int i = 0;
            for (Map.Entry<File, Collection<Integer>> entry : docs.entrySet()) {
                final float highScore = scores.highestScore(entry.getValue());
                docScores[i++] = highScore;
                highScores.put(entry.getKey(), highScore);
            }

            // the score of the wantedDocCount-th best document is the cut-off
            Arrays.sort(docScores);
            final float minScore = docScores[oldSize - wantedDocCount];
            log.info("Minimum score = " + minScore);

            // remove all documents scoring strictly below the cut-off
            final Iterator<Map.Entry<File, Collection<Integer>>> iterator =
                docs.entrySet().iterator();
            while (iterator.hasNext()) {
                final Map.Entry<File, Collection<Integer>> entry =
                    iterator.next();

                if (highScores.get(entry.getKey()) < minScore) {
                    iterator.remove();
                    if (log.isDebugEnabled()) {
                        log.debug("Low-scoring document with "
                            + entry.getValue().size() + " hits.");
                    }
                }
            }
        }
        // original message was missing the space before "remaining"
        log.info("Removed low-scoring documents; " + docs.size() + "/" + oldSize
            + " remaining");
    }

    /**
     * Task that resolves one document's hits from the index, associates them
     * with their scores, stores them in the shared result map, and runs the
     * ERG similarity calculation on them.
     */
    private static final class ErgUpdateThread implements Runnable {

        /** The document whose hits are processed. */
        private final File file;

        /** Shared result map, written from multiple pool threads. */
        private final DocHitMap results;

        /** Shared per-hit category scores. */
        private final HitCategoryScoreMap scores;

        /** IDs of this document's matching fragments. */
        private final Collection<Integer> docIds;

        /** Shared ERG score updater. */
        private final ErgScoreUpdater updater;

        /** Resolves fragment IDs to hit meta-data. */
        private final HitFactory hitFactory;

        public ErgUpdateThread(final File file, final DocHitMap results,
            final HitCategoryScoreMap scores, Collection<Integer> docIds,
            ErgScoreUpdater updater, HitFactory hitFactory) {

            this.file = file;
            this.results = results;
            this.scores = scores;
            this.docIds = docIds;
            this.updater = updater;
            this.hitFactory = hitFactory;
        }

        // NOTE(review): "synchronized" here locks this task instance only;
        // since every task is a distinct instance it does NOT guard the
        // shared results/scores/updater objects against concurrent access
        // from the other pool thread. Kept for compatibility — verify that
        // those classes are thread-safe.
        public synchronized void run() {
            // apply ERG
            // ... read the corresponding meta information
            final Collection<CategorizedHit> hits = new ArrayList<CategorizedHit>(
                docIds.size());
            final Collection<Hit> resultHits = new ArrayList<Hit>(docIds.size());
            final List<Integer> wantedIds = new ArrayList<Integer>(docIds);
            final Map<Integer, Hit> hitMap =
                hitFactory.getDocumentHits(file, wantedIds);
            for (int fragmentId : docIds) {
                final Hit hit = hitMap.get(fragmentId);
                // message previously read "ID 42is null" (missing space)
                assert hit != null : "Hit with ID " + fragmentId +
                    " is null in " + file;

                scores.associate(fragmentId, hit);
                hits.add(scores.getHit(fragmentId));
                resultHits.add(hit);
            }

            // ... add the document's fragments to the results
            results.setHitsFromDoc(file, resultHits);
            updater.performSimilarityCalculation(hits);
        }
    }

    /**
     * @return an empty string; this searcher provides no description.
     */
    public String getDescription() {
        return "";
    }

}
