package org.nlp2rdf.navigator.benchmark;

import com.google.common.base.Joiner;
import com.google.common.collect.Sets;
import com.hp.hpl.jena.query.QuerySolution;
import com.hp.hpl.jena.rdf.model.RDFNode;
import com.hp.hpl.jena.rdf.model.Resource;
import org.aksw.commons.semweb.sparql.core.ISparqlEndpoint;
import org.aksw.commons.semweb.sparql.core.QueryCollection;
import org.aksw.commons.util.random.RandomUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;

/**
 * Created by Claus Stadler
 * Date: Oct 23, 2010
 * Time: 10:37:16 AM
 */
public class PoolUtils {
    private static final Logger logger = LoggerFactory.getLogger(PoolUtils.class);

    /** Static utility holder; never instantiated. */
    private PoolUtils() {
    }

    /**
     * Builds an evaluation pool whose negatives are sampled uniformly at random
     * from all typed DBpedia resources, minus the given positives.
     *
     * @param sparqlEndpoint    endpoint to query for the candidate resources
     * @param defaultGraphNames graphs to restrict the query to
     * @param positives         resources that must never appear as negatives
     * @param size              number of negatives to sample
     * @param random            randomness source for the sampling
     * @return a sample combining {@code positives} with {@code size} random negatives
     */
    public static Sample<Resource> generateRandomSearchPool(ISparqlEndpoint sparqlEndpoint, Set<String> defaultGraphNames, Set<Resource> positives, int size, Random random)
    {
        // There are two strategies for generating a random search pool: Either use random limits and offsets (might be slow)
        // Or select all resources first, and then do the random selection (also slow)

        // Seems like selecting individual resources without or with distinct with a very high offset (e.g. distinct 8mio, non-distinct 100mio,
        // is extremely slow - therefore select everything
        Set<Resource> resources = MySparqlTasks.getAllTypedDBpediaResources(sparqlEndpoint, defaultGraphNames);

        // Guava's Sets.difference returns a live view over 'resources'; it is only
        // read here and (presumably) snapshotted by Sample.createCopy below, so the
        // view does not escape this method. NOTE(review): confirm createCopy copies.
        Set<Resource> candidates = Sets.difference(resources, positives);

        Set<Resource> negatives = RandomUtils.randomSampleSet(candidates, size, random);

        return Sample.createCopy(positives, negatives);
    }

    /**
     * Builds an evaluation pool from a keyword search: positives are the given
     * positives that the search actually returned, negatives are all remaining hits.
     *
     * @param sparqlEndpoint    endpoint to run the keyword search against
     * @param defaultGraphNames graphs to restrict the search to
     * @param keywords          keyword query string
     * @param positives         known positive resources
     * @return a sample of (positives ∩ hits, hits \ positives)
     */
    public static Sample<Resource> generateKeywordSearchPool(ISparqlEndpoint sparqlEndpoint, Set<String> defaultGraphNames, String keywords, Set<Resource> positives)
    {
        Set<Resource> searchHits = MySparqlTasks.getKeywordSearchResult(sparqlEndpoint, defaultGraphNames, keywords);

        return Sample.createCopy(
                Sets.intersection(positives, searchHits),
                Sets.difference(searchHits, positives));
    }

    /**
     * Builds an evaluation pool by repeatedly picking a random positive as a
     * navigation seed and collecting navigation examples around it, until either
     * all seeds are exhausted or the pool has 100 positives or 100 negatives.
     *
     * @param sparqlEndpoint    endpoint to query for navigation examples
     * @param defaultGraphNames graphs to restrict the queries to
     * @param category          category the navigation examples are taken from
     * @param positives         known positive resources, used as seed candidates
     * @param random            randomness source for seed selection
     * @return the accumulated sample
     */
    public static Sample<Resource> generateNavigationPool(ISparqlEndpoint sparqlEndpoint, Set<String> defaultGraphNames, Resource category, Set<Resource> positives, Random random)
    {
        Sample<Resource> pool = Sample.create();
        Set<Resource> remaining = new HashSet<Resource>(positives);
        int numPoolIterations = 0;
        // Termination is guaranteed: every iteration unconditionally removes the
        // chosen seed from 'remaining', so the loop runs at most |positives| times.
        while(!remaining.isEmpty() && (pool.getPositives().size() < 100 && pool.getNegatives().size() < 100)) {
            ++numPoolIterations;

            // Guard so the Joiner string is only built when DEBUG is actually enabled.
            if(logger.isDebugEnabled()) {
                logger.debug("Constructing pool: remaining/+/- = " + Joiner.on("/").join(remaining.size(), pool.getPositives().size(), pool.getNegatives().size()));
            }
            Resource navigationSeed = RandomUtils.randomItem(remaining, random);

            Sample<Resource> tmpPool = MySparqlTasks.getNavigationExamples(sparqlEndpoint, defaultGraphNames, navigationSeed, category, 1);

            // Validate the navigation examples
            // FIXME In fact the tmpPool may contain overlapping +/- examples.
            // This should be fixed in the Task, so we can assume clean data here!
            Sample<Resource> tmpPool2 = Sample.create(
                    Sets.intersection(positives, tmpPool.getPositives()),
                    Sets.difference(tmpPool.getNegatives(), positives));

            remaining.remove(navigationSeed);
            //remaining.removeAll(tmpPool.getPositives());
            pool.addAll(tmpPool2);
        }
        logger.info("poolIterations: " + numPoolIterations);

        return pool;
    }
}
