package org.nlp2rdf.navigator.benchmark.cli;

import com.google.common.base.Joiner;
import com.google.common.collect.TreeMultimap;
import com.hp.hpl.jena.rdf.model.Resource;
import com.hp.hpl.jena.rdf.model.ResourceFactory;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.nlp2rdf.navigator.benchmark.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Created by Claus Stadler
 * Date: Oct 24, 2010
 * Time: 10:58:45 PM
 */
/**
 * CLI entry point that evaluates keyword-search quality over DBpedia categories:
 * for each candidate category it compares the keyword-search result set against
 * the category's (typed) member set and logs precision-style sample statistics.
 */
public class RunPoolSearchStats {
    private static final Logger logger = LoggerFactory.getLogger(RunPoolSearchStats.class);

    /** Only categories bucketed under this member-count key are analyzed. */
    private static final int CATEGORY_MEMBER_COUNT = 100;

    /** Minimum typed true positives required before a category is reported. */
    private static final int MIN_TYPED_TRUE_POSITIVES = 5;

    private static Options cliOptions = new Options()
        .addOption("c", "configFile", true, "Spring XML config")
        .addOption("k", "keyword", true, "Keyword to search for");

    /** Number of categories that passed the true-positive threshold so far. */
    static int counter = 0;

    /**
     * Parses the command line, builds the benchmark context from the Spring
     * config given via {@code -c}, and analyzes every category in the
     * {@link #CATEGORY_MEMBER_COUNT} bucket.
     *
     * @param args CLI arguments; {@code -c} names the Spring XML config file
     * @throws Exception if context creation or a SPARQL task fails
     */
    public static void main(String[] args)
            throws Exception
    {
        CommandLine commandLine = CliUtils.getGnuCommandLine(cliOptions, args);
        ApplicationContext context =
                CliUtils.getApplicationContext(commandLine, commandLine.getOptionValue("c"));

        BenchmarkContext con = new BenchmarkContext(context);

        // NOTE(review): the "k" (keyword) option is declared and was previously
        // read here, but its value is never used — analyzeSearch() derives the
        // search string from each category's URI instead. Confirm whether "k"
        // should drive the search or be dropped from cliOptions.

        TreeMultimap<Integer, Resource> categories =
                MySparqlTasks.getCategoriesWithTypedMembersAndWithoutNumbers(
                        con.getSparqlEndpoint(), con.getGraphNames());

        for (Resource category : categories.get(CATEGORY_MEMBER_COUNT)) {
            analyzeSearch(con, category);
        }
    }

    /**
     * Runs the keyword search for one category and logs sample statistics for
     * both the full member set and the typed member set. Categories with fewer
     * than {@link #MIN_TYPED_TRUE_POSITIVES} typed true positives are skipped.
     * Increments the shared {@link #counter} for each reported category
     * (single-threaded use only — the counter is not synchronized).
     *
     * @param con      benchmark context supplying the SPARQL endpoint and graphs
     * @param category the category resource whose members are evaluated
     */
    public static void analyzeSearch(BenchmarkContext con, Resource category) {
        logger.info("Processing category: {}", category);

        // Positives = all category members (untyped baseline).
        Sample<Resource> all = Sample.create(
                MySparqlTasks.getDBpediaCategoryMembers(con.getSparqlEndpoint(), con.getGraphNames(), category),
                null);

        // Positives = only members that carry an rdf:type.
        Sample<Resource> allTyped = Sample.create(
                MySparqlTasks.getTypedDBpediaCategoryMembers(con.getSparqlEndpoint(), con.getGraphNames(), category),
                null);

        // NOTE(review): the keyword search is fed the category's full URI
        // (Resource.toString()), not a human-readable keyword or label —
        // confirm this is intended.
        Set<Resource> searchResult = MySparqlTasks.getKeywordSearchResult(
                con.getSparqlEndpoint(), con.getGraphNames(), category.toString());

        SampleStats<Resource> allStats = SampleStats.create(all, searchResult);
        SampleStats<Resource> allTypedStats = SampleStats.create(allTyped, searchResult);

        // Skip sparsely matched categories; with this threshold 27 categories
        // remained in the original experiment where >= 5 positives were found.
        if (allTypedStats.getTrue().getPositives().size() < MIN_TYPED_TRUE_POSITIVES) {
            return;
        }
        ++counter;

        logger.info("counter = {}", counter);

        logger.info("Search result: searchResultSize/posAll/posTyped = {}",
                Joiner.on("/").join(
                        searchResult.size(),
                        all.getPositives().size(),
                        allTyped.getPositives().size()));
        logger.info("typedTruePositives {}", allTypedStats.getTrue().getPositives().size());
        logger.info("typedFalsePositives {}", allTypedStats.getFalse().getPositives().size());

        logger.info("all: {}", SampleStats.formatHumanReadable(allStats));
        logger.info("allTyped: {}", SampleStats.formatHumanReadable(allTypedStats));

        logger.info("");
    }
}
