package br.ufpe.cin.rdfilter.main;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.Set;

import br.ufpe.cin.rdfilter.config.CrawlerConfig;
import br.ufpe.cin.rdfilter.util.URLHandler;

import com.hp.hpl.jena.query.QueryExecution;
import com.hp.hpl.jena.query.QueryExecutionFactory;
import com.hp.hpl.jena.query.QuerySolution;
import com.hp.hpl.jena.query.ResultSet;
import com.hp.hpl.jena.rdf.model.RDFNode;

/**
 * Discovers the datasets whose triples were fetched by the crawler and,
 * through the semantic CKAN catalogue, the SPARQL endpoints of those datasets.
 */
public class DatasetsExtractor {

	/** CKAN SPARQL endpoint used to look up a dataset's own endpoint by homepage. */
	private static final String CKAN_ENDPOINT = "http://semantic.ckan.net/sparql/";

	/**
	 * Queries the crawler's triplestore for the provenance graphs of the fetched
	 * triples and reduces each graph URI to its URL prefix.
	 *
	 * @return list of distinct dataset provenance URL prefixes, in the order the
	 *         provenance graphs were returned by the endpoint
	 * @throws MalformedURLException if a provenance graph name is not a valid URL
	 */
	public ArrayList<String> extractDatasetsProvenanceURL() throws MalformedURLException {

		// LinkedHashSet keeps insertion order while giving O(1) duplicate checks
		// (the original contains()-before-add() pattern was O(n^2)).
		Set<String> provenancePrefixes = new LinkedHashSet<String>();
		URLHandler urlHandler = new URLHandler();

		// Select every graph (?g2) holding triples about resources that are linked
		// (via owl:sameAs, rdfs:seeAlso or owl:equivalentClass) from another graph.
		// NOTE: seeAlso lives in the rdf-schema namespace, hence the rdfs: prefix
		// (the URI is unchanged from the original query; only the prefix name was
		// corrected from the misleading "rdf:").
		String queryProvenances = "PREFIX owl: <http://www.w3.org/2002/07/owl#> " +
								  "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> " +
								  "SELECT DISTINCT ?g2 WHERE {  " +
								  "GRAPH ?g1 { ?s ?p ?o . } " +
								  "GRAPH ?g2 { ?s2 ?p2 ?o2 . } " +
								  "FILTER (?p = owl:sameAs || ?p = rdfs:seeAlso || ?p = owl:equivalentClass ) " +
								  "FILTER (?s2 = ?s || ?s2 = ?o) " +
								  "}";

		// Execute against the triplestore holding the crawler's harvested triples.
		QueryExecution queryExecution =
				QueryExecutionFactory.sparqlService(CrawlerConfig.ENDPOINT_SPARQL, queryProvenances);
		try {
			ResultSet results = queryExecution.execSelect();

			while (results.hasNext()) {
				QuerySolution solution = results.nextSolution();

				// For each provenance URI, keep only its prefix (initial part of the URL).
				RDFNode tripleProvenance = solution.get("g2");
				URL tripleProvenanceURL = new URL(tripleProvenance.toString());
				provenancePrefixes.add(urlHandler.getPrefixURL(tripleProvenanceURL));
			}
		} finally {
			// Jena QueryExecution holds network resources; always release them.
			queryExecution.close();
		}

		return new ArrayList<String>(provenancePrefixes);
	}

	/**
	 * Looks up, in the semantic CKAN catalogue, the SPARQL endpoint of the dataset
	 * whose homepage best matches the given provenance prefix.
	 *
	 * @param datasetProvenancePrefix prefix (initial part) of a dataset provenance URL
	 * @return the dataset's SPARQL endpoint URI, or {@code null} when the prefix is
	 *         not a valid URL or no matching dataset/endpoint exists
	 */
	public String getDatasetEndpoint(String datasetProvenancePrefix) {

		URLHandler urlHandler = new URLHandler();

		String datasetProvenanceHost;
		try {
			datasetProvenanceHost = new URL(datasetProvenancePrefix).getHost();
		} catch (MalformedURLException e) {
			// An invalid prefix cannot match any homepage. Bail out instead of
			// querying with an empty host, which would make the regex filter
			// below match every dataset in the catalogue.
			return null;
		}

		// Find datasets whose homepage mentions the provenance host.
		String queryEndpoint = "PREFIX dcterms: <http://purl.org/dc/terms/> " +
							   "PREFIX void: <http://rdfs.org/ns/void#> " +
							   "PREFIX foaf: <http://xmlns.com/foaf/0.1/>" +
							   "SELECT distinct ?endpoint ?homepage " +
							   "WHERE{ " +
							   " ?dataset void:sparqlEndpoint ?endpoint . " +
							   " ?dataset foaf:homepage ?homepage . " +
							   " FILTER regex(str(?homepage),\"" + datasetProvenanceHost + "\")" +
							   "} ";

		ArrayList<String> datasetsHomepage = new ArrayList<String>();
		ArrayList<String> datasetsEndpoint = new ArrayList<String>();

		QueryExecution queryExecution = QueryExecutionFactory.sparqlService(CKAN_ENDPOINT, queryEndpoint);
		try {
			ResultSet results = queryExecution.execSelect();

			// Build the parallel lists of candidate homepages and their endpoints.
			while (results.hasNext()) {
				QuerySolution solution = results.nextSolution();

				RDFNode endpoint = solution.get("endpoint");
				RDFNode homepage = solution.get("homepage");

				datasetsHomepage.add(homepage.toString());
				datasetsEndpoint.add(endpoint.toString());
			}
		} finally {
			// Release the HTTP resources held by the query execution.
			queryExecution.close();
		}

		String datasetEndpoint = null;
		if (!datasetsHomepage.isEmpty()) {
			// Choose the homepage closest to the provenance prefix (longest common
			// prefix, per URLHandler) and return the endpoint paired with it.
			String datasetHomepage =
					urlHandler.getLongestPrefixCommonURL(datasetsHomepage, datasetProvenancePrefix);
			datasetEndpoint = datasetsEndpoint.get(datasetsHomepage.indexOf(datasetHomepage));
		}

		return datasetEndpoint;
	}

	/**
	 * Builds the list of distinct SPARQL endpoints of the datasets whose triples
	 * were extracted by the crawler.
	 *
	 * @return list of distinct dataset SPARQL endpoint URIs (provenances with no
	 *         discoverable endpoint are skipped)
	 * @throws MalformedURLException if a provenance graph name is not a valid URL
	 */
	public ArrayList<String> getDatasetEndpointList() throws MalformedURLException {

		// Order-preserving dedup of the endpoints resolved per provenance prefix.
		Set<String> endpoints = new LinkedHashSet<String>();

		for (String datasetProvenance : this.extractDatasetsProvenanceURL()) {
			String datasetEndpoint = this.getDatasetEndpoint(datasetProvenance);

			// getDatasetEndpoint returns null when no endpoint could be found.
			if (datasetEndpoint != null) {
				endpoints.add(datasetEndpoint);
			}
		}

		return new ArrayList<String>(endpoints);
	}

}
