package sparql;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.util.List;

import persistance.RDFModel;
import receipe.Recipe;

import com.hp.hpl.jena.rdf.model.Model;
import com.hp.hpl.jena.rdf.model.ModelFactory;
import com.hp.hpl.jena.util.FileManager;

/**
 * Loads previously saved recipe RDF files into a single Jena model and runs a
 * SPARQL query over the combined graph, printing the results to stdout.
 *
 * <p>Call {@link #addAndRun()} (e.g. from a {@code main} method) to query the
 * saved RDF files without crawling and distilling again.</p>
 */
public class RunSparqlAddToModel {

	/**
	 * Merges every saved {@code Recipe<i>.rdf} file — numbered consecutively
	 * from 0, stopping at the first missing index — into one in-memory model,
	 * then executes the low-cholesterol/low-sugar/high-fiber pasta query and
	 * prints each result row.
	 *
	 * <p>Any failure (missing/unparsable file, query error) is reported via
	 * {@code printStackTrace()} and swallowed so callers are never thrown at;
	 * this preserves the original best-effort behavior of this entry point.</p>
	 */
	public static void addAndRun() {
		try {
			// Union model that accumulates the statements of every recipe file.
			Model rdfGraph = ModelFactory.createDefaultModel();

			// Recipe files are numbered consecutively from 0; the first gap
			// marks the end of the saved set.
			for (int i = 0; ; i++) {
				String fileName = "Recipe" + i + ".rdf";
				if (!new File(fileName).exists()) {
					break;
				}
				// Parse each file into its own model and merge it in.
				rdfGraph.add(newModel(fileName));
			}

			// Execute the SPARQL query against the combined graph.
			QueryService qs = new QueryService(rdfGraph);

			List<String> resultList = qs
					.getLowCholesterolLowSugarHighViberPastas();

			for (String result : resultList) {
				System.out.println("Agreeing with SPARQL");
				System.out.println("Result: " + result);
				System.out
						.println("---------------------------------------------"
								+ "---------------------------------------------");
			}

		} catch (Exception e) {
			// Boundary method: report and swallow so a single bad file or a
			// failed query does not propagate to the caller.
			e.printStackTrace();
		}
	}

	/**
	 * Reads the RDF/XML file at {@code path} into a fresh default model.
	 *
	 * @param path location of an RDF/XML file, resolved via Jena's
	 *             {@code FileManager} (filesystem or classpath)
	 * @return a new model containing the file's statements
	 * @throws IllegalArgumentException if no file can be found at {@code path}
	 *                                  ({@code FileManager.open} returns
	 *                                  {@code null} in that case)
	 * @throws UncheckedIOException     if closing the input stream fails
	 */
	public static Model newModel(String path) {
		Model rdfGraph = ModelFactory.createDefaultModel();
		// try-with-resources: the original leaked this stream (never closed).
		try (InputStream resultsStream = FileManager.get().open(path)) {
			if (resultsStream == null) {
				// Fail with context instead of the NPE the original produced
				// when the file was missing.
				throw new IllegalArgumentException("RDF file not found: " + path);
			}
			rdfGraph.read(resultsStream, null, "RDF/XML");
		} catch (IOException e) {
			// Wrap the checked close() exception so the public signature
			// stays unchanged (no checked throws).
			throw new UncheckedIOException("Failed to close stream for: " + path, e);
		}
		return rdfGraph;
	}
}
