/**
 * 
 */
package info.sswap.ontologies.modularity.repo.rdf;

import info.sswap.ontologies.util.URIUtils;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.openrdf.model.Statement;
import org.openrdf.model.URI;
import org.openrdf.model.ValueFactory;
import org.openrdf.model.vocabulary.OWL;
import org.openrdf.model.vocabulary.RDF;
import org.openrdf.model.vocabulary.RDFS;
import org.openrdf.model.vocabulary.XMLSchema;
import org.openrdf.query.GraphQuery;
import org.openrdf.query.GraphQueryResult;
import org.openrdf.query.MalformedQueryException;
import org.openrdf.query.QueryEvaluationException;
import org.openrdf.query.QueryLanguage;
import org.openrdf.repository.Repository;
import org.openrdf.repository.RepositoryConnection;
import org.openrdf.repository.RepositoryException;
import org.openrdf.repository.config.RepositoryConfig;
import org.openrdf.repository.config.RepositoryConfigException;
import org.openrdf.repository.manager.LocalRepositoryManager;
import org.openrdf.repository.manager.RepositoryManager;
import org.openrdf.repository.sail.config.SailRepositoryConfig;
import org.openrdf.sail.nativerdf.config.NativeStoreConfig;
import org.semanticweb.owlapi.apibinding.OWLManager;
import org.semanticweb.owlapi.model.AxiomType;
import org.semanticweb.owlapi.model.IRI;
import org.semanticweb.owlapi.model.OWLAxiom;
import org.semanticweb.owlapi.model.OWLEntity;
import org.semanticweb.owlapi.model.OWLOntology;
import org.semanticweb.owlapi.model.OWLOntologyCreationException;
import org.semanticweb.owlapi.model.OWLOntologyManager;
import org.semanticweb.owlapi.model.OWLSubClassOfAxiom;
import org.semanticweb.owlapi.model.OWLSubDataPropertyOfAxiom;
import org.semanticweb.owlapi.model.OWLSubObjectPropertyOfAxiom;
import org.semanticweb.owlapi.util.OWLOntologyImportsClosureSetProvider;
import org.semanticweb.owlapi.util.OWLOntologyMerger;
import org.semanticweb.owlapi.vocab.OWLRDFVocabulary;

import uk.ac.manchester.cs.owlapi.modularity.ModuleType;
import edu.arizona.bio5.onto.decomposition.AtomSignatureLabelingAlgorithm;
import edu.arizona.bio5.onto.decomposition.AtomicDecomposition;
import edu.arizona.bio5.onto.decomposition.ChiaraDecompositionAlgorithm;
import edu.arizona.bio5.onto.decomposition.MSSLabelingAlgorithm;
import edu.arizona.bio5.onto.decomposition.persistent.ADPersistenceException;
import edu.arizona.bio5.onto.decomposition.persistent.rdf.SesameADPersister;
import edu.arizona.bio5.onto.util.FileUtils;
import edu.arizona.bio5.onto.util.OntologyUtils;

/**
 * Populates an RDF-based ontology repository: loads ontologies (from local
 * files or URIs), computes their atomic decompositions, and persists the
 * results into Sesame native stores, recording admin metadata per database.
 */
public class PopulateRDFRepository {

	/** Root directory holding all Sesame native-store repositories. */
	static final File REPO_DIR = new File("test/data/rdf-repo-bot/");
	/** Module type used for the atomic decomposition (bottom modules). */
	static final ModuleType MODULE_TYPE = ModuleType.BOT;
	
	private static RepositoryManager sesameManager = null;
	private static Repository adminRepo = null;
	/*
	 * List of ontologies to be decomposed and put into the repository
	 */
	private static final String[] ONTO_FILES = new String[]{
		"test/data/PlantAnnotation/po.owl"
		, "test/data/onto/so.obo"
		, "test/data/onto/obo.owl"
		, "test/data/onto/envo.obo"
		, "test/data/onto/oboInOwl.owl"
		, "test/data/onto/plant_trait.obo"
	};
	
	private static final String[] ONTO_URIS = new String[]{
		"http://sswapmeet.sswap.info/sswap/sswap.owl"
		/*, "http://sswapmeet.sswap.info/genome/genome.owl"
		, "http://sswapmeet.sswap.info/map/map.owl"
		, "http://sswapmeet.sswap.info/qtl/qtl.owl"
		, "http://sswapmeet.sswap.info/marker/marker.owl"
		, "http://sswapmeet.sswap.info/sequence/sequence.owl"
		, "http://sswapmeet.sswap.info/sequenceServices/sequenceServices.owl"
		, "http://sswapmeet.sswap.info/taxa/taxa.owl"
		, "http://sswapmeet.sswap.info/trait/trait.owl"
		, "http://sswapmeet.sswap.info/NCBITaxonomyRecord/NCBITaxonomyRecord.owl"*/
	};
	
	private static final String TERM_CLOUD_ONTO_FILE = "test/data/term_cloud.owl";
	
	
	/**
	 * Entry point: initializes the repository manager and the admin repository,
	 * populates the term-cloud ontology database, then shuts everything down.
	 * The commented-out loops over {@link #ONTO_FILES} / {@link #ONTO_URIS}
	 * can be re-enabled to load additional ontologies.
	 *
	 * @param args ignored
	 * @throws Exception on any repository, ontology-loading, or persistence failure
	 */
	public static void main(String[] args) throws Exception {
		//cleanup();
		initRepo();
		
		populate(TERM_CLOUD_ONTO_FILE, RDFOntologyRepository.TERM_CLOUD_DB_NAME, false); 
		
		/*for (String ontoFileName : ONTO_FILES) {
			populate(ontoFileName, null, false);
		}
		*/
		/*
		for (String ontoFileName : ONTO_URIS) {
			populate(ontoFileName, null, true);
		}*/
		
		//populate("/Users/pklinov/work/UA/SSWAP/PO/po.owl", null, false);
		
		adminRepo.shutDown();
		sesameManager.shutDown();
	}

	/**
	 * Loads an ontology, merges its imports closure, decomposes it, persists the
	 * decomposition into a dedicated database, and records admin metadata.
	 *
	 * @param ontoSource ontology location: a file path when {@code uri} is false, an IRI otherwise
	 * @param dbName     target database name; when null it is derived from the source name
	 * @param uri        whether {@code ontoSource} should be interpreted as an IRI
	 * @throws OWLOntologyCreationException if the ontology (or its closure) cannot be loaded/merged
	 * @throws RepositoryConfigException    if the target repository cannot be configured
	 * @throws RepositoryException          on repository access failures
	 * @throws ADPersistenceException       if the decomposition cannot be persisted
	 */
	private static void populate(String ontoSource, String dbName, boolean uri) throws OWLOntologyCreationException, RepositoryConfigException, RepositoryException, ADPersistenceException {
		OWLOntologyManager mgr = OWLManager.createOWLOntologyManager();
		OWLOntology ontology = null;
		
		if (uri) {
			IRI iri = IRI.create(ontoSource);
			
			ontology = mgr.loadOntology(iri);
			// default the DB name to the IRI fragment without its file extension
			dbName = dbName == null ? FileUtils.removeExtension(new File(iri.getFragment())) : dbName;
		}
		else {
			File ontologyFile = new File(ontoSource);
			
			ontology = mgr.loadOntologyFromOntologyDocument(ontologyFile);
			// default the DB name to the file name without its extension
			dbName = dbName == null ? FileUtils.removeExtension(ontologyFile) : dbName;
		}
		//add the import closure
		//FIXME Can we introduce a notion of dependency between atomic decompositions
		//to reuse the decomposition of a base ontology in the decompositions of all ontologies that import it?
		OWLOntologyImportsClosureSetProvider provider = new OWLOntologyImportsClosureSetProvider(mgr, ontology);
		OWLOntologyMerger merger = new OWLOntologyMerger(provider);
		OWLOntology closure = merger.createMergedOntology(OWLManager.createOWLOntologyManager(), ontology.getOntologyID().getOntologyIRI());
		
		OntologyUtils.prepareForSyntacticModularity(closure, closure.getOWLOntologyManager());
		//decompose it
		AtomicDecomposition ad = decompose(closure);
		//TODO begin tx here?
		//save the decomposition into RDF
		persistDecomposition(dbName, ad);
		//add data to the admin DB
		addAdminData(closure, dbName);		
		//TODO commit here?
	}

	/**
	 * Records admin metadata for a freshly persisted ontology: its type, the
	 * database it is stored in, and the namespaces of the terms it defines.
	 *
	 * @param ontology the (merged) ontology that was persisted
	 * @param dbName   name of the database the ontology was stored in
	 * @throws RepositoryException on admin repository access failures
	 */
	private static void addAdminData(OWLOntology ontology, String dbName) throws RepositoryException {
		RepositoryConnection adminConn = adminRepo.getConnection();
		
		// close the connection even if any add() fails
		try {
			ValueFactory factory = adminConn.getValueFactory();
			URI ontoURI = factory.createURI(ontology.getOntologyID().getOntologyIRI().toString());
			//this is an ontology
			adminConn.add(ontoURI, RDF.TYPE, OWL.ONTOLOGY);
			//stored in a specific DB
			adminConn.add(ontoURI, factory.createURI(SSWAPRDFConstants.SSWAP_STORED_IN_PROPERTY), factory.createLiteral(dbName));
			//extract namespaces
			for(String ns : extractNamespaces(ontology)) {
				adminConn.add(factory.createURI(ns), RDF.TYPE, factory.createURI(SSWAPRDFConstants.SSWAP_NS_TYPE));
				adminConn.add(factory.createURI(ns), RDFS.ISDEFINEDBY, ontoURI);
				
				System.out.println(ns + " defined by " + ontoURI);
			}
		} finally {
			adminConn.close();
		}
	}
	
	/*
	 * Clears admin data for the given database (since it's being recreated).
	 * Query failures are logged and swallowed deliberately: a failed cleanup
	 * should not abort the repopulation of the database.
	 */
	private static void cleanUpAdminData(String dbName) throws RepositoryConfigException, RepositoryException {
		RepositoryConnection adminConn = adminRepo.getConnection();
		ValueFactory factory = adminConn.getValueFactory();
		
		try {
			// reconstruct all admin statements attached (directly or via an ontology) to this DB
			GraphQuery graphQuery = adminConn.prepareGraphQuery(QueryLanguage.SPARQL, 
					"CONSTRUCT {?onto <" + SSWAPRDFConstants.SSWAP_STORED_IN_PROPERTY + "> ?db ." +
					"?onto <" + RDF.TYPE + "> <" + OWL.ONTOLOGY + "> ." +
					"?ns <" + RDFS.ISDEFINEDBY + "> ?onto } WHERE {" +
					"?onto <" + SSWAPRDFConstants.SSWAP_STORED_IN_PROPERTY + "> ?db ."+"" +
					"?onto <" + RDF.TYPE + "> <" + OWL.ONTOLOGY + "> ." +
					"?ns <" + RDF.TYPE + "> <" + SSWAPRDFConstants.SSWAP_NS_TYPE + "> ." +
					"?ns <" + RDFS.ISDEFINEDBY + "> ?onto " +
					"}");
			
			graphQuery.setBinding("db", factory.createLiteral(dbName));
			
			GraphQueryResult graphResult = graphQuery.evaluate();
			
			// release the query result even if removal fails mid-iteration
			try {
				while (graphResult.hasNext()) {
					Statement st = graphResult.next();
					
					System.out.println("Deleting: " + st);
					
					adminConn.remove(st);
				}
			} finally {
				graphResult.close();
			}
		} catch (QueryEvaluationException e) {
			e.printStackTrace();
		} catch (MalformedQueryException me) {
			me.printStackTrace();
		} finally {
			adminConn.close();
		}
	}	

	/**
	 * Collects the namespaces of all terms "defined" by the ontology's logical
	 * axioms (see {@link #getDefinedTerms(OWLAxiom)}), skipping XML Schema and
	 * built-in OWL/RDF(S) vocabulary terms.
	 *
	 * @param ontology the ontology to scan
	 * @return the set of defined-term namespaces (possibly empty)
	 */
	private static Set<String> extractNamespaces(OWLOntology ontology) {
		Set<String> result = new HashSet<String>();
		
		for (OWLAxiom axiom : ontology.getLogicalAxioms()) {
			
			for (OWLEntity definedTerm : getDefinedTerms(axiom)) {
				if (!definedTerm.getIRI().toString().startsWith(XMLSchema.NAMESPACE)
						&& !OWLRDFVocabulary.BUILT_IN_VOCABULARY_IRIS.contains(definedTerm.getIRI())) {
					
					result.add(URIUtils.getNamespace(definedTerm.getIRI().toURI()));
				}
			}
		}
		
		return result;
	}
	
	/*
	 * TODO Think about it!
	 * Should return the list of terms which are "defined" by this axiom (for some meaning of defined)
	 * Right now we just look for TBox axioms: the sub-side signature of subsumption
	 * axioms, and the full signature of equivalence/disjointness/declaration axioms.
	 */
	private static Collection<OWLEntity> getDefinedTerms(OWLAxiom axiom) {
		List<OWLEntity> definedTerms = new ArrayList<OWLEntity>();
		
		if (axiom.getAxiomType().equals(AxiomType.SUBCLASS_OF)) {
			definedTerms.addAll(((OWLSubClassOfAxiom)axiom).getSubClass().getSignature());
		}
		else if (axiom.getAxiomType().equals(AxiomType.SUB_OBJECT_PROPERTY)) {
			definedTerms.addAll(((OWLSubObjectPropertyOfAxiom)axiom).getSubProperty().getObjectPropertiesInSignature());
		}
		else if (axiom.getAxiomType().equals(AxiomType.SUB_DATA_PROPERTY)) {
			definedTerms.addAll(((OWLSubDataPropertyOfAxiom)axiom).getSubProperty().getDataPropertiesInSignature());
		}
		else if (axiom.getAxiomType().equals(AxiomType.EQUIVALENT_CLASSES)
				|| axiom.getAxiomType().equals(AxiomType.DISJOINT_CLASSES)
				|| axiom.getAxiomType().equals(AxiomType.DECLARATION)) {
			definedTerms.addAll(axiom.getSignature());
		}
		
		return definedTerms;
	}

	/**
	 * Persists an atomic decomposition into the database with the given name.
	 * If the database already exists it is cleared (and its admin data removed)
	 * before being repopulated; otherwise a new native-store repository is created.
	 *
	 * @param dbName name of the target database
	 * @param ad     the atomic decomposition to persist
	 * @throws RepositoryConfigException if the repository cannot be configured
	 * @throws RepositoryException       on repository access failures
	 * @throws ADPersistenceException    if the decomposition cannot be persisted
	 */
	private static void persistDecomposition(String dbName, AtomicDecomposition ad) throws RepositoryConfigException, RepositoryException, ADPersistenceException {
		//get hold of the repository connection
		RepositoryConnection conn = null;
		
		if (sesameManager.getRepositoryIDs().contains(dbName)) {
			//delete it
			System.err.println("Recreating the repository " + dbName);
			Repository repo = sesameManager.getRepository(dbName);
			
			conn = repo.getConnection();
			conn.clear();
			cleanUpAdminData(dbName);
		}
		else {
			//create a new repo
			sesameManager.addRepositoryConfig(new RepositoryConfig(dbName, new SailRepositoryConfig(new NativeStoreConfig())));
			Repository repo = sesameManager.getRepository(dbName);

			conn = repo.getConnection();
		}
		//ok, now persist it; close the connection even if persistence fails
		try {
			SesameADPersister persister = new SesameADPersister(conn);
			
			persister.persist(ad);
		} finally {
			conn.close();
		}
	}

	/*
	 * Decomposes and labels the ontology: runs the Chiara decomposition with
	 * MODULE_TYPE modules, then computes the MSS and atom-signature labelings.
	 */
	private static AtomicDecomposition decompose(OWLOntology ontology) {
		AtomicDecomposition ad = new ChiaraDecompositionAlgorithm(MODULE_TYPE).decompose(ontology.getOWLOntologyManager(), ontology);
		
		new MSSLabelingAlgorithm().compute(ad);
		new AtomSignatureLabelingAlgorithm().compute(ad);
		
		return ad;
	}

	/**
	 * Initializes the local Sesame repository manager under {@link #REPO_DIR}
	 * and creates/opens the admin repository.
	 *
	 * @throws RepositoryException       on repository access failures
	 * @throws RepositoryConfigException if the admin repository cannot be configured
	 */
	private static void initRepo() throws RepositoryException, RepositoryConfigException {
		sesameManager = new LocalRepositoryManager(REPO_DIR);		
		sesameManager.initialize();
		sesameManager.addRepositoryConfig(new RepositoryConfig(RDFOntologyRepository.ADMIN_DB_NAME, new SailRepositoryConfig(new NativeStoreConfig())));
		adminRepo = sesameManager.getRepository(RDFOntologyRepository.ADMIN_DB_NAME);
	}

	/**
	 * Empties the repository directory if it exists, or creates it otherwise.
	 * Currently only invoked from the commented-out call in {@link #main(String[])}.
	 * NOTE(review): presumably FileUtils.cleanRecursively empties the directory
	 * without deleting it, so no mkdirs is needed afterwards — confirm.
	 *
	 * @throws IOException if the directory cannot be cleaned
	 */
	private static void cleanup() throws IOException {
		if (REPO_DIR.exists()) {
			FileUtils.cleanRecursively(REPO_DIR);
		}
		else {
			REPO_DIR.mkdirs();
		}
	}
}