package etxt2db.creator;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;

import org.apache.uima.UIMAFramework;
import org.apache.uima.cas.CAS;
import org.apache.uima.cas.CASException;
import org.apache.uima.collection.CollectionProcessingEngine;
import org.apache.uima.collection.EntityProcessStatus;
import org.apache.uima.collection.StatusCallbackListener;
import org.apache.uima.collection.metadata.CpeDescription;
import org.apache.uima.examples.SourceDocumentInformation;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.tcas.Annotation;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.InvalidXMLException;
import org.apache.uima.util.XMLInputSource;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;

import etxt2db.api.Document;
import etxt2db.corpus.MyAnnotation;
import etxt2db.corpus.DocumentLoader;
import etxt2db.mappings.AnnotationToKeyMapper;
import etxt2db.section.SectionSplitter;
import etxt2db.tokenizer.Span;
import etxt2db.tokenizer.Tokenizer;
import etxt2db.utils.ArchiveUnZipper;
import etxt2db.utils.FileSystemUtils;

/**
 * A Loader/Runner of UCompare workflows.
 * <p>
 * Contains an inner Thread responsible for running a workflow.
 * <p>
 * @author Gonçalo Simões
 * @author Rui Lageira
 *
 */
public class WorkflowLoader {

	private List<Document> documents;

	private DocumentLoader loader;

	private AnnotationToKeyMapper mapper;

	private Map<String,Document> originalDocs;

	/**
	 * Constructs a loader of a UCompare workflow, using a {@link SectionSplitter} and a {@link Tokenizer}
	 * in documents loading.
	 * <p>
	 * @param splitter	the splitter to be used
	 * @param tokenizer	the tokenizer to be used 
	 */
	public WorkflowLoader(SectionSplitter splitter, Tokenizer tokenizer) {
		this(splitter,tokenizer,new AnnotationToKeyMapper());
	}
	
	/**
	 * Constructs a loader of a UCompare workflow, using a {@link SectionSplitter} and a {@link Tokenizer}
	 * in documents loading, and a provided {@link AnnotationToKeyMapper} object.
	 * <p>
	 * @param splitter	the splitter to be used
	 * @param tokenizer	the tokenizer to be used 
	 * @param mapper	the 
	 */
	public WorkflowLoader(SectionSplitter splitter, Tokenizer tokenizer, AnnotationToKeyMapper mapper) {
		documents = new ArrayList<Document>();
		loader = new DocumentLoader(splitter,tokenizer);
		this.mapper = mapper;
		originalDocs = new HashMap<String,Document>();
	}

	/**
	 * @return	the loader of documents
	 */
	public DocumentLoader getDocumentLoader() {
		return loader;
	}

	/**
	 * @return	the annotation types mapper
	 */
	public AnnotationToKeyMapper getAnnotationMapper() {
		return mapper;
	}

	/**
	 * Getter of a particular document loaded before running the workflow,
	 * provided its content.
	 * <p>
	 * @param content	the content of the document to return
	 * @return			the document with the provided content
	 */
	public Document getOriginalDoc(String content) {
		return originalDocs.get(content);
	}

	/**
	 * Loads a UCompare workflow provided an input text to be classified.
	 * <p>
	 * @param documentText	the input to provide to the workflow
	 * @param workflowPath	the path to the UCompare workflow file (.ucz)
	 * @return	a list of documents loaded and annotated along the execution of the workflow
	 * @throws IOException						if an input/output error occurs
	 * @throws ResourceInitializationException	if an error producing or running the CPE occurs
	 * @throws InvalidXMLException				if an error parsing the CPE descriptor occurs
	 * @throws TransformerException				if an error writing the CPE descriptor occurs
	 * @throws ParserConfigurationException		if an error parsing the CPE descriptor occurs
	 * @throws SAXException						if an error parsing the CPE descriptor occurs
	 * @throws ParseException					if an error parsing the input text occurs
	 */
	public List<Document> load(String documentText, String workflowPath) throws IOException, ResourceInitializationException, InvalidXMLException, TransformerException, ParserConfigurationException, SAXException, ParseException {

		String cpeDescriptorPath = extractWorkflow(workflowPath);

		List<Document> docs = loader.load(documentText);

		for(Document d : docs){
			originalDocs.put(d.getContent(), d);
		}

		appendTextCollectionReader(documentText, cpeDescriptorPath);

		// Run workflow
		new UCompareWorkflowRunner(cpeDescriptorPath);

		return documents;
	}

	/**
	 * Loads a UCompare workflow provided a file or directory referring to document(s) to be classified.
	 * <p>
	 * @param file			the data location containing the input to be provided to the workflow
	 * @param workflowPath	the path to the UCompare workflow file (.ucz)
	 * @return	a list of documents loaded and annotated along the execution of the workflow
	 * @throws IOException						if an input/output error occurs
	 * @throws ResourceInitializationException	if an error producing or running the CPE occurs
	 * @throws InvalidXMLException				if an error parsing the CPE descriptor occurs
	 * @throws TransformerException				if an error writing the CPE descriptor occurs
	 * @throws ParserConfigurationException		if an error parsing the CPE descriptor occurs
	 * @throws SAXException						if an error parsing the CPE descriptor occurs
	 * @throws ParseException					if an error parsing the input text occurs
	 */
	public List<Document> load(File file, String workflowPath) throws IOException, ResourceInitializationException, InvalidXMLException, TransformerException, ParserConfigurationException, SAXException, ParseException {
		
		String cpeDescriptorPath = extractWorkflow(workflowPath);

		List<File> files = new ArrayList<File>();
		files.add(file);

		List<Document> docs = loader.load(file);

		for(Document d : docs){
			originalDocs.put(d.getContent(), d);
		}		
		appendFileSystemCollectionReader(files, cpeDescriptorPath);

		// Run workflow
		new UCompareWorkflowRunner(cpeDescriptorPath);

		return documents;
	}

	/**
	 * Loads a UCompare workflow provided an array of files or directories referring to documents to be classified.
	 * <p>
	 * @param files			the data locations containing the input to be provided to the workflow
	 * @param workflowPath	the path to the UCompare workflow file (.ucz)
	 * @return	a list of documents loaded and annotated along the execution of the workflow
	 * @throws IOException						if an input/output error occurs
	 * @throws ResourceInitializationException	if an error producing or running the CPE occurs
	 * @throws InvalidXMLException				if an error parsing the CPE descriptor occurs
	 * @throws TransformerException				if an error writing the CPE descriptor occurs
	 * @throws ParserConfigurationException		if an error parsing the CPE descriptor occurs
	 * @throws SAXException						if an error parsing the CPE descriptor occurs
	 * @throws ParseException					if an error parsing the input text occurs
	 */
	public List<Document> load(File[] files, String workflowPath) throws IOException, ResourceInitializationException, InvalidXMLException, TransformerException, ParserConfigurationException, SAXException, ParseException {

		String cpeDescriptorPath = extractWorkflow(workflowPath);

		List<File> filesList = new ArrayList<File>();

		for(int i = 0; i < files.length; i++) {
			filesList.add(files[i]);
		}

		List<Document> docs = loader.load(files);

		for(Document d : docs){
			originalDocs.put(d.getContent(), d);
		}

		appendFileSystemCollectionReader(filesList, cpeDescriptorPath);

		// Run workflow
		new UCompareWorkflowRunner(cpeDescriptorPath);

		return documents;
	}

	/**
	 * Extracts the content of the UCompare workflow zip file (.ucz) to 
	 * a directory named "extracted/{name_of_the_workflow}" in the same level of the file.
	 * <p>
	 * @param workflowPath	the path to the UCompare workflow file
	 * @return	the path to the Collection Processing Engine (CPE) descriptor
	 * @throws IOException	if an input/output error occurs
	 * @throws SAXException if an error parsing a XML file occurs
	 * @throws ParserConfigurationException if an error parsing a XML file occurs
	 */
	private String extractWorkflow(String workflowPath) throws IOException, ParserConfigurationException, SAXException {

		File workflow = new File(workflowPath);

		String extractionDirectory = getExtractionDirectory(workflow);

		ArchiveUnZipper.unzip(workflow, extractionDirectory);

		// copy desc directory to the project's root, so that we don't have
		// to modify the cpe descriptor.
		// if extractionDirectory+"/desc" is not found, then keep going.
		try {
			FileSystemUtils.copyDirectory(extractionDirectory+"/desc", ".");
		} catch(FileNotFoundException e) {
			System.out.println("No additional descriptors in " + workflowPath);
		}

		// Get the path to the Workflow (CPE) descriptor.
		// TODO: Read it from the DefaultComponentsMap.xml
		return searchCPEDescriptorPath(extractionDirectory);
	}

	/**
	 * Writes a new text Collection Reader descriptor from a 
	 * template which has a keyword <code>INPUT_TEXT</code> to be replaced with 
	 * text to read. The name of the new descriptor is marked 
	 * with a timeStamp. The new Collection Reader element is added to a CPE descriptor.
	 * <p>
	 * @param text					the text to read
	 * @param cpeDescriptorPath		the path to the CPE descriptor to which the new Collection Reader element will be added
	 * @throws TransformerException				if an error writing the CPE descriptor occurs
	 * @throws ParserConfigurationException		if an error parsing the CPE descriptor occurs
	 * @throws SAXException						if an error parsing the CPE descriptor occurs
	 * @throws IOException						if an input/output error occurs
	 */
	private void appendTextCollectionReader(String text, String cpeDescriptorPath) throws TransformerException, IOException, ParserConfigurationException, SAXException {		
		String textCRName = "desc/CollectionReader/TextCollectionReader/TextCollectionReader.xml";
		String newTextCRName = "desc/CollectionReader/TextCollectionReader/variation" + System.currentTimeMillis();

		// Copy the TextCollectionReader.xml template file and add the text to read.
		FileSystemUtils.replaceFileToken(textCRName, newTextCRName + ".xml", "INPUT_TEXT", text);

		// Read the Workflow descriptor, append the CollectionReader information, Write it to disk
		org.w3c.dom.Document cpeDescriptor = FileSystemUtils.readDocument(cpeDescriptorPath);
		appendCollectionReader(cpeDescriptor,newTextCRName,"Reads a document from an input text.");
		FileSystemUtils.writeDocument(cpeDescriptor, cpeDescriptorPath);
	}

	/**
	 * Writes a new FileSystem Collection Reader descriptor from a 
	 * template which has a keyword <code>FILE_PATHS</code> to be replaced with 
	 * a sequence of file paths to read. The name of the new descriptor is marked 
	 * with a timeStamp. The new Collection Reader element is added to a CPE descriptor.
	 * <p>
	 * @param files					the files do read
	 * @param cpeDescriptorPath		the path to the CPE descriptor to which the new Collection Reader element will be added
	 * @throws TransformerException				if an error writing the CPE descriptor occurs
	 * @throws ParserConfigurationException		if an error parsing the CPE descriptor occurs
	 * @throws SAXException						if an error parsing the CPE descriptor occurs
	 * @throws IOException						if an input/output error occurs
	 */
	private void appendFileSystemCollectionReader(List<File> files, String cpeDescriptorPath) throws TransformerException, ParserConfigurationException, SAXException, IOException {
		String fileSystemCRName = "desc/CollectionReader/FileSystemCollectionReader/FileSystemCollectionReader.xml";
		String newFileSystemCRName = "desc/CollectionReader/FileSystemCollectionReader/variation" + System.currentTimeMillis();

		String filePaths = "";

		for(File f : files) {
			filePaths += "<string>" + f.getAbsolutePath() + "</string>";
		}

		// Copy the FileSystemCollectionReader.xml template file and add the sequence of files to read.
		FileSystemUtils.replaceFileToken(fileSystemCRName, newFileSystemCRName + ".xml", "FILE_PATHS", filePaths);

		// Read the Workflow descriptor, append the CollectionReader information, Write it to disk
		org.w3c.dom.Document cpeDescriptor =  FileSystemUtils.readDocument(cpeDescriptorPath);
		appendCollectionReader(cpeDescriptor,newFileSystemCRName,"Reads documents from files.");
		FileSystemUtils.writeDocument(cpeDescriptor, cpeDescriptorPath);
	}

	/**
	 * Computes the name of a directory to which the content of UCompare .ucz workflow file
	 * will be extracted.
	 * <p>
	 * @param workflow	the .ucz file from which to capture its directory and name
	 * @return	the name of the directory
	 */
	private String getExtractionDirectory(File workflow) {
		String workflowDirectory = workflow.getParent();
		return (workflowDirectory != null ? workflowDirectory + "/" : "") + "extracted/" + workflow.getName().split(".ucz")[0];
	}

	/**
	 * Consults the defaultComponentsMap.xml workflow file to get the relative path to the CPE 
	 * descriptor and returns his absolute path, provided the absolute extraction directory path.
	 * <p>
	 * @param extractionDirectory	the absolute path to the extraction directory
	 * @return	the absolute path to the CPE descriptor
	 * @throws ParserConfigurationException if an error parsing a XML file occurs
	 * @throws SAXException if an error parsing a XML file occurs
	 * @throws IOException	if an input/output error occurs
	 */
	private String searchCPEDescriptorPath(String extractionDirectory) throws ParserConfigurationException, SAXException, IOException {
		String cpesDir = extractionDirectory + "/cpedesc/";
		
		org.w3c.dom.Document compMap = FileSystemUtils.readDocument(extractionDirectory + "/defaultComponentsMap.xml");
		
		NodeList entry = compMap.getElementsByTagName("entry");
		
		if(entry != null) {
			return cpesDir + entry.item(0).getTextContent();
		}
		
		return null;
	}


	/**
	 * Appends a Collection Reader element to the CPE descriptor.
	 * <p>
	 * @param cpeDescriptor				the CPE descriptor XML document
	 * @param collectionReaderDescPath	the relative path to the Collection Reader descriptor to append
	 * @param description				the description of the Collection Reader to append
	 */
	private void appendCollectionReader(org.w3c.dom.Document cpeDescriptor, String collectionReaderDescPath, String description) {
		Element root = cpeDescriptor.getDocumentElement();		
		Element CRElement = cpeDescriptor.createElement("collectionReader");
		Element CIElement = cpeDescriptor.createElement("collectionIterator");
		Element CRDescriptor = cpeDescriptor.createElement("descriptor");
		Element includeElement = cpeDescriptor.createElement("include");
		includeElement.setAttribute("href",description);
		Element importElement = cpeDescriptor.createElement("import");
		importElement.setAttribute("name", collectionReaderDescPath.replace("/", "."));
		CRDescriptor.appendChild(includeElement);
		CRDescriptor.appendChild(importElement);
		CIElement.appendChild(CRDescriptor);
		CRElement.appendChild(CIElement);
		root.appendChild(CRElement);		
	}

	/**
	 * @return	the list of documents loaded through the workflow
	 */
	public List<Document> getDocuments() {
		return documents;
	}

	/**
	 * Main Class that runs a Collection Processing Engine (CPE). This class reads a
	 * CPE Descriptor from his path and instantiates the CPE. It also
	 * registers a callback listener with the CPE, which will print progress and
	 * statistics to System.out.
	 * 
	 * 
	 */
	private class UCompareWorkflowRunner extends Thread {

		/**
		 * The CPE instance.
		 */
		private CollectionProcessingEngine mCPE;

		/**
		 * Start time of CPE initialization
		 */
		private long mStartTime;

		/**
		 * Start time of the processing
		 */
		private long mInitCompleteTime;

		/**
		 * Constructs the thread, creates a Collection Processing Engine from the 
		 * CPE descriptor path and runs the CPE.
		 * <p>
		 * @param cpeDescriptorPath	the directory path to the CPE descriptor XML file
		 * @throws ResourceInitializationException	if an error producing or running the CPE occurs
		 * @throws InvalidXMLException				if an error parsing the CPE descriptor occurs
		 * @throws IOException						if an error reading the input CPE descriptor occurs
		 */
		public UCompareWorkflowRunner(String cpeDescriptorPath) throws ResourceInitializationException, InvalidXMLException, IOException {

			mStartTime = System.currentTimeMillis();

			// parse CPE descriptor
			System.out.println("Parsing CPE Descriptor");
			CpeDescription cpeDesc = UIMAFramework.getXMLParser().parseCpeDescription(new XMLInputSource(cpeDescriptorPath));

			// instantiate CPE
			System.out.println("Instantiating CPE");
			mCPE = UIMAFramework.produceCollectionProcessingEngine(cpeDesc);

			StatusCallbackListenerImpl listener = new StatusCallbackListenerImpl();

			// Create and register a Status Callback Listener
			mCPE.addStatusCallbackListener(listener);

			// Start Processing
			System.out.println("Running CPE");
			mCPE.process();

			while(!listener.hasFinished()) {
				try{
					Thread.sleep(1000);
				} catch(InterruptedException e) {
					e.printStackTrace();
					System.exit(-1);
				}
			}

		}

		/**
		 * Callback Listener. Receives event notifications from CPE.
		 * 
		 * 
		 */
		class StatusCallbackListenerImpl implements StatusCallbackListener {
			int entityCount = 0;

			long size = 0;

			boolean hasFinished = false;

			/**
			 * Called when the initialization is completed.
			 * 
			 * @see org.apache.uima.collection.processing.StatusCallbackListener#initializationComplete()
			 */
			public void initializationComplete() {
				System.out.println("CPM Initialization Complete");
				mInitCompleteTime = System.currentTimeMillis();
			}

			public boolean hasFinished() {
				return hasFinished;
			}

			/**
			 * Called when the batchProcessing is completed.
			 * 
			 * @see org.apache.uima.collection.processing.StatusCallbackListener#batchProcessComplete()
			 * 
			 */
			public void batchProcessComplete() {
				System.out.print("Completed " + entityCount + " documents");
				if (size > 0) {
					System.out.print("; " + size + " characters");
				}
				System.out.println();
				long elapsedTime = System.currentTimeMillis() - mStartTime;
				System.out.println("Time Elapsed : " + elapsedTime + " ms ");
			}

			/**
			 * Called when the collection processing is completed.
			 * 
			 * @see org.apache.uima.collection.processing.StatusCallbackListener#collectionProcessComplete()
			 */
			public void collectionProcessComplete() {
				long time = System.currentTimeMillis();
				System.out.print("Completed " + entityCount + " documents");
				if (size > 0) {
					System.out.print("; " + size + " characters");
				}
				System.out.println();
				long initTime = mInitCompleteTime - mStartTime;
				long processingTime = time - mInitCompleteTime;
				long elapsedTime = initTime + processingTime;
				System.out.println("Total Time Elapsed: " + elapsedTime + " ms ");
				System.out.println("Initialization Time: " + initTime + " ms");
				System.out.println("Processing Time: " + processingTime + " ms");

				System.out.println("\n\n ------------------ PERFORMANCE REPORT ------------------\n");
				System.out.println(mCPE.getPerformanceReport().toString());

				hasFinished = true;

			}

			/**
			 * Called when the CPM is paused.
			 * 
			 * @see org.apache.uima.collection.processing.StatusCallbackListener#paused()
			 */
			public void paused() {
				System.out.println("Paused");
			}

			/**
			 * Called when the CPM is resumed after a pause.
			 * 
			 * @see org.apache.uima.collection.processing.StatusCallbackListener#resumed()
			 */
			public void resumed() {
				System.out.println("Resumed");
			}

			/**
			 * Called when the CPM is stopped abruptly due to errors.
			 * 
			 * @see org.apache.uima.collection.processing.StatusCallbackListener#aborted()
			 */
			public void aborted() {
				System.out.println("Aborted");
			}

			/**
			 * Called when the processing of a Document is completed. <br>
			 * The process status can be looked at and corresponding actions taken.
			 * 
			 * @param aCas
			 *            CAS corresponding to the completed processing
			 * @param aStatus
			 *            EntityProcessStatus that holds the status of all the
			 *            events for aEntity
			 */
			public void entityProcessComplete(CAS aCas, EntityProcessStatus aStatus) {

				if (aStatus.isException()) {
					List<?> exceptions = aStatus.getExceptions();
					for (int i = 0; i < exceptions.size(); i++) {
						((Throwable) exceptions.get(i)).printStackTrace();
					}
					return;
				}

				JCas jcas = null;

				try {
					jcas = aCas.getJCas();
				} catch (CASException e) {
					// TODO Auto-generated catch block
					e.printStackTrace();
				}

				// Load the etxt2db document. It will gather the informations from the previous context of this UCompare component.
				List<Document> documents = null;
				try {
					documents = getDocumentLoader().load(jcas.getDocumentText());
				} catch (ParseException e) {
					// TODO Auto-generated catch block
					e.printStackTrace();
				}

				Iterator<Document> documentIter = documents.iterator();

				Document document = null;

				if(documentIter.hasNext()) {
					document = documentIter.next();
				}

				// Get the document URI to insert in etxt2db Document
				Iterator<?> it = jcas.getAnnotationIndex(SourceDocumentInformation.type).iterator();
				String docUri = null;

				// For each document there's a call to process.
				if (it.hasNext()) {
					SourceDocumentInformation srcDocInfo = (SourceDocumentInformation) it.next();
					docUri = srcDocInfo.getUri();
					try {
						document.setUri(new URI(docUri));
					} catch (URISyntaxException e) {
						// TODO Auto-generated catch block
						e.printStackTrace();
					}
				}

				// iterate and gather annotations from a preceding workflow.
				Iterator<?> annotationIter = jcas.getAnnotationIndex().iterator();
				while (annotationIter.hasNext()) {
					try {
						
						Annotation a = (Annotation) annotationIter.next();
						
						List<String> keys = null;
						keys = mapper.getKey(a);
						
						for(String type : keys) {
							
							List<Object> distinguisher = null;
							distinguisher = mapper.getAnnotations(a, type);
							
							MyAnnotation myAnn = new MyAnnotation(a, type, distinguisher);
	
							document.addAnnotation(myAnn);
						}
					
					} catch(Exception e) {
						e.printStackTrace();
						System.exit(-1);
					}
				}

				Document original = getOriginalDoc(document.getContent());
				Set<String> types = original.getEntityTypes();
				for(String type : types){
					List<Span> entities = original.getAnnotationsForTypeNotOrganized(type);
					for(Span span : entities){
						document.addNamedEntityAnnotation(type, span);
					}

				}

				getDocuments().add(document);

				entityCount++;
				String docText = aCas.getDocumentText();
				if (docText != null) {
					size += docText.length();
				}
				
				System.out.println("Processed " + document.getUri().toString());
				
			}		

		}

	}

}
