package main;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.semanticweb.owlapi.apibinding.OWLManager;
import org.semanticweb.owlapi.model.IRI;
import org.semanticweb.owlapi.model.OWLOntology;
import org.semanticweb.owlapi.model.OWLOntologyCreationException;
import org.semanticweb.owlapi.model.OWLOntologyManager;

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

import comparator.BDM;
import file.FileSD;
import file.OWLInputFormat;

public class Main {

	// Number of reduce tasks for the job; overwritten in main() from the CLI arguments.
	private static int numReducesTask = 1;

	/**
	 * Entry point. Besides the MapReduce job configuration, the following logic runs:
	 *  1- The ontologies (or sub-ontologies) arrive in pairs as arguments; the last
	 *     argument states how many pairs there are.
	 *  2- Three further arguments (otherArgs) sit between the pairs and the pair count:
	 *     output folder, threshold and number of reduce tasks.
	 *  3- Each pair is split into source/target by comparing the class counts of the two
	 *     ontologies (an ontology may appear more than once, in either list or in both).
	 *  4- The source/target lists are stored in the job configuration and used by the
	 *     record reader to build the key "comparacao.job.dataset".
	 *
	 * @param args generic Hadoop options followed by the application arguments
	 * @throws Exception if argument parsing, ontology loading or the MapReduce job fails
	 */
	public static void main(String[] args) throws Exception {
		long timeInit = System.currentTimeMillis();
		/* MapReduce main */
		Configuration conf = new Configuration();

		String[] allArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
		int sizeArg = allArgs.length;
		// Last argument is the number of ontology pairs; each pair contributes two paths.
		int listOfOntology = Integer.parseInt(allArgs[sizeArg - 1]) * 2;
		int totalOfParameters = listOfOntology + 3;
		// Trailing parameters before the pair count: [0] output dir, [1] threshold, [2] #reduces.
		String[] otherArgs = {allArgs[sizeArg - 4], allArgs[sizeArg - 3], allArgs[sizeArg - 2]};
		String[] pairsOntologyArgs = new String[listOfOntology];

		// Copy the ontology path arguments that precede the trailing parameters.
		for (int i = listOfOntology; i > 0; i--) {
			pairsOntologyArgs[i - 1] = allArgs[(sizeArg - totalOfParameters - 2) + i];
		}

		numReducesTask = Integer.parseInt(otherArgs[2]);

		int numberOfPairs = Integer.parseInt(allArgs[sizeArg - 1]);
		String[] sources = new String[numberOfPairs];
		String[] targets = new String[numberOfPairs];
		String[] numberOfReplications = new String[numberOfPairs];
		// Local copies of the last pair downloaded by readOWL(); removed after the job ends.
		File pathOnto1 = null;
		File pathOnto2 = null;

		for (int i = 0; i < numberOfPairs; i++) {
			Object[] onto1 = readOWL(new Path(pairsOntologyArgs[i * 2]), conf, "onto1");
			pathOnto1 = (File) onto1[0];
			int sizeOnto1 = ((OWLOntology) onto1[1]).getClassesInSignature().size() - 1;

			Object[] onto2 = readOWL(new Path(pairsOntologyArgs[(i * 2) + 1]), conf, "onto2");
			pathOnto2 = (File) onto2[0];
			int sizeOnto2 = ((OWLOntology) onto2[1]).getClassesInSignature().size() - 1;

			// The larger ontology becomes the source; its class count is the replication factor.
			if (sizeOnto1 >= sizeOnto2) {
				String[] nameSource = pairsOntologyArgs[i * 2].split("/");
				sources[i] = nameSource[nameSource.length - 1];

				String[] nameTarget = pairsOntologyArgs[(i * 2) + 1].split("/");
				targets[i] = nameTarget[nameTarget.length - 1];

				numberOfReplications[i] = String.valueOf(sizeOnto1);
			} else {
				String[] nameSource = pairsOntologyArgs[(i * 2) + 1].split("/");
				sources[i] = nameSource[nameSource.length - 1];

				String[] nameTarget = pairsOntologyArgs[i * 2].split("/");
				targets[i] = nameTarget[nameTarget.length - 1];

				numberOfReplications[i] = String.valueOf(sizeOnto2);
			}
		}
		conf.setStrings("owl.source", sources);
		conf.setStrings("owl.target", targets);
		conf.setStrings("numberOfReplications", numberOfReplications);

		conf.set("threshold", otherArgs[1]);

		// Raise the task timeout to one hour so long-running comparisons are not killed.
		long milliSeconds = 1000 * 60 * 60; // 1 hour.
		conf.setLong("mapred.task.timeout", milliSeconds);

		Job job = new Job(conf, "Matching");

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FileSD.class);

		job.setNumReduceTasks(numReducesTask);

		job.setInputFormatClass(OWLInputFormat.class);
		job.setOutputFormatClass(TextOutputFormat.class);

		job.setJarByClass(BDM.class);
		job.setMapperClass(BDM.Map.class);
		job.setPartitionerClass(BDM.Part.class);
		job.setGroupingComparatorClass(BDM.NaturalKeyGroupingComparator.class);
		job.setReducerClass(BDM.Reduce.class);

		// Deduplicate input paths: the same ontology may take part in several pairs.
		ArrayList<Path> paths = new ArrayList<Path>();
		for (String p : pairsOntologyArgs) {
			Path path = new Path(p);
			if (!paths.contains(path)) {
				paths.add(path);
			}
		}
		Path[] arrayPath = new Path[paths.size()];
		paths.toArray(arrayPath);

		FileInputFormat.setInputPaths(job, arrayPath);
		FileOutputFormat.setOutputPath(job, new Path(otherArgs[0]));

		boolean success = job.waitForCompletion(true);
		if (success) {
			long timeFinish = System.currentTimeMillis();
			System.err.println(">>>>>>>>Tempo de Execucao: " + (timeFinish - timeInit) / 1000);
			// Format the raw output file (disabled):
			// formatOutFile(args[2]);
		}
		// Delete the locally downloaded copies. Null-guarded: when numberOfPairs is 0
		// nothing was downloaded and both references are still null.
		if (pathOnto1 != null) {
			pathOnto1.delete();
		}
		if (pathOnto2 != null) {
			pathOnto2.delete();
		}
		System.exit(success ? 0 : 1);
	}

	/**
	 * Loads an ontology directly from a URI (local file or web address).
	 *
	 * @param path the ontology document URI
	 * @return the loaded ontology
	 * @throws OWLOntologyCreationException if the document cannot be parsed as an ontology
	 * @throws URISyntaxException if {@code path} is not a valid URI
	 */
	private static OWLOntology readOWL(String path) throws OWLOntologyCreationException, URISyntaxException {
		URI uri = new URI(path);
		OWLOntologyManager manager = OWLManager.createOWLOntologyManager();
		// The IRI points directly at the ontology document.
		IRI docIRI = IRI.create(uri);
		return manager.loadOntologyFromOntologyDocument(docIRI);
	}

	/**
	 * Downloads the blob named after {@code path}'s file name from Azure Blob Storage
	 * (container "superclusterlqd", folder SDProject/Input) into a local file
	 * "{@code fileName}.owl" and loads it with the OWL API.
	 *
	 * @param path HDFS path whose file name identifies the blob to download
	 * @param conf job configuration (currently unused; kept for interface stability)
	 * @param fileName base name for the local copy of the ontology
	 * @return two-element array: [0] the local {@link File}, [1] the loaded {@link OWLOntology}
	 * @throws Exception on storage access or ontology parsing failures
	 */
	private static Object[] readOWL(Path path, Configuration conf, String fileName) throws Exception {
		// NOTE(security): storage credentials are hard-coded in source. Move them to the
		// job configuration or a secret store, and rotate this account key.
		final String storageConnectionString = "DefaultEndpointsProtocol=http;" + "AccountName=testematcher;"
				+ "AccountKey=TqIn51pzE5r8CyAv0QeTTBqtd7LbS7bksSv/C8CMXAzvabqjtobID4qk5rw9yiY55p4mx5pwUC0/I1l7b19Zqw==";

		CloudStorageAccount account = CloudStorageAccount.parse(storageConnectionString);
		CloudBlobClient serviceClient = account.createCloudBlobClient();

		// Container name must be lower case.
		CloudBlobContainer container = serviceClient.getContainerReference("superclusterlqd");
		CloudBlobDirectory sdproj = container.getDirectoryReference("SDProject").getSubDirectoryReference("Input");
		CloudBlockBlob arq = sdproj.getBlockBlobReference(path.getName());

		// Download to a local file so the OWL API can read it from disk.
		File destinationFile = new File(fileName + ".owl");
		arq.downloadToFile(destinationFile.getAbsolutePath());

		OWLOntologyManager manager = OWLManager.createOWLOntologyManager();
		// The IRI points directly at the downloaded document.
		IRI docIRI = IRI.create(destinationFile);
		OWLOntology ontology = manager.loadOntologyFromOntologyDocument(docIRI);

		return new Object[] { destinationFile.getAbsoluteFile(), ontology };
	}

	/**
	 * Reads a whole file into a byte array, echoing its contents to stdout (debug aid).
	 * On failure the stack trace is printed and the (possibly partially filled,
	 * zero-padded) buffer is returned.
	 *
	 * @param file the file to read
	 * @return the file contents as bytes
	 */
	private static byte[] getByteArray(File file) {
		byte[] bFile = new byte[(int) file.length()];
		FileInputStream fileInputStream = null;
		try {
			// Read the file into the byte buffer.
			fileInputStream = new FileInputStream(file);
			fileInputStream.read(bFile);

			// Debug echo of the file contents.
			for (int i = 0; i < bFile.length; i++) {
				System.out.print((char) bFile[i]);
			}
			System.out.println("Done");
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			// Close in finally so the stream is released even when read() throws.
			if (fileInputStream != null) {
				try {
					fileInputStream.close();
				} catch (IOException ignored) {
					// best-effort close; nothing useful to do here
				}
			}
		}
		return bFile;
	}

	/**
	 * Formats the raw output file (with inferior duplicates) into a reduced version,
	 * written as "part-r-00000-FORMATTED" inside the same folder.
	 *
	 * @param path folder containing the raw output file "part-r-00000"
	 * @throws IOException if reading or writing fails
	 */
	private static void formatOutFile(String path) throws IOException {
		File file = targetFile(path);
		if (file == null) {
			System.err.println("Arquivo de saida nao pode ser lido");
			return;
		}
		List<String> lines = readFileLines(file);
		List<String> formatedLines = formatLines(lines);
		String newLine = System.getProperty("line.separator");
		FileWriter writer = new FileWriter(path + File.separator + "part-r-00000-FORMATTED");
		try {
			for (String str : formatedLines) {
				writer.write(str + newLine);
			}
		} finally {
			// Ensure the writer is closed (and its buffer flushed) even if a write fails.
			writer.close();
		}
	}

	/**
	 * Returns the output lines without "inferior duplicates": a line is removed when
	 * another line has a strictly higher similarity value and shares either the same
	 * target name or the same source entity (second tab-separated field of the source).
	 *
	 * @param lines content of the raw output file (with inferior duplicates)
	 * @return list containing the surviving lines, in their original order
	 */
	private static List<String> formatLines(List<String> lines) {
		// Start from all non-empty lines; losers are removed from this copy below.
		List<String> listAux = new ArrayList<String>();
		for (String s : lines) {
			if (!s.equals("")) {
				listAux.add(s);
			}

		}
		// Pairwise comparison: keep only the best-scoring line per target / source entity.
		for (String line : lines) {
			if (line.equals("")) {
				continue;
			}
			String[] lineSource = getLineArray(line);
			for (String line1 : lines) {
				if (!line.equals(line1) && !line1.equals("")) {
					String[] lineTarget = getLineArray(line1);
					String entityTarget = lineTarget[0].split("\t")[1];
					String entitySource = lineSource[0].split("\t")[1];
					if ((Double.parseDouble(lineTarget[2]) < Double.parseDouble(lineSource[2])) && lineTarget[1].equals(lineSource[1])
							|| (Double.parseDouble(lineTarget[2]) < Double.parseDouble(lineSource[2]) && entityTarget.equals(entitySource))) {
						listAux.remove(line1);
					}

				}

			}

		}
		return listAux;

	}

	/**
	 * Splits one raw output line of the form "source - target = value" into an array
	 * [source, target, value], normalizing decimal commas to dots in the value.
	 *
	 * @param line one line of the raw output file (with inferior duplicates)
	 * @return array in the format [source, target, result]
	 */
	private static String[] getLineArray(String line) {
		String[] array = line.split(" = ");
		String val = array[array.length - 1].replace(",", ".");
		String source = array[0].split(" - ")[0];
		String target = array[0].split(" - ")[1];
		return new String[] { source, target, val };
	}

	/**
	 * Reads the raw output file and returns a list with all of its lines.
	 *
	 * @param file the raw output file
	 * @return list with the file's lines (without line terminators)
	 * @throws IOException if the file cannot be read
	 */
	private static List<String> readFileLines(File file) throws IOException {
		List<String> list = new ArrayList<String>();
		BufferedReader bufferedReader = new BufferedReader(new FileReader(file));
		try {
			String line;
			while ((line = bufferedReader.readLine()) != null) {
				list.add(line);
			}
		} finally {
			// Closing the BufferedReader also closes the underlying FileReader.
			bufferedReader.close();
		}
		return list;
	}

	/**
	 * Locates the raw output file "part-r-00000" (the one with inferior duplicates,
	 * from which the formatted file is derived) inside the given folder.
	 *
	 * @param path folder expected to contain the raw output file
	 * @return the file, or {@code null} if the folder cannot be listed or has no such file
	 */
	private static File targetFile(String path) {
		File[] listFiles = new File(path).listFiles();
		if (listFiles == null) {
			// path does not exist, is not a directory, or an I/O error occurred
			return null;
		}
		for (File f : listFiles) {
			if (f.getName().equals("part-r-00000")) {
				return f;
			}
		}
		return null;
	}

}
