package main;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.semanticweb.owlapi.apibinding.OWLManager;
import org.semanticweb.owlapi.model.IRI;
import org.semanticweb.owlapi.model.OWLOntology;
import org.semanticweb.owlapi.model.OWLOntologyManager;

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
import comparator.BDM;

import file.FileSD;
import file.OWLInputFormat;

public class Main {

	//private static int numReducesTask = 52;


	/**
	 * Job driver. After Hadoop's generic options are stripped, the last five
	 * arguments are expected to be:
	 * {@code <ontology1> <ontology2> <outputDir> <threshold> <isAzure>}.
	 * Configures and submits the "Matching" MapReduce job and exits with the
	 * job's success status (0 on success, 1 on failure).
	 *
	 * @param args command line; generic Hadoop options plus the five job args
	 * @throws Exception if ontology loading or job submission fails
	 */
	public static void main(String[] args) throws Exception {
		long timeInit = System.currentTimeMillis();

		Configuration conf = new Configuration();
		String[] allArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
		int sizeArg = allArgs.length;

		// Guard: the original code indexed allArgs[sizeArg - 5] blindly and
		// threw ArrayIndexOutOfBoundsException on a short command line.
		if (sizeArg < 5) {
			System.err.println("Usage: <ontology1> <ontology2> <outputDir> <threshold> <isAzure>");
			System.exit(1);
		}
		String[] otherArgs = { allArgs[sizeArg - 5], allArgs[sizeArg - 4], allArgs[sizeArg - 3], allArgs[sizeArg - 2], allArgs[sizeArg - 1] };

		Path[] paths = { new Path(otherArgs[0]), new Path(otherArgs[1]) };

		// Parse once instead of three times as before.
		int isAzure = Integer.parseInt(otherArgs[4]);

		// Class counts decide how many replications the job performs.
		int sizeOnto1 = readOWL(paths[0], isAzure, conf).getClassesInSignature().size();
		int sizeOnto2 = readOWL(paths[1], isAzure, conf).getClassesInSignature().size();

		// NOTE(review): the same flag selects both the storage backend and
		// which ontology is treated as the source — confirm this is intended.
		if (isAzure == 0) {
			conf.set("owl.source", paths[0].getName());
			conf.set("owl.target", paths[1].getName());
			conf.setInt("numberOfReplications", sizeOnto1);
			System.out.println("Método 1 - Onto1 >= Onto2");
		} else {
			conf.set("owl.source", paths[1].getName());
			conf.set("owl.target", paths[0].getName());
			conf.setInt("numberOfReplications", sizeOnto2);
			System.out.println("Método 2 - Onto1 < Onto2");
		}

		conf.set("threshold", otherArgs[3]);
		conf.setInt("isAzure", isAzure);

		Job job = new Job(conf, "Matching");

		job.setJarByClass(BDM.class);
		job.setMapperClass(BDM.Map.class);
		job.setReducerClass(BDM.Reduce.class);

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FileSD.class);

		job.setInputFormatClass(OWLInputFormat.class);
		job.setOutputFormatClass(TextOutputFormat.class);

		// setInputPaths REPLACES any previously registered input paths, so the
		// old addInputPath loop that preceded it was redundant and is removed.
		FileInputFormat.setInputPaths(job, paths);
		FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));

		if (job.waitForCompletion(true)) {
			long timeFinish = System.currentTimeMillis();
			System.err.println(">>>>>>>>Tempo de Execucao: " + (timeFinish - timeInit) / 1000);
			System.exit(0);
		} else {
			System.exit(1);
		}

	}

	/**
	 * Reads the whole file into a byte array and echoes its contents to
	 * stdout (debug aid kept from the original implementation).
	 *
	 * @param file file to read; must fit in memory (length is cast to int)
	 * @return the file contents; on I/O error the stack trace is printed and
	 *         the (possibly partially filled) buffer is returned
	 */
	static byte[] getByteArray(File file) {
		byte[] bFile = new byte[(int) file.length()];

		// try-with-resources: the stream is closed even when read() throws
		// (the previous version leaked it on exception).
		try (FileInputStream fileInputStream = new FileInputStream(file)) {
			// A single read() may return fewer bytes than requested, so loop
			// until the buffer is full or EOF (the previous version ignored
			// the return value and could leave the tail of bFile as zeros).
			int off = 0;
			while (off < bFile.length) {
				int n = fileInputStream.read(bFile, off, bFile.length - off);
				if (n < 0) {
					break; // file shrank since length() was sampled
				}
				off += n;
			}

			for (int i = 0; i < bFile.length; i++) {
				System.out.print((char) bFile[i]);
			}

			System.out.println("Done");
		} catch (Exception e) {
			e.printStackTrace();
		}
		return bFile;
	}

	/**
	 * Formats the raw MapReduce output file (which still contains inferior
	 * duplicates) into a second file, "part-r-00000-FORMATTED", without them.
	 *
	 * @param path output folder that contains "part-r-00000"
	 * @throws IOException if reading the raw file or writing the result fails
	 */
	private static void formatOutFile(String path) throws IOException {
		File file = targetFile(path);
		if (file == null) {
			System.err.println("Arquivo de saida nao pode ser lido");
			return;
		}
		List<String> lines = readFileLines(file);
		List<String> formatedLines = formatLines(lines);
		String newLine = System.getProperty("line.separator");
		// try-with-resources: the writer is closed (and buffered data is
		// flushed) even if one of the writes throws — the previous version
		// leaked the FileWriter on exception.
		try (FileWriter writer = new FileWriter(path + File.separator + "part-r-00000-FORMATTED")) {
			for (String str : formatedLines) {
				writer.write(str + newLine);
			}
		}
	}

	/**
	 * Removes "inferior duplicates" from the raw output lines: a line is
	 * dropped when another, different line with a strictly higher score has
	 * the same target or the same source entity.
	 *
	 * @param lines content of the raw output file (may contain empty lines)
	 * @return lines without empty entries and without inferior duplicates
	 */
	static List<String> formatLines(List<String> lines) {
		// The result starts as every non-empty line; losers are removed below.
		List<String> listAux = new ArrayList<String>();
		for (String s : lines) {
			if (!s.equals("")) {
				listAux.add(s);
			}
		}
		// Parse each non-empty line once up front. The previous version
		// re-ran getLineArray (three String.split calls) for every pair in
		// the nested loop below.
		List<String> nonEmpty = new ArrayList<String>(listAux);
		List<String[]> parsed = new ArrayList<String[]>(nonEmpty.size());
		for (String line : nonEmpty) {
			parsed.add(getLineArray(line));
		}
		for (int i = 0; i < nonEmpty.size(); i++) {
			String[] lineSource = parsed.get(i);
			// The source column is "<key>\t<entity>"; the entity is field 1.
			String entitySource = lineSource[0].split("\t")[1];
			double scoreSource = Double.parseDouble(lineSource[2]);
			for (int j = 0; j < nonEmpty.size(); j++) {
				// Textually identical lines are never compared to each other.
				if (nonEmpty.get(i).equals(nonEmpty.get(j))) {
					continue;
				}
				String[] lineTarget = parsed.get(j);
				String entityTarget = lineTarget[0].split("\t")[1];
				double scoreTarget = Double.parseDouble(lineTarget[2]);
				// Drop the lower-scored line when the targets match or the
				// source entities match (same grouping as the original
				// condition: t < s && (targetEq || entityEq)).
				if (scoreTarget < scoreSource
						&& (lineTarget[1].equals(lineSource[1]) || entityTarget.equals(entitySource))) {
					listAux.remove(nonEmpty.get(j));
				}
			}
		}
		return listAux;
	}

	/**
	 * Splits one raw output line of the form
	 * {@code <key>\t<source> - <target> = <value>} into an array
	 * [source, target, value], normalizing the decimal comma to a dot.
	 *
	 * @param line one non-empty line of the raw output file
	 * @return array in the format [source, target, value]
	 */
	static String[] getLineArray(String line) {
		String[] array = line.split(" = ");
		// The last field is the score; it uses a Brazilian decimal comma.
		String val = array[array.length - 1].replace(",", ".");
		// Hoisted: the previous version split array[0] on " - " twice.
		String[] pair = array[0].split(" - ");
		return new String[] { pair[0], pair[1], val };
	}

	/**
	 * Reads the raw output file and returns its lines.
	 *
	 * Fixes over the previous version: the BufferedReader is now closed even
	 * when readLine throws (before, only the inner FileReader was closed, and
	 * only on success), and the lines are collected directly instead of being
	 * joined into a StringBuffer and re-split — which also means an empty file
	 * now yields an empty list instead of a spurious single "" entry.
	 *
	 * @param file raw output file
	 * @return list with one entry per line of the file
	 * @throws IOException if the file cannot be opened or read
	 */
	static List<String> readFileLines(File file) throws IOException {
		List<String> list = new ArrayList<String>();
		try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
			String line;
			while ((line = bufferedReader.readLine()) != null) {
				list.add(line);
			}
		}
		return list;
	}

	/**
	 * Returns the raw output file ("part-r-00000", with inferior duplicates)
	 * inside the given folder; a formatted file is later derived from it.
	 *
	 * @param path folder that should contain the raw output file
	 * @return the raw output file, or {@code null} if the folder does not
	 *         exist, is not a directory, or contains no "part-r-00000"
	 */
	static File targetFile(String path) {
		File file = new File(path);
		File[] listFiles = file.listFiles();
		// listFiles() returns null when the path does not exist or is not a
		// directory; the previous version then threw NullPointerException.
		// Callers already handle a null return, so that is the right signal.
		if (listFiles == null) {
			return null;
		}
		for (File f : listFiles) {
			if (f.getName().equals("part-r-00000")) {
				return f;
			}
		}
		return null;
	}

	/**
	 * Downloads the ontology blob named {@code path.getName()} from Azure Blob
	 * Storage (container "matchercluster", folder SDProject/Input) to a local
	 * temp file, loads it with the OWL API, deletes the temp file and returns
	 * the loaded ontology.
	 *
	 * NOTE(review): the {@code isAzure} and {@code conf} parameters are
	 * currently unused — the local-file branch is commented out below, so this
	 * method always goes through Azure regardless of the flag.
	 *
	 * @param path    HDFS-style path whose file name identifies the blob
	 * @param isAzure storage selector (unused; see note above)
	 * @param conf    Hadoop configuration (unused; see note above)
	 * @return the ontology parsed from the downloaded document
	 * @throws Exception if the download or the ontology parsing fails
	 */
	private static OWLOntology readOWL(Path path, int isAzure, Configuration conf) throws Exception {
		// Disabled local-file branch (isAzure == 0) kept for reference:
		// if (isAzure == 0) {
		// // Create our ontology manager in the usual way.
		// OWLOntologyManager manager = OWLManager.createOWLOntologyManager();
		// // Load a copy of the people+pets ontology. We'll load the ontology
		// // from
		// // the web (it's acutally located in the TONES ontology repository).
		// IRI docIRI = IRI.create(new File(path.toString()));
		// // We load the ontology from a document - our IRI points to it
		// // directly
		// OWLOntology ontology =
		// manager.loadOntologyFromOntologyDocument(docIRI);
		//
		// return ontology;
		// } else {
		// SECURITY NOTE(review): storage account credentials are hardcoded in
		// source. Move the connection string to configuration and rotate this
		// key — it must be considered compromised once committed.
		final String storageConnectionString = "DefaultEndpointsProtocol=http;" + "AccountName=testematcher;"
				+ "AccountKey=TqIn51pzE5r8CyAv0QeTTBqtd7LbS7bksSv/C8CMXAzvabqjtobID4qk5rw9yiY55p4mx5pwUC0/I1l7b19Zqw==";

		CloudStorageAccount account = CloudStorageAccount.parse(storageConnectionString);
		CloudBlobClient serviceClient = account.createCloudBlobClient();

		// Container name must be lower case.
		CloudBlobContainer container = serviceClient.getContainerReference("matchercluster");
		// container.createIfNotExists();
		CloudBlobDirectory sdproj = container.getDirectoryReference("SDProject").getSubDirectoryReference("Input");
		CloudBlockBlob arq = sdproj.getBlockBlobReference(path.getName());

		// Download the blob to a fixed-name temp file in the working
		// directory. NOTE(review): concurrent invocations would clobber each
		// other's "TEMPONTO.owl" — confirm this method is never run in
		// parallel within one working directory.
		File destinationFile = new File("TEMPONTO.owl");
		arq.downloadToFile(destinationFile.getAbsolutePath());

		// Create our ontology manager in the usual way.
		OWLOntologyManager manager = OWLManager.createOWLOntologyManager();
		// Load the ontology from the downloaded document — the IRI points to
		// the local temp file directly.
		IRI docIRI = IRI.create(destinationFile);
		OWLOntology ontology = manager.loadOntologyFromOntologyDocument(docIRI);

		// Disabled experiment: copying the downloaded file into HDFS.
//		byte[] fileData = getByteArray(destinationFile);
//		// OWLOntologyManager manager = onto1.getOWLOntologyManager();
//		File filePath = new File(System.getProperty("user.dir") + "/onto1.owl");
//		// manager.saveOntology(onto1, IRI.create(filePath.toURI()));
//		org.apache.hadoop.fs.FileSystem hdfs = org.apache.hadoop.fs.FileSystem.get(conf);
//		org.apache.hadoop.fs.Path path2 = new org.apache.hadoop.fs.Path(filePath.toString());
//		org.apache.hadoop.fs.FSDataOutputStream outputStream = hdfs.create(path2);
//		FileInputStream fileInputStream = new FileInputStream(destinationFile);
//		// manager.saveOntology(ontology, outputStream);
//		outputStream.write(fileData, 0, fileData.length);

		// Best-effort cleanup; the ontology is already fully loaded in memory.
		destinationFile.delete();
		return ontology;
		// }

	}

}
