package prjCode;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;




public class PreprocessingInput {

	private final static Text empty                                = new Text();

	private static Set<String> nouns;                             //set of part of speech description that are noun 
	private static Set<String> verbs;                             //set of part of speech description that are verb
	private static Set<String> auxiliaryVerbs;                    //set of auxiliary verbs
	private static Set<String> legalWordPath ;                    //set of type of word that can be in path

	static {
		nouns = new HashSet<String>();
		nouns.add("NN"); nouns.add("NNS"); nouns.add("NNP"); nouns.add("NNPS");

		verbs = new HashSet<String>();
		verbs.add("VB"); verbs.add("VBD"); verbs.add("VBN"); verbs.add("VBP"); verbs.add("VBZ");

		legalWordPath = new HashSet<String>();
		legalWordPath.add("NN"); legalWordPath.add("NNS"); legalWordPath.add("NNP"); legalWordPath.add("NNPS");
		legalWordPath.add("VB"); legalWordPath.add("VBD"); legalWordPath.add("VBN"); legalWordPath.add("VBP"); legalWordPath.add("VBZ");
		legalWordPath.add("JJ"); legalWordPath.add("JJR"); legalWordPath.add("JJS");
		legalWordPath.add("RB"); legalWordPath.add("RBR"); legalWordPath.add("RBS");
		legalWordPath.add("TO"); legalWordPath.add("IN");

		auxiliaryVerbs = new HashSet<String>();
		auxiliaryVerbs.add("am");auxiliaryVerbs.add("is"); auxiliaryVerbs.add("are"); auxiliaryVerbs.add("do"); auxiliaryVerbs.add("had"); 
		auxiliaryVerbs.add("was");auxiliaryVerbs.add("can"); auxiliaryVerbs.add("were"); auxiliaryVerbs.add("have"); auxiliaryVerbs.add("has");
		auxiliaryVerbs.add("did"); auxiliaryVerbs.add("does"); auxiliaryVerbs.add("could"); auxiliaryVerbs.add("may"); auxiliaryVerbs.add("might");
		auxiliaryVerbs.add("must"); auxiliaryVerbs.add("shall"); auxiliaryVerbs.add("should"); auxiliaryVerbs.add("will"); auxiliaryVerbs.add("would");
		auxiliaryVerbs.add("be"); auxiliaryVerbs.add("been"); auxiliaryVerbs.add("beung"); auxiliaryVerbs.add("having"); auxiliaryVerbs.add("'re"); auxiliaryVerbs.add("'s");
	}


	/**
	 * Mapper: parses one syntactic-ngram record, keeps only sentences whose
	 * dependency root is a non-auxiliary verb with exactly two direct
	 * children, then walks every non-root noun's head chain up to the root
	 * ("half paths") and emits each valid pair of half paths joined through
	 * the root verb. Each path is written twice: once with the empty tag
	 * (for total counting) and once tagged with its two noun end points.
	 *
	 * Expected input value (tab separated):
	 *   head TAB word/POS/dep/headIndex ... TAB totalCount TAB countsByYear
	 * e.g.  ^	i/FW/nn/3 i/FW/nn/3 "^/FW/dep/0	28	2000,28
	 */
	public static class MapClass extends Mapper<LongWritable, Text, TextTaggedKey, IntWritable> {

		// Reused across map() calls to avoid per-record allocation.
		private IntWritable numAppears = new IntWritable(0);
		private Text word              = new Text();
		private TextTaggedKey dKey     = new TextTaggedKey();

		// halfPaths[k] = { joined path words, root's direct child word, end-point noun }.
		// At most 10 entries are filled per record (see guard below).
		private String[][] halfPaths = new String[15][3];

		@Override
		public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

			// ["^, i/FW/nn/3 i/FW/nn/3 "^/FW/dep/0, 28, 2000,28]
			String[] line = value.toString().split("\t");
			if (line.length < 4)
				return;

			// i/FW/nn/3 i/FW/nn/3 "^/FW/dep/0  ==>  [i/FW/nn/3, i/FW/nn/3, "^/FW/dep/0]
			String[] ngram = line[1].split(" ");
			if (ngram.length < 3)
				return;

			// BUG FIX: sized to the actual ngram — the original used a fixed
			// String[15][4] array that threw ArrayIndexOutOfBoundsException
			// on ngrams longer than 15 tokens.
			String[][] terms = new String[ngram.length][];

			int rootIndex     = -1; // 1-based index of the root token
			int rootRealIndex = -1; // 0-based index of the root token

			for (int i = 0; i < ngram.length; i++) {
				terms[i] = ngram[i].split("/"); // i/FW/nn/3 ==> [i, FW, nn, 3]
				if (terms[i].length != 4)
					return;

				// Head index "0" marks the dependency root.
				if (terms[i][3].equals("0")) {
					// Keep only sentences rooted at a verb ...
					if (!verbs.contains(terms[i][1]))
						return;
					// ... that is not an auxiliary verb.
					if (auxiliaryVerbs.contains(terms[i][0]))
						return;
					rootIndex = i + 1;
					rootRealIndex = i;
				}
			}

			// No root found — drop the record.
			if (rootIndex < 1)
				return;

			// Count direct children of the root; keep only roots with exactly two.
			int arcToRoot = 0;
			String rootIndexStr = Integer.toString(rootIndex);
			for (int i = 0; i < ngram.length; i++) {
				if (terms[i][3].equals(rootIndexStr))
					arcToRoot++;
			}
			if (arcToRoot != 2)
				return;

			// Total number of appearances of this sentence in the corpus.
			// BUG FIX: a malformed count no longer kills the whole task.
			try {
				numAppears.set(Integer.parseInt(line[2]));
			} catch (NumberFormatException e) {
				return;
			}

			int numString = 0;

			// Collect up to 10 half paths: for every non-root noun, walk its
			// head chain up to the root, concatenating path-legal words.
			for (int i = 0; i < ngram.length; i++) {
				if (i == rootRealIndex || !nouns.contains(terms[i][1]) || numString >= 10)
					continue;

				StringBuilder halfPath = new StringBuilder();
				int directChild = i;
				int nextTerm;
				try {
					nextTerm = Integer.parseInt(terms[i][3]);
				} catch (NumberFormatException e) {
					return;
				}

				halfPaths[numString][2] = terms[i][0]; // the noun end point

				// BUG FIX: bounds + hop guard — a corrupt or cyclic head chain
				// previously indexed out of bounds or looped forever.
				int hops = 0;
				while (nextTerm != rootIndex) {
					if (nextTerm < 1 || nextTerm > ngram.length || ++hops > ngram.length)
						return;
					if (legalWordPath.contains(terms[nextTerm - 1][1]))
						halfPath.append(' ').append(terms[nextTerm - 1][0]);
					directChild = nextTerm - 1;
					try {
						nextTerm = Integer.parseInt(terms[nextTerm - 1][3]);
					} catch (NumberFormatException e) {
						return;
					}
				}

				halfPaths[numString][0] = halfPath.toString();
				halfPaths[numString][1] = terms[directChild][0];
				numString++;
			}

			// A full path needs two half paths.
			if (numString < 2)
				return;

			// Join every pair of half paths through the root verb and emit.
			for (int i = 0; i < numString - 1; i++) {
				String firstHalf = halfPaths[i][0];

				for (int j = i + 1; j < numString; j++) {

					// Skip pairs entering the root through the same direct child.
					if (halfPaths[j][1].contains(halfPaths[i][1]))
						break;

					// The second half was walked child -> root, so reverse it.
					String[] secondHalfRev = halfPaths[j][0].split(" ");

					StringBuilder secondHalf = new StringBuilder();
					if (firstHalf.length() > 0)
						secondHalf.append(' ');
					secondHalf.append(terms[rootRealIndex][0]); // the root verb
					for (int m = secondHalfRev.length - 1; m >= 0; m--) {
						secondHalf.append(' ').append(secondHalfRev[m]);
					}

					word.set(firstHalf + secondHalf);
					dKey.setKey(word);

					// Once with the empty tag (path total) ...
					dKey.setTag(empty);
					context.write(dKey, numAppears);
					// ... and once tagged with the two noun end points.
					dKey.setTag(halfPaths[i][2] + "/" + halfPaths[j][2]);
					context.write(dKey, numAppears);
				}
			}
		}

	}

	/**
	 * Reducer: sums the occurrence counts of each tagged path and writes the
	 * path out with its tag wrapped in '/' delimiters ("/tag/").
	 */
	public static class ReduceClass extends Reducer<TextTaggedKey, IntWritable, TextTaggedKey, IntWritable> {

		private Text retTag = new Text();
		// Reused output value — the original allocated a fresh IntWritable per
		// key; reusing one matches the combiner and avoids garbage.
		private IntWritable result = new IntWritable();

		@Override
		public void reduce(TextTaggedKey key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {

			int sum = 0;
			for (IntWritable value : values) {
				sum += value.get();
			}

			// Wrap the tag in '/' delimiters for the output format.
			retTag.set("/" + key.getTag() + "/");
			key.setTag(retTag);
			result.set(sum);
			context.write(key, result);
		}
	}

	public static class CombienrClass extends Reducer<TextTaggedKey,IntWritable,TextTaggedKey,IntWritable> {


		int sum;
		IntWritable val = new IntWritable();
		public void reduce(TextTaggedKey key, Iterable<IntWritable> values, Context context) throws IOException,  InterruptedException {

			sum = 0;
			for (IntWritable value : values) {
				sum += value.get();
			}
			val.set(sum);
			context.write(key ,val);

		}


	}

	/**
	 * Partitions keys by the hash of the natural key.
	 *
	 * BUG FIX: the original used Math.abs(hashCode), which is still negative
	 * for Integer.MIN_VALUE and would return an illegal negative partition;
	 * masking the sign bit (as Hadoop's HashPartitioner does) is always
	 * non-negative.
	 *
	 * NOTE(review): empty-tag keys are offset by +1, so the same natural key
	 * can land on a different reducer depending on its tag — this contradicts
	 * the original "same key, same reducer" comment. The offset is preserved
	 * here; confirm whether it is intentional.
	 */
	public static class PartitionerClass extends Partitioner<TextTaggedKey, IntWritable> {

		@Override
		public int getPartition(TextTaggedKey key, IntWritable value, int numPartitions) {

			if (key.getTag().equals(empty)) {
				return ((key.getKey().hashCode() + 1) & Integer.MAX_VALUE) % numPartitions;
			}

			return (key.getKey().hashCode() & Integer.MAX_VALUE) % numPartitions;
		}
	}

	/**
	 * Adds the job's input paths.
	 *
	 * @param job          job to configure
	 * @param local        "1" to read fixed local sample files, otherwise S3
	 * @param inputFileNum shards to take from each S3 bucket: 50 selects
	 *                     shards 10-59, 100 selects 60-99, any other value
	 *                     selects 00-(n-1)
	 * @param conf         configuration used to resolve each path's FileSystem
	 * @throws IOException if a path's file system cannot be obtained
	 */
	public static void loadFiles(Job job, String local, String inputFileNum, Configuration conf) throws IOException
	{
		//run local: three fixed sample files
		if (local.equals("1"))
		{
			FileInputFormat.addInputPath(job, new Path("/home/smadar/Downloads/quadarcs22.gz"));
			FileInputFormat.addInputPath(job, new Path("/home/smadar/Downloads/triarcs10.gz"));
			FileInputFormat.addInputPath(job, new Path("/home/smadar/Downloads/biarcs15.gz"));
		}

		//run on Amazon: pull matching shards from the three S3 ngram buckets
		else
		{
			String s1 = "s3://dsp132/syntactic-ngram/quadarcs/quadarcs.";
			String s2 = "s3://dsp132/syntactic-ngram/triarcs/triarcs.";
			// BUG FIX: the original repeated the quadarcs prefix here, so the
			// biarcs bucket was never read (the local branch reads biarcs).
			String s3 = "s3://dsp132/syntactic-ngram/biarcs/biarcs.";
			String finish = "-of-99";

			int size = Integer.parseInt(inputFileNum); //how many files from each bucket
			int i = 0;
			if (size == 50)
			{
				i = 10;
				size = 60;
			}
			else if (size == 100)
			{
				i = 60;
			}

			for (; i < size; i++)
			{
				String num = String.format("%02d", i);

				Path p1 = new Path(s1 + num + finish);
				Path p2 = new Path(s2 + num + finish);
				Path p3 = new Path(s3 + num + finish);

				FileSystem f1 = p1.getFileSystem(conf);
				FileSystem f2 = p2.getFileSystem(conf);
				FileSystem f3 = p3.getFileSystem(conf);

				try
				{
					if (f1.exists(p1)) {
						FileInputFormat.addInputPath(job, p1);
					}

					if (f2.exists(p2)) {
						FileInputFormat.addInputPath(job, p2);
					}

					if (f3.exists(p3)) {
						FileInputFormat.addInputPath(job, p3);
					}
				} catch (Exception e) {
					// BUG FIX: the original printed an empty line, silently
					// discarding the failure reason.
					System.err.println("Skipping input shard " + num + ": " + e);
				}
			}
		}
	}
	/**
	 * Job driver. Expected arguments (args[0] is unused — some launchers
	 * pass the main class name there):
	 *   args[1] — "1" for local input files, anything else for S3
	 *   args[2] — output path
	 *   args[3] — number of input shards to take from each S3 bucket
	 */
	public static void main(String[] args) throws Exception {
		// Guard against a raw ArrayIndexOutOfBoundsException on bad invocation.
		if (args.length < 4) {
			System.err.println("Usage: PreprocessingInput <unused> <local 1|0> <output path> <input file num>");
			System.exit(2);
		}
		Configuration conf = new Configuration();
		// Job.getInstance replaces the deprecated Job(Configuration, String) constructor.
		Job job = Job.getInstance(conf, "preprocessing input");
		job.setJarByClass(PreprocessingInput.class);
		job.setMapperClass(MapClass.class);
		job.setPartitionerClass(PartitionerClass.class);
		job.setCombinerClass(CombienrClass.class);
		job.setReducerClass(ReduceClass.class);
		job.setOutputKeyClass(TextTaggedKey.class);
		job.setOutputValueClass(IntWritable.class);
		job.setInputFormatClass(SequenceFileInputFormat.class);
		loadFiles(job, args[1], args[3], conf);
		FileOutputFormat.setOutputPath(job, new Path(args[2]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}

}
