package reduce;

import java.io.IOException;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import plagiarismChecker.MapReduceSkeleton;

public class ReduceJob3 extends Reducer<Text, Text, Text, Text> {

	/**
	 * For each key, pairs the designated "comparable" file's record with every
	 * other file's record and emits, per pair:
	 * key   = comparableFileName + separator + otherFileName
	 * value = (countA * countB) + separator + (normA * normB)
	 *
	 * Each incoming value is expected to be separator-delimited:
	 * [0] = file name, [1] = integer term count, [2] = decimal norm component.
	 *
	 * @param key     the grouping key (term) — not read directly
	 * @param values  all per-file records for this key
	 * @param context sink for the emitted pair records
	 * @throws IOException          propagated from {@code context.write}
	 * @throws InterruptedException propagated from {@code context.write}
	 */
	@Override
	protected void reduce(Text key, Iterable<Text> values, Context context)
			throws IOException, InterruptedException {

		String[] comparableFileString = null;
		// Only the non-reference records need a second pass; filter while collecting.
		List<String> comparingValues = new ArrayList<String>();
		for (Text value : values) {
			String valueString = value.toString();
			String[] valueSplit = valueString.split(MapReduceSkeleton.separator);
			if (valueSplit[0].equals(MapReduceSkeleton.comparableFile)) {
				// Keep the (last seen) record of the reference file; it is never
				// paired with itself.
				comparableFileString = valueSplit;
			} else {
				comparingValues.add(valueString);
			}
		}
		// No record of the reference file for this key: nothing to emit.
		if (comparableFileString == null) {
			return;
		}

		// Hoist loop-invariant parsing of the reference file's fields out of the
		// emit loop (the original re-parsed them for every paired value).
		long comparableCount = Long.parseLong(comparableFileString[1]);
		BigDecimal comparableNorm = new BigDecimal(comparableFileString[2]);

		for (String value : comparingValues) {
			String[] comparingFileString = value.split(MapReduceSkeleton.separator);

			// long arithmetic avoids silent int overflow on large counts.
			long xy = Long.parseLong(comparingFileString[1]) * comparableCount;

			BigDecimal euclideanNorm =
					new BigDecimal(comparingFileString[2]).multiply(comparableNorm);

			context.write(
					new Text(comparableFileString[0]
							+ MapReduceSkeleton.separator
							+ comparingFileString[0]),
					new Text(xy + MapReduceSkeleton.separator + euclideanNorm));
		}
	}
}