package mapreduce.phase3;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;
import java.util.TreeSet;

import mapreduce.ColumnGrp;
import mapreduce.Point;
import mapreduce.PointComparator;
import mapreduce.UnionFind;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

/**
 * Recalculates the vertices in specific columns with the help of the new
 * picture obtained from part 2.
 */

@SuppressWarnings("deprecation")
public class Reduce3 extends MapReduceBase implements
Reducer<IntWritable, Text, IntWritable, Text> {

	/**
	 * Dispatches on the key: positive keys carry column records (pairs of
	 * points to be merged via union-find), non-positive keys carry partial
	 * integer counts to be summed.
	 *
	 * @param key      column id (&gt; 0) or counter key (&lt;= 0)
	 * @param values   records for this key, one {@link Text} line each
	 * @param output   collector receiving (key, result-line) pairs
	 * @param reporter unused
	 * @throws IOException on malformed input or any failure while reducing;
	 *                     failing the task lets Hadoop retry/report it instead
	 *                     of silently losing output (the previous version only
	 *                     called printStackTrace() and continued)
	 */
	public void reduce(IntWritable key, Iterator<Text> values,
			OutputCollector<IntWritable, Text> output, Reporter reporter)
	throws IOException {
		try {
			if (key.get() > 0) {
				reduceColumn(key, values, output);
			} else {
				reduceSum(key, values, output);
			}
		} catch (IOException e) {
			throw e;
		} catch (Exception e) {
			// Preserve the cause; never swallow a reduce-side failure.
			throw new IOException("Reduce3 failed for key " + key.get(), e);
		}
	}

	/**
	 * Rebuilds the union-find structure from the point pairs of one column
	 * and emits, for every point inside the column's actual boundary, the
	 * point together with its component representative.
	 */
	private void reduceColumn(IntWritable key, Iterator<Text> values,
			OutputCollector<IntWritable, Text> output) throws IOException {
		TreeSet<Point> points = new TreeSet<Point>(new PointComparator());
		UnionFind uf = new UnionFind();
		while (values.hasNext()) {
			String line = values.next().toString();
			StringTokenizer tokenizer = new StringTokenizer(line);
			// Explicit validation: the old bare asserts were no-ops unless
			// the JVM ran with -ea, letting malformed lines fail obscurely.
			if (!tokenizer.hasMoreTokens()) {
				throw new IOException("Malformed record for key " + key.get()
						+ ": expected two points, got: " + line);
			}
			Point p1 = Point.fromString(tokenizer.nextToken());
			if (!tokenizer.hasMoreTokens()) {
				throw new IOException("Malformed record for key " + key.get()
						+ ": expected second point, got: " + line);
			}
			Point p2 = Point.fromString(tokenizer.nextToken());

			uf.makeSet(p1);
			uf.makeSet(p2);
			uf.union(p1, p2);
			// NOTE(review): only p1 is collected for emission; p2 only
			// contributes to the union-find. Presumably p2 lies outside this
			// column's boundary -- confirm against the phase-3 mapper.
			points.add(p1);
		}

		for (Point point : points) {
			if (ColumnGrp.inActualBoundary(point, key.get())) {
				Text t1 = new Text(point.toString() + " " + uf.find(point).toString());
				output.collect(key, t1);
			}
		}
	}

	/** Sums the partial integer counts of a counter key and emits the total. */
	private void reduceSum(IntWritable key, Iterator<Text> values,
			OutputCollector<IntWritable, Text> output) throws IOException {
		int sum = 0;
		while (values.hasNext()) {
			sum += Integer.parseInt(values.next().toString());
		}
		output.collect(key, new Text(Integer.toString(sum)));
	}
}