package mp;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class SecondarySort {

	/**
	 * Key/value types exchanged between Hadoop tasks must be serializable
	 * via the {@code Writable} interface, whose two methods are
	 * {@code write(DataOutput)} and {@code readFields(DataInput)}.
	 * {@code WritableComparable} extends {@code Writable} with
	 * {@code Comparable}, adding {@code compareTo}, which Hadoop uses to
	 * sort and group keys. Hence: a key type must implement
	 * {@code WritableComparable}, while a value type only needs
	 * {@code Writable} — anything usable as a key can also serve as a
	 * value, but not the other way round.
	 */
	public static class IntPair implements WritableComparable<IntPair> {
		private int first = 0;
		private int second = 0;

		/** Sets both components of the pair. */
		public void set(int left, int right) {
			first = left;
			second = right;
		}

		/** @return the primary (first) component. */
		public int getFirst() {
			return first;
		}

		/** @return the secondary (second) component. */
		public int getSecond() {
			return second;
		}

		/**
		 * Serializes the two ints. Each value is shifted by
		 * {@code Integer.MIN_VALUE} so that the written bytes, compared as
		 * unsigned, order the same way as the signed ints — useful for a
		 * raw-byte key comparator. {@link #readFields(DataInput)} undoes
		 * the shift exactly.
		 *
		 * @param out sink to write the serialized form to
		 * @throws IOException if the underlying stream fails
		 */
		@Override
		public void write(DataOutput out) throws IOException {
			out.writeInt(first - Integer.MIN_VALUE);
			out.writeInt(second - Integer.MIN_VALUE);
		}

		/**
		 * Deserializes the two ints, reversing the {@code Integer.MIN_VALUE}
		 * shift applied by {@link #write(DataOutput)}.
		 *
		 * @param in source to read the serialized form from
		 * @throws IOException if the underlying stream fails
		 */
		@Override
		public void readFields(DataInput in) throws IOException {
			first = in.readInt() + Integer.MIN_VALUE;
			second = in.readInt() + Integer.MIN_VALUE;
		}

		@Override
		public int hashCode() {
			// Small-prime mix of both components; consistent with equals().
			return first * 157 + second;
		}

		@Override
		public boolean equals(Object right) {
			if (right instanceof IntPair) {
				IntPair r = (IntPair) right;
				return r.first == first && r.second == second;
			}
			return false;
		}

		/**
		 * Orders pairs by {@code first}, breaking ties with {@code second}.
		 * Uses {@link Integer#compare(int, int)} rather than subtraction to
		 * avoid overflow. (The previous auto-generated stub always returned
		 * 0, which broke key sorting/grouping in the shuffle and violated
		 * the equals/compareTo consistency contract.)
		 *
		 * @param o the pair to compare against
		 * @return negative, zero, or positive per the natural ordering
		 */
		@Override
		public int compareTo(IntPair o) {
			int cmp = Integer.compare(first, o.first);
			return cmp != 0 ? cmp : Integer.compare(second, o.second);
		}

	}

	/**
	 * Driver entry point. TODO: configure and submit the secondary-sort
	 * MapReduce job (mapper, reducer, partitioner, grouping comparator).
	 */
	public static void main(String[] args) {
		// Not yet implemented.
	}

}
