
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class PartitionerOwn extends Partitioner<Text, NullWritable> {
    /**
     * Routes each record to a partition bucket based on the game-result
     * field (tab-separated column at index 6 of the key line).
     *
     * <p>Bucket layout (closed/open bounds as implemented):
     * <pre>
     *   result &lt;= 10000            -&gt; partition 0
     *   10000 &lt; result &lt; 20000     -&gt; partition 1
     *   20000 &lt;= result &lt; 30000    -&gt; partition 2
     *   30000 &lt;= result &lt; 40000    -&gt; partition 3
     *   result &gt;= 40000            -&gt; partition 4
     * </pre>
     *
     * @param text          the key (k2): one full tab-separated input line
     * @param nullWritable  the value (v2); unused
     * @param numPartitions the number of reduce tasks configured for the job
     * @return partition index in the range [0, 4]
     * @throws NumberFormatException          if column 6 is not a valid integer
     * @throws ArrayIndexOutOfBoundsException if the line has fewer than 7 columns
     */
    @Override
    public int getPartition(Text text, NullWritable nullWritable, int numPartitions) {
        String[] fields = text.toString().split("\t");
        // Parse once; the original re-parsed the same string on every comparison.
        int gameResult = Integer.parseInt(fields[6]);

        // Each branch is only reached when all previous ones failed, so the
        // lower-bound checks the original carried (e.g. ">= 10000") are implicit.
        if (gameResult <= 10000) {
            return 0;
        } else if (gameResult < 20000) {
            return 1;
        } else if (gameResult < 30000) {
            return 2;
        } else if (gameResult < 40000) {
            return 3;
        } else {
            return 4;
        }
    }
}