package com.shujia.MapReduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class Demo06MySort {
    // Reads the sumScore (total score) data, sorts it via a custom key, and writes the result.
    // Map side
    public static class MyMapper extends Mapper<LongWritable, Text, KeySort, NullWritable> {
        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, KeySort, NullWritable>.Context context) throws IOException, InterruptedException {
            // Each input line is "<id>\t<totalScore>", as produced by the sumScore job.
            String[] splits = value.toString().split("\t");
            String id = splits[0];
            int sumScore = Integer.parseInt(splits[1]);
            KeySort keySort = new KeySort(id, sumScore);
            // No aggregation is needed, so no custom Reducer is defined;
            // the composite key alone drives the ordering during shuffle.
            context.write(keySort, NullWritable.get());
        }
    }

    // Driver
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://master:9000");
        // Create the MapReduce job
        Job job = Job.getInstance(conf);
        // Job name shown in the cluster UI
        job.setJobName("Demo06MySort");
        // Jar containing the job classes
        job.setJarByClass(Demo06MySort.class);

        // Map-side configuration
        job.setMapperClass(MyMapper.class);
        // Map output key type
        job.setMapOutputKeyClass(KeySort.class);
        // Map output value type
        job.setMapOutputValueClass(NullWritable.class);
        // FIX: also declare the final (reduce-side) output types. The default
        // identity reducer re-emits the map output, so the job's declared
        // output types must match KeySort/NullWritable as well.
        job.setOutputKeyClass(KeySort.class);
        job.setOutputValueClass(NullWritable.class);

        // If no Reduce were needed this could be set to 0, but without a Reduce
        // task there is no shuffle, and without a shuffle there is no sorting —
        // so the default single reducer is kept on purpose.
        //        job.setNumReduceTasks(0);

        // Configure input/output paths
        FileInputFormat.addInputPath(job, new Path("/data/sumScore/output"));

        Path path = new Path("/data/mySort/output");
        FileSystem fs = FileSystem.get(conf);
        // Delete the output path if it already exists (the job fails otherwise)
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
        FileOutputFormat.setOutputPath(job, path);

        // FIX: propagate job success/failure to the shell via the exit code.
        // Previously the boolean result of waitForCompletion() was silently
        // discarded, so the driver exited 0 even when the job failed.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
    /*
        hadoop jar Hadoop-1.0.jar com.shujia.MapReduce.Demo06MySort
     */
}

// 自定义排序类
// Custom composite key: orders records by total score descending,
// breaking ties by id ascending.
class KeySort implements WritableComparable<KeySort> {
    String id;
    int sumScore;

    // A no-arg constructor is mandatory: Hadoop instantiates the key via
    // reflection during deserialization and fails at runtime without it.
    public KeySort() {
    }

    public KeySort(String id, int sumScore) {
        this.id = id;
        this.sumScore = sumScore;
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Must read fields in exactly the order write() emits them.
        id = in.readUTF();
        sumScore = in.readInt();
    }

    // Custom sort rule applied during the shuffle phase.
    @Override
    public int compareTo(KeySort o) {
        // Score descending first. FIX: Integer.compare avoids the overflow bug
        // in "this.sumScore - o.sumScore", which can wrap around and invert the
        // ordering for large-magnitude values.
        int byScore = Integer.compare(o.sumScore, this.sumScore);
        if (byScore != 0) {
            return byScore;
        }
        // Tie-break: id ascending, keeping the ordering total and deterministic.
        return this.id.compareTo(o.id);
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(id);
        out.writeInt(sumScore);
    }

    // equals/hashCode consistent with compareTo — a key type used for
    // partitioning/grouping should not rely on identity equality.
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof KeySort)) {
            return false;
        }
        KeySort other = (KeySort) obj;
        return sumScore == other.sumScore
                && (id == null ? other.id == null : id.equals(other.id));
    }

    @Override
    public int hashCode() {
        return 31 * (id == null ? 0 : id.hashCode()) + sumScore;
    }

    @Override
    public String toString() {
        // Output line format: "<id>,<sumScore>"
        return id + "," + sumScore;
    }
}
