package com.zyh.mapreduce;

import com.zyh.WordCountJob;
import com.zyh.entity.MyIntWritable;
import com.zyh.entity.Student;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;

public class StudentScoreJob extends Configured implements Tool {

    public static void main(String[] args) throws Exception {
        // Propagate the job's exit status to the shell instead of discarding it
        // (the original ignored ToolRunner.run's return value).
        System.exit(ToolRunner.run(new StudentScoreJob(), args));
    }

    /**
     * Assembles and submits the student-score job: reads student records from HDFS,
     * partitions them by subject via {@link SubjectPartition}, and writes one output
     * file per subject.
     *
     * @param strings command-line arguments (currently unused; paths are hard-coded)
     * @return 0 on success, 1 on failure
     * @throws Exception if job submission or HDFS access fails
     */
    @Override
    public int run(String[] strings) throws Exception {
        String input = "/mr/demo1/student.txt";
        String output = "/mr/out/student/";

        // 1. Reuse the Configuration that ToolRunner injected via setConf() so that
        //    generic command-line options (-D, -conf, ...) take effect, instead of
        //    creating a fresh one and silently dropping them.
        Configuration conf = getConf();
        conf.set("fs.defaultFS", "hdfs://192.168.193.10:9000");

        // 2. Create the job. The jar lookup must use THIS driver class — the original
        //    pointed at WordCountJob.class (copy-paste bug), which can make Hadoop
        //    ship the wrong jar and fail with ClassNotFoundException on the cluster.
        Job job = Job.getInstance(conf);
        job.setJarByClass(StudentScoreJob.class);

        // 3. Input path
        FileInputFormat.setInputPaths(job, new Path(input));

        // 4. Mapper: emits (score, student) pairs.
        job.setMapperClass(StudentMapper.class);
        job.setMapOutputKeyClass(MyIntWritable.class);
        job.setMapOutputValueClass(Student.class);

        // 5. Reducer: three reduce tasks, one per subject bucket, routed by the
        //    custom partitioner below.
        job.setReducerClass(StudentReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setNumReduceTasks(3); // one reducer per partition (Chinese / Math / other)
        job.setPartitionerClass(SubjectPartition.class);

        // 6. The output path must not exist when the job starts; delete a stale one
        //    first. Use a single FileSystem handle instead of two separate lookups.
        Path outputPath = new Path(output);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }
        FileOutputFormat.setOutputPath(job, outputPath);

        // 7. Run synchronously with progress reporting.
        return job.waitForCompletion(true) ? 0 : 1;
    }

    /**
     * Routes each record to a reducer by subject so every subject ends up in its
     * own output file. Must agree with the reduce-task count set in {@code run}.
     */
    private static class SubjectPartition extends Partitioner<MyIntWritable, Student> {

        /**
         * @param myIntWritable map output key (the score)
         * @param student       map output value carrying the subject
         * @param i             number of reduce tasks (3 here)
         * @return partition index: 0 = Chinese ("语文"), 1 = Math ("数学"), 2 = everything else
         */
        @Override
        public int getPartition(MyIntWritable myIntWritable, Student student, int i) {
            String subject = student.getSubject();
            if ("语文".equals(subject)) {
                return 0;
            }
            if ("数学".equals(subject)) {
                return 1;
            }
            return 2;
        }
    }
}
/**
 * Parses one input line into a (score, student) pair.
 *
 * Fields are split on a literal two-space separator; the third field is taken
 * as the score, the first two as the Student's constructor arguments.
 * NOTE(review): assumes the input layout is "name  subject  score" — confirm
 * against the actual student.txt format.
 */
class StudentMapper extends Mapper<LongWritable, Text, MyIntWritable, Student> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] fields = value.toString().split("  ");
        MyIntWritable score = new MyIntWritable(Integer.parseInt(fields[2]));
        Student student = new Student(fields[0], fields[1]);
        context.write(score, student);
    }
}
/**
 * Emits one output record per student in the group: the key text is
 * "name subject" and the value is the score carried by the map-output key.
 */
class StudentReducer extends Reducer<MyIntWritable, Student, Text, IntWritable> {
    @Override
    protected void reduce(MyIntWritable key, Iterable<Student> values, Context context) throws IOException, InterruptedException {
        for (Student s : values) {
            Text label = new Text(s.getName() + " " + s.getSubject());
            // Read the key inside the loop: Hadoop may reuse/mutate the key
            // instance while iterating the values, so don't hoist this.
            IntWritable score = new IntWritable(key.getValue());
            context.write(label, score);
        }
    }
}