package com.shujia.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;
import java.util.ArrayList;

public class Demo2Join {

    /**
     * Mapper for the student table.
     *
     * <p>Input lines are comma-separated: {@code id,name,age,gender,clazz}.
     * Emits (student id, "name,age,gender,clazz*"). The trailing {@code *}
     * tags the value as a student record so the reducer can distinguish it
     * from score records.
     */
    public static class JoinStuMap extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            String[] split = value.toString().split(",");

            // Guard against malformed lines: unconditional split[4] access
            // would throw ArrayIndexOutOfBoundsException and fail the task.
            if (split.length < 5) {
                return;
            }

            String id = split[0];
            // name,age,gender,clazz plus the '*' student-record tag.
            String info = split[1] + "," + split[2] + "," + split[3] + "," + split[4] + "*";

            // Key on the student id so both tables meet in the same reduce call.
            context.write(new Text(id), new Text(info));
        }
    }


    /**
     * Mapper for the score table.
     *
     * <p>Input lines are comma-separated; field 0 is the student id and
     * field 2 is the score. Emits (student id, "score#"). The trailing
     * {@code #} tags the value as a score record for the reducer. Both
     * mappers must share the same output types (Text, Text).
     */
    public static class JoinScoMap extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            String[] split = value.toString().split(",");

            // Guard against malformed lines: unconditional split[2] access
            // would throw ArrayIndexOutOfBoundsException and fail the task.
            if (split.length < 3) {
                return;
            }

            String sid = split[0];
            String score = split[2];

            // '#' marks this as a score record.
            context.write(new Text(sid), new Text(score + "#"));
        }
    }


    /**
     * Join-side reducer.
     *
     * <p>For each student id, {@code values} holds at most one student
     * record (tagged with a trailing {@code *}) and any number of score
     * records (tagged {@code #}). Sums the scores and emits one line:
     * {@code id,name,age,gender,clazz,totalScore} with a NullWritable key.
     */
    public static class JoinReduce extends Reducer<Text, Text, NullWritable, Text> {

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {

            // Student info ("name,age,gender,clazz"); empty if no student
            // record arrives for this key.
            String stuInfo = "";

            // Primitive int: avoids the pointless Integer autoboxing of the
            // original accumulator, and we sum directly instead of buffering
            // every score in an ArrayList first — same result, less garbage.
            int sumSco = 0;

            for (Text value : values) {
                String line = value.toString();

                if (line.endsWith("*")) {
                    // Student record: strip the trailing tag.
                    stuInfo = line.substring(0, line.length() - 1);
                } else if (line.endsWith("#")) {
                    // Score record: strip the tag and accumulate.
                    sumSco += Integer.parseInt(line.substring(0, line.length() - 1));
                }
            }

            // NOTE(review): a score row with no matching student still emits
            // a line with empty stuInfo (original behavior, kept as-is).
            String line = key.toString() + "," + stuInfo + "," + sumSco;

            context.write(NullWritable.get(), new Text(line));
        }
    }


    /**
     * Job driver: joins the student table with the score table and writes
     * one line per student — [id, name, age, gender, clazz, totalScore] —
     * to HDFS under /data/out_join.
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        Configuration config = new Configuration();

        Job job = Job.getInstance(config);
        job.setJobName("Demo2Join");
        job.setJarByClass(Demo2Join.class);

        // With MultipleInputs, every mapper must emit the same output types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        // One mapper per input table.
        MultipleInputs.addInputPath(job, new Path("/data/student"), TextInputFormat.class, JoinStuMap.class);
        MultipleInputs.addInputPath(job, new Path("/data/score"), TextInputFormat.class, JoinScoMap.class);

        job.setReducerClass(JoinReduce.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        // Hadoop refuses to start a job whose output directory already
        // exists; delete a stale one so re-runs don't always fail.
        Path outPath = new Path("/data/out_join");
        FileSystem fs = FileSystem.get(config);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
        }
        TextOutputFormat.setOutputPath(job, outPath);

        // Propagate job success/failure to the process exit code; the
        // original discarded waitForCompletion's result and always exited 0.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
