package com.shujia.mapredue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class Demo3SumScore {

    public static class SumScoreMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        // Reusable output Writables — avoids allocating two objects per record.
        private final Text outKey = new Text();
        private final IntWritable outValue = new IntWritable();

        /**
         * Emits (student id, score) for each input record.
         *
         * Expected line format (comma-separated): id,&lt;field&gt;,score[,...]
         * Malformed lines (fewer than 3 fields, or a non-numeric score such as
         * a header row) are skipped instead of failing the whole task.
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            // Convert the raw line to a String and split it on commas.
            String line = value.toString();
            String[] split = line.split(",");

            // Guard against blank/short lines — previously this threw
            // ArrayIndexOutOfBoundsException and killed the map task.
            if (split.length < 3) {
                return;
            }

            // Student id — used as the key so every record for the same
            // student is routed to the same reducer.
            String id = split[0];

            int score;
            try {
                score = Integer.parseInt(split[2].trim());
            } catch (NumberFormatException e) {
                // Non-numeric score — ignore this record rather than crash.
                return;
            }

            // Emit to the reduce side.
            outKey.set(id);
            outValue.set(score);
            context.write(outKey, outValue);
        }
    }


    public static class SumScoreReduce extends Reducer<Text, IntWritable, Text, NullWritable> {

        /**
         * Sums every score received for one student id and writes
         * "id,total" as the output key, with an empty (NullWritable) value.
         *
         * Note: the {@code values} Iterable is single-pass — the framework may
         * stream the data from memory or from spilled files on disk.
         */
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {

            // Accumulate the student's total score across all subjects.
            int total = 0;
            for (IntWritable v : values) {
                total += v.get();
            }

            // Persist to HDFS: the key carries "studentId,totalScore";
            // there is no separate value column.
            context.write(new Text(key.toString() + "," + total), NullWritable.get());
        }
    }


    /**
     * Job driver: wires the mapper/reducer and submits the job.
     *
     * Usage: hadoop jar hadoop-1.0.jar com.shujia.mapredue.Demo3SumScore [input] [output]
     * Defaults to /data/score and /data/sum_score when no args are given,
     * preserving the original hard-coded behavior.
     */
    public static void main(String[] args) throws Exception {

        // Hadoop configuration object.
        Configuration configuration = new Configuration();

        // Cap the input split size (in bytes) to control how many map tasks run.
        configuration.set("mapreduce.input.fileinputformat.split.maxsize", "66560");

        Job job = Job.getInstance(configuration);
        job.setJobName("sumScore");
        job.setJarByClass(Demo3SumScore.class);

        job.setMapperClass(SumScoreMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setReducerClass(SumScoreReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // Input/output paths: overridable from the command line, falling back
        // to the original hard-coded locations for backward compatibility.
        String inputPath = args.length > 0 ? args[0] : "/data/score";
        String outputPath = args.length > 1 ? args[1] : "/data/sum_score";
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Propagate job success/failure via the process exit code — previously
        // the result of waitForCompletion was discarded, so the JVM always
        // exited 0 even when the job failed.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
