import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.ArrayList;

public class Demo6 {

    public static class map extends Mapper<LongWritable,Text,Text,Text>{
        // Reused output objects — avoids allocating two fresh Text instances per input record.
        private final Text outKey = new Text();
        private final Text outValue = new Text();

        /**
         * Tags every input line with its source table so the reducer can tell the
         * two joined files apart: "*" marks a record from the students file, "#"
         * marks a record from the scores file. The join key is the first
         * comma-separated field (the student id) of either table.
         *
         * @param key     byte offset of the line within the split (unused)
         * @param value   one CSV line from either input file
         * @param context sink for (studentId, taggedLine) pairs
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            // Robustness: skip blank lines so they cannot produce an empty join key.
            if (line.isEmpty()) {
                return;
            }

            // InputSplit is abstract and has no path accessor; cast to FileSplit
            // to reach the file this split came from.
            FileSplit fileSplit = (FileSplit) context.getInputSplit();
            String path = fileSplit.getPath().toString(); // full path, not just the file name

            // Tag by source file: "students" in the path => student record ("*"),
            // anything else is assumed to be the scores file ("#").
            String tag = path.contains("students") ? "*" : "#";
            outKey.set(line.split(",")[0]); // student id is field 0 in both tables
            outValue.set(tag + line);
            context.write(outKey, outValue);
        }
    }

    public static class reduce extends Reducer<Text,Text,Text,NullWritable> {
        // Reused output object — one Text instance for all reduce calls.
        private final Text outKey = new Text();

        /**
         * Joins one student record with all of that student's score records and
         * emits "studentCsv,totalScore". Each key (student id) arrives with all
         * values for that id: exactly one "*"-tagged student line and the
         * "#"-tagged score lines (six per student in this dataset).
         *
         * @param key     student id
         * @param values  tagged lines from the mapper ("*" = student, "#" = score)
         * @param context sink for the joined output line
         */
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            String student = "";
            // Scores are buffered so they can be aggregated after the single
            // student record has been identified.
            ArrayList<String> scores = new ArrayList<String>();

            for (Text value : values) {
                String s = value.toString();
                if (s.startsWith("*")) {
                    student = s.substring(1); // strip the one-character tag
                } else {
                    scores.add(s.substring(1));
                }
            }

            // Sum the score column (field 2 of each score line).
            int sum = 0;
            for (String s : scores) {
                // parseInt avoids the pointless Integer boxing of Integer.valueOf.
                sum += Integer.parseInt(s.split(",")[2]);
            }
            outKey.set(student + "," + sum);
            context.write(outKey, NullWritable.get());
        }
    }


    /**
     * Configures and runs the join job. Input and output paths default to
     * /datajava and /output6 but may be overridden as args[0] and args[1].
     * Exits non-zero when the job fails instead of silently returning 0.
     */
    public static void main(String[] args) throws Exception{
        // One shared Configuration: the original built the Job with a default
        // conf but the FileSystem with a second, separate conf — they could end
        // up pointing at different clusters. Share a single instance instead.
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        job.setJobName("两个文件进行拼接");
        job.setJarByClass(Demo6.class);

        job.setMapperClass(map.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // Paths are overridable from the command line; defaults keep the
        // original hard-coded behavior.
        Path in = new Path(args.length > 0 ? args[0] : "/datajava");
        FileInputFormat.addInputPath(job, in);

        Path out = new Path(args.length > 1 ? args[1] : "/output6");
        FileSystem fs = FileSystem.get(conf);
        // Remove a stale output directory so the job does not abort on startup.
        if (fs.exists(out)) {
            fs.delete(out, true);
        }
        FileOutputFormat.setOutputPath(job, out);

        boolean ok = job.waitForCompletion(true);
        System.out.println("可以了第六次");
        // Propagate job success/failure to the caller's exit code.
        System.exit(ok ? 0 : 1);
    }
}
