package com.shujia.reduce;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

/*
    编写reduce逻辑将map阶段的中间结果值进行聚合处理，mapreduce中默认只会提供一个reduce
 */
public class ReduceDemo {
    /**
     * Reduce step of the simulated MapReduce job: reads every map-phase
     * intermediate file under {@code hadoop/data/maps}, sums the counts per
     * key, and writes the aggregated result to
     * {@code hadoop/data/reduce/part-r-00000} (one "key count" pair per line).
     *
     * <p>Each input line is expected to be "key count" separated by a single
     * space, as produced by the map phase.
     *
     * @param args unused
     * @throws Exception if any input file cannot be read or the result file
     *                   cannot be written
     */
    public static void main(String[] args) throws Exception {
        // Collect all map-phase intermediate result files.
        File mapDir = new File("hadoop/data/maps");
        File[] mapFiles = mapDir.listFiles();

        // Aggregated counts: key -> summed count.
        HashMap<String, Integer> counts = new HashMap<>();

        // listFiles() returns null when the directory does not exist.
        if (mapFiles != null) {
            for (File mapFile : mapFiles) {
                // try-with-resources: the original reassigned one reader per
                // file and only closed the last, leaking the rest. UTF-8 is
                // made explicit instead of relying on the platform charset.
                try (BufferedReader br = new BufferedReader(new InputStreamReader(
                        new FileInputStream(mapFile), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        String[] fields = line.split(" ");
                        // merge() replaces the containsKey/put branching.
                        counts.merge(fields[0], Integer.valueOf(fields[1]), Integer::sum);
                    }
                }
            }
        }

        // Ensure the output directory exists; the original crashed when
        // hadoop/data/reduce had not been created beforehand.
        File outFile = new File("hadoop/data/reduce/part-r-00000");
        File outDir = outFile.getParentFile();
        if (outDir != null) {
            outDir.mkdirs();
        }

        // Write the final aggregated result; try-with-resources guarantees the
        // writer is closed (and flushed) even if a write fails, so the
        // per-line flush() and the swallowed IOException are gone.
        try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
                new FileOutputStream(outFile), StandardCharsets.UTF_8))) {
            for (Map.Entry<String, Integer> entry : counts.entrySet()) {
                bw.write(entry.getKey() + " " + entry.getValue());
                bw.newLine();
            }
        }
    }
}
