package org.wyz.mapreduce.join;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;


/**
 * Reduce-side inner join of two tables keyed by the join key.
 *
 * <p>Each incoming value is expected in the form {@code "<row>\t<sourceFile>"},
 * where the source-file tag identifies which table the row came from (attached
 * by the mapper). Rows are bucketed by table, then the cross product of the two
 * buckets is emitted as {@code key -> "row1,row2"}. Keys present in only one
 * table produce no output (inner-join semantics).
 */
public class JoinReducer extends Reducer<Text, Text, Text, Text> {
    private static final String TABLE_1 = "user.csv";
    private static final String TABLE_2 = "user_exam.csv";

    // Reusable output value: avoids allocating a new Text per emitted pair,
    // the standard Hadoop pattern for hot write paths.
    private final Text outValue = new Text();

    /**
     * Joins the rows of the two tables that share {@code key}.
     *
     * @param key     the join key
     * @param values  tagged rows, each formatted as {@code "<row>\t<sourceFile>"}
     * @param context output collector
     * @throws IOException if a value is missing its table tag or carries an
     *                     unrecognized source-file tag
     */
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Bucket rows by source table in a single pass: Hadoop's values iterable
        // is single-use (and reuses the underlying Text), so we must copy out.
        List<String> table1 = new ArrayList<>();
        List<String> table2 = new ArrayList<>();

        for (Text v : values) {
            // Limit 2: split only at the first tab, so a row payload that itself
            // contains a tab still keeps its tag intact in rowAndTable[1].
            String[] rowAndTable = v.toString().split("\t", 2);
            if (rowAndTable.length < 2) {
                // Malformed record: fail the task with a diagnosable error
                // instead of crashing later with ArrayIndexOutOfBoundsException.
                throw new IOException(
                        "Malformed join value (missing table tag) for key '" + key + "': " + v);
            }
            if (rowAndTable[1].contains(TABLE_1)) {
                table1.add(rowAndTable[0]);
            } else if (rowAndTable[1].contains(TABLE_2)) {
                table2.add(rowAndTable[0]);
            } else {
                // Was System.exit(1): that kills the whole task JVM with no
                // context. Throwing lets the framework log, report, and retry.
                throw new IOException(
                        "Unknown source table tag for key '" + key + "': " + v);
            }
        }

        // Inner join: emit the cross product of the two buckets.
        for (String s1 : table1) {
            for (String s2 : table2) {
                outValue.set(s1 + "," + s2);
                context.write(key, outValue);
            }
        }
    }
}
