package hadoop.mr09_JOIN;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

/**
 * Map side of a reduce-side join keyed by student name.
 *
 * <p>Each input line is tagged with a prefix identifying its source file so
 * the reducer can distinguish the two record kinds sharing one key:
 * <ul>
 *   <li>{@code "ALL#"} — detailed score rows from {@code chengji.csv}
 *       (e.g. {@code 2305151131,李美玲,1,68,80,79,60,87,94,94,64}; name is column 1)</li>
 *   <li>{@code "SUM#"} — total-score rows from the other file
 *       (e.g. {@code 张阳璐,女,699}; name is column 0)</li>
 * </ul>
 */
public class JoinMapper extends Mapper<LongWritable, Text, Text, Text> {
    // Reused across map() calls to avoid per-record object allocation.
    private final Text out_key = new Text();
    private final Text out_value = new Text();
    private final StringBuilder sb = new StringBuilder();
    // Name of the file backing the current input split; set once in setup().
    private String fileName = null;

    /**
     * Task initialization: records the source file name of this split so
     * {@link #map} can tell which of the two input formats it is reading.
     */
    @Override
    protected void setup(Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        // The input split for a text file is a FileSplit; its path identifies the source file.
        FileSplit split = (FileSplit) context.getInputSplit();
        fileName = split.getPath().getName();
        System.out.println("正在处理的文件： " + fileName);
    }

    /**
     * Emits (student name, tagged record) pairs.
     *
     * @param key     byte offset of the line in the file (unused)
     * @param value   one CSV line from either input file
     * @param context used to emit the join key/value
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        // Clear the reusable string buffer (original comment had a typo: 晴空 -> 清空).
        sb.setLength(0);
        String line = value.toString();
        String[] fields = line.split(",");

        if (fileName.contains("chengji.csv")) { // detailed scores file
            // Skip the header row (its first column contains the literal "学号")
            // and guard against blank/malformed lines lacking a name column.
            if (fields.length > 1 && !fields[0].contains("学号")) {
                out_key.set(fields[1]); // column 1 = student name
                out_value.set(sb.append("ALL#").append(line).toString());
                context.write(out_key, out_value);
            }
        } else { // total-score file
            // Guard against blank/malformed lines lacking the three expected columns.
            if (fields.length > 2) {
                out_key.set(fields[0]); // column 0 = student name
                out_value.set(sb.append("SUM#").append(fields[1]).append(",").append(fields[2]).toString());
                context.write(out_key, out_value);
            }
        }
    }
}
