package com.zxj.hadoop.demo.mapreduce.outputformat;

import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Custom output format. When MapReduce writes data, the framework first calls
 * {@link #getRecordWriter(TaskAttemptContext)} on this class to obtain a
 * {@link RecordWriter}, then calls that writer once per key/value pair.
 *
 * <p>NOTE(review): the original declared the class as
 * {@code MyOutputFormat<Text, LongWritable>}, which introduced generic type
 * <em>variables</em> named {@code Text} and {@code LongWritable} that shadowed
 * the Hadoop classes of the same names. They are renamed to the conventional
 * {@code K}/{@code V} here; the erased signature is unchanged, so callers are
 * unaffected.
 *
 * @param <K> output key type
 * @param <V> output value type
 * @Author 朱小杰 (Zhu Xiaojie)
 * @since 2017-08-26
 */
public class MyOutputFormat<K, V> extends FileOutputFormat<K, V> {

    @Override
    public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        return new MyRecordWriter<>();
    }

    /**
     * RecordWriter that appends one {@code "key value"} line per record to a
     * local file, encoded as UTF-8.
     *
     * @param <K> key type; rendered via {@code String.valueOf}
     * @param <V> value type; rendered via {@code String.valueOf}
     */
    static class MyRecordWriter<K, V> extends RecordWriter<K, V> {

        /** Original hard-coded target file, kept as the default for backward compatibility. */
        private static final String DEFAULT_PATH = "d:/myFileFormat";

        private final BufferedWriter writer;

        /**
         * Opens the default output file.
         *
         * @throws IOException if the file cannot be opened. The original
         *     swallowed this exception ({@code printStackTrace}) and left
         *     {@code writer} null, which surfaced later as a
         *     NullPointerException on the first {@link #write} call; failing
         *     fast here is strictly better.
         */
        public MyRecordWriter() throws IOException {
            this(DEFAULT_PATH);
        }

        /**
         * Opens the given local file for writing (truncating, UTF-8).
         *
         * @param path local filesystem path of the output file
         * @throws IOException if the file cannot be opened
         */
        public MyRecordWriter(String path) throws IOException {
            // Explicit UTF-8 instead of FileWriter's platform-default charset.
            this.writer = new BufferedWriter(
                    new OutputStreamWriter(new FileOutputStream(path), StandardCharsets.UTF_8));
        }

        /**
         * Writes one record as {@code key value} followed by a newline.
         * {@code String.valueOf} tolerates null keys/values ("null") instead
         * of throwing a NullPointerException.
         */
        @Override
        public void write(K key, V value) throws IOException, InterruptedException {
            writer.write(String.valueOf(key) + " " + String.valueOf(value));
            writer.newLine();
            // No per-record flush: flushing every line defeats BufferedWriter's
            // buffering; close() flushes all pending data.
        }

        /** Flushes buffered data and releases the underlying file handle. */
        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            writer.close();
        }
    }
}
