package com.ivyft.hive.hadoop;

import com.google.protobuf.GeneratedMessage;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

/**
 * <pre>
 * A {@link FileInputFormat} that treats each input file as a single,
 * non-splittable stream of length-prefixed binary records, producing
 * {@link LongWritable}/{@link BytesWritable} pairs via
 * {@link IntLengthHeaderRecordReader}.
 *
 * Created by IntelliJ IDEA.
 * User: zhenqin
 * Date: 15/4/8
 * Time: 14:40
 *
 * </pre>
 *
 * @author zhenqin
 */
public class IntLengthHeaderInputFormat extends FileInputFormat<LongWritable, BytesWritable> {

    /** Configuration key under which the protobuf message class name is stored. */
    public final static String PROTOBUF_CLASS = "hive.protobuf.class";

    /**
     * Records the fully-qualified name of the generated protobuf message class
     * in the job configuration under {@link #PROTOBUF_CLASS}, so downstream
     * readers can look it up.
     *
     * @param conf  job configuration to update
     * @param clazz generated protobuf message class whose name is stored
     */
    public static void setInputObjectClass(Configuration conf, Class<? extends GeneratedMessage> clazz) {
        conf.set(PROTOBUF_CLASS, clazz.getName());
    }

    /**
     * Builds the record reader used to parse the split.
     *
     * @param split   input split to read
     * @param context task attempt context (unused here)
     * @return a fresh {@link IntLengthHeaderRecordReader}
     */
    @Override
    public RecordReader<LongWritable, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context) {
        return new IntLengthHeaderRecordReader();
    }

    /**
     * Always {@code false}: each file is consumed as one split
     * (presumably because length-prefixed records carry no sync markers to
     * resume from mid-file — confirm against the record reader).
     *
     * @param context the job context
     * @param file    the candidate file
     * @return {@code false} for every file
     */
    @Override
    protected boolean isSplitable(JobContext context, Path file) {
        return false;
    }

}
