package team.bluepen.supermarket.data.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import team.bluepen.supermarket.constant.Constant;
import team.bluepen.supermarket.data.hbase.HbaseConnectHelper;
import team.bluepen.supermarket.util.Log;

import java.io.IOException;

/**
 * Runs a MapReduce job that imports table data from HDFS into HBase.
 * @author Kai
 */
public class TableFileReadRunner {
    /** Fully-qualified HDFS path of the input file(s) to import. */
    private final Path path;
    /** Hadoop configuration obtained from the shared HBase connection. */
    private final Configuration configuration;

    /**
     * Creates a runner for the given absolute HDFS input path.
     *
     * @param path fully-qualified HDFS input path
     * @throws IOException if the HBase connection cannot be created
     */
    public TableFileReadRunner(Path path) throws IOException {
        this.path = path;
        configuration = HbaseConnectHelper.create().getConfiguration();
        Log.i("HDFS", path.toUri().toString());
    }

    /**
     * Creates a runner for a path relative to {@link Constant#HADOOP_URL}.
     *
     * @param path input path relative to the configured HDFS root
     * @throws IOException if the HBase connection cannot be created
     */
    public TableFileReadRunner(String path) throws IOException {
        this(new Path(Constant.HADOOP_URL + "/" + path));
    }

    /**
     * Configures and synchronously runs the HDFS-to-HBase import job.
     *
     * @param table name of the destination HBase table
     * @param clz   mapper class that parses input records into {@link Put}s
     * @return {@code true} if the job completed successfully
     * @throws IOException            on job setup or HDFS/HBase I/O failure
     * @throws InterruptedException   if the calling thread is interrupted while waiting
     * @throws ClassNotFoundException if a job class cannot be resolved
     */
    public boolean startJob(String table, Class<? extends Mapper<?, ?, ?, ?>> clz) throws IOException, InterruptedException, ClassNotFoundException {
        Job job = Job.getInstance(configuration);

        job.setJarByClass(TableFileReadRunner.class);
        job.setMapperClass(clz);

        // The mapper must emit (row key, Put) pairs for the table reducer below.
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);

        TextInputFormat.addInputPath(job, path);
        // initTableReducerJob configures TableOutputFormat (including the output
        // table name) itself, so no explicit setOutputFormatClass call is needed.
        TableMapReduceUtil.initTableReducerJob(table, TableReadReducer.class, job);
        TableMapReduceUtil.addDependencyJars(job);

        // NOTE(review): a single reducer funnels all writes through one task —
        // presumably intentional for ordering/simplicity; confirm for large inputs.
        job.setNumReduceTasks(1);
        return job.waitForCompletion(true);
    }

}
