package org.spark.api.utils

import java.io.IOException

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{IdentityTableMapper, TableInputFormat, TableMapReduceUtil}
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.{InputFormat, Job}
import org.apache.spark.SparkContext
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.RDD

/**
 * Helpers for reading an HBase table into a Spark [[RDD]] via the
 * Hadoop "new API" ([[org.apache.hadoop.mapreduce]]) input-format path.
 *
 * The overloads form a delegation chain: String table name -> [[TableName]]
 * -> fully-specified input format, so the default input format is declared
 * in exactly one place.
 */
object SparkContextUtil {

  /**
   * Reads `tableName` (resolved with [[TableName.valueOf]]) using the default
   * [[TableInputFormat]].
   *
   * @param sc        active Spark context used to create the RDD
   * @param config    HBase/Hadoop configuration (cluster quorum, security, ...)
   * @param tableName fully-qualified HBase table name as a string
   * @param scan      scan describing the rows/columns to read
   * @return an RDD of the scanned HBase [[Result]]s
   * @throws IOException if the underlying HBase/Hadoop job setup fails
   */
  @throws[IOException]
  def hbaseRDD(sc: SparkContext,
               config: Configuration,
               tableName: String,
               scan: Scan): RDD[Result] =
    // Delegate so the default input format class is chosen in one place only.
    hbaseRDD(sc, config, TableName.valueOf(tableName), scan)

  /**
   * Reads `tableName` using the default [[TableInputFormat]].
   *
   * @param sc        active Spark context used to create the RDD
   * @param config    HBase/Hadoop configuration
   * @param tableName HBase table to scan
   * @param scan      scan describing the rows/columns to read
   * @return an RDD of the scanned HBase [[Result]]s
   * @throws IOException if the underlying HBase/Hadoop job setup fails
   */
  @throws[IOException]
  def hbaseRDD(sc: SparkContext,
               config: Configuration,
               tableName: TableName,
               scan: Scan): RDD[Result] =
    hbaseRDD(sc, config, tableName, scan, classOf[TableInputFormat])

  /**
   * Reads `tableName` with an explicit input-format class.
   *
   * @param sc               active Spark context used to create the RDD
   * @param config           HBase/Hadoop configuration
   * @param tableName        HBase table to scan
   * @param scan             scan describing the rows/columns to read
   * @param inputFormatClass input format producing `(ImmutableBytesWritable, Result)` pairs
   * @return an RDD of the scanned HBase [[Result]]s (row keys are dropped via `.values`)
   * @throws IOException if the underlying HBase/Hadoop job setup fails
   */
  @throws[IOException]
  def hbaseRDD(sc: SparkContext,
               config: Configuration,
               tableName: TableName,
               scan: Scan,
               inputFormatClass: Class[_ <: InputFormat[ImmutableBytesWritable, Result]]): RDD[Result] = {
    // A throwaway MapReduce Job is created only so TableMapReduceUtil can
    // serialize the table name and Scan into its Configuration; the mapper
    // (IdentityTableMapper, null output classes) is never actually run.
    val job = Job.getInstance(config)
    TableMapReduceUtil.initTableMapperJob(tableName, scan, classOf[IdentityTableMapper], null, null, job)
    val jobConfig = new JobConf(job.getConfiguration)
    // NOTE(review): addCredentials attaches the current user's Hadoop
    // credentials (e.g. delegation tokens) to the job conf — needed on
    // secured clusters so executors can authenticate; confirm against the
    // deployed Spark version since SparkHadoopUtil is spark-internal API.
    SparkHadoopUtil.get.addCredentials(jobConfig)
    // Keys (row-key ImmutableBytesWritable) are discarded; callers get Results only.
    sc.newAPIHadoopRDD(jobConfig, inputFormatClass, classOf[ImmutableBytesWritable], classOf[Result]).values
  }
}
