package pxene.test

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat

object HBase2HBase {

  /**
   * Copies up to 100 rows from a source HBase table to a destination table.
   *
   * Reads the `info:article_content` column of each row via
   * `TableInputFormat`, then writes each (rowKey, content) pair into the
   * destination table as `article:content` via `TableOutputFormat`.
   *
   * @param args optional overrides: args(0) = source table
   *             (default "t_prod_weixin_art"), args(1) = destination table
   *             (default "t_article_test")
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    val sparkContext = new SparkContext(sparkConf)
    try {
      // Table names are overridable from the command line; defaults preserve
      // the original hard-coded behavior.
      val inputTable = args.lift(0).getOrElse("t_prod_weixin_art")
      val outputTable = args.lift(1).getOrElse("t_article_test")

      val hbaseConf = HBaseConfiguration.create()
      hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
      hbaseConf.set("hbase.zookeeper.quorum", "dmp01,dmp02,dmp03,dmp04,dmp05")
      // Source table to scan.
      hbaseConf.set(TableInputFormat.INPUT_TABLE, inputTable)
      // Restrict the scan to the "info" column family.
      hbaseConf.set(TableInputFormat.SCAN_COLUMNS, "info")

      // Read the HBase table as an RDD of (rowKey, Result) pairs.
      val hbaseRDD = sparkContext.newAPIHadoopRDD(
        hbaseConf,
        classOf[TableInputFormat],
        classOf[ImmutableBytesWritable],
        classOf[org.apache.hadoop.hbase.client.Result])

      // Extract (rowKey, content) pairs. getValue returns null when the cell
      // is absent; such rows are dropped here — otherwise Bytes.toBytes(null)
      // in convert() would throw an NPE at write time.
      // take(100) deliberately samples only 100 rows onto the driver.
      val datas = hbaseRDD
        .map { case (_, result) =>
          val raw = result.getValue(Bytes.toBytes("info"), Bytes.toBytes("article_content"))
          (Bytes.toString(result.getRow), Option(raw).map(Bytes.toString))
        }
        .collect { case (rowKey, Some(content)) => (rowKey, content) }
        .take(100)

      // Configure the write side: a Job carries the output format settings.
      val job = Job.getInstance(hbaseConf)
      val jobConf = job.getConfiguration
      jobConf.set(TableOutputFormat.OUTPUT_TABLE, outputTable)
      job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

      sparkContext.parallelize(datas).map(convert).saveAsNewAPIHadoopDataset(jobConf)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sparkContext.stop()
    }
  }

  /**
   * Converts a (rowKey, content) pair into the (key, Put) shape required by
   * `saveAsNewAPIHadoopDataset`, writing the content into `article:content`.
   *
   * @param triple (rowKey, content); content must be non-null
   * @return a (writable key, Put) pair for TableOutputFormat
   */
  def convert(triple: (String, String)): (ImmutableBytesWritable, Put) = {
    val put = new Put(Bytes.toBytes(triple._1))
    put.addColumn(Bytes.toBytes("article"), Bytes.toBytes("content"), Bytes.toBytes(triple._2))
    (new ImmutableBytesWritable, put)
  }
}