package com.datamining.hbase

import org.apache.hadoop.hbase.client.{ConnectionFactory, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants, TableName}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

import scala.collection.mutable
import scala.collection.mutable.ListBuffer


/**
  * Spark job that reads the HBase table "student" and prints its rows.
  * Created on 2017/5/23.
  *
  * @author Administrator kevin
  */
object HbaseReadTest {

  /**
    * Entry point. Connects Spark (local[4]) to the HBase table "student"
    * via `TableInputFormat`, then demonstrates several ways of extracting
    * row keys and "info" column-family values, printing up to 10 rows each.
    *
    * Requires a reachable HBase/ZooKeeper quorum (hadoop01..03:2181).
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setMaster("local[4]")
      .setAppName("HbaseTest")

    val hbaseConfig = HBaseConfiguration.create()
    hbaseConfig.set(HConstants.ZOOKEEPER_QUORUM, "hadoop01,hadoop02,hadoop03")
    hbaseConfig.set(HConstants.ZOOKEEPER_CLIENT_PORT, "2181")
    // Table consumed by TableInputFormat; it manages its own HBase connection,
    // so no explicit ConnectionFactory.createConnection is needed here.
    hbaseConfig.set(TableInputFormat.INPUT_TABLE, "student")

    val sparkSession = SparkSession.builder()
      .appName("HbaseReadTest")
      .config(conf = sparkConf)
      .getOrCreate()

    try {
      // ① Obtain an RDD of (rowkey, Result) pairs through the MapReduce input format.
      //
      // Alternative (not used): new HBaseContext(sc, hbaseConfig).hbaseRDD(tableName, new Scan()).
      // On Spark 2.x this fails because hbase-spark references the removed
      // org.apache.spark.Logging (moved to org.apache.spark.internal.Logging in 2.1).
      val hbaseRdd = sparkSession.sparkContext.newAPIHadoopRDD(
        hbaseConfig,
        classOf[TableInputFormat],
        classOf[ImmutableBytesWritable],
        classOf[Result])

      // (age, name) pairs from the "info" family. Missing cells print as null
      // because Bytes.toString(null) returns null.
      hbaseRdd.map { case (_, result) =>
        (
          Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("age"))),
          Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("name")))
        )
      }.take(10).foreach(println)

      // Row keys only.
      hbaseRdd
        .map { case (rowKey, _) => Bytes.toString(rowKey.get()) }
        .take(10)
        .foreach(println)

      println("########################################################")

      // (rowkey, name, age, gender) tuples.
      hbaseRdd.map(_._2).map { result =>
        (
          Bytes.toString(result.getRow),
          Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"))),
          Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("age"))),
          Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("gender")))
        )
      }.take(10).foreach(println)

      println("======================================================")

      // (rowkey, Map(qualifier -> value)) covering every column of the "info" family.
      val outputRdd = hbaseRdd.map(_._2).map { result =>
        val familyMap = result.getFamilyMap(Bytes.toBytes("info"))
        val columns = new mutable.HashMap[String, String]()
        val iterator = familyMap.keySet().iterator()
        while (iterator.hasNext) {
          val qualifier = iterator.next()
          columns += (Bytes.toString(qualifier) -> Bytes.toString(familyMap.get(qualifier)))
        }
        (Bytes.toString(result.getRow), columns)
      }

      // Sample output:
      //   (1,Xueqian,23,F)
      //   (2,Weiliang,24,M)
      //   (3,testAdd3,100,null)
      //   (4,testAdd4,100,null)
      //
      // To persist, use a triple-slash file URI (file://home/... is malformed):
      //   outputRdd.saveAsTextFile("file:///home/hadoop/hbaseTest.txt")
    } finally {
      // Always release the Spark session, even if a stage above throws.
      sparkSession.close()
    }
  }
}
