package hbase

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Scan, Put, HTable}
import org.apache.hadoop.hbase.filter._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.hadoop.io.Writable
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.types._
import org.apache.spark.{SparkContext, SparkConf}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import scala.collection.JavaConverters._

/**
  * Created by root on 16-1-11.
  */
/** Practice / demo code for reading and writing HBase from Spark:
  * raw client scans with various filters, RDD input via TableInputFormat,
  * and DataFrame round-trips via TableOutputFormat.
  */
object HbasePractice {
  // Name of the HBase table every example method below operates on.
  val tableName = "spark_hbase_test"
  // Shared handle providing the HBase configuration and table-admin helpers
  // (HandleHbase is defined elsewhere in this project).
  val hbaseHandle = HandleHbase
  
  /** Creates the test table with families cf0/cf1/cf2 and fills it with random data.
    *
    * For each of 100 rows and each family, 5 candidate cells are generated with a
    * random qualifier (qual0..qual8); each candidate is kept with probability 6/10.
    */
  def createTestTable(): Unit ={
    hbaseHandle.createTable(tableName,"cf0","cf1","cf2")
    val table = new HTable(hbaseHandle.conf,tableName)
    try {
      val putBuffer = new ArrayBuffer[Put]()
      for(i <- 0 until 100){
        val put = new Put(Bytes.toBytes("row-"+i))
        for(cf <- 0 until 3){
          for(times <- 0 until 5){
            val qualNum = Random.nextInt(9)
            val isPut = Random.nextInt(10) > 3
            if(isPut){
              put.add(Bytes.toBytes("cf"+cf),Bytes.toBytes("qual"+qualNum),Bytes.toBytes("val"+times))
              println("put:" + "row-"+i + ",cf"+cf + ",qual"+qualNum + ",val"+times)
            }
          }
        }
        // BUG FIX: the original appended the same Put once per generated cell,
        // so the batch contained many duplicate references. Append each row's
        // Put exactly once, and only if it actually holds at least one cell.
        if(!put.isEmpty) putBuffer.append(put)
      }
      // Use the non-deprecated JavaConverters (imported at the top of the file)
      // instead of the implicit JavaConversions.
      table.put(putBuffer.asJava)
    } finally {
      table.close()
    }
    println("finish...")
  }

  /** Demonstrates HBase comparison filters (row, family, qualifier, value and
    * dependent-column), printing every matched Result to stdout.
    */
  def testCompareFilter(): Unit ={
    val table = new HTable(hbaseHandle.conf,tableName)

    // Applies `filter` to `scan`, prints the banner and every matching Result,
    // and always closes the scanner.
    def runScan(banner: String, scan: Scan, filter: Filter): Unit = {
      println(banner)
      scan.setFilter(filter)
      val scanner = table.getScanner(scan)
      try {
        for (res <- scanner.iterator().asScala) {
          println(res)
        }
      } finally {
        scanner.close()
      }
    }

    try {
      // First two filters scan a single column, cf0:qual6.
      val columnScan = new Scan()
      columnScan.addColumn(Bytes.toBytes("cf0"), Bytes.toBytes("qual6"))

      runScan("--------------------row filter BinaryComparator -------------------------",
        columnScan,
        new RowFilter(CompareFilter.CompareOp.LESS_OR_EQUAL,
          new BinaryComparator(Bytes.toBytes("row-22"))))

      runScan("--------------------row filter SubstringComparator -------------------------",
        columnScan,
        new RowFilter(CompareFilter.CompareOp.EQUAL,
          new SubstringComparator("-3")))

      // The remaining filters run over the bounded row range [row-2, row-3).
      val rangeScan = new Scan()
      rangeScan.setStartRow(Bytes.toBytes("row-2"))
      rangeScan.setStopRow(Bytes.toBytes("row-3"))

      runScan("--------------------family filter BinaryComparator -------------------------",
        rangeScan,
        new FamilyFilter(CompareFilter.CompareOp.EQUAL,
          new BinaryComparator(Bytes.toBytes("cf1"))))

      // Regardless of family, emit any cell whose qualifier is exactly "qual1".
      runScan("--------------------qualifier filter BinaryComparator -------------------------",
        rangeScan,
        new QualifierFilter(CompareFilter.CompareOp.EQUAL,
          new BinaryComparator(Bytes.toBytes("qual1"))))

      runScan("--------------------value filter BinaryComparator -------------------------",
        rangeScan,
        new ValueFilter(CompareFilter.CompareOp.EQUAL,
          new BinaryComparator(Bytes.toBytes("val2"))))

      // Uses cf0:qual1 as the reference column and emits the cells that were
      // written together with (same timestamp as) that column.
      runScan("--------------------dependent column filter BinaryComparator -------------------------",
        rangeScan,
        new DependentColumnFilter(Bytes.toBytes("cf0"), Bytes.toBytes("qual1")))
    } finally {
      // BUG FIX: the original never closed the table, leaking the connection.
      table.close()
    }
  }

  /** Demonstrates dedicated HBase filters (SingleColumnValueFilter,
    * SingleColumnValueExcludeFilter, PrefixFilter) over rows [row-1, row-2).
    */
  def testSpecialFilter(): Unit ={
    val table = new HTable(hbaseHandle.conf,tableName)
    try {
      val scan = new Scan()
      scan.setStartRow(Bytes.toBytes("row-1"))
      scan.setStopRow(Bytes.toBytes("row-2"))

      println("--------------------1.single column value filter -------------------------")
      // Keeps only rows whose cf0:qual3 value equals "val2"; rows missing the
      // reference column are dropped entirely (setFilterIfMissing(true)).
      val singleColumnValueFilter = new SingleColumnValueFilter(Bytes.toBytes("cf0"),
        Bytes.toBytes("qual3"),CompareFilter.CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes("val2")))
      singleColumnValueFilter.setFilterIfMissing(true)
      scan.setFilter(singleColumnValueFilter)
      val singleColumnValueScanner = table.getScanner(scan)
      try singleColumnValueScanner.iterator().asScala.foreach(println)
      finally singleColumnValueScanner.close()

      println("--------------------2.single column value exclude filter -------------------------")
      // Same match criterion, but the reference column itself is excluded
      // from the returned rows.
      val singleColumnValueExcludeFilter = new SingleColumnValueExcludeFilter(Bytes.toBytes("cf0"),
        Bytes.toBytes("qual3"),CompareFilter.CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes("val2")))
      singleColumnValueExcludeFilter.setFilterIfMissing(true)
      scan.setFilter(singleColumnValueExcludeFilter)
      val singleColumnValueExcludeScanner = table.getScanner(scan)
      try singleColumnValueExcludeScanner.iterator().asScala.foreach(println)
      finally singleColumnValueExcludeScanner.close()

      println("--------------------3.prefix filter -------------------------")
      // Matches rows whose key starts with "row-11".
      scan.setFilter(new PrefixFilter(Bytes.toBytes("row-11")))
      val prefixScanner = table.getScanner(scan)
      try prefixScanner.iterator().asScala.foreach(println)
      finally prefixScanner.close()
    } finally {
      // BUG FIX: the original never closed the table, leaking the connection.
      table.close()
    }
  }

  /** Demonstrates combining filters with FilterList using both AND
    * (MUST_PASS_ALL) and OR (MUST_PASS_ONE) semantics.
    */
  def testFilterList(): Unit ={
    val table = new HTable(hbaseHandle.conf,tableName)
    try {
      val scan = new Scan()
      scan.setStartRow(Bytes.toBytes("row-2"))
      scan.setStopRow(Bytes.toBytes("row-3"))
      val familyFilter1 = new FamilyFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("cf1")))
      val qualifierFilter1 = new QualifierFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("qual1")))

      println("------------------test MUST_PASS_ALL---------------------")
      // AND: a cell must satisfy both the family and the qualifier filter.
      scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,familyFilter1,qualifierFilter1))
      val filterScanner1 = table.getScanner(scan)
      try filterScanner1.iterator().asScala.foreach(println)
      finally filterScanner1.close()

      println("------------------test MUST_PASS_ONE---------------------")
      // OR: a cell passing either filter is returned.
      scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ONE,familyFilter1,qualifierFilter1))
      val filterScanner2 = table.getScanner(scan)
      try filterScanner2.iterator().asScala.foreach(println)
      finally filterScanner2.close()
    } finally {
      // BUG FIX: the original never closed the table, leaking the connection.
      table.close()
    }
  }

  /** Reads the HBase test table as a Spark RDD via TableInputFormat and prints,
    * for each row in [row-1, row-2), whether it contains column cf0:qual1.
    */
  def readAsRDD(): Unit ={
    val sparkConf = new SparkConf().setAppName("read-hbase-test").setMaster("local")
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(sparkConf)
    try {
      val hbaseConf = HandleHbase.conf
      // A client-side Scan can be handed to TableInputFormat as a parameter here.
      val scan = new Scan()
      scan.setStartRow(Bytes.toBytes("row-1"))
      scan.setStopRow(Bytes.toBytes("row-2"))
      // Serializes the Scan into the Base64-encoded protobuf string form that
      // TableInputFormat expects in its SCAN configuration key.
      def convertScanToString (scan: Scan) = {
        val proto: ClientProtos.Scan = ProtobufUtil.toScan(scan)
        Base64.encodeBytes(proto.toByteArray)
      }
      // TableInputFormat exposes further filtering knobs as static constants;
      // see its static fields for the available configuration keys.
      hbaseConf.set(org.apache.hadoop.hbase.mapreduce.TableInputFormat.SCAN,
        convertScanToString(scan))

      hbaseConf.set(org.apache.hadoop.hbase.mapreduce.TableInputFormat.INPUT_TABLE,"spark_hbase_test")
      val rdd = sc.newAPIHadoopRDD(hbaseConf,classOf[org.apache.hadoop.hbase.mapreduce.TableInputFormat],
        classOf[ImmutableBytesWritable],classOf[org.apache.hadoop.hbase.client.Result])
      rdd.map{
        x=>
          x._2.containsColumn(Bytes.toBytes("cf0"),Bytes.toBytes("qual1"))
      }.foreach(println)
    } finally {
      // BUG FIX: the original never stopped the SparkContext, leaving the
      // local cluster resources allocated.
      sc.stop()
    }
  }

  /** Persists an RDD of (rowKey, value) pairs into a single HBase column
    * using the new Hadoop mapreduce API.
    *
    * @param tableName    target HBase table
    * @param columnFamily column family to write into
    * @param column       qualifier to write into
    * @param rdd          (row key, cell value) pairs
    */
  def writeHbaseFromRDD(tableName:String,columnFamily:String,column:String,rdd:RDD[(String,String)]): Unit = {
    val hbaseConf = HBaseConfiguration.create()
    // Both the legacy mapred API (JobConf + saveAsHadoopDataset with
    // org.apache.hadoop.hbase.mapred.TableOutputFormat) and the new mapreduce
    // API work here; most Hadoop versions ship both. The new API is used below.
    val job = new org.apache.hadoop.mapreduce.Job(hbaseConf)
    job.setOutputFormatClass(classOf[org.apache.hadoop.hbase.mapreduce.TableOutputFormat[ImmutableBytesWritable]])
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setOutputValueClass(classOf[Writable])
    job.getConfiguration.set(org.apache.hadoop.hbase.mapreduce.TableOutputFormat.OUTPUT_TABLE, tableName)

    // Turn each (key, value) pair into a single-cell Put keyed for TableOutputFormat.
    val puts = rdd.map { kv =>
      val put = new Put(Bytes.toBytes(kv._1))
      put.add(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(kv._2))
      // put.setWriteToWAL(false) would skip the write-ahead log for speed.
      (new ImmutableBytesWritable, put)
    }
    puts.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }

  /** Converts the HBase test table into a Spark DataFrame and queries it.
    *
    * The columns to read are declared as "family:qualifier;Type" entries;
    * each becomes a DataFrame column named "family_qualifier". Missing cells
    * map to a type-specific default ("" / 0 / 0.0 / 0L / false).
    *
    * @return the result of `select * from hbase where cf0_qual1 = 'val0'`
    */
  def transHbaseRDDToDF() = {
    val sparkConf = new SparkConf().setAppName("portrait-test").setMaster("local")
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sc)

    // Column declarations: "family:qualifier;Type" pairs.
    val inQualifiersStr = "cf0:qual1;String,cf1:qual2;String"
    val inQualifiers = inQualifiersStr.split(",").map(_.split(";"))

    // Maps a declared type name onto the Spark SQL type (defaults to String).
    // Driver-side only, so it is safe as a local helper.
    def sqlType(dataType: String): DataType = dataType match {
      case "Double"  => DoubleType
      case "Int"     => IntegerType
      case "Long"    => LongType
      case "Boolean" => BooleanType
      case _         => StringType // "String" and any unrecognized type
    }
    // FIX: renamed the misspelled local "schame" -> "schema".
    val schema = StructType(Array(StructField("key",StringType)) ++ inQualifiers.map{
      case Array(column,dataType) => StructField(column.replace(":","_"), sqlType(dataType))
    })

    val hbaseConf = HandleHbase.conf
    hbaseConf.set(org.apache.hadoop.hbase.mapreduce.TableInputFormat.INPUT_TABLE,"spark_hbase_test")
    // Read the table and convert each Result into a Row matching the schema.
    // The extraction logic stays inline in the closure (rather than a local
    // helper) so the task captures only serializable locals.
    val rdd = sc.newAPIHadoopRDD(hbaseConf,classOf[org.apache.hadoop.hbase.mapreduce.TableInputFormat],
      classOf[ImmutableBytesWritable],classOf[org.apache.hadoop.hbase.client.Result]).map{
      case (_, result) =>
        val values: Array[Any] = Array[Any](Bytes.toString(result.getRow)) ++
          inQualifiers.map{
            case Array(column,dataType) =>
              val Array(family,col) = column.split(":")
              // Hoist the byte conversions: the original recomputed
              // Bytes.toBytes(family/col) up to three times per cell.
              val familyBytes = Bytes.toBytes(family)
              val colBytes = Bytes.toBytes(col)
              val present = result.containsColumn(familyBytes, colBytes)
              dataType match {
                case "Double"  => if(present) Bytes.toDouble(result.getValue(familyBytes, colBytes)) else 0.0
                case "Int"     => if(present) Bytes.toInt(result.getValue(familyBytes, colBytes)) else 0
                // FIX: uppercase long literal (0L) instead of the deprecated,
                // easily-misread lowercase 0l.
                case "Long"    => if(present) Bytes.toLong(result.getValue(familyBytes, colBytes)) else 0L
                case "Boolean" => if(present) Bytes.toBoolean(result.getValue(familyBytes, colBytes)) else false
                case _         => if(present) Bytes.toString(result.getValue(familyBytes, colBytes)) else ""
              }
          }
        Row(values:_*)
    }

    sqlContext.createDataFrame(rdd,schema).registerTempTable("hbase")
    sqlContext.sql("select * from hbase where cf0_qual1 = 'val0'")
  }

  /** Writes a DataFrame back into the HBase test table.
    *
    * Rows must carry a "key" column plus one "family_qualifier" column per
    * declared output qualifier. Default-valued cells ("" / 0 / 0.0 / 0L /
    * false) are skipped rather than written, mirroring the defaults produced
    * by transHbaseRDDToDF for missing cells.
    *
    * @param df DataFrame to persist
    */
  def writeHbaseFromDF(df:DataFrame) ={
    // Output columns, declared as "family:qualifier;Type" entries.
    val outQualifiersStr = "cf0:qual1;String,cf1:qual2;String"
    val inQualifiers = outQualifiersStr.split(",").map(_.split(";"))
    val hbaseConf = HBaseConfiguration.create()
    val job = new org.apache.hadoop.mapreduce.Job(hbaseConf)
    job.setOutputFormatClass(classOf[org.apache.hadoop.hbase.mapreduce.TableOutputFormat[ImmutableBytesWritable]])
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setOutputValueClass(classOf[Writable])
    job.getConfiguration.set(org.apache.hadoop.hbase.mapreduce.TableOutputFormat.OUTPUT_TABLE, tableName)

    df.rdd.map{
      row =>
        val p = new Put(Bytes.toBytes(row.getAs[String]("key")))
        inQualifiers.foreach{
          case Array(column,dataType) =>
            val Array(family,col) = column.split(":")
            // Hoist byte conversions shared by every branch below.
            val familyBytes = Bytes.toBytes(family)
            val colBytes = Bytes.toBytes(col)
            dataType match {
              case "Double" =>
                val value = row.getAs[Double](family + "_" + col)
                if(value != 0.0)
                  p.add(familyBytes, colBytes, Bytes.toBytes(value))
              case "Int" =>
                val value = row.getAs[Int](family + "_" + col)
                if(value != 0)
                  p.add(familyBytes, colBytes, Bytes.toBytes(value))
              case "Long" =>
                val value = row.getAs[Long](family + "_" + col)
                // FIX: uppercase long literal (0L) instead of deprecated 0l.
                if(value != 0L)
                  p.add(familyBytes, colBytes, Bytes.toBytes(value))
              case "Boolean" =>
                val value = row.getAs[Boolean](family + "_" + col)
                if(value)
                  p.add(familyBytes, colBytes, Bytes.toBytes(value))
              case _ =>
                // "String" and any unrecognized type are written as strings.
                // BUG FIX: guard against null — getAs[String] returns null for
                // a null cell and null != "" is true, so the original would
                // NPE inside Bytes.toBytes(null).
                val value = row.getAs[String](family + "_" + col)
                if(value != null && value.nonEmpty)
                  p.add(familyBytes, colBytes, Bytes.toBytes(value))
            }
        }
        // p.setWriteToWAL(false) would skip the write-ahead log for speed.
        (new ImmutableBytesWritable, p)
    }.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }

  /** Entry point: builds the DataFrame view of the HBase table and prints
    * the filtered query result.
    */
  def main (args: Array[String]): Unit = {
    val frame = transHbaseRDDToDF()
    frame.show()
  }
}
