package cn.itcast.up.model.tools

import cn.itcast.up.model.bean.HBaseMeta
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation}
import org.apache.spark.sql.types.StructType

/**
  * 将数据写入Hbase的Relation
  */
/**
  * Spark SQL relation that inserts a DataFrame into an HBase table described by `meta`.
  *
  * Each row is expected to expose two String columns, "userid" and "tagIds";
  * "userid" becomes the HBase rowkey and both columns are written into the
  * column family `meta.family`.
  *
  * @param context the active SQLContext
  * @param meta    HBase connection/table metadata (ZK quorum/port, table, family)
  * @param frame   DataFrame whose schema this relation reports
  */
class WritableRelation(context: SQLContext, meta: HBaseMeta, frame: DataFrame) extends BaseRelation with InsertableRelation with Serializable {

  /** @return the SQLContext this relation was created with */
  override def sqlContext: SQLContext = context

  /** @return the schema of the backing DataFrame */
  override def schema: StructType = frame.schema

  /**
    * Writes `data` into the HBase table via the Hadoop new-API output format.
    *
    * @param data      rows to persist; must contain String columns "userid" and "tagIds"
    * @param overwrite ignored — HBase `Put`s are upserts, so inserts always
    *                  overwrite existing cells for the same rowkey/qualifier
    */
  override def insert(data: DataFrame, overwrite: Boolean): Unit = {

    val config = new Configuration()
    config.set("hbase.zookeeper.property.clientPort", meta.zkPort)
    config.set("hbase.zookeeper.quorum", meta.zkHosts)
    // NOTE(review): znode parent is hard-coded for HDP-style "unsecure" clusters;
    // confirm this matches the target cluster, or move it into HBaseMeta.
    config.set("zookeeper.znode.parent", "/hbase-unsecure")
    // Dummy output dir: saveAsNewAPIHadoopDataset's output spec check requires
    // one even though TableOutputFormat never writes files there.
    config.set("mapreduce.output.fileoutputformat.outputdir", "/test01")
    config.set(TableOutputFormat.OUTPUT_TABLE, meta.hbaseTable)

    val job = Job.getInstance(config)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // FIX: the output VALUE class must be the mutation type we emit (Put).
    // `Result` is HBase's *input* value type and does not implement Mutation,
    // so TableOutputFormat would reject/fail on it at write time.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    // Convert every row into a (key, Put) pair and write the whole RDD to HBase.
    data.rdd.map(row => {
      val rowkey: String = row.getAs[String]("userid")
      val tagId: String = row.getAs[String]("tagIds")

      // Use the user id as the rowkey.
      // NOTE(review): getBytes uses the platform default charset; HBase
      // convention is UTF-8 (Bytes.toBytes) — confirm executor JVMs run UTF-8.
      val put = new Put(rowkey.getBytes)
      // Store both columns under the configured family.
      put.addColumn(meta.family.getBytes, "userid".getBytes, rowkey.getBytes)
      put.addColumn(meta.family.getBytes, "tagIds".getBytes, tagId.getBytes)
      (new ImmutableBytesWritable, put)
    })
      .saveAsNewAPIHadoopDataset(job.getConfiguration)
  }
}
