package cn.itcast.model.utils

import cn.itcast.model.bean.HBaseMeta
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation}
import org.apache.spark.sql.types.StructType

/**
 * Hbase数据写入的具体实现类
 */
class HBaseWritableRelation(context: SQLContext, meta: HBaseMeta, frame: DataFrame) extends BaseRelation with InsertableRelation with Serializable {
  // SQLContext this relation belongs to
  override def sqlContext: SQLContext = context
  // Column metadata is taken directly from the DataFrame being written
  override def schema: StructType = frame.schema

  /**
   * Writes the given DataFrame into the HBase table described by `meta`.
   *
   * Each row is converted into a `Put` keyed by the "userId" column; every
   * field listed in `meta.selectFields` (comma-separated) becomes a cell in
   * column family `meta.family`. Null-valued columns are skipped so they do
   * not trigger a NullPointerException during serialization.
   *
   * @param data      rows to persist; must contain a String "userId" column
   *                  plus all columns named in `meta.selectFields`
   * @param overwrite ignored — HBase `Put`s naturally upsert by rowkey, so
   *                  existing cells are overwritten either way
   */
  override def insert(data: DataFrame, overwrite: Boolean): Unit = {
    val config = new Configuration()
    config.set("hbase.zookeeper.property.clientPort", meta.zkPort)
    config.set("hbase.zookeeper.quorum", meta.zkHosts)
    config.set("zookeeper.znode.parent", "/hbase-unsecure")
    // TableOutputFormat still requires an output dir even though no files are written
    config.set("mapreduce.output.fileoutputformat.outputdir", "/test01")
    config.set(TableOutputFormat.OUTPUT_TABLE, meta.hbaseTable)

    val job = Job.getInstance(config)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // BUGFIX: the output VALUE we emit is a Put (a Mutation), not a Result.
    // Result is HBase's read-side class; declaring it here mismatched the
    // (ImmutableBytesWritable, Put) tuples produced below.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    // Convert DataFrame => RDD[(rowkey, Put)] and save via the new Hadoop API.
    // Example input:
    // +------+------+
    // |userId|tagIds|
    // +------+------+
    // |     1|    89|
    // |    10|    89|
    data.rdd
      .map(row => {
        // Use the user id as the HBase rowkey
        val rowKey: String = row.getAs[String]("userId")
        val put = new Put(rowKey.getBytes)
        // Side-effecting loop: foreach (not map) — we only mutate `put`
        meta.selectFields.split(",")
          .foreach(fieldName => {
            // Fetch the cell value for this column; skip nulls to avoid NPE
            val columnValue: String = row.getAs[String](fieldName)
            if (columnValue != null) {
              put.addColumn(meta.family.getBytes, fieldName.getBytes, columnValue.getBytes)
            }
          })
        // (rowkey, mutation) tuple expected by TableOutputFormat
        (new ImmutableBytesWritable(rowKey.getBytes()), put)
      })
      .saveAsNewAPIHadoopDataset(job.getConfiguration)
  }
}
