package hivetohbase_scala

import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.collection.mutable.ArrayBuffer


object HiveSpark {
  /** Normalizes a possibly-null or empty column value to the literal "NULL".
   *
   * @param str raw column value, possibly null or empty
   * @return "NULL" when str is null or empty, otherwise str unchanged
   */
  def nullHandle(str: String): String =
    Option(str).filter(_.nonEmpty).getOrElse("NULL")

  /** Reads the main-material order data from Hive and returns it as a DataFrame.
   *
   * A `rowkey` column (MD5 of etl_proc_wid and order_code) is appended by the
   * query so the result can later be converted to HBase KeyValues.
   *
   * @param dbName  Hive database to switch to before querying
   * @param sql     currently unused — reserved for a caller-supplied query
   *                (TODO: wire up instead of the hard-coded statement below)
   * @param hiveUrl Hive connection URL passed to HiveUtils.hiveConnect
   * @param sysUser system user for the Hive connection
   * @return the queried rows (capped at 20 during development via LIMIT)
   */
  def getColumnsInfo(dbName: String, sql: String, hiveUrl: String, sysUser: String): DataFrame = {
    // Connect to Hive through the project helper.
    val spark: SparkSession = HiveUtils.hiveConnect(hiveUrl, sysUser)

    println("*******************************从hive中读取数据********************************************")
    // Switch to the requested database. NOTE(review): dbName is interpolated
    // directly into SQL — ensure callers pass a trusted database name.
    spark.sql(s"use $dbName")
    val dataFrame: DataFrame = spark.sql(
      """select ETL_PROC_WID,
      cast(w_insert_dt as string),
      cast(w_update_dt as string),
      order_code,
      cast(deco_project_id as string),
      sales_store_code,
      sales_store_name,
      cast(CUSTOMER_ID as string),
      CUSTOMER_CODE,
      CUSTOMER_NAME,
      cast(supplier_id as string),
      supplier_name,
      supplier_code,
      cast(MATERIAL_DESIGNER_ID as string),
      MATERIAL_DESIGNER_NAME,
      BRAND_NAME,
      cast(SETTLEMENT_STATUS as string),
      cast(CORPORATION_ID as string),
      cast(Main_Material_Order_Node as string),
      cast(pid as string),
      cast(order_date as string),
      cast(MAIN_MATERIAL_CATEGORY_ID as string),
      cast(SETTLEMENT_AMOUNT as string),
      cast(bargain_amount as string),
      report_store_name,
      sale_store_name,
      store_type_name,
      store_syb,
      MATERIAL_SALES_CATEGORY_NAME,
      GROUP_NAME,
      NAME,
      MD5(concat_ws("/",etl_proc_wid,order_code)) as rowkey
      from bol_dw_main_material_place_order limit 20""")


    println("***************************************************************************")
    dataFrame

  }

  /** Converts Hive rows into sorted (rowkey, KeyValue) pairs suitable for HFile output.
   *
   * HFiles require cells to be globally ordered by (rowkey, family, qualifier),
   * so every cell is sorted before being mapped to a KeyValue; rows whose
   * rowkey is null are dropped up front to avoid corrupt output.
   *
   * @param res               source DataFrame; must contain a string "rowkey" column
   * @param hBaseColumnFamily column family every cell is written under
   * @return RDD keyed by ImmutableBytesWritable, ready for HFile bulk loading
   */
  def processHiveInfo(res: DataFrame, hBaseColumnFamily: String): RDD[(ImmutableBytesWritable, KeyValue)] = {

    // Explode each row into one (rowkey, (family, column, value)) tuple per column.
    // NOTE(review): assumes every selected column is a string (the upstream query
    // casts them) — getAs[String] will fail on non-string columns; confirm.
    val dataRDD: RDD[(String, (String, String, String))] = res.rdd.flatMap { row =>
      val rowkey = row.getAs[String]("rowkey")
      row.schema.fields.map { field =>
        (rowkey, (hBaseColumnFamily, field.name, nullHandle(row.getAs[String](field.name))))
      }
    }

    // Sort by (rowkey, family, qualifier) — mandatory for HFile generation — and
    // convert to the (ImmutableBytesWritable, KeyValue) shape HFileOutputFormat expects.
    val resRdd: RDD[(ImmutableBytesWritable, KeyValue)] =
      dataRDD
        .filter(_._1 != null)
        .sortBy(x => (x._1, x._2._1, x._2._2))
        .map { case (key, (family, column, value)) =>
          val rowKeyBytes = Bytes.toBytes(key)
          (new ImmutableBytesWritable(rowKeyBytes),
            new KeyValue(rowKeyBytes, Bytes.toBytes(family), Bytes.toBytes(column), Bytes.toBytes(value)))
        }

    resRdd
  }

  /** Entry point: pulls the sample order data from Hive.
   *
   * Optional args: [dbName] [sql] [hiveUrl] [sysUser]. Any missing argument
   * falls back to the previous hard-coded value, so running with no arguments
   * behaves exactly as before.
   */
  def main(args: Array[String]): Unit = {
    val dbName  = args.lift(0).getOrElse("test_hive")
    val sql     = args.lift(1).getOrElse("")
    val hiveUrl = args.lift(2).getOrElse("")
    val sysUser = args.lift(3).getOrElse("")
    getColumnsInfo(dbName, sql, hiveUrl, sysUser)
  }

}
