package kudu

import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.junit.Test

/**
 * DataFrame API 操作 kudu
 */
class DFOpKudu {

  // Comma-separated list of Kudu master RPC addresses shared by every test.
  private val KuduMaster = "s202:7051,s203:7051,s204:7051"

  // Name of the Kudu table the tests read from and write to.
  private val TableName = "sparkopKudu"

  // Connector options: where the Kudu masters are and which table to target.
  private val kuduOptions = Map(
    "kudu.master" -> KuduMaster,
    "kudu.table" -> TableName
  )

  /**
   * Builds a fresh local-mode SparkSession for a single test.
   * Callers are responsible for calling `stop()` when done (see the
   * try/finally in each test).
   */
  private def newSession(): SparkSession =
    SparkSession.builder()
      .master("local[*]")
      .appName("sparkOpKudu")
      .config("spark.some.config.option", "some-value")
      .getOrCreate()

  /**
   * Reads the Kudu table through the DataFrame source API
   * (`format("kudu")`) and prints its contents.
   */
  @Test
  def query(): Unit = {
    val spark = newSession()
    try {
      val df = spark.read.options(kuduOptions)
        .format("kudu").load
      df.show()
    } finally {
      // Always release the session, even if the read fails.
      spark.stop()
    }
  }

  /**
   * Writes a small DataFrame of [[Person]] rows into the Kudu table,
   * then reads the table back and prints it.
   */
  @Test
  def DFWrite2Kudu(): Unit = {
    val spark = newSession()
    try {
      import spark.implicits._
      // Brings the `kudu` extension methods on DataFrameReader/Writer into scope.
      import org.apache.kudu.spark.kudu._

      val data = List(Person(7, "qiqi", 30), Person(8, "xiaoba", 40))
      val df = data.toDF

      // The Kudu Spark connector currently only supports append mode for writes.
      df.write.options(kuduOptions).mode("append").kudu

      // Load the table again and show the result, write included.
      spark.read.options(kuduOptions).kudu.show()
    } finally {
      spark.stop()
    }
  }

  /**
   * Inserts rows into the Kudu table via SparkSQL: registers a local
   * DataFrame and the Kudu table as temp views, runs an INSERT ... SELECT
   * between them, then queries and prints the result.
   */
  @Test
  def sparksql2Kudu(): Unit = {
    val spark = newSession()
    try {
      import spark.implicits._
      import org.apache.kudu.spark.kudu._

      val data = List(Person(10, "小张", 30), Person(11, "小王", 40))
      val df: DataFrame = spark.sparkContext.parallelize(data).toDF

      // createOrReplaceTempView is idempotent, unlike createTempView which
      // throws if the view name is already registered in this session.
      df.createOrReplaceTempView("temp1")
      // Register the Kudu table's contents as a second temp view.
      spark.read.options(kuduOptions).kudu.createOrReplaceTempView("temp2")

      // Use SparkSQL to insert the local rows into the Kudu-backed view.
      spark.sql("insert into table temp2 select * from temp1")
      spark.sql("select * from temp2 where age >30").show()

      // Reload the table directly and print everything.
      spark.read.options(kuduOptions).kudu.show()
    } finally {
      spark.stop()
    }
  }

  /**
   * Reads the Kudu table through the native RDD API: `KuduContext.kuduRDD`
   * takes the SparkContext, the table name, and the projected column names,
   * and yields an `RDD[Row]` which is then mapped to typed tuples and printed.
   */
  @Test
  def kuduNativeRDD(): Unit = {
    val spark = newSession()
    try {
      val kuduContext = new KuduContext(KuduMaster, spark.sparkContext)

      // Project only the "name" and "age" columns from the table.
      val kuduRDD: RDD[Row] = kuduContext.kuduRDD(spark.sparkContext, TableName, Seq("name", "age"))

      // NOTE(review): this match is partial — a row with a null column would
      // throw a MatchError; acceptable here because both columns are expected
      // non-null, but confirm against the table schema.
      val result: RDD[(String, Int)] = kuduRDD.map {
        case Row(name: String, age: Int) => (name, age)
      }
      result.foreach(println)
    } finally {
      spark.stop()
    }
  }

}

/**
 * Immutable record matching the Kudu table's schema as used by the tests.
 *
 * @param id   primary-key column
 * @param name name column
 * @param age  age column
 */
final case class Person(id: Long, name: String, age: Int)
