package cd.itcast.spark.kudu.kudu_curd

import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.{SparkConf, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object SparkKuduDataCurd {

  /**
    * Entry point: builds a SparkSession and a KuduContext, then exercises the
    * CRUD helpers against the Kudu table. All but the select call are left
    * commented out so each operation can be enabled individually.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // TODO: 1. Build the SparkSession instance
    val sparkConf = new SparkConf()
      .setMaster("local[4]")
      .setAppName(this.getClass.getSimpleName)
    // Builder pattern: reuse an existing session or create a new one
    val spark: SparkSession = SparkSession.builder()
      .config(sparkConf) // apply application configuration
      .getOrCreate()
    // Reduce log noise in local runs
    spark.sparkContext.setLogLevel("WARN")

    // TODO: 2. Build the KuduContext used to operate on Kudu table data
    // Comma-separated list of Kudu master addresses
    val kuduMaster: String = "node01:7051,node02:7051,node03:7051"
    val kuduContext: KuduContext = new KuduContext(kuduMaster, spark.sparkContext)
    val tableName = "kudu_itcast"

    // Insert data
    // insertData(spark, kuduContext, tableName)

    // Query data (prints each row to stdout)
    selectData(spark, kuduContext, tableName)

    // Update data
    // updateData(spark, kuduContext, tableName)

    // Upsert data (insert or update)
    // upsertData(spark, kuduContext, tableName)

    // Delete data
    // deleteData(spark, kuduContext, tableName)

    // TODO: 4. Application finished, release resources
    spark.close()
  }
  /**
    * Inserts a small set of hard-coded sample users into the given Kudu table.
    * Rows whose primary key already exists in the table are ignored by the
    * insert (KuduContext.insertRows semantics).
    *
    * @param spark       active SparkSession used to build the DataFrame
    * @param kuduContext Kudu context that performs the insert
    * @param tableName   target Kudu table name
    */
  def insertData(spark: SparkSession, kuduContext: KuduContext, tableName: String) = {
    // a. Mock some user records: (id, name, age, gender)
    val sampleUsers = Seq(
      (1001, "zhangsan", 23, "男"),
      (1002, "lisi", 22, "男"),
      (1003, "xiaohong", 24, "女"),
      (1005, "zhaoliu2", 33, "男")
    )
    val usersDF: DataFrame = spark
      .createDataFrame(sampleUsers)
      .toDF("id", "name", "age", "gender")

    // Insert method one: write the DataFrame rows into the Kudu table.
    //   signature: insertRows(data: DataFrame, tableName: String)
    kuduContext.insertRows(usersDF, tableName)
  }

  /**
    * Reads the projected columns of the Kudu table as an RDD[Row] and prints
    * every row (with its Spark partition id) to stdout on the executors.
    *
    * Fix: the projection requests the "gender" column, but the original print
    * statement never used it — the column was fetched for nothing. It is now
    * included in the output. An explicit Unit return type is also declared.
    *
    * @param spark       active SparkSession providing the SparkContext
    * @param kuduContext Kudu context used to create the RDD
    * @param tableName   Kudu table to read from
    */
  def selectData(spark: SparkSession, kuduContext: KuduContext, tableName: String): Unit = {
    // Columns to project from the Kudu table:
    //   def kuduRDD(sc: SparkContext,
    //               tableName: String,
    //               columnProjection: Seq[String] = Nil): RDD[Row]
    val columnProjection: Seq[String] = Seq("id", "name", "age", "gender")
    val datasRDD: RDD[Row] = kuduContext.kuduRDD(
      spark.sparkContext, tableName, columnProjection
    )
    // Print each row; runs on the executors, so output appears in executor logs
    // when not in local mode.
    datasRDD.foreach { row =>
      println(s"p-${TaskContext.getPartitionId()}: id = ${row.getInt(0)}" +
        s", name = ${row.getString(1)}, age = ${row.getInt(2)}" +
        s", gender = ${row.getString(3)}")
    }
  }
}
