package com.kuduTest

import org.apache.kudu.client.CreateTableOptions
import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object KuduSparkDemo {

  /** Simple record used as the row type for the demo table. */
  case class Person(name: String, age: Int, sex: String)

  /**
    * Entry point: builds a local SparkSession, connects to the Kudu cluster,
    * and (re)creates the demo table. The session is always stopped on exit.
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[6]")
      .appName("kudu")
      .getOrCreate()
    // Comma-separated list of Kudu master addresses.
    val kuduMasters = "192.168.100.1,192.168.100.2,192.168.100.3"
    val kuduContext: KuduContext = new KuduContext(kuduMasters, spark.sparkContext)
    try {
      // Create (or recreate) the demo table.
      val tableName: String = "students"
      ddl(kuduContext, tableName)
    } finally {
      // Release Spark resources even if table creation fails.
      spark.stop()
    }
  }


  /**
    * Creates the Kudu table, dropping any existing table with the same name first.
    *
    * The primary-key columns double as the range-partition columns (Kudu requires
    * partition columns to be a subset of the primary key), so both are derived
    * from the single `keys` definition instead of being duplicated.
    *
    * @param kuduContext Kudu context bound to the target cluster
    * @param tableName   name of the table to (re)create
    */
  def ddl(kuduContext: KuduContext, tableName: String): Unit = {
    // Drop-and-recreate semantics: the demo always starts from a fresh table.
    if (kuduContext.tableExists(tableName)) {
      kuduContext.deleteTable(tableName)
    }

    // Kudu primary-key columns must be non-nullable.
    val schema = StructType(
      StructField("name", StringType, nullable = false) ::
        StructField("age", IntegerType, nullable = false) ::
        StructField("sex", StringType, nullable = false) :: Nil
    )

    // Single source of truth for both the primary key and the partition columns.
    val keys: Seq[String] = Seq("name")

    import scala.collection.JavaConverters._
    val options: CreateTableOptions = new CreateTableOptions()
      .setRangePartitionColumns(keys.asJava)
      .setNumReplicas(1)

    kuduContext.createTable(
      tableName,
      schema,
      keys,
      options
    )
  }


  /**
    * Demonstrates the four Kudu row operations: insert, delete, upsert, update.
    *
    * The delete call passes only the key column(s), since Kudu deletes rows by
    * primary key.
    *
    * @param kuduContext Kudu context bound to the target cluster
    * @param tablename   name of an existing Kudu table matching Person's schema
    * @param spark       active session, needed for the DataFrame encoders
    */
  def crud(kuduContext: KuduContext, tablename: String, spark: SparkSession): Unit = {
    import spark.implicits._
    val df: DataFrame = Seq(Person("zhangsan", 15, "nan "), Person("lisi", 25, "nv")).toDF()
    kuduContext.insertRows(df, tablename)
    // Delete by primary key only — non-key columns must not be included.
    kuduContext.deleteRows(df.select($"name"), tablename)
    kuduContext.upsertRows(df, tablename)
    kuduContext.updateRows(df, tablename)
  }

}
