import org.apache.kudu.client.CreateTableOptions
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

/**
  * DataFrameKudu — demonstrates Spark/Kudu integration end to end:
  * creating a Kudu table, then inserting, upserting, deleting and reading
  * rows through the KuduContext API, the DataFrame read/write API,
  * Spark SQL, and the native Kudu RDD.
  *
  * @author Administrator
  * @date 2019/11/12 16:27
  */
/**
  * A person record mirroring the schema of the Kudu `people` table.
  *
  * @param id   unique identifier (the table's primary key)
  * @param name person's name
  * @param age  person's age
  */
final case class People(id: Int, name: String, age: Int)

object DataFrameKudu {

  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("DataFrameKudu")
      .getOrCreate()
    val sc: SparkContext = sparkSession.sparkContext
    sc.setLogLevel("WARN")
    // Comma-separated list of Kudu master addresses.
    val kuduMaster = "node01:7051,node02:7051,node03:7051"
    val kuduContext = new KuduContext(kuduMaster, sc)
    val tableName = "people"
    try {
      createTable(kuduContext, tableName)
      insertData2table(sparkSession, sc, kuduContext, tableName)
      getTableData(sparkSession, kuduMaster, tableName)
      upsertData(sparkSession, sc, kuduMaster, kuduContext, tableName)
      getTableData(sparkSession, kuduMaster, tableName)
      deleteData(sparkSession, sc, kuduMaster, kuduContext, tableName)
      getTableData(sparkSession, kuduMaster, tableName)
      dataFrame2kudu(sparkSession, sc, kuduMaster, tableName)
      getTableData(sparkSession, kuduMaster, tableName)
      sparkSql2Kudu(sparkSession, sc, kuduMaster, tableName)
      kuduNativeRDD(sc, kuduContext, tableName)
    } finally {
      // FIX: the session was never stopped, leaking the local SparkContext
      // (and its UI/thread pools) on exit.
      sparkSession.stop()
    }
  }

  /** Builds the option map consumed by the Kudu data source (master list + table name). */
  private def kuduOptions(kuduMaster: String, tableName: String): Map[String, String] =
    Map(
      "kudu.master" -> kuduMaster,
      "kudu.table" -> tableName
    )

  /**
    * Creates the Kudu table (id INT PK, name STRING, age INT) if it does
    * not already exist.
    *
    * @param kuduContext context used to issue DDL against Kudu
    * @param tableName   name of the table to create
    */
  private def createTable(kuduContext: KuduContext, tableName: String): Unit = {
    // Kudu requires primary-key columns to be non-nullable.
    val schema = StructType(
      StructField("id", IntegerType, nullable = false) ::
        StructField("name", StringType, nullable = false) ::
        StructField("age", IntegerType, nullable = false) :: Nil
    )
    val tablePrimaryKey = List("id")
    val options = new CreateTableOptions
    import scala.collection.JavaConverters._
    // Range-partition on the key column; with no explicit split rows this
    // yields a single unbounded range partition.
    options.setRangePartitionColumns(List("id").asJava)
    options.setNumReplicas(1)
    if (!kuduContext.tableExists(tableName)) {
      kuduContext.createTable(tableName, schema, tablePrimaryKey, options)
    }
  }

  /**
    * Inserts three sample rows into the table, silently skipping rows whose
    * primary key already exists (so repeated runs are safe).
    */
  private def insertData2table(sparkSession: SparkSession, sc: SparkContext, kuduContext: KuduContext, tableName: String): Unit = {
    val data = List(People(1, "zhangsan", 20), People(2, "lisi", 30), People(3, "wangwu", 40))
    val peopleRDD: RDD[People] = sc.parallelize(data)

    import sparkSession.implicits._
    val df: DataFrame = peopleRDD.toDF
    kuduContext.insertIgnoreRows(df, tableName)
  }

  /**
    * Deletes every row whose age is greater than 30. Kudu deletes are keyed,
    * so the DataFrame handed to deleteRows must contain only primary-key
    * columns — hence the `select id` projection.
    */
  private def deleteData(sparkSession: SparkSession, sc: SparkContext, kuduMaster: String, kuduContext: KuduContext, tableName: String): Unit = {
    import org.apache.kudu.spark.kudu._
    val dataFrame = sparkSession.read.options(kuduOptions(kuduMaster, tableName)).kudu
    // FIX: createTempView throws AnalysisException when the view already
    // exists (e.g. if this method runs twice in one session);
    // createOrReplaceTempView is idempotent, matching sparkSql2Kudu below.
    dataFrame.createOrReplaceTempView("temp")
    // Project only the primary key of the rows to delete.
    val result: DataFrame = sparkSession.sql("select id from temp where age >30")
    result.show()
    kuduContext.deleteRows(result, tableName)
  }

  /**
    * Upserts sample rows: a row whose primary key already exists is updated,
    * otherwise it is inserted. (The original comment claimed a missing key
    * would error — upsertRows inserts it instead.)
    */
  private def upsertData(sparkSession: SparkSession, sc: SparkContext, kuduMaster: String, kuduContext: KuduContext, tableName: String): Unit = {
    import sparkSession.implicits._
    val data = List(People(1, "zhangsan", 60), People(6, "tom", 30))
    val dataFrame: DataFrame = sc.parallelize(data).toDF
    kuduContext.upsertRows(dataFrame, tableName)
  }

  /**
    * Reads the whole Kudu table through the DataFrame API and prints it.
    *
    * @param sparkSession active session used to build the reader
    * @param kuduMaster   comma-separated Kudu master addresses
    * @param tableName    table to read
    */
  private def getTableData(sparkSession: SparkSession, kuduMaster: String, tableName: String): Unit = {
    import org.apache.kudu.spark.kudu._
    sparkSession.read.options(kuduOptions(kuduMaster, tableName)).kudu.show()
  }

  /**
    * Writes a DataFrame to the Kudu table via the data-source API and prints
    * the resulting table contents. The Kudu source only supports
    * SaveMode.Append for writes.
    */
  private def dataFrame2kudu(sparkSession: SparkSession, sc: SparkContext, kuduMaster: String, tableName: String): Unit = {
    val options = kuduOptions(kuduMaster, tableName)
    val data = List(People(7, "jim", 30), People(8, "xiaoming", 40))
    import sparkSession.implicits._
    val dataFrame: DataFrame = sc.parallelize(data).toDF
    import org.apache.kudu.spark.kudu._
    dataFrame.write.options(options).mode(SaveMode.Append).kudu
    sparkSession.read.options(options).kudu.show()
  }

  /**
    * Uses Spark SQL to insert rows into the Kudu-backed view and query them:
    * registers the sample data as temp1, the Kudu table as temp2, then runs
    * an INSERT INTO ... SELECT followed by a filtered SELECT.
    */
  private def sparkSql2Kudu(sparkSession: SparkSession, sc: SparkContext, kuduMaster: String, tableName: String): Unit = {
    val options = kuduOptions(kuduMaster, tableName)
    val data = List(People(10, "小张", 30), People(11, "小王", 40))
    import sparkSession.implicits._
    val dataFrame: DataFrame = sc.parallelize(data).toDF
    dataFrame.createOrReplaceTempView("temp1")
    import org.apache.kudu.spark.kudu._
    sparkSession.read.options(options).kudu.createOrReplaceTempView("temp2")
    sparkSession.sql("insert into table temp2 select * from temp1")
    sparkSession.sql("select * from temp2 where age > 30").show()
  }

  /**
    * Reads (name, age) pairs through the native Kudu RDD API and prints them.
    * NOTE(review): foreach(println) is an action executed on the executors;
    * the output is only visible here because the app runs with master local[2].
    */
  private def kuduNativeRDD(sc: SparkContext, kuduContext: KuduContext, tableName: String): Unit = {
    val kuduRDD: RDD[Row] = kuduContext.kuduRDD(sc, tableName, Seq("name", "age"))
    // FIX: RDD.collect(pf) keeps only rows matching the expected types;
    // the original bare map over a partial function would throw MatchError
    // on any row that did not match.
    val result: RDD[(String, Int)] = kuduRDD.collect {
      case Row(name: String, age: Int) => (name, age)
    }
    result.foreach(println)
  }
}
