package my_project.util

import java.util

import org.apache.kudu.Schema
import org.apache.kudu.client.{CreateTableOptions, KuduClient, KuduTable}
import org.apache.spark.sql.{DataFrame, SaveMode}

object KuduUtils {

  /**
   * Persists a DataFrame to Kudu, recreating the target table first.
   *
   * Behavior: if `tableName` already exists it is dropped; a fresh table is
   * created with a single replica and 3 hash-partition buckets on the
   * `partitionId` column; the DataFrame is then appended through the
   * kudu-spark datasource. The Kudu client is always closed, even if table
   * creation or the write fails.
   *
   * NOTE(review): drop-and-recreate means any existing data in the table is
   * destroyed on every call — confirm this is intentional for all callers.
   *
   * @param data        the DataFrame whose rows are written to Kudu
   * @param tableName   name of the target Kudu table (dropped and recreated)
   * @param master      Kudu master address(es), e.g. "host:7051"
   * @param schema      Kudu schema used when (re)creating the table
   * @param partitionId name of the column used for hash partitioning
   */
  def sink(
            data: DataFrame,
            tableName: String,
            master: String,
            schema: Schema,
            partitionId: String
          ): Unit = {
    val kuduClient: KuduClient = new KuduClient.KuduClientBuilder(master).build()
    try {
      // Drop any existing table so we start from a clean schema.
      if (kuduClient.tableExists(tableName)) {
        kuduClient.deleteTable(tableName)
      }

      // Table options: single replica, 3 hash buckets on the partition column.
      val options = new CreateTableOptions()
      options.setNumReplicas(1)
      val partitionColumns = new util.LinkedList[String]()
      partitionColumns.add(partitionId)
      // addHashPartitions(List<String> columns, int buckets)
      options.addHashPartitions(partitionColumns, 3)

      // Create the table before writing.
      kuduClient.createTable(tableName, schema, options)

      // Append the DataFrame via the kudu-spark datasource.
      data.write.mode(SaveMode.Append)
        .format("org.apache.kudu.spark.kudu")
        .option("kudu.master", master)
        .option("kudu.table", tableName)
        .save()
    } finally {
      // Always release the client — previously leaked on any failure above.
      kuduClient.close()
    }
  }

}
