package com.lqfan.bigdata.Project.utils

import java.util

import org.apache.kudu.Schema
import org.apache.kudu.client.{CreateTableOptions, KuduClient}
import org.apache.spark.sql.{DataFrame, SaveMode}

object KuduUtils {

  /**
   * Drops (if it exists) and recreates a Kudu table, then appends the given
   * DataFrame into it via the kudu-spark connector.
   *
   * NOTE: this is a destructive "recreate" sink — any existing table with the
   * same name is deleted first, so previous data is lost by design.
   *
   * @param master       Kudu master address(es), e.g. "host1:7051,host2:7051"
   * @param tableName    name of the Kudu table to (re)create and write to
   * @param partitionKey column used for hash partitioning
   * @param schema       Kudu table schema
   * @param data         DataFrame to append into the table
   * @param numReplicas  tablet replica count (default 1 — suitable only for
   *                     dev/single-node clusters; use 3 in production)
   * @param numBuckets   number of hash partition buckets (default 3)
   */
  def sink(master: String,
           tableName: String,
           partitionKey: String,
           schema: Schema,
           data: DataFrame,
           numReplicas: Int = 1,
           numBuckets: Int = 3
          ): Unit = {
    val client: KuduClient = new KuduClient.KuduClientBuilder(master).build()
    try {
      // Recreate from scratch so schema/partitioning changes take effect.
      if (client.tableExists(tableName)) {
        client.deleteTable(tableName)
      }

      val options: CreateTableOptions = new CreateTableOptions()
      options.setNumReplicas(numReplicas)

      // addHashPartitions requires a java.util.List of column names.
      val hashColumns: util.LinkedList[String] = new util.LinkedList[String]()
      hashColumns.add(partitionKey)
      options.addHashPartitions(hashColumns, numBuckets)

      client.createTable(tableName, schema, options)
    } finally {
      // Always release the client, even if table setup fails.
      client.close()
    }

    data.write.mode(SaveMode.Append)
      .format("org.apache.kudu.spark.kudu")
      .option("kudu.master", master)
      .option("kudu.table", tableName)
      .save()
  }
}
