package com.imooc.bigdata.chapter08_my.utils

import java.util

import org.apache.kudu.Schema
import org.apache.kudu.client.{CreateTableOptions, KuduClient}
import org.apache.spark.sql.{DataFrame, SaveMode}

object KuduUtils {

  /**
    * Persist a DataFrame result set into Kudu.
    *
    * The target table is dropped and recreated on every call, because the
    * kudu-spark connector used here only supports `SaveMode.Append` — without
    * the drop, repeated runs would accumulate duplicate rows.
    *
    * @param dataFrame   the result set to write
    * @param tableName   Kudu table to (re)create and write into
    * @param master      Kudu master address(es), e.g. "host:7051"
    * @param schema      Kudu schema for the new table
    * @param partitionId column name used for hash partitioning
    * @param numBuckets  number of hash partition buckets (default 3, as before)
    * @param numReplicas tablet replication factor (default 1, as before;
    *                    fine for dev, use 3 in production)
    */
  def sink (
            dataFrame:DataFrame,
            tableName:String,
            master:String,
            schema:Schema,
            partitionId:String,
            numBuckets:Int = 3,
            numReplicas:Int = 1): Unit ={

    val client = new KuduClient.KuduClientBuilder(master).build()
    try {
      // Drop an existing table first: append-only writes would otherwise
      // pile duplicate data on top of previous runs.
      if (client.tableExists(tableName)) {
        client.deleteTable(tableName)
      }

      // Table creation options: replication factor + hash partitioning.
      val options: CreateTableOptions = new CreateTableOptions()
      options.setNumReplicas(numReplicas)

      val partitionCols: util.LinkedList[String] = new util.LinkedList[String]()
      partitionCols.add(partitionId)
      options.addHashPartitions(partitionCols, numBuckets)

      // Name + schema + options are all Kudu needs to create the table.
      client.createTable(tableName, schema, options)
    } finally {
      // Always release the client: it holds connections/threads to the
      // Kudu masters and leaks them if not closed (was missing before).
      client.close()
    }

    // Write the DataFrame into the freshly created Kudu table.
    dataFrame.write
      .mode(SaveMode.Append)
      .format("org.apache.kudu.spark.kudu")
      .option("kudu.table",tableName)
      .option("kudu.master",master)
      .save()
  }
}
