package com.chb.dmp.etl

import org.apache.kudu.client.CreateTableOptions
import org.apache.kudu.{ColumnSchema, Schema, Type}
import org.apache.spark.sql.{Column, Dataset, Row, SparkSession}

// pmt.json https://www.cnblogs.com/wh984763176/p/13387479.html
/**
 * ETL driver: reads the raw pmt.json dataset, enriches each record with a
 * city resolved from its IP (via [[IPProcessor]]), and persists the result
 * into a Kudu table created on the fly.
 *
 * Pipeline: 1) build SparkSession  2) load JSON  3) process  4) sink to Kudu.
 */
object ETLRunner {
  def main(args: Array[String]): Unit = {
    // Implicit extension: SparkSession gains createKuduTable / Dataset gains saveToKudu.
    import com.chb.dmp.utils.KuduHelper._

    // 1. Create the SparkSession.
    // Implicit extension: Builder gains loadConfig() to pull settings from config files.
    import com.chb.dmp.utils.SparkConfigHelper._
    val spark = SparkSession.builder()
      .appName("pmt json etl")
      .master("local[3]")
      .loadConfig()
      .getOrCreate()

    // 2. Read the source dataset.
    val source = spark.read.json("dataset/pmt.json")
    source.show()

    // 3. Transform: each processor is invoked here; IPProcessor adds a `city`
    //    column derived from the record's IP address.
    val dsWithCity = IPProcessor.process(source)

    import spark.implicits._
    val selectRows: Seq[Column] = Seq('uuid, 'ip, 'city)
    val dsWithCitySelect: Dataset[Row] = dsWithCity.select(selectRows: _*)

    // 4. Sink to Kudu.
    // Table name can be overridden via the first CLI argument; must be non-empty
    // (Kudu rejects empty table names).
    val tableName = if (args.nonEmpty && args(0).nonEmpty) args(0) else "tb_pmt_city"
    import scala.collection.JavaConverters._
    // Schema must match the columns we actually write (uuid, ip, city), and the
    // hash-partition column must be part of the primary key — here `uuid`.
    val schema = new Schema(List(
      new ColumnSchema.ColumnSchemaBuilder("uuid", Type.STRING).key(true).build(),
      new ColumnSchema.ColumnSchemaBuilder("ip", Type.STRING).key(false).build(),
      new ColumnSchema.ColumnSchemaBuilder("city", Type.STRING).key(false).build()
    ).asJava)
    val options = new CreateTableOptions().addHashPartitions(Seq("uuid").asJava, 2).setNumReplicas(1)
    spark.createKuduTable(tableName, schema, options)

    dsWithCitySelect.saveToKudu(tableName)
  }

}
