package tech.ch.udf

import org.apache.spark.sql.SparkSession

object UdfMain {

  /**
   * Entry point: registers the IP-related UDFs, loads and broadcasts the IP
   * address library, then runs a sample query over mock data to exercise
   * `ipv4_to_num`, `ipv4_full`, and `ip_search`.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").getOrCreate()

    try {
      val sparkIpUDF = new SparkIpUDF(spark)

      // Register the plain (stateless) UDFs.
      sparkIpUDF.registerCommon()

      // Register the UDF backed by file-loaded data:
      // first load the IP address library.
      val ipLibDF = spark.read.format("json").load("data/ip/ip_lib_thin")
      // Normalize the library rows and collect them into a local array.
      val ipLibArr = ipLibDF.selectExpr(
          "ipv4_full(start_ip) as start_ip",
          "ipv4_full(end_ip) as end_ip",
          "area_code"
        ).repartition(1)
        .orderBy("start_ip") // sorted by start IP so the lookup UDF can rely on order
        .rdd.map(x => (x.getString(0), x.getString(1), x.getString(2)))
        .collect()
      // The file-loaded array MUST be broadcast before use inside a UDF;
      // otherwise every task serializes its own copy, which performs very badly.
      val ipLibArrBroadcast = spark.sparkContext.broadcast(ipLibArr)
      // Register the IP-library lookup UDF on top of the broadcast data.
      sparkIpUDF.registerIpSearch(ipLibArrBroadcast)

      // Build mock test data.
      val mockData = List("27.186.0.2", "159.226.222.3")
      // Convert it to a DataFrame (column "source_ip") and register view "test".
      import spark.implicits._
      val df = spark.sparkContext.makeRDD(mockData).toDF("source_ip")
      df.createOrReplaceTempView("test")

      val newDF = spark.sql(
        """
          |select source_ip,
          |ipv4_to_num(source_ip) as source_ip_num,
          |ipv4_full(source_ip) as source_ip_full,
          |ip_search(source_ip) as ip_area
          |from test
        """.stripMargin)

      newDF.show()
    } finally {
      // Release the SparkSession even if the job fails (original leaked it).
      spark.stop()
    }
  }
}
