package com.scala.learn.sparkUDF

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * @Copyright: Shanghai Definesys Company. All rights reserved.
  * @Description: 1. Use a broadcast variable to optimize the IP-range lookup.
  *               2. Compute province hit counts from the already-cleaned log data.
  * @author: chuhaitao
  * @since: 2019/3/16 19:42
  * @history:
  *          1. 2019/3/16 created by chuhaitao
  */
object SqlIpLocation2 {

  def main(args: Array[String]): Unit = {

    // NOTE(review): master was previously the empty string "", which fails at
    // session creation. "local[*]" keeps the job runnable standalone; drop
    // .master(...) entirely when submitting via spark-submit.
    val spark = SparkSession.builder().appName("ip")
      .master("local[*]")
      .getOrCreate()

    // 1. Load the IP-range rule file and collect it to the driver.
    //    Expected pipe-delimited layout per line: field(1)=startNum,
    //    field(2)=endNum, field(3)=province.
    //    TODO confirm the rules path — "/" looks like a placeholder.
    val lines: RDD[String] = spark.sparkContext.textFile("/")

    val rules: Array[(Long, Long, String)] = lines.map { line =>
      val fields: Array[String] = line.split("[|]")
      (fields(1).toLong, fields(2).toLong, fields(3))
    }.collect()

    // 2. Broadcast the rules so each executor holds one read-only copy
    //    instead of shipping the array with every task.
    val broad: Broadcast[Array[(Long, Long, String)]] =
      spark.sparkContext.broadcast(rules)

    // 3. Read the already-cleaned access log (parquet) and expose it as a view.
    val frame: DataFrame = spark.read.parquet("/log_parquet")
    frame.createTempView("log")

    // 4. Register a UDF mapping a dotted-quad IP string to a province name.
    spark.udf.register("ip2Province", (ip: String) => {
      // Convert "a.b.c.d" into a single Long (big-endian byte order).
      val parts = ip.split("[.]")
      var ipNum = 0L
      for (part <- parts) {
        ipNum = part.toLong | ipNum << 8L
      }
      // Binary-search the broadcast ranges for the interval containing ipNum.
      val localRules: Array[(Long, Long, String)] = broad.value
      val index = BinarySearch.binarySearch(localRules, ipNum)
      // Guard against a miss: the original indexed the array with a negative
      // result and threw ArrayIndexOutOfBoundsException for unmatched IPs.
      if (index >= 0 && index < localRules.length) localRules(index)._3
      else "unknown"
    })

    // 5. Count hits per province. The original query combined count(*) with a
    //    non-aggregated expression and no GROUP BY, which fails SQL analysis.
    val res: DataFrame = spark.sql(
      "select ip2Province(ip) as province, count(*) as cnt from log group by ip2Province(ip)")

    res.show()

    // Release the session's resources before the driver exits.
    spark.stop()
  }

}
