package com.scala.learn.sparkUDF

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
  * @Copyright: Shanghai Definesys Company. All rights reserved.
  * @Description: 1. 使用 join 方法查询 ip 归属地的次数
  *               2. 缺点是：效率比较慢，每一个 executor 都需要加载规则
  * @author: chuhaitao
  * @since: 2019/3/16 15:52
  * @history:
  *          1.2019/3/16 created by chuhaitao
  */
object SqlIpLocaltion {

  /**
    * Converts a dotted-quad IPv4 address string (e.g. "1.2.3.4") to its
    * unsigned 32-bit numeric value stored in a Long, by shifting the
    * accumulator left 8 bits and OR-ing in each octet.
    */
  val ip2Long = (ip: String) => {
    val fragments = ip.split("[.]")
    var ipNum = 0L
    for (i <- 0 until fragments.length) {
      ipNum = fragments(i).toLong | ipNum << 8L
    }
    ipNum
  }

  /**
    * Entry point: joins access-log IPs against IP-range rules in Spark SQL
    * to count requests per province (IP geolocation by range lookup).
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local")
      .appName("ip")
      .getOrCreate()

    import spark.implicits._

    // Load the already-cleaned access logs from parquet
    // (raw alternative: spark.read.textFile("/u01/log/access.log")).
    val accessDf: DataFrame = spark.read.parquet("/u01/log/access_parquet")

    // Register the logs as a temporary view for SQL access.
    accessDf.createTempView("access_v")

    // Load the IP rules; each line looks like "...|startNum|endNum|province|..."
    // where fields 1 and 2 are the numeric range bounds and field 3 the province.
    val ipLines: Dataset[String] = spark.read.textFile("/u01/rules/ip.txt")

    val ipRules: Dataset[(Long, Long, String)] = ipLines.map(line => {
      val fields = line.split("[|]")
      val startNum = fields(1).toLong
      val endNum = fields(2).toLong
      val province = fields(3)
      (startNum, endNum, province)
    })

    // Dataset -> DataFrame with named columns (typo fixed: startNum, not starNum).
    val rulesDF: DataFrame = ipRules.toDF("startNum", "endNum", "province")
    rulesDF.createTempView("rules")

    // BUG FIX: the UDF must be registered with the session before SQL can call
    // it; the SQL function name must match the registered name exactly.
    spark.udf.register("ip2Long", ip2Long)

    // BUG FIX: removed the stray trailing ')' (SQL syntax error) and grouped by
    // the selected non-aggregate column `province` (the original grouped only
    // by `ip` while also selecting `province`, which is invalid SQL).
    val frame: DataFrame = spark.sql(
      "select province, count(*) as cnt from access_v left join rules " +
        "on (ip2Long(ip) >= startNum and ip2Long(ip) <= endNum) group by province")

    // Materialize the result and release the session's resources.
    frame.show()
    spark.stop()
  }

}
