package com.gzp.one

import org.apache.spark.sql.SparkSession

object FindIp {

  /** One IP rule row: inclusive dotted-quad range [startIp, endIp] and its region name. */
  case class Ip(startIp: String, endIp: String, address: String)

  /**
   * Joins HTTP access-log client IPs against an IP-range → region rule table
   * and writes per-region hit counts as text.
   *
   * @param args optional path overrides (backward-compatible — the original
   *             hard-coded paths remain the defaults):
   *             args(0) = access log, args(1) = ip rule file, args(2) = output dir.
   */
  def main(args: Array[String]): Unit = {
    val logPath    = args.lift(0).getOrElse("/Users/admin/Downloads/spark/data/http.log")
    val rulePath   = args.lift(1).getOrElse("/Users/admin/Downloads/spark/data/ip.dat")
    val outputPath = args.lift(2).getOrElse("/Users/admin/Downloads/spark/data/output_ips")

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName(this.getClass.getCanonicalName)
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    val sc = spark.sparkContext

    try {
      // Field 1 of each '|'-separated log line is the client IP.
      val ipLogsRDD = sc.textFile(logPath).map(_.split("\\|")(1))

      // Rule file: field 0 = range start, field 1 = range end, field 7 = region name.
      val ipInfoRDD = sc.textFile(rulePath).map { line =>
        val fields = line.split("\\|")
        Ip(fields(0), fields(1), fields(7))
      }

      // Pre-convert ranges to Long and broadcast the (small) rule table so each
      // executor does a local binary search instead of a shuffle join.
      val brIpInfo = sc.broadcast(
        ipInfoRDD.map(x => (ip2Long(x.startIp), ip2Long(x.endIp), x.address)).collect()
      )

      ipLogsRDD.map { ip =>
        val index = binarySearch(brIpInfo.value, ip2Long(ip))
        // "Null" fixes the original "NUll" typo in the unmatched-IP sentinel.
        if (index != -1) brIpInfo.value(index)._3 else "Null"
      }
        .map((_, 1))
        .reduceByKey(_ + _)
        .map { case (city, count) => s"城市: $city,访问量:$count" }
        .saveAsTextFile(outputPath)
    } finally {
      // Always release Spark resources, even if the job throws.
      spark.stop()
    }
  }

  /**
   * Binary search for the rule range containing `ip`.
   *
   * Assumes `lines` is sorted ascending by range start with non-overlapping
   * ranges (the usual layout of ip.dat).
   *
   * @param lines rules as (startIpLong, endIpLong, region)
   * @param ip    the query IP as a Long
   * @return index of the matching rule, or -1 if no range contains `ip`
   */
  def binarySearch(lines: Array[(Long, Long, String)], ip: Long): Int = {
    @scala.annotation.tailrec
    def loop(low: Int, high: Int): Int =
      if (low > high) -1
      else {
        // low + (high - low) / 2 avoids the Int overflow of (low + high) / 2.
        val middle = low + (high - low) / 2
        val (start, end, _) = lines(middle)
        if (ip >= start && ip <= end) middle
        else if (ip < start) loop(low, middle - 1)
        else loop(middle + 1, high)
      }
    loop(0, lines.length - 1)
  }

  /**
   * Converts a dotted-quad IPv4 string (e.g. "1.2.3.4") to its Long value,
   * folding each octet into the accumulator from the most significant end.
   */
  def ip2Long(ip: String): Long =
    ip.split("[.]").foldLeft(0L)((acc, octet) => (acc << 8) | octet.toLong)

}
