import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/** One row of the IP database (ip.dat): an inclusive dotted-quad IPv4 range
  * [startIp, endIp] and the address/city string associated with that range. */
case class Ip(startIp: String, endIp: String, address: String)


/** Job1: counts HTTP-log hits per city. Each request IP is converted to a
  * Long and matched (binary search) against broadcast IP ranges from ip.dat;
  * the per-city counts are written as text output.
  */
object Job1 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("job1")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    import spark.implicits._

    // Field 1 of each '|'-delimited log line is the client IP.
    val httpLog: RDD[String] = sc.textFile("D:\\MyStudent\\studentCode\\SparkDemo\\JobDemo\\data\\http.log").map(_.split("\\|")(1))

    // ip.dat is '|'-delimited: fields 0/1 are the range bounds, field 7 the address.
    val ip = sc.textFile("D:\\MyStudent\\studentCode\\SparkDemo\\JobDemo\\data\\ip.dat")
      .map { line =>
        val strSplit = line.split("\\|")
        Ip(strSplit(0), strSplit(1), strSplit(7))
      }

    // Collect the (assumed sorted, non-overlapping — TODO confirm against ip.dat)
    // numeric ranges to the driver, then broadcast them so every executor can
    // do lookups without a shuffle/join.
    val ipArray: Array[(Long, Long, String)] = ip.map(x => (ipToLong(x.startIp), ipToLong(x.endIp), x.address)).collect()
    val ipBroad: Broadcast[Array[(Long, Long, String)]] = sc.broadcast(ipArray)

    httpLog.map { x =>
      val index = binarySearch(ipBroad.value, ipToLong(x))
      if (index != -1) ipBroad.value(index)._3 else "NULL"
    }.map((_, 1))
      .reduceByKey(_ + _)
      .map(x => s"城市：${x._1}, 访问量：${x._2}")
      .saveAsTextFile("D:\\MyStudent\\studentCode\\SparkDemo\\JobDemo\\data\\job1\\output_ips")
  }

  /** Converts a dotted-quad IPv4 string (e.g. "1.2.3.4") to its Long value
    * (big-endian: "1.2.3.4" -> 16909060).
    *
    * BUG FIX: the original loop computed `ipString(i).toLong | ipNum << 8L`
    * but never assigned the result back to `ipNum`, so the function always
    * returned 0 and every lookup fell through to "NULL".
    */
  def ipToLong(ip: String): Long =
    ip.split("[.]").foldLeft(0L)((acc, octet) => (acc << 8) | octet.toLong)

  /** Binary search over sorted, non-overlapping (start, end, address) ranges.
    *
    * @return the index of the range whose inclusive [start, end] contains `ip`,
    *         or -1 when no range matches (also covers an empty array).
    */
  def binarySearch(lines: Array[(Long, Long, String)], ip: Long): Int = {
    var low = 0
    var high = lines.length - 1
    while (low <= high) {
      val middle = (low + high) >>> 1 // unsigned shift avoids Int overflow of (low + high) / 2
      if (ip >= lines(middle)._1 && ip <= lines(middle)._2)
        return middle
      if (ip < lines(middle)._1) high = middle - 1
      else low = middle + 1
    }
    -1
  }
}
