package com.example

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SparkSession}


// One row of the IP-geolocation lookup table: the inclusive numeric IPv4
// range [startIp, endIp] maps to `city` (built from data/ip.dat in Test1.main).
case class IpInfoTable(startIp: Long, endIp: Long, city: String)

object Test1 {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName(this.getClass.getCanonicalName)
      .getOrCreate()
    val sc = spark.sparkContext

    // Read http.log and keep only the IP address of each request
    // (the 2nd '|'-separated field of each line).
    val httpLogsRDD: RDD[String] = sc.textFile("data/http.log")
      .map(_.split("\\|")(1))

    // Read ip.dat: field 0 / field 1 are the start / end of an IP range
    // (dotted-quad strings), field 7 is the city name for that range.
    val ipTableRDD = sc.textFile("data/ip.dat").map { line =>
      val fields = line.split("\\|")
      (fields(0), fields(1), fields(7))
    }

    // Broadcast the (small) range->city dictionary to every executor so the
    // per-record lookup below needs no shuffle/join.
    val bdIPTable: Broadcast[Array[IpInfoTable]] =
      sc.broadcast(ipTableRDD.map { case (start, end, city) =>
        IpInfoTable(ip2Long(start), ip2Long(end), city)
      }.collect())

    // Resolve each request IP to its city, count hits per city, and write
    // the report as a single text file.
    httpLogsRDD
      .map(ip => searchForCity(bdIPTable.value, ip2Long(ip)))
      .map(city => (city, 1))
      .reduceByKey(_ + _)
      .map { case (city, hits) => s"城市：${city}, 访问量：${hits}" }
      // coalesce(1) narrows to one partition (hence one output file) without
      // the full shuffle that repartition(1) would trigger.
      .coalesce(1)
      .saveAsTextFile("data/output/output_ips")

    // Release the SparkSession and its underlying resources.
    spark.stop()
  }

  /**
   * Convert a dotted-quad IPv4 string (e.g. "192.168.1.1") to its numeric
   * value by folding each octet into the accumulator, 8 bits at a time.
   */
  def ip2Long(ip: String): Long =
    ip.split("[.]").foldLeft(0L)((acc, octet) => octet.toLong | acc << 8L)

  /**
   * Linear scan of the broadcast dictionary for the range containing `ip`
   * (bounds are inclusive). Returns "未知城市" when no range matches.
   *
   * Fixes an off-by-one in the original loop (`0 until length - 1`), which
   * never examined the last table entry, so IPs falling in the final range
   * were wrongly reported as unknown.
   */
  def searchForCity(ipTable: Array[IpInfoTable], ip: Long): String =
    ipTable
      .find(entry => ip >= entry.startIp && ip <= entry.endIp)
      .map(_.city)
      .getOrElse("未知城市")
}
