import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.{SparkConf, SparkContext}

// NOTE: the original header described a different dataset (timestamp / province / city / user / ad);
// this program actually processes http.log and ip.dat:
// http.log: logs produced by users visiting the website. Record format: timestamp, IP address,
//   visited URL, request data, browser info, etc.
// ip.dat: IP-range data mapping IP ranges to locations.
// File locations: data/http.log, data/ip.dat
// Sample http.log record below. Format: timestamp, IP address, URL, request data, browser info
//20090121000132095572000|125.213.100.123|show.51.com|/shoplist.php?
//phpfile=shoplist2.php&style=1&sex=137|Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1;SV1;
//Mozilla/4.0(Compatible Mozilla/4.0(Compatible-EmbeddedWB 14.59
//http://bsalsa.com/EmbeddedWB- 14.59  from: http://bsalsa.com/ )|http://show.51.com/m
object LocateIp2 {

  // Lookup table built from ip.dat: (first three octets of a range's start IP -> city name).
  // Kept as a public object-level var to preserve the original interface; it is
  // assigned inside main. NOTE(review): a val local to main would be preferable
  // if no external caller reads this field.
  var stat0RDD: RDD[(String, String)] = null

  /**
   * Reads ip.dat and http.log, replaces each log record's IP address with the
   * city resolved from the IP table (joined on the first three octets of the
   * IP), prints the joined records and the per-city access counts, and writes
   * the rewritten log to disk as a single file.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkContext (local mode for this exercise).
    val conf = new SparkConf()
      .setAppName(this.getClass.getCanonicalName.init) // .init drops the trailing '$' of the object's class name
      .setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // 2. Build the IP-prefix -> city lookup. ip.dat is '|'-separated; field 0
    //    is the start IP of a range and field 7 is the city. Keying on the
    //    first three octets is a coarse approximation of a real range lookup.
    val ipsRdd: RDD[String] = sc.textFile("file:///D:\\BaiduNetdiskDownload\\lagoubigdata\\fourthPhrase\\大数据正式班第四阶段模块一\\scala编程\\0-讲义和代码\\代码\\sparkPartOne\\data\\ip.dat")
    stat0RDD = ipsRdd.map { line =>
      val fields = line.split("\\|")
      val prefix = fields(0).substring(0, fields(0).lastIndexOf("."))
      (prefix, fields(7))
    }

    // 3. Load the access log and keep the first six '|'-separated fields
    //    (timestamp, IP, host, path, request data, browser info).
    val lines: RDD[String] = sc.textFile("file:///D:\\BaiduNetdiskDownload\\lagoubigdata\\fourthPhrase\\大数据正式班第四阶段模块一\\scala编程\\0-讲义和代码\\代码\\sparkPartOne\\data\\http.log")
    val httpLogRDD: RDD[(String, String, String, String, String, String)] = lines.map { line =>
      val fields = line.split("\\|")
      (fields(0), fields(1), fields(2), fields(3), fields(4), fields(5))
    }

    // 4. Key each record by the first three octets of its IP (element _2) so it
    //    can be joined against the city table.
    val newHttpLog: RDD[(String, (String, String, String, String, String, String))] =
      httpLogRDD.map(rec => (rec._2.substring(0, rec._2.lastIndexOf(".")), rec))

    // Inner join: records whose IP prefix has no entry in ip.dat are dropped.
    val joinRdd = newHttpLog.join(stat0RDD)
    joinRdd.collect.foreach(println) // debug output on the driver
    println("====================")

    // 5. Replace the IP field with the resolved city, keeping the other fields.
    val newRDD = joinRdd.map {
      case (_, ((ts, _, host, path, data, browser), city)) =>
        (ts, city, host, path, data, browser)
    }

    // 6. Per-city access counts (city is element _2 of the rewritten record).
    newRDD.map(x => (x._2, 1)).reduceByKey(_ + _).collect().foreach(println)

    println("====================")
    newRDD.collect.foreach(println)
    println("====================")

    // 7. Write the rewritten log; coalesce(1) forces a single output part file.
    newRDD.coalesce(1, false).saveAsTextFile("file:///D:\\BaiduNetdiskDownload\\lagoubigdata\\fourthPhrase\\大数据正式班第四阶段模块一\\scala编程\\0-讲义和代码\\代码\\sparkPartOne\\data\\http2.log")

    // 8. Release cluster resources.
    sc.stop()
  }
}