package com.lagoue.spark

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, DataFrameNaFunctions, SparkSession}

/**
 * @author: yehw
 * @date: 2020/10/23 19:30
 * @description: IP address lookup (join HTTP access log against IP ranges)
 */
/** One parsed row of the HTTP access log (data/http.log). */
case class Http(
  timeSamp: String,
  ip: String,
  webUrl: String,
  accessData: String,
  browserInformation: String
)

/** One row of the IP range table (data/ip.dat): range [ip1, ip2] plus its address. */
case class Ip(
  ip1: String,
  ip2: String,
  address: String
)

object homework1 {

  /**
   * Converts a dotted-quad IPv4 string (e.g. "202.102.72.10") to its numeric
   * value. Comparing IPs as raw strings is lexicographic ("9.0.0.0" sorts
   * after "100.0.0.0"), which makes range comparisons return wrong matches;
   * numeric comparison is required for a correct range join.
   */
  def ipToLong(ip: String): Long =
    ip.trim.split("\\.").foldLeft(0L)((acc, octet) => acc * 256L + octet.toLong)

  def main(args: Array[String]): Unit = {
    println("测试scala环境配置成功")
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("Spark SQL basic example")
      .config("spark.some.config.option", "some-value")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    import spark.implicits._
    import org.apache.spark.sql.functions.udf

    // HTTP access log: pipe-delimited with a header row. The schema is given
    // explicitly, so schema inference (which the original also requested via
    // "inferschema") is unnecessary and has been dropped.
    val schema = "timeSamp String, ip String, webUrl String, accessData String, browserInformation String"
    val df1 = spark.read
      .option("delimiter", "|")
      .option("header", "true")
      .schema(schema)
      .csv("data/http.log")
    df1.show(10)

    // IP range table: field 0 = range start, field 1 = range end, field 7 = address.
    // NOTE(review): field layout taken from the original code — confirm against data/ip.dat.
    val fileRDD: RDD[String] = sc.textFile("data/ip.dat")
    val linesRDD: RDD[Array[String]] = fileRDD.map(_.split("\\|"))
    val rowRDD: RDD[(String, String, String)] = linesRDD.map(line => (line(0), line(1), line(7)))
    val ips: DataFrame = rowRDD.toDF("ip1", "ip2", "address")
    ips.show(10)

    // Range join on numeric IP values. The original compared the dotted
    // strings with </> — lexicographic order is wrong for IPs, and strict
    // inequalities also excluded the range endpoints; use inclusive numeric
    // bounds instead.
    val toNum = udf(ipToLong _)
    val frame = df1.join(
      ips,
      toNum($"ip") >= toNum($"ip1") && toNum($"ip") <= toNum($"ip2"),
      "inner")
    frame.groupBy("address").count.show
    spark.stop()
  }
}
