package com.lagoue.spark

import java.text.SimpleDateFormat
import java.util.Date

import org.apache.commons.lang3.time.FastDateFormat
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

/**
 * @author: yehw
 * @date: 2020/10/23 20:56
 * @description: Log analysis.
 */
// NOTE(review): this case class is unused by the pipeline below (which works on
// tuples), and only `ip`/`hitRate` of its fields match the columns actually
// extracted. Scala convention is UpperCamelCase (`Log`), and `requestTime` is
// parsed as a String downstream, not a java.util.Date — consider renaming/retyping
// or removing it once no external caller depends on it; TODO confirm usage.
case class log(ip: String, hitRate: String,  requestTime:Date)

object homework2 {
  /**
   * Entry point: reads the CDN access log `data/cdn.txt`, extracts
   * (ip, hitRate, day::hour) per record, and prints click counts grouped
   * by the day::hour bucket via Spark SQL.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    println("测试scala环境配置成功")
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("Spark SQL basic example")
      .config("spark.some.config.option", "some-value")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    import spark.implicits._

    // Each log line is whitespace-separated: ip, hitRate, responseTime,
    // requestTime, ... Only fields 0, 1 and 3 are needed here.
    // Filter out malformed/short lines up front so a single bad record cannot
    // abort the whole job with ArrayIndexOutOfBoundsException.
    val lines = sc.textFile("data/cdn.txt")
    val rawRDD = lines
      .map(_.split("\\s+"))
      .filter(_.length >= 4)
      .map(fields => (fields(0), fields(1), fields(3)))

    // requestTime is assumed shaped like "[dd/Mon/yyyy:HH:mm:ss" — TODO confirm
    // against the data file. The bucket key is <text before first '/'> + "::" +
    // <text between the first two ':'> (i.e. day-of-month :: hour).
    // Destructure via pattern match (the original lambda parameter `log`
    // shadowed the top-level case class) and split each field only once
    // instead of twice per record.
    val value = rawRDD.map { case (ip, hit, requestTime) =>
      val day  = requestTime.split("/")(0)
      val hour = requestTime.split(":")(1)
      (ip, hit, day + "::" + hour)
    }
    val frame = value.toDF("ip", "hit", "dayTime")
    frame.show(10)
    // Global temp views live in the reserved `global_temp` database,
    // hence the qualified name in the query below.
    frame.createGlobalTempView("logClick")
    spark.sql("select dayTime,count(ip) from global_temp.logClick group by dayTime").show(false)
    spark.close()
  }
}


