package com.lagou.no2

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * CDN access-log analysis job.
 *
 * Reads a space-delimited access log (data/cdn.txt), registers it as a temp
 * view, and writes three single-file CSV reports under data/cdn/:
 *   1. request count per distinct IP
 *   2. distinct-IP count per .mp4 video
 *   3. traffic per hour
 */
object No2 {
    def main(args: Array[String]): Unit = {
        // Reduce Spark's console noise to warnings only.
        Logger.getLogger("org").setLevel(Level.WARN)

        // Build a local SparkSession named after this class.
        val spark = SparkSession.builder()
                .appName(s"${this.getClass.getCanonicalName}")
                .master("local[*]")
                .getOrCreate()

        // Read the headerless, space-delimited log and name the columns
        // explicitly. (Note: "broswer" typo is kept — downstream consumers
        // may already depend on that column name.)
        val df: DataFrame = spark.read
                .option("header", "false")
                .option("inferSchema", "true") // canonical casing; option keys are case-insensitive
                .option("delimiter", " ")
                .csv("data/cdn.txt")
                .toDF("ip", "hit", "hitNum", "dateTime", "timezone", "request", "status", "requestNum", "agent", "broswer")

        // Register a temp view so the aggregations below can be expressed in SQL.
        df.createOrReplaceTempView("cdn")

        // 1) Request count per distinct IP, written as one CSV file.
        //    FIX: alias the aggregate so the CSV header is "requestCount"
        //    instead of the literal "count(1)".
        spark.sql(
            """
              |select ip, count(1) as requestCount from cdn group by ip
            """.stripMargin)
                .repartition(1) // collapse to a single output file
                .write
                .format("csv")
                .mode("overwrite")
                .option("header", "true")
                .save("data/cdn/aloneIp")

        // 2) Distinct-IP count per .mp4 video.
        //    FIX: escape the dot in the regex ('\\.' in the SQL literal);
        //    the original '.' matched ANY character before "mp4".
        //    Group index 0 keeps the full match, e.g. "141081.mp4".
        spark.sql(
            """
              |select regexp_extract(request, '([0-9]+)\\.mp4', 0) as mp4Name
              |       ,count(distinct ip) as ipNum
              |from cdn
              |where request like '%mp4%'
              |group by regexp_extract(request, '([0-9]+)\\.mp4', 0)
            """.stripMargin)
                .repartition(1)
                .write
                .format("csv")
                .mode("overwrite")
                .option("header", "true")
                .save("data/cdn/videoAloneIp")

        // 3) Traffic per hour. dateTime looks like "dd/MMM/yyyy:HH:mm:ss",
        //    so split(...,':')[1] extracts the hour field.
        //    NOTE(review): sum/1024/1024 converts to MiB but the label says
        //    'G' — confirm the intended unit of requestNum before relying on
        //    this figure; label kept unchanged to preserve existing output.
        spark.sql(
            """
              |select split(dateTime,':')[1] as hour
              |,concat(round(sum(requestNum)/1024/1024,2),'G') as traffic
              |from cdn
              |group by split(dateTime,':')[1]
            """.stripMargin)
                .repartition(1)
                .write
                .format("csv")
                .mode("overwrite")
                .option("header", "true")
                .save("data/cdn/hourTraffic")

        // Release all Spark resources.
        spark.stop()
    }
}
