package main.java.ad_log

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

/**
  * AdLogDemo
  *
  * @author zhangyimin
  *  2018-11-16 下午4:22
  * @version 1.0
  */
object AdLogDemo {

  // Default input locations; both can be overridden from the command line:
  //   args(0) = click-log input path, args(1) = area input path
  private val DefaultClickLogPath =
    "hdfs://10.16.7.36:9000/flume/ad_click_area_info/20181116/events-.1542353433024"
  private val DefaultAreaPath =
    "hdfs://10.16.7.36:9000/data/input/hot_products/area"

  /**
    * Entry point: reads CSV ad-click logs and area reference data, registers
    * them as temp views, and prints click counts grouped by area, url and
    * click time.
    *
    * @param args optional overrides: args(0) = click-log path, args(1) = area path
    */
  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging so only the query output is visible.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    val clickLogPath = if (args.length > 0) args(0) else DefaultClickLogPath
    val areaPath     = if (args.length > 1) args(1) else DefaultAreaPath

    val sparkSession = SparkSession.builder().master("local").appName("AD_INFO_LOG").getOrCreate()

    try {
      val sc = sparkSession.sparkContext

      // Parse click-log CSV lines: user_id,user_ip,click_time,url,area_id
      // NOTE(review): lines are assumed well-formed (5 comma-separated fields,
      // numeric ids) — malformed input will fail the job. Confirm upstream format.
      val clickLogRdd = sc
        .textFile(clickLogPath)
        .map(_.split(","))
        .map(x => clickLog(x(0).toInt, x(1), x(2), x(3), x(4).toInt))

      // Parse area CSV lines: area_id,area_name
      val areaRdd = sc
        .textFile(areaPath)
        .map(_.split(","))
        .map(x => area(x(0).toInt, x(1)))

      // Use the session's implicits directly instead of going through the
      // legacy sqlContext wrapper; both provide the same .toDF() conversions.
      import sparkSession.implicits._
      clickLogRdd.toDF().createTempView("user_click_log")
      areaRdd.toDF().createTempView("area")

      // Count clicks per (area_name, url, click_time) over the joined views.
      sparkSession.sql("select\n  a.area_name,\n  b.url ,\n  b.click_time,\n  count(b.click_time)     click_count\nfrom area a, user_click_log b\nwhere a.area_id = b.area_id\ngroup by b.url, a.area_name,b.click_time").show
    } finally {
      // Always release Spark resources, even when the job fails mid-way.
      sparkSession.stop()
    }
  }

}


/** Reference-data row mapping a numeric area id to its human-readable name. */
case class area(
  area_id: Int,
  area_name: String
)
/** One parsed ad-click record: who clicked, from which IP, when, on what URL, and in which area. */
case class clickLog(
  user_id: Int,
  user_ip: String,
  click_time: String,
  url: String,
  area_id: Int
)