package liuzhou.spark.project

import liuzhou.spark.beans.{CategaryClickCount, CategraySearchClickCount, ClickLog}
import liuzhou.spark.common.DataUtils
import liuzhou.spark.dao.{CategaryClickCountDAO, CategraySearchClickCountDAO}

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable.ListBuffer
import scala.util.Try
/**
  * Streaming statistics job: consumes access-log lines from Kafka topic `flumeTopic`,
  * cleans them into [[ClickLog]] records, and writes two aggregates via the DAOs:
  *   1. clicks per day per category
  *   2. clicks per day per category, broken down by referer host
  *
  * Entry points: `liuzhou.spark.project.StatStreamingAPP` / `StatStreamingAPP#main`.
  */
object StatStreamingAPP {

  def main(args: Array[String]): Unit = {
    start()
  }

  /** Builds the streaming pipeline and blocks until termination. */
  def start(): Unit = {
    val ssc = new StreamingContext("local[*]", "liuzhou", Seconds(5))
    // Quiet Spark's INFO chatter before any work is wired up.
    ssc.sparkContext.setLogLevel("WARN")

    // Kafka consumer configuration (offsets managed manually, start from latest).
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "node1:9092,node2:9092,node3:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "aqi_1",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("flumeTopic")

    // Raw record example (tab-separated):
    // 167.143.124.132	2019-06-10 01:05:01	"GET pianhua/130 HTTP/1.0"	-	200
    val logStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    ).map(_.value())

    // Parse and clean the raw lines. Parsing is wrapped in Try so that a single
    // malformed line (missing field, non-numeric id/status) is dropped instead of
    // throwing and killing the streaming job. Records without a category id
    // (classId == 0) are filtered out, matching the original best-effort semantics.
    val cleanLog = logStream.flatMap { line =>
      Try {
        val infos = line.split("\t")
        val url = infos(2).split(" ")(1)
        // The category id is only extractable from urls shaped like "www.../<id>".
        val classId = if (url.startsWith("www")) url.split("/")(1).toInt else 0
        ClickLog(infos(0), DataUtils.parseToMin(infos(1)), classId, infos(3), infos(4).toInt)
      }.toOption
    }.filter(log => log.classId != 0)

    // Requirement 1: clicks per day per category.
    // rowKey format: "<day>_<classId>" (unchanged from the original job).
    cleanLog.map { log =>
      (log.time.trim + "_" + log.classId, 1)
    }.reduceByKey(_ + _).foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        // Buffer the partition's counts and persist them in one DAO call.
        val batch = new ListBuffer[CategaryClickCount]
        partition.foreach { case (rowKey, count) =>
          batch.append(CategaryClickCount(rowKey, count))
        }
        CategaryClickCountDAO.save(batch)
      }
    }

    // Requirement 2: per-category traffic broken down by referer host.
    // rowKey format: "<day>_<host>_<classId>" (unchanged from the original job).
    cleanLog.map { log =>
      val url = log.refer.replace("//", "/")
      val splits = url.split("/")
      // splits(0) is the scheme ("https:"), splits(1) is the host.
      val host = if (splits.length > 2) splits(1) else ""
      (host, log.time, log.classId)
    }.filter(_._1 != "").map { case (host, time, classId) =>
      (time.trim + "_" + host + "_" + classId, 1)
    }.reduceByKey(_ + _).foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        val batch = new ListBuffer[CategraySearchClickCount]
        partition.foreach { case (rowKey, count) =>
          batch.append(CategraySearchClickCount(rowKey, count))
        }
        CategraySearchClickCountDAO.save(batch)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
