package com.atguigu.app

import java.text.SimpleDateFormat
import java.util
import java.util.Date

import com.alibaba.fastjson.JSON
import com.atguigu.bean.StartUpLog
import com.atguigu.common.logger.GmallConstants
import com.atguigu.utils.{MykafkaUtil, RedisUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis
import org.apache.phoenix.spark._

/**
 * Requirement 1: daily active users (DAU).
 *
 * Reads start-up logs from Kafka, de-duplicates devices per day via Redis,
 * and persists the first start-up record per device to HBase through Phoenix.
 *
 * @author WangJX
 * @date 2019/11/26 18:09
 * @version 1.0
 */
object DauApp {

  def main(args: Array[String]): Unit = {
    // NOTE: original app name had a typo ("DucApp"); fixed to match the object.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("DauApp")

    val ssc = new StreamingContext(conf, Seconds(5))

    // Read the start-up log stream from Kafka.
    val dStream: InputDStream[ConsumerRecord[String, String]] = MykafkaUtil.getKafkaStream(GmallConstants.KAFKA_TOPIC_STARTUP, ssc)

    // Cross-batch dedup: transform runs driver-side once per batch interval,
    // so today's already-seen mids are re-read from Redis every batch and
    // broadcast to the executors for filtering.
    val filterDStream: DStream[ConsumerRecord[String, String]] = dStream
      .transform(rdd => {

        println("records before filter: " + rdd.count())

        // Driver-side Redis connection; smembers is evaluated eagerly here.
        val jedis: Jedis = RedisUtil.getJedisPool()
        try {
          val day: String = new SimpleDateFormat("yyyy-MM-dd").format(new Date())
          // Key prefix "duc:" kept as-is for compatibility with data already
          // stored in Redis (read and write sides must agree).
          val seenMids: util.Set[String] = jedis.smembers("duc:" + day)
          // Broadcast the (small) mid set so every executor filters locally.
          val seenMidsBC: Broadcast[util.Set[String]] = ssc.sparkContext.broadcast(seenMids)

          // Drop records whose device id was already counted today.
          val filtered: RDD[ConsumerRecord[String, String]] = rdd.filter(record => {
            val upLog: StartUpLog = JSON.parseObject(record.value(), classOf[StartUpLog])
            !seenMidsBC.value.contains(upLog.mid)
          })
          println("records after filter: " + filtered.count())

          filtered
        } finally {
          // Always release the driver-side connection, even if the batch fails.
          jedis.close()
        }
      })

    // In-batch dedup: a device may start the app several times inside one
    // batch before Redis knows about it, so group by mid and keep only the
    // earliest record per device.
    val filteredDStream: DStream[StartUpLog] = filterDStream
      .map(record => {
        val startUpLog: StartUpLog = JSON.parseObject(record.value(), classOf[StartUpLog])
        (startUpLog.mid, startUpLog)
      })
      .groupByKey()
      .map {
        case (_, logs) =>
          // minBy replaces sortBy(_.ts).take(1)(0): same result without a
          // full sort or a hand-rolled index access.
          logs.minBy(_.ts)
      }

    // Enrich each record with the date/hour at which the system processed it.
    val mapDStream: DStream[StartUpLog] = filteredDStream.map(log => {
      val parts: Array[String] = new SimpleDateFormat("yyyy-MM-dd HH").format(new Date).split(" ")
      log.logDate = parts(0)
      log.logHour = parts(1)
      log
    })

    // Cached because the stream is consumed by two sinks (Redis + HBase).
    mapDStream.cache()

    // Sink 1: record today's mids in a Redis set, one connection per partition
    // to limit connection churn.
    mapDStream.mapPartitions(iter => {
      // BUG FIX: the original called iter.next() to peek at the first record's
      // timestamp, which CONSUMED that record -- its mid was never written to
      // Redis (breaking next-batch dedup for that device) -- and then returned
      // the already-exhausted iterator downstream. Materialize the partition
      // once so we can both peek and iterate safely.
      val logs: List[StartUpLog] = iter.toList

      // Derive the Redis key date from the first record's event timestamp;
      // fall back to "now" for an empty partition.
      val ts: Long = logs.headOption.map(_.ts).getOrElse(new Date().getTime)
      val key: String = "duc:" + new SimpleDateFormat("yyyy-MM-dd").format(ts)

      val jedis: Jedis = RedisUtil.getJedisPool()
      try {
        logs.foreach(log => jedis.sadd(key, log.mid))
      } finally {
        jedis.close()
      }
      logs.iterator
    })
      .print() // action so the lazy mapPartitions side effect actually executes

    // Sink 2: persist the full records to HBase via Phoenix.
    // Requires the implicit conversions from: import org.apache.phoenix.spark._
    mapDStream.foreachRDD {
      rdd =>
        rdd.saveToPhoenix("GMALL_DAU",
          Seq("MID", "UID", "APPOD", "AREA", "OS", "CH", "TYPE", "VS", "LOGDATE", "LOGHOUR", "TS"),
          new Configuration,
          Some("hadoop105,hadoop106,hadoop107:2181"))
    }

    ssc.start()
    ssc.awaitTermination()
  }
}

