package com.bawei.gmall.realtime.app

import java.text.SimpleDateFormat
import java.util.Date

import com.alibaba.fastjson.JSON
import com.bawei.gmall.common.GmallConstants
import com.bawei.gmall.realtime.bean.StartUpLog
import com.bawei.gmall.realtime.util.KafkaUtil
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis
import org.apache.phoenix.spark._

object DauApp {
  /**
   * Spark Streaming job computing Daily Active Users (DAU):
   * consumes startup logs from Kafka, de-duplicates by device id (mid)
   * against a Redis set keyed per day, and persists the surviving
   * records to HBase via Phoenix.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("dau_app").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(5))

    // 1. Consume the startup topic from Kafka.
    val inputDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtil.getKafkaStream(GmallConstants.KAFKA_TOPIC_STARTUP, ssc)

    // 2-4. Parse each record's JSON payload into a StartUpLog and derive
    // logDate / logHour from the event timestamp.
    val startUpLogDStream = inputDStream.map {
      record => {
        val startUpLog = JSON.parseObject(record.value(), classOf[StartUpLog])
        val dates = new SimpleDateFormat("yyyy-MM-dd HH").format(new Date(startUpLog.ts)).split(" ")
        startUpLog.logDate = dates(0)
        startUpLog.logHour = dates(1)
        startUpLog
      }
    }

    // 5. Cross-batch de-duplication: drop mids already recorded in Redis
    // for the current day. Runs on the driver once per micro-batch.
    val filterDStream = startUpLogDStream.transform {
      rdd => {
        println("过滤前 ： " + rdd.count())
        val jedis = new Jedis("hadoop102", 6379)
        val dauKey = "dau:" + new SimpleDateFormat("yyyy-MM-dd").format(new Date())
        // smembers materializes the full set, so the connection can be
        // closed immediately afterwards.
        val dauSet = jedis.smembers(dauKey)
        // FIX: close the driver-side connection — it was leaked every batch.
        jedis.close()
        // Broadcast today's seen-mid set so executors filter locally
        // instead of hitting Redis per record.
        val dauBroadcast = ssc.sparkContext.broadcast(dauSet)
        val filterRdd = rdd.filter {
          startUpLog => !dauBroadcast.value.contains(startUpLog.mid)
        }
        println("过滤后 ：" + filterRdd.count())
        filterRdd
      }
    }

    // 6. In-batch de-duplication: group by mid and keep one record per device.
    val realFilterDStream = filterDStream.map(startUpLog => (startUpLog.mid, startUpLog))
      .groupByKey()
      .flatMap {
        case (_, startUpLogItr) => startUpLogItr.take(1)
      }

    // 7. Cache: the de-duplicated stream feeds two sinks below (Redis + Phoenix).
    realFilterDStream.cache()

    // 8. Record each surviving mid in Redis so subsequent batches filter it out.
    realFilterDStream.foreachRDD {
      rdd =>
        rdd.foreachPartition {
          startUpLogItr =>
            val jedis = new Jedis("hadoop102", 6379)
            for (elem <- startUpLogItr) {
              val key = "dau:" + elem.logDate
              // FIX: the mid was printed but never written, so the Redis set
              // stayed empty and cross-batch de-duplication did nothing.
              jedis.sadd(key, elem.mid)
              println("mid : : : " + elem.mid)
            }
            jedis.close()
        }
    }

    // 9. Persist the de-duplicated records to HBase through Phoenix.
    realFilterDStream.foreachRDD {
      rdd => {
        // MID  | UID  | APPID  | AREA  | OS  | CH  | TYPE  | VS  | LOGDATE  | LOGHOUR  | TS
        rdd.saveToPhoenix("GMALL_DAU",
          Seq("MID", "UID", "APPID", "AREA", "OS", "CH", "TYPE", "VS", "LOGDATE", "LOGHOUR", "TS"),
          new Configuration,
          Some("hadoop102,hadoop103,hadoop104:2181"))
      }
    }

    // 10. Start the streaming job and block until termination.
    println("启动流程")
    ssc.start()
    ssc.awaitTermination()
  }
}
