package com.zhao.biz.offline.sparkcore

import java.text.SimpleDateFormat
import java.util.{Calendar, Date}

import com.zhao.common.{CommonData, EventType}
import com.zhao.dao.impl.GameAnaylysisResultImpl
import com.zhao.entity.GameAnaylysisResultBean
import com.zhao.utils.CommonUtils
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.sparkContextFunctions

import scala.collection.mutable.ArrayBuffer

/**
 * Description: Offline Spark Core job that computes daily game metrics
 * (new users, active users, and day-1..7 retention rates) from game logs
 * stored in Elasticsearch. <br/>
 * Copyright (c) 2020, Zhao <br/>
 * This program is protected by copyright laws. <br/>
 * Date: 2020/12/14 15:01
 *
 * @author 柒柒
 * @version : 1.0
 */

object HotBloodOfflineCal {

  /**
   * Entry point. Expects exactly one argument: the base date (e.g. 2018-02-01).
   * Computes new-user count, active-user count and day-1..7 retention rates,
   * then (optionally) persists them to the RDBMS.
   */
  def main(args: Array[String]): Unit = {

    // 1. Reject illegal arguments: exactly one argument (the base date) is required.
    if (args == null || args.length != 1){
      println(
        """
          |警告!
          |请录入参数!<基准日>,如2018-02-01
          |""".stripMargin)
      sys.exit(-1)
    }

    // 2. Extract the base date argument.
    val Array(baseDate) = args
    println(s"基准日:$baseDate")

    // SparkSession / SparkContext
    val (spark, sc) = getSparkContext

    // Broadcast the base date so executor closures can read it cheaply.
    val bcBaseDate = sc.broadcast(baseDate)

    // Read and cache the raw (user, eventType, time, ip) events from ES.
    val rddFromES: RDD[(String, String, String, String)] = readDataFromES(sc)

    // a) Users newly registered on the base date.
    val newUserRDD: RDD[(String, String)] = calNewAddUser(sc, rddFromES, bcBaseDate, 2).cache
    val newUserCnt: Long = newUserRDD.count()
    println(s"新增用户是:$newUserCnt")

    // b) Active users on the base date.
    val activeUserCnt: Long = calActiveUser(sc, rddFromES, bcBaseDate, 2)
    println(s"活跃用户数:$activeUserCnt")

    // c) Retention rate for day 1..7 after the base date:
    //    (base-date registrations that log in on day N) / (base-date registrations)
    val container: ArrayBuffer[String] = ArrayBuffer[String]()
    for (i <- 2 to 8){
      val nextDayStayRate: Double = calNextDayStayRate(sc, newUserCnt, newUserRDD, rddFromES, bcBaseDate, i)
      val stayRate: String = f"${nextDayStayRate * 100}%.2f".concat("%")
      // BUG FIX: the formatted rate was computed but never collected, so
      // `container` stayed empty and save2DB (when enabled) would throw
      // IndexOutOfBoundsException on container(0).
      container += stayRate
    }
    println(s"留存率是:$container")

    // Persist the computed metrics into the RDBMS.
    //save2DB(baseDate,newUserCnt,activeUserCnt,container)

    // Release Spark resources before exiting.
    spark.stop()
  }

  /**
   * Persists one day's computed metrics (new-user count, active-user count
   * and the seven formatted retention rates) into the RDBMS via the DAO layer.
   *
   * @param baseDate      base date the metrics belong to, e.g. 2018-02-01
   * @param newUserCnt    number of users registered on the base date
   * @param activeUserCnt number of users active on the base date
   * @param container     seven retention-rate strings (day 1 .. day 7)
   */
  def save2DB(baseDate: String, newUserCnt: Long, activeUserCnt: Long, container: ArrayBuffer[String]) = {

    // Trim all seven rate strings up front instead of element by element.
    val rates = container.map(_.trim())

    // DAO instance used to write the result row.
    val dao: GameAnaylysisResultImpl = new GameAnaylysisResultImpl

    // Assemble the result bean expected by the DAO.
    val bean: GameAnaylysisResultBean = new GameAnaylysisResultBean(
      baseDate,
      newUserCnt,
      activeUserCnt,
      rates(0),
      rates(1),
      rates(2),
      rates(3),
      rates(4),
      rates(5),
      rates(6)
    )

    dao.save(bean)
  }

  /**
   * 计算留存率
   * @param sc
   * @param newUserCnt
   * @param newUserRDD
   * @param rddFromES
   * @param bcBaseDate
   * @param dateFlg
   */
  /**
   * Computes the day-N retention rate:
   * (base-date registrations that also log in on day N) / (base-date registrations).
   *
   * @param sc         Spark context (forwarded to the filter helper)
   * @param newUserCnt number of users registered on the base date (denominator)
   * @param newUserRDD (user, "") pairs registered on the base date
   * @param rddFromES  raw (user, eventType, time, ip) events
   * @param bcBaseDate broadcast base date string, e.g. 2018-02-01
   * @param dateFlg    day-offset flag: 2 -> day 1, 3 -> day 2, ... 8 -> day 7
   * @return retention rate in [0.0, 1.0]; 0.0 when there are no new users
   */
  def calNextDayStayRate(sc: SparkContext, newUserCnt: Long, newUserRDD: RDD[(String, String)], rddFromES: RDD[(String, String, String, String)], bcBaseDate: Broadcast[String], dateFlg: Int) = {
    // IMPROVEMENT: check the denominator BEFORE launching the join job.
    // The original guarded against division by zero only after the join
    // and count had already run — wasted work when the answer is 0.0 by
    // definition (and dividing would yield NaN).
    if (newUserCnt == 0) {
      0.0
    } else {
      // Users registered on the base date.
      val baseDateRDD: RDD[(String, String)] = newUserRDD
      // Users who logged in within the day-N window (eventFlg = 1).
      val nextDayLoginRDD: RDD[(String, String)] = calBaseDayNewAddRDD(sc, rddFromES, bcBaseDate, 1, dateFlg)
      // (user, "") join (user, "") keeps only users present in both sets;
      // counting the join directly (the former .map(_._1) was a no-op for count).
      val nextDayStayCnt: Long = baseDateRDD.join(nextDayLoginRDD).count()
      nextDayStayCnt.toDouble / newUserCnt
    }
  }

  /**
   * 计算活跃用户
   * @param sc
   * @param rddFromES
   * @param bcBaseDate
   * @param dateFlg
   */
  /**
   * Counts the active users: distinct users with a REGISTER or LOGIN event
   * between the start of the base date and the end of the day-(dateFlg-1)
   * window after it.
   *
   * @param sc         Spark context (forwarded to the filter helper)
   * @param rddFromES  raw (user, eventType, time, ip) events
   * @param bcBaseDate broadcast base date string, e.g. 2018-02-01
   * @param dateFlg    day-offset flag (2 -> base day through next-day end)
   * @return number of distinct active users
   */
  def calActiveUser(sc: SparkContext, rddFromES: RDD[(String, String, String, String)], bcBaseDate: Broadcast[String], dateFlg: Int) = {
    // eventFlg = 2 selects both REGISTER and LOGIN events within the window;
    // the resulting RDD is already distinct per user, so count() is the answer.
    calBaseDayNewAddRDD(sc, rddFromES, bcBaseDate, 2, dateFlg).count()
  }

  /**
   * 获得指定日的毫秒值
   * @param date
   * @param distanceDay
   */
  /**
   * Returns the epoch-millisecond timestamp of `date` shifted by
   * `distanceDay` calendar days (negative values shift backwards).
   *
   * @param date        reference date
   * @param distanceDay number of days to add
   * @return epoch milliseconds of the shifted instant
   */
  def getSpecialDayMills(date: Date, distanceDay: Int) = {
    val cal: Calendar = Calendar.getInstance()
    cal.setTime(date)
    cal.add(Calendar.DATE, distanceDay)
    cal.getTimeInMillis
  }

  /**
   * 获得基准日以及下一日对应的毫秒值,以及后天对应的毫秒值
   *
   * @param bcBaseDate
   * @param dateFlg 日期标志值
   *
   * 次日 二日 三日 四日 五日 六日 七日
   * --------------------------------
   * 2    3    4    5    6   7    8
   */
  /**
   * Converts the broadcast base date into four epoch-millisecond boundaries:
   * (base-day start, next-day start, window start, window end), where the
   * window covers day (dateFlg - 1) .. day dateFlg after the base date.
   *
   * dateFlg mapping: day1 day2 day3 day4 day5 day6 day7
   * -------------------------------------------------
   *                   2    3    4    5    6    7    8
   *
   * @param bcBaseDate broadcast base date string, e.g. 2018-02-01
   * @param dateFlg    day-offset flag as shown above
   */
  def getBaseDayAndNextDayMillis(bcBaseDate: Broadcast[String], dateFlg: Int) = {

    // Anchor the base date at midnight and parse with the configured pattern.
    val formatter: SimpleDateFormat = new SimpleDateFormat(CommonUtils.getPropertiesValueBykey(CommonData.TIME_PATTERN))
    val baseDay: Date = formatter.parse(bcBaseDate.value.concat(" 00:00:00"))

    val baseDateMillis: Long = baseDay.getTime
    val nextDayMillis: Long = getSpecialDayMills(baseDay, 1)
    // Start and end of the day-N window, relative to the base date.
    val windowStartMillis: Long = getSpecialDayMills(baseDay, dateFlg - 1)
    val windowEndMillis: Long = getSpecialDayMills(baseDay, dateFlg)

    (baseDateMillis, nextDayMillis, windowStartMillis, windowEndMillis)
  }

  /**
   * 获得基准日新增用户的RDD
   * @param sc
   * @param rddFromES
   * @param bcBaseDate
   * @param eventFlg 0->注册用户;1->次日登录用户;2->活跃用户
   * @param dateFlg
   */
  /**
   * Builds the RDD of distinct users whose events match the requested
   * filter, emitted as (user, "") pairs ready for join.
   *
   * @param sc         Spark context used to broadcast the time window
   * @param rddFromES  raw (user, eventType, time, ip) events
   * @param bcBaseDate broadcast base date string, e.g. 2018-02-01
   * @param eventFlg   0 -> registered on the base day;
   *                   1 -> logged in within the day-N window;
   *                   2 -> registered or logged in from base-day start
   *                        through the window end (active users)
   * @param dateFlg    day-offset flag (2 -> day 1, ..., 8 -> day 7)
   */
  def calBaseDayNewAddRDD(sc: SparkContext, rddFromES: RDD[(String, String, String, String)], bcBaseDate: Broadcast[String], eventFlg: Int, dateFlg: Int) = {

    // Compute the four window boundaries once on the driver and broadcast
    // them, saving per-executor memory versus capturing them separately.
    val tupleTime: (Long, Long, Long, Long) = getBaseDayAndNextDayMillis(bcBaseDate, dateFlg)
    val bcTime: Broadcast[(Long, Long, Long, Long)] = sc.broadcast(tupleTime)

    // IMPROVEMENT: resolve the time pattern ONCE on the driver. The original
    // called CommonUtils.getPropertiesValueBykey for every single record
    // inside the filter closure; the resulting String is captured by the
    // closure and shipped to the executors instead.
    val timePattern: String = CommonUtils.getPropertiesValueBykey(CommonData.TIME_PATTERN2)

    val nowDayNewAddRDD: RDD[(String, String)] = rddFromES.filter(perEle => {
      // Window boundaries from the broadcast variable.
      val (baseDateMillis, nextDayMillis, nextDayStartMillis, nextDayEndMillis) = bcTime.value

      // Event timestamp in epoch millis. SimpleDateFormat is not thread-safe,
      // so a fresh instance is created per record rather than shared.
      val timeMills: Long = new SimpleDateFormat(timePattern).parse(perEle._3).getTime

      // Event type of this record.
      val eventType = perEle._2

      // Apply the requested filter.
      eventFlg match {
        case 0 => timeMills >= baseDateMillis && timeMills < nextDayMillis &&
          eventType.equals(EventType.REGISTER.getEventType)
        case 1 => (timeMills >= nextDayStartMillis && timeMills < nextDayEndMillis) &&
          eventType.equals(EventType.LOGIN.getEventType)
        case 2 => (timeMills >= baseDateMillis && timeMills < nextDayEndMillis) &&
          (eventType.equals(EventType.REGISTER.getEventType) || eventType.equals(EventType.LOGIN.getEventType))
        // IMPROVEMENT: exhaustive match — an unexpected flag now simply
        // matches nothing instead of throwing MatchError on the executors.
        case _ => false
      }
    }).map(_._1)
      .distinct
      .map((_, ""))
    nowDayNewAddRDD
  }

  /**
   * 计算新增用户的RDD
   * @param sc
   * @param rddFromES
   * @param bcBaseDate
   * @param dateFlg
   * @return
   */
  /**
   * Builds the RDD of users who registered on the base date, as (user, "")
   * pairs ready for join.
   *
   * @param sc         Spark context (forwarded to the filter helper)
   * @param rddFromES  raw (user, eventType, time, ip) events
   * @param bcBaseDate broadcast base date string, e.g. 2018-02-01
   * @param dateFlg    day-offset flag (forwarded to the filter helper)
   * @return RDD of (user, "") pairs registered on the base date
   */
  def calNewAddUser(sc: SparkContext, rddFromES: RDD[(String, String, String, String)], bcBaseDate: Broadcast[String], dateFlg: Int) = {
    // eventFlg = 0 selects registration events on the base day only.
    calBaseDayNewAddRDD(sc, rddFromES, bcBaseDate, 0, dateFlg)
  }

  /**
   * 获得SparkContext和Spark的实例
   * @return
   */
  /**
   * Builds (or reuses) the local SparkSession configured for the
   * Elasticsearch connector and returns it together with its SparkContext.
   *
   * @return (SparkSession, SparkContext) pair
   */
  private def getSparkContext ={
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName(this.getClass.getSimpleName)
      .config("es.nodes", "node01,node02,node03")
      // BUG FIX: the elasticsearch-hadoop connector reads the key "es.port";
      // the bare key "port" was silently ignored and only worked because
      // 9200 happens to be the connector's default.
      .config("es.port", "9200")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext
    (spark,sc)
  }

  /**
   * 从es分布式集群中读取数据,并对数据进行标准化,清洗掉脏数据,将最终的结果缓存起来
   * @param sc
   * @return
   */
  /**
   * Reads the full "gamelog" index from Elasticsearch, projects each record
   * to (userName, eventType, time, ip), drops records whose ip field is not
   * a plausible IPv4 address, and caches the resulting RDD.
   *
   * @param sc Spark context used to create the ES-backed RDD
   * @return cached RDD of (userName, eventType, time, ip) tuples
   */
  private def readDataFromES(sc: SparkContext) = {
    // match_all query: pull every document from the index.
    val query =
      """
        |{
        |    "query":{
        |     "match_all":  {}
        |    }
        |}
        |""".stripMargin

    import org.elasticsearch.spark._

    // IPv4-looking strings only; matches() anchors the whole string, so no
    // explicit ^...$ is needed.
    val ipv4Pattern = """(\d{1,3}\.){3}\d{1,3}"""

    val rddFromES: RDD[(String, String, String, String)] = sc.esRDD("gamelog", query)
      .map { case (_, record) =>
        // Project the fields we need; absent fields default to "".
        def field(key: String): String = record.getOrElse(key, "").asInstanceOf[String]
        (field("userName"), field("event_type"), field("time"), field("ip"))
      }
      // Drop dirty records lacking a usable ip field.
      .filter { case (_, _, _, ip) => ip.matches(ipv4Pattern) }
      .cache()

    rddFromES
  }
}













