package com.atguigu.sparkstreaming.apps

import java.time.LocalDate

import com.alibaba.fastjson.JSON
import com.atguigu.realtime.constants.{DBNameConstant, PrefixConstant, TopicConstant}
import com.atguigu.realtime.utils.{PropertiesUtil, RedisUtil}
import com.atguigu.sparkstreaming.beans.{ActionLog, CouponAlertInfo, StartLog}
import com.atguigu.sparkstreaming.utils.{DStreamUtil, DateParseUtil}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}
import redis.clients.jedis.Jedis

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.control.Breaks

/**
 *
 *   不是累加的聚合运算，一般都支持输出幂等。
 *
 *   at least once + 幂等输出 保证精确一次。
 *
 *   采取ES进行存储的话，在SparkConf需要添加一些ES的参数。
 *
 *   ------------------------------
 *
 *
 *
 *
 *
 */
object AlertApp extends BaseApp {
  override var groupId: String = "realtime220309"
  override var topic: String = TopicConstant.ACTION_LOG
  override var appName: String = "AlertApp"
  override var batchDuration: Int = 10

  /**
   * Entry point.
   *
   * Pipeline: consume action logs from Kafka, slide a 5-minute window every 30s,
   * find devices on which >= 2 distinct users added a shipping address within the
   * window, build a [[CouponAlertInfo]] per suspicious device, and write the alerts
   * to a daily Elasticsearch index. Offsets are committed manually after each write
   * (at-least-once + idempotent ES upsert by `_id` => effectively exactly-once).
   */
  def main(args: Array[String]): Unit = {

    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName(appName)

    // Elasticsearch cluster nodes, port and related settings.
    sparkConf.set("es.nodes", PropertiesUtil.getProperty("es.nodes"))
    sparkConf.set("es.port", PropertiesUtil.getProperty("es.port"))
    // Allow automatic creation of the target index.
    sparkConf.set("es.index.auto.create", "true")
    // Treat configured nodes as the only reachable ones (resolve hostname -> ip).
    sparkConf.set("es.nodes.wan.only", "true")

    // Rebuild the StreamingContext declared in BaseApp with the ES-aware conf.
    context = new StreamingContext(sparkConf, Seconds(batchDuration))

    runApp {

      val ds: InputDStream[ConsumerRecord[String, String]] = DStreamUtil.createDStream(groupId, context, topic)

      // Offset ranges of the most recently consumed batch; assigned inside
      // transform() (driver side) and committed after the ES write below.
      var ranges: Array[OffsetRange] = null

      val ds1: DStream[ActionLog] = ds.transform(rdd => {

        // Capture the offset ranges of the batch currently being consumed.
        ranges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Deserialize each Kafka record value into an ActionLog bean.
        rdd.map(record => JSON.parseObject(record.value(), classOf[ActionLog]))

      })

      // Normally the slide interval matches the batch interval; here a 30s slide
      // over a 5-minute window recomputes (deliberately, for alerting freshness).
      val ds2: DStream[ActionLog] = ds1.window(Minutes(5), Seconds(30))

      // Group all logs in the window by (device id, user id).
      val ds3: DStream[((String, String), Iterable[ActionLog])] = ds2.map(actionLog => ((actionLog.mid, actionLog.uid), actionLog))
        .groupByKey()

      // Keep only (device, user) groups where the user added a shipping address.
      // A simple existence check replaces the original Breaks.breakable loop.
      val ds4: DStream[((String, String), Iterable[ActionLog])] = ds3.filter {
        case ((mid, uid), logs) =>
          logs.exists(log => "trade_add_address".equals(log.action_id))
      }

      /*
          Regroup by device id only, so each device maps to the per-user log groups:

          (String, Iterable[Iterable[ActionLog]])

          (mid1, [
                    [ user1log1, user1log2, user1log3 ],
                    [ user2log1, user2log2, user2log3 ],
                    ......
                 ])
       */
      val ds5: DStream[(String, Iterable[Iterable[ActionLog]])] = ds4.map {
        case ((mid, uid), logs) => (mid, logs)
      }.groupByKey()

      // Keep devices where, in the past 5 minutes, AT LEAST 2 distinct logged-in
      // users added a shipping address (>= 2, i.e. 2 or more).
      val ds6: DStream[(String, Iterable[Iterable[ActionLog]])] = ds5.filter(_._2.size >= 2)

      // Flatten the per-user groups back into one log collection per device.
      val ds7: DStream[(String, Iterable[ActionLog])] = ds6.mapValues(_.flatten)

      // Build one alert record per suspicious device.
      val ds8: DStream[CouponAlertInfo] = ds7.map {
        case (mid, logs) => {

          // Distinct user ids seen on the device, distinct favored item ids,
          // and the full ordered list of action ids.
          val uids: mutable.Set[String] = new mutable.HashSet[String]
          val itemIds: mutable.Set[String] = new mutable.HashSet[String]
          val events: ListBuffer[String] = new ListBuffer

          logs.foreach(log => {

            uids.add(log.uid)
            events.append(log.action_id)
            if ("favor_add".equals(log.action_id)) {

              itemIds.add(log.item)

            }
          })

          // Alert timestamp (ms).
          val ts: Long = System.currentTimeMillis()

          /*
              id encodes the device plus a minute-resolution timestamp:
                      mid_2022-07-20 15:37:20  -> log1
                      mid_2022-07-20 15:37:21  -> log2
                  both collapse to id  mid_2022-07-20 15:37

              So if one device produces several alerts within the same minute,
              only the last one survives (idempotent overwrite by _id in ES).

              -----------------------------
              Edge case around midnight (data "drift" across daily indexes):
                  mid_2022-07-20 23:59:10 -> index 2022-07-20
                  mid_2022-07-20 23:59:50 -> may land in index 2022-07-21
              See the note at the write below — accepted as harmless here.
           */
          CouponAlertInfo(mid + "_" + DateParseUtil.parseMillTsToDateTime2(ts), uids, itemIds, events, ts)
        }
      }

      // Write to ES: import the implicit saveToEs extension methods.
      import org.elasticsearch.spark._

      ds8.foreachRDD(rdd => {

        println("即将写入:" + rdd.count())

        /*
          saveToEs(resource, cfg):
            resource: the target index name (one index per day)
            cfg: extra options — "es.mapping.id" is required and names the bean
                 property used as the document _id (enables idempotent overwrite)

          --------------------
          Data drift: a record timestamped late on day D may be written into the
          index for day D+1 (index name uses LocalDate.now() at write time, not
          the event time). Harmless for ES querying, so deliberately not fixed.

          ------------------
          To eliminate drift entirely: skip saveToEs and write via the ES
          Java/REST API manually, choosing the index from the event timestamp:

              rdd.foreachPartition(partition => {
                  // open an ES connection
                  // partition.foreach(data => JestClient.execute(Action(data)))
                  // close the connection
              })
         */
        rdd.saveToEs(DBNameConstant.ALERTINDEX + LocalDate.now(), Map("es.mapping.id" -> "id"))

        // Commit the captured offsets only after the write succeeds.
        // NOTE(review): `ranges` holds only the latest 10s batch while the 5-min
        // window still depends on earlier batches — after a restart, window
        // contents before the committed offset are not replayed. Acceptable for
        // alerting (comment at the window above), but confirm this is intended.
        ds.asInstanceOf[CanCommitOffsets].commitAsync(ranges)

      })

    }

  }
}
