package com.mjf.gmall.realtime.dws

import java.util

import com.alibaba.fastjson.JSON
import com.alibaba.fastjson.serializer.SerializeConfig
import com.mjf.gmall.realtime.bean.{OrderDetail, OrderDetailWide, OrderInfo}
import com.mjf.gmall.realtime.util.{MyKafkaUtil, OffsetManager, RedisUtil}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

import scala.collection.mutable.ListBuffer

/**
 * Joins the orderInfo and orderDetail streams into a wide table, using a
 * Redis cache to pair records that arrive in different micro-batches.
 *
 * Stack: ZooKeeper / Kafka / Redis
 *
 * Dual-stream join problem:
 *  there is no guarantee that a matching master (orderInfo) and detail
 *  (orderDetail) record land in the same batch, so a plain join can drop data.
 *
 * Solutions:
 *  1. cache unmatched records (implemented here)
 *  2. sliding window + dedup (only viable for short delays; data inflation
 *     grows with the delay)
 */
object OrderInfoDetailWideCacheApp {
  def main(args: Array[String]): Unit = {
    // Build the streaming context: local 4-core master, 5-second batches.
    val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("order_wide_app")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(5))

    val topicOrderInfo = "dwd_order_info"
    val groupIdOrderInfo = "dws_order_info_group"

    val topicOrderDetail = "dwd_order_detail"
    val groupIdOrderDetail = "dws_order_detail_group"

    // Read the last committed offsets from Redis for both streams.
    val orderInfoKafkaOffsetMap: Map[TopicPartition, Long] = OffsetManager.getOffset(topicOrderInfo, groupIdOrderInfo)
    val orderDetailKafkaOffsetMap: Map[TopicPartition, Long] = OffsetManager.getOffset(topicOrderDetail, groupIdOrderDetail)

    // Consume orderInfo from Kafka, resuming from stored offsets when present.
    // BUG FIX: the original tested `== null && size > 0`, which short-circuits
    // to false in every case, so the saved offsets were never used; the check
    // must be `!= null`.
    val orderInfoRecordInputStream: InputDStream[ConsumerRecord[String, String]] =
      if (orderInfoKafkaOffsetMap != null && orderInfoKafkaOffsetMap.nonEmpty) {
        // Redis already holds offsets for this group: resume from them.
        MyKafkaUtil.getKafkaStream(topicOrderInfo, ssc, orderInfoKafkaOffsetMap, groupIdOrderInfo)
      } else {
        MyKafkaUtil.getKafkaStream(topicOrderInfo, ssc, groupIdOrderInfo)
      }

    // Consume orderDetail from Kafka.
    // BUG FIX: the original passed orderInfoKafkaOffsetMap here (copy-paste
    // error), which would resume the detail stream from the wrong topic's
    // offsets; it also had the same always-false `== null &&` condition.
    val orderDetailRecordInputStream: InputDStream[ConsumerRecord[String, String]] =
      if (orderDetailKafkaOffsetMap != null && orderDetailKafkaOffsetMap.nonEmpty) {
        MyKafkaUtil.getKafkaStream(topicOrderDetail, ssc, orderDetailKafkaOffsetMap, groupIdOrderDetail)
      } else {
        MyKafkaUtil.getKafkaStream(topicOrderDetail, ssc, groupIdOrderDetail)
      }

    // Capture each batch's offset ranges - orderInfo. The assignment inside
    // transform runs periodically on the driver; the var outside is assigned
    // once at startup.
    var orderInfoOffsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]
    val orderInfoOffsetDstream: DStream[ConsumerRecord[String, String]] = orderInfoRecordInputStream.transform {
      rdd =>
        // End positions of this batch, used later to update offsets in Redis.
        orderInfoOffsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        rdd
    }

    // Capture each batch's offset ranges - orderDetail.
    var orderDetailOffsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]
    val orderDetailOffsetDstream: DStream[ConsumerRecord[String, String]] = orderDetailRecordInputStream.transform {
      rdd =>
        orderDetailOffsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        rdd
    }

    // Deserialize the JSON payloads into bean objects.
    val orderInfoDStream: DStream[OrderInfo] = orderInfoOffsetDstream.map {
      record => JSON.parseObject(record.value(), classOf[OrderInfo])
    }
    val orderDetailDStream: DStream[OrderDetail] = orderDetailOffsetDstream.map {
      record => JSON.parseObject(record.value(), classOf[OrderDetail])
    }

    orderInfoDStream.print(1000)
    orderDetailDStream.print(1000)

    // Key both streams by order id so they can be joined.
    val orderInfoWithKeyDStream: DStream[(Long, OrderInfo)] = orderInfoDStream.map(orderInfo => (orderInfo.id, orderInfo))
    val orderDetailWithKeyDStream: DStream[(Long, OrderDetail)] = orderDetailDStream.map(orderDetail => (orderDetail.order_id, orderDetail))

    // A plain join would drop pairs split across batches:
    //   orderInfoWithKeyDStream.join(orderDetailWithKeyDStream)
    // hence the fullOuterJoin + Redis cache below.
    val orderFullJoinedDStream: DStream[(Long, (Option[OrderInfo], Option[OrderDetail]))] =
      orderInfoWithKeyDStream.fullOuterJoin(orderDetailWithKeyDStream)

    val orderDetailWideDStream: DStream[OrderDetailWide] = orderFullJoinedDStream.flatMap {
      case (orderId, (orderInfoOption, orderDetailOption)) =>
        val orderDetailWideList: ListBuffer[OrderDetailWide] = new ListBuffer[OrderDetailWide]
        val jedis: Jedis = RedisUtil.getJedisClient
        try {
          orderInfoOption match {
            // 1. master side present in this batch
            case Some(orderInfo) =>
              // 1.1 matched within the same batch: combine the two beans now.
              for (orderDetail <- orderDetailOption) {
                orderDetailWideList.append(new OrderDetailWide(orderInfo, orderDetail))
              }

              // 1.2 cache the master record as JSON for late-arriving details.
              // Why not a hash? (1) a hash TTL applies to the whole hash, not
              // per field; (2) a single huge key is inefficient and hard to
              // distribute.
              val orderInfoJsonStr: String = JSON.toJSONString(orderInfo, new SerializeConfig(true))
              // type: string  key: order_info:orderId  value: json  ttl: 600s
              jedis.setex("order_info:" + orderId, 600, orderInfoJsonStr)

              // 1.3 pick up any detail records cached by earlier batches.
              val orderDetailJsonSet: util.Set[String] = jedis.smembers("order_detail:" + orderId)
              if (orderDetailJsonSet != null && orderDetailJsonSet.size() > 0) {
                // Explicit JavaConverters instead of the deprecated implicit
                // JavaConversions: Java Set -> Scala iteration via .asScala.
                import scala.collection.JavaConverters._
                for (orderDetailJsonStr <- orderDetailJsonSet.asScala) {
                  val orderDetail: OrderDetail = JSON.parseObject(orderDetailJsonStr, classOf[OrderDetail])
                  orderDetailWideList.append(new OrderDetailWide(orderInfo, orderDetail))
                }
              }

            // 2. master side absent; a fullOuterJoin guarantees the detail
            // side is present in this case.
            case None =>
              val orderDetail: OrderDetail = orderDetailOption.get
              // 2.1 cache the detail record. A set is used because the
              // master/detail relation is one-to-many, so one call fetches all
              // details for an order.
              val orderDetailJsonStr: String = JSON.toJSONString(orderDetail, new SerializeConfig(true))
              // type: set  key: order_detail:orderId  value: json  ttl: 600s
              jedis.sadd("order_detail:" + orderId, orderDetailJsonStr)
              jedis.expire("order_detail:" + orderId, 600)

              // 2.2 look up the cached master record, if any.
              // BUG FIX: jedis.get returns null on a miss; the original called
              // .length on it and would throw a NullPointerException.
              val orderInfoJsonStr: String = jedis.get("order_info:" + orderId)
              if (orderInfoJsonStr != null && orderInfoJsonStr.nonEmpty) {
                val orderInfo: OrderInfo = JSON.parseObject(orderInfoJsonStr, classOf[OrderInfo])
                orderDetailWideList.append(new OrderDetailWide(orderInfo, orderDetail))
              }
          }
        } finally {
          // Return the connection to the pool even if parsing or Redis fails.
          jedis.close()
        }
        orderDetailWideList
    }

    orderDetailWideDStream.print(1000)

    // NOTE(review): orderInfoOffsetRanges / orderDetailOffsetRanges are
    // captured each batch but never written back through OffsetManager, so
    // offsets are never committed and a restart reprocesses data — confirm
    // and add a save step (e.g. in a foreachRDD after the output action).

    ssc.start()
    ssc.awaitTermination()
  }
}
