package com.mjf.gmall.realtime.dwd

import com.alibaba.fastjson.{JSON, JSONObject}
import com.alibaba.fastjson.serializer.SerializeConfig
import com.mjf.gmall.realtime.bean.OrderDetail
import com.mjf.gmall.realtime.util.{MyKafkaSink, MyKafkaUtil, OffsetManager, PhoenixUtil}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Order-detail stream at item (SKU) granularity.
 *
 * Consumes `ods_order_detail` from Kafka, enriches each record with SKU
 * dimension attributes (spu / brand / category3) queried from Phoenix,
 * and publishes the result to the `dwd_order_detail` Kafka topic.
 *
 * Stack: zookeeper / kafka / redis (offset store) / phoenix
 */
object OrderDetailApp {
  def main(args: Array[String]): Unit = {

    // Streaming context: local mode, 5-second micro-batches.
    // (App name fixed from the copy-pasted "order_info_app".)
    val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("order_detail_app")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(5))

    val topic = "ods_order_detail"
    val groupId = "order_detail_group"

    // Previously committed offsets from Redis; may be null or empty on first run.
    val kafkaOffsetMap: Map[TopicPartition, Long] = OffsetManager.getOffset(topic, groupId)

    // BUGFIX: the original condition was `kafkaOffsetMap == null && size > 0`,
    // which would NPE when the map is null and, when offsets DID exist, fell
    // through to the else branch — saved offsets were never used. Resume from
    // Redis when offsets exist; otherwise start from the group's default position.
    val recordInputStream: InputDStream[ConsumerRecord[String, String]] =
      if (kafkaOffsetMap != null && kafkaOffsetMap.nonEmpty) {
        MyKafkaUtil.getKafkaStream(topic, ssc, kafkaOffsetMap, groupId)
      } else {
        MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
      }

    // Capture each batch's offset ranges on the driver. The var is declared
    // outside the transform so the foreachRDD below can read it when committing.
    var offsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]
    val inputGetOffsetDstream: DStream[ConsumerRecord[String, String]] = recordInputStream.transform {
      rdd =>
        // Runs once per batch on the driver, before any executor-side work.
        offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        rdd
    }

    // Deserialize each record's JSON payload into an OrderDetail bean.
    val orderDetailDStream: DStream[OrderDetail] = inputGetOffsetDstream.map { record =>
      JSON.parseObject(record.value(), classOf[OrderDetail])
    }

    // Enrich with SKU dimension data. mapPartitions is used so a single
    // Phoenix query serves an entire partition instead of one query per record.
    val orderDetailWithSkuInfoDStream: DStream[OrderDetail] = orderDetailDStream.mapPartitions {
      orderDetailIter =>
        val orderDetailList: List[OrderDetail] = orderDetailIter.toList
        if (orderDetailList.nonEmpty) {
          // One IN-clause query for every sku_id in this partition.
          // NOTE(review): the IN list is built by string concatenation; sku_ids
          // originate from internal upstream data, but switch to a parameterized
          // query if that ever changes.
          val skuIds: String = orderDetailList.map(_.sku_id).mkString("','")
          val sql: String = "select ID,SPU_ID,TM_ID,CATEGORY3_ID,SPU_NAME,TM_NAME,CATEGORY3_NAME from GMALL0105_SKU_INFO where ID in ('" + skuIds + "')"
          val skuInfoList: List[JSONObject] = PhoenixUtil.queryList(sql)
          val skuInfoMap: Map[String, JSONObject] = skuInfoList.map(skuInfo => (skuInfo.getString("ID"), skuInfo)).toMap
          for (orderDetail <- orderDetailList) {
            // Records whose SKU is absent from the dim table pass through unenriched.
            skuInfoMap.get(orderDetail.sku_id.toString).foreach { skuInfoObj =>
              orderDetail.spu_id = skuInfoObj.getLongValue("SPU_ID")
              orderDetail.tm_id = skuInfoObj.getLongValue("TM_ID")
              orderDetail.category3_id = skuInfoObj.getLongValue("CATEGORY3_ID")
              orderDetail.spu_name = skuInfoObj.getString("SPU_NAME")
              orderDetail.tm_name = skuInfoObj.getString("TM_NAME")
              orderDetail.category3_name = skuInfoObj.getString("CATEGORY3_NAME")
            }
          }
        }
        orderDetailList.iterator
    }

    // Publish enriched records to the DWD topic, then commit offsets.
    orderDetailWithSkuInfoDStream.foreachRDD { rdd =>
      rdd.foreach { orderDetail =>
        // new SerializeConfig(true): field-based serialization, working around
        // fastjson's incompatibility with Scala bean getters.
        val orderDetailJson: String = JSON.toJSONString(orderDetail, new SerializeConfig(true))
        // Key by order_id so all details of one order land in the same Kafka
        // partition, avoiding a shuffle on the downstream join.
        MyKafkaSink.send("dwd_order_detail", orderDetail.order_id.toString, orderDetailJson)
      }
      // BUGFIX: offsets were read from Redis but never written back, so every
      // restart reprocessed the same data. Commit AFTER the batch's output has
      // been sent, giving at-least-once delivery.
      OffsetManager.saveOffset(topic, groupId, offsetRanges)
    }

    // Debug visibility into the enriched stream.
    orderDetailWithSkuInfoDStream.print(1000)

    ssc.start()
    ssc.awaitTermination()
  }
}
