package com.zlm.realtime.dws

import com.alibaba.fastjson.JSON
import com.alibaba.fastjson.serializer.SerializeConfig
import com.zlm.realtime.bean.{OrderDetail, OrderInfo, OrderWide}
import com.zlm.realtime.utils.{MyESUtils, MyKafkaSinkUtils, MyKafkaUtils, MyOffsetUtils, MyPropertiesUtils, MyRedisUtils}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

import java.lang
import java.text.SimpleDateFormat
import java.util.{Date, Properties}
import scala.collection.mutable.ListBuffer

/**
 * Author: Harbour
 * Date: 2021-04-19 14:30
 * Desc: Joins the dwd order-info and order-detail streams into a single
 *       denormalized "order wide" record, apportions the actually-paid
 *       amount across detail rows, and fans the result out to Kafka,
 *       Elasticsearch and ClickHouse.
 */
object OrderWideApp {
    // Kafka consumer groups / topics for the two DWD-layer input streams.
    val orderInfoGroupId = "dws_order_info_group"
    val orderInfoTopic = "dwd_order_info"
    val orderDetailGroupId = "dws_order_detail_group"
    val orderDetailTopic = "dwd_order_detail"

    def main(args: Array[String]): Unit = {
        val conf: SparkConf = new SparkConf().setAppName("OrderWideApp").setMaster("local[*]")
        val ssc = new StreamingContext(conf, Seconds(5))

        // step 1. Read both Kafka streams and parse each record's JSON payload,
        // keying every element by order id so the two streams can be joined.
        val orderInfoKafkaInputStream: InputDStream[ConsumerRecord[String, String]] =
            MyKafkaUtils.getKafkaStream(orderInfoTopic, ssc, orderInfoGroupId)

        // Captured on the driver inside transform() so the consumed offset ranges
        // can be committed to Redis only after the whole batch has been processed.
        var orderInfoOffsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]
        val orderInfoStream: DStream[(Long, OrderInfo)] = orderInfoKafkaInputStream.transform(
            (rdd: RDD[ConsumerRecord[String, String]]) => {
                orderInfoOffsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
                rdd
            }
        ).map(
            (record: ConsumerRecord[String, String]) => {
                val orderInfo: OrderInfo = JSON.parseObject(record.value(), classOf[OrderInfo])
                (orderInfo.id, orderInfo)
            }
        )

        val orderDetailKafkaInputStream: InputDStream[ConsumerRecord[String, String]] =
            MyKafkaUtils.getKafkaStream(orderDetailTopic, ssc, orderDetailGroupId)

        var orderDetailOffsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]
        val orderDetailStream: DStream[(Long, OrderDetail)] = orderDetailKafkaInputStream.transform(
            (rdd: RDD[ConsumerRecord[String, String]]) => {
                orderDetailOffsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
                rdd
            }
        ).map(
            (record: ConsumerRecord[String, String]) => {
                val orderDetail: OrderDetail = JSON.parseObject(record.value(), classOf[OrderDetail])
                (orderDetail.order_id, orderDetail)
            }
        )

        // step 2. Join the two streams on order id. A 50s window sliding every
        // batch interval tolerates the two topics arriving slightly out of step.
        val orderInfoWindowStream: DStream[(Long, OrderInfo)] = orderInfoStream.window(Seconds(50), Seconds(5))
        val orderDetailWindowStream: DStream[(Long, OrderDetail)] = orderDetailStream.window(Seconds(50), Seconds(5))
        val joinedDStream: DStream[(Long, (OrderInfo, OrderDetail))] = orderInfoWindowStream.join(orderDetailWindowStream, 4)

        // step 3. Overlapping windows re-emit the same (info, detail) pair on
        // several consecutive batches; deduplicate with a Redis set per order id.
        val joinedOrderWideDStream: DStream[OrderWide] = joinedDStream.mapPartitions(
            (orderInfoIter: Iterator[(Long, (OrderInfo, OrderDetail))]) => {

                val orderInfoList: List[(Long, (OrderInfo, OrderDetail))] = orderInfoIter.toList

                val orderWideList: ListBuffer[OrderWide] = ListBuffer[OrderWide]()
                val jedisClient: Jedis = MyRedisUtils.getJedisClient
                for ((id, (orderInfo, orderDetail)) <- orderInfoList) {

                    // sadd returns 1 only the first time this detail id is seen for
                    // the order, so re-emissions from the sliding window are dropped.
                    val key: String = "join_orderId:" + id
                    val isExist: lang.Long = jedisClient.sadd(key, orderDetail.id.toString)
                    // TTL well beyond the 50s join window keeps the dedup set alive
                    // for the whole horizon without leaking keys forever.
                    jedisClient.expire(key, 60 * 10)
                    if (isExist == 1L) {
                        orderWideList.append(new OrderWide(orderInfo, orderDetail))
                    }
                }
                jedisClient.close()

                orderWideList.toIterator
            }
        )

        // step 4. Apportion the actually-paid amount across the order's detail
        // rows, proportionally to each row's original price * quantity.
        val orderWideDStream: DStream[OrderWide] = joinedOrderWideDStream.mapPartitions(
            (orderWideIter: Iterator[OrderWide]) => {
                val client: Jedis = MyRedisUtils.getJedisClient

                /**
                 * final_total_amount     amount actually paid (original + freight - discounts)
                 * original_total_amount  original order total
                 * final_detail_amount    this detail row's share of the paid amount (the target)
                 *
                 * paid_total / detail_share = original_total / (unit_price * quantity)
                 * => detail_share = paid_total * unit_price * quantity / original_total
                 *
                 * Division may not come out even (e.g. 100/3 = 33.33), so the LAST
                 * detail of an order is computed by subtraction instead:
                 * 100 - 33.33 - 33.33 = 33.34.
                 *
                 * Redis keeps two running totals per order id:
                 *  - org_amount: sum of original amounts of details processed so far
                 *    (used to recognise the order's last detail)
                 *  - fin_amount: sum of shares handed out so far
                 *    (used for the last detail's subtraction)
                 */
                val orderWideList: List[OrderWide] = orderWideIter.toList
                for (o <- orderWideList) {
                    val remainKey: String = "org_amount:" + o.order_id
                    val splitKey: String = "fin_amount:" + o.order_id

                    // step 4.1 Load the running totals accumulated by earlier details
                    // of this order; absent keys mean this is the first detail seen.
                    val redis_org_amount_str: String = client.get(remainKey)
                    val redis_fin_amount_str: String = client.get(splitKey)

                    val redis_org_amount: Double = if (redis_org_amount_str != null && redis_org_amount_str.nonEmpty) {
                        redis_org_amount_str.toDouble
                    } else {
                        0D
                    }
                    val redis_fin_amount: Double = if (redis_fin_amount_str != null && redis_fin_amount_str.nonEmpty) {
                        redis_fin_amount_str.toDouble
                    } else {
                        0D
                    }

                    // step 4.2 This detail's original amount.
                    val cur_org_total_amount: Double = o.sku_num * o.sku_price

                    // step 4.3 This is the order's last detail iff the details seen so
                    // far plus this one cover the whole original total. Doubles that
                    // went through Redis string round-trips and repeated addition can
                    // drift, so compare with a sub-cent tolerance instead of exact `==`
                    // (exact equality could silently skip the subtraction branch).
                    if (Math.abs(o.original_total_amount - cur_org_total_amount - redis_org_amount) < 0.001) {
                        // Subtraction absorbs the rounding remainder left over from the
                        // earlier proportional shares.
                        o.final_detail_amount = Math.round((o.final_total_amount - redis_fin_amount) * 100D) / 100D
                    } else {
                        // Proportional share, rounded to cents.
                        o.final_detail_amount = Math.round((o.final_total_amount * cur_org_total_amount) / o.original_total_amount * 100D) / 100D
                        // Persist the updated running totals for the order's later details.
                        client.setex(remainKey, 60 * 10, (redis_org_amount + cur_org_total_amount).toString)
                        client.setex(splitKey, 60 * 10, (redis_fin_amount + o.final_detail_amount).toString)
                    }

                    println(o) // debug trace of every enriched record
                }
                client.close()
                orderWideList.toIterator
            }
        )

        // Needed only for the DataFrame/JDBC write in step 6.
        val sparkSession: SparkSession = SparkSession.builder().appName("OrderWideApp").getOrCreate()

        orderWideDStream.foreachRDD(
            (rdd: RDD[OrderWide]) => {

                // The RDD is consumed twice (ES/Kafka sink and ClickHouse sink);
                // cache it so the whole lineage is not recomputed for the second pass.
                rdd.cache()

                // step 5. Fan the widened records out to Elasticsearch (bulk insert
                // into one index per day) and to the dws Kafka topic.
                rdd.foreachPartition(
                    (orderWideIter: Iterator[OrderWide]) => {

                        val dt: String = new SimpleDateFormat("yyyy-MM-dd").format(new Date())
                        val orderWideList: List[OrderWide] = orderWideIter.toList
                        MyESUtils.bulkInsert(orderWideList, "mall_order_wide" + dt)

                        for (orderWide <- orderWideList) {
                            // SerializeConfig(true) = field-based serialization, needed
                            // because the bean exposes fields, not JavaBean getters.
                            MyKafkaSinkUtils.send(
                                topic = "dws_order_wide",
                                JSON.toJSONString(orderWide, new SerializeConfig(true))
                            )

                        }
                    }
                )

                // step 6. Append the batch to ClickHouse over JDBC.
                import sparkSession.implicits._
                rdd.toDF().write.mode(SaveMode.Append)
                  .option("batchSize", "100")
                  .option("isolationLevel", "NONE")
                  .option("numPartitions", "4")
                  .option("driver", MyPropertiesUtils.getProperty("clickhouse.driver"))
                  .jdbc(MyPropertiesUtils.getProperty("clickhouse.url"),"order_wide", new Properties())

                // Commit offsets only after every sink has succeeded (at-least-once
                // delivery: a failure above replays the batch on restart).
                MyOffsetUtils.saveOffsetToRedis(orderDetailTopic, orderDetailGroupId, orderDetailOffsetRanges)
                MyOffsetUtils.saveOffsetToRedis(orderInfoTopic, orderInfoGroupId, orderInfoOffsetRanges)

                // Release the cached RDD so executor storage is not held across batches
                // (the original cached every batch and never unpersisted).
                rdd.unpersist()
            }
        )

        ssc.start()
        ssc.awaitTermination()
    }
}
