package com.atguigu.realtime.dws

import com.atguigu.realtime.BaseAppV3
import com.atguigu.realtime.bean.{OrderDetail, OrderInfo, OrderWide}
import com.atguigu.realtime.util.{MyKafkaUtil, MyRedisUtil, OffsetManager}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.Decimal
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.OffsetRange
import org.json4s.jackson.{JsonMethods, Serialization}
import redis.clients.jedis.Jedis

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer

/**
 * Author atguigu
 * Date 2020/11/20 9:09
 */
object DwsOrderWideApp_3 extends BaseAppV3 {
    override val master: String = "local[2]"
    override val appName: String = "DwsOrderWideApp_3"
    override val groupId: String = "DwsOrderWideApp_32"
    override val topics: Seq[String] = Seq("dwd_order_info", "dwd_order_detail")
    override val bachTime: Int = 5
    
    // Redis key prefixes for the two sides of the stream-to-stream join cache.
    private val orderInfoPre = "order_info"
    private val orderDetailPre = "order_detail"
    
    // Cache TTL shared by both sides of the join (seconds).
    private val cacheTtlSeconds = 10 * 60
    
    /**
     * Caches an OrderDetail in Redis so a late-arriving OrderInfo can still join with it.
     *
     * Layout: hash "order_detail:<order_id>" -> { <detail_id> : detail-JSON }
     * (one order can have many details, hence a hash instead of a plain key).
     *
     * @param client      pooled Redis connection; NOT closed here, the caller owns it
     * @param orderDetail the detail record to cache
     */
    def cacheOrderDetail(client: Jedis, orderDetail: OrderDetail) = {
        implicit val f = org.json4s.DefaultFormats
        val key = s"${orderDetailPre}:${orderDetail.order_id}"
        val field = orderDetail.id.toString
        val value = Serialization.write(orderDetail)
        client.hset(key, field, value)
        // Expire the hash like the order_info side does; without this, details whose
        // order_info never arrives would accumulate in Redis forever.
        client.expire(key, cacheTtlSeconds)
    }
    
    /**
     * Caches an OrderInfo in Redis so late-arriving OrderDetails can still join with it.
     *
     * Layout: "order_info:<order_id>" -> info-JSON, with a 10-minute TTL so
     * orphaned entries are reclaimed automatically.
     *
     * @param client    pooled Redis connection; NOT closed here, the caller owns it
     * @param orderInfo the order header record to cache
     */
    def cacheOrderInfo(client: Jedis, orderInfo: OrderInfo) = {
        implicit val f = org.json4s.DefaultFormats
        val key = s"${orderInfoPre}:${orderInfo.id}"
        val value = Serialization.write(orderInfo)
        client.setex(key, cacheTtlSeconds, value)
    }
    
    /**
     * Joins dwd_order_info with dwd_order_detail into OrderWide records,
     * allocates the per-detail share of the order's final amount, and writes
     * the result to the dws_order_wide Kafka topic (offsets committed after output).
     *
     * Cross-batch join strategy: full outer join within the batch, with Redis
     * as a short-lived (10 min) cache for whichever side arrives first.
     */
    override def run(ssc: StreamingContext,
                     sourceStreams: Map[String, DStream[ConsumerRecord[String, String]]],
                     offsetRanges: Map[String, ListBuffer[OffsetRange]]): Unit = {
        // Retained for the (currently commented-out) spark-sql ClickHouse sink below.
        val spark: SparkSession = SparkSession.builder()
            .config(ssc.sparkContext.getConf)
            .getOrCreate()
        
        // Parse each dwd_order_info record into (order_id, OrderInfo).
        val orderInfoStream = sourceStreams("dwd_order_info")
            .map(record => {
                implicit val f = org.json4s.DefaultFormats + toLong + toDouble
                val orderInfo: OrderInfo = JsonMethods.parse(record.value()).extract[OrderInfo]
                (orderInfo.id, orderInfo)
            })
        
        // Parse each dwd_order_detail record into (order_id, OrderDetail).
        val orderDetailStream = sourceStreams("dwd_order_detail")
            .map(record => {
                implicit val f = org.json4s.DefaultFormats + toLong + toDouble
                val orderDetail: OrderDetail = JsonMethods.parse(record.value()).extract[OrderDetail]
                (orderDetail.order_id, orderDetail)
            })
        
        val orderWideDetailStream = orderInfoStream
            .fullOuterJoin(orderDetailStream)
            .mapPartitions(it => {
                implicit val f = org.json4s.DefaultFormats
                val client: Jedis = MyRedisUtil.getClient
                
                // IMPORTANT: Iterator.flatMap is lazy. Materialize with toList BEFORE
                // closing the redis client, otherwise the client is returned to the pool
                // while the downstream stage is still reading through the iterator and
                // every redis call would run on a closed/recycled connection.
                val result = it.flatMap {
                    case (orderId, (Some(orderInfo), opt)) =>
                        // Info side arrived in this batch (detail side may or may not have).
                        // 1. Cache orderInfo so details from later batches can join.
                        cacheOrderInfo(client, orderInfo)
                        // 2. Join with any details cached by earlier batches, then drop them.
                        val key = s"${orderDetailPre}:${orderId}"
                        val cached = client.hgetAll(key).asScala
                            .map {
                                case (_, orderDetailStr) =>
                                    val orderDetail = JsonMethods.parse(orderDetailStr).extract[OrderDetail]
                                    new OrderWide(orderInfo, orderDetail)
                            }
                            .toList
                        client.del(key)
                        
                        // 3. Join with the same-batch detail, if present.
                        if (opt.isDefined) {
                            new OrderWide(orderInfo, opt.get) :: cached
                        } else {
                            cached
                        }
                    
                    case (orderId, (None, Some(orderDetail))) =>
                        // Only the detail side arrived in this batch.
                        // 1. Look for a previously cached orderInfo.
                        val orderInfoStr: String = client.get(s"${orderInfoPre}:${orderId}")
                        if (orderInfoStr != null) { // 2. Found: join immediately.
                            val orderInfo: OrderInfo = JsonMethods.parse(orderInfoStr).extract[OrderInfo]
                            new OrderWide(orderInfo, orderDetail) :: Nil
                        } else { // 3. Not found: cache this detail and wait for the info side.
                            cacheOrderDetail(client, orderDetail)
                            Nil
                        }
                    
                    // fullOuterJoin never emits (None, None); this arm only keeps the
                    // match total and avoids a compiler warning / MatchError.
                    case _ => Nil
                }.toList
                
                client.close()
                result.iterator
            })
        
        val clickHouseUrl = "jdbc:clickhouse://hadoop162:8123/gmall"
        orderWideDetailStream
            .mapPartitions((it: Iterator[OrderWide]) => { // allocate per-detail share of the order amount
                val client: Jedis = MyRedisUtil.getClient
                // Same laziness trap as above: force the map before closing the client.
                val allocated = it.map(orderWide => {
                    // Running total of the original amount of details seen so far for this order.
                    val preTotalKey = s"pre_total:${orderWide.order_id}"
                    // Running total of the shares already allocated for this order.
                    val preShareKey = s"pre_share:${orderWide.order_id}"
                    
                    // 1. Previous details' total original amount (null -> first detail of the order).
                    val preTotalTemp: String = client.get(preTotalKey)
                    val preTotal: Double = if (preTotalTemp != null) preTotalTemp.toDouble else 0D
                    
                    // 2. Previous details' total allocated share.
                    val preShareTemp: String = client.get(preShareKey)
                    val preShare: Double = if (preShareTemp != null) preShareTemp.toDouble else 0D
                    
                    // 3. Last detail of the order? Then its share is the remainder, so the
                    //    shares sum exactly to final_total_amount despite per-detail rounding.
                    // NOTE(review): exact == on doubles relies on redis incrByFloat
                    //   round-tripping the decimal strings losslessly — confirm, or
                    //   compare with a small epsilon.
                    val current = orderWide.sku_num * orderWide.sku_price
                    if (current == orderWide.original_total_amount - preTotal) { // last detail
                        val share = Decimal(orderWide.final_total_amount) - Decimal(preShare)
                        orderWide.final_detail_amount = share.toDouble
                        // The order is complete; drop its running totals.
                        client.del(preTotalKey)
                        client.del(preShareKey)
                    } else {
                        // Proportional share, rounded to 2 decimals (e.g. 10.888 -> 10.89).
                        val shareTemp = current * orderWide.final_total_amount / orderWide.original_total_amount
                        val share = math.round(shareTemp * 100).toDouble / 100
                        orderWide.final_detail_amount = share
                        // Accumulate in redis atomically — details of one order may be
                        // processed across batches/partitions, so use server-side increments.
                        client.incrByFloat(preShareKey, share)
                        client.incrByFloat(preTotalKey, current)
                    }
                    orderWide
                }).toList
                client.close()
                allocated.iterator
            })
            .foreachRDD(rdd => { // sink the widened records
                // Alternative sink: write straight to clickhouse via spark-sql.
                /*rdd
                    .toDS
                    .write
                    .option("batchsize", "100")
                    .option("isolationLevel", "NONE") // no transactions
                    .option("numPartitions", "2") // write concurrency
                    .mode("append")
                    .jdbc(clickHouseUrl, "order_wide", new Properties())*/
                
                // Publish the dws records to Kafka.
                rdd.foreachPartition(it => {
                    val producer: KafkaProducer[String, String] = MyKafkaUtil.getProducer
                    it.foreach(orderWide => {
                        implicit val f = org.json4s.DefaultFormats
                        producer.send(new ProducerRecord[String, String]("dws_order_wide", Serialization.write(orderWide)))
                    })
                    producer.close()
                })
                
                // Commit offsets only after the batch's output has been produced.
                OffsetManager.saveOffsets(offsetRanges.values.reduce(_ ++ _), groupId, topics)
            })
    }
}

/*
把json字符串转成java对象
    解析
    
    val obd: 样例类= JsonMethods.parse(s).extract[样例类]
把java对象变成json字符串
    序列化
    
    val jsonString  = Serialization.write(java对象)  // {}
    
                // [ ]
    
    
    




order_info缓存类型:
    容易存, 容易读
    key                             value
    "order_info:"+order_id          OrderInfo的json字符串格式  {"": "", "": ""}
 
 
order_detail缓存类型:
    容易存, 容易读
    
    key                                                  value
    //"order_detail:"+order_id+order_detail_id             OrderDetail的json字符串格式  {"": "", "": ""}
    
    "order_detail:"+order_id                             value(hash)
                                                         field                  value
                                                         order_detail_id        OrderDetail的json字符串格式  {"": "", "": ""}
                                                         
                                                         
                                                         
                                                      

 */