package com.atguigu.realtime.ods

import com.atguigu.realtime.BaseApp
import com.atguigu.realtime.util.{MyKafkaUtil, OffsetManager}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.OffsetRange
import org.json4s.JValue
import org.json4s.jackson.{JsonMethods, Serialization}

import scala.collection.mutable.ListBuffer

/**
 * Author atguigu
 * Date 2020/11/16 9:23
 *
 * Parses change-data records that Maxwell wrote to Kafka, splits them by table,
 * and writes each record back to Kafka as an ODS-layer topic (ods_<table>).
 */
object BaseDBMaxwellApp extends BaseApp {
    override val master: String = "local[2]"
    override val appName: String = "BaseDBMaxwellApp"
    override val groupId: String = "BaseDBMaxwellApp"
    // Source topic that Maxwell publishes binlog change records to.
    override val topic: String = "maxwell_gmall_db"
    // Batch interval in seconds (member name kept as-is to match BaseApp's declaration).
    override val bachTime: Int = 3
    
    // Whitelist of tables whose change records are forwarded to the ODS layer.
    val tableNames = List(
        "order_info",
        "order_detail",
        "user_info",
        "base_province",
        "base_category3",
        "sku_info",
        "spu_info",
        "base_trademark")
    
    
    /**
     * Parses each Maxwell JSON record, keeps only whitelisted tables with a
     * non-empty, non-delete payload, routes every surviving record to the
     * per-table topic "ods_<tableName>", then persists the batch's offsets.
     *
     * @param ssc          streaming context (unused here; part of the BaseApp contract)
     * @param sourceStream raw Maxwell JSON strings consumed from Kafka
     * @param offsetRanges offsets of the current batch, saved after each RDD is written
     */
    def run(ssc: StreamingContext,
            sourceStream: DStream[String],
            offsetRanges: ListBuffer[OffsetRange]): Unit = {
        sourceStream
            .map(str => {
                implicit val f = org.json4s.DefaultFormats
                val j: JValue = JsonMethods.parse(str)
                // Maxwell envelope: "table" = source table, "type" = DML kind, "data" = row payload.
                val data: JValue = j \ "data"
                val tableName: String = (j \ "table").extract[String]
                val operate: String = (j \ "type").extract[String]
                (tableName, operate.toLowerCase(), Serialization.write(data))
            })
            .filter {
                case (tableName, operate, data) =>
                    // Keep only whitelisted tables, drop deletes, and drop empty
                    // payloads (a serialized "{}" is exactly 2 characters long).
                    tableNames.contains(tableName) && operate != "delete" && data.length > 2
            }
            .foreachRDD(rdd => {
                // Write the batch to the ODS layer (Kafka), one producer per partition.
                rdd.foreachPartition((it: Iterator[(String, String, String)]) => {
                    val producer: KafkaProducer[String, String] = MyKafkaUtil.getProducer
                    it.foreach {
                        case (tableName, operate, data) =>
                            // order_info: forward only inserts (new orders);
                            // every other table forwards all surviving changes.
                            if (tableName != "order_info" || operate == "insert") {
                                producer.send(new ProducerRecord[String, String](s"ods_$tableName", data))
                            }
                    }
                    // close() flushes buffered records before releasing the producer.
                    producer.close()
                })
                // Runs on the driver after the batch's partitions finish: commit offsets
                // only once the data has been handed to Kafka (at-least-once semantics).
                OffsetManager.saveOffsets(offsetRanges, groupId, topic)
            })
    }
}
