package com.atguigu.gmall.realtime.app

import java.util

import com.alibaba.fastjson.{JSON, JSONArray, JSONObject}
import com.atguigu.gmall.realtime.util.{MyKafkaSink, MyKafkaUtil, OffsetManager, RedisUtil}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

/**
 * Streaming splitter for the CDC firehose topic ODS_BASE_DB_C.
 *
 * Pipeline per 5s batch:
 *   0. build StreamingContext
 *   1. load saved Kafka offsets (Redis-backed OffsetManager)
 *   2. create the Kafka input stream, resuming from saved offsets when present
 *   3. capture each batch's OffsetRanges on the driver
 *   4. parse each message value into a fastjson JSONObject
 *   5. route rows: fact tables -> per-table DWD Kafka topics,
 *      dimension tables -> Redis strings
 *   6. commit offsets only after the batch's writes have completed
 */
object BaseDbApp {

  /**
   * Maps a CDC operation type to the short flag used in DWD topic names:
   * INSERT -> "I", UPDATE -> "U". Anything else (DELETE, bootstrap rows,
   * missing/null type) yields None so the record is skipped.
   * Literal patterns compare null-safely, so a null optType cannot NPE here.
   */
  private def optFlag(optType: String): Option[String] = optType match {
    case "UPDATE" => Some("U")
    case "INSERT" => Some("I")
    case _        => None
  }

  def main(args: Array[String]): Unit = {
    // 0. environment / StreamingContext (local[4] — presumably a dev setting; TODO confirm for prod)
    val sparkConf: SparkConf = new SparkConf().setAppName("base_db_app").setMaster("local[4]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    val topic = "ODS_BASE_DB_C"
    val groupId = "base_db_group"

    // 1. load previously committed offsets; may be null/empty on first run
    val offsetMap: Map[TopicPartition, Long] = OffsetManager.getOffset(topic, groupId)

    // 2. build the Kafka stream, resuming from saved offsets when we have any
    val inputDstream: InputDStream[ConsumerRecord[String, String]] =
      if (offsetMap != null && offsetMap.nonEmpty) {
        MyKafkaUtil.getKafkaStream(topic, ssc, offsetMap, groupId)
      } else {
        MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
      }

    // 3. capture the end offsets of every batch (runs on the driver, once per batch)
    var offsetRanges: Array[OffsetRange] = null // driver-side state
    val inputOffsetDstream: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // 4. message value -> JSONObject
    val jsonObjDstream: DStream[JSONObject] = inputOffsetDstream.map { record =>
      JSON.parseObject(record.value())
    }

    // 5. split the stream: fact rows -> Kafka, dimension rows -> Redis.
    // Creating a Jedis client on the driver and using it inside the RDD
    // closure would not work (not serializable), hence per-batch/per-partition clients.
    jsonObjDstream.foreachRDD { rdd =>
      // Dynamic table configuration: the dim/fact table name lists live in
      // Redis so they can be changed without redeploying the job.
      //   type: set   keys: DIM_TABLES / FACT_TABLES   values: table names
      //   write api: sadd   read api: smembers   TTL: none
      // Re-read and re-broadcast every batch to pick up config changes.
      val jedisClientDriver: Jedis = RedisUtil.getJedisClient
      val (dimTableBC, factTableBC): (Broadcast[util.Set[String]], Broadcast[util.Set[String]]) =
        try {
          val dimTablesSet: util.Set[String] = jedisClientDriver.smembers("DIM_TABLES")
          println(s"检查维度表：$dimTablesSet")
          val factTablesSet: util.Set[String] = jedisClientDriver.smembers("FACT_TABLES")
          println(s"检查事实表：$factTablesSet")
          (ssc.sparkContext.broadcast(dimTablesSet), ssc.sparkContext.broadcast(factTablesSet))
        } finally {
          // close the driver-side connection even if smembers/broadcast throws
          jedisClientDriver.close()
        }

      // per-batch, executor-side work: use foreachPartition so each partition
      // opens exactly one Redis connection instead of one per record
      rdd.foreachPartition { jsonObjItr =>
        val jedisClient: Jedis = RedisUtil.getJedisClient // executor-side
        try {
          for (jsonObj <- jsonObjItr) {
            val tableName: String = jsonObj.getString("table")
            val opt: Option[String] = optFlag(jsonObj.getString("type"))

            // 5.1 fact tables: fan each data row out to topic DWD_[TABLE_NAME]_[I/U]
            if (factTableBC.value.contains(tableName)) {
              for (flag <- opt) { // skip records with unsupported/missing op type
                val topicName = s"DWD_${tableName.toUpperCase}_$flag"
                val dataJsonArr: JSONArray = jsonObj.getJSONArray("data")
                for (i <- 0 until dataJsonArr.size()) {
                  val dataJsonObj: JSONObject = dataJsonArr.getJSONObject(i)
                  // keyed by row id so updates for one row land in one partition
                  MyKafkaSink.send(topicName, dataJsonObj.getString("id"), dataJsonObj.toJSONString)
                }
              }
            }

            // 5.2 dimension tables: cache each row in Redis
            //   type: string   key: DIM:[TABLE_NAME]:[id]   value: row json
            //   write api: set   read api: get   TTL: none
            if (dimTableBC.value.contains(tableName)) {
              if (opt.isDefined) { // only INSERT/UPDATE rows are cached
                val dataJsonArr: JSONArray = jsonObj.getJSONArray("data")
                for (i <- 0 until dataJsonArr.size()) {
                  val dataJsonObj: JSONObject = dataJsonArr.getJSONObject(i)
                  val dimKey = s"DIM:${tableName.toUpperCase}:${dataJsonObj.getString("id")}"
                  jedisClient.set(dimKey, dataJsonObj.toJSONString)
                }
              }
            }
          }
        } finally {
          jedisClient.close()  // release the connection even if a record throws
          MyKafkaSink.flush()  // push any buffered producer records before the partition ends
        }
      }

      // 6. commit offsets after the batch has been written out (at-least-once)
      OffsetManager.saveOffset(topic, groupId, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }

}
