package com.atguigu.realtime.dwd

import java.util.Properties

import com.atguigu.realtime.BaseAppV2
import com.atguigu.realtime.bean._
import com.atguigu.realtime.util.OffsetManager
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.OffsetRange
import org.json4s.jackson.JsonMethods

import scala.collection.mutable.ListBuffer

/**
 * Author atguigu
 * Date 2020/11/16 15:14
 * Writes the dimension-table data from Kafka (ods_* topics) into HBase.
 *
 * All ODS dimension topics are consumed into a single stream; each record is
 * routed to a different HBase (Phoenix) table according to its source topic.
 */
object DwdDimApp_2 extends BaseAppV2 {
    
    override val master: String = "local[*]"
    override val appName: String = "DwdDimApp"
    override val groupId: String = "DwdDimApp"
    // One stream consumes every ODS dimension topic; records are routed to a
    // per-dimension Phoenix table by topic name inside run().
    override val topics: Seq[String] = Seq(
        "ods_user_info",
        "ods_sku_info",
        "ods_spu_info",
        "ods_base_category3",
        "ods_base_province",
        "ods_base_trademark")
    
    // Batch interval in seconds. (Member name "bachTime" is declared in
    // BaseAppV2 and cannot be renamed here without touching the base class.)
    override val bachTime: Int = 3
    
    // ZooKeeper quorum / Phoenix JDBC URL, previously duplicated in three places.
    private val zkQuorum: String = "hadoop162,hadoop163,hadoop164:2181"
    private val phoenixUrl: String = s"jdbc:phoenix:$zkQuorum"
    
    /**
     * Filters the stream batch down to one ODS topic, parses each JSON record
     * into a bean of type `T`, and bulk-writes the result to a Phoenix table.
     *
     * @param rdd      batch of (topic, json-string) pairs from Kafka
     * @param odsTopic source topic whose records should be written
     * @param tableName target Phoenix table
     * @param cols     column names of the target table, in write order
     * @param mf       manifest used by json4s to materialize `T`
     */
    def saveToPhoenix[T <: Product](rdd: RDD[(String, String)],
                                    odsTopic: String,
                                    tableName: String,
                                    cols: Seq[String])(implicit mf: scala.reflect.Manifest[T]): Unit = {
        import org.apache.phoenix.spark._
        rdd
            .filter(_._1 == odsTopic)
            .map {
                case (_, content) => // topic already fixed by the filter above
                    val f = org.json4s.DefaultFormats
                    JsonMethods.parse(content).extract[T](f, mf)
            }
            .saveToPhoenix(
                tableName,
                cols, // column names of the Phoenix table
                zkUrl = Option(zkQuorum))
    }
    
    /**
     * Per-batch processing: writes each dimension topic to its Phoenix table,
     * builds the denormalized sku dimension by joining sku records against the
     * spu/category3/trademark tables just written, then commits Kafka offsets.
     */
    override def run(ssc: StreamingContext,
                     sourceStream: DStream[(String, String)],
                     offsetRanges: ListBuffer[OffsetRange]): Unit = {
        
        val spark: SparkSession = SparkSession.builder()
            .config(ssc.sparkContext.getConf)
            .getOrCreate()
        import org.apache.phoenix.spark._
        import spark.implicits._
        
        sourceStream
            .foreachRDD((rdd: RDD[(String, String)]) => {
                // The batch is consumed once per dimension table below, so
                // cache it to avoid re-reading from Kafka for every pass.
                rdd.cache()
                
                // Route each topic's records to its own Phoenix table.
                saveToPhoenix[ProvinceInfo](rdd,
                    "ods_base_province",
                    "gmall_province_info",
                    Seq("ID", "NAME", "AREA_CODE", "ISO_CODE")
                )
                
                saveToPhoenix[UserInfo](rdd,
                    "ods_user_info",
                    "gmall_user_info",
                    Seq("ID", "USER_LEVEL", "BIRTHDAY", "GENDER", "AGE_GROUP", "GENDER_NAME")
                )
                
                saveToPhoenix[SpuInfo](rdd,
                    "ods_spu_info",
                    "gmall_spu_info",
                    Seq("ID", "SPU_NAME"))
                
                saveToPhoenix[BaseCategory3](rdd,
                    "ods_base_category3",
                    "gmall_base_category3",
                    Seq("ID", "NAME", "CATEGORY2_ID"))
                
                saveToPhoenix[BaseTrademark](rdd,
                    "ods_base_trademark",
                    "gmall_base_trademark",
                    Seq("ID", "TM_NAME"))
                
                // sku: denormalize by joining against the dimension tables that
                // were just (re)written above.
                // NOTE(review): these are full-table reads every batch; fine for
                // small dimension tables, revisit if they grow large.
                spark.read.jdbc(phoenixUrl, "gmall_spu_info", new Properties()).createOrReplaceTempView("spu")
                spark.read.jdbc(phoenixUrl, "gmall_base_category3", new Properties()).createOrReplaceTempView("category3")
                spark.read.jdbc(phoenixUrl, "gmall_base_trademark", new Properties()).createOrReplaceTempView("tm")
                
                // Build the sku temp view from this batch's ods_sku_info records.
                rdd
                    .filter(_._1 == "ods_sku_info")
                    .map {
                        case (_, content) =>
                            implicit val f = org.json4s.DefaultFormats
                            JsonMethods.parse(content).extract[SkuInfo]
                    }
                    .toDS()
                    .createOrReplaceTempView("sku")
                // NOTE(review): inner joins drop sku rows whose spu/category3/tm
                // dimension has not arrived yet — confirm that is acceptable.
                spark
                    .sql(
                        """
                          |select
                          |    sku.id as id,
                          |    sku.spu_id spu_id,
                          |    sku.price price,
                          |    sku.sku_name sku_name,
                          |    sku.tm_id  tm_id,
                          |    sku.category3_id  category3_id,
                          |    sku.create_time  create_time,
                          |    category3.name  category3_name,
                          |    spu.spu_name  spu_name,
                          |    tm.tm_name  tm_name
                          |from sku
                          |join spu on sku.spu_id=spu.id
                          |join category3 on sku.category3_id=category3.id
                          |join tm on sku.tm_id=tm.id
                          |""".stripMargin)
                    .as[SkuInfo]
                    .rdd
                    .saveToPhoenix("gmall_sku_info",
                        Seq("ID", "SPU_ID", "PRICE", "SKU_NAME", "TM_ID", "CATEGORY3_ID", "CREATE_TIME", "CATEGORY3_NAME", "SPU_NAME", "TM_NAME"),
                        zkUrl = Option(zkQuorum))
                
                // Commit offsets only after every write above has succeeded.
                OffsetManager.saveOffsets(offsetRanges, groupId, topics)
                
                // Fix: release the cached batch; the original never unpersisted,
                // leaking cached blocks for the lifetime of the streaming app.
                rdd.unpersist(false)
            })
    }
    
    
}
