package com.atguigu.realtime.dwd

import com.atguigu.realtime.BaseApp
import com.atguigu.realtime.bean.{OrderInfo, ProvinceInfo, UserInfo, UserStatus}
import com.atguigu.realtime.util.{MyKafkaUtil, OffsetManager, SparkSqlUtil}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.OffsetRange
import org.json4s.jackson.{JsonMethods, Serialization}

import scala.collection.mutable.ListBuffer

/**
 * Author atguigu
 * Date 2020/11/16 14:15
 * Detects each user's first order: first-order user ids are persisted to HBase
 * and the enriched order records are published to the Kafka DWD layer.
 */
object DwdOrderInfoApp extends BaseApp {
    override val master: String = "local[2]"
    override val appName: String = "DwdOrderInfoApp"
    override val groupId: String = "DwdOrderInfoApp"
    override val topic: String = "ods_order_info"
    override val bachTime: Int = 3
    
    /**
     * Enriches each order with user and province dimension data, flags each
     * user's first order, persists first-order user ids to HBase (via Phoenix)
     * and publishes the enriched orders to the Kafka DWD topic.
     *
     * @param ssc          streaming context driving this job
     * @param sourceStream raw JSON order records from the ODS Kafka topic
     * @param offsetRanges offsets consumed this batch; committed after the batch is fully written out
     */
    override def run(ssc: StreamingContext,
                     sourceStream: DStream[String],
                     offsetRanges: ListBuffer[OffsetRange]): Unit = {
        val spark: SparkSession = SparkSession.builder()
            .config(ssc.sparkContext.getConf)
            .getOrCreate()
        import spark.implicits._
        
        // Parse each incoming JSON string into an OrderInfo bean.
        val orderInfoStream = sourceStream
            .map(str => {
                // toLong / toDouble are custom json4s serializers inherited from BaseApp — TODO confirm.
                implicit val f = org.json4s.DefaultFormats + toLong + toDouble
                JsonMethods.parse(str).extract[OrderInfo]
            })
        
        // 1. Enrich every order with its user and province dimension rows.
        val orderInfoStreamWithAllDim = orderInfoStream.transform(rdd => {
            // The RDD is traversed several times below; cache once to avoid recomputation.
            rdd.cache()
            // Collect the DISTINCT ids present in this batch so the dimension queries
            // fetch only the rows we need. distinct() keeps the IN (...) list short
            // when one user/province appears in several orders of the same batch.
            // mkString("','") yields e.g. 1','2','3 — the inner part of in('...').
            val userIds = rdd.map(_.user_id).distinct().collect().mkString("','")
            val provinceIds = rdd.map(_.province_id).distinct().collect().mkString("','")
            
            // Load the two dimension tables, keyed by id for the joins below.
            val userInfoSql = s"select * from gmall_user_info where id in('${userIds}')"
            val userInfoRDD = SparkSqlUtil
                .getRDD[UserInfo](spark, userInfoSql)
                .map(user => (user.id, user))
            
            val provinceSql = s"select * from gmall_province_info where id in('${provinceIds}')"
            val provinceRDD = SparkSqlUtil
                .getRDD[ProvinceInfo](spark, provinceSql)
                .map(pro => (pro.id, pro))
            
            // 2. Join the orders against both dimensions:  x join y on x.id = y.xid
            rdd
                .map(info => (info.user_id.toString, info))
                .join(userInfoRDD) // join with the user dimension
                .map {
                    case (userId, (orderInfo, userInfo)) => // fill in user attributes
                        orderInfo.user_age_group = userInfo.age_group
                        orderInfo.user_gender = userInfo.gender_name
                        
                        (orderInfo.province_id.toString, orderInfo)
                }
                .join(provinceRDD) // join with the province dimension
                .map {
                    case (proId, (orderInfo, proInfo)) => // fill in province attributes
                        orderInfo.province_name = proInfo.name
                        orderInfo.province_area_code = proInfo.area_code
                        orderInfo.province_iso_code = proInfo.iso_code
                        orderInfo
                }
        })
        
        // 2. First-order detection: how do we know an order is a user's first?
        val resultStream = orderInfoStreamWithAllDim.transform(rdd => {
            // HBase DDL:
            // create table user_status(user_id varchar primary key, is_first_order boolean) SALT_BUCKETS = 5;
            // 1. Read the known-user status rows from HBase for this batch's users.
            rdd.cache()
            val userIds: String = rdd.map(_.user_id).distinct().collect().mkString("','")
            val sql = s"select * from user_status where user_id in ('$userIds')"
            // Users that already have a status row have ordered before.
            // Materialize as a Set so the per-record membership test below is O(1).
            val oldUserIds: Set[String] = SparkSqlUtil
                .getRDD[UserStatus](spark, sql)
                .map(_.user_id)
                .collect()
                .toSet
            
            rdd
                .map(orderInfo => {
                    // First order iff the user has no status row in HBase yet.
                    orderInfo.is_first_order = !oldUserIds.contains(orderInfo.user_id.toString)
                    (orderInfo.user_id, orderInfo)
                })
                .groupByKey() // a new user ordering several times in one batch has ALL orders flagged above
                .flatMap {
                    case (_, it) =>
                        val list = it.toList
                        // All orders of one user carry the same flag, so checking the head suffices.
                        if (list.head.is_first_order) {
                            // Keep the flag only on the earliest order; clear it on the rest.
                            val listOrdered: List[OrderInfo] = list.sortBy(_.create_time)
                            listOrdered.head :: listOrdered.tail.map(info => {
                                info.is_first_order = false
                                info
                            })
                        } else it
                }
        })
        
        // 3. Persist the results: status flags to HBase, enriched orders to Kafka.
        resultStream.foreachRDD(rdd => {
            import org.apache.phoenix.spark._
            // Used twice below (Phoenix write + Kafka write); cache to avoid recomputation.
            rdd.cache()
            // NOTE(review): debug output — collects the entire batch to the driver;
            // remove before running in production.
            rdd.collect().foreach(println)
            
            // a: record first-order users in HBase so later batches treat them as old users
            rdd
                .filter(_.is_first_order)
                .map(orderInfo => UserStatus(orderInfo.user_id.toString, true))
                .saveToPhoenix("user_status",
                    Seq("USER_ID", "IS_FIRST_ORDER"),
                    zkUrl = Option("hadoop162,hadoop163,hadoop164:2181"))
            
            // b: publish the enriched orders to the Kafka DWD layer
            rdd.foreachPartition(it => {
                val producer: KafkaProducer[String, String] = MyKafkaUtil.getProducer
                // The Formats instance is loop-invariant; build it once per partition.
                implicit val f = org.json4s.DefaultFormats
                
                it.foreach(orderInfo => {
                    val content = Serialization.write(orderInfo)
                    producer.send(new ProducerRecord[String, String]("dwd_order_info", content))
                })
                
                producer.close()
            })
            
            // Commit offsets only after the whole batch has been written out
            // (at-least-once delivery: a crash before this line replays the batch).
            OffsetManager.saveOffsets(offsetRanges, groupId, topic)
        })
        
    }
}


