package com.zlm.realtime.dim

import com.alibaba.fastjson.serializer.SerializeConfig
import com.alibaba.fastjson.{JSON, JSONObject}
import com.zlm.realtime.bean.{OrderInfo, ProvinceInfo, UserInfo, UserStatus}
import com.zlm.realtime.utils._
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.text.SimpleDateFormat
import java.util.Date
import scala.collection.mutable.ListBuffer

/**
 * Author: Harbour 
 * Date: 2021-04-15 11:47
 * Desc:
 */
/**
 * Streaming job that enriches ODS order events into the DWD layer.
 *
 * Pipeline per 5s micro-batch:
 *   Kafka (ods_order_info) -> parse -> first-order flagging (Phoenix USER_STATUS)
 *   -> same-batch correction -> province join (broadcast) -> user join
 *   -> persist first-order users to Phoenix, bulk-index to ES, forward to Kafka
 *   (dwd_order_info) -> commit Kafka offsets to Redis.
 *
 * Offsets are committed only after the batch succeeds, giving at-least-once
 * semantics; downstream sinks are expected to be idempotent.
 */
object OrderInfoApp {

    def main(args: Array[String]): Unit = {

        // step 1. Build the streaming context with a 5-second batch interval.
        val conf: SparkConf = new SparkConf().setAppName("Order-Info").setMaster("local[*]")
        val ssc = new StreamingContext(conf, Seconds(5))

        // step 2. Create the Kafka input stream, resuming from offsets previously
        // saved in Redis (empty map => consume from the group's default position).
        val topic = "ods_order_info"
        val groupId = "ods_order_group"

        val offsetMap: Map[TopicPartition, Long] = MyOffsetUtils.getOffsetFromRedis(topic, groupId)
        val kafkaInputDStream: InputDStream[ConsumerRecord[String, String]] =
            MyKafkaUtils.getKafkaStream(topic, ssc, offsetMap, groupId)

        // step 3. Capture each batch's offset ranges on the driver so they can be
        // committed to Redis only after the batch has fully processed (see step 9+).
        var ranges: Array[OffsetRange] = Array.empty
        val s1: DStream[ConsumerRecord[String, String]] = kafkaInputDStream.transform(
            (rdd: RDD[ConsumerRecord[String, String]]) => {
                ranges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
                rdd
            }
        )

        // step 4. Parse the canal JSON payload into OrderInfo and derive the
        // create_date / create_hour fields from create_time.
        val s2: DStream[OrderInfo] = s1.map(
            (record: ConsumerRecord[String, String]) => {
                val canalStr: String = record.value()
                val orderInfo: OrderInfo = JSON.parseObject(canalStr, classOf[OrderInfo])
                /* create_time format: 2021-04-15 11:11:11 */
                val createTimes: Array[String] = orderInfo.create_time.split(" ")
                orderInfo.create_date = createTimes(0)
                orderInfo.create_hour = createTimes(1).split(":")(0)

                orderInfo
            }
        )

        // step 5. Flag whether each order is the user's first order.
        // One Phoenix query per partition: look up which user ids already exist in
        // USER_STATUS (i.e. have consumed before). "0" = first order, "1" = not.
        val orderInfoDStream: DStream[OrderInfo] = s2.mapPartitions(
            (orderInfoItr: Iterator[OrderInfo]) => {
                val orderInfoList: List[OrderInfo] = orderInfoItr.toList
                if (orderInfoList.isEmpty) {
                    // Nothing in this partition -> skip the Phoenix round-trip.
                    orderInfoList.iterator
                } else {
                    // Join all user ids of this partition into one IN-list string.
                    val ids: String = orderInfoList.map((_: OrderInfo).user_id).mkString("','")

                    val sql = s"select USER_ID from USER_STATUS where USER_ID in ('$ids') "
                    val consumedUserIds: ListBuffer[String] = MyPhoenixUtils
                      .queryAll(sql)
                      .map((_: JSONObject).getString("USER_ID"))

                    // Any id found in USER_STATUS has consumed before => not a first order.
                    for (orderInfo <- orderInfoList) {
                        orderInfo.if_first_order =
                          if (consumedUserIds.contains(orderInfo.user_id.toString)) "1" else "0"
                    }

                    orderInfoList.iterator
                }
            }
        ).map(
            (orderInfo: OrderInfo) => (orderInfo.user_id, orderInfo)
        ).groupByKey(
            // Group by user id so step 6 can correct all of one user's orders together.
        ).flatMap {
            // step 6. Same-batch correction: if a user places several orders within one
            // micro-batch, only the chronologically first one may keep the "first order"
            // flag; the rest are demoted to "1".
            case (_, orderInfoItr) =>
                val orderInfoList: List[OrderInfo] = orderInfoItr.toList
                if (orderInfoList.size > 1) {
                    // BUG FIX: sortWith returns a NEW list; the original code discarded
                    // the sorted result and inspected the head of the UNSORTED list.
                    // Lexicographic order on "yyyy-MM-dd HH:mm:ss" equals time order.
                    val sortedOrders: List[OrderInfo] =
                        orderInfoList.sortBy((_: OrderInfo).create_time)
                    // If the earliest order is a first order, every later one is not.
                    if (sortedOrders.head.if_first_order == "0") {
                        for (i <- 1 until sortedOrders.size) {
                            sortedOrders(i).if_first_order = "1"
                        }
                    }
                    sortedOrders
                } else {
                    orderInfoList
                }
        }


        // step 7. Join province info onto each order. The province table is small,
        // so it is loaded once per batch on the driver and broadcast to executors.
        val orderInfoWithProvinceDStream: DStream[OrderInfo] = orderInfoDStream.transform(
            (rdd: RDD[OrderInfo]) => {

                // Load all provinces and index them by id.
                val sql = "select id, name, area_code, iso_code from mall_province_info"
                val provinceInfoMap: Map[String, ProvinceInfo] = MyPhoenixUtils.queryAll(sql).map(
                    (provinceJsonObj: JSONObject) => {
                        val provinceInfo: ProvinceInfo = ProvinceInfo(
                            provinceJsonObj.getString("ID"),
                            provinceJsonObj.getString("NAME"),
                            provinceJsonObj.getString("AREA_CODE"),
                            provinceJsonObj.getString("ISO_CODE")
                        )
                        (provinceInfo.id, provinceInfo)
                    }
                ).toMap

                val provinceInfoMapBroadcast: Broadcast[Map[String, ProvinceInfo]] =
                    ssc.sparkContext.broadcast(provinceInfoMap)

                // Merge province attributes into each order (left join: orders with an
                // unknown province id pass through unchanged).
                rdd.map {
                    (orderInfo: OrderInfo) => {
                        val provinceInfo: ProvinceInfo = {
                            provinceInfoMapBroadcast.value.getOrElse(orderInfo.province_id.toString, null)
                        }
                        if (provinceInfo != null) {
                            orderInfo.province_name = provinceInfo.name
                            orderInfo.province_area_code = provinceInfo.area_code
                            orderInfo.province_iso_code = provinceInfo.iso_code
                        }
                        orderInfo
                    }
                }
            }
        )

        // step 8. Join user info onto each order, one Phoenix query per partition.
        val orderInfoWithUserInfoDStream: DStream[OrderInfo] = orderInfoWithProvinceDStream.mapPartitions(
            (orderInfoIter: Iterator[OrderInfo]) => {
                val orderInfoList: List[OrderInfo] = orderInfoIter.toList
                if (orderInfoList.isEmpty) {
                    // Nothing in this partition -> skip the Phoenix round-trip.
                    orderInfoList.iterator
                } else {
                    val userIds: String = orderInfoList.map((_: OrderInfo).user_id).mkString("','")
                    val sql = s"select ID, USER_LEVEL, BIRTHDAY, GENDER, AGE_GROUP, GENDER_NAME from mall_user_info where ID in('$userIds')"

                    val userInfoJsonList: ListBuffer[JSONObject] = MyPhoenixUtils.queryAll(sql)

                    val userInfoMap: Map[String, UserInfo] = userInfoJsonList.map(
                        (userInfoObj: JSONObject) => {
                            (userInfoObj.getString("ID"),
                              JSON.toJavaObject(userInfoObj, classOf[UserInfo]))
                        }
                    ).toMap

                    // Left join: orders whose user id is not found pass through unchanged.
                    val orderInfoWithUserInfoList: List[OrderInfo] = orderInfoList.map(
                        (orderInfo: OrderInfo) => {
                            val userInfo: UserInfo = userInfoMap.getOrElse(orderInfo.user_id.toString, null)
                            if (userInfo != null) {
                                orderInfo.user_gender = userInfo.gender_name
                                orderInfo.user_age_group = userInfo.age_group
                            }
                            orderInfo
                        }
                    )

                    // Debug output of the fully enriched orders.
                    for (elem <- orderInfoWithUserInfoList) {
                        println(elem)
                    }

                    orderInfoWithUserInfoList.toIterator
                }
            }
        )

        orderInfoWithUserInfoDStream.foreachRDD(
            // step 9. Persist the "has consumed" state of first-order users to HBase
            // (via Phoenix) so future batches see them as non-first-order.
            (orderInfoRDD: RDD[OrderInfo]) => {
                import org.apache.phoenix.spark._

                // Cached because the RDD is consumed twice (Phoenix sink + ES/Kafka sink).
                orderInfoRDD.cache()

                // Keep only first orders ("0") ...
                orderInfoRDD.filter(
                    (_: OrderInfo).if_first_order.equals("0")
                ).map(
                    // ... and mark those users as consumed: UserStatus(userId, "1").
                    (orderInfo: OrderInfo) => {
                        UserStatus(orderInfo.user_id.toString, "1")
                    }
                ).saveToPhoenix( // upsert into HBase through Phoenix
                    tableName = "USER_STATUS",
                    cols = Seq("USER_ID", "IF_CONSUMED"),
                    new Configuration,
                    zkUrl = Some(MyPropertiesUtils.getProperty("zk.host"))
                )

                // step 10. Bulk-index the enriched orders into a per-day ES index.
                orderInfoRDD.foreachPartition(
                    (e: Iterator[OrderInfo]) => {
                        val orderInfoList: List[OrderInfo] = e.toList
                        val dt: String = new SimpleDateFormat("yyyy-MM-dd").format(new Date())
                        MyESUtils.bulkInsert(orderInfoList, "mall_order_info" + dt)

                        // step 11. Forward each enriched order to the DWD Kafka topic.
                        // SerializeConfig(true) lets fastjson serialize Scala fields.
                        for (orderInfo <- orderInfoList) {
                            MyKafkaSinkUtils.send(
                                topic = "dwd_order_info",
                                JSON.toJSONString(orderInfo, new SerializeConfig(true))
                            )
                        }
                    }
                )

                // Commit offsets last, after all sinks succeeded (at-least-once).
                MyOffsetUtils.saveOffsetToRedis(topic, groupId, ranges)
            }
        )

        ssc.start()
        ssc.awaitTermination()
    }

}
