package com.smile.realtime.dwd

import com.alibaba.fastjson.serializer.SerializeConfig
import com.alibaba.fastjson.{JSON, JSONObject}
import com.smile.realtime.bean.{OrderInfo, ProvinceInfo, UserStatus}
import com.smile.realtime.utils.{MyESUtil, MyKafkaSink, MyKafkaUtil, OffsetManagerUtil, PhoenixUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.text.SimpleDateFormat
import java.util.Date

object OrderInfoApp {

  /**
   * Entry point. Wires up the streaming pipeline:
   *
   *   Kafka(ods_order_info)
   *     -> parse JSON into OrderInfo (derive date/hour)
   *     -> first-order flagging via Phoenix/HBase user-status lookup
   *     -> same-batch first-order repair (only earliest order per user keeps the flag)
   *     -> province + user dimension enrichment (per-partition Phoenix lookups)
   *     -> sinks: ES daily index, Kafka(dwd_order_info), Phoenix USER_STATUS1483
   *     -> commit Kafka offsets to Redis (at-least-once)
   */
  def main(args: Array[String]): Unit = {
    // local[4]: needs at least one core more than the number of Kafka partitions consumed.
    val conf: SparkConf = new SparkConf().setAppName("OrderInfoApp").setMaster("local[4]")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(5))

    val topic = "ods_order_info"
    val groupId = "order_info_group"

    // Read the last committed Kafka offsets for this group from Redis so the
    // stream resumes exactly where the previous run stopped.
    val kafkaOffsetMap: Map[TopicPartition, Long] =
      OffsetManagerUtil.getOffset(topic, groupId)
    // If Redis holds offsets, resume from them; otherwise fall back to the
    // consumer default (latest).
    val recordDstream: InputDStream[ConsumerRecord[String, String]] =
      if (kafkaOffsetMap != null && kafkaOffsetMap.nonEmpty) {
        MyKafkaUtil.getKafkaStream(topic, ssc, kafkaOffsetMap, groupId)
      } else {
        MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
      }

    // Capture each batch's offset ranges on the driver. This must be the FIRST
    // transformation, while the underlying RDD is still a KafkaRDD and can be
    // cast to HasOffsetRanges.
    var offsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]
    val offsetDStream: DStream[ConsumerRecord[String, String]] =
      recordDstream.transform { rdd =>
        offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        rdd
      }

    // Convert each Kafka record's JSON payload into an OrderInfo and split
    // create_time ("yyyy-MM-dd HH:mm:ss") into create_date / create_hour for
    // downstream grouping.
    val orderInfoDStream: DStream[OrderInfo] = offsetDStream.map { record =>
      val orderInfo: OrderInfo = JSON.parseObject(record.value(), classOf[OrderInfo])
      val createTimeArr: Array[String] = orderInfo.create_time.split(" ")
      orderInfo.create_date = createTimeArr(0)
      orderInfo.create_hour = createTimeArr(1).split(":")(0)
      orderInfo
    }

    // First-order flagging.
    // A per-record Phoenix query would be issued far too frequently; instead we
    // batch one IN-query per partition.
    val orderInfoWithFirstFlagDStream: DStream[OrderInfo] =
      orderInfoDStream.mapPartitions { orderInfoItr =>
        // An iterator can only be traversed once, so materialize it first.
        val orderInfoList: List[OrderInfo] = orderInfoItr.toList
        if (orderInfoList.isEmpty) {
          // Guard: an empty partition would otherwise issue "... in('')".
          orderInfoList.iterator
        } else {
          // .distinct keeps repeated user ids out of the IN clause.
          val userIdList: List[Long] = orderInfoList.map(_.user_id).distinct
          val sql: String =
            s"select user_id,if_consumed from user_status1483 where user_id in('${userIdList.mkString("','")}')"
          val userStatusList: List[JSONObject] = PhoenixUtil.queryList(sql)
          // Ids of users that have consumed before (Phoenix returns upper-case columns).
          val consumedUserIdList: List[String] =
            userStatusList.map(_.getString("USER_ID"))
          for (orderInfo <- orderInfoList) {
            // user_id is a Long but the HBase result is a String — convert before comparing.
            if (consumedUserIdList.contains(orderInfo.user_id.toString)) {
              // Already consumed -> not a first order.
              orderInfo.if_first_order = "0"
            } else {
              orderInfo.if_first_order = "1"
            }
          }
          orderInfoList.iterator
        }
      }

    //==============4. Same-batch first-order repair=============
    // If one user places several orders inside a single 5s batch, HBase knows
    // nothing about them yet, so ALL of them get flagged as first orders above.
    // Only the earliest order of the batch may keep the flag.
    //4.1 Key by user id so we can group.
    val orderInfoWithKeyDStream: DStream[(Long, OrderInfo)] =
      orderInfoWithFirstFlagDStream.map(orderInfo => (orderInfo.user_id, orderInfo))
    //4.2 Group orders of the same user within the batch.
    val groupByKeyDStream: DStream[(Long, Iterable[OrderInfo])] =
      orderInfoWithKeyDStream.groupByKey()
    //4.3 Within each group, keep the first-order flag only on the earliest order.
    val orderInfoRealWithFirstFlagDStream: DStream[OrderInfo] =
      groupByKeyDStream.flatMap {
        case (_, orderInfoItr) =>
          val orderInfoList: List[OrderInfo] = orderInfoItr.toList
          // Repair is only needed when the user has more than one order in the batch.
          if (orderInfoList.size > 1) {
            // "yyyy-MM-dd HH:mm:ss" sorts chronologically as a plain string.
            val sortedList: List[OrderInfo] =
              orderInfoList.sortWith(_.create_time < _.create_time)
            if (sortedList.head.if_first_order == "1") {
              // BUGFIX: demote every order EXCEPT the chronologically earliest.
              // The original iterated the UNSORTED list from index 1, which could
              // demote the earliest order and leave a later one flagged as first.
              for (orderInfoNotFirst <- sortedList.tail) {
                orderInfoNotFirst.if_first_order = "0"
              }
            }
          }
          orderInfoList
      }

    //5.1 Province enrichment — per-partition Phoenix lookup.
    // (Alternative: query once per batch on the driver and broadcast the province
    //  map; pays off when partitions are many and the dimension table is small.)
    val orderInfoWithProvinceDStream: DStream[OrderInfo] =
      orderInfoRealWithFirstFlagDStream.mapPartitions { orderInfoItr =>
        val orderInfoList: List[OrderInfo] = orderInfoItr.toList
        if (orderInfoList.isEmpty) {
          // Guard against "... in('')" on empty partitions.
          orderInfoList.iterator
        } else {
          val provinceIdList: List[Long] = orderInfoList.map(_.province_id).distinct
          val sql: String =
            s"select id,name,area_code,iso_code from smile2020_province_info where id in('${provinceIdList.mkString("','")}')"
          // Each row: {"ID":"1","NAME":"...","AREA_CODE":"...","ISO_CODE":"..."}
          val provinceJsonList: List[JSONObject] = PhoenixUtil.queryList(sql)
          // id -> province JSON for O(1) lookup per order.
          val provinceJsonMap: Map[Long, JSONObject] =
            provinceJsonList.map(proJsonObj => (proJsonObj.getLongValue("ID"), proJsonObj)).toMap
          for (orderInfo <- orderInfoList) {
            // Orders whose province is missing from the dimension table keep null fields.
            provinceJsonMap.get(orderInfo.province_id).foreach { provinceObj =>
              orderInfo.province_iso_code = provinceObj.getString("ISO_CODE")
              orderInfo.province_name = provinceObj.getString("NAME")
              orderInfo.province_area_code = provinceObj.getString("AREA_CODE")
            }
          }
          orderInfoList.iterator
        }
      }

    //5.2 User enrichment — same per-partition pattern as 5.1.
    val orderInfoWithUserDStream: DStream[OrderInfo] =
      orderInfoWithProvinceDStream.mapPartitions { orderInfoItr =>
        val orderInfoList: List[OrderInfo] = orderInfoItr.toList
        if (orderInfoList.isEmpty) {
          orderInfoList.iterator
        } else {
          val userIdList: List[Long] = orderInfoList.map(_.user_id).distinct
          val sql: String =
            s"select id,user_level,birthday,gender,age_group,gender_name from smile2020_user_info where id in('${userIdList.mkString("','")}')"
          val userJsonList: List[JSONObject] = PhoenixUtil.queryList(sql)
          val userJsonMap: Map[Long, JSONObject] =
            userJsonList.map(userJsonObj => (userJsonObj.getLongValue("ID"), userJsonObj)).toMap
          for (orderInfo <- orderInfoList) {
            userJsonMap.get(orderInfo.user_id).foreach { userJsonObj =>
              orderInfo.user_gender = userJsonObj.getString("GENDER_NAME")
              orderInfo.user_age_group = userJsonObj.getString("AGE_GROUP")
            }
          }
          orderInfoList.iterator
        }
      }
    orderInfoWithUserDStream.print(1000)

    //==============3. Persist order state and sink=============
    orderInfoRealWithFirstFlagDStream.foreachRDD { rdd =>
      // The RDD is consumed twice below (Phoenix write + ES/Kafka write);
      // cache to avoid recomputing the whole lineage.
      rdd.cache()

      //3.1 Record every first-order user in HBase so later batches see them as consumed.
      val firstOrderRDD: RDD[OrderInfo] = rdd.filter(_.if_first_order == "1")
      // saveToPhoenix requires the element's fields to match the table's columns exactly.
      val firstOrderUserRDD: RDD[UserStatus] =
        firstOrderRDD.map(orderInfo => UserStatus(orderInfo.user_id.toString, "1"))
      import org.apache.phoenix.spark._
      firstOrderUserRDD.saveToPhoenix(
        "USER_STATUS1483",
        Seq("USER_ID", "IF_CONSUMED"),
        new Configuration(),
        Some("spider01,spider02,spider03:2181")
      )

      //3.2 Write orders to the daily ES index and forward them to the DWD topic.
      rdd.foreachPartition { orderInfoItr =>
        val orderInfoList: List[(String, OrderInfo)] =
          orderInfoItr.toList.map(orderInfo => (orderInfo.id.toString, orderInfo))
        val dateStr: String = new SimpleDateFormat("yyyyMMdd").format(new Date())
        MyESUtil.bulkInsert(orderInfoList, "smile2020_order_info_" + dateStr)
        for ((_, orderInfo) <- orderInfoList) {
          // fastjson needs SerializeConfig(true) to serialize Scala objects / case classes.
          MyKafkaSink.send("dwd_order_info",
            JSON.toJSONString(orderInfo, new SerializeConfig(true)))
        }
      }

      // Commit offsets to Redis only AFTER this batch's writes succeed
      // (at-least-once delivery: a crash before this line replays the batch).
      OffsetManagerUtil.saveOffset(topic, groupId, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
