package streaming

import com.alibaba.fastjson.JSON
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, broadcast}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import utils.{ConfParser, Jedis, OffsetManager}

/**
  * Created by Administrator on 2018/03/25.
  */
object CMCCApp {
  // Suppress noisy Spark INFO logging.
  Logger.getLogger("org").setLevel(Level.WARN)

  /**
    * Entry point: consumes recharge-notification events from Kafka, computes
    * per-batch business KPIs with Spark SQL (overall totals, hourly trend,
    * per-province success distribution, per-minute orders/amount), writes the
    * aggregates to Redis, and finally persists the consumed Kafka offsets to
    * Redis (manual offset management, at-least-once semantics).
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("CMCC数据监控平台")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.rdd.compress", "true")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("spark.streaming.kafka.maxRatePerPartition", "5000")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(1))

    // Broadcast the province-code -> province-name dictionary
    // (tab-separated file, one "code<TAB>name" pair per line).
    val provinceDict = ssc.sparkContext.textFile(ConfParser.pcode2NamePath).map(line => {
      val split = line.split("\t")
      (split(0), split(1))
    }).collect().toMap
    // Renamed from `broadcast`, which shadowed the imported
    // org.apache.spark.broadcast package.
    val provinceBroadcast = ssc.sparkContext.broadcast(provinceDict)

    // Kafka topic(s) and consumer group to read from.
    val topics = Array(ConfParser.config.getString("cmcc.kafka.topics"))
    val groupId = ConfParser.config.getString("cmcc.kafka.group")

    // Kafka consumer configuration. Auto-commit is disabled because offsets
    // are stored manually in Redis only after a batch is fully processed.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> ConfParser.config.getString("cmcc.kafka.brokers"),
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // First start (no offsets in Redis): subscribe from the earliest offset.
    // Otherwise: resume exactly from the offsets persisted by a previous run.
    val dbOffset = OffsetManager(topics, groupId)
    val stream: InputDStream[ConsumerRecord[String, String]] = if (dbOffset.isEmpty) {
      KafkaUtils.createDirectStream(
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
      )
    } else {
      KafkaUtils.createDirectStream(
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Assign[String, String](dbOffset.keySet, kafkaParams, dbOffset)
      )
    }

    stream.foreachRDD(rdd => {
      if (!rdd.isEmpty()) {

        // Capture this batch's offset ranges before any transformation.
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Obtain (or reuse) the SparkSession for SQL over this batch.
        val spark = SparkSession.builder.config(rdd.sparkContext.getConf).getOrCreate()
        import spark.implicits._

        // Parse JSON payloads and keep only recharge-notification requests.
        val logsDF = rdd.map(crd => JSON.parseObject(crd.value()))
          .filter(obj => obj.getString("serviceName")
            .equalsIgnoreCase("reChargeNotifyReq"))
          .map(obj => {
            // Order status: "0000" means a successful recharge.
            val busr = obj.getString("bussinessRst")
            // Recharge amount.
            val money = obj.getDouble("chargefee")
            // requestId starts with the request timestamp;
            // receiveNotifyTime is when the notification arrived.
            val reqTime = obj.getString("requestId")
            val renfTime = obj.getString("receiveNotifyTime")
            // Province code of the order.
            val provinceId = obj.getString("provinceCode")
            // (successFlag, amount, elapsedTime); failed orders contribute zeros.
            val busrAndmoneyAndcostTime: (Int, Double, Long) = if (busr.equals("0000")) {
              // NOTE(review): substring(0, 18) assumes an 18-char timestamp prefix
              // in requestId — confirm against TimeUtils.caluCostTime's expected format.
              val costTime = utils.TimeUtils.caluCostTime(reqTime.substring(0, 18), renfTime)
              (1, money, costTime)
            } else {
              (0, 0, 0)
            }
            // Event-time components derived from the request timestamp.
            val day = reqTime.substring(0, 8)
            val hour = reqTime.substring(8, 10)
            val minutes = reqTime.substring(10, 12)
            // (provinceId, totalOrder, successOrder, amount, elapsed, day, hour, minute)
            (provinceId, 1, busrAndmoneyAndcostTime._1, busrAndmoneyAndcostTime._2, busrAndmoneyAndcostTime._3, day, hour, minutes)
          }).toDF("provinceId", "zOrder", "sOrder", "money", "costTime", "day", "hour", "minutes")

        // Cache once: the view is queried four times below.
        logsDF.cache()
        // BUG FIX: the view was registered as "Project_logs4" while three of the
        // four queries selected from "logs"; a single consistent name is used now.
        logsDF.createOrReplaceTempView("logs")

        // Overall KPIs per day: total orders, successful orders,
        // recharged amount, accumulated recharge time.
        spark.sql(
          """
            |select day,
            |sum(zOrder) total, sum(sOrder) succ, sum(money) totalMoney, sum(costTime) totalTime
            |from logs group by day
          """.stripMargin)
          .foreachPartition(itr => {
            val jedis = Jedis.getJedis
            itr.foreach(row => {
              jedis.hincrBy("A-"+row.getAs[String]("day"), "succ", row.getAs[Long]("succ"))
              jedis.hincrByFloat("A-"+row.getAs[String]("day"), "money", row.getAs[Double]("totalMoney"))
              jedis.hincrBy("A-"+row.getAs[String]("day"), "ttime", row.getAs[Long]("totalTime"))
              jedis.hincrBy("A-"+row.getAs[String]("day"), "total", row.getAs[Long]("total"))
            })
            jedis.close()
          })

        // Real-time recharge trend per hour.
        spark.sql(
          """
            |select day, hour,sum(zOrder) total, sum(sOrder) succ from logs group by day, hour
          """.stripMargin)
          .foreachPartition(itr => {
            val jedis = Jedis.getJedis
            itr.foreach(row => {
              jedis.hincrBy("A-"+row.getAs[String]("day"), "t-"+row.getAs[String]("hour"), row.getAs[Long]("total"))
              jedis.hincrBy("A-"+row.getAs[String]("day"), "s-"+row.getAs[String]("hour"), row.getAs[Long]("succ"))
            })
            jedis.close()
          })

        // Distribution of successful orders per province.
        // BUG FIX: the query grouped by/read column "pCode", which does not
        // exist in the DataFrame; the column is "provinceId".
        spark.sql(
          """
            |select day, provinceId, sum(sOrder) succ from logs group by day, provinceId
          """.stripMargin)
          .foreachPartition(itr => {
            val jedis = Jedis.getJedis
            itr.foreach(row => {
              // Map the province code to its display name; fall back to the raw code.
              val pname = provinceBroadcast.value.getOrElse(row.getAs[String]("provinceId"), row.getAs[String]("provinceId"))
              jedis.hincrBy("B-"+row.getAs[String]("day"), pname, row.getAs[Long]("succ"))
            })
            jedis.close()
          })

        // Successful orders and amount per minute (key expires after 24h).
        spark.sql(
          """
            |select day, hour, minutes, sum(sOrder) succ, sum(money) totalMoney from logs group by day, hour, minutes
          """.stripMargin)
          .foreachPartition(itr => {
            val jedis = Jedis.getJedis
            itr.foreach(row => {
              val key = "C-"+row.getAs[String]("day")+row.getAs[String]("hour")+row.getAs[String]("minutes")
              jedis.hincrBy(key, "succ", row.getAs[Long]("succ"))
              jedis.hincrByFloat(key, "money", row.getAs[Double]("totalMoney"))
              // Expire minute-level keys after one day.
              jedis.expire(key, 24 * 60 * 60)
            })
            jedis.close()
          })

        // Release the cached batch now that all queries are done; without this
        // the cached DataFrames accumulate across micro-batches.
        logsDF.unpersist()

        // Persist offsets to Redis only after the whole batch was processed
        // (manual commit -> at-least-once delivery).
        val jedis = Jedis.getJedis
        offsetRanges.foreach(or => {
          jedis.hset(or.topic+"-"+groupId, or.partition.toString, or.untilOffset.toString)
        })
        jedis.close()
      }
    })

    ssc.start()
    ssc.awaitTermination()
  }
}
