import com.alibaba.fastjson.JSON
import com.typesafe.config.ConfigFactory
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, rdd}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaCluster, KafkaUtils}
import scalikejdbc._
import scalikejdbc.config.DBs
import utill.{JedisUtils, Utils}

object DataMontior {
    // Uncomment to silence verbose framework logging:
    //Logger.getLogger("org.apache").setLevel(Level.WARN)

    /**
      * Real-time recharge monitoring job.
      *
      * Consumes recharge-notification messages from Kafka with the direct
      * (receiverless) API, aggregates order count / success count / fee /
      * elapsed time into Redis hashes, and persists the consumed offsets to
      * MySQL (table `streamingOffset`) so the job resumes exactly where it
      * stopped after a restart.
      */
    def main(args: Array[String]): Unit = {

      // --- Kafka consumer configuration (application.conf via Typesafe Config) ---
      val load = ConfigFactory.load()
      val groupId = load.getString("kafka.group.id")
      val kafkaParams = Map(
        "metadata.broker.list" -> load.getString("kafka.broker.list"),
        "group.id" -> groupId,
        "auto.offset.reset" -> "smallest" // only consulted when no stored offsets exist
      )
      val topics = load.getString("kafka.topics").split(",").toSet

      // --- StreamingContext with a 2-second micro-batch interval ---
      val sparkConf = new SparkConf()
      sparkConf.setMaster("local[*]")
      sparkConf.setAppName("实时统计")
      val ssc = new StreamingContext(sparkConf, Seconds(2))

      // --- Recover previously committed offsets for this consumer group from MySQL ---
      DBs.setup() // initialize scalikejdbc connection pool
      val fromOffsets: Map[TopicAndPartition, Long] = DB.readOnly { implicit session =>
        sql"select * from streamingOffset where groupid=?".bind(groupId).map { rs =>
          (TopicAndPartition(rs.string("topic"), rs.int("partitions")), rs.long("offset"))
        }.list().apply()
      }.toMap

      val stream = if (fromOffsets.isEmpty) {
        // First launch: no stored offsets, start according to "auto.offset.reset".
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
      } else {
        // Restart: validate the stored offsets against the earliest offsets the
        // Kafka cluster still retains. A stored offset older than the cluster's
        // earliest points at purged data and would fail the job, so bump it up.
        val kafkaCluster = new KafkaCluster(kafkaParams)
        val earliestLeaderOffsets = kafkaCluster.getEarliestLeaderOffsets(fromOffsets.keySet)
        val checkedOffsets: Map[TopicAndPartition, Long] =
          if (earliestLeaderOffsets.isRight) {
            val clusterEarliest = earliestLeaderOffsets.right.get
            fromOffsets.map { case (tp, savedOffset) =>
              // Fall back to the saved offset when the cluster has no entry for this partition.
              val earliest = clusterEarliest.get(tp).map(_.offset).getOrElse(savedOffset)
              tp -> math.max(savedOffset, earliest)
            }
          } else {
            // Could not reach the partition leaders; optimistically trust the stored offsets.
            fromOffsets
          }

        val messageHandler = (mm: MessageAndMetadata[String, String]) => (mm.key(), mm.message())
        // BUG FIX: the validated `checkedOffsets` were previously computed but ignored --
        // the stream was created from the raw `fromOffsets`, defeating the whole check.
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
          ssc, kafkaParams, checkedOffsets, messageHandler)
      }

      // With the direct API the offset bookkeeping lives on the Driver; no receiver/WAL involved.
      stream.foreachRDD(rdd => {
        rdd.foreach(println)

        // Must be read before any transformation, while `rdd` is still the KafkaRDD.
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        val baseData = rdd.map(t => JSON.parseObject(t._2))
          .filter(_.getString("serviceName").equalsIgnoreCase("reChareNotifyReq"))
          .map(jsObj => {
            val result = jsObj.getString("bussinessRst") // "0000" means the recharge succeeded
            val succeeded = result.equals("0000")
            val fee: Double = if (succeeded) jsObj.getDouble("chargefee") else 0
            val isSucc: Double = if (succeeded) 1 else 0
            val receiveTime = jsObj.getString("receiveNotifyTime")
            // NOTE(review): requestId is assumed to start with a yyyyMMddHHmm... timestamp
            // (substring(0,8)=day, (0,10)=hour, (0,12)=minute) -- confirm with producers.
            val startTime = jsObj.getString("requestId")
            val pCode = jsObj.getString("provinceCode")
            // Elapsed time between request and notification; 0 for failed orders.
            val costime = if (succeeded) Utils.caculateRqt(startTime, receiveTime) else 0
            ("A-" + startTime.substring(0, 8),                        // day-level Redis key, e.g. "A-20170412"
              startTime.substring(0, 10),                             // hour bucket
              List[Double](1, isSucc, fee, costime.toDouble),         // [orders, successes, fee, elapsed]
              pCode,                                                  // province code
              startTime.substring(0, 12))                             // minute bucket
          })
        // `baseData` feeds two independent actions below; cache so the Kafka read
        // and JSON parsing are not repeated for each of them.
        baseData.cache()

        /*
         * Real-time report -- business overview:
         * network-wide order count, recharge amount, success count and total
         * elapsed time, accumulated per day into the "A-yyyyMMdd" Redis hash.
         */
        baseData.map(t => (t._1, t._3)).reduceByKey { (list1, list2) =>
          (list1 zip list2).map(x => x._1 + x._2)
        }.foreachPartition(itr => {
          val client = JedisUtils.getJedisClient()
          try {
            itr.foreach(tp => {
              client.hincrBy(tp._1, "total", tp._2(0).toLong)
              client.hincrBy(tp._1, "succ", tp._2(1).toLong)
              client.hincrByFloat(tp._1, "money", tp._2(2))
              client.hincrBy(tp._1, "timer", tp._2(3).toLong)
              client.expire(tp._1, 60 * 60 * 24 * 2) // keep keys for 2 days
            })
          } finally {
            client.close() // always return the connection, even if an increment throws
          }
        })

        // Per-minute distribution: success count and amount keyed "C-yyyyMMddHHmm".
        baseData.map(t => ("C-" + t._5, t._3)).reduceByKey { (list1, list2) =>
          (list1 zip list2).map(x => x._1 + x._2)
        }.foreachPartition(itr => {
          val client = JedisUtils.getJedisClient()
          try {
            itr.foreach(tp => {
              client.hincrBy(tp._1, "succ", tp._2(1).toLong)
              client.hincrByFloat(tp._1, "money", tp._2(2))
              client.expire(tp._1, 60 * 60 * 24 * 2)
            })
          } finally {
            client.close()
          }
        })

        // Persist the consumed offsets after the aggregations.
        // REPLACE = delete-if-exists then insert, so one row per topic/partition/group.
        offsetRanges.foreach(osr => {
          DB.autoCommit { implicit session =>
            sql"REPLACE INTO streamingOffset(topic, groupid, partitions, offset) VALUES(?,?,?,?)"
              .bind(osr.topic, groupId, osr.partition, osr.untilOffset).update().apply()
          }
        })

        // Release the cached batch before the next micro-batch arrives.
        baseData.unpersist()
      })

      // Results go to Redis; offsets go to MySQL (both handled inside foreachRDD above).
      ssc.start()
      ssc.awaitTermination()

  }
}
