
import com.alibaba.fastjson.{JSON, JSONObject}
import com.typesafe.config.ConfigFactory
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.scalatest.time.Second
import scalikejdbc._
import scalikejdbc.config.DBs
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaCluster, KafkaUtils}
import utils.{JedisUtils, Utils}

/**
  * author:CN.CDG
  * Date:2019/2/20
  * Time:21:20
  **/

object RTMonitorV2 {

  /**
    * Real-time recharge monitoring job: direct Kafka stream with manual
    * offset management in MySQL, per-batch aggregates written to Redis.
    *
    * Fixes vs. the previous version:
    *  - the per-batch processing was wrapped in a stray `rdd.map(...)` whose
    *    result was never used, so none of the aggregations ever executed
    *    (and nested RDD operations inside a transformation are illegal);
    *  - `fee` was typed `Any` (Double vs Int branches) and could not
    *    populate `List[Double]`; `1else 0` did not lex;
    *  - the per-province job incremented key `substring(1, 8)` but expired
    *    key `substring(0, 8)` — two different keys;
    *  - offsets were read from MySQL but never committed back;
    *  - `ssc.start()` / `ssc.awaitTermination()` were missing, so the
    *    streaming context never started.
    */
  def main(args: Array[String]): Unit = {
    // Reads application.conf from the classpath (resources).
    val load = ConfigFactory.load()
    val groupId = load.getString("kafka.group.id")

    // Direct-stream consumer parameters; "smallest" only applies when no
    // explicit offsets are supplied (i.e. the very first run).
    val kafkaParams = Map(
      "metadata.broker.list" -> load.getString("kafka.broker"),
      "group.id" -> groupId,
      "auto.offset.reset" -> "smallest"
    )
    // Topics are configured as a comma-separated list.
    val topics = load.getString("kafka.topics").split(",", -1).toSet

    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("RTMonitorV2")
    val ssc = new StreamingContext(sparkConf, Seconds(2))

    // Restore the last committed offsets for this consumer group from MySQL.
    DBs.setup()
    val fromOffsets: Map[TopicAndPartition, Long] = DB.readOnly { implicit session =>
      // scalikejdbc interpolation binds ${...} as a prepared-statement
      // parameter (the previous `?` placeholder + bind() mix was fragile).
      sql"select * from streaming_offset_24 where groupid = ${groupId}"
        .map(rs => (TopicAndPartition(rs.string("topic"), rs.int("partitions")), rs.long("offset")))
        .list()
        .apply()
    }.toMap

    val stream = if (fromOffsets.isEmpty) {
      // First start: no stored offsets, consume from "smallest".
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
    } else {
      // Restart: clamp each stored offset against the earliest offset still
      // retained by the brokers (log retention may have purged older data,
      // in which case consuming from the stored offset would fail).
      var checkedOffsets = Map[TopicAndPartition, Long]()
      val kafkaCluster = new KafkaCluster(kafkaParams)
      val earliestLeaderOffsets = kafkaCluster.getEarliestLeaderOffsets(fromOffsets.keySet)
      if (earliestLeaderOffsets.isRight) {
        val earliestByPartition = earliestLeaderOffsets.right.get
        checkedOffsets = fromOffsets.map { case (tp, storedOffset) =>
          (tp, math.max(storedOffset, earliestByPartition(tp).offset))
        }
      }
      val messageHandler = (mm: MessageAndMetadata[String, String]) => (mm.key(), mm.message())
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
        ssc, kafkaParams, checkedOffsets, messageHandler)
    }

    stream.foreachRDD { rdd =>
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Parse each message and keep only recharge-notification records.
      // Tuple layout: (dayKey "A-" + first 8 chars of requestId,
      //                first 10 chars of requestId (presumably yyyyMMddHH),
      //                metrics [count, succ, fee, costTime],
      //                provinceCode,
      //                first 12 chars of requestId).
      val baseData = rdd.map(t => JSON.parseObject(t._2))
        .filter(_.getString("serviceName").equalsIgnoreCase("reChargeNotifyReq"))
        .map(jsObj => {
          val result = jsObj.getString("bussinessRst")
          val succeeded = result.equals("0000")
          // Explicit Double: the old `Any`-typed value mixed Double and Int
          // branches and could not populate List[Double].
          val fee: Double = if (succeeded) jsObj.getDouble("chargefree") else 0.0
          val isSucc = if (succeeded) 1 else 0
          val receiveTime = jsObj.getString("receiveNotifyTime")
          val startTime = jsObj.getString("requestId")
          val pCode = jsObj.getString("provinceCode")
          // Elapsed time is only meaningful for successful recharges.
          val costime = if (succeeded) Utils.caculateRqt(startTime, receiveTime) else 0
          ("A-" + startTime.substring(0, 8), startTime.substring(0, 10),
            List[Double](1, isSucc, fee, costime.toDouble),
            pCode, startTime.substring(0, 12))
        })
        .cache() // reused by the three aggregations below

      // Nationwide per-day totals: order count, success count, fee, time.
      baseData.map(t => (t._1, t._3))
        .reduceByKey((l1, l2) => (l1 zip l2).map(x => x._1 + x._2))
        .foreachPartition { itr =>
          val client = JedisUtils.getJedisClient()
          try {
            itr.foreach { tp =>
              client.hincrBy(tp._1, "total", tp._2(0).toLong)
              client.hincrBy(tp._1, "succ", tp._2(1).toLong)
              client.hincrBy(tp._1, "money", tp._2(2).toLong)
              client.hincrBy(tp._1, "timer", tp._2(3).toLong)
              client.expire(tp._1, 60 * 60 * 24 * 2) // keep two days
            }
          } finally {
            client.close() // always return the connection, even on failure
          }
        }

      // Per-hour distribution (key "B_" + 10-char time prefix).
      baseData.map(t => ("B_" + t._2, t._3))
        .reduceByKey((l1, l2) => (l1 zip l2).map(x => x._1 + x._2))
        .foreachPartition { itr =>
          val client = JedisUtils.getJedisClient()
          try {
            itr.foreach { tp =>
              client.hincrBy(tp._1, "total", tp._2(0).toLong)
              client.hincrBy(tp._1, "succ", tp._2(1).toLong)
              client.expire(tp._1, 60 * 60 * 24 * 2)
            }
          } finally {
            client.close()
          }
        }

      // Successful recharges per province per day.
      baseData.map(t => ((t._2, t._4), t._3))
        .reduceByKey((l1, l2) => (l1 zip l2).map(x => x._1 + x._2))
        .foreachPartition { itr =>
          val client = JedisUtils.getJedisClient()
          try {
            itr.foreach { tp =>
              // Build the key once so hincrBy and expire target the SAME key
              // (previously substring(1, 8) was incremented but
              // substring(0, 8) was expired).
              val key = "P_" + tp._1._1.substring(0, 8)
              client.hincrBy(key, tp._1._2, tp._2(1).toLong)
              client.expire(key, 60 * 60 * 24 * 2)
            }
          } finally {
            client.close()
          }
        }

      baseData.unpersist()

      // Commit this batch's end offsets back to MySQL so a restart resumes
      // exactly after the processed data (at-least-once semantics: the
      // Redis writes above happen before the commit).
      // NOTE(review): assumes streaming_offset_24 has a unique key on
      // (topic, partitions, groupid) so REPLACE acts as an upsert — confirm
      // the table schema.
      DB.localTx { implicit session =>
        offsetRanges.foreach { osr =>
          sql"""replace into streaming_offset_24(topic, partitions, groupid, offset)
                values (${osr.topic}, ${osr.partition}, ${groupId}, ${osr.untilOffset})"""
            .update()
            .apply()
        }
      }
    }

    // Without start/awaitTermination the pipeline is only defined, never run,
    // and main exits immediately.
    ssc.start()
    ssc.awaitTermination()
  }
}
