package com.zhao.sparkstreaming.index_project

import java.sql.{Connection, PreparedStatement}
import java.text.SimpleDateFormat
import java.util.Date

import scala.util.Try
import scala.util.control.NonFatal

import com.alibaba.fastjson.{JSON, JSONObject}
import com.zhao.utils.JdbcUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Market index computation over a real-time stream of exchange quotes.
 *
 * On every micro-batch the job reads quote messages from Kafka, computes the
 * market index and the BTC reference price, writes the results to MySQL, and
 * persists the consumed Kafka offsets in the same database so a restart
 * resumes exactly where the previous run stopped (manual offset management,
 * auto-commit disabled).
 *
 * Date: 2020/9/3 18:19
 *
 * @author 柒柒
 * @version 1.0
 */

object IndexStreaming {

  /**
   * Entry point.
   *
   * Expected arguments:
   *   args(0) — path of the index-mapping file (CSV: col 1 = symbol, col 2 = unit_weight)
   *   args(1) — Spark application id (also part of the offset-table key)
   *   args(2) — Kafka consumer group id
   */
  def main(args: Array[String]): Unit = {

    // All three arguments are mandatory; abort with a non-zero exit code so
    // schedulers recognize the failure (the original exited with status 0).
    if (args.length != 3) {
      println("argument is wrong!!!")
      sys.exit(1)
    }

    val indexMappingPath = args(0)
    val appId = args(1)
    val groupId = args(2)

    // Spark configuration.
    val conf = new SparkConf()
      .setAppName(appId)
      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", "100") // max records/sec pulled per Kafka partition
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer") // faster serialization
      .set("spark.rdd.compress", "true") // compress serialized RDD partitions

    val sc: SparkContext = new SparkContext(conf)
    // Micro-batch interval: one second.
    val ssc = new StreamingContext(sc, Seconds(1))

    ssc.sparkContext.setLogLevel("WARN")

    // Load the index configuration file and build symbol -> unit_weight.
    // The mapping is small enough to collect to the driver.
    val indexMapping: Map[String, String] = sc.textFile(indexMappingPath)
      .collect()
      .map { line =>
        val fields = line.split(",")
        (fields(1), fields(2)) // (symbol, unit_weight)
      }
      .toMap

    // Broadcast the mapping so each executor holds a single read-only copy.
    val indexMappingBroadcast: Broadcast[Map[String, String]] = sc.broadcast(indexMapping)

    // Kafka consumer parameters; offsets are committed manually (to MySQL),
    // so Kafka's auto-commit is disabled.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "localhost:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    val topics = Array("Test1")

    // Query the historical offsets on the driver so consumption resumes
    // exactly where the previous run stopped.
    val historyOffsets: Map[TopicPartition, Long] =
      OffsetUtils.queryHistoryOffsetFromMySQL(appId, groupId)

    // Direct stream (official recommendation): uses Kafka's consumer API directly.
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent, // locality strategy
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, historyOffsets)
    )

    kafkaDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // The HasOffsetRanges cast is only valid on the un-transformed source RDD,
        // so capture the offset ranges before any map/filter.
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        val lines = rdd.map(_.value())

        val result: RDD[(Double, Double, String)] = getCommonRDD(lines, indexMappingBroadcast)
        result.foreach(println)

        // Persist the computed (index, BTC price, time) triples.
        save2DB(result)

        // Persist the consumed offsets in a transaction; on failure, roll back
        // and stop the streaming context so no batch is acknowledged twice.
        var connection: Connection = null
        var offsetStmt: PreparedStatement = null
        try {
          connection = DruidConnectionPool.getConnection
          connection.setAutoCommit(false)

          offsetStmt = connection.prepareStatement(
            "INSERT INTO t_kafka_offset (app_gid, topic_partition, offset) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE offset = ?")
          // One upsert per (topic, partition): key = app_gid + topic_partition,
          // value = end offset of the batch.
          for (range <- offsetRanges) {
            offsetStmt.setString(1, s"${appId}_$groupId")
            offsetStmt.setString(2, s"${range.topic}_${range.partition}")
            offsetStmt.setLong(3, range.untilOffset)
            offsetStmt.setLong(4, range.untilOffset)
            offsetStmt.executeUpdate()
          }
          connection.commit()
        } catch {
          // NonFatal: let OutOfMemoryError / InterruptedException propagate.
          case NonFatal(e) =>
            e.printStackTrace()
            // Guard the rollback: the original NPE'd here when getConnection
            // itself was what failed.
            if (connection != null) connection.rollback()
            // Stop streaming so the batch is reprocessed after restart.
            ssc.stop(true)
        } finally {
          if (offsetStmt != null) offsetStmt.close()
          if (connection != null) connection.close()
        }
      }
    }

    // Start the job and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Combines the broadcast mapping with the incoming JSON quote records.
   *
   * Each input line is a JSON document with a "data" array of per-symbol
   * quotes ({type, binance, huobi, okex}). For every symbol the price is
   * averaged over the exchanges that actually reported a non-zero quote,
   * weighted by 1 / unit_weight, and accumulated into the market index.
   *
   * @param rdd                    raw JSON lines from Kafka
   * @param indexMappingBroadcast  broadcast of symbol -> unit_weight (as string)
   * @return RDD of (index, BTC price, formatted processing time)
   */
  def getCommonRDD(rdd: RDD[String],
                   indexMappingBroadcast: Broadcast[Map[String, String]]): RDD[(Double, Double, String)] = {
    rdd.map { line =>
      val jsonObj: JSONObject = JSON.parseObject(line)
      // The payload of interest lives under the top-level "data" array.
      val quotes = jsonObj.getJSONArray("data")

      var index = 0.0    // accumulated market index
      var priceBtc = 0.0 // average price of the first array element

      // One timestamp/formatter per record — the original allocated a
      // SimpleDateFormat for every array element.
      val times = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date())

      for (i <- 0 until quotes.size()) {
        val quote: JSONObject = quotes.getJSONObject(i)
        val symbol = quote.getString("type")
        val binance = quote.getString("binance").toDouble
        val huobi = quote.getString("huobi").toDouble
        val okex = quote.getString("okex").toDouble

        // Average over the exchanges that reported a price. Equivalent to the
        // original overlapping if-chain (sum/3, sum/2, sum depending on how
        // many quotes were zero), but without relying on write order.
        val reported = Seq(binance, huobi, okex).filter(_ != 0.0)
        val avgPrice = if (reported.isEmpty) 0.0 else reported.sum / reported.size

        // Unit base-point weight. The original fell back to the string "未知"
        // and crashed in .toDouble on unknown symbols; skip symbols without a
        // valid non-zero weight instead (also avoids division by zero).
        indexMappingBroadcast.value.get(symbol)
          .flatMap(w => Try(w.toDouble).toOption)
          .filter(_ != 0.0)
          .foreach(weight => index += avgPrice / weight)

        if (i == 0) {
          // First element is treated as BTC by convention — TODO confirm with the producer.
          priceBtc = avgPrice
        }
      }

      // (market index, BTC price, processing time)
      (index, priceBtc, times)
    }
  }

  /**
   * Persists (index, BTC price, time) triples to the `mapping` table.
   *
   * @param rdd result triples produced by [[getCommonRDD]]
   */
  def save2DB(rdd: RDD[(Double, Double, String)]): Unit = {
    if (!rdd.isEmpty()) {
      rdd.foreachPartition { itr =>
        if (itr.nonEmpty) {

          val conn = JdbcUtil.getConn()

          // Fix: the original prepared a brand-new statement for every row and
          // never closed any of them (statement leak). Prepare once per
          // partition, batch the rows, and release everything in finally.
          val sql =
            """
              |insert into mapping(indexs,price_BTC,time)
              |value(?,?,?)
              |""".stripMargin
          val statement: PreparedStatement = conn.prepareStatement(sql)
          try {
            itr.foreach { case (index, priceBtc, time) =>
              statement.setDouble(1, index)
              statement.setDouble(2, priceBtc)
              statement.setString(3, time)
              statement.addBatch()
            }
            statement.executeBatch()
          } finally {
            statement.close()
            JdbcUtil.releaseCon(conn)
          }
        }
      }
    }
  }
}
