package fun.lumia.combine

import java.sql.DriverManager
import java.text.SimpleDateFormat
import java.util.Properties
import java.util.regex.Pattern

import bean.model.B2BModel
import com.alibaba.fastjson.JSON
import fun.lumia.common.SparkTool
import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Durations, StreamingContext}
import utils.timeUtils.{formatEndLong, formatStartLong, _}
import fun.lumia.combine.bean.model
import org.apache.spark.sql.SaveMode

import scala.collection.mutable.ListBuffer

object comsumeAndCompute extends SparkTool {
  /**
   * 在run方法里面编写spark业务逻辑
   */
  /**
   * Builds a function that shifts an epoch-millisecond timestamp forward by
   * one 5-minute window and renders it as "yyyy-MM-dd HH:mm:ss".
   * Registered as the Spark SQL UDF "formatTimeTemp" in run(); a fresh
   * SimpleDateFormat is created per function instance because
   * SimpleDateFormat is not thread-safe.
   */
  def formatTimeTemp: Long => String = {
    val formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val windowGapMillis = 5 * 60 * 1000
    stamp => formatter.format(stamp + windowGapMillis)
  }

  /**
   * Core streaming job. Every 120-second batch this:
   *  1. reads raw Fonsview log lines from Kafka (direct stream, value only),
   *  2. keeps only well-formed records: 46 pipe-separated fields, numeric
   *     filesize, "yyyy/MM/dd HH:mm:ss.SSS" start/end timestamps, and a
   *     non-negative duration,
   *  3. splits each request's bytes across the 5-minute windows it spans
   *     (splitLineData),
   *  4. aggregates filesize / request count per
   *     (time_stamp, server_ip, reqdomain, vendor),
   *  5. joins the MySQL domain -> business-name mapping,
   *  6. overwrites the DWS snapshot table and upserts cumulative totals into
   *     the B2B KPI table.
   */
  override def run(args: Array[String]): Unit = {
    /**
     * AUTO_OFFSET_RESET_CONFIG accepts two values:
     * smallest — equivalent to --from-beginning
     * largest  — consume only the newest messages
     */
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> CombineConstant.KAFKA_BOOTSTRAP_SERVERS,
      ConsumerConfig.GROUP_ID_CONFIG -> CombineConstant.GROUP_ID_CONFIG,
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> CombineConstant.AUTO_OFFSET_RESET_CONFIG
    )

    val ssc = new StreamingContext(sc, Durations.seconds(120))
    val ds = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc,
      kafkaParams,
      Set(CombineConstant.FONSVIEW_TOPIC)
    ).map(_._2) // keep only the message value; the Kafka key is unused
    //  ds.print(10)
    val sqlContext = sql
    import sqlContext.implicits._
    // Expose the "+5 minutes" window formatter to Spark SQL.
    sqlContext.udf.register("formatTimeTemp", formatTimeTemp)
    ds.foreachRDD(rdd => {
      // Shrink the partition count before the per-line JSON parsing.
      // FIX: a discarded rdd.repartition(100) call was removed here — its
      // result was never used, so it was dead code with no effect.
      val fonsviewSourceRDD = rdd.coalesce(50)
      // Structural validation; malformed lines are dropped silently.
      val fonsviewRDD = fonsviewSourceRDD.filter(line => {
        val json = JSON.parseObject(line)
        val message = json.getString("message")
        val messageSplits = message.split("\\|")
        val filesize = messageSplits(14).trim()
        val reqstarttime = messageSplits(15).trim()
        val reqendtime = messageSplits(16).trim()
        messageSplits.length == 46 &&
          Pattern.matches("\\d+\\.*\\d*", filesize) &&
          Pattern.matches("\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", reqstarttime) &&
          Pattern.matches("\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", reqendtime) &&
          tranTimeb2bfh(reqendtime) >= tranTimeb2bfh(reqstarttime)
      }).flatMap(line => {
        // Expand each validated request into one B2BModel row per 5-minute
        // window it overlaps.
        val json = JSON.parseObject(line)
        val message = json.getString("message")
        val messageSplits = message.split("\\|")
        val server_ip = messageSplits(1)
        val reqdomain = messageSplits(6)
        val vendor = CombineConstant.FONSVIEW_VENDOR_NAME
        val filesize = messageSplits(14).trim().toDouble
        val reqstarttime = tranTimeb2bfh(messageSplits(15))
        val reqendtime = tranTimeb2bfh(messageSplits(16))
        val sendtime = reqendtime - reqstarttime
        splitLineData(server_ip, reqdomain, vendor, filesize, reqstarttime, reqendtime, sendtime)
      })
      val fonsviewDF = fonsviewRDD.toDF()
      fonsviewDF.registerTempTable("fonsviewTable")
      // Aggregate per window / server / domain / vendor.
      sql.sql(
        """
          |select time_stamp,
          |formatTimeTemp(time_stamp) as format_timestamp,
          |server_ip,
          |reqdomain,
          |vendor,
          |sum(filesize) as sum_filesize,
          |sum(total) as total
          |from fonsviewTable group by time_stamp,server_ip,reqdomain,vendor
          |""".stripMargin).registerTempTable("groupTable")
      // Load the domain -> business-name reference table from MySQL.
      val domainDF = sql.read.format("jdbc")
        .options(Map(
          "url" -> CombineConstant.MYSQL_URL,
          "driver" -> CombineConstant.MYSQL_DRIVER,
          "dbtable" -> CombineConstant.MYSQL_DBTABLE_DOMAIN,
          "user" -> CombineConstant.MYSQL_USER,
          "password" -> CombineConstant.MYSQL_PASSWORD
        )).load()
      domainDF.select("DOMAIN", "BUSI_NAME").registerTempTable("domainTable")
      // Left join keeps aggregates whose domain has no business mapping
      // (their busi_name comes out null).
      sql.sql(
        """
          |select * from groupTable a left join domainTable b on a.reqdomain=b.DOMAIN
          |
          |""".stripMargin).registerTempTable("finalTable")
      val finalDF = sql.sql(
        """
          |select time_stamp,
          |format_timestamp,
          |server_ip,
          |reqdomain,
          |vendor,
          |BUSI_NAME as busi_name,
          |sum_filesize,
          |total
          |from finalTable
          |""".stripMargin)
      val prop = new Properties()
      prop.put("driver", CombineConstant.MYSQL_DRIVER)
      prop.put("user", CombineConstant.MYSQL_USER)
      prop.put("password", CombineConstant.MYSQL_PASSWORD)
      // finalDF is consumed twice (snapshot write + KPI upsert) — cache it.
      finalDF.cache()
      // Per-batch snapshot: the DWS table is replaced on every batch.
      finalDF.write.mode(SaveMode.Overwrite).jdbc(CombineConstant.MYSQL_URL, CombineConstant.MYSQL_DBTABLE_DWS, prop)

      // Cumulative upsert into the KPI table: on duplicate key, this batch's
      // filesize/count are added to the stored running totals.
      finalDF.foreachPartition(iter => {
        val conn = DriverManager.getConnection(CombineConstant.MYSQL_URL, CombineConstant.MYSQL_USER, CombineConstant.MYSQL_PASSWORD)
        conn.setAutoCommit(false) // commit the whole partition as one batch
        val dbTable = CombineConstant.MYSQL_DATABLE_B2B_KPI
        val sqlStr =
          s"""
             |INSERT INTO $dbTable (
             |time_stamp,
             |format_timestamp,
             |server_ip,
             |reqdomain,
             |vendor,
             |busi_name,
             |sum_filesize,
             |total)
             |VALUES(?,?,?,?,?,?,?,?)
             |ON DUPLICATE KEY UPDATE sum_filesize=sum_filesize+?,
             |total=total+?
             |""".stripMargin
        val stat = conn.prepareStatement(sqlStr)
        // FIX: close the statement/connection in a finally block so they are
        // not leaked when a row conversion or JDBC call throws.
        try {
          iter.foreach(row => {
            val time_stamp = row.getAs[Long]("time_stamp")
            val format_timestamp = row.getAs[String]("format_timestamp")
            val server_ip = row.getAs[String]("server_ip")
            val reqdomain = row.getAs[String]("reqdomain")
            val vendor = row.getAs[String]("vendor")
            val busi_name = row.getAs[String]("busi_name")
            val sum_filesize = row.getAs[Double]("sum_filesize")
            val total = row.getAs[Long]("total")
            stat.setLong(1, time_stamp)
            stat.setString(2, format_timestamp)
            stat.setString(3, server_ip)
            stat.setString(4, reqdomain)
            stat.setString(5, vendor)
            stat.setString(6, busi_name)
            stat.setDouble(7, sum_filesize)
            stat.setLong(8, total)
            // Parameters 9/10 feed the ON DUPLICATE KEY UPDATE increments.
            stat.setDouble(9, sum_filesize)
            stat.setLong(10, total)
            stat.addBatch()
          })
          stat.executeBatch()
          conn.commit()
        } finally {
          stat.close()
          conn.close()
        }
      })
      finalDF.unpersist()
    })

    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }

  /**
   * 初始化spark配置
   *  conf.setMaster("local")
   */
  /**
   * Spark configuration hook, invoked before the context is created.
   * Sets the application name plus shuffle tuning knobs; uncomment the
   * setMaster line for local debugging.
   */
  override def init(): Unit = {
    //    conf.setMaster("local[4]")
    conf.setAppName("Combine")
    // Shuffle tuning applied as a single table of key/value pairs.
    val shuffleTuning = Seq(
      "spark.sql.shuffle.partitions" -> "100",
      "spark.shuffle.sort.bypassMergeThreshold" -> "10000000",
      "spark.shuffle.file.buffer" -> "64k",
      "spark.reducer.maxSizeInFlight" -> "64m"
    )
    shuffleTuning.foreach { case (key, value) => conf.set(key, value) }
  }

  /**
   * Pro-rates a transfer's byte count to the slice of its send time that
   * falls inside one window: filesize * (consistTime / sendtime).
   * Callers guarantee sendtime is non-zero (see splitLineData).
   *
   * @param consistTime millis of the transfer inside the window
   * @param filesize    total bytes of the transfer
   * @param sendtime    total transfer duration in millis (non-zero)
   * @return            the window's proportional share of filesize
   */
  def calcuNewFilesize(consistTime: Long, filesize: Double, sendtime: Long): Double =
    filesize * consistTime / sendtime

  /**
   * Splits a single request across the 5-minute windows it spans, emitting
   * one B2BModel per window with the window's proportional share of the
   * transferred bytes and a request count of 1.
   *
   * @param server_ip    serving node IP (log field 1)
   * @param reqdomain    requested domain (log field 6)
   * @param vendor       constant vendor label
   * @param filesize     total bytes transferred by this request
   * @param reqstarttime request start, epoch millis
   * @param reqendtime   request end, epoch millis (>= reqstarttime)
   * @param sendtime     reqendtime - reqstarttime; 0 means instantaneous
   * @return             one row per overlapped window; at least one row
   */
  def splitLineData(server_ip: String,
                    reqdomain: String,
                    vendor: String,
                    filesize: Double,
                    reqstarttime: Long,
                    reqendtime: Long,
                    sendtime: Long): ListBuffer[B2BModel] = {
    // NOTE(review): formatStartLong/formatEndLong presumably snap the
    // timestamps to 5-minute window boundaries (floor/ceil) — confirm
    // against utils.timeUtils; the windowing below relies on it.
    val StartTimeFormat = formatStartLong(reqstarttime)
    val EndTimeFormat = formatEndLong(reqendtime)
    var i = 1
    // Window label for the row currently being built; advances by one
    // window per loop iteration.
    var timestampTemp = StartTimeFormat
    val windowGap = 5 * 60 * 1000
    // Number of window boundaries between the snapped start and end.
    val count = (EndTimeFormat - StartTimeFormat) / windowGap
    val list = new ListBuffer[B2BModel]
    var filesizeNew: Double = filesize
    var consistTime = 0L

    if (sendtime != 0) {
      if (count > 1) {
        // Multi-window request: emit the first (partial) window and every
        // full intermediate window; the final window is appended after the
        // loop with the leftover consistTime.
        while (i < count) {
          filesizeNew = 0.0
          var consistTimeTemp = 0L
          if (i == 1) {
            // First window: only the tail from reqstarttime to the boundary.
            consistTimeTemp = timestampTemp + windowGap - reqstarttime
          } else {
            // Intermediate windows are fully covered.
            consistTimeTemp = windowGap
          }
          filesizeNew = calcuNewFilesize(consistTimeTemp, filesize, sendtime)
          list.append(B2BModel(
            timestampTemp,
            server_ip,
            reqdomain,
            vendor,
            filesizeNew,
            1
          ))
          timestampTemp = timestampTemp + windowGap
          i += 1
        }
        // Residual time inside the last window.
        consistTime = reqendtime - timestampTemp
      } else {
        // Request fits a single window: charge the full duration.
        consistTime = reqendtime - reqstarttime
      }
      filesizeNew = calcuNewFilesize(consistTime, filesize, sendtime)
    } else {
      // Zero-duration transfer: attribute all bytes to the start window to
      // avoid dividing by sendtime == 0.
      filesizeNew = filesize
    }

    // Final (or only) window row.
    list.append(B2BModel(
      timestampTemp,
      server_ip,
      reqdomain,
      vendor,
      filesizeNew,
      1
    ))
    list

  }
}
