package com.spark.mysql

import java.util.Properties

import com.spark.mysql.insertfunction.insertToCompare
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * @author hjn
  * @Time 2020.4.6-4.10
  */
object abnormal {

  /**
    * Adds a time-of-day interval to a clock time, both encoded as "HHmmss",
    * wrapping around midnight (e.g. "235500" + "000500" == "000000").
    *
    * The previous implementation manipulated individual ASCII digits and only
    * handled the exact carries produced by a 5-minute step (a minute-digit sum
    * of 11, for instance, yielded the non-digit character ';'). Working in
    * seconds-since-midnight reproduces the old results for every input the
    * old code handled correctly, and is correct for arbitrary intervals.
    *
    * @param nextTime current time as a 6-character "HHmmss" string
    * @param interval interval to add, also as a 6-character "HHmmss" string
    * @return the sum as a 6-character "HHmmss" string, modulo 24 hours
    */
  def timeAdd(nextTime: String, interval: String): String = {
    require(nextTime.length == 6 && interval.length == 6,
      "timeAdd expects 6-character HHmmss strings")
    // Convert "HHmmss" to seconds since midnight.
    def toSeconds(t: String): Int =
      t.substring(0, 2).toInt * 3600 +
        t.substring(2, 4).toInt * 60 +
        t.substring(4, 6).toInt
    // Wrap past midnight, matching the original's 23:5x -> 00:0x rollover.
    val total = (toSeconds(nextTime) + toSeconds(interval)) % 86400
    f"${total / 3600}%02d${total % 3600 / 60}%02d${total % 60}%02d"
  }

  /**
    * Driver entry point: every iteration reads one 5-minute time slice of
    * (longitude, latitude, count) rows from the MySQL table `historycount`,
    * averages the counts per coordinate, and writes each average back via
    * `insertToCompare`. Loops forever, advancing the slice by 5 minutes.
    *
    * NOTE(review): a SparkContext and a SparkSession are created separately;
    * neither is ever stopped (the `while (true)` loop never exits).
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("abnormal")
    val sc = new SparkContext(conf)
    val spark: SparkSession = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", 1).getOrCreate()
    spark.sparkContext.setLogLevel("ERROR")

    /**
      * First way of reading from MySQL: spark.read.jdbc with a Properties
      * object carrying the credentials.
      */

    val properties = new Properties()
    properties.setProperty("user", "root")
    properties.setProperty("password", "root")

    /**
      * Define the time slice.
      */
    //    var startTime = "000000" // Is there a population sample at 00:00?
    var nextTime = "000500"
    val interval = "000500" // 5-minute interval

    while (true) {

      // Fetch the rows whose time column ends with the current slice.
      val allResult: Array[Row] = spark.read.jdbc("jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=utf-8&useSSL=false", "(SELECT longitude,latitude,count FROM historycount WHERE time like \"%" + nextTime + "\" ) T", properties).collect()
      // NOTE(review): parsing Row.toString() ("[lon,lat,count]") by splitting
      // on commas is fragile — row.getAs would be safer; verify field values
      // never contain commas.
      val lines: Array[((String, String), String)] = allResult.map(line => {
        val result: Array[String] = line.toString().split(",")
        val longitude = result(0)
        val latitude = result(1)
        val count = result(2)
        ((longitude, latitude), count)
      })

      val rddLines = sc.parallelize(lines) // turn the driver-side array back into an RDD
      val reformatLines = rddLines.groupBy(_._1) // group by (longitude, latitude)
      //      reformatLines.foreach(println)
      val group: RDD[Iterable[((String, String), String)]] = reformatLines.map(_._2) // keep only the grouped values
      /**
        * Should an empty `group` be considered? (It is simply skipped here.)
        */
      for (elem <- group) { // one iterable per base station; runs on the executors
        val length: Int = elem.size // number of samples
        var num: Int = 0 // running total of sampled head counts
        var longitude: String = ""
        var latitude: String = ""

        /**
          * Accumulate the total count and capture the coordinates.
          * drop(1)/dropRight(1) strip the '[' and ']' left over from
          * Row.toString().
          */
        elem.foreach(x => {
          longitude = x._1._1.drop(1) // longitude (leading '[' removed)
          latitude = x._1._2 // latitude
          num += x._2.dropRight(1).toInt
        })
        // NOTE(review): integer division — fractional averages are truncated.
        val finalNum: Int = num / length // average head count for this station

        /**
          * Known failure:
          * com.mysql.jdbc.exceptions.jdbc4.CommunicationsException: Communications link failure
          * Suspected cause: surplus connections are opened (one per executor
          * task) and the database runs out of connections to hand out.
          */
        insertToCompare(nextTime, longitude, latitude, finalNum)
      }
      nextTime = timeAdd(nextTime, interval)
      //      Thread.sleep(10000)
    }

  }
}