/**
 * Spark Streaming 处理数据
 * 天津工业大学 大数据实训 第四组
 * 杨玺锟
 * 2020-03-07
 **/

package spark

import java.sql.{PreparedStatement, ResultSet}
import java.text.SimpleDateFormat
import java.util.Calendar

import com.google.gson.Gson
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable
import scala.util.control.Breaks.{break, breakable}

/**
 * Spark Streaming job: consumes JSON game records from Kafka topic "test",
 * turns every 5-second micro-batch into a DataFrame and upserts several
 * aggregate views into MySQL (via the project-local MySQLConnect helper):
 * hot game types, discount ranking, a price histogram, and yearly /
 * monthly review averages.
 */
object SparkStreamKafka {

  /**
   * Entry point. Builds the streaming context, wires the Kafka direct
   * stream, and runs until termination.
   *
   * NOTE(review): the shared `rs` / `pstmt` vars declared here are captured
   * by `foreach` closures that Spark executes on executors. This only works
   * because the job runs in local mode — confirm before deploying to a
   * cluster (the JDBC objects are not serializable).
   */
  def main(args: Array[String]): Unit = {
    // local[*] uses every core of this machine; for production switch the
    // master to yarn / standalone cluster mode. appName is free-form.
    val conf = new SparkConf().setMaster("local[*]").setAppName("kafka")

    // 5-second micro-batches.
    val ssc = new StreamingContext(conf, Seconds(5))

    // Kafka consumer configuration.
    val topicsSet = Array("test")
    val kafkaParams = Map[String, String](
      "bootstrap.servers" -> "192.168.2.12:9092",
      "group.id" -> "test-consumer-group",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer"
    )
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topicsSet, kafkaParams)
    )
    ssc.checkpoint("e:\\streamingData")

    var rs: ResultSet = null
    var pstmt: PreparedStatement = null
    var resultFlag: Boolean = false

    /**
     * Parse each JSON record into a case class, build a DataFrame per RDD,
     * and run every aggregation against it.
     */
    stream.map(record => handleMessage2CaseClass(record.value())).foreachRDD(rdd => {
      val spark = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
      val df = spark.createDataFrame(rdd)

      if (!df.rdd.isEmpty) { // skip the batch entirely when no Kafka data arrived
        println("process")

        /** Hot game types: number of games per non-empty type, upserted into top_type. */
        val res_top_type = df.select("game_name", "game_type").filter("game_type != ''").groupBy("game_type").count()
        res_top_type.foreach(row => {
          pstmt = MySQLConnect.conn().prepareStatement("SELECT game_type FROM top_type WHERE game_type = ?")
          pstmt.setString(1, row(0).toString)
          rs = pstmt.executeQuery()
          val exists = rs.next()
          rs.close()
          pstmt.close() // fix: the SELECT statement/result set used to leak here
          if (!exists) {
            pstmt = MySQLConnect.conn().prepareStatement("INSERT INTO top_type VALUES(?,?)")
            pstmt.setString(1, row(0).toString)
            pstmt.setString(2, row(1).toString)
          } else {
            pstmt = MySQLConnect.conn().prepareStatement("UPDATE top_type SET count = ? WHERE game_type = ?")
            pstmt.setString(1, row(1).toString)
            pstmt.setString(2, row(0).toString)
          }
          resultFlag = pstmt.execute()
          pstmt.close()
        })

        /** Discount ranking: upsert currently discounted games, delete games no longer discounted. */
        val res_top_discount = df.filter("price_discount != 0").na.fill("").select("game_name", "game_price", "price_discount", "game_type")
        val res_top_discount_del = df.filter("price_discount = 0").select("game_name")
        res_top_discount_del.foreach(row => {
          pstmt = MySQLConnect.conn().prepareStatement("DELETE FROM top_discount WHERE game_name = ?")
          pstmt.setString(1, row(0).toString)
          resultFlag = pstmt.execute()
          pstmt.close()
        })
        res_top_discount.foreach(row => {
          pstmt = MySQLConnect.conn().prepareStatement("SELECT game_name FROM top_discount WHERE game_name = ?")
          pstmt.setString(1, row(0).toString)
          rs = pstmt.executeQuery()
          val exists = rs.next()
          rs.close()
          pstmt.close() // fix: leaked SELECT statement
          if (!exists) {
            pstmt = MySQLConnect.conn().prepareStatement("INSERT INTO top_discount VALUES(?,?,?,?)")
            pstmt.setString(1, row(0).toString)
            pstmt.setString(2, row(1).toString)
            pstmt.setDouble(3, row.getDouble(2))
            pstmt.setString(4, row(3).toString)
          } else {
            pstmt = MySQLConnect.conn().prepareStatement("UPDATE top_discount SET game_price=?, price_discount=?, game_type=? WHERE game_name = ?")
            pstmt.setString(1, row(1).toString)
            pstmt.setDouble(2, row.getDouble(2))
            pstmt.setString(3, row(3).toString)
            pstmt.setString(4, row(0).toString)
          }
          resultFlag = pstmt.execute()
          pstmt.close()
        })

        /** Price histogram: bucket paid games into six disjoint price bands. */
        val price_class = df.filter("game_price != 0").select("game_price")
        val class1 = price_class.filter("game_price > 0 and game_price <= 49").count().toInt
        val class2 = price_class.filter("game_price >= 50 and game_price <= 99").count().toInt
        val class3 = price_class.filter("game_price >= 100 and game_price <= 149").count().toInt
        // fix: upper bound was 200, double-counting games priced exactly 200 in the next band
        val class4 = price_class.filter("game_price >= 150 and game_price <= 199").count().toInt
        val class5 = price_class.filter("game_price >= 200 and game_price <= 299").count().toInt
        val class6 = price_class.filter("game_price >= 300").count().toInt
        if (!(class1 == 0 && class2 == 0 && class3 == 0 && class4 == 0 && class5 == 0 && class6 == 0)) {
          // Rebuild the single-row histogram table from scratch.
          val stmt = MySQLConnect.conn().createStatement()
          resultFlag = stmt.execute("DELETE FROM price_class")
          resultFlag = stmt.execute(s"INSERT INTO price_class VALUES($class1,$class2,$class3,$class4,$class5,$class6)")
          stmt.close()
        }

        /** Yearly averages (2016-2020) of review count and positive-review ratio. */
        val stmtYear = MySQLConnect.conn().createStatement()
        resultFlag = stmtYear.execute("DELETE FROM year_avg") // clear the table first
        stmtYear.close()
        for (x <- 0 to 4) {
          val year = 2016 + x // 2016 2017 2018 2019 2020
          // avg over rows with a release date, a review count and a review ratio.
          val year_review = df.filter("release_date is not null and game_review_count != 0 and positive_review_ratio != 0").select("game_review_count", "positive_review_ratio").filter(s"release_date between ${year}0000 and ${year}1231").agg(avg("game_review_count"), avg("positive_review_ratio"))
          year_review.foreach(row => {
            pstmt = MySQLConnect.conn().prepareStatement("INSERT INTO year_avg VALUE(?,?,?)")
            pstmt.setString(1, year.toString)
            pstmt.setDouble(2, row.getDouble(0))
            pstmt.setDouble(3, row.getDouble(1))
            resultFlag = pstmt.execute()
            pstmt.close()
          })
        }

        /** Monthly averages for the last 12 full months (the current month is excluded). */
        val stmtMonth = MySQLConnect.conn().createStatement()
        resultFlag = stmtMonth.execute("DELETE FROM month_avg") // clear the table first
        stmtMonth.close()

        val cal = Calendar.getInstance()
        for (x <- 1 to 13) {
          cal.add(Calendar.MONTH, -1) // step one month back per round (fix: was the magic field id 2)

          // Round 1 only consumes the current month — equivalent to the old breakable/continue.
          if (x > 1) {
            val formatTemp = new SimpleDateFormat("yyyyMMdd")
            val cal1 = Calendar.getInstance()
            // `cal` is one month behind the month being processed, hence the +1
            // compensation; a MONTH value of 12 rolls leniently into the next year,
            // which setting YEAR afterwards corrects — keep this statement order.
            cal1.set(Calendar.MONTH, cal.get(Calendar.MONTH) + 1)
            cal1.set(Calendar.YEAR, cal.get(Calendar.YEAR))

            // First day of the target month.
            cal1.set(Calendar.DAY_OF_MONTH, 1)
            val firstday = formatTemp.format(cal1.getTime)

            // Last day of the target month: day 0 of the following month.
            cal1.add(Calendar.MONTH, 1)
            cal1.set(Calendar.DAY_OF_MONTH, 0)
            val lastday = formatTemp.format(cal1.getTime)

            val month_review = df.filter("release_date is not null and game_review_count != 0 and positive_review_ratio != 0").select("game_review_count", "positive_review_ratio").filter(s"release_date >= $firstday and release_date <= $lastday").agg(avg("game_review_count"), avg("positive_review_ratio"))
            val monthStr: String = firstday + "-" + lastday
            month_review.foreach(row => {
              pstmt = MySQLConnect.conn().prepareStatement("INSERT INTO month_avg VALUE(?,?,?)")
              pstmt.setString(1, monthStr)
              pstmt.setDouble(2, row.getDouble(0))
              pstmt.setDouble(3, row.getDouble(1))
              resultFlag = pstmt.execute()
              pstmt.close()
            })
          }
        }

      } else println("clear") // nothing arrived in this batch
    })

    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Deserializes one Kafka JSON payload into a [[KafkaMessage]].
   * NOTE(review): Gson leaves fields absent from the JSON at their zero
   * value / null — callers should be prepared for null String fields.
   */
  def handleMessage2CaseClass(jsonStr: String): KafkaMessage = {
    val gson = new Gson()
    gson.fromJson(jsonStr, classOf[KafkaMessage])
  }

  /** One game record as produced to Kafka; release_date is a yyyyMMdd-style string. */
  case class KafkaMessage(game_name: String, game_price: Int, price_discount: Double, release_date: String, game_review_count: Int, positive_review_ratio: Double, game_type: String, game_about: String, img_src: String)

}