package formatfa.streaming

import java.sql.{Connection, DriverManager}
import java.text.SimpleDateFormat
import java.util.Date

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.dstream.DStream

// Real-time stream-processing entry point: consumes rating events and maintains a live top-N movie ranking.

/** Placeholder companion class for the [[RatingLive]] object; carries no state or behavior. */
class RatingLive
// A single rating event (placeholder type — never implemented).
//case class RatingItem(movieItem)
// Accumulated rating state for one movie.
/**
 * Accumulated rating state for one movie.
 *
 * @param movieId     the movie's identifier (held as null while stored as keyed state)
 * @param ratingSum   sum of all ratings received so far
 * @param ratingCount number of ratings received so far
 */
case class MovieItem(movieId: String, ratingSum: Float, ratingCount: Int)

// A user and the number of movies they have rated.
/**
 * Per-user rating activity.
 *
 * @param userId      the user's identifier
 * @param ratingCount number of movies this user has rated
 */
case class UserItem(userId: String, ratingCount: Int)
// NOTE(review): a comment here described an implicit DStream[MovieItem] -> DStream[(key, Movie)] conversion, but no such conversion exists in this file; the keying is done explicitly in main().

object RatingLive {

  /**
   * Overwrite the `rating_count_top` table with the latest top-movie snapshot.
   *
   * Runs on the driver once per micro-batch. The table is truncated first
   * because it only ever holds the most recent ranking.
   *
   * Fixes over the original: the JDBC connection/statements are now closed via
   * try/finally (they previously leaked every batch), and rows are inserted
   * through a parameterized, batched PreparedStatement instead of interpolated
   * SQL strings (injection-safe and parsed once).
   *
   * @param datetime batch timestamp formatted as "yyyy-MM-dd HH:mm:ss"
   * @param data     movies to persist (already sorted and limited by the caller)
   */
  def updateTop(datetime: String, data: Array[MovieItem]): Unit = {
    println("插入数据....")
    // Register the JDBC driver (a no-op with JDBC 4+ drivers; kept for older ones).
    Class.forName("com.mysql.jdbc.Driver")
    val connection = DriverManager.getConnection("jdbc:mysql://192.168.6.66:3306/movies", "root", "root")
    try {
      val statement = connection.createStatement()
      try {
        // Clear the previous snapshot; the table holds exactly one ranking.
        statement.execute("truncate table  rating_count_top")
      } finally {
        statement.close()
      }
      // Parameterized insert: avoids SQL injection and per-row statement parsing.
      val insert = connection.prepareStatement("insert into rating_count_top values(?, ?, ?, ?)")
      try {
        for (movie <- data) {
          insert.setString(1, movie.movieId)
          insert.setInt(2, movie.ratingCount)
          insert.setFloat(3, movie.ratingSum)
          insert.setString(4, datetime)
          insert.addBatch()
        }
        insert.executeBatch()
      } finally {
        insert.close()
      }
    } finally {
      // Always release the connection, even when an insert fails.
      connection.close()
    }
  }

  /**
   * Entry point: consume "userId,movieId,rating,timestamp" lines from a socket,
   * keep a running (ratingSum, ratingCount) per movie via updateStateByKey,
   * and persist the 15 most-rated movies to MySQL on every micro-batch.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("RatingLive").setMaster("local[2]")

    // 1. Initialize a streaming context with 2-second micro-batches.
    //    updateStateByKey requires a checkpoint directory.
    val scc = new StreamingContext(conf, Seconds(2))
    scc.checkpoint("result/checkpoint")

    // 2. Reduce log noise.
    scc.sparkContext.setLogLevel("WARN")

    // 3. Input source: one CSV rating event per line.
    val sources: DStream[String] = scc.socketTextStream("localhost", 8888)

    /*
      4. Pipeline:
      - parse each line into (movieId, rating)
      - updateStateByKey folds each batch into a per-movie (ratingSum, ratingCount)
     */
    val ratings_pair = sources.flatMap { item =>
      // Fields: userId,movieId,rating,timestamp. Malformed or short lines are
      // dropped here instead of crashing the whole batch with an exception
      // (the original map called .toFloat unguarded).
      scala.util.Try {
        val values = item.split(",")
        (values(1), values(2).toFloat)
      }.toOption
    }

    val formater = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val movies = ratings_pair.updateStateByKey[MovieItem](
      // Merge this batch's ratings for a movie into the accumulated state.
      // The movieId is null here; the key is attached in the map below.
      (ratings: Seq[Float], preValue: Option[MovieItem]) => {
        val prev = preValue.getOrElse(MovieItem(null, 0f, 0))
        Option(MovieItem(null, prev.ratingSum + ratings.sum, prev.ratingCount + ratings.size))
      }
    ).map { case (movieId, item) =>
      // The state carried no id; attach the key now.
      item.copy(movieId = movieId)
    }

    // Report how many (movieId, rating) pairs arrived in each batch.
    ratings_pair.foreachRDD { rdd =>
      println("batch 键值对数量:" + rdd.count())
    }

    // For every batch: rank by rating count, keep the top 15, persist, and log.
    movies.foreachRDD { (rdd: RDD[MovieItem], time) =>
      val date = new Date(time.milliseconds)
      val time_str = formater.format(date)
      println("当前处理时间:" + time)

      val top = rdd.sortBy(movie => movie.ratingCount, ascending = false).take(15)
      // Persist this batch's snapshot to MySQL (driver-side).
      updateTop(time_str, top)
      top.foreach { item =>
        println(s"电影ID:${item.movieId} 评分总数:${item.ratingCount}")
      }
    }

    // 5. Start the computation and block until externally stopped.
    scc.start()
    scc.awaitTermination()
  }

}