package com.wtw.streaming

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

import scala.reflect.internal.util.TableDef.Column

/** One row of the MovieRecs JDBC table: a movie id plus its recommendation
  * list serialized into a single string (parsed in `StreamingRecommender.main`
  * as comma-joined "(mid,score)" pairs). */
case class Recs(mid: Int, recs: String)

/** A single recommended movie: the target movie id and its similarity/priority score. */
case class Recommendation(mid: Int, score: Double)

/** MySQL JDBC connection settings: url, credentials, and driver class name. */
case class MysqlConfig(url: String, user: String, password: String, driver: String)

/** Similarity list for one movie.
  * NOTE(review): `Array[Seq[Recommendation]]` nests a collection inside an array,
  * and this class is unused in this file — `Seq[Recommendation]` was probably
  * intended. Confirm against callers elsewhere before changing. */
case class MovieRecs(mid: Int, recs: Array[Seq[Recommendation]])

/** Shared Redis connection holder. Marked Serializable so the object can be
  * captured by Spark closures; the connection itself is opened lazily on
  * first use rather than at object initialization. */
object ConnHelper extends Serializable {
  // Hard-coded Redis endpoint; `lazy` defers the network connect until needed.
  lazy val jedis: Jedis = new Jedis("192.168.120.201", 6379)
}


/**
 * Real-time recommender: consumes rating events from Kafka, combines each
 * user's recent ratings (Redis) with an offline movie-similarity matrix
 * (MySQL) and upserts per-user streaming recommendations back into MySQL.
 */
object StreamingRecommender {
  /** Max number of most-recent ratings pulled from Redis per user. */
  val MAX_USER_RATINGS_NUM = 20
  /** Max number of candidate movies taken from the similarity matrix. */
  val MAX_SIM_MOVIES_NUM = 20
  val MYSQL_STREAM_RECS_COLLECTION = "StreamRecs"
  val MYSQL_RATING_COLLECTION = "Rating"
  val MYSQL_MOVIE_RECS_COLLECTION = "MovieRecs"

  // FIX: "kafka.topic" was missing from this map, so config("kafka.topic")
  // in main() threw NoSuchElementException before the stream ever started.
  val config = Map(
    "spark.cores" -> "local[*]",
    "mysql.url" -> "jdbc:mysql://localhost:3306/recommend",
    "mysql.user" -> "root",
    "mysql.password" -> "root",
    "mysql.driver" -> "com.mysql.jdbc.Driver",
    "kafka.topic" -> "recommender"
  )
  val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("StreamingRecommender")
  val spark = SparkSession.builder().config(sparkConf).getOrCreate()
  val sc = spark.sparkContext
  val ssc = new StreamingContext(sc, Seconds(5))

  // FIX: explicit .asScala conversions (JavaConverters) instead of the
  // deprecated implicit JavaConversions wildcard.
  import scala.collection.JavaConverters._

  /**
   * Reads the user's most recent ratings from the Redis list "uid:&lt;uid&gt;".
   * Each element is expected to look like "&lt;mid&gt;:&lt;score&gt;".
   *
   * @param num   maximum number of ratings to return
   * @param uid   user id (suffix of the Redis key)
   * @param jedis open Redis connection
   * @return (mid, score) pairs, newest first
   */
  def getUserRecentlyRating(num: Int, uid: Int, jedis: Jedis): Array[(Int, Double)] = {
    // FIX: LRANGE's end index is inclusive, so [0, num - 1] yields exactly
    // num entries; the original [0, num] returned num + 1.
    jedis.lrange("uid:" + uid.toString, 0, num - 1).asScala.map { item =>
      val attr = item.split("\\:")
      (attr(0).trim.toInt, attr(1).trim.toDouble)
    }.toArray
  }

  /**
   * Picks the `num` movies most similar to `mid` that user `uid` has not rated.
   *
   * @param num       number of candidates to return
   * @param mid       movie the user just rated
   * @param uid       user id
   * @param simMovies full similarity matrix: mid -> (mid -> similarity)
   * @param ratingDF  historical ratings with at least `uid` and `mid` columns
   * @return candidate movie ids ordered by descending similarity
   */
  def getTopSimMovies(num: Int, mid: Int, uid: Int, simMovies: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]], ratingDF: DataFrame): Array[Int] = {
    val allSimMovies: Array[(Int, Double)] = simMovies(mid).toArray

    // FIX: filter the DataFrame that was passed in — the original ignored the
    // ratingDF parameter and queried a temp view registered elsewhere. uid is
    // an Int, so the literal predicate carries no injection risk. A Set makes
    // the exclusion check O(1) instead of an Array scan per candidate.
    val ratedMids: Set[Int] = ratingDF
      .filter(s"uid = $uid")
      .select("mid")
      .collect()
      .map(_.get(0).toString.toInt)
      .toSet

    allSimMovies
      .filterNot { case (candidateMid, _) => ratedMids.contains(candidateMid) }
      .sortWith(_._2 > _._2)
      .take(num)
      .map(_._1)
  }

  /**
   * Similarity between two movies, or 0.0 when either id is absent from the
   * matrix (flatMap flattens the two-level Option lookup).
   */
  def getMoviesSimScore(mid1: Int, mid2: Int, simMovies: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]]): Double =
    simMovies.get(mid1).flatMap(_.get(mid2)).getOrElse(0.0)

  /** Base-10 logarithm; used to damp the high/low rating counters. */
  def log(m: Int): Double = {
    val N = 10
    math.log(m) / math.log(N)
  }

  /**
   * Computes a priority score per candidate movie:
   * avg(sim(c, r) * rating(r)) + lg(#high ratings) - lg(#low ratings),
   * where only pairs with similarity > 0.7 contribute and "high" means
   * the recent rating was > 3.
   *
   * @return (mid, score) for every candidate with at least one sufficiently
   *         similar recent rating
   */
  def computeMovieScores(candidates: Array[Int], userRecentlyRatings: Array[(Int, Double)], simMovies: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]]): Array[(Int, Double)] = {
    val scores = scala.collection.mutable.ArrayBuffer[(Int, Double)]()
    val increMap = scala.collection.mutable.HashMap[Int, Int]()
    val decreMap = scala.collection.mutable.HashMap[Int, Int]()

    for (candidate <- candidates; (recentMid, recentScore) <- userRecentlyRatings) {
      val simScore = getMoviesSimScore(candidate, recentMid, simMovies)
      if (simScore > 0.7) {
        scores += ((candidate, simScore * recentScore))
        // FIX: getOrElse — getOrDefault is a java.util.Map method and only
        // compiled via an accidental JavaConversions implicit conversion.
        if (recentScore > 3)
          increMap(candidate) = increMap.getOrElse(candidate, 0) + 1
        else
          decreMap(candidate) = decreMap.getOrElse(candidate, 0) + 1
      }
    }

    // Default counter of 1 keeps a log term at 0 when the map has no entry.
    scores.groupBy(_._1).map { case (mid, scoreList) =>
      (mid,
        scoreList.map(_._2).sum / scoreList.length
          + log(increMap.getOrElse(mid, 1))
          - log(decreMap.getOrElse(mid, 1)))
    }.toArray
  }

  /**
   * Persists one user's streaming recommendations with a single upsert.
   *
   * Rewritten to use plain JDBC (java.sql) with a parameterized statement.
   * The original was broken in several ways: spark.read never returns null, so
   * its "table missing" branch was dead code; the Rows it built held one tuple
   * field instead of the two fields the schema declared; the createDataFrame
   * result was discarded; and the update path issued an UPDATE through
   * spark.sql, which Spark SQL does not support on temp views (and the query
   * string was missing a space before "where").
   *
   * NOTE(review): ON DUPLICATE KEY UPDATE requires a PRIMARY KEY or UNIQUE
   * index on StreamRecs.uid — confirm the table DDL.
   */
  def storeDataInMysql(uid: Int, recs: Array[(Int, Double)])(implicit mysqlConfig: MysqlConfig) = {
    import java.sql.DriverManager

    Class.forName(mysqlConfig.driver)
    val conn = DriverManager.getConnection(mysqlConfig.url, mysqlConfig.user, mysqlConfig.password)
    try {
      val stmt = conn.prepareStatement(
        s"INSERT INTO $MYSQL_STREAM_RECS_COLLECTION (uid, recs) VALUES (?, ?) " +
          "ON DUPLICATE KEY UPDATE recs = VALUES(recs)")
      try {
        stmt.setInt(1, uid)
        // Same serialization as the original: "[(mid,score),(mid,score),...]"
        stmt.setString(2, recs.mkString("[", ",", "]"))
        stmt.executeUpdate()
      } finally {
        stmt.close()
      }
    } finally {
      conn.close()
    }
  }

  def main(args: Array[String]): Unit = {

    import spark.implicits._
    implicit val mysqlConfig = MysqlConfig(config("mysql.url"), config("mysql.user"), config("mysql.password"), config("mysql.driver"))

    // Offline similarity recommendations, one serialized row per movie.
    val movieRecsRDD = spark.read.format("jdbc")
      .option("url", config("mysql.url"))
      .option("driver", config("mysql.driver"))
      .option("user", config("mysql.user"))
      .option("password", config("mysql.password"))
      .option("dbtable", MYSQL_MOVIE_RECS_COLLECTION)
      .load()
      .as[Recs]
      .rdd

    val ratingDF: DataFrame = spark.read.format("jdbc")
      .option("url", config("mysql.url"))
      .option("driver", config("mysql.driver"))
      .option("user", config("mysql.user"))
      .option("password", config("mysql.password"))
      .option("dbtable", MYSQL_RATING_COLLECTION)
      .load()

    // Kept for any external consumers of the "ratings" view; getTopSimMovies
    // now filters ratingDF directly instead of querying this view.
    ratingDF.createOrReplaceTempView("ratings")

    // Parse the serialized recs string into Recommendation pairs: tokens come
    // in (id, score) couples wrapped in parentheses, e.g. "(12,0.93)".
    // FIX: val instead of var — the reference is never reassigned.
    val simMovie: RDD[(Int, Seq[Recommendation])] = movieRecsRDD.map { x =>
      val arr = x.recs.split(",")
      val parsed: Seq[Recommendation] = for (i <- Range(0, arr.length, 2))
        yield Recommendation(arr(i).substring(1).toInt, arr(i + 1).substring(0, arr(i + 1).length - 1).toDouble)
      (x.mid, parsed)
    }

    // mid -> (mid -> similarity), materialized on the driver then broadcast.
    val simMovieMatrix = simMovie.map { case (mid, recsSeq) =>
      (mid, recsSeq.map(r => (r.mid, r.score)).toMap)
    }.collectAsMap()

    val simMovieMatrixBroadCast = sc.broadcast(simMovieMatrix)

    val kafkaPara = Map(
      "bootstrap.servers" -> "192.168.120.201:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "recommender",
      "auto.offset.reset" -> "latest"
    )

    val kafkaStream = KafkaUtils.createDirectStream[String, String](ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(config("kafka.topic")), kafkaPara))

    // Each Kafka message is pipe-delimited; the four fields are consumed
    // below as (mid, uid, score, timestamp).
    val ratingStream = kafkaStream.map { msg =>
      val attr = msg.value().split("\\|")
      (attr(0).toInt, attr(1).toInt, attr(2).toDouble, attr(3).toInt)
    }

    ratingStream.foreachRDD { rdd =>
      // FIX: collect to the driver before processing. The original called
      // rdd.foreach, which ships the closure to executors where the
      // SparkSession / DataFrame used by getTopSimMovies (and spark.read in
      // the old store path) cannot be used.
      rdd.collect().foreach { case (mid, uid, score, timestamp) =>
        println(">>>>>>>>>>>>>>>>" + mid + "," + uid + "," + score)

        // 1. The user's K most recent ratings from Redis.
        val userRecentlyRatings = getUserRecentlyRating(MAX_USER_RATINGS_NUM, uid, ConnHelper.jedis)

        // 2. The N most similar movies the user has not rated yet.
        val simMovies = getTopSimMovies(MAX_SIM_MOVIES_NUM, mid, uid, simMovieMatrixBroadCast.value, ratingDF)

        // 3. Priority score for every candidate.
        val streamRecs = computeMovieScores(simMovies, userRecentlyRatings, simMovieMatrixBroadCast.value)

        // 4. Upsert into MySQL.
        storeDataInMysql(uid, streamRecs)
      }
    }

    ssc.start()

    ssc.awaitTermination()
  }
}
