package com.oscar.itemcf

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

// Domain model: one user-book rating row as stored in the MySQL `Rating` table.
case class BookRating(userId: Int, bookId: Int, score: Double, timestamp: Int)

// Connection settings for the MySQL instance used for both reading and writing.
case class MySQLConfig(url: String, user: String, password: String)

// One recommended book together with its similarity score.
case class Recommendation(bookId: Int, score: Double)

// All recommendations for a given book, as a structured list.
case class BookRecs(bookId: Int, recs: Seq[Recommendation])

// The same recommendations flattened to a "bookId:score|bookId:score|..." string for the MySQL sink.
case class BookRecsOnMysql(bookId: Int, recs: String)
/**
  * Offline item-based collaborative filtering job.
  *
  * Reads user-book ratings from MySQL, computes a co-occurrence similarity
  * between every pair of books (books rated by the same users are similar),
  * keeps the top-N most similar books per book, and writes the result back
  * to MySQL as a flattened "bookId:score|..." string column.
  */
object ItemCFRecommender {
  // Source table: user ratings.
  val MYSQL_RATING = "Rating"
  // Target table: per-book similarity lists.
  val ITEM_CF_BOOK_RECS = "ItemCFBookRecs"
  // Keep at most this many similar books per book.
  val MAX_RECOMMENDATION = 20

  /**
    * Unimplemented legacy stub kept only for source compatibility.
    * Calling it throws `scala.NotImplementedError`; overload resolution
    * never selects it for a `DataFrame` argument — use
    * [[Pretreatment(itemRecs:org\.apache\.spark\.sql\.DataFrame)*]] instead.
    */
  @deprecated("unimplemented stub; use Pretreatment(itemRecs: DataFrame)", "itemcf")
  def Pretreatment(bookRecs: Any) = ???

  def main(args: Array[String]): Unit = {
    // Runtime configuration (hard-coded for this demo job; externalize for production).
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.url" -> "jdbc:mysql://localhost:3306/recommender",
      "mysql.user" -> "root",
      "mysql.password" -> "123456"
    )

    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OfflineRecommender")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    // Explicit type is required for implicit vals; Map.apply replaces the
    // former `config.get(k).get` (Option.get anti-pattern).
    implicit val mysqlConfig: MySQLConfig =
      MySQLConfig(config("mysql.url"), config("mysql.user"), config("mysql.password"))

    // Load (userId, bookId) pairs; score/timestamp are not needed for co-occurrence.
    val ratingDF = spark.read
      .format("jdbc")
      .option("url", mysqlConfig.url)
      .option("dbtable", MYSQL_RATING)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .load()
      .as[BookRating]
      .map(x => (x.userId, x.bookId))
      .toDF("userId", "bookId")
      .cache()

    // Ratings per book: the |N(i)| term of the co-occurrence formula.
    val bookRatingCountDF = ratingDF.groupBy("bookId").count()
    // Attach each book's rating count to every rating row.
    val ratingWithCountDF = ratingDF.join(bookRatingCountDF, "bookId")

    // Self-join on userId: every ordered pair of books rated by the same user.
    // (The redundant .select that repeated .toDF's column list was removed.)
    val joinedDF = ratingWithCountDF.join(ratingWithCountDF, "userId")
      .toDF("userId", "book1", "count1", "book2", "count2")
    joinedDF.createOrReplaceTempView("joined")

    // For each (book1, book2): how many users rated both, plus each book's own count.
    val cooccurrenceDF = spark.sql(
      """
        |select book1
        |, book2
        |, count(userId) as cocount
        |, first(count1) as count1
        |, first(count2) as count2
        |from joined
        |group by book1, book2
      """.stripMargin
    ).cache()

    // Score every pair, group by the first book, keep its top-N neighbours.
    val simDF = cooccurrenceDF.map { row =>
        val coocSim = cooccurrenceSim(row.getAs[Long]("cocount"), row.getAs[Long]("count1"), row.getAs[Long]("count2"))
        (row.getInt(0), (row.getInt(1), coocSim))
      }
      .rdd
      .groupByKey()
      .map { case (bookId, recs) =>
        BookRecs(bookId, recs.toList
          .filter(_._1 != bookId) // drop self-similarity (a book paired with itself)
          .sortWith(_._2 > _._2)
          .take(MAX_RECOMMENDATION)
          .map(x => Recommendation(x._1, x._2)))
      }
      .toDF()

    simDF.show(10, truncate = false)

    // Flatten the nested recommendation lists to a string column and save to MySQL.
    val bookSimDF: DataFrame = Pretreatment(simDF).toDF()
    bookSimDF.write
      .mode("overwrite")
      .format("jdbc")
      .option("url", mysqlConfig.url)
      .option("dbtable", ITEM_CF_BOOK_RECS)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .save()

    println("完美！你是最棒的！")
    spark.stop()
  }

  /**
    * Co-occurrence similarity: cocount / sqrt(count1 * count2).
    *
    * @param coCount number of users who rated both books
    * @param count1  total ratings of book 1
    * @param count2  total ratings of book 2
    * @return similarity in (0, 1]; counts come from group-by aggregates and are >= 1 here,
    *         so the denominator is never zero in practice
    */
  def cooccurrenceSim(coCount: Long, count1: Long, count2: Long): Double =
    coCount / math.sqrt(count1 * count2)

  /**
    * Flattens BookRecs-shaped rows into `BookRecsOnMysql(bookId, "id:score|id:score|...")`.
    *
    * Reads the Row structure directly instead of parsing `Row.toString()`:
    * the previous implementation split on the literal `",WrappedArray"`, which
    * silently breaks whenever Spark changes how it renders array columns.
    *
    * @param itemRecs DataFrame with schema (bookId: Int, recs: array<struct<bookId: Int, score: Double>>)
    * @return RDD of rows ready to be written to the MySQL sink table
    */
  def Pretreatment(itemRecs: DataFrame): RDD[BookRecsOnMysql] =
    itemRecs.rdd.map { row =>
      val bookId = row.getInt(0)
      val recsStr = row.getAs[Seq[Row]](1)
        .map(r => s"${r.getInt(0)}:${r.getDouble(1)}")
        .mkString("|")
      BookRecsOnMysql(bookId, recsStr)
    }

  /**
    * Legacy string-mangling helper for the old toString-based Pretreatment;
    * retained for source compatibility, no longer called by this object.
    */
  def chang(str: String): String =
    str.substring(2).dropRight(3)
      .replace("], [", "|")
      .replace(",", ":")
}
