package com.oscar.Offline

import org.apache.spark.SparkConf
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.jblas.DoubleMatrix


// Case classes modeling the rating source table and the recommendation outputs.
// One row of the MySQL "rating" table: a user's score for a book.
case class BookRating( userId: Int, bookId: Int, score: Double, timestamp: Int )
// MySQL connection settings, passed implicitly to the load/save calls.
case class MySQLConfig( url: String,user: String,password: String )
// A single recommendation entry: the recommended bookId and its score.
case class Recommendation(bookId: Int, score:Double)
// Per-user recommendation list (also reused per-book for the similarity lists).
case class UserRecs(userId: Int, recs: Seq[Recommendation])
// Intermediate holder for one book's ALS latent-factor vector (similarity input).
case class BookRecs(bookId: Int, recs: Array[Double])
// Row shape written to MySQL: itemId plus a flattened "id:score|id:score|..." string.
case class ItemRecsOnMySQL(itemId: Int, recs: String)

object OfflineRecommender {
  // MySQL table names and tuning parameters.
  val MYSQL_RATING = "rating"             // source table of user ratings
  val USER_RECS = "UserRecs"              // target table for per-user recommendations
  val BOOK_RECS = "bookRecs"              // target table for book-to-book similarities
  val USER_MAX_RECOMMENDATION = 10        // recommendations kept per user
  val SIMILAR_SCORE = 0.6                 // minimum cosine similarity to keep a book pair
  val PARAMETERARRAY = Array(50, 5, 0.01) // ALS hyper-parameters: rank, maxIter, regParam

  /**
    * Offline batch job: trains an ALS model on the rating table, then writes
    * (1) top-N recommendations per user and (2) a filtered book-similarity
    * matrix back to MySQL.
    */
  def main(args: Array[String]): Unit = {
    // Runtime configuration: Spark master and MySQL connection settings.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.url" -> "jdbc:mysql://localhost:3306/recommender",
      "mysql.user" -> "root",
      "mysql.password" -> "123456"
    )
    // Build the Spark session.
    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OfflineRecommender")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    // Implicit MySQL configuration; config(key) replaces config.get(key).get
    // (no Option#get, and a missing key fails with the key name in the message).
    implicit val mysqlConfig: MySQLConfig =
      MySQLConfig(config("mysql.url"), config("mysql.user"), config("mysql.password"))

    // Load the rating table as a typed Dataset.
    val ratingDS = spark.read
      .format("jdbc")
      .option("url", mysqlConfig.url)
      .option("dbtable", MYSQL_RATING)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .load()
      .as[BookRating]

    // 80/20 random split; only the training portion is used here (the held-out
    // 20% is reserved for the offline evaluation job that tuned PARAMETERARRAY).
    val Array(trainData, _) = ratingDS.randomSplit(Array(0.8, 0.2))

    // Train ALS with the pre-tuned parameters:
    // rank = number of latent factors, maxIter = iterations, regParam = L2 regularization.
    val model = new org.apache.spark.ml.recommendation.ALS()
      .setRank(PARAMETERARRAY(0).toInt)
      .setMaxIter(PARAMETERARRAY(1).toInt)
      .setRegParam(PARAMETERARRAY(2))
      .setUserCol("userId").setItemCol("bookId").setRatingCol("score")
      .fit(trainData.toDF())
    // Drop users/items unseen at training time so predictions never contain NaN.
    model.setColdStartStrategy("drop")

    // Top-N recommendations for every user.
    val userRecs: DataFrame = model.recommendForAllUsers(USER_MAX_RECOMMENDATION)
    println("获得用户商品推荐列表为：")
    userRecs.show(6)

    // Flatten the nested recommendation column to "id:score|..." and persist to MySQL.
    val UserRecsDF: DataFrame = Pretreatment(userRecs).toDF().withColumnRenamed("itemId", "userId")
    UserRecsDF.write
      .mode("overwrite")
      .format("jdbc")
      .option("url", mysqlConfig.url)
      .option("dbtable", USER_RECS)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .save()

    println("=====开始计算商品相似度矩阵=========")
    // Item-item similarity from the ALS item-factor matrix.
    // model.itemFactors rows print as "[id,WrappedArray(f0, f1, ...)]"; parse that
    // text back into (bookId, feature vector) pairs.
    // NOTE(review): this depends on the exact Row.toString format of the Spark
    // version in use — fragile; verify after any Spark upgrade.
    val bookFeatures = model.itemFactors.rdd.map(_.toString())
      .map { item =>
        val attr = item.split(",WrappedArray")
        BookRecs(attr(0).substring(1).toInt,
          attr(1).substring(1).dropRight(2).split(",").map(_.toDouble))
      }
      .map(x => (x.bookId, new DoubleMatrix(x.recs)))

    // Cartesian self-join: score every ordered pair of distinct books.
    val bookRecs = bookFeatures.cartesian(bookFeatures)
      .filter { case (a, b) => a._1 != b._1 } // drop self-pairs
      .map { case (a, b) =>
        (a._1, (b._1, consinSim(a._2, b._2))) // (book1Id, (book2Id, similarity))
      }
      .filter(_._2._2 > SIMILAR_SCORE) // keep only sufficiently similar pairs
      .groupByKey()
      .map { case (bookId, items) =>
        // BUG FIX: the original sorted the whole pair RDD *before* groupByKey,
        // but groupByKey does not preserve element order — sort each book's
        // list (descending by similarity) after grouping instead.
        UserRecs(bookId, items.toList.sortBy(-_._2).map(x => Recommendation(x._1, x._2)))
      }.toDF()

    // Flatten and persist the similarity lists to MySQL.
    val bookRecsDF: DataFrame = Pretreatment(bookRecs).toDF().withColumnRenamed("itemId", "bookId")
    bookRecsDF.write
      .mode("overwrite")
      .format("jdbc")
      .option("url", mysqlConfig.url)
      .option("dbtable", BOOK_RECS)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .save()

    println("完美！你是最棒的！")
    // Shut down Spark.
    spark.stop()
  }

  /**
    * Cosine similarity between two feature vectors.
    * Yields NaN when either vector has zero norm; the `> SIMILAR_SCORE`
    * filter upstream discards such pairs (NaN comparisons are false).
    */
  def consinSim(book1: DoubleMatrix, book2: DoubleMatrix): Double =
    book1.dot(book2) / (book1.norm2() * book2.norm2())

  /**
    * Flattens a recommendation DataFrame into ItemRecsOnMySQL rows.
    *
    * Input rows print as:
    *   [88110,WrappedArray([148549072,4.998024], [1759389,4.99786], ...)]
    * Output:
    *   ItemRecsOnMySQL(88110, "148549072:4.998024|1759389:4.99786|...")
    *
    * NOTE(review): parses Row.toString output, so it depends on the Spark
    * version's string format — verify after any Spark upgrade.
    */
  def Pretreatment(itemRecs: DataFrame) = {
    itemRecs.rdd.map(_.toString())
      .map { item =>
        // Split the id from the WrappedArray payload.
        val attr = item.split(",WrappedArray")
        ItemRecsOnMySQL(attr(0).substring(1).toInt, chang(attr(1)))
      }
  }

  /** Converts "([id,score], [id,score], ...)" text into "id:score|id:score|...". */
  def chang(str: String): String =
    str.substring(2).dropRight(3)
      .replace("], [", "|")
      .replace(",", ":")

}
