package com

import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix

/**
  * @author king
  * @date 2022/5/25 - 14:16
  */
object OfflinePreLoader {

  /** Maximum number of products kept in each user's recommendation list. */
  val USER_MAX_RECOMMENDATION = 20

  // Shared MySQL connection settings used by both the rating source table and
  // the recommendation result tables.
  // NOTE(review): credentials are hard-coded; move to external configuration.
  private val JdbcUrl = "jdbc:mysql://localhost:3306/shop?characterEncoding=UTF-8&serverTimezone=UTC"
  private val JdbcUser = "root"
  private val JdbcPassword = "123456"

  /** Minimum cosine similarity required to keep a product pair. */
  private val ProductSimThreshold = 0.4

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("offline")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    try {
      // Load ratings from MySQL, keeping only (userId, productId, score) triples.
      // Cached because it is reused for the user set, product set and training data.
      val ratingRDD = readTable(spark, "rating")
        .as[ProductRating]
        .rdd
        .map(rating => (rating.userId, rating.productId, rating.score))
        .cache()

      // Distinct users and products that appear in the ratings.
      val userRDD = ratingRDD.map(_._1).distinct()
      val productRDD = ratingRDD.map(_._2).distinct()

      // 1. Train the latent-factor model.
      // rank = number of latent features, iterations = ALS iterations,
      // lambda = regularization coefficient.
      val trainData = ratingRDD.map(x => Rating(x._1, x._2, x._3))
      val (rank, iterations, lambda) = (5, 10, 0.01)
      val model = ALS.train(trainData, rank, iterations, lambda)

      // 2. Predict the full user x product rating matrix (cartesian product of
      // the user and product sets) and derive each user's top-N recommendations.
      val userProducts = userRDD.cartesian(productRDD)
      val preRating = model.predict(userProducts)

      val userRecs = preRating
        .filter(_.rating > 0) // keep only positive predicted scores
        .map(rating => (rating.user, (rating.product, rating.rating)))
        .groupByKey()
        .map { case (userId, recs) =>
          UserRecs(userId, formatRecs(recs.toList.sortWith(_._2 > _._2).take(USER_MAX_RECOMMENDATION)))
        }
        .toDF()

      // 3. Compute product-to-product similarity from the learned feature vectors.
      val productFeatures = model.productFeatures.map {
        case (productId, features) => (productId, new DoubleMatrix(features))
      }
      val productRecs = productFeatures.cartesian(productFeatures)
        .filter { case (a, b) => a._1 != b._1 } // drop self-pairs
        .map { case (a, b) => (a._1, (b._1, consinSim(a._2, b._2))) }
        .filter(_._2._2 > ProductSimThreshold)
        .groupByKey()
        .map { case (productId, recs) =>
          ProductRecs(productId, formatRecs(recs.toList.sortWith(_._2 > _._2)))
        }
        .toDF()

      // Persist both result tables back to MySQL (same side-effect order as before:
      // product similarities first, then a sanity print, then user recommendations).
      writeTable(productRecs, "ProductRecs")
      userRecs.show(1) // quick sanity check in driver logs
      writeTable(userRecs, "user_recs")

      ratingRDD.unpersist()
    } finally {
      // Always release Spark resources, even when a job above fails.
      spark.stop()
    }
  }

  /** Reads one MySQL table as a DataFrame using the shared connection settings. */
  private def readTable(spark: SparkSession, table: String): DataFrame =
    spark.read.format("jdbc")
      .option("url", JdbcUrl)
      .option("dbtable", table)
      .option("user", JdbcUser)
      .option("password", JdbcPassword)
      .load()

  /** Overwrites one MySQL table with the given DataFrame using the shared settings. */
  private def writeTable(df: DataFrame, table: String): Unit =
    df.write.format("jdbc")
      .mode("overwrite")
      .option("url", JdbcUrl)
      .option("dbtable", table)
      .option("user", JdbcUser)
      .option("password", JdbcPassword)
      .save()

  /** Renders (id, score) pairs as "[Recommendation(id,score),...]" for storage as text. */
  private def formatRecs(recs: List[(Int, Double)]): String =
    recs.map { case (id, score) => Recommendation(id, score).toString }.mkString("[", ",", "]")

  /**
    * Cosine similarity of two feature vectors.
    * Guards against a zero-norm vector, which would otherwise yield NaN
    * through division by zero.
    */
  def consinSim(product1: DoubleMatrix, product2: DoubleMatrix): Double = {
    val denom = product1.norm2() * product2.norm2()
    if (denom == 0.0) 0.0 else product1.dot(product2) / denom
  }

  /** Debug helper left from development; prints a fixed marker string. */
  def say(): Unit = {
    print("111111")
  }
}
