package com.ocean.contentrecommend

import java.sql.Connection

import org.apache.spark.SparkConf
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix


/**
  * A movie row as stored in the MySQL `movie` table.
  *
  * @param mid    movie ID
  * @param name   movie title
  * @param genres the movie's genres as one string, items separated by "|"
  */
case class Movie(mid: Int, name: String, genres: String)

/**
  * MySQL connection configuration.
  *
  * @param uri      JDBC connection URI
  * @param user     database user name
  * @param password database password
  */
case class MysqlConfig(uri: String, user: String, password: String)

/**
  * Content-based movie recommendation.
  *
  * Reads the movie table from MySQL, turns each movie's genre string into a
  * TF-IDF feature vector, computes pairwise cosine similarity between all
  * movies, and writes every pair scoring above [[MOVIE_SIMILARITY_THRESHOLD]]
  * back to MySQL as the recommendation table.
  */
object ContentRecommend {
  // Source MySQL table holding the movies.
  val MySql_MOVIE_Table = "movie"
  // Target MySQL table for the computed recommendations.
  val CONTENT_MOVIE_RECOMMENDATION = "content_movie_recommendation"
  // Minimum cosine similarity for a movie pair to be kept as a recommendation.
  val MOVIE_SIMILARITY_THRESHOLD = 0.6

  def main(args: Array[String]): Unit = {
    // NOTE(review): connection details and credentials are hard-coded; move
    // them to external configuration (args / properties file) before deploying.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.uri" -> "jdbc:mysql://192.168.10.105:3306/recommend?useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true&useSSL=false",
      "mysql.user" -> "root",
      "mysql.password" -> "cde32wsxzaq1"
    )

    // Build the SparkConf and SparkSession.
    val sparkConf: SparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("ContentRecommend").set("spark.driver.maxResultSize", "5g")
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    implicit val mysqlConfig: MysqlConfig = MysqlConfig(config("mysql.uri"), config("mysql.user"), config("mysql.password"))

    // Load the movies and replace the '|' genre separator with spaces so the
    // whitespace-splitting Tokenizer below treats each genre as one "word".
    val movieTagsDF: DataFrame = spark.read.format("jdbc")
      .option("url", mysqlConfig.uri)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("dbtable", MySql_MOVIE_Table)
      .load()
      .as[Movie]
      .map(data => (data.mid, data.name, data.genres.map(c => if (c == '|') ' ' else c)))
      .toDF("mid", "name", "genres")
      .cache()

    // TF-IDF pipeline: tokenize the genres string into words.
    val tokenizer: Tokenizer = new Tokenizer().setInputCol("genres").setOutputCol("words")
    val wordsData: DataFrame = tokenizer.transform(movieTagsDF)

    // Hash words into a 50-dimensional term-frequency vector. The genre
    // vocabulary is small, so 50 features keeps hash collisions unlikely.
    val hashingTF: HashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(50)
    val featuredData: DataFrame = hashingTF.transform(wordsData)

    // Fit IDF to obtain each term's inverse document frequency, then rescale
    // the raw term frequencies into TF-IDF feature vectors.
    val idf: IDF = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel: IDFModel = idf.fit(featuredData)
    val rescaleData: DataFrame = idfModel.transform(featuredData)

    // Materialize (mid, featureVector) pairs as jblas matrices for fast dot
    // products.
    val movieFeatures: RDD[(Int, DoubleMatrix)] = rescaleData.map(
      row => (row.getAs[Int]("mid"), row.getAs[SparseVector]("features").toArray)
    ).rdd
      .map(x => (x._1, new DoubleMatrix(x._2)))

    // All-pairs cosine similarity. Both (a, b) and (b, a) are kept on purpose:
    // each movie needs its own list of similar movies. Self-pairs are dropped.
    val similarMovieDF: DataFrame = movieFeatures.cartesian(movieFeatures)
      .filter {
        case (movieFeature1, movieFeature2) => movieFeature1._1 != movieFeature2._1
      }
      .map {
        case (movieFeature1, movieFeature2) => {
          val simScore: Double = this.cosineSim(movieFeature1._2, movieFeature2._2)
          (movieFeature1._1, (movieFeature2._1, simScore))
        }
      }.filter(_._2._2 > MOVIE_SIMILARITY_THRESHOLD)
      .groupByKey()
      .map {
        case (mid, simMovieIter) => {
          // Most-similar movies first.
          val sortedSimilarMovies: List[(Int, Double)] = simMovieIter.toList.sortBy(_._2)(Ordering.Double.reverse)
          (mid, sortedSimilarMovies)
        }
      }.flatMap(pair => {
      // Flatten each (mid, list) into one row per recommended movie.
      val mid: Int = pair._1
      val similarMovieList: List[(Int, Double)] = pair._2
      for (elem <- similarMovieList) yield {
        (mid, elem._1, elem._2)
      }
    }).toDF("mid", "sim_mid", "sim_score")

    saveToMySqlMovieRecommendation(CONTENT_MOVIE_RECOMMENDATION, similarMovieDF)

    spark.stop()
  }

  /**
    * Cosine similarity of two feature vectors.
    *
    * @param movie1 feature vector of the first movie
    * @param movie2 feature vector of the second movie
    * @return dot(movie1, movie2) / (|movie1| * |movie2|); NaN if either
    *         vector has zero norm (such pairs fail the `> threshold` filter)
    */
  def cosineSim(movie1: DoubleMatrix, movie2: DoubleMatrix): Double = {
    movie1.dot(movie2) / (movie1.norm2() * movie2.norm2())
  }

  /**
    * Writes the recommendation DataFrame to MySQL and creates lookup indexes
    * on the target table.
    *
    * @param CONTENT_MOVIE_RECOMMENDATION target table name
    * @param movieRecommendationDF        rows (mid, sim_mid, sim_score)
    * @param mysqlConfig                  implicit connection configuration
    */
  def saveToMySqlMovieRecommendation(CONTENT_MOVIE_RECOMMENDATION: String, movieRecommendationDF: DataFrame)(implicit mysqlConfig: MysqlConfig) = {
    movieRecommendationDF.write
      .mode("overwrite")
      .format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", mysqlConfig.uri)
      .option("dbtable", CONTENT_MOVIE_RECOMMENDATION)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("isolationLevel", "SERIALIZABLE")
      .option("truncate", "true")
      .option("batchsize", "2000")
      .save()

    // Acquire the connection only after the Spark write succeeds, so a failed
    // write cannot leak it. "index already exists" errors (table truncated,
    // not dropped, on overwrite) are logged and ignored.
    val connection: Connection = JDBCUtil.getConnection
    try {
      connection.prepareStatement(
        s"""
           |create index idx_mid on  $CONTENT_MOVIE_RECOMMENDATION(mid)
      """.stripMargin).execute()

      // BUG FIX: this statement was previously prepared but never executed,
      // so the idx_sim_mid index was silently never created.
      connection.prepareStatement(
        s"""
           |create index idx_sim_mid on  $CONTENT_MOVIE_RECOMMENDATION(sim_mid)
      """.stripMargin).execute()
    } catch {
      case e: Exception => e.printStackTrace()
    } finally {
      // Always release the connection, even if index creation throws.
      connection.close()
    }
  }

}
