package com.wtw.content

import org.apache.spark.SparkConf
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jblas.DoubleMatrix

// One row of the output table: a movie id plus its similar movies serialized
// as a comma-joined list of "[mid,score]" fragments (see main's groupByKey stage).
case class MovieRecs(mid: Int, recs: String)

// JDBC connection settings, passed implicitly to storeDFInMysql.
case class MysqlConfig(url: String, user: String, password: String, driver: String)

// A user's rating of a movie; timestamp presumably epoch seconds — not read
// anywhere in this file, likely shared with sibling recommender jobs.
case class MovieRating(uid: Int, mid: Int, score: Double, timestamp: Int)

// Row shape of the MySQL "Movie" table; genres is a '|'-separated tag string
// (split on '|' in main before tokenization).
case class Movie(mid: Int, name: String, descri: String, timelong: String, issue: String,
                 shoot: String, language: String, genres: String, actors: String, directors: String)

/**
 * Content-based recommender: builds TF-IDF feature vectors from each movie's
 * genre tags, computes pairwise cosine similarity, and stores every movie's
 * strongly-similar neighbours (similarity > 0.6) in MySQL.
 */
object ContentRecommender {
  /** Source table holding movie metadata. */
  val MYSQL_MOVIE_COLLECTION = "Movie"
  // NOTE(review): "RESCS" looks like a typo for "RECS"; name kept so external callers keep compiling.
  val CONTENT_MOVIE_RESCS = "ContentMovieRecs"
  val USER_MAX_RECOMMENDATION = 20

  /**
   * Cosine similarity of two dense feature vectors.
   *
   * Returns 0.0 when either vector has zero norm. The original code divided
   * unconditionally and produced NaN for zero vectors; NaN was silently dropped
   * only because `NaN > 0.6` is false downstream — returning 0.0 keeps the same
   * final result while avoiding NaN propagation.
   *
   * NOTE(review): "consinSim" is a typo for "cosineSim"; kept for callers.
   */
  def consinSim(movie1: DoubleMatrix, movie2: DoubleMatrix): Double = {
    val denom = movie1.norm2() * movie2.norm2()
    if (denom == 0.0) 0.0 else movie1.dot(movie2) / denom
  }

  /** Overwrite-writes `df` into the MySQL table `collections_name` over JDBC. */
  def storeDFInMysql(df: DataFrame, collections_name: String)(implicit mysqlConfig: MysqlConfig): Unit = {
    df.write.mode("overwrite")
      .format("jdbc")
      .option("driver", mysqlConfig.driver)
      .option("url", mysqlConfig.url)
      .option("dbtable", collections_name) // 表名 (target table name)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .save()
  }

  def main(args: Array[String]): Unit = {
    // SECURITY(review): credentials are hard-coded; load from environment/config in production.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.url" -> "jdbc:mysql://localhost:3306/recommend",
      "mysql.user" -> "root",
      "mysql.password" -> "root",
      "mysql.driver" -> "com.mysql.jdbc.Driver"
    )

    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OfflineRecommender")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._
    implicit val mysqlConfig: MysqlConfig =
      MysqlConfig(config("mysql.url"), config("mysql.user"), config("mysql.password"), config("mysql.driver"))

    // Load movies; turn the '|'-separated genre string into space-separated
    // tokens so the whitespace Tokenizer can split it. Cached: reused by the
    // ML pipeline transforms below.
    val movieTagsDF = spark.read.format("jdbc")
      .option("url", config("mysql.url"))
      .option("driver", config("mysql.driver"))
      .option("user", config("mysql.user"))
      .option("password", config("mysql.password"))
      .option("dbtable", MYSQL_MOVIE_COLLECTION)
      .load()
      .as[Movie]
      .map(x => (x.mid, x.name, x.genres.replace('|', ' ')))
      .toDF("mid", "name", "genres")
      .cache()

    // 创建分词器 按照 ' ' 分词 (tokenize genres on whitespace)
    val tokenizer = new Tokenizer().setInputCol("genres").setOutputCol("words")
    val wordsData = tokenizer.transform(movieTagsDF)

    // TF-IDF: hash genre tokens into a 50-dim term-frequency vector, then rescale by IDF.
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(50)
    val featurizedData = hashingTF.transform(wordsData)
    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel = idf.fit(featurizedData)
    val rescaledData = idfModel.transform(featurizedData)

    // (mid, feature vector) pairs as jblas matrices for fast dot products.
    val movieFeatures = rescaledData
      .map(row => (row.getAs[Int]("mid"), row.getAs[SparseVector]("features").toArray))
      .rdd
      .map { case (mid, features) => (mid, new DoubleMatrix(features)) }

    // Pairwise similarity via cartesian — O(n^2), acceptable for a small catalog.
    val movieRecs = movieFeatures.cartesian(movieFeatures)
      .filter { case (a, b) => a._1 != b._1 } // drop self-pairs
      .map { case (a, b) =>
        val simScore = this.consinSim(a._2, b._2) // 求余弦相似度 (cosine similarity)
        (a._1, (b._1, simScore))
      }
      .filter(_._2._2 > 0.6) // keep only strongly similar pairs
      .groupByKey()
      .map { case (mid, items) =>
        // Serialize neighbours as "[mid,score],[mid,score],..." — format unchanged.
        MovieRecs(mid, items.toList.map(x => "[" + x._1 + "," + x._2 + "]").mkString(","))
      }
      .toDF()

    storeDFInMysql(movieRecs, CONTENT_MOVIE_RESCS)

    movieTagsDF.unpersist() // release the cached source before shutdown
    spark.stop()
  }

}
