package com.kylin


// $example on$
import java.util.Properties

import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.sql.SaveMode
// $example off$
import org.apache.spark.sql.SparkSession


object ALS {
  // NOTE(review): this object shares its name with the imported
  // org.apache.spark.ml.recommendation.ALS class. The object only shadows the
  // *term* name, so `new ALS()` below still resolves to the Spark class via the
  // type namespace — but renaming this object (e.g. ALSExample) would be clearer.

  /** One user→music rating parsed from a "::"-delimited line of the input file. */
  case class Rating(user_id: Int, music_id: Int, rating: Float, timestamp: Long)

  /**
   * Parses a line of the form "userId::musicId::rating::timestamp" into a [[Rating]].
   *
   * @param str one raw line of the ratings file
   * @return the parsed rating
   * @throws AssertionError if the line does not split into exactly 4 fields
   */
  def parseRating(str: String): Rating = {
    val fields = str.split("::")
    assert(fields.length == 4, s"Expected 4 '::'-separated fields, got ${fields.length} in: $str")
    Rating(fields(0).toInt, fields(1).toInt, fields(2).toFloat, fields(3).toLong)
  }

  // NOTE(review): database credentials and host are hard-coded in source control.
  // Move them to configuration / environment variables before any real deployment.
  val mysql_user = "music_itemcf"
  val mysql_pwd = "music_itemcf"
  val mysql_driver = "com.mysql.jdbc.Driver"
  val mysql_url = "jdbc:mysql://115.29.140.3:3306/febs_music_itemcf?useUnicode=true&characterEncoding=UTF-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC"

  /**
   * Trains an ALS collaborative-filtering model on the sample ratings file,
   * prints the test-set RMSE, and writes top-10 item- and user-based
   * recommendations to the `item_cf` and `user_cf` MySQL tables.
   */
  def main(args: Array[String]): Unit = {
    // JDBC settings used when writing the recommendation tables below.
    val connectionProperties = new Properties
    connectionProperties.setProperty("user", mysql_user)
    connectionProperties.setProperty("password", mysql_pwd)
    connectionProperties.setProperty("driver", mysql_driver)

    val spark = SparkSession
      .builder
      .appName("ALS")
      .master("local[*]")
      .getOrCreate()

    try {
      import spark.implicits._

      // $example on$
      val ratings = spark.read.textFile("engine/src/main/resources/sample_musiclens_ratings.txt")
        .map(parseRating)
        .toDF()
      // 80/20 train/test split. No seed is supplied, so the split (and the
      // reported RMSE) differs between runs.
      val Array(training, test) = ratings.randomSplit(Array(0.8, 0.2))

      // Build the recommendation model using ALS on the training data.
      // `new ALS()` is the imported Spark estimator, not this enclosing object.
      val als = new ALS()
        .setMaxIter(5)
        .setRegParam(0.01)
        .setUserCol("user_id")
        .setItemCol("music_id")
        .setRatingCol("rating")
      val model = als.fit(training)

      // Evaluate the model by computing the RMSE on the test data.
      // Cold-start strategy 'drop' removes prediction rows that are NaN (users
      // or items unseen during training) so the metric itself is never NaN.
      model.setColdStartStrategy("drop")
      val predictions = model.transform(test)

      val evaluator = new RegressionEvaluator()
        .setMetricName("rmse")
        .setLabelCol("rating")
        .setPredictionCol("prediction")
      val rmse = evaluator.evaluate(predictions)
      println(s"Root-mean-square error = $rmse")

      // Generate top 10 music recommendations for each user.
      val userRecs = model.recommendForAllUsers(10)
      // Generate top 10 user recommendations for each music.
      val musicRecs = model.recommendForAllItems(10)

      // Top 10 music recommendations for a small sample of 3 users.
      val users = ratings.select(als.getUserCol).distinct().limit(3)
      val userSubsetRecs = model.recommendForUserSubset(users, 10)
      // Top 10 user recommendations for every distinct music (deliberately no
      // limit here, so the item_cf table below covers the full catalogue).
      val musics = ratings.select(als.getItemCol).distinct()
      val musicSubSetRecs = model.recommendForItemSubset(musics, 10)
      // $example off$
      userRecs.show(10, truncate = false)
      musicRecs.show(10, truncate = false)
      userSubsetRecs.show(10, truncate = false)
      musicSubSetRecs.show(20, truncate = false)
      userSubsetRecs.printSchema()
      musicSubSetRecs.printSchema()

      import org.apache.spark.sql.functions._
      // Flatten the nested `recommendations` array column into one row per
      // (id, recommendation) pair, then project the struct fields via SQL.
      musicSubSetRecs
        .withColumn("recommendation", explode(col("recommendations")))
        .drop("recommendations")
        .createOrReplaceTempView("music_recommendation")
      userSubsetRecs
        .withColumn("recommendation", explode(col("recommendations")))
        .drop("recommendations")
        .createOrReplaceTempView("user_recommendation")
      val res_music_cf = spark.sql("select music_id,recommendation.user_id,recommendation.rating from music_recommendation")
      val res_user_cf = spark.sql("select user_id ,recommendation.music_id,recommendation.rating from user_recommendation")
      res_music_cf.show(100)
      res_user_cf.show(100)
      // SaveMode.Overwrite drops and recreates the target tables on every run.
      res_music_cf.write.mode(SaveMode.Overwrite).jdbc(mysql_url, "item_cf", connectionProperties)
      res_user_cf.write.mode(SaveMode.Overwrite).jdbc(mysql_url, "user_cf", connectionProperties)
    } finally {
      // Always release the SparkSession, even if training or the JDBC write fails.
      spark.stop()
    }
  }
}
