package com.kylin

import java.util.Properties

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SaveMode, SparkSession}

// Item-based collaborative filtering (ItemCF) batch job:
// reads user-item ratings from MySQL, computes item-item similarity,
// derives per-user recommendations, prints both, and writes them back to MySQL.
object ItemCF {

  // NOTE(review): credentials and DB host are hardcoded in source — move them to
  // configuration (spark-submit --conf / environment variables) before production.
  // Kept as-is here because these vals are part of the object's public interface.
  val mysql_user="root"
  val mysql_pwd ="root"
  val mysql_driver="com.mysql.jdbc.Driver"
  val mysql_url = "jdbc:mysql://115.29.140.3:3306/febs_food_itemcf?useUnicode=true&characterEncoding=UTF-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC"


  def main(args: Array[String]): Unit = {

    // JDBC settings shared by the read and both writes.
    val connectionProperties = new Properties
    connectionProperties.setProperty("user", mysql_user)
    connectionProperties.setProperty("password", mysql_pwd)
    connectionProperties.setProperty("driver", mysql_driver)
    // On SaveMode.Overwrite, TRUNCATE the table instead of DROP+CREATE,
    // preserving the existing table DDL (indexes, column types).
    connectionProperties.setProperty("truncate", "true")

    // 1. Build the Spark session (local demo configuration).
    val spark = SparkSession.builder.appName("ItemCF").master("local[2]").getOrCreate()
    Logger.getRootLogger.setLevel(Level.WARN)

    // 2. Read ratings (user, item, rating) from MySQL.
    //    FIX: use spark.read (spark.sqlContext is the deprecated pre-2.0 path)
    //    and map each Row directly to ItemPref, replacing the original
    //    concat-to-CSV-then-split(",") round trip, which would have broken
    //    on any field value containing a comma.
    val df = spark.read.jdbc(mysql_url, "ratings", connectionProperties)
    val user_data: RDD[ItemPref] = df.rdd
      .map(row => ItemPref(row(0).toString, row(1).toString, row(2).toString.toDouble))
      .cache()

    // 3. Build the model: co-occurrence item-item similarity, then the
    //    top-30 recommendations per user.
    val mysimil = new ItemSimilarity()
    val simil_rdd1: RDD[ItemSimi] = mysimil.Similarity(user_data, "cooccurrence")
    val recommd = new RecommendedItem
    val recommd_rdd1: RDD[UserRecomm] = recommd.Recommend(simil_rdd1, user_data, 30)

    // Sort each RDD once and reuse the result for both console output and
    // the DB write (the original sorted each RDD twice).
    val simil_sorted = simil_rdd1.sortBy(_.similar, ascending = false)
    val recommd_sorted = recommd_rdd1.sortBy(_.pref, ascending = false)

    // 4. Print results. collect() pulls everything to the driver — acceptable
    //    for this local[2] demo job, not for large datasets.
    println(s"物品相似度矩阵(商品i,商品j,相似度)：${simil_rdd1.count()}")
    // FIX: lambda parameter renamed — the original named it `ItemSimi`,
    // shadowing the case-class type of the same name.
    simil_sorted.collect().foreach { sim =>
      println("物品相似度矩阵[商品iD:" + sim.itemid1 + ",商品ID:" + sim.itemid2 + ",的相似度为:" + sim.similar + "]")
    }
    println(s"用户推荐列表(用户,商品,推荐值)：${recommd_rdd1.count()}")
    recommd_sorted.collect().foreach { rec =>
      println("用户推荐列表[用户ID:" + rec.userid + ",商品ID:" + rec.itemid + ",推荐值:" + rec.pref + "]")
    }

    // 5. Persist results back to MySQL (tables truncated, then overwritten).
    import spark.implicits._
    recommd_sorted.toDF().write.mode(SaveMode.Overwrite).jdbc(mysql_url, "user_cf", connectionProperties)
    simil_sorted.toDF().write.mode(SaveMode.Overwrite).jdbc(mysql_url, "item_df", connectionProperties)

    user_data.unpersist()
    // spark.stop() also stops the underlying SparkContext.
    spark.stop()
  }
}

