package com.hive

import org.apache.spark.ml.feature.StringIndexer
import org.apache.spark.mllib.linalg.distributed.{CoordinateMatrix, MatrixEntry}
import org.apache.spark.sql.{Row, SaveMode, SparkSession}

/**
 * User-similarity recommendation job.
 *
 * Pipeline:
 *   1. Read student records (class, name, sex, birth) from the Hive table
 *      `stuinfo.stu`, deriving a per-row hash key.
 *   2. Map each hash to a dense numeric id with [[StringIndexer]] and persist
 *      the indexed table to MySQL.
 *   3. Build a (gpa x user) score matrix, compute cosine similarity between
 *      user columns via `RowMatrix.columnSimilarities()`, and write the
 *      (user_id, sim_user_id, score) triples to MySQL `user_similarity`.
 */
object Similar {

  // Single JDBC target for both output tables (was duplicated inline).
  private val JdbcUrl = "jdbc:mysql://mysql:3306/spark?characterEncoding=UTF-8"

  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .appName("UserSimilarity")
      .master("local[*]") // local run; override via spark-submit --master in cluster mode
      .enableHiveSupport()
      .getOrCreate()

    try {
      // 1. Load student info and derive a stable hash key per row.
      val rawDf = spark.sql(
        """
          |select hash(class,name,sex,birth) as hash,t.* from stuinfo.stu t
          |""".stripMargin)
      rawDf.show(false)

      // Map the hash key to a dense numeric id column
      // (StringIndexer.fit trains a StringIndexerModel, transform applies it).
      val indexer = new StringIndexer()
        .setInputCol("hash")
        .setOutputCol("id")
      val indexedDf = indexer.fit(rawDf).transform(rawDf)
      indexedDf.createOrReplaceTempView("stuinfo")

      // 2. Persist the indexed table to MySQL.
      // NOTE(review): credentials were hard-coded; env vars are honored here as a
      // backward-compatible override — move them to a secret store / config.
      val prop = new java.util.Properties
      prop.setProperty("user", sys.env.getOrElse("MYSQL_USER", "root"))
      prop.setProperty("password", sys.env.getOrElse("MYSQL_PASSWORD", "123456"))
      indexedDf.write.mode(SaveMode.Overwrite).jdbc(JdbcUrl, "stuinfo", prop)

      // 3. Aggregate to one score per (user id, gpa) cell.
      val scoreDf = spark.sql(
        """
          |select id,gpa,count(1) score from stuinfo group by id,gpa
          |""".stripMargin)

      import spark.implicits._

      // Users go on the COLUMN axis (gpa on rows) because
      // columnSimilarities() computes cosine similarity between columns.
      // NOTE(review): any row not matching (Double, Int, Long) throws
      // MatchError at runtime — confirm the Hive type of `gpa`.
      val matrixRdd = scoreDf.rdd.map {
        case Row(id: Double, gpa: Int, score: Long) =>
          MatrixEntry(gpa.toLong, id.toLong, score.toDouble)
      }
      val coordinateMatrix = new CoordinateMatrix(matrixRdd)

      // Entries are (user i, user j, cosine similarity), upper-triangular.
      val similarityEntries = coordinateMatrix
        .toRowMatrix()
        .columnSimilarities()
        .entries

      val resultDf = similarityEntries
        .map(e => (e.i, e.j, e.value))
        .toDF("user_id", "sim_user_id", "score")

      println("res_df.cnt:" + resultDf.count())
      resultDf.printSchema()
      resultDf.show()
      // Previously considered: createTableColumnTypes = "score double(5,2)"
      // via .options(...) to control the MySQL column type.
      resultDf.write.mode(SaveMode.Overwrite).jdbc(JdbcUrl, "user_similarity", prop)
    } finally {
      // Always release the SparkContext, even when a stage or JDBC write fails.
      spark.stop()
    }
  }

}
