package recommendPackage

import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.DefaultFormats
import org.json4s.jackson.Json
import utils.KafkaProducer
import utils.WriteRDDToMySQL.connection


class recommend {

  /**
   * Reads `student.data`, extracts (studentId, gpa) pairs, sorts them by GPA
   * descending, persists the top `top` rows into MySQL and returns them.
   *
   * @param top number of highest-GPA students to keep
   * @return the top `top` (studentId, gpa) pairs, GPA descending
   */
  def Top(top: Int): Array[(Int, Double)] = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("helloMlib")
    val sc = new SparkContext(conf)
    try {
      val line: RDD[String] = sc.textFile("src/main/resources/student.data")
      // Column 4 is assumed to be the student id, column 6 the GPA — TODO confirm
      // against the actual file schema.
      val value: RDD[(Int, Double)] = line.map { x =>
        val fields = x.split("\t")
        (fields(4).toInt, fields(6).toDouble)
      }
      // take(top) on the sorted RDD truncates on the executors; the original
      // collect().take(top) pulled the ENTIRE dataset to the driver first.
      val tuples: Array[(Int, Double)] = value.sortBy(_._2, ascending = false).take(top)
      // NOTE(review): database credentials are hard-coded — move to external
      // configuration before this leaves development.
      connection("root", "mysqlym123", "jdbc:mysql://localhost:3306/recommendPackage?useSSL=false",
        "INSERT INTO top (stu_id, gpa) VALUES (?, ?)", tuples, sc)
      tuples
    } finally {
      // Original leaked the SparkContext; without stop() a second context
      // cannot be created in the same JVM.
      sc.stop()
    }
  }

  /**
   * Trains an ALS model over `student.data` and returns the top-3
   * recommendations for `stuId`, publishing them as JSON to the "topN"
   * Kafka topic.
   *
   * @param stuId the student (user) id to recommend for
   * @return the 3 highest-scored [[Rating]]s for `stuId`
   */
  def recommend(stuId: Int): Array[Rating] = {
    val sc = new SparkContext("local[2]", "First Spark App")
    sc.setLogLevel("ERROR")
    try {
      val rawData = sc.textFile("src/main/resources/student.data")
      val rawRatings = rawData.map(_.split("\t"))
      // NOTE(review): column 6 is used both as the "product" id (toInt) and,
      // offset by +10, as the rating value — looks suspicious; confirm this is
      // intended against the data schema.
      val ratings = rawRatings.map(x => (x(4).toInt, x(6).toInt, x(6).toDouble + 10))
      // Binders renamed: the original `case (stuId, ...)` shadowed the method
      // parameter `stuId`, which was confusing (behavior unchanged).
      val ratingRDD: RDD[Rating] = ratings.map {
        case (userId, productId, score) => Rating(userId, productId, score)
      }
      // rank = 30, iterations = 10, lambda = 0.01
      val model = ALS.train(ratingRDD, 30, 10, 0.01)
      println(model)
      val topKRecs: Array[Rating] = model.recommendProducts(stuId, 3)
      // Serialize once and reuse; the original ran json4s serialization twice.
      val payload = String.valueOf(Json(DefaultFormats).write(topKRecs))
      println(payload)
      KafkaProducer.send("topN", payload)
      topKRecs
    } finally {
      // Stop the context last, even on failure (original stopped it mid-method
      // and never stopped it when an exception was thrown).
      sc.stop()
    }
  }
}
