package recommendPackage

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.json4s.DefaultFormats
import org.json4s.jackson.Json
import utils.KafkaProducer

import scala.collection.mutable.ArrayBuffer
/**
 * Spark batch job: reads a tab-separated student file, counts students per
 * (classId, name + gender-suffix) key, then serializes each aggregate as JSON
 * and publishes it to the "rdd" Kafka topic.
 *
 * @author 杨铭
 *         2023/6/20,8:27
 */
object Count {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName("RDD")
    val sc = new SparkContext(conf)
    try {
      // Input is tab-separated; we keep:
      //   field 0 -> class id, field 2 -> gender flag (0 = "W", else "M"), field 5 -> name.
      // NOTE(review): assumes every line has at least 6 fields — confirm against the data file.
      val line: RDD[String] = sc.textFile("src/main/resources/student.data")
      val value: RDD[(Int, (Int, String))] = line.map { x =>
        val fields = x.split("\t")
        (fields(0).toInt, (fields(2).toInt, fields(5)))
      }

      // Build the count key directly and aggregate with reduceByKey.
      // This replaces the original groupByKey + per-group index loop, which
      //  (a) shuffled whole value lists instead of pre-combined partial counts, and
      //  (b) had an off-by-one (`0 until y.length - 1`) that dropped the last
      //      record of every group, undercounting each key.
      val result: RDD[((Int, String), Int)] = value
        .map { case (classId, (gender, name)) =>
          val suffix = if (gender == 0) "W" else "M"
          ((classId, name + suffix), 1)
        }
        .reduceByKey(_ + _)

      val json: RDD[Person] = result.map {
        case ((classId, name), num) => Person(classId = classId, name = name, num = num)
      }

      // Serialize once per record, then log and publish the same payload.
      // (`write` already returns a String, so the original String.valueOf wrapper
      // was redundant.)
      json.foreach { data =>
        val payload = Json(DefaultFormats).write(data)
        println(payload)
        KafkaProducer.send("rdd", payload)
      }
    } finally {
      sc.stop() // release the local Spark context even if the job fails
    }
  }

  /** JSON payload shape published to Kafka: one count per (class, gendered name). */
  final case class Person(classId: Int, name: String, num: Int)
}
