package com._51doit.spark03

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Usermv {

  /** Joins user records (a.txt) with per-user movie records (b.txt) by user
    * id and prints one line per user: "id ageAndName,sortedMovies".
    *
    * Both input files are space-separated: the first token on each line is
    * the user id, the remainder of the line is the payload. Users with no
    * movie records are kept (left outer join) and get the placeholder
    * "null null null".
    *
    * File paths may be overridden from the command line:
    * args(0) = user file, args(1) = movie file; otherwise the original
    * hard-coded paths are used.
    */
  def main(args: Array[String]): Unit = {
    // Backward-compatible generalization: defaults preserve the original paths.
    val userPath  = if (args.length > 0) args(0) else "D:\\07spark\\spark-day02\\work\\题2\\a.txt"
    val moviePath = if (args.length > 1) args(1) else "D:\\07spark\\spark-day02\\work\\题2\\b.txt"

    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)

    val sc: SparkContext = new SparkContext(conf)

    try {
      // (uid, ageAndName): split with limit 2 keeps the payload intact even
      // if it contains further spaces.
      val udata: RDD[(String, String)] = sc.textFile(userPath).map { line =>
        val parts = line.split(" ", 2)
        (parts(0), parts(1))
      }

      // (uid, movieRecord) pairs from the movie file, same line format.
      val mdata: RDD[(String, String)] = sc.textFile(moviePath).map { line =>
        val parts = line.split(" ", 2)
        (parts(0), parts(1))
      }

      // Collect all movie records per user. Replaces the original
      // mapValues(List(_)).reduceByKey(_ ++ _), which rebuilt lists with
      // repeated concatenation; groupByKey is the idiomatic single step.
      val moviesByUser: RDD[(String, List[String])] =
        mdata.groupByKey().mapValues(_.toList)

      // Left outer join so users without any movie record are retained.
      val joined: RDD[(String, (String, Option[List[String]]))] =
        udata.leftOuterJoin(moviesByUser)

      val result: RDD[String] = joined.map { case (id, (ageAndName, movieOpt)) =>
        // Sort each user's movies by their first token; users with no
        // movies get the placeholder expected by downstream consumers.
        val movieStr = movieOpt
          .map(_.sortBy(_.split(" ")(0)).mkString(" "))
          .getOrElse("null null null")
        s"$id $ageAndName,$movieStr"
      }

      // An action is required: without it every transformation above is lazy
      // and the job does nothing (the original had this commented out).
      result.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }

}
