package com.study.core

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object CommonFriendV1 {

  /**
    * Computes, for every pair of users, the set of friends they have in common.
    *
    * Algorithm:
    *   1. Explode each (user, friendListString) into (user, friend) pairs.
    *   2. Invert to (friend, user) and self-join: every (user1, user2) pair
    *      emitted under a friend key shares that friend.
    *   3. Keep only ordered pairs (u1 < u2) to drop self-pairs and mirrored
    *      duplicates, then reduce the shared friends per pair into a Set.
    *
    * Runs on a local-mode SparkSession with hard-coded sample data; each
    * intermediate RDD is printed for study purposes.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // Silence Spark/Akka framework logging so the println output stays readable.
    Logger.getLogger("org").setLevel(Level.OFF)
    Logger.getLogger("akka").setLevel(Level.OFF)
//    System.setProperty("hadoop.home.dir", "E:\\hadoop-2.6.0-cdh5.15.0")
    val ss = SparkSession
      .builder()
      .appName(" spark 2.0")
      .master("local")
      .getOrCreate()
    try {
      val sc = ss.sparkContext
      // Sample data: ("A", "BCDEKL") means user A's friends are B, C, D, E, K, L
      // (each friend is a single character of the string).
      val rdd = sc.parallelize(List(("A", "BCDEKL"), ("B", "EKL"), ("C", "EKLBCD"), ("K", "ADCKL")))

      // Explode each friend-list string into individual (user, friend) pairs.
      // filter(_.nonEmpty) guards against the leading empty token that
      // String.split("") produces on pre-Java-8 runtimes.
      val flatMapRdd = rdd.flatMapValues(_.split("").filter(_.nonEmpty))
      println("=============(用户，好友)==================")
      flatMapRdd.foreach(println)

      println("==============(好友，用户)=================")
      // Invert to (friend, user) so the self-join below groups users by shared friend.
      val rdd1 = flatMapRdd.map { case (user, friend) => (friend, user) }
      rdd1.foreach(println)

      println("==============笛卡尔积(好友，(用户,用户))=================")
      // Self-join on the friend key: for each friend, every (user, user)
      // combination of the users who have that friend.
      val joinRdd = rdd1.join(rdd1)
      joinRdd.foreach(println)

      println("===============================")
      // u1 < u2 removes self-pairs (A,A) and keeps exactly one of the two
      // mirrored orderings (A,B)/(B,A); reduceByKey unions the shared friends.
      val result: RDD[((String, String), Set[String])] = joinRdd
        .filter { case (_, (u1, u2)) => u1 < u2 }
        .map { case (friend, pair) => (pair, Set(friend)) }
        .reduceByKey(_ ++ _)

      // Render each pair as "u1,u2 [f1,f2,...]".
      result.map {
        case ((user1, user2), friends) =>
          user1 + "," + user2 + " [" + friends.mkString(",") + "]"
      }.foreach(println)
    } finally {
      // Always release the SparkSession, even if a stage fails.
      ss.stop()
    }
  }
}
