package com.zt.bigdata.spark.dataalgorithms.chapter04

import com.zt.bigdata.template.spark.BasicTemplate

import scala.collection.mutable.ArrayBuffer

/**
  * Demonstrates a "left join" of transactions (left side) against users
  * (right side) built from plain RDD primitives: both inputs are tagged,
  * unioned, and grouped by user id; each product is then paired with the
  * user's location, falling back to "UNKNOWN" when no user record exists
  * (that fallback is what makes it a LEFT join rather than an inner join).
  *
  * Input formats (CSV):
  *  - rightFile (users):        userId,location
  *  - leftFile  (transactions): txId,product,userId,...
  */
class LeftJoinRDD extends BasicTemplate[Parameter] {
  override def process(parameter: Parameter): Unit = {

    val spark = buildSparkSession(parameter)
    val usersFile = spark.sparkContext.textFile(parameter.rightFile)
    val transactionsFile = spark.sparkContext.textFile(parameter.leftFile)

    // Tag each user record so both RDDs share the shape (userId, (tag, value)).
    val users = usersFile.map { u =>
      val fields = u.split(",")
      (fields(0), ("Location", fields(1)))
    }

    // Transactions are keyed by userId (column 2); the payload is the product (column 1).
    val transactions = transactionsFile.map { t =>
      val fields = t.split(",")
      (fields(2), ("Product", fields(1)))
    }

    val allRDD = transactions.union(users)
    allRDD.collect().foreach(println)

    val groupedRDD = allRDD.groupByKey()

    // For every user group, emit (product, location) for each purchased product.
    // Replaces the original var + mutable ArrayBuffer with an immutable partition;
    // lastOption mirrors the original foreach semantics (last "Location" record wins).
    val productLocationsRDD = groupedRDD.flatMap { case (_, taggedPairs) =>
      val (locationPairs, productPairs) = taggedPairs.partition(_._1 == "Location")
      val location = locationPairs.lastOption.map(_._2).getOrElse("UNKNOWN")
      productPairs.map { case (_, product) => (product, location) }
    }

    // Cached because three separate actions below would otherwise re-run the
    // shuffle for each collect().
    val productByLocations = productLocationsRDD.groupByKey().cache()

    println("每件产品 销售地区罗列")
    productByLocations.collect().foreach(println)

    println("每件产品 销售个数")
    // size counts one entry per sale, so duplicates by location are intentional here.
    productByLocations.mapValues(_.size).collect().foreach(println)

    println("每件产品 销售地区去重罗列")
    productByLocations
      .mapValues(_.toSet)
      .collect()
      .foreach(println)

  }
}
