package com.li.spark

import org.apache.spark.{SparkConf, SparkContext}

object TransformationOp {
  /**
   * Entry point: builds a local SparkContext, optionally runs one of the
   * transformation demos below (uncomment the one you want), then stops it.
   */
  def main(args: Array[String]): Unit = {
    val sc: SparkContext = getSparkContext
    // Uncomment a demo to run it:
    //    mapOp(sc)           // double every element
    //    filterOp(sc)        // keep only even numbers
    //    flatMapOp(sc)       // split lines into words (and count them)
    //    groupByKeyOp(sc)    // group values by key
    //    groupByKeyOp2(sc)   // grouping when tuples have more than two fields
    //    reduceBykeyOp(sc)   // count occurrences per key
    //    sortByKeyOp(sc)     // sort pairs by value
    //    joinOp(sc)          // inner join of two pair RDDs
    //    distinctOp(sc)      // deduplicate pairs
    sc.stop()
  }

  /** Builds a SparkContext configured to run locally with this demo's app name. */
  private def getSparkContext = {
    val conf = new SparkConf()
      .setAppName("TransformationOp")
      .setMaster("local")
    new SparkContext(conf)
  }

  //对每个集合元素乘2
  /** map: doubles every element of a small integer RDD and prints each result. */
  def mapOp(sc: SparkContext): Unit = {
    val numbers = sc.parallelize(Array(1, 2, 3, 4, 5), 2)
    numbers.map(n => n * 2).foreach(n => println(n))
  }

  //过滤出偶数
  /** filter: keeps only the even elements of a small integer RDD and prints them. */
  def filterOp(sc: SparkContext): Unit = {
    val numbers = sc.parallelize(Array(1, 2, 3, 4, 5), 2)
    numbers.filter(n => n % 2 == 0).foreach(n => println(n))
  }

  //将行拆分为单词
  /**
   * flatMap: splits each line into words, then counts occurrences of each word
   * with reduceByKey and prints the resulting (word, count) pairs.
   */
  def flatMapOp(sc: SparkContext): Unit = {
    val lines = sc.parallelize(Array("mount mount havertz", "kante kante lukaku"), 2)
    val words = lines.flatMap(line => line.split(" "))
    words
      .map(word => (word, 1))
      .reduceByKey((a, b) => a + b)
      .foreach(pair => println(pair))
  }

  //根据key分组
  /** groupByKey: groups values by key and prints each key followed by its values. */
  def groupByKeyOp(sc: SparkContext): Unit = {
    val pairs = sc.parallelize(
      Array(("mount", "eng"), ("mount", "cn"), ("havertz", "ger"), ("kante", "fra")), 2)
    pairs.groupByKey().foreach { case (key, values) =>
      print(key + ":")
      values.foreach(v => print(v))
      println()
    }
  }

  //根据key分组(tuple中数据超过2列的场景)
  /**
   * groupByKey with 3-field tuples: re-keys each (name, lang, age) tuple by its
   * second field, groups by that key, and prints each key with its (name, age) values.
   */
  def groupByKeyOp2(sc: SparkContext): Unit = {
    val triples = sc.parallelize(
      Array(("mount", "eng", 20), ("7252", "eng", 24), ("havertz", "ger", 20), ("kante", "fra", 31)), 2)
    triples
      .map { case (name, lang, age) => (lang, (name, age)) }
      .groupByKey()
      .foreach { case (key, values) =>
        print(key + ":")
        values.foreach(v => print(v))
        println()
      }
  }

  //统计
  /** reduceByKey: counts how many records share each key and prints the counts. */
  def reduceBykeyOp(sc: SparkContext): Unit = {
    val pairs = sc.parallelize(
      Array(("mount", "eng"), ("mount", "cn"), ("havertz", "ger"), ("kante", "fra")), 2)
    pairs
      .map { case (key, _) => (key, 1) }
      .reduceByKey((a, b) => a + b)
      .foreach(pair => println(pair))
  }

  //排序
  /**
   * sortByKey: sorts (name, value) pairs by value — swaps key and value,
   * sorts by the (now numeric) key, swaps back, and prints the pairs.
   */
  def sortByKeyOp(sc: SparkContext): Unit = {
    val scores = sc.parallelize(Array(("mount", 19), ("havertz", 29), ("kante", 7)), 2)
    scores
      .map { case (name, value) => (value, name) }
      .sortByKey()
      .map { case (value, name) => (name, value) }
      .foreach(pair => println(pair))
  }

  /** join: inner-joins two pair RDDs on their keys and prints (key, (left, right)). */
  def joinOp(sc: SparkContext): Unit = {
    val left = sc.parallelize(Array(("mount", "eng"), ("havertz", "ger"), ("kante", "fra")))
    val right = sc.parallelize(Array(("mount", "cn"), ("havertz", "ger"), ("kante", "fra")))
    left.join(right).foreach(pair => println(pair))
  }

  /**
   * distinct: removes duplicate pairs from the RDD and prints the unique ones.
   *
   * Fix: declare the `: Unit` return type explicitly. Every sibling demo method
   * declares it, and relying on inference for a side-effecting public method is
   * error-prone (a later edit ending in a value would silently change the
   * inferred result type). Behavior is unchanged.
   */
  def distinctOp(sc: SparkContext): Unit = {
    sc.parallelize(Array(("mount", "eng"), ("mount", "eng"), ("havertz", "ger"), ("kante", "fra")))
      .distinct()
      .foreach(pair => println(pair))
  }
}
