package com.atbeijing.bigdata.spark.mytest.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD


/**
 * Demo of common Spark RDD transformation operators:
 * map, mapPartitions, mapPartitionsWithIndex, flatMap, glom, groupBy,
 * filter and sample. Runs locally and prints each operator's result.
 */
object Operator {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("groupBy")
    val sc = new SparkContext(conf)

    try {
      val list = List(1, 2, 3, 4, 5, 6)
      // 3 partitions => [1,2], [3,4], [5,6]
      val r1: RDD[Int] = sc.makeRDD(list, 3)

      // map: one-to-one transformation of each element
      val r1Map: RDD[String] = r1.map(i => i + "a")
      r1Map.collect().foreach(i => print(i + " ")) // 1a 2a 3a 4a 5a 6a
      println()

      println("===================mapPartitions===================")
      // mapPartitions works on a whole partition's iterator at once,
      // so it may change the number of elements (here: filter keeps evens)
      val r1MapP: RDD[Int] = r1.mapPartitions(iter => iter.filter(_ % 2 == 0))
      r1MapP.collect().foreach(i => print(i + " ")) // 2 4 6
      println()

      println("==================mapPartitionsWithIndex====================")
      // Keep only partition 0 (elements 1 and 2), doubled; drop the rest.
      val r1mapPWI: RDD[Int] = r1.mapPartitionsWithIndex(
        (index, iter) => {
          if (index == 0) {
            iter.map(_ * 2)
          } else {
            Nil.iterator // empty iterator discards the partition's data
          }
        }
      )
      r1mapPWI.collect().foreach(i => print(i + " ")) // 2 4 (partition 0 holds 1,2)
      println()

      println("=================flatMap=====================")
      // Flatten a heterogeneous dataset into a single collection of elements.
      val rdd: RDD[Any] = sc.makeRDD(
        List(
          List(1, 2), 3, List(4, 5), 6, "a"
        )
      )
      // NOTE: this match is not exhaustive — any element that is not a
      // List, Int or String would throw a MatchError at runtime.
      val value: RDD[Any] = rdd.flatMap(
        {
          case list: List[_] => list
          case num: Int => List(num)
          case str: String => List(str)
        }
      )
      value.collect().foreach(i => print(i + " ")) // 1 2 3 4 5 6 a
      println()

      println("===================glom===================")
      // glom turns each partition into an Array; sum each partition's max.
      val r1G: RDD[Array[Int]] = r1.glom()
      val max: RDD[Int] = r1G.map(array => array.max)
      val ints: Array[Int] = max.collect()
      val sum: Int = ints.sum
      println(sum) // 2 + 4 + 6 = 12

      println("===================groupBy===================")
      val rddG = sc.makeRDD(
        List("Hello", "Hadoop", "Spark", "Hive", "Scala", "Hoop"), 3
      )
      // Group by first letter, forcing the result into 2 partitions.
      val rdd1: RDD[(String, Iterable[String])] = rddG.groupBy(s => s.substring(0, 1), 2)
      rdd1.collect().foreach(println) // e.g. (H,...), (S,...)
      //rdd1.saveAsTextFile("outPut")

      println("===================filter===================")
      val r1F: RDD[Int] = r1.filter(i => i % 2 == 0)
      r1F.collect().foreach(i => print(i + " ")) // 2 4 6
      println()

      println("===================sample===================")
      // Sampling without replacement: each element kept with probability 0.5.
      val res: RDD[Int] = r1.sample(withReplacement = false, 0.5)
      res.collect().foreach(i => print(i + " "))
      println()
      // Sampling with replacement: each element drawn ~2 times on average,
      // so duplicates are expected in the output.
      val res1: RDD[Int] = r1.sample(withReplacement = true, 2)
      res1.collect().foreach(i => print(i + " "))
      println()
    } finally {
      // Always release the SparkContext, even if a job above fails.
      sc.stop()
    }
  }
}
