package RDD_homework

import org.apache.spark.{Partitioner,SparkConf, SparkContext}

/**
 * Five RDD exercises: filter/map pipelines, flatMap word splitting,
 * reduceByKey aggregation, and a custom partitioner. Each exercise prints
 * its result to stdout via collect(); `main` runs them in order and stops
 * the SparkContext afterwards.
 */
object five_work {
  def main(args: Array[String]): Unit = {
    // Local mode using all available cores; app name shows up in the Spark UI.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("work")
    val sc = new SparkContext(sparkConf)

    first_work(sc)
    two_work(sc)
    three_work(sc)
    four_work(sc)
    sc.stop()
  }


  // Exercise 1: from 1..100 keep values > 50, double them, and print them.
  def first_work(sc : SparkContext): Unit ={
    val dataRDD = sc.makeRDD(1 to 100)
    // Anything surviving filter(_ > 50) is >= 51, so after doubling it is
    // >= 102. The original trailing filter(_ > 100) could never reject an
    // element and has been dropped; output is unchanged (102, 104, ..., 200).
    val dataRDD2 = dataRDD.filter(_ > 50).map(_ * 2)
    val result = dataRDD2.collect()
    println(result.mkString(","))
  }

  // Exercise 2: split each sentence into lower-cased words and print them.
  def two_work(sc : SparkContext): Unit ={
    val dataRDD = sc.makeRDD(List("Hello scala","Hello Spark","Scala is the best","Spark too"))
    // Lower-case first so "Scala"/"scala" and "Spark"/"spark" unify.
    val words = dataRDD.flatMap(_.toLowerCase().split(" "))
    println(words.collect().mkString(","))
  }

  // Exercise 3: sum the scores per student name with reduceByKey.
  def three_work(sc : SparkContext): Unit ={
    val dataRdd = sc.makeRDD(List(("Alice",90),("Bob",80),("Charlie",70),("Alice",85),("Bob",95)))
    // reduceByKey combines map-side before shuffling, unlike groupByKey + sum.
    val Rdd_Sum = dataRdd.reduceByKey(_ + _)
    println(Rdd_Sum.collect().mkString(","))
  }


  /**
   * Exercise 4's partitioner: routes String keys of length <= 5 to
   * partition 0 and longer keys to partition 1. Non-String keys are a
   * programmer error and are rejected explicitly.
   */
  class MyPartitioner extends Partitioner{
    override def numPartitions: Int = 2

    override def getPartition(key: Any): Int = {
      key match {
        case str: String => if (str.length <= 5) 0 else 1
        case _ => throw new IllegalArgumentException("Expected a string")
      }
    }
  }


  // Exercise 4: repartition words by length and print each partition's size.
  def four_work(sc :SparkContext): Unit ={
    val rdd = sc.makeRDD(List("apple","banana","orange","pear","watermelon","grape","pineapple"))
    // Key each word by itself so partitionBy can dispatch on the word.
    // NOTE: the original .persist() was removed — the RDD is consumed exactly
    // once below, so caching it only pinned executor memory for no benefit.
    val partRDD = rdd.map(str => (str, 1)).partitionBy(new MyPartitioner)
    // Emit one Int per partition: the number of elements it holds.
    val resultRDD = partRDD.mapPartitions(iter => Iterator(iter.size))
    resultRDD.collect().foreach(println)
  }
}



