package com.fwmagic.spark.core.cases.badcase

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Deliberate anti-pattern demo: invokes an RDD action from inside a
 * transformation closure (i.e. on an Executor). All RDD operations must be
 * issued from the Driver, so running this job fails at runtime — that failure
 * is the point of the example.
 */
object BadCase {

    def main(args: Array[String]): Unit = {

        val sparkConf: SparkConf = new SparkConf()
                .setAppName(this.getClass.getSimpleName)
                .setMaster("local[*]")

        val sparkContext: SparkContext = new SparkContext(sparkConf)

        // A single line of text, parallelized into an RDD.
        val lines: RDD[String] = sparkContext.parallelize(List("spark flink storm java scala spark flink java flume"))

        // Split each line into individual words.
        val tokens: RDD[String] = lines.flatMap(_.split(" "))

//        val wordAndOne: RDD[(String, Int)] = tokens.map((_,1))

        // Correct placement would be here, on the Driver:
        //val count: Long = tokens.count()

        /* Another wrong example: calling tokens.filter() inside the closure,
           i.e. invoking an RDD method on an Executor.
          val tp3 = tokens.map(word => {
              val filtered: RDD[String] = tokens.filter(_.startsWith("spark"))
              filtered
          })*/

        // WRONG example (intentional): tokens.count() inside map() triggers an
        // action on an Executor; actions belong on the Driver.
        val triples: RDD[(String, Long, Int)] = tokens.map { word =>
            val total: Long = tokens.count()
            (word, total, 1)
        }

        triples.collect().foreach(println)

        //val sorted: RDD[(String, Int)] = wordAndOne.reduceByKey(_+_).sortBy(tp => tp._2,false)

        //sorted.collect().foreach(println)

        sparkContext.stop()

    }

}
