package gunglad.com.gitee_22_4_21_task03
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object Task01 {
  // Historical debug notes kept from earlier runs:
  //  - java.lang.VerifyError on scala.collection.mutable.WrappedArray usually
  //    means the Scala version of the project does not match the Scala version
  //    Spark was built against.
  //  - "Could not find or load main class" usually means a stale/incorrect
  //    build output directory.

  /**
   * Local word-count demo.
   *
   * Builds a SparkContext (the entry point for all computation; it creates the
   * DAGScheduler and TaskScheduler internally), reads a text file, splits it
   * into words, counts occurrences per word, and prints the result.
   *
   * @param args optional; args(0) may supply the input file path. When absent,
   *             the original hard-coded default path is used (backward
   *             compatible with the previous behavior).
   */
  def main(args: Array[String]): Unit = {

    println("start")

    // TODO:1 Create the SparkContext.
    val sc = new SparkContext(
      new SparkConf().setMaster("local[2]").setAppName("统计单词"))

    try {
      /**
       * Input path resolution notes:
       *  - textFile looks on the local filesystem first, then HDFS.
       *  - On Windows, a missing winutils.exe raises
       *    java.io.IOException: Could not locate executable null\bin\winutils.exe
       */
      val inputPath: String = args.headOption.getOrElse(
        "D:\\proj\\hadoop\\spark_demo_code\\spark\\src\\main\\resources\\words.txt")

      // TODO:2 Read the data file — one RDD element per line.
      val lines: RDD[String] = sc.textFile(inputPath)

      // 2.1 Inspect what was read. collect() pulls everything to the driver —
      // acceptable here only because the demo file is tiny.
      println(lines.collect().toBuffer)

      // TODO:3 Split each line into words on single spaces.
      val words: RDD[String] = lines.flatMap(_.split(" "))

      // 3.1 Pair each word with an initial count of 1.
      val pairs: RDD[(String, Int)] = words.map((_, 1))

      // 3.2 Sum the counts per word.
      val counts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)

      // 3.3 Collect and print, e.g. ArrayBuffer((scala,1), (hello,3), (spark,1)).
      val tuples: Array[(String, Int)] = counts.collect()
      println(tuples.toBuffer)
    } finally {
      // Always release Spark resources, even when the job throws.
      sc.stop()
    }
  }

}
