package org.spark.example

import java.io.File

import org.apache.spark.{SparkConf, SparkContext}

/**
  * @Author: chengjj
  * @Date: 2020-08-18
  */
object SparkHello {

    /**
      * Runs a small word-count demo: reads a text file, upper-cases and counts
      * the words, sorts the (word, count) pairs by count, writes the sorted
      * result to disk, and then prints a few summary values. Also demonstrates
      * the difference between `fold` and `reduce` on a tiny RDD.
      *
      * @param inputPath  text file to read (default preserves the original
      *                   hard-coded location)
      * @param outputPath directory the sorted counts are written to; it is
      *                   deleted up front because `saveAsTextFile` fails when
      *                   the target directory already exists
      */
    def test(inputPath: String = "E:\\test\\spark\\input.txt",
             outputPath: String = "E:\\test\\spark\\out"): Unit = {

        val a = 2
        val b = ("1", "abc", "a")
        println(a)
        println(b)

        // saveAsTextFile throws if the output directory already exists,
        // so clear it before running the job. Utils is project-local.
        val utils: Utils = new Utils
        utils.deleteDir(new File(outputPath))

        val conf = new SparkConf().setAppName("test").setMaster("local")
        val sc = new SparkContext(conf)
        try {
            val text = sc.textFile(inputPath, 3)
            val words = text.flatMap(_.split(" "))
            val up = words.map(_.toUpperCase)
            // val f = up.filter(w => w.contains("SQL") || w.contains("STORM"))
            val pairs = up.map(word => (word, 1))

            println("reduce")
            val results = pairs.reduceByKey(_ + _, 3)
            // cache(): `sorted` feeds four separate actions below (save, count,
            // first, top); without caching each action re-runs the whole lineage.
            val sorted = results.sortBy(_._2).cache()

            // Collapse to one partition so the output lands in a single file.
            sorted.repartition(1).saveAsTextFile(outputPath)

            println(sorted.count())
            println(sorted.first())
            // BUG FIX: the original wrapped this in println(...), which also
            // printed the Unit value "()" returned by foreach.
            sorted.top(2).foreach(println)

            val bl = sc.parallelize(List(1, 2), 1)
            // fold adds the zero element once per partition plus once for the
            // final merge: with one partition, (1 + 1 + 2) + 1 = 5 here.
            val append = bl.fold(1)((x, y) => x + y)
            println(s"append$append")
            val re = bl.reduce((x, y) => x + y)
            println(s"reduce$re")
        } finally {
            // Always release the SparkContext, even if an action fails;
            // the original leaked it on any exception.
            sc.stop()
        }
    }

    /** Entry point: delegates to [[test]] with the default paths. */
    def main(args: Array[String]): Unit = {
        test()
    }
}
