package com.chenjj.bigdata.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.deploy.yarn.ApplicationMaster;
import org.apache.spark.scheduler.TaskSetManager;
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend;
import org.apache.spark.sql.hive.HiveContext;
import scala.Tuple2;

import java.util.Arrays;

/**
 * Minimal local-mode Spark word-count example written with Java 8 lambdas.
 *
 * <p>Reads {@code word.txt} from the classpath root, splits each line on single
 * spaces, counts occurrences per word, and prints each {@code word:count} pair
 * to stdout on the executor (local mode, so that is this JVM's stdout).
 */
public class WordCountLocalLambdaRunner {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("WordCountLocal")
                .setMaster("local")
                // Bypasses Spark's minimum-memory sanity check so the demo runs
                // in constrained environments; not for production configs.
                .set("spark.testing.memory", "1024000000");

        // JavaSparkContext implements Closeable: try-with-resources guarantees the
        // context is stopped even if the job throws (the original leaked it on failure).
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            // Resolve word.txt relative to the classpath root (e.g. target/classes/).
            // NOTE(review): getResource("/") returns null outside an exploded-directory
            // classpath (e.g. when run from a jar) — acceptable for a local demo.
            String classPath = WordCountLocalLambdaRunner.class.getResource("/").getPath();
            JavaRDD<String> lines = sc.textFile(classPath + "word.txt");

            lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                    .mapToPair(word -> new Tuple2<>(word, 1))
                    .reduceByKey(Integer::sum)
                    // foreach triggers the job; local mode prints to this JVM's stdout.
                    .foreach(x -> System.out.println(x._1 + ":" + x._2));
        }
        // Removed the original Thread.sleep(50000): it ran *after* the context was
        // closed, so the Spark UI it presumably kept alive was already gone.
    }
}
