package com.stqf.scala.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Created By admin
 * Date: 2020/1/13
 * Description: Small local-mode Spark demos — a Spark SQL query over a JSON
 * classpath resource ({@code main}) and a classic RDD word count
 * ({@code simpleWordCount}).
 */
public class SparkWord {

    /**
     * Loads the {@code urls.txt} JSON resource from the classpath, registers it
     * as the global temp view {@code urls}, runs a projection over it and
     * prints the first 300 rows.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if Spark fails to start or the query fails
     */
    public static void main(String[] args) throws Exception {
        // simpleWordCount();
        // Fail fast with a clear message instead of an opaque NPE on .getPath()
        // when the resource is missing from the classpath.
        String path = Objects.requireNonNull(
                ClassLoader.getSystemResource("./urls.txt"),
                "resource ./urls.txt not found on classpath").getPath();
        SparkSession spark = SparkSession.builder()
                .appName("Java Spark SQL basic example")
                .master("local")
                .getOrCreate();
        try {
            Dataset<Row> ds = spark.read().json(path);
            ds.createGlobalTempView("urls");
            // NOTE(review): the aliases cross the two columns (href -> name,
            // name -> href), and the ORDER BY then applies to the aliased
            // column — confirm this swap is intentional.
            Dataset<Row> sql_1 = spark.sql(
                    "select href as name, name as href, url from global_temp.urls order by href asc limit 300");
            sql_1.show();
        } finally {
            // Always release Spark resources, even if the query above throws.
            spark.stop();
        }
    }

    /**
     * RDD word count over the {@code words.txt} classpath resource: splits each
     * line on whitespace, commas and periods, counts occurrences per word and
     * prints the (count, word) pairs sorted ascending by count.
     */
    public static void simpleWordCount() {
        String path = Objects.requireNonNull(
                ClassLoader.getSystemResource("./words.txt"),
                "resource ./words.txt not found on classpath").getPath();
        SparkConf conf = new SparkConf().setAppName("WordCount").setMaster("local");
        // try-with-resources: JavaSparkContext is Closeable, so the context is
        // stopped even if the pipeline below fails.
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            JavaRDD<String> rdd = sc.textFile(path);
            rdd.flatMap(line -> {
                String safe = Optional.ofNullable(line).orElse("").trim();
                // "[\\s,.]+" collapses runs of delimiters, so no empty tokens
                // are produced in the first place; the filter stays as a guard.
                List<String> words = Stream.of(safe.split("[\\s,.]+"))
                        .map(String::trim)
                        .filter(item -> !item.isEmpty())
                        .collect(Collectors.toList());
                return words.iterator();
            })
                    .mapToPair(s -> new Tuple2<>(s, 1))
                    .reduceByKey(Integer::sum)
                    // Swap to (count, word) so sortByKey orders by count.
                    .mapToPair(item -> new Tuple2<>(item._2(), item._1()))
                    .sortByKey()
                    .foreach(item -> System.out.printf("count is :%-3d value:%s %n", item._1(), item._2()));
        }
    }

}
