package com.mayy.spark.demo;

import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.SparkSession;

/**
 * Submitting to a Spark Standalone cluster:
 * ./spark-submit --class  com.mayy.spark.demo.SimpleApp  --master spark://192.40.10.130:7077   /root/myspark/jars/spark-simple-demo-0.0.1.jar  /root/myspark/jars/README.md
 * ./spark-submit --class  com.mayy.spark.demo.SimpleApp  --master spark://192.40.10.130:7077   --driver-java-options "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005" /root/myspark/spark-simple-demo-0.0.1.jar  /root/myspark/README.md
 * ./spark-submit --class  com.mayy.spark.demo.SimpleApp  --master spark://192.40.10.130:7077   --conf "spark.driver.extraJavaOptions=-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005" /root/myspark/spark-simple-demo-0.0.1.jar  /root/myspark/README.md
 * ./spark-submit --class  com.mayy.spark.demo.SimpleApp  --master spark://192.40.10.130:7077   --conf "spark.driver.extraJavaOptions=-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005" --conf "spark.executor.extraJavaOptions=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5006" /root/myspark/spark-simple-demo-0.0.1.jar  /root/myspark/README.md
 *
 * To find the running driver process:
 * ps -ef | grep  spark-simple-demo-0.0.1.jar
 */
public class SimpleApp {

    public static void main(String[] args) {
        // 在创建 SparkSession 前设置系统属性
        System.setProperty("spark.driver.memory", "128m");
        System.setProperty("spark.testing.memory", "471859200");

        String logFile;
        if (args != null && args.length > 0) {
            logFile = args[0];
        } else {
            logFile = "C:/workspace_bigdata/spark-demo/spark-demo2/README.md"; // 使用项目内的小文件
        }

        //master local[*]表示本地运行（[*]表示使用所有 CPU 核心）
        SparkSession spark = SparkSession.builder()
                .appName("Simple Application")
//                .master("local[*]")
                .master("local[1]")  // 改为单核运行
                .config("spark.driver.memory", "128m")
                .config("spark.sql.adaptive.enabled", "true")
                .getOrCreate();

        Dataset<String> logData = spark.read().textFile(logFile).cache();

        long numAs = logData.filter((FilterFunction<String>) value -> value.contains("a")).count();
        long numBs = logData.filter((FilterFunction<String>) value -> value.contains("b")).count();

        System.out.println("Lines with a: " + numAs + ", lines with b: " + numBs);
        spark.stop();

    }

}
