package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo07WordCntSubmit {
  def main(args: Array[String]): Unit = {
    /**
     * Submitting Spark SQL code to the cluster:
     * 1. Comment out the in-code master configuration and specify it in the
     *    submit command instead — this makes local debugging easier.
     * 2. Mind the input/output paths: they must be existing HDFS paths.
     * 3. Package the code into a jar with Maven and upload it.
     * 4. Build the submit command:
     * spark-submit --master yarn --deploy-mode client --conf spark.sql.shuffle.partitions=2 --class com.shujia.spark.sql.Demo07WordCntSubmit spark-1.0.jar
     */

    // App name is derived from the object name; the trailing '$' that Scala
    // appends to singleton object class names is stripped.
    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      //      .master("local")
      //      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    try {
      import spark.implicits._
      import org.apache.spark.sql.functions._

      // Read each input line as a single "line" column. '#' is used as the
      // separator so that a whole line lands in one column (assumes '#' does
      // not occur in the data — TODO confirm against the input files).
      val lineDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", "#")
        .schema("line String")
        .load("/data/spark/words")

      // Split every line on ',' into individual words, then count the
      // occurrences of each word.
      val wordCntDF: DataFrame = lineDF
        .select(explode(split($"line", ",")) as "word")
        .groupBy($"word")
        .agg(count("*") as "cnt")

      // Overwrite any previous result so the job can be re-run safely.
      wordCntDF
        .write
        .format("csv")
        .option("sep", ",")
        .mode(SaveMode.Overwrite)
        .save("/data/spark/wcnt-spark-sql/")
    } finally {
      // Fix: the session was never stopped. Always release cluster resources,
      // even if the job fails — otherwise the application can linger on YARN.
      spark.stop()
    }
  }

}
