package org.hadoop.spark
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}
object SparkSQL {

  /**
   * Word-count demo: reads whitespace-separated tokens from a local text
   * file, shows per-token counts via the DataFrame API, then runs the same
   * aggregation as SQL and writes the result to a local output directory.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("SQL")
    val session: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    try {
      val ctx: SparkContext = session.sparkContext
      val rdd: RDD[String] = ctx.textFile("file:///D:/a/stud.txt")
      // Implicit conversions (e.g. RDD#toDF) require this import.
      import session.implicits._
      val df: DataFrame = rdd.flatMap(_.split("\\s+")).toDF("str")
      df.show()
      df.groupBy("str").count().sort("str").show()
      // Register a temporary view so the SQL query below can reference the data.
      df.createTempView("words")
      session
        .sql("select str,count(str) cnt from words group by str order by str") // execute the SQL
        .rdd                                  // convert the result DataFrame to an RDD of Rows
        .saveAsTextFile("file:///D:/a/out001") // save to the output directory
    } finally {
      // Release Spark resources even if the job above fails.
      session.close()
    }
  }
}
