package org.hadoop.spark

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}
object SparkSQL2 {

  /**
   * Entry point. Expects exactly two arguments:
   * the input text path and the output directory.
   *
   * Prints a usage message and exits (status 0) on a wrong arg count
   * instead of using a non-idiomatic `return`.
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println("usage : in out")
    } else {
      runWordCount(args(0), args(1))
    }
  }

  /**
   * Removes `outPath` from the default Hadoop filesystem if it exists,
   * so `saveAsTextFile` does not fail with "output directory already exists".
   *
   * @param outPath destination directory to (recursively) delete
   */
  private def deleteOutputIfExists(outPath: String): Unit = {
    val hConf: Configuration = new Configuration()
    val fs: FileSystem = FileSystem.get(hConf)
    val dest: Path = new Path(outPath)
    if (fs.exists(dest)) {
      // recursive = true: the output is a directory of part files
      fs.delete(dest, true)
    }
  }

  /**
   * Reads `inPath`, splits lines on whitespace into single-column rows,
   * shows the raw words and their per-word counts, then writes the
   * SQL-computed (word, count) pairs to `outPath` as text.
   *
   * The session is closed in `finally` so resources are released even
   * when a Spark action throws.
   *
   * @param inPath  input text file/directory readable by Spark
   * @param outPath output directory (deleted first if present)
   */
  private def runWordCount(inPath: String, outPath: String): Unit = {
    deleteOutputIfExists(outPath)

    val conf: SparkConf = new SparkConf().setAppName("SQL")
    val session: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    try {
      // Implicit import is required for rdd.toDF(...)
      import session.implicits._

      val ctx: SparkContext = session.sparkContext
      val rdd: RDD[String] = ctx.textFile(inPath)
      val df: DataFrame = rdd.flatMap(_.split("\\s+")).toDF("str")

      df.show()
      df.groupBy("str").count().sort("str").show()

      // Register the DataFrame as a view so it is addressable from SQL
      df.createTempView("words")
      val sqlContext: SQLContext = session.sqlContext
      sqlContext
        .sql("select str,count(str) cnt from words group by str order by str") // run the aggregation in SQL
        .rdd // convert to RDD[Row]
        .saveAsTextFile(outPath) // persist to the target directory
    } finally {
      session.close()
    }
  }
}
