package scalapackage.testspark

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by Germmy on 2018/5/28.
  */
object TestSparkSql {

  /**
    * Entry point: reads a comma-separated person file, selects the two oldest
    * people via Spark SQL, and writes the result out as JSON.
    *
    * @param args args(0) = input path (lines of "id,name,age,faceVal"),
    *             args(1) = output path for the JSON result (e.g. on HDFS)
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an opaque ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "Usage: TestSparkSql <inputPath> <outputPath>")

    val sparkConf: SparkConf = new SparkConf().setAppName("TestSparkSql").setMaster("local[2]")
    val sc: SparkContext = new SparkContext(sparkConf)

    try {
      // 1. Create the SQLContext (the Spark 1.x entry point for SQL).
      val sQLContext = new SQLContext(sc)

      // 2.1 Read the input file and split each line on commas.
      val lines: RDD[Array[String]] = sc.textFile(args(0)).map(_.split(","))

      // 2.2 Map each row to a Person. Assumes columns 0, 2 and 3 hold valid
      //     integers — a malformed row will fail the job with NumberFormatException.
      val personRdd: RDD[Person] = lines.map(x => Person(x(0).toInt, x(1), x(2).toInt, x(3).toInt))

      // 2.3 Convert to a DataFrame; toDF() is provided by the SQLContext's implicits.
      import sQLContext.implicits._
      val personDf: DataFrame = personRdd.toDF()

      // 2.4 Register a temporary table so it can be queried by name.
      personDf.registerTempTable("myTable")

      // 2.5 Query: the two oldest people.
      val dataFrame: DataFrame = sQLContext.sql("select * from myTable order by age desc limit 2")

      // 2.6 Write the result as JSON to the output path.
      dataFrame.write.json(args(1))
    } finally {
      // Always release cluster resources, even if a stage above throws.
      sc.stop()
    }
  }

}
case class Person(id:Int,name:String,age:Int,faceVal:Int)


