package com.dtkavin.sparkSQL

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by IntelliJ IDEA.
  * Programmer : John Zn
  * Date : 2016/4/22 0022
  * Time : 22:43
  * Description : Demonstrates Spark SQL usage via both the DataFrame DSL and raw SQL; the DataFrame schema is derived from a case class.
  */
// Empty placeholder class; all logic lives in the companion object below.
// NOTE(review): appears unused — kept only so the file mirrors the class/object pairing convention.
class PersonSql {

}

case class Person(id: Int, name: String, age: Int)

object PersonSql {

  /**
    * Entry point: reads CSV-style person records ("id,name,age") from HDFS,
    * converts them into a DataFrame using the [[Person]] case class as the
    * schema, and runs the same queries twice — once with the DataFrame DSL
    * and once with raw SQL against a temp table.
    */
  def main(args: Array[String]): Unit = {
    // Configure master/appName on the SparkConf itself instead of bypassing it
    // via the (master, appName, conf) SparkContext constructor overload.
    val conf = new SparkConf().setMaster("local[3]").setAppName("PersonSql")
    val sc = new SparkContext(conf)
    val sqlc = new SQLContext(sc)

    // Each line is "id,name,age"; trim numeric fields so stray whitespace
    // around either number does not blow up `.toInt`.
    val personRdd = sc.textFile("hdfs://spark01:9000/streamsql")
      .map(_.split(","))
      .map(fields => Person(fields(0).trim.toInt, fields(1), fields(2).trim.toInt))

    import sqlc.implicits._
    val personDF = personRdd.toDF()
    //    personDF.show()

    // --- DataFrame DSL ---
    personDF.select(personDF.col("id"), personDF.col("name"), personDF.col("age")).show()
    personDF.select("name").show()
    // Plain positional argument; the original `select(col = "age")` named-arg
    // form targeted the vararg head and was needlessly obscure.
    personDF.select("age").show()
    personDF.filter(personDF.col("age") > 18 && personDF.col("id") > 1).show()
    personDF.sort("id").show()

    println("*" * 20)

    // --- Raw SQL via a temp table ---
    personDF.registerTempTable("t_person")
    sqlc.sql("select * from t_person").show()
    sqlc.sql("select * from t_person where id>1 and age >18").show()
    sqlc.sql("select * from t_person order by id desc").show()

    sc.stop()
  }
}