import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

/** Immutable record of a person parsed from the input files (one `name age` pair per line). */
final case class People(name: String, age: Int) // `val` is implicit for case-class params; `final` prevents subclassing

/**
  * Small Spark SQL demo: loads people records from local files and queries them
  * three ways — raw SQL over a temp table, the untyped DSL, and a DataFrame read
  * from JSON. Uses the Spark 1.x `SQLContext` API.
  */
object SparkSqlTest {

  // NOTE(review): eager object fields mean the SparkContext is created as soon as
  // the object is first touched; fine for a demo, but beware in larger apps.
  val conf = new SparkConf()
    .setAppName("TestSpark")
    .setMaster("local")

  val sc = new SparkContext(conf)
  val sqlContext = new SQLContext(sc)

  // Brings in .toDF() and the Symbol-based column syntax ('age, 'name).
  import sqlContext.implicits._

  /**
    * Parses whitespace-separated `name age` lines from D:/people.txt into a
    * DataFrame, registers it as a temp table, and prints names of people
    * with 10 < age < 31 via a SQL query.
    */
  def testSparkSQL() = {
    val file = sc.textFile("file:///D:/people.txt")
    val persons = file.map(_.split("\\s"))
      .map(p => People(p(0), p(1).toInt)).toDF()

    // BUG FIX: the table was registered as "person" but the SQL below selects
    // from "people", which fails at runtime with "Table not found: people".
    // The registered name now matches the query.
    persons.registerTempTable("people")
    val man = sqlContext.sql("SELECT name FROM people WHERE age > 10 and age < 31")
    man.map("Name:" + _ (0))
      .collect().foreach(println)
  }

  /**
    * Same age-range query as [[testSparkSQL]], but expressed with the
    * DataFrame DSL (where/select) instead of a SQL string.
    * Note the boundaries are inclusive here (>= 10, <= 31), unlike the SQL version.
    */
  def testSparkSQL_DSL() = {
    val file = sc.textFile("file:///D:/person.txt")
    val persons = file.map(_.split("\\s"))
      .map(p => People(p(0), p(1).toInt)).toDF()

    val man = persons.where('age >= 10)
      .where('age <= 31)
      .select('name)

    man.map("Name:" + _ (0)).collect().foreach(println)
  }

  /**
    * Kept for source compatibility: the original body was a byte-for-byte
    * duplicate of [[testSparkSQL_DSL]], so it now simply delegates.
    */
  def testSparkSQL_scheme() = testSparkSQL_DSL()

  /** Entry point: runs the JSON DataFrame demo, then shuts the context down. */
  def main(args: Array[String]) {
    testDataFrame()
    sc.stop()
  }

  /**
    * Reads D:/people.json into a DataFrame and exercises the basic DataFrame
    * operations: show, printSchema, select, column arithmetic, filter, groupBy.
    */
  def testDataFrame() = {

    val df = sqlContext.read.json("file:///D:/people.json")
    df.show()
    df.printSchema()
    df.select("name").show()
    df.select(df("name"), df("age") + 1).show()
    df.filter(df("age") > 21).show()
    df.groupBy("age").count().show()
  }

}
