//class0406
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

/**
 * Reads a comma-separated text file ("name, age" per line) into a DataFrame,
 * registers it as a temp view, and runs SQL queries against it.
 *
 * The input path may be supplied as the first program argument; otherwise the
 * bundled Spark example file is used.
 */
object person {
  // Row schema for the parsed input file.
  case class Person(name: String, age: Int)

  def main(args: Array[String]): Unit = {
    println("Hello World!")
    val conf = new SparkConf().setMaster("local").setAppName("person")
    val sc = new SparkContext(conf)
    // Use the imported SQLContext instead of re-spelling the fully-qualified name.
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    // Original input file location; can be overridden by the first CLI argument.
    val path = args.headOption.getOrElse(
      "Z:\\Server\\spark\\spark-2.4.4-bin-hadoop2.7\\examples\\src\\main\\resources\\people.txt")

    val people = sc.textFile(path)
      .map(_.split(","))
      // p(0) is already a String, so the original `.toString` was redundant.
      .map(p => Person(p(0).trim, p(1).trim.toInt))
      .toDF()

    // registerTempTable is deprecated since Spark 2.0; use createOrReplaceTempView.
    people.createOrReplaceTempView("people")

    val teenagers = sqlContext.sql("SELECT name, age FROM people WHERE age >= 13 AND age <= 19")
    val t2 = sqlContext.sql("SELECT name, age FROM people WHERE age >= 30")

    // Two equivalent ways to read a column: positional Row access and getAs by name.
    teenagers.map(t => "Name: " + t(0)).collect().foreach(println)
    teenagers.map(t => "Name: " + t.getAs[String]("name")).collect().foreach(println)
    t2.map(t => "Name: " + t(0)).collect().foreach(println)
    t2.map(t => "Name: " + t.getAs[String]("name")).collect().foreach(println)

    // Release the SparkContext's resources; the original leaked it.
    sc.stop()
  }
}
