package com.scala.learn.sparksql1

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

/**
  * @Copyright: Shanghai Definesys Company.All rights reserved.
  * @Description:
  * @author: chuhaitao
  * @since: 2019/3/9 14:57
  * @history:
  *          1.2019/3/9 created by chuhaitao
  */
object SqlDemo {

  /**
    * Demonstrates the pre-Spark-2.0 SQL API: building a DataFrame from an
    * RDD of case-class rows via [[SQLContext]], registering it as a temp
    * table, and querying it with both raw SQL and the DataFrame API.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("TopN").setMaster("local")
    val sc = new SparkContext(conf)

    // Ensure the SparkContext is released even if the job throws,
    // avoiding a leaked local Spark runtime.
    try {
      // Sample rows in "id,name,age,fv" CSV form.
      val lines = sc.parallelize(List("2,laozhuang,20,99", "3,xiaoming,18,199", "4,xiaohuang,20,200", "5,xiaoniu,18,99"))

      // Parse each CSV line into a Person so the DataFrame gets a schema.
      val userRDD: RDD[Person] = lines.map(_.split(","))
        .map(arr => Person(arr(0).toLong, arr(1), arr(2).toInt, arr(3).toInt))

      val sqlContext = new SQLContext(sc)
      // Import implicit conversions so the RDD can be turned into a DataFrame.
      import sqlContext.implicits._
      val pdf = userRDD.toDF()
      // Register as a temporary table so it can be queried with SQL.
      pdf.registerTempTable("user")

      // Option 1: raw SQL through the SQLContext.
      // val result = sqlContext.sql("select * from user order by fv desc ,age asc ")
      // result.show()

      // Option 2: the DataFrame API.
      val res1 = pdf.select("id") // `val`: never reassigned (was `var`)

      pdf.filter(pdf.col("age") >= 18).show()
      pdf.printSchema()
      res1.show()
    } finally {
      sc.stop() // release Spark resources deterministically
    }
  }

  /** Row schema bridging the RDD and the DataFrame: id, name, age, fv (score). */
  case class Person(id: Long, name: String, age: Int, fv: Int)

}
