package sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Walkthrough of the three Spark SQL abstractions and the conversions
 * between them: RDD <-> DataFrame <-> Dataset.
 *
 * Demonstrates: reading JSON into a DataFrame, querying it via a temp view
 * (SQL) and via the DSL, building a typed Dataset from a Seq and from an
 * RDD, and round-tripping DataFrame <-> Dataset[User].
 */
object SparkSql {
  def main(args: Array[String]): Unit = {
    // Set up the Spark SQL runtime: local mode using all available cores.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSql")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    // Required for toDF/toDS conversions and the $"col" column syntax below.
    import spark.implicits._

    // --- DataFrame operations ---
    // Read line-delimited JSON (Spark's default JSON source format).
    val df: DataFrame = spark.read.json("data/user.json")
    df.show()

    // Register as a temporary view so the data can be queried with plain SQL.
    df.createOrReplaceTempView("user")
    spark.sql("select * from user").show()
    spark.sql("select avg(age) from user").show()

    // DSL-style queries on the same DataFrame.
    df.select("age", "username").show()
    // $"age" is the supported column-reference syntax; the old symbol-literal
    // form ('age) is deprecated in Scala 2.13+ and removed in newer Spark.
    df.select($"age" + 1).show()

    // --- Dataset operations ---
    // A DataFrame is just Dataset[Row]; a Dataset carries a concrete type.
    val seq = Seq(1, 2, 3, 4)
    val ds: Dataset[Int] = seq.toDS()
    ds.show()

    // RDD -> DataFrame: name the tuple fields explicitly.
    val rdd: RDD[(Int, String, Int)] = spark.sparkContext.makeRDD(
      List((1, "张三", 32), (2, "lisi", 23), (3, "lala", 18)), 3)
    val df1: DataFrame = rdd.toDF("id", "name", "age")
    df1.show()
    // DataFrame -> RDD[Row] (untyped rows come back out).
    val rowRDD: RDD[Row] = df1.rdd

    // DataFrame <-> Dataset: .as[User] needs an Encoder[User], which the
    // implicits import derives automatically for case classes (Product types).
    val ds1: Dataset[User] = df1.as[User]
    ds1.toDF()

    // RDD -> Dataset directly, mapping each tuple into the typed case class.
    val ds2: Dataset[User] = rdd.map {
      case (id, name, age) => User(id = id, name = name, age = age)
    }.toDS()
    ds2.show()

    spark.close()
  }

  /**
   * Row type for the typed Dataset examples. A case class is serializable and
   * gets a companion object with apply/unapply for free, which is exactly what
   * Spark's product encoder requires.
   */
  final case class User(id: Int, name: String, age: Int)

}
