import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

object E1_DataFrameTest {

  /** Entry point: runs the DataFrame / Dataset demonstration. */
  def main(args: Array[String]): Unit = {
    testDF()
  }

  /**
   * Demonstrates core Spark SQL usage:
   *   - reading a multi-line JSON file into a DataFrame,
   *   - SQL-style queries via a temp view and DSL-style queries,
   *   - round-trip conversions between RDD, DataFrame and Dataset.
   *
   * Expected console output for each step is shown in the comments below
   * (values assume the sample `person.json` with Michael/Andy/justin rows).
   */
  def testDF(): Unit = {
    // Alternative construction via an explicit SparkConf:
    //   val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("sparksql")
    //   val spark = SparkSession.builder().config(conf).getOrCreate()
    val spark = SparkSession.builder().master("local[*]").appName("sparksql")
      // .enableHiveSupport()
      .config("spark.sql.warehouse.dir", "target/spark-warehouse")
      .getOrCreate()

    // Required for the $"col" column syntax and the toDF()/toDS() conversions below.
    import spark.implicits._

    // Ensure the session is stopped even if a query below throws.
    try {
      // NOTE(review): hardcoded absolute Windows path — consider taking the
      // location from a program argument or loading it from the classpath.
      val df = spark.read
        .option("multiline", "true") // handle JSON records that span multiple lines
        .json("file:///D:\\workspace\\lab\\learnbigdata\\learnspark\\sparksql\\src\\main\\resources\\person.json")
      df.createOrReplaceTempView("user")

      // SQL-style query. Use spark.sql directly: SQLContext (df.sqlContext)
      // is the deprecated pre-2.0 entry point.
      println("11111111111111111111111111111")
      spark.sql("select avg(age) from user").show()
      // +--------+
      // |avg(age)|
      // +--------+
      // |    24.5|
      // +--------+

      // DSL-style projection by column name.
      println("22222222222222222222222222222")
      df.select("name", "age").show()
      // +-------+----+
      // |   name| age|
      // +-------+----+
      // |Michael|null|
      // |   Andy|  30|
      // | justin|  19|
      // +-------+----+

      // DSL-style projection with a column expression ($ comes from spark.implicits._).
      println("333333333333333333333333333")
      df.select($"name", $"age" + 1).show()
      // +-------+---------+
      // |   name|(age + 1)|
      // +-------+---------+
      // |Michael|     null|
      // |   Andy|       31|
      // | justin|       20|
      // +-------+---------+

      // Row filtering on a column predicate.
      println("44444444444444444444444444444")
      df.filter($"age" > 20).show()
      // +---+----+
      // |age|name|
      // +---+----+
      // | 30|Andy|
      // +---+----+

      // Aggregation: count rows per distinct age.
      println("555555555555555555555555555555")
      df.groupBy("age").count().show()
      // +----+-----+
      // | age|count|
      // +----+-----+
      // |  19|    1|
      // |null|    1|
      // |  30|    1|
      // +----+-----+

      // ***** RDD => DataFrame => Dataset round trips *****
      val rdd1: RDD[(Int, String, Int)] =
        spark.sparkContext.makeRDD(List((1, "qiaofeng", 30), (2, "xuzhu", 28), (3, "duanyu", 20)))
      // RDD => DataFrame (column names supplied explicitly)
      val df1: DataFrame = rdd1.toDF("id", "name", "age")
      // DataFrame => Dataset (typed via the Person encoder)
      val ds1: Dataset[Person] = df1.as[Person]
      // Dataset => DataFrame
      val df2: DataFrame = ds1.toDF()
      // DataFrame => RDD (rows are untyped Row objects)
      val rdd2: RDD[Row] = df2.rdd
      // RDD => Dataset (map tuples to the Person case class first)
      val ds2: Dataset[Person] = rdd1.map {
        case (id, name, age) => Person(id, name, age)
      }.toDS()
      // Dataset => RDD (keeps the Person element type)
      val rdd3: RDD[Person] = ds1.rdd
    } finally {
      // Side-effecting 0-arity method: call with parentheses.
      spark.stop()
    }
  }

  /** Row schema used for the typed Dataset conversions above. */
  final case class Person(id: Int, name: String, age: Int)
}
