package cn.doitedu.day07

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object T02_DataFrameDemo1 {

  /**
   * Demo: build a DataFrame from an RDD[Row] plus an explicit schema,
   * register it as a temporary view, and query it with Spark SQL.
   *
   * Expects `data/login.txt` to contain comma-separated lines of `uid,dt`.
   */
  def main(args: Array[String]): Unit = {

    // SparkSession is the unified entry point; it wraps a SparkContext internally.
    val spark: SparkSession = SparkSession.builder().appName("SQLWordCount")
      .master("local[4]")
      .getOrCreate()

    try {
      // A DataFrame is an abstract, distributed dataset: RDD + Schema.

      val lines: RDD[String] = spark.sparkContext.textFile("data/login.txt")

      // Schema describes each column: name, type, nullability (defaults to nullable).
      val schema = StructType(
        List(
          StructField("uid", StringType),
          StructField("dt", StringType)
        )
      )

      // Convert each raw line into a Row matching the schema above.
      // NOTE(review): assumes every line has at least two comma-separated fields —
      // a malformed line would throw ArrayIndexOutOfBoundsException at action time.
      val rowRDD: RDD[Row] = lines.map(line => {
        val fields = line.split(",")
        val uid = fields(0)
        val dt = fields(1)
        Row(uid, dt)
      })

      val df: DataFrame = spark.createDataFrame(rowRDD, schema)

      // Register the DataFrame as a temporary view so it can be queried via SQL.
      // createOrReplaceTempView avoids TempTableAlreadyExistsException on reruns
      // within the same session (createTempView fails if the name already exists).
      df.createOrReplaceTempView("v_login")

      val df2: DataFrame = spark.sql(
        s"""
           |select * from v_login where dt >= '2018-03-01'
           |""".stripMargin)

      df2.show()
    } finally {
      // Always release the SparkSession (and its underlying SparkContext).
      spark.stop()
    }
  }

}
