package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo of the Spark DataFrame (untyped Dataset) API:
 * column selection, groupBy/agg, window functions, and joins.
 *
 * Reads fixture files from `spark/data/`; run locally with `local[4]`.
 */
object Demo6DataFrameApi {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("sql")
      .master("local[4]")
      // Keep shuffle partitions small for a local demo (default is 200).
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    try {
      import spark.implicits._

      // Telecom grid data: one row per (subscriber, grid cell, day).
      val dianxingDF: DataFrame = spark
        .read
        .option("sep", ",")
        .schema("mdn STRING , grid_id STRING,city_id STRING,county_id STRING,duration STRING,grid_first_time STRING, grid_last_time STRING,  day_id STRING")
        .csv("spark/data/dianxin_data")

      // Three equivalent ways to reference a column. Note: `select` is a lazy
      // transformation and these results are discarded — shown for illustration only.
      dianxingDF.select("mdn")
      dianxingDF.select(dianxingDF("mdn"))
      dianxingDF.select($"mdn") // equivalent to dianxingDF("mdn")

      // Import the built-in Spark SQL functions (countDistinct, max, row_number, ...).
      import org.apache.spark.sql.functions._

      // Top 10 counties by visitor count (distinct subscribers) within each city:
      // count distinct mdn per (city, county), then rank counties inside each city
      // with a window partitioned by city, ordered by the count descending.
      dianxingDF
        .groupBy($"city_id", $"county_id")
        .agg(countDistinct($"mdn") as "c")
        .select($"city_id", $"county_id", $"c", row_number over Window.partitionBy($"city_id").orderBy($"c".desc) as "rank")
        .where($"rank" <= 10)
        .show(1000)

      // Highest score per course.
      val scoreDF: DataFrame = spark.read
        .option("sep", ",")
        .schema("s_id STRING,c_id STRING,sco INT")
        .csv("spark/data/score.txt")

      scoreDF
        .groupBy($"c_id")
        .agg(max($"sco") as "sco")
        .show(1000)

      // Join scores with student records.
      val studentDF: DataFrame = spark.read
        .option("sep", ",")
        .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
        .csv("spark/data/students.txt")

      // Left join keeps every score row even when no matching student exists.
      // The unqualified $"s_id" === $"id" works only because the column names
      // are unique across the two DataFrames.
      val joinDF: DataFrame = scoreDF.join(studentDF, $"s_id" === $"id", "left")

      joinDF.show()

      /* Alternative: build the schema programmatically instead of from a DDL string.
      import org.apache.spark.sql._
      import org.apache.spark.sql.types._

      val schema: StructType = StructType(
        List(
          StructField("id", StringType),
          StructField("name", StringType),
          StructField("age", IntegerType),
          StructField("gender", StringType),
          StructField("clazz", StringType)
        )
      )

      val studentDF: DataFrame = spark.read
        .option("sep", ",")
        .schema(schema)
        .csv("")
      */
    } finally {
      // Bug fix: the session (and its SparkContext) was never stopped,
      // leaking resources at program end. Always release it.
      spark.stop()
    }
  }
}
