package doit20.sparksql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{Column, Dataset, SparkSession}
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}


/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-04-10
 * @desc DSL风格api调用示例2
 */
/**
 * Demonstration of Spark SQL's DSL-style (DataFrame) API:
 * select / selectExpr, groupBy / agg, window functions, and joins,
 * each shown alongside its SQL-string equivalent where relevant.
 */
object Demo11 {

  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("dsl风格api演示")
      // Can also be overridden at submit time, e.g.:
      // spark-submit --master yarn --deploy-mode cluster --conf spark.sql.shuffle.partitions=400
      .config("spark.sql.shuffle.partitions", 1)
      .master("local")
      .enableHiveSupport()
      .getOrCreate()

    // CSV columns: id,name,age,score,gender
    val schema = StructType(Seq(
      StructField("id", DataTypes.IntegerType),
      StructField("name", DataTypes.StringType),
      StructField("age", DataTypes.IntegerType),
      StructField("score", DataTypes.DoubleType),
      StructField("gender", DataTypes.StringType)
    ))
    val frame = spark.read.schema(schema).option("header", "true").csv("data/stu2.txt")

    val res = frame.where("id<=6").select("id", "name", "age")
    // res.show(100,false)

    // String arguments to select() must be plain column names — expressions are not allowed:
    // val res2 = frame.where("id<=6").select("id","name","age+10")

    import spark.implicits._
    // With Column-typed arguments, select() does accept expressions
    frame.where("id<=6").select('id, 'name, 'age + 10)

    // selectExpr() accepts SQL expression strings
    val res2 = frame.where("id<=6").selectExpr("id", "name", "age+10")
    // res2.show(100,false)

    // Average score per gender: variant 1
    val res3 = frame.groupBy("gender").avg("score")
    // Rename the auto-generated aggregate column on the DataFrame
    val res4 = res3.withColumnRenamed("avg(score)", "avg_score")
    // res4.show(100, false)

    // Average score and total age per gender.
    // The available (column, method) aggregate methods are `avg`, `max`, `min`, `sum`, `count`.
    val res5 = frame.groupBy("gender").agg(("score", "avg"), ("age", "sum"))
    // res5.show(100,false)
    /**
     * +------+----------+--------+
     * |gender|avg(score)|sum(age)|
     * +------+----------+--------+
     * |m     |87.5      |121     |
     * |f     |84.225    |112     |
     * +------+----------+--------+
     */
    // Column-typed variant, aliasing the aggregate columns directly
    import org.apache.spark.sql.functions._
    frame.groupBy('gender).agg(avg('score) as "avg_score", sum('age) as "sum_age")


    /**
     * Top two students by score within each gender
     */
    frame.createTempView("t")
    spark.sql(
      """
        |select
        |  id,name,age,score,gender
        |from
        |(
        |   select
        |    id,name,age,score,gender,row_number() over(partition by gender order by score desc rows between unbounded preceding and current row) as rn
        |   from
        |    t
        | ) o
        |where rn<=2
        |
        |""".stripMargin)

    // DSL equivalent of the query above.
    // 'score.desc avoids the deprecated postfix-operator syntax ('score desc), which
    // warns without `import scala.language.postfixOps`; Window.currentRow (== 0) names
    // the frame bound instead of using the raw literal.
    val spec = Window.partitionBy('gender).orderBy('score.desc)
      .rowsBetween(Window.unboundedPreceding, Window.currentRow)
    val res6 = frame.select('id, 'name, 'age, 'score, row_number().over(spec) as "rn").where("rn<=2")
    // res6.show(100,false)


    /**
     * Join the stu and stu_other tables
     */
    val stuOther = spark.read.option("header", true).option("inferSchema", true).csv("data/stu_other.csv")
    stuOther.createTempView("o")
    spark.sql("select  t.*,o.* from t join o on t.id=o.id")

    // Without a join condition, the result is the Cartesian product
    val res7 = frame.join(stuOther)
    // res7.show(100,false)

    // Join condition given as a single usingColumn: both sides must have a column of that name
    val res8 = frame.join(stuOther, "id")
    // res8.show(100,false)

    // Join condition given as an explicit Column expression
    val res9 = frame.join(stuOther, frame("id") === stuOther("id"))
    // res9.show(100,false)

    // Same condition with an explicit join type
    val res10 = frame.join(stuOther, frame("id") === stuOther("id"), "left_outer")
    res10.show(100, false)


    spark.close()

  }
}
