package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * Spark SQL batch job: find the students who passed every subject.
  *
  * Input (CSV, comma-separated, no header):
  *   - data/students.txt : id, name, age, gender, clazz
  *   - data/score.txt    : sId (student id), cId (subject id), sco (score)
  *   - data/subject.txt  : cId, cName, sumSco (full marks for the subject)
  *
  * Output: tab-separated CSV at data/temp1 with columns
  *   [id, name, clazz, gender, cName, sco] — one row per passed subject
  *   for each student who passed all subjects.
  */
object Demo6Student {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("student")
      // Single shuffle partition is enough for this small local dataset.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Student table
    val student: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING, clazz STRING")
      .load("data/students.txt")

    // Score table (one row per student per subject)
    val score: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("sId STRING,cId STRING ,sco DOUBLE")
      .load("data/score.txt")

    // Subject table (sumSco = full marks; pass threshold is 60% of it)
    val subject: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("cId STRING,cName STRING ,sumSco DOUBLE")
      .load("data/subject.txt")

    // Total number of subjects, derived from the subject table instead of
    // hard-coding 6, so the job stays correct if subjects change.
    val subjectCount: Long = subject.count()

    /**
      * Task: students who passed every subject
      * -> [id, name, clazz, gender, subject name, subject score]
      */
    val resultDF: DataFrame = score
      // Attach each score's subject row (full marks needed for threshold).
      .join(subject, "cId")
      // Keep only passing scores: at least 60% of the subject's full marks.
      .where($"sco" >= $"sumSco" * 0.6)
      // Count how many subjects each student passed (window, no row loss).
      .withColumn("c", count($"sId") over Window.partitionBy($"sId"))
      // Keep students who passed every subject.
      .where($"c" === subjectCount)
      // Attach student details.
      .join(student, $"sId" === $"id")
      // Final column layout.
      .select($"id", $"name", $"clazz", $"gender", $"cName", $"sco")

    // Write the result as tab-separated CSV, replacing any previous output.
    resultDF
      .write
      .format("csv")
      .mode(SaveMode.Overwrite)
      .option("sep", "\t")
      .save("data/temp1")

    // Release the SparkSession's resources (original leaked it).
    spark.stop()
  }

}
