package project

/**
 * @author 鲁新茹
 * @date 2023/6/19
 */
import java.util.Properties

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SparkSession}
object SparkSql1 {

  /**
   * Standalone local Spark job: loads the `student` table from MySQL over
   * JDBC, computes male/female counts per class and per (semester, class),
   * prints both results, and writes them back to MySQL tables
   * `studentResult` and `studentResult1` (overwriting if they exist).
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Build the session directly; the previous separate SparkConf set a
    // conflicting appName ("sparkSql") that the builder overrode anyway,
    // and duplicated the master setting — both removed as redundant.
    val session: SparkSession = SparkSession.builder()
      .appName("importData")
      .master("local[2]")
      .getOrCreate()

    try {
      val url = "jdbc:mysql://192.168.57.11:3306/information?characterEncoding=UTF-8"
      // Source table to load
      val tableName = "student"
      // Destination table for result 1 (counts per class)
      val tableName1 = "studentResult"
      // Destination table for result 2 (counts per semester and class)
      val tableName2 = "studentResult1"

      // JDBC connection properties.
      // NOTE(review): credentials are hard-coded; move them to external
      // configuration before production use.
      val properties = new Properties()
      properties.setProperty("user", "root")
      properties.setProperty("password", "root")

      // Read the source table into a DataFrame via JDBC
      val dataFrame: DataFrame = session.read.jdbc(url, tableName, properties)
      dataFrame.show()
      // Register as a temp view so it can be queried with SQL.
      // createOrReplaceTempView (unlike createTempView) does not throw
      // AnalysisException if the view name is already registered.
      dataFrame.createOrReplaceTempView("stuInfo")

      // Result 1: male ('1') / female ('0') counts grouped by class
      val result1: DataFrame = session.sql(
        "select classId,sum(case when sex = '1' then 1 else 0 end) as male,sum(case when sex = '0' then 1 else 0 end) as female from stuInfo group by classId")
      println("各个班级男女各自的总数：")
      result1.show()
      // mode "overwrite": replaces the target table, creating it if absent
      result1.write.mode("overwrite").jdbc(url, tableName1, properties)

      // Result 2: same counts, additionally grouped by semester
      val result2: DataFrame = session.sql(
        "select semester,classId,sum(case when sex = '1' then 1 else 0 end) as male,sum(case when sex = '0' then 1 else 0 end) as female from stuInfo group by semester,classId")
      println("不同学期各个班级男女各自的人数：")
      result2.show()
      result2.write.mode("overwrite").jdbc(url, tableName2, properties)
    } finally {
      // Always release Spark resources, even if a query or JDBC write fails
      // (the original never stopped the session — a resource leak).
      session.stop()
    }
  }
}
