package com.hliushi.spark.sql

import org.apache.spark.sql.SparkSession

/**
 * descriptions:
 *
 * author: Hliushi
 * date: 2021/5/18 16:17
 */
/**
 * Demo of two ways to read a MySQL table with Spark SQL:
 *  1. `spark.read.jdbc(url, table, props)` with connection properties.
 *  2. `spark.read.format("jdbc")` with a subquery passed through the
 *     `dbtable` option (Spark has no direct "run this SQL" parameter for
 *     JDBC reads, but `dbtable` accepts `(subquery) alias`, so arbitrary
 *     SQL can be pushed down to MySQL that way).
 */
object MysqlRead {

  def main(args: Array[String]): Unit = {
    // 1. Build the SparkSession (local mode with 6 threads).
    val spark = SparkSession.builder()
      .appName("mysql_read")
      .master("local[6]")
      .getOrCreate()

    try {
      // 2. JDBC connection properties for the first read.
      val props = new java.util.Properties()
      props.setProperty("driver", "com.mysql.jdbc.Driver")
      props.setProperty("user", "root")
      props.setProperty("password", "root")

      // Read the whole `telecommunications` table.
      // NOTE(review): `jdbc:mysql:///school_info` has no host — Connector/J
      // treats this as localhost:3306; confirm that is intended.
      val dataFrame = spark.read.jdbc("jdbc:mysql:///school_info", "telecommunications", props)

      // Ranking query using MySQL session variables:
      //   @i — running row counter (rank without ties collapsed),
      //   @k — dense-ish rank that repeats for equal scores,
      //   @score — previous row's score, used to detect ties.
      // NOTE(review): session-variable ordering semantics are only reliable
      // on MySQL < 8; on 8+ prefer window functions (RANK()/ROW_NUMBER()).
      val sql =
        """
          |select a.s_id,a.c_id,
          |        @i:=@i +1 as i保留排名,
          |        @k:=(case when @score=a.s_score then @k else @i end) as rank不保留排名,
          |        @score:=a.s_score as score
          |    from (
          |        select s_id,c_id,s_score from score GROUP BY s_id,c_id,s_score ORDER BY s_score DESC
          |)a,(select @k:=0,@i:=0,@score:=0)s
          |""".stripMargin

      // Spark SQL's JDBC source has no option to run arbitrary SQL directly,
      // but `dbtable` may contain a parenthesized subquery with an alias, so
      // the query result is exposed to Spark as if it were a table.
      spark.read.format("jdbc")
        .option("url", "jdbc:mysql:///test")
        // Explicit driver class, consistent with `props` above — relying on
        // driver auto-registration can fail with older Connector/J jars.
        .option("driver", "com.mysql.jdbc.Driver")
        //.option("dbtable", "(SELECT * FROM telecommunications WHERE s_id > 2013013989 AND s_id < 2018012319) as stu")
        .option("dbtable", s"($sql) as test")
        .option("user", "root")
        .option("password", "root")
        .load()
        .show()

      dataFrame.show()
    } finally {
      // Release the SparkSession (and its underlying SparkContext) even if a
      // read fails — the original leaked it on any exception.
      spark.stop()
    }
  }
}