package dataframe

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo: load the UCI bank-marketing dataset (bank-full.csv, semicolon-separated
 * with double-quoted string fields), parse it into a DataFrame, and run a few
 * exploratory Spark SQL queries over a temp view.
 */
object DataFrame_FinalDemo03 {

  /**
   * One parsed customer row. Only a subset of the CSV columns is kept:
   * age (col 0), job (1), marital (2), education (3), balance (5).
   */
  final case class BankCustomer(age: Int, job: String, marital: String, education: String, balance: Int)

  /** Strips the surrounding double quotes and any stray whitespace from a raw CSV field. */
  private def cleanField(raw: String): String = raw.replace("\"", "").replace(" ", "")

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("DataFrame_FinalDemo03")

    val spark: SparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()
    import spark.implicits._

    // Load the raw data file as lines of text.
    val lineRDD: RDD[String] = spark
      .sparkContext
      .textFile("data/bank-full.csv")

    // Preprocessing: drop the header line (it starts with the quoted "age"
    // column name) and parse each remaining line into a BankCustomer.
    val bankCustomerRDD: RDD[BankCustomer] = lineRDD
      .filter(line => !line.startsWith("\"age\";"))
      .map { line =>
        val fields = line.split(";")
        BankCustomer(
          cleanField(fields(0)).toInt,
          cleanField(fields(1)),
          cleanField(fields(2)),
          cleanField(fields(3)),
          // NOTE: fields(4) is the "default" column, deliberately skipped;
          // fields(5) is the account balance.
          cleanField(fields(5)).toInt
        )
      }

    // Convert to a DataFrame (column names come from the case class fields).
    val bankCustomerDF: DataFrame = bankCustomerRDD.toDF()
    bankCustomerDF.show(5)

    // Register a temp view so the data can be queried with Spark SQL.
    bankCustomerDF.createOrReplaceTempView("tb_bankcustomer")

    // Exploratory analysis: total row count, then all customers under 30.
    println(bankCustomerDF.count())

    spark
      .sql("select * from tb_bankcustomer where age<30")
      .show()

    // Distribution of customers under 30, counted per age.
    spark.sql(
      """
        |select age,count(*) as total_count
        |from tb_bankcustomer
        |where age<30
        |group by age
        |order by age
        |""".stripMargin)
      .show()

    spark.stop()
  }
}
