package com.project.clean

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{StringType}
import org.apache.spark.sql.types.DoubleType

object graduate_application {
  /** Cleans the raw graduate-application CSV from HDFS (ODS layer) and
    * writes the typed result to Hive table
    * `final_project.dwd_graduate_application` (DWD layer).
    */
  def main(args: Array[String]): Unit = {
    // Run HDFS operations as the "root" user.
    System.setProperty("HADOOP_USER_NAME", "root")

    // Spark configuration: local mode using all available cores.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")

    // SparkSession with Hive support enabled so saveAsTable can target Hive.
    val sparkSession = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .appName("Graduate_Application")
      .getOrCreate()

    // Path of the raw CSV file in HDFS.
    val graduate_application = "/finalProject/ods/graduate_application.csv"

    // Read the CSV; first row is the header. All columns are loaded as strings.
    val df = sparkSession.read.option("header", "true").csv(graduate_application)

    // Columns that must be non-null for a row to be kept.
    val cols = Array("year", "applicants_number", "growth_rate")
    df.show()

    // Drop every row that has a null in any of the listed columns.
    val df_1 = df.na.drop(cols)

    // Cast each column to its target type for the DWD layer.
    val table = df_1.select(
      col("year").cast(StringType),
      col("applicants_number").cast(DoubleType),
      col("growth_rate").cast(StringType)
    )

    // Persist the cleaned data to Hive, replacing any existing table contents.
    // NOTE(review): an earlier comment referred to database `db_finalProject`,
    // but the code writes to `final_project` — confirm which name is intended.
    table.write.mode(SaveMode.Overwrite).saveAsTable("final_project.dwd_graduate_application")

    // Stop the SparkSession. close() is just an alias of stop(), so the
    // original stop()+close() pair was redundant — one call suffices.
    sparkSession.stop()
  }

}
