package com.project.clean

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{DoubleType, StringType}

object postgraduate_information {

  /** ETL job: cleans the raw postgraduate-information CSV stored in HDFS
    * (ODS layer) and writes the cleaned rows to the Hive table
    * `final_project.dwd_postgraduate_information` (DWD layer).
    */
  def main(args: Array[String]): Unit = {
    // Run HDFS operations as the `root` user (local-mode development convenience).
    System.setProperty("HADOOP_USER_NAME", "root")

    // Spark configuration: local mode using all available cores.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")

    // SparkSession with Hive support enabled so saveAsTable targets the Hive metastore.
    val sparkSession = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .appName("Postgraduate_Information")
      .getOrCreate()

    // Source file location in HDFS (ODS layer).
    val sourcePath = "/finalProject/ods/postgraduate_information.csv"

    // Read the CSV, treating the first row as the column header.
    val df = sparkSession.read.option("header", "true").csv(sourcePath)

    // Columns that must all be non-null for a row to be kept; also the
    // exact output column set, in output order.
    val cols = Array("year", "school_name", "department_name", "professional_code",
      "professional_name", "total_score", "politics_guanzong", "foreign_languages",
      "business_course_one", "business_course_two")

    // Drop every row containing a null in any of the required columns.
    val cleaned = df.na.drop(cols)

    // Cast each required column to StringType. Mapping over `cols` replaces
    // ten hand-written, deprecated-infix `col(...)cast(...)` expressions and
    // guarantees the select list stays in sync with `cols`.
    val table = cleaned.select(cols.map(c => col(c).cast(StringType)): _*)

    // Persist the cleaned data to Hive:
    // database `final_project`, table `dwd_postgraduate_information`.
    table.write.mode(SaveMode.Overwrite).saveAsTable("final_project.dwd_postgraduate_information")

    // stop() shuts the session down; close() is merely an alias of stop(),
    // so a single call is sufficient.
    sparkSession.stop()
  }

}
