package com.project.clean

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.StringType

object university_information {

  /**
   * Entry point: cleans the ODS-layer university information CSV and loads it
   * into the Hive DWD layer.
   *
   * Steps:
   *   1. Read `/finalProject/ods/university_information.csv` from HDFS (header row present).
   *   2. Drop every row that has a null in any of the required columns.
   *   3. Cast all retained columns explicitly to StringType.
   *   4. Overwrite the Hive table `final_project.dwd_university_information`.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Run HDFS operations as the "root" user (no authentication in this environment).
    System.setProperty("HADOOP_USER_NAME", "root")

    // Spark configuration: local mode using all available cores.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")

    // SparkSession with Hive support enabled so saveAsTable writes through the Hive metastore.
    val sparkSession = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .appName("University_Information")
      .getOrCreate()

    // try/finally guarantees the session is stopped even if a stage fails,
    // preventing a leaked SparkContext (the original code never stopped on failure).
    try {
      // HDFS path of the raw ODS-layer CSV file.
      val sourcePath = "/finalProject/ods/university_information.csv"

      // Read the CSV, treating the first row as the header.
      val df = sparkSession.read.option("header", "true").csv(sourcePath)

      // Columns that must be non-null for a row to be kept; also defines the output column order.
      val requiredCols = Seq("school", "province", "school_level", "school_types")

      // Drop any row containing a null in one of the required columns.
      val cleaned = df.na.drop(requiredCols)

      // Explicitly cast every retained column to StringType (derived from the single
      // column list instead of four hand-written, duplicated cast expressions).
      val table = cleaned.select(requiredCols.map(c => col(c).cast(StringType)): _*)

      // Persist the cleaned data to Hive: database final_project, table dwd_university_information.
      table.write.mode(SaveMode.Overwrite).saveAsTable("final_project.dwd_university_information")
    } finally {
      // A single stop() is sufficient: SparkSession.close() is just an alias for stop(),
      // so the original stop()+close() pair was redundant.
      sparkSession.stop()
    }
  }
}
