package com.project.clean

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{IntegerType, StringType}

object all_college {

  /** Entry point: cleans the raw `all_college.csv` file from the HDFS ODS layer
    * (drops rows with nulls in any required column, casts columns to their target
    * types) and overwrites the Hive table `final_project.dwd_all_college`.
    */
  def main(args: Array[String]): Unit = {
    // Run HDFS operations as the "root" user (presumably no Kerberos on this
    // cluster — NOTE(review): confirm this is intended outside local testing).
    System.setProperty("HADOOP_USER_NAME", "root")

    // Spark configuration: local mode, using all available cores.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")

    // SparkSession with Hive support enabled so saveAsTable writes managed Hive tables.
    val sparkSession = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .appName("All_College")
      .getOrCreate()

    // HDFS path of the raw ODS-layer CSV.
    val all_college = "/finalProject/ods/all_college.csv"
    // Read the CSV, treating the first row as the header.
    val df = sparkSession.read.option("header", "true").csv(all_college)

    // Columns required to be non-null; also the columns kept in the final projection.
    val cols = Array("province", "school_name", "type", "public_private",
      "Bachelor_associate", "nine_eight_five", "two_one_one",
      "double_first_class", "city", "belonging_to", "address")

    // Drop every row containing a null in any of the listed columns.
    val df_1 = df.na.drop(cols)

    // Cast each column to its target type: all strings except the two flag
    // columns nine_eight_five / two_one_one, which become integers.
    val table = df_1.select(
      col("province").cast(StringType),
      col("school_name").cast(StringType),
      col("type").cast(StringType),
      col("public_private").cast(StringType),
      col("Bachelor_associate").cast(StringType),
      col("nine_eight_five").cast(IntegerType),
      col("two_one_one").cast(IntegerType),
      col("double_first_class").cast(StringType),
      col("city").cast(StringType),
      col("belonging_to").cast(StringType),
      col("address").cast(StringType)
    )

    // Persist the cleaned data to Hive: database final_project, table dwd_all_college.
    table.write.mode(SaveMode.Overwrite).saveAsTable("final_project.dwd_all_college")

    // Release Spark resources. SparkSession.close() merely delegates to stop(),
    // so a single stop() call is sufficient.
    sparkSession.stop()
  }
}
