
import org.apache.spark.{SPARK_BUILD_DATE, SparkConf}
import org.apache.spark.sql.{SaveMode, SparkSession, functions}
import org.apache.spark.sql.functions.{col, to_date}
import org.apache.spark.sql.types.{DataType, DateType, FloatType, IntegerType, StringType}


// Data cleaning and load into Hive
object spark {

  /**
   * Cleans the raw student CSV and loads it into the Hive table `db_stu.tb_stu`:
   * rows missing any required column are dropped, then every column is cast to
   * its target type, and the result overwrites the table.
   *
   * @param args optional: `args(0)` overrides the default input CSV path
   *             (backward compatible — with no args the original path is used).
   */
  def main(args: Array[String]): Unit = {
    // Column name -> target type, in output order. Single source of truth for
    // both the null-drop and the cast/select below. "student_iD" matches the
    // CSV header's exact (odd) casing — do not "fix" it here.
    val columnTypes: Seq[(String, DataType)] = Seq(
      "student_iD"        -> IntegerType,
      "name"              -> StringType,
      "gender"            -> StringType,
      "age"               -> IntegerType,
      "gpa"               -> FloatType,
      "major"             -> StringType,
      "interested_domain" -> StringType,
      "projects"          -> StringType,
      "future_career"     -> StringType,
      "python"            -> StringType,
      "sql"               -> StringType,
      "java"              -> StringType
    )

    // 1. Spark configuration (val, not var: never reassigned).
    val sparkConf = new SparkConf()
      .setMaster("local[*]")

    // 2. SparkSession; Hive support is required for saveAsTable to reach the metastore.
    val sparksession = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .appName("stu")
      .getOrCreate()

    try {
      val originPath =
        if (args.nonEmpty) args(0) else "/student/data/stu.1735281572509.csv"

      val df = sparksession.read.option("header", "true").csv(originPath)

      // Drop any row that is missing a value in one of the required columns.
      val dfWithCorrectDateAndCleaned = df.na.drop(columnTypes.map(_._1))

      // Cast each column to its declared type, preserving order.
      val table = dfWithCorrectDateAndCleaned.select(
        columnTypes.map { case (name, tpe) => col(name).cast(tpe) }: _*
      )

      table.write.mode(SaveMode.Overwrite).saveAsTable("db_stu.tb_stu")
    } finally {
      // Always release the session, even if the job fails.
      // stop() already closes the session; the original stop()+close() was redundant.
      sparksession.stop()
    }
  }
}
