package com.neuedu.heart

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.IntegerType

/**
 * ETL job: reads the raw cardio CSV from HDFS (ODS layer), drops rows with
 * null values, casts every column to IntegerType, and writes the cleaned
 * data to the Hive table `db_heart.dwd_cardio` (DWD layer).
 *
 * Run locally with `local[*]`; the Hive table is created automatically on
 * first write (SaveMode.Overwrite replaces it on subsequent runs).
 */
object CardioEtl {
  def main(args: Array[String]): Unit = {
    // Access HDFS/Hive as the "root" user (no Kerberos assumed here).
    System.setProperty("HADOOP_USER_NAME", "root")

    // 1. Spark configuration (local master for development runs).
    val sparkConf = new SparkConf()
      .setMaster("local[*]")

    // 2. SparkSession with Hive support so saveAsTable targets the metastore.
    val sparkSession = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .appName("CardioEtl")
      .getOrCreate()

    // 3. Source file location in HDFS (ODS layer).
    val originPath = "/heart-data/ods/cardio_train.csv"

    // 4. Read the CSV; first line is the header. All columns arrive as strings.
    val df = sparkSession.read.option("header", "true").csv(originPath)

    // Expected column set of the cardio dataset.
    val cols = Array("id", "age", "gender", "height", "weight", "ap_hi", "ap_lo",
      "cholesterol", "gluc", "smoke", "alco", "active", "cardio")

    // 5. Drop any row containing a null in one of the listed columns.
    //    BUG FIX: DataFrames are immutable — the original code discarded the
    //    result of na.drop, so the null filtering never took effect. The
    //    cleaned DataFrame must be captured and used downstream.
    val cleaned = df.na.drop(cols)

    // 6. Cast every column from string to integer for the DWD table schema.
    val table = cleaned.select(cols.map(name => col(name).cast(IntegerType)): _*)

    // 7. Persist the cleaned, typed data to Hive (table auto-created;
    //    Overwrite makes the job idempotent / re-runnable).
    table.write.mode(SaveMode.Overwrite).saveAsTable("db_heart.dwd_cardio")

    // 8. Shut down the session. close() is an alias of stop(), so a single
    //    call suffices (the original called both redundantly).
    sparkSession.stop()
  }
}
