/* NOTE(review): this entire file is commented out — delete this opening marker and the closing one on the last line to re-enable the code.
package com.hliushi.spark.rdd

/**
 * Reads student records from a text file on HDFS through a standalone Spark
 * cluster and displays them as a DataFrame.
 *
 * author: Hliushi
 * date: 2021/5/13 8:41
 */

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Accesses a Spark cluster (standalone mode) from the local machine, for
 * debugging cluster jobs directly from the IDE.
 */
object ExeFromSparkStandAlone {
  /**
   * Entry point: reads a CSV-like text file from HDFS on a standalone Spark
   * cluster, parses each line into a [[Student]], and shows the DataFrame.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Submit the job as the "root" user so HDFS permission checks pass.
    System.setProperty("HADOOP_USER_NAME", "root")

    // HDFS file to read.
    val path = "hdfs://node01:8020/stu/student.txt"

    val conf: SparkConf = new SparkConf().setAppName("ExeFromSparkStandAlone")
      .setMaster("spark://node01:7077") // Spark standalone master address
      .set("spark.driver.host", "192.168.52.2") // required: local IP of the machine running the IDE
      .setJars(List("E:\\IdeaProjects\\my001\\target\\my001-1.0-SNAPSHOT.jar")) // required: jar built from this project

    val spark: SparkSession = SparkSession.builder.config(conf).getOrCreate()

    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // FIX: .toDF() below needs the SparkSession's implicit encoders in scope;
    // without this import the RDD ==> DataFrame conversion does not compile.
    import spark.implicits._

    // Read the file.
    val stuRDD: RDD[String] = sc.textFile(path)

    // RDD ===> DataFrame via the implicit conversion imported above.
    // split's second argument is -1 so trailing empty fields are kept.
    val stuDF: DataFrame = stuRDD.map(_.split(",", -1))
      .map(line => Student(line(0).toInt, line(1), line(2), line(3).toInt, line(4))).toDF()

    //    stuDF.printSchema()

    stuDF.show(100)

    // Default compression is Snappy, default format Parquet.
    //    stuDF.repartition(1).write.mode(SaveMode.Overwrite).parquet("E:\\data\\student")

    // Write with gzip compression instead.
    //    stuDF.repartition(1).write.mode(SaveMode.Overwrite)
    //      .option("compression","gzip").parquet("E:\\data\\student_gzip")

    // Release resources.
    spark.stop()
  }

  /**
   * One parsed row of the student file.
   *
   * @param id      numeric student id (column 0)
   * @param name    student name (column 1)
   * @param sex     sex as read from the file (column 2)
   * @param age     age in years (column 3)
   * @param country country as read from the file (column 4)
   */
  final case class Student(id: Int, name: String, sex: String, age: Int, country: String)

}*/
