package org.file_service

import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

object Test01 {

  /**
   * Loads a CSV file into a DataFrame, prints its contents, and stops the session.
   *
   * The CSV is read with a header row, multi-line quoted fields enabled, and
   * automatic schema inference.
   *
   * @param sparkSession active [[SparkSession]] used to read the file
   * @param path         location of the CSV file to load; defaults to the
   *                     original hard-coded HDFS result file
   */
  def analysis(sparkSession: SparkSession,
               path: String = "hdfs://hadoop102:8020/comprehension/result.csv"): Unit = {
    println("执行操作")
    sparkSession
      .read
      .format("csv")
      .option("header", "true")      // first row contains the column names
      .option("multiLine", "true")   // allow quoted fields to span multiple lines
      .option("inferSchema", "true") // infer column types from the data
      .load(path)
      .show()
    // NOTE(review): this stops the caller-supplied session, making it unusable
    // afterwards — confirm callers expect the session's lifecycle to end here.
    sparkSession.stop()
  }

}