package com.atguigu.day07

import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}
import org.junit.Test

class $07_ReadFile {

  // Shared local SparkSession for every test method; Hive support is enabled
  // so that readHive() can query Hive-managed tables.
  val spark = SparkSession.builder()
    .master("local[4]")
    .appName("test")
    .enableHiveSupport()
    .getOrCreate()

  import spark.implicits._

  /**
    * Demonstrates the two styles Spark offers for reading files:
    *
    *   1. Generic:   spark.read.format(...).option(...).load(path)
    *      - format(): data source type (text/csv/json/parquet/...)
    *      - option(): reader parameters
    *      - load():   actually load the data
    *   2. Shortcut:  spark.read.option(...).csv(path) / .json(path) / .parquet(path)
    */
  @Test
  def read(): Unit = {
    // --- text ---
    //   spark.read.format("text").load("datas/wc.txt").show
    //   spark.read.textFile("datas/wc.txt")

    // --- csv ---
    // Common csv options:
    //   sep:         field separator
    //   header:      treat the first line as column names
    //   inferSchema: auto-infer column types
    //   val df = spark.read.format("csv").option("sep",",").option("header","true").option("inferSchema","true").load("datas/presidential_polls.csv")
    //   df.printSchema()
    //   spark.read.option("sep",",").option("header","true").csv("datas/presidential_polls.csv").show

    // --- json ---
    //   spark.read.format("json").load("datas/xx.json").show
    //   spark.read.json("datas/xx.json").show

    // --- parquet ---
    // Parquet is Spark's default format for both reading and writing, so all
    // three calls below are equivalent.
    // To (re)generate the fixture:
    //   spark.read.option("sep",",").option("header","true").csv("datas/presidential_polls.csv")
    //     .write.mode(SaveMode.Overwrite).parquet("output/parquet")
    spark.read.load("output/parquet").show
    spark.read.format("parquet").load("output/parquet").show
    spark.read.parquet("output/parquet").show
  }

  /**
    * Demonstrates the JDBC read paths against MySQL, and how each overload of
    * DataFrameReader.jdbc controls the resulting partition count.
    */
  @Test
  def readMySQL(): Unit = {

    // Generic format("jdbc") style. load() alone is enough to connect and
    // fetch the table schema; show() is left commented out.
    spark.read.format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/test")
      .option("user", "root")
      .option("password", "root123")
      .option("dbtable", "person")
      .load()
    //.show()

    // Connection credentials shared by the jdbc(...) overloads below.
    val connProps = new Properties()
    connProps.setProperty("user", "root")
    connProps.setProperty("password", "root123")

    // Way 1 (small tables only): the whole table lands in a single partition.
    val singlePartitionDf = spark.read.jdbc("jdbc:mysql://hadoop102:3306/gmall", "user_info", connProps)
    println(singlePartitionDf.rdd.partitions.length)

    // Way 2 (rarely used): one partition per predicate string, so the
    // partition count equals predicates.length.
    val predicates = Array("id<25", "id>=25 and id<50", "id>=50")
    val predicateDf = spark.read.jdbc("jdbc:mysql://hadoop102:3306/gmall", "user_info", predicates, connProps)
    println(predicateDf.rdd.partitions.length)
    //predicateDf.write.mode(SaveMode.Overwrite).csv("output/csv")

    // Way 3 (the usual choice for big tables): range-partition on a numeric
    // column. Partition count = (upperBound - lowerBound) > numPartitions
    //   ? numPartitions : (upperBound - lowerBound).
    // First look up the id range with two pushed-down subqueries.
    // NOTE(review): getAs[Long] assumes the id column maps to a JDBC
    // BIGINT — an INT column would surface as Integer; confirm the schema.
    val lowerRows = spark.read.jdbc("jdbc:mysql://hadoop102:3306/gmall", "(select min(id) minid from user_info) user", connProps).collect()
    val lowerBound = lowerRows.head.getAs[Long]("minid")
    val upperRows = spark.read.jdbc("jdbc:mysql://hadoop102:3306/gmall", "(select max(id) maxid from user_info) user", connProps).collect()
    val upperBound = upperRows.head.getAs[Long]("maxid")
    println(s"minid=${lowerBound} maxid=${upperBound}")
    val rangePartitionDf = spark.read.jdbc("jdbc:mysql://hadoop102:3306/gmall", "user_info", "id", lowerBound, upperBound, 100, connProps)
    println(rangePartitionDf.rdd.partitions.length)
  }

  /**
    * Reads a Hive table via SQL. Requires enableHiveSupport() on the session
    * (already set on the shared `spark` above), e.g.:
    *   SparkSession.builder().enableHiveSupport().master("local[4]").appName("test").getOrCreate()
    */
  @Test
  def readHive(): Unit = {
    spark.sql("select * from person").show
  }
}
