package com.doit.day06

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import java.util.Properties

/**
 * @Author:
 * @WX: 17710299606
 * @Tips: To learn big data, come to Duoyi Education
 * @DOC: https://blog.csdn.net/qq_37933018?spm=1000.2115.3001.5343
 * @Description: Demonstrates the Spark DataFrame output sinks: plain files
 *               (parquet/orc/csv/text/json), a MySQL table via JDBC, and a
 *               partitioned Hive table.
 */
/**
 * Demo of the three DataFrame output paths: files, JDBC (MySQL), and Hive.
 * Only the read + SQL transform runs unconditionally; each sink example is
 * kept commented out so it can be enabled one at a time.
 */
object Demo07_Out {
  def main(args: Array[String]): Unit = {
    // Local Spark session; Hive support is required by example 3 (saveAsTable).
    val session = SparkSession.builder()
      .appName("test")
      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()

    try {
      val df = session.read.json("data/log")
      // Register the raw log data so it can be transformed with SQL.
      df.createTempView("tb_log")

      // Derive a date column `dt` (yyyy-MM-dd) from the epoch-millisecond `ts`
      // field; `dt` is the partition column used by the Hive example below.
      val df2: DataFrame = session.sql(
        """
          |select
          | * ,
          | from_unixtime(ts/1000 , 'yyyy-MM-dd') as  dt
          |from
          |tb_log
          |""".stripMargin)

      /*
      3. Write to a Hive table, partitioned by the derived dt column
      df2
        .write
        .mode(SaveMode.Overwrite)
        .format("orc")
        .option("orc.compress", "snappy")
        // partition the output by the value of the dt column
        .partitionBy("dt")
        .saveAsTable("doe48.tb_log")
      */

      /**
       * 2. Write data to MySQL via JDBC
       */
      /*
      val url = "jdbc:mysql://localhost:3306/doe47?characterEncoding=UTF-8"
      val table = "tb_log"
      val properties = new Properties()
      properties.setProperty("user", "root")
      properties.setProperty("password", "root")
      df.write.mode(SaveMode.Append).jdbc(url, table , properties)
      */

      /**
       * 1. Write to files in various formats
       */
      // df.write.parquet()
      // df.write.orc()
      // df.write.csv()
      // df.write.text()
      // df.write.json()
      // df.write.save("data/out")
      // df.write.format("json").save("data/out2")
    } finally {
      // Always release Spark resources, even when the job throws.
      session.stop()
    }
  }

}
