package edu.csl.study.spark.basic

import java.util.Properties

import edu.csl.study.spark.basic.StructuredStreaming.rootDir
import org.apache
import org.apache.spark
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.api.java.UDF1
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

/**
 * Spark SQL demos (DataFrame / Dataset / JDBC / Hive).
 * Note on log analysis: textFile decodes input with UTF-8 by default, so
 * GBK-encoded files come out garbled and must be re-encoded explicitly.
 */
object SparkSQL {

  // Case classes used as DataFrame/Dataset schemas. They must be declared at
  // object level (outside any method) so Spark can derive implicit Encoders.
  case class Person(id:String,name:String,age:Int)
  // Variant with age kept as String — used where the source column is not
  // guaranteed to be numeric (e.g. parquet written from an all-string schema).
  case class Person2(id:String,name:String,age:String)
  /**
   * Builds a DataFrame by mapping each token array onto the Person case class
   * and letting Spark derive the schema from the class via implicit Encoders.
   *
   * @param spark   active SparkSession (supplies the implicits for toDF)
   * @param rddText RDD of pre-split lines, expected layout: [id, name, age]
   * @return DataFrame with columns id, name, age
   */
  def  getDataFrame_CaseClass(spark: SparkSession ,rddText:RDD[Array[String]] ):DataFrame={
    println("------------获取dataframe方式：通过样例类-----------------")
    // Implicit conversions required for .toDF() on an RDD of case classes.
    import spark.implicits._
    // Associate each record with the case class, then convert. Note: the age
    // field is parsed with toInt and will throw on non-numeric input.
    rddText
      .map(fields => Person(fields(0), fields(1), fields(2).toInt))
      .toDF()
  }

  /**
   * Builds a DataFrame by pairing a generic Row RDD with an explicitly
   * declared StructType schema (the alternative to case-class inference).
   *
   * @param spark   active SparkSession
   * @param rddText RDD of pre-split lines, expected layout: [id, name, age]
   * @return DataFrame with columns id (string), name (string), age (int)
   */
  def  getDataFrame_StructType(spark: SparkSession ,rddText:RDD[Array[String]] ):DataFrame={
    println("------------ 获取dataframe方式：通过StructType直接指定Schema-----------------")
    // Wrap each token array in a Row; field count and runtime types here must
    // match the schema declared below exactly, or createDataFrame fails at
    // execution time.
    val rows: RDD[Row] = rddText.map(fields => Row(fields(0), fields(1), fields(2).toInt))
    val personSchema = StructType(Seq(
      StructField("id", StringType),
      StructField("name", StringType),
      StructField("age", IntegerType)
    ))
    spark.createDataFrame(rows, personSchema)
  }
  /**
   * Loads the `person` table from a local MySQL database into a DataFrame
   * via Spark's JDBC reader.
   *
   * NOTE(review): connection details (URL, user, password) are hard-coded for
   * demo purposes — verify before reuse outside this tutorial.
   *
   * @param spark active SparkSession
   * @return DataFrame backed by the MySQL `person` table
   */
  def  getDataFrame_MySQL(spark: SparkSession ):DataFrame={
    println("------------获取dataframe方式：通过MySQL获取DF-----------------")
    // JDBC endpoint and the table to load.
    val jdbcUrl = "jdbc:mysql://localhost:3306/bigdata"
    val table   = "person"
    // Connection properties: credentials for the demo database.
    val connProps = new Properties()
    connProps.setProperty("user", "root")
    connProps.setProperty("password", "admin")
    spark.read.jdbc(jdbcUrl, table, connProps)
  }

  /**
   * Writes a DataFrame into a MySQL table via JDBC.
   *
   * saveMode — insert mode passed straight to DataFrameWriter.mode(String):
   *  - overwrite: replace contents; creates the table if it does not exist
   *  - append   : append rows;      creates the table if it does not exist
   *  - ignore   : do nothing if the table already exists
   *  - error    : fail if the table already exists (Spark's default)
   *
   * To run from a packaged jar on a cluster, the MySQL driver must be on both
   * driver and executor classpaths:
   * spark-submit \
   * --master spark://node01:7077 \
   * --class com.kaikeba.sql.Data2Mysql \
   * --executor-memory 1g \
   * --total-executor-cores 4 \
   * --driver-class-path /home/hadoop/mysql-connector-java-5.1.38.jar \
   * --jars /home/hadoop/mysql-connector-java-5.1.38.jar \
   * spark_class02-1.0-SNAPSHOT.jar \
   * append  kaikeba
   *
   * --driver-class-path : extra jars for the driver
   * --jars              : extra jars for the executors
   *
   * @param df        data to persist
   * @param saveMode  one of "overwrite" | "append" | "ignore" | "error"
   * @param tableName destination MySQL table
   */
  def  saveDataFrame_MySQL(df: DataFrame ,saveMode:String,tableName:String):Unit={
    println("------------存储在Mysql中-----------------")

    // JDBC endpoint of the demo database.
    val url="jdbc:mysql://localhost:3306/bigdata"
    // Connection properties: credentials (hard-coded for the demo).
    val properties = new Properties()
    properties.setProperty("user","root")
    properties.setProperty("password","admin")

    // Bug fix: honor the caller-supplied saveMode instead of hard-coding
    // SaveMode.Append (the parameter was previously accepted but ignored).
    df.write.mode(saveMode).jdbc(url,tableName,properties)
  }
  // Working directory of the running JVM (project root when launched from an IDE).
  val baseDir = System.getProperty("user.dir")
  // Local test-data directory. NOTE(review): uses a Windows-style separator,
  // and this member shadows the rootDir imported from StructuredStreaming (L5)
  // for all references inside this object.
  val rootDir = baseDir+ "\\testFile\\"

  /**
   * Entry point: builds a local 2-core SparkSession and runs the Dataset demo.
   * Uncomment the second call to run the DataFrame walkthrough instead.
   */
  def main(args: Array[String]): Unit = {
    val session = SparkSession
      .builder()
      .appName("CaseClassSchema")
      .master("local[2]")
      .getOrCreate()
    session.sparkContext.setLogLevel("WARN")
    dataSet_Demo(session)
    //dataFrame_Demo(session)
  }
  /**
   * DataFrame walkthrough: builds DataFrames via case class, StructType and
   * JDBC; registers UDFs; runs DSL-style and SQL-style queries; and writes
   * results in several formats (text/json/parquet/csv/table/MySQL).
   *
   * NOTE(review): depends on local files under rootDir and a running local
   * MySQL instance — demo code, not production code.
   *
   * @param spark active SparkSession
   */
  def dataFrame_Demo(spark: SparkSession): Unit = {

    // SparkSession wraps SparkConf, SparkContext and SQLContext; since Spark
    // 2.0 it is the unified entry point (SQLContext/HiveContext still exist
    // but SparkSession is preferred).
    val sc = spark.sparkContext

    sc.setLogLevel("warn")


    // Register custom UDFs usable from the SQL below.

    // lower -> UPPER, via the Java UDF1 interface (return type given explicitly)
    spark.udf.register("low2Up",new UDF1[String,String]() {
      override def call(t1: String): String = {
        t1.toUpperCase
      }
    },StringType)

    // UPPER -> lower, via a plain Scala function (return type inferred)
    spark.udf.register("up2low",(x:String)=>x.toLowerCase)




    // Way 1 to obtain a DataFrame: through a case class
    println("------------获取dataframe方式一：通过样例类-----------------")
     val rddText:RDD[Array[String]] =    sc.textFile(rootDir +"people.txt").map(x=>x.split(" "))
     val personDF1: DataFrame = getDataFrame_CaseClass(spark,rddText)

      // print schema and contents
      personDF1.printSchema

      personDF1.show()

    val dataFrame2: DataFrame = getDataFrame_StructType(spark,rddText)
    // print schema and contents
    dataFrame2.printSchema

    dataFrame2.show()

    val dataFrame3: DataFrame =getDataFrame_MySQL(spark)

    dataFrame3.show()

    dataFrame3.createTempView("user")

    // exercise the low2Up UDF registered above
    spark.sql("select low2Up(name) from user where age = 18").show

    // Persisting results
    //todo: 5.1 save to a plain text file ---- text output supports a single column only
    var path = rootDir +"people_out.txt"
    FileTools.deleteDir(path);
    spark.sql("select name from user ").write.text(path)

    //todo: 5.2 save to a json file
    path = rootDir +"people_out.json"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.json(path )

    //todo: 5.3 save to a parquet file
    path = rootDir +"people_parquet"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.parquet(path)

    //todo: 5.4 save(): the default output format is parquet
    path = rootDir +"people_save"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.save(path)

    //todo: 5.5 save to a csv file
    path = rootDir +"people_csv"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.csv(path)

    //todo: 5.6 saveAsTable stores data as parquet files under spark-warehouse by default
    path = baseDir +"\\spark-warehouse\\person2"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.saveAsTable("person2")
    path = baseDir +"\\spark-warehouse\\person3"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.format("parquet").save(path)

    //todo: 5.7 partition output directories by a single column
    path = rootDir +"people_partitions"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.partitionBy("age").json(path)

    //todo: 5.8 partition output directories by multiple columns
    path = rootDir +"people_numPartitions"
    FileTools.deleteDir(path);
    spark.sql("select * from user ").write.partitionBy("age","name").json(path)
    // Save results into a MySQL table.
    // mode — insert mode:
    //overwrite: replace; creates the table if it does not exist
    //append   : append;  creates the table if it does not exist
    //ignore   : no-op if the table already exists
    //error    : fail if the table already exists (the default)
    saveDataFrame_MySQL(spark.sql("select name, pass, age, sex, phone from user where age = 18 "),"append","person2")


    println("------------/读取数据获取DataFrame方式二：sparkSession获取DataFrame------------------")

    // Way 2: read directly through SparkSession (yields a single "value" column)
    val personDF2:DataFrame=spark.read.text(rootDir +"people.txt")

    // print schema and contents
    personDF2.printSchema

    personDF2.show()

    println("------------spark.read.json读取Json-----------------")
    val peopleDF3:DataFrame=spark.read.json(rootDir +"person.json")
    // print schema info
    peopleDF3.printSchema

    // show the data
    peopleDF3.show

    // column selection — the implicits provide the $"col" syntax used below
    import spark.implicits._
    println("------------DataFrame操作:DSL风格语法 -----------------")
    peopleDF3.select("name").show
    peopleDF3.select($"name").show
    peopleDF3.select(peopleDF3.col("name")).show

    peopleDF3.select($"name",$"age",$"age"+1).show

    // keep only rows with age > 30
    peopleDF3.filter($"age" > 30).show

    // count per age group
    peopleDF3.groupBy("age").count.show

    // count per age group, sorted descending by count
    peopleDF3.groupBy("age").count().sort($"count".desc).show

    println("------------DataFrame操作:SQL风格语法 -----------------")



    // register the DataFrame as a temp view so it can be queried with SQL
    peopleDF3.createTempView("person")
    // run SQL statements through SparkSession.sql
    spark.sql("select * from person").show
    spark.sql("select name from person").show
    spark.sql("select name,age from person").show
    spark.sql("select * from person where age >30").show
    spark.sql("select count(*) from person where age >30").show
    spark.sql("select age,count(*) from person group by age").show
    spark.sql("select age,count(*) as count from person group by age").show
    spark.sql("select * from person order by age desc").show




      // shut down the session
    spark.stop()
  }

  /**
   * Dataset walkthrough: reads text/csv/json/parquet into DataFrames and
   * Datasets, demonstrates explicit schemas, and DataFrame <-> Dataset
   * conversions.
   *
   * @param spark active SparkSession
   */
  def dataSet_Demo(spark: SparkSession)= {
    println("------------DataFrame操作 - spark.read.text-----------------")
    val df: DataFrame = spark.read.text(rootDir + "people.txt")
    df.printSchema
    df.show()
    /*
       Returned data (one "value" column per line):
        +----------------------+
        |                 value|
        +----------------------+
        |1 李四 18 1 1363136...|
        |2 萌娃 25 0 1363136...|
        |3 静静 26 0 1363136...|
        |4 小鱼 29 1 1363136...|
        |5 岁岁 32 0 1363136...|
        +----------------------+
     *
     */
    println("------------DataFrame操作 - spark.read.csv-----------------")
    val df2: DataFrame = spark.read.csv(rootDir + "people.csv")
    df2.show()
    /*  Without a schema, columns get generated names _c0.._c4:
        +---+----+---+---+-----------+
        |_c0| _c1|_c2|_c3|        _c4|
        +---+----+---+---+-----------+
        |  1|李四| 18| 女|13631360811|
        |  2|萌娃| 25| 男|13631360812|
        |  3|静静| 26| 男|13631360813|
        |  4|小鱼| 29| 男|13631360814|
        |  5|岁岁| 32| 女|13631360815|
        +---+----+---+---+-----------+
    */
    val  df3: DataFrame = spark.read.csv(rootDir + "people.csv").toDF("id","name","age","sex","phone")
    df3.show()
    /*  toDF renames the generated columns:
    +---+----+---+---+-----------+
    | id|name|age|sex|      phone|
    +---+----+---+---+-----------+
    |  1|李四| 18| 女|13631360811|
    |  2|萌娃| 25| 男|13631360812|
    |  3|静静| 26| 男|13631360813|
    |  4|小鱼| 29| 男|13631360814|
    |  5|岁岁| 32| 女|13631360815|
    +---+----+---+---+-----------+
    */

    // Explicit schema: only the first three CSV columns are declared here.
    val schema=StructType(
      StructField("id",StringType)::
        StructField("name",StringType)::
        StructField("age",IntegerType)::Nil
    )
    // Two equivalent ways to apply the schema when reading CSV.
    val  df31: DataFrame = spark.read.schema(schema).csv(rootDir + "people.csv")
    val source:DataFrame = spark.read.schema(schema).format("csv").load(rootDir + "people.csv");
    source.show()


    println("------------DataFrame操作 - spark.createDataFrame-----------------")
    val  df4: DataFrame =spark.createDataFrame(
      List(("张三1","M","27"),("张三2","F","20"),("张三3","M","24"))).toDF("name","sex","age")
    df4.show()
    /*+-----+---+---+
      | name|sex|age|
      +-----+---+---+
      |张三1|  M| 27|
      |张三2|  F| 20|
      |张三3|  M| 24|
      +-----+---+---+
      */

    /**
     * map on a DataFrame returns a Dataset (here Dataset[Person]), so ds1
     * below is already a Dataset, not a DataFrame.
     */
    import spark.implicits._
    val ds1 = df.map(item => {
      var arr = item.getString(0).split(" ")
      Person(arr(0), arr(1), arr(2).toInt)
    })
    ds1.show()

    /**
     * Conversion cheat sheet:
     * // Dataset -> DataFrame
     * val dataFrame = ds.toDF()
     * // DataFrame -> Dataset
     * val dataSet = df.as[Person]
     * // RDD from either
     * val rdd1 = dataFrame.rdd
     * val rdd2 = dataSet.rdd
     */


    println("------------DataSet操作:json -> DataFrame  -----------------")
    val peopleDF: DataFrame = spark.read.json(rootDir + "person.json")
    /**
     * Column equality operator is ===
     * Column inequality operator is =!=
     */
    peopleDF.filter($"name" === "zhangSan").show()

    println("------------DataSet操作:json ->  DataSet -----------------")
    val peopleDS: Dataset[Person2] = spark.read.json(rootDir + "person.json").as[Person2]
//    peopleDS.printSchema()
//    peopleDS.show()
    /**
     * Although cast to Person2, the rows still carry the extra school and sex
     * fields from the JSON source:
     * +---+---+-----+----------------------------------+---+
     * |age| id| name|                            school|sex|
     * +---+---+-----+----------------------------------+---+
     * | 18|  1|smith|[南京市鼓楼区汉口路22号, 南京大学]| 男|
     * +---+---+-----+----------------------------------+---+
     */
    peopleDS.filter(_.name.equals("smith") ).show()

    println("------------DataSet操作:  parquet -> DataFrame -----------------")
    // Reads the parquet output produced by dataFrame_Demo (5.6) — run that first.
    val path = baseDir +"\\spark-warehouse\\person3"
    val person3DF:DataFrame  = spark.read.parquet(path);
    person3DF.show()
    println("------------DataSet操作:  parquet -> Dataset -----------------")
    val person3DS:Dataset[Person2]  = spark.read.parquet(path).as[Person2];
    person3DS.show()

  }

  /***
   * Hive demo: creates a table, loads local data into it and queries it.
   * Requires Spark built with Hive support and a local ./data/kaikeba.txt.
   */
  def HiveOperate(): Unit = {
    // 1. Build a SparkSession with Hive support enabled
    val spark: SparkSession = SparkSession.builder()
      .appName("HiveSupport")
      .master("local[2]")
      .enableHiveSupport() // enable HiveQL / metastore support
      .getOrCreate()
    // 2. Run HiveQL statements directly through the session

    // 2.1 Create the table. Robustness fix: IF NOT EXISTS makes the demo
    // re-runnable instead of failing once the table already exists.
    spark.sql("create table if not exists people(id string,name string,age int) row format delimited fields terminated by ','")

    // 2.2 Load local data into the table
    spark.sql("load data local inpath './data/kaikeba.txt' into table people ")

    // 2.3 Query
    spark.sql("select * from people").show()

    spark.stop()
  }


}
