package com.example.bigdata.SparkSQL

import java.text.SimpleDateFormat
import java.util.Calendar

import com.example.bigdata.bean.User
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.io.Source

object SparkSQLDemo {
    /**
      * Entry point: builds a local SparkSession, loads JDBC connection settings
      * from conf/db.conf (one value per line: url, user, password, table), and
      * exposes a set of commented-out demos that can be enabled one at a time.
      */
    def main(args: Array[String]): Unit = {
        // Local SparkSession; a single core is enough for these demos.
        val spark: SparkSession = SparkSession
                .builder()
                .master("local[1]")
                .appName("SparkSQL-Demo")
                .getOrCreate()
        val jsonFile = "input/user.json"
        val csvFile = "input/user.csv"
        val parquetFile = "input/winco_TriKha/part1.parquet"
        val parquetFile1 = "input/users.parquet"
        val hdfsFile = "hdfs://172.17.128.2:9820/data/datacenter/locationTag/tagFromWifiFlat_D/2020/05/31/part-00000-71e4d83f-7585-47f3-a613-31f8a93e8cd3-c000.gz.parquet"
        // Uncomment exactly one of the demos below to run it:
        //runSparkSQL_Demo(spark)
        //DSL_Demo(spark,jsonFile)
        //SQL_Demo(spark,jsonFile)
        //read_CSVFileDemo(spark,csvFile)
        //read_ParquetFileDemo(spark,parquetFile)
        //自定义SchemaDemo(spark)
        //read_JDBCDemo(spark)
        //transformDemo(spark)
        //TriKha(spark,hdfsFile)
        val badfile = "hdfs://172.17.128.2:9820/data/datacenter/solar-system/wifiFlat/2020/02/03/part-02859-a49fbe4f-10f2-4db5-922f-fb741d313b34-c000.gz.parquet"
        //readBadParquet(spark,badfile)
        val dbfile = "conf/db.conf"
        // BUG FIX: close the Source after reading — the original leaked the file handle.
        val dbSource = Source.fromFile(dbfile, "UTF-8")
        val db = try dbSource.mkString.split("\n") finally dbSource.close()

        // BUG FIX: the original passed comma-separated expressions to println,
        // which Scala auto-tupled, so it printed a tuple like
        // "(url …\nuser …,\npasswd …,\ntablename …)". Concatenate into one string.
        println("url : " + db(0).trim +
                "\nuser : " + db(1).trim +
                "\npasswd : " + db(2).trim +
                "\ntablename  : " + db(3).trim)
        // JDBC options for spark.read.format("jdbc"); `val` — never reassigned.
        val dbinfo: Map[String, String] = Map(
            "url" -> db(0).trim,
            "driver" -> "com.mysql.jdbc.Driver",
            //"driver" -> "com.mysql.cj.jdbc.MysqlDataSource",
            "user" -> db(1).trim,
            "password" -> db(2).trim,
            "dbtable" -> db(3).trim
        )
        /*val input: Array[String] = Source.fromFile(args(0), "UTF-8").mkString.split("::")
        val temp_table=input(0)
        val sql=input(1)
        println("temp_table : " + input(0))
        println("sql : " + input(1))*/

        //val filename = "input/dbSQL.txt"
//        inputMySQL(spark,dbinfo,input(0),input(1))
        spark.stop()
    }

    /**
      * Registers a JDBC table as a temporary view and runs the given SQL against it.
      *
      * @param dbinfo    JDBC options ("url", "driver", "user", "password", "dbtable")
      * @param tablename name to register the loaded table under (temp view)
      * @param sql       query to execute against the registered view
      */
    def inputMySQL(spark: SparkSession, dbinfo: Map[String, String], tablename: String, sql: String): Unit = {
        // FIX: createTempView returns Unit — the original bound it to an unused val.
        spark.read.format("jdbc").options(dbinfo).load().createTempView(tablename)
        //spark.sql("set spark.sql.caseSensitive=true")
        spark.sql(sql).show
        println("sparkSQL 执行完成\n")
    }

    /**
      * Returns the date `dataNuber` days from today, formatted as "yyyy-MM-dd".
      * Negative values go into the past (e.g. -31 ≈ one month ago).
      *
      * NOTE(review): parameter name is a typo of "dataNumber"/"dayOffset"; kept
      * unchanged so any named-argument caller keeps compiling.
      */
    def getDay(dataNuber: Int): String = {
        // FIX: `val` instead of `var` — none of these are reassigned.
        val dateFormat = new SimpleDateFormat("yyyy-MM-dd")
        val cal = Calendar.getInstance()
        cal.add(Calendar.DATE, dataNuber)
        dateFormat.format(cal.getTime)
    }
    /**
      * Logs in to a Kerberos-secured, HA-configured HDFS cluster using the
      * keytab under conf/yhhadoop310001/, then reads a parquet file and counts
      * the distinct-ish set of tdids that match two tags and connected to wifi
      * within the last 31 days.
      *
      * @param file parquet path (expects columns: tagname, date, connect, tdid)
      */
    def TriKha(spark: SparkSession, file: String): Unit = {
        import spark.implicits._
        // Hadoop client config + Kerberos credentials for the remote cluster.
        val path = "conf/yhhadoop310001/"
        val krb5File = path + "krb5.conf"
        val coreFile = path + "core-site.xml"
        val hdfsFile = path + "hdfs-site.xml"
        val conf = new Configuration
        conf.addResource(new Path(coreFile))
        conf.addResource(new Path(hdfsFile))
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem")
        // HA namenode failover provider for nameservice "yhhadoop310001".
        conf.set("dfs.client.failover.proxy.provider.yhhadoop310001", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
        System.setProperty("java.security.krb5.conf", krb5File)
        UserGroupInformation.setConfiguration(conf)
        val keytabFile = path + "hadoop.keytab"
        val kerbersUser = "hadoop/bj-jd-dc-namenode-prod-0009.tendcloud.com@HADOOP.COM"
        UserGroupInformation.loginUserFromKeytab(kerbersUser, keytabFile)
        val dataframe = spark.read.parquet(file)
        // tagname = 学前教育 (preschool education) or 艺术兴趣 (art interest)
        val df2 = dataframe.filter($"tagname" === "学前教育" || $"tagname" === "艺术兴趣")
        // tdids that connected to wifi within the last 31 days
        val aMonthAgo = getDay(-31)
        val df3 = df2.filter($"date" >= aMonthAgo && $"connect".notEqual("false")).map(row => row.getAs[String]("tdid"))
        df3.show(100)
        println("df3.count() : " + df3.count())
        // BUG FIX: removed dead `df2.distinct()` — DataFrame transformations are
        // lazy and the result was discarded, so the call was a no-op.
    }

    /** Reads a possibly-corrupt parquet file and shows its first 20 rows. */
    def readBadParquet(spark: SparkSession, file: String): Unit = {
        val suspect = spark.read.parquet(file)
        suspect.show(20)
    }
    /**
      * Demonstrates the same query twice: first on an untyped DataFrame with
      * column expressions, then on a typed Dataset[User] with field lambdas.
      */
    def runSparkSQL_Demo(spark: SparkSession): Unit = {
        import spark.implicits._

        val jsonPath = "input/user.json"
        val untyped = spark.read.json(jsonPath)
        untyped.show()
        println("df1.count() :" + untyped.count())
        // Untyped: column-expression filters.
        untyped
                .filter($"sex" === "Female")
                .filter($"age" < 25)
                .show()
        // Typed: the same file decoded into the User bean.
        val typed = spark.read.json(jsonPath).as[User]
        typed.show()
        typed.filter(u => u.sex == "Female").filter(u => u.age < 25).show()
    }

    /**
      *  DSL 语法风格
      */
    /**
      * DSL-style queries: aggregation by sex, then the same filter written as
      * two chained predicates and as one combined predicate, and a typed map.
      */
    def DSL_Demo(spark: SparkSession, jsonFile: String): Unit = {
        import spark.implicits._
        val df = spark.read.json(jsonFile)
        // Per-sex aggregates: oldest age and number of names.
        val aggSpec = Map(
            "age" -> "max",
            "name" -> "count"
        )
        df.groupBy("sex").agg(aggSpec).show()
        df.show()
        // Two single-condition filters chained.
        df.select("name", "age", "sex")
                .filter($"sex" === "Female")
                .filter($"age" < 25)
                .show()
        // Same predicate, combined with &&.
        df.select("name", "age", "sex")
                .filter($"sex" === "Female" && $"age" < 25)
                .show()
        // Typed transformation of a single column.
        df.select($"name").map(r => r.getAs[String]("name").toLowerCase()).show()
    }

    /**
      *  SQL 语法风格
      */
    /**
      * SQL-style queries: registers the JSON file as temp view "t_user" and
      * runs three queries against it (full scan, filtered select, aggregation).
      */
    def SQL_Demo(spark: SparkSession, jsonFile: String): Unit = {
        spark.read.json(jsonFile).createTempView("t_user")
        val queries = Seq(
            "select * from t_user",
            "select name,age,sex from t_user where sex = 'Female' and age < 25 ",
            "select sex,max(age),count(name) from t_user group by sex "
        )
        queries.foreach(q => spark.sql(q).show())
    }

    /**
      *    SparkSql 读取 CSV 文件
      */
    /** Reads a headerless CSV and assigns the columns name/age/sex. */
    def read_CSVFileDemo(spark: SparkSession, csvFile: String): Unit = {
        spark.read
                .csv(csvFile)
                .toDF("name", "age", "sex")
                .show()
    }

    /**
      *  SparkSQL 读取Parquet文件
      */
    /** Loads a parquet file (parquet is Spark's default read format) and shows it. */
    def read_ParquetFileDemo(spark: SparkSession, parquetFile: String): Unit = {
        spark.read.load(parquetFile).show()
    }

    /**
      *   自定义Schema 存储parquet 数据
      */
    /**
      * Builds a DataFrame from an explicit schema plus hand-made Rows and
      * writes it out as a single parquet file under output/parquetFile.
      */
    def 自定义SchemaDemo(spark: SparkSession): Unit = {
        // Three nullable columns describing a user record.
        val userSchema = StructType(List(
            StructField("name", StringType, true),
            StructField("age", IntegerType, true),
            StructField("sex", StringType, true)
        ))
        // createDataFrame wants a java.util.List[Row].
        val rows = java.util.Arrays.asList(
            Row("Alice", 20, "Female"),
            Row("Tom", 18, "Male"),
            Row("Boris", 30, "Male")
        )
        val users = spark.createDataFrame(rows, userSchema)
        users.show()
        // repartition(1) so the output is a single parquet part file.
        users.repartition(1).write.format("parquet").save("output/parquetFile")
    }

    /**
      *  从数据库读取数据
      */
    /**
      * Shows the parquet snapshot previously saved under output/JDBCparquet.
      * The commented-out code below documents how that snapshot was produced
      * from a MySQL table via the JDBC source.
      */
    def read_JDBCDemo(spark: SparkSession): Unit = {
        val snapshot = spark.read.load("output/JDBCparquet")
        snapshot.show()
        /*
        val options = Map(
            "url" -> "jdbc:mysql://localhost:3306/guard?useSSL=false&serverTimezone=Asia/Shanghai",
            "driver"-> "com.mysql.cj.jdbc.Driver",
            "user"-> "root",
            "password"-> "123456",
            "dbtable"-> "student")
        val dataframe = spark.read.format("jdbc").options(options).load()
        dataframe.show()
        val dataframe7 = spark.createDataFrame(List(
            ("Alice","Female","20"),
            ("Tom","Male","25"),
            ("Boris","Male","18")
        )).toDF("name","sex","age")
        dataframe7.repartition(1).write.format("parquet").save("output/JDBCparquet")
        */
    }

    /**
      *  RDD/DF/DS 之间的转换
      */
    /**
      * Round-trips the same data through the three Spark abstractions:
      * RDD -> DataFrame -> Dataset[People] -> DataFrame -> RDD.
      */
    def transformDemo(spark: SparkSession): Unit = {
        import spark.implicits._
        // Start from a plain RDD of tuples.
        val sourceRdd = spark.sparkContext.makeRDD(List(
            (1, "zhangsan", 20),
            (2, "lisi", 30),
            (3, "wangwu", 40)
        ))
        // RDD -> DataFrame (columns named explicitly).
        val peopleDF = sourceRdd.toDF("id", "name", "age")
        peopleDF.show()

        // DataFrame -> typed Dataset.
        val peopleDS = peopleDF.as[People]
        peopleDS.show()

        // Dataset -> DataFrame again.
        val roundTripDF = peopleDS.toDF()

        // DataFrame -> RDD of Rows; fields are accessed by index.
        val rowRdd = roundTripDF.rdd
        rowRdd.foreach { row =>
            println(row.getInt(0))
        }
    }
}
/** Record type used by transformDemo's RDD/DataFrame/Dataset conversions. */
case class People(id: Int, name: String, age: Int)
