package com.zt.bigdata.spark.spark.sql

import com.typesafe.scalalogging.LazyLogging
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Encoders, SparkSession}

/**
 * Row shape used to derive a Spark SQL schema (`a INT, b DOUBLE`) for the
 * `from_json` demo below. Case-class constructor parameters are `val`s by
 * default, so the explicit `val` modifier was redundant; marked `final`
 * since case classes should not be extended.
 */
final case class A(a: Int, b: Double)

/**
 * One product-revenue record for the window-function examples
 * (ranking and aggregates partitioned by `category`).
 *
 * @param product  product name
 * @param category product category (the window partition key)
 * @param revenue  revenue for this product
 *
 * Case-class constructor parameters are `val`s by default, so the explicit
 * `val` modifier was redundant; marked `final` since case classes should
 * not be extended.
 */
final case class Revenue(product: String, category: String, revenue: Int)

/**
 * Executable tour of Spark SQL built-in functions and window functions.
 *
 * Each statement runs a small SQL expression or DataFrame operation against
 * a local SparkSession and prints the result with `show()`. This is a
 * REPL-style reference, not production code.
 *
 * Fixes vs. the previous revision:
 *  - the seven `val x = spark.sql(...).show` bindings captured `Unit`
 *    (`show` is side-effecting and returns `Unit`); the misleading vals
 *    are removed and `show()` is called directly;
 *  - `import spark.implicits._` appeared three times in the same scope,
 *    now imported once;
 *  - the SparkSession is now stopped at the end of `main`.
 */
object SparkSQLFunction extends LazyLogging {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder().master("local")
      .appName("Spark SQL basic example")
      .config("spark.sql.warehouse.dir", "/tmp/spark-warehouse")
      .getOrCreate()

    // One import for the whole method: $-interpolator, Symbol -> Column,
    // and RDD/Seq `.toDF` all come from here.
    import spark.implicits._

    // Demo convenience: lets a bare SQL string be shown as a DataFrame.
    // NOTE(review): an implicit String => DataFrame conversion is normally
    // an anti-pattern; it is tolerated here only because this file is a demo.
    implicit def sql(str: String): DataFrame = spark.sql(str)

    // ---- comparison / arithmetic / bitwise operators ----
    "select 2 <= 2".show

    """select 2 % 1.8""".show

    """select MOD(2,1.8)""".show()

    """select 3 & 5""".show()

    """select 2 * 3 """.show()

    "select 1.0 / 1".show

    """select to_date('2009-07-30 04:17:52') <= to_date('2009-07-30 04:17:52')""".show

    """SELECT to_date('2009-07-30 04:17:52') <= to_date('2009-08-01 04:17:52')""".show

    // any comparison with NULL yields NULL (except <=>, demonstrated below)
    """select 1 <= NULL""".show

    // <=> is the null-safe equality operator: NULL <=> NULL is true
    """select 2 <=> 2""".show()

    """select 1 <=> '1'""".show()

    """select NULL <=> NULL""".show()

    """select 2 = 2""".show()

    """select 1 = '1'""".show()

    """select NULL = NULL""".show()

    """select 2 == 2""".show()

    """select 1 == '1'""".show()

    """select NULL == NULL""".show()

    """select 3 ^ 5""".show()

    // ---- math / date / array / string built-ins ----
    """select abs(-1)""".show()

    """select acos(1)""".show()

    // acos outside [-1, 1] yields NaN
    """select acos(2)""".show()

    """select add_months('2016-01-31',1)""".show()
    """select date_add('2016-01-31',29)""".show()

    """select array(1,2,3)""".show()
    """select array_contains(array(1,2,3),2)""".show()

    """SELECT ascii('222')""".show()

    """select asin(0)""".show()
    """select asin(0.5)""".show()

    "select assert_true(0<1)".show()

    """select base64('Spark SQL') """.show()

    // value overflows bigint — demonstrates overflow behavior of the cast
    """select bigint(11111111111111111111111111111)""".show()

    """SELECT bin(13)""".show()
    """SELECT bin(-13)""".show()

    // banker's rounding (round half to even)
    """select bround(2.5,0)""".show()

    """select cast('10' as int)""".show()
    """select cast('10' as double)""".show()
    """select cast('2016-01-31' as date)""".show()

    // parsing with explicit patterns vs. epoch conversions
    """select to_date('2016/01/31 04/40/43','yyyy/MM/dd HH/mm/ss')""".show()
    """select to_timestamp('2016/01/31 04/40/43','yyyy/MM/dd HH/mm/ss')""".show()
    """select unix_timestamp('2016/01/31 04/40/43','yyyy/MM/dd HH/mm/ss')""".show()
    """select from_unixtime(1454186443,'yyyy-MM-dd HH:mm:ss')""".show()

    """select cbrt(27)""".show()

    """select ceil(-0.1)""".show()

    """select char(65)""".show()

    """SELECT character_length('Spark SQL ')""".show()

    """SELECT coalesce(NULL, 1, NULL)""".show()

    """SELECT concat('Spark', 'SQL')""".show()

    """SELECT concat_ws(' ', 'Spark', 'SQL')""".show()

    // radix (number base) conversion
    """select conv('100',2,10)""".show()
    """SELECT conv(-10, 16, -10)""".show()

    // same functions applied through the DataFrame API instead of SQL text
    spark.sparkContext
      .parallelize(Seq("100", "101"))
      .toDF("a")
      .select(conv('a, 2, 10), cos('a), cbrt('a))
      .show()

    // truncate a timestamp to various granularities
    """select date_trunc('YEAR','2015-03-05T09:32:05.359'),
      |date_trunc('MM','2015-03-05T09:32:05.359'),
      |date_trunc('DD','2015-03-05T09:32:05.359'),
      |date_trunc('HOUR','2015-03-05T09:32:05.359')
    """.stripMargin.show()

    """select datediff('2009-07-31', '2009-07-30')""".show()

    // derive a Spark schema ("a INT, b DOUBLE") from the case class A
    val schema = Encoders.product[A].schema

    """select format_string('Hello World %d %s' ,100,'days')""".show()
    """select from_json('{"a":1, "b":0.8}','a INT ,b DOUBLE')""".show()
    """select
      |from_json('{"time":"26/08/2015"}','time Timestamp',map('timestampFormat', 'dd/MM/yyyy'))""".stripMargin
      .show()

    spark
      .sql("""select '{"a":1, "b":0.8}' as json""")
      .select(from_json('json, schema))
      .show()

    """select from_unixtime(0,'yyyy-MM-dd HH:mm:ss')""".show()

    """select get_json_object('{
      |"name":"zhang",
      |"age":28,
      |"address":{"province":"zhejiang","city":"hangzhou"}}','$.address.province') as province""".stripMargin.show()

    """select greatest(10, 9, 2, 4, 3)""".show()

    """select if( 1 < 2 ,true,false)""".show()
    """SELECT
      |to_json(named_struct('a', 1, 'b', 2)),
      |to_json(named_struct('time', to_timestamp('2015-08-26', 'yyyy-MM-dd')), map('timestampFormat', 'dd/MM/yyyy')),
      |to_json(array(named_struct('a', 1, 'b', 2))),
      |to_json(map('a', named_struct('b', 1))),
      |to_json(map(named_struct('a', 1),named_struct('b', 2))),
      |to_json(map('a', 1)),
      |to_json(array((map('a', 1))))
    """.stripMargin.show()

    // ---- window functions ----
    val productRevenue = spark.sparkContext.parallelize(
      Seq(
        Revenue("Thin", "Cell phone", 6000),
        Revenue("Normal", "Tablet", 1500),
        Revenue("Mini", "Tablet", 5500),
        Revenue("Ultra thin", "Cell phone", 5000),
        Revenue("Very thin", "Cell phone", 6000),
        Revenue("Big", "Tablet", 2500),
        Revenue("Bendable", "Cell phone", 3000),
        Revenue("Foldable", "Cell phone", 3000),
        Revenue("Pro", "Tablet", 4500),
        Revenue("Pro2", "Tablet", 6500)
      )
    ).toDF()

    productRevenue.createTempView("productRevenue")
    // top-two products by revenue within each category (dense_rank keeps ties)
    spark.sql(
      """
        |select product,category,revenue,rank
        |from (
        |select product,category,revenue,dense_rank() over (partition by category order by revenue desc) as rank
        |from productRevenue) tmp
        |where
        |rank <= 2
      """.stripMargin
    ).show()

    /*
    +----------+----------+-------+----+
    |   product|  category|revenue|rank|
    +----------+----------+-------+----+
    |      Thin|Cell phone|   6000|   1|
    | Very thin|Cell phone|   6000|   1|
    |Ultra thin|Cell phone|   5000|   2|
    |      Pro2|    Tablet|   6500|   1|
    |      Mini|    Tablet|   5500|   2|
    +----------+----------+-------+----+
    */
    println("使用window")
    // same query via the DataFrame window API; note row_number (unlike
    // dense_rank above) breaks ties, so "Very thin" gets rn = 2 here
    val w = Window.partitionBy($"category").orderBy($"revenue".desc)

    productRevenue
      .withColumn("rn", row_number.over(w))
      .where($"rn" <= 2)
      .show()
    /*
    +----------+----------+-------+----+
    |   product|  category|revenue|  rn|
    +----------+----------+-------+----+
    |      Thin|Cell phone|   6000|   1|
    | Very thin|Cell phone|   6000|   2|
    |      Pro2|    Tablet|   6500|   1|
    |      Mini|    Tablet|   5500|   2|
    +----------+----------+-------+----+
    */

    // w2 uses the default frame (unbounded preceding .. current row), so
    // first() sees the partition max; w3's frame reaches to the end, so
    // last() sees the partition min
    val w2 = Window.partitionBy('category).orderBy('revenue.desc)
    val w3 = Window.partitionBy('category).orderBy('revenue.desc).rowsBetween(Window.currentRow, Window.unboundedFollowing)

    productRevenue.select(
      'product,
      'category,
      'revenue,
      ('revenue - first('revenue).over(w2)).alias("diff with max"),
      ('revenue - last('revenue).over(w3)).alias("diff with min")
    ).show()

    val df = Seq(
      (1, "b"),
      (1, "b"),
      (3, "b"),

      (1, "a"),
      (1, "a"),
      (2, "a")
    )
      .toDF("id", "category")

    val byCategoryOrderedById = Window
      .partitionBy('category)
      .orderBy('id)
      .rowsBetween(Window.currentRow, 1) // frame = current row plus 1 following row
    // rowsBetween: the frame offset is counted in physical rows

    val byCategoryOrderedById2 = Window
      .partitionBy('category)
      .orderBy('id)
      .rangeBetween(Window.currentRow, 1)
    // rangeBetween: the frame offset is measured on the ORDER BY value

    df.withColumn("sum", sum('id) over byCategoryOrderedById)
      .withColumn("sum2", sum('id) over byCategoryOrderedById2)
      .show()

    /*
    +---+--------+---+----+
    | id|category|sum|sum2|     sum      .rangeBetween(start, end)
    +---+--------+---+----+
    |  1|       b|  3|   3| -> 1 + 2    current row value 1, following 2 - 1 = 1 <= end(1) so 1 + 2 = 3
    |  2|       b|  5|   5|    2 + 3                      2            3 - 2 = 1 <= end(1) so 2 + 3 = 5
    |  3|       b|  3|   3|    3                          3                                           3
    |  1|       a|  2|   4|    1 + 1                      1            1 - 1 = 0 <= end(1)  2 - 1 <= end(1) so 1 + 1 + 2 = 4
    |  1|       a|  3|   4|    1 + 2                      1            1 - 1 = 0 <= end(1)  2 - 1 <= end(1) so 1 + 1 + 2 = 4
    |  2|       a|  2|   2|    2                          2                                           2
    +---+--------+---+----+
    */

    val empDF = spark.createDataFrame(Seq(
      (7369, "SMITH", "CLERK", 7902, "17-Dec-80", 800, 20, 10),
      (7499, "ALLEN", "SALESMAN", 7698, "20-Feb-81", 1600, 300, 30),
      (7521, "WARD", "SALESMAN", 7698, "22-Feb-81", 1250, 500, 30),
      (7566, "JONES", "MANAGER", 7839, "2-Apr-81", 2975, 0, 20),
      (7654, "MARTIN", "SALESMAN", 7698, "28-Sep-81", 1250, 1400, 30),
      (7698, "BLAKE", "MANAGER", 7839, "1-May-81", 2850, 0, 30),
      (7782, "CLARK", "MANAGER", 7839, "9-Jun-81", 2450, 0, 10),
      (7788, "SCOTT", "ANALYST", 7566, "19-Apr-87", 3000, 0, 20),
      (7839, "KING", "PRESIDENT", 0, "17-Nov-81", 5000, 0, 10),
      (7844, "TURNER", "SALESMAN", 7698, "8-Sep-81", 1500, 0, 30),
      (7876, "ADAMS", "CLERK", 7788, "23-May-87", 1100, 0, 20)
    )).toDF("empno", "ename", "job", "mgr", "hiredate", "sal", "comm", "deptno")

    // salary ranking within each department: dense_rank vs rank vs row_number
    println("部门内工资排名")
    val partitionWindow = Window.partitionBy('deptno).orderBy('sal.desc)
    empDF.select(
      'empno,
      'deptno,
      'sal,
      (dense_rank() over partitionWindow).alias("denseRank"),
      (rank() over partitionWindow).alias("rank"),
      (row_number() over partitionWindow).alias("row_number")
    ).show()

    // running salary sum per department (default frame: unbounded preceding .. current row)
    println("部门内工资总和")
    empDF
      .select(
        '*,
        (sum('sal) over partitionWindow).alias("depSum"))
      .show()

    // next lower salary in the department (lead), default 0 when none
    println("部门内工资比你低一位的那个人")
    empDF.select(
      '*,
      lead('sal, 1, 0).over(partitionWindow).alias("next_value")
    ).show()

    // next higher salary in the department (lag), default 0 when none
    println("部门内工资比你高一位的那个人")
    empDF.select(
      '*,
      lag('sal, 1, 0).over(partitionWindow).alias("prev_value")
    ).show()

    // highest earner per department: first() over the descending window
    println("部门内收入最高的那个人")
    empDF.select(
      '*,
      first('sal).over(partitionWindow).alias("first_val")
    ).show()

    // Deliberate pitfall: last() over the DEFAULT frame never looks past the
    // current row, so this does NOT return the partition minimum.
    logger.error("部门内收入最低的那个人,使用last 有错误")
    logger.error("\n发生这种情况是因为默认窗口框架是无界前行和当前行之间的范围，\n" +
      "因此除非您更改框架，否则last_value（）永远不会超出当前行。")
    empDF.select(
      '*,
      last('sal).over(partitionWindow).alias("last_val")
    ).show()

    // corrected version: extend the frame to unbounded following so last()
    // really yields the lowest salary in the department
    println("修改后的 部门内收入最低的那个人")
    val partitionWindowWithUnboundedFollowing =
      Window
        .partitionBy('deptno)
        .orderBy('sal.desc)
        .rowsBetween(Window.currentRow, Window.unboundedFollowing)
    empDF.select(
      '*,
      last('sal).over(partitionWindowWithUnboundedFollowing).alias("last_val")
    ).show()

    // ---- window functions over a CSV stock file ----
    // NOTE(review): machine-specific absolute path — this section fails on any
    // other machine; consider making the path a program argument.
    val data = spark.sparkContext
      .textFile("file:///Users/zhangtong/IdeaProjects/hangy/zhangtong/springcloud/allin/spark/spark-project/src/main/resources/stock.txt")
      .map(x => x.split(","))
      .map(x => (x(0), x(1), x(2).toDouble, x(3).toDouble, x(4).toDouble, x(5).toDouble, x(6).toInt))

    val df2 = data
      .toDF("date", "Ticker", "open", "high", "low", "close", "volume_for_the_day")
    df2.createTempView("stocks")

    // previous day's close per ticker
    spark.sql(
      """select
        |Ticker,
        |date,
        |close,
        |lag(close,1) over (partition by ticker order by date ) as yesterday_price
        |from stocks""".stripMargin).show()

    // next day's close per ticker
    spark.sql(
      """select
        |Ticker,
        |date,
        |close,
        |lead(close,1) over (partition by ticker order by date ) as yesterday_price
        |from stocks""".stripMargin).show()

    // first high value per ticker
    spark
      .sql(
        """select
          |distinct ticker ,
          |date,high,
          |first_value(high) over (partition by ticker order by date) as first_high
          |from stocks""".stripMargin).show()

    // last high value per ticker — note the explicit full frame, same pitfall
    // as the last() example above
    spark
      .sql(
        """select
          |distinct ticker,
          |date,
          |high,
          |last_value(high) over (partition by ticker order by date ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) as last_high
          |from stocks""".stripMargin).show(50)

    // per-ticker aggregates as window functions (no GROUP BY collapse)
    spark.sql(
      """
        |select distinct ticker,
        |min(close) over (partition by ticker) as lowest,
        |max(close) over (partition by ticker) as largest,
        |count(ticker) over (partition by ticker) as counts,
        |sum(volume_for_the_day) over (partition by ticker) as total_volume
        |from stocks
      """.stripMargin).show(50)

    // running volume total per ticker
    spark.sql(
      """
        |select  ticker,
        |date,
        |volume_for_the_day,
        |sum(volume_for_the_day) over (partition by ticker order by date) as running_total
        |from stocks
      """.stripMargin).show(50)

    // average daily volume per ticker
    spark.sql(
      """
        |select distinct ticker,
        |avg(volume_for_the_day) over (partition by ticker) as average_volume
        |from stocks
      """.stripMargin).show(50)

    /* Oracle
      select month,
           sum(tot_sales) month_sales,
           sum(sum(tot_sales)) over (order by month
              rows between 1 preceding and unbounded following) all_sales
      from orders
      group by month;

       MONTH    MONTH_SALES ALL_SALES (intended: full-year total) — the result is wrong;
     ---------- ----------- ----------  the frame should be "unbounded preceding"
         1      610697    6307766
         2      428676    6307766
         3      637031    5697069
         4      541146    5268393
         5      592935    4631362
         6      501485    4090216
         7      606914    3497281
         8      460520    2995796

         9      392898    2388882 1 preceding
        10      510117    1928362 current row
        11      532889    1535464 unbounded following
        12      492458    1025347 unbounded following

       1928362 = 392898 + 510117 + 532889 + 492458
       "1 preceding" means the row before the current one (392898) joins the frame of the
       current row (510117); "unbounded following" means every later row (532889, 492458)
       joins as well.
    */

    // release the local Spark resources before the JVM exits
    spark.stop()
  }
}
