package com.hliushi.spark.exmaple

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.min
import org.apache.spark.sql.types.{DateType, StringType, StructField, StructType}
import org.junit.Test

/**
 * Interview exercise: given a (date, user) activity log, count how many
 * users appear for the first time on each date ("new users per day").
 *
 * author: Hliushi
 * date: 2021/5/30 8:40
 */
class Interview {

  // Shared local SparkSession for all tests in this class ("local[6]" = 6 worker threads).
  val spark: SparkSession = SparkSession.builder()
    .appName("interview_problem")
    .master("local[6]")
    .getOrCreate()

  // Keep test output readable: suppress INFO-level Spark logging.
  spark.sparkContext.setLogLevel("WARN")

  import spark.implicits._

  /** Raw data (date, user):
   *     2017-01-01  a
   *     2017-01-01  b
   *     2017-01-01  c
   *     2017-01-02  a
   *     2017-01-02  b
   *     2017-01-02  d
   *     2017-01-03  b
   *     2017-01-03  e
   *     2017-01-03  f
   *
   * Requirement — count users seen for the FIRST time on each date:
   *   2017-01-01 -> 3 new users (a, b, c)
   *   2017-01-02 -> 1 new user  (d)
   *   2017-01-03 -> 2 new users (e, f)
   *
   * DataFrame solution: a user is "new" on min(date) over their rows,
   * so aggregate min(date) per user, then count users per first date.
   */
  @Test
  def test01(): Unit = {
    val schema = StructType(
      List(
        StructField("date", DateType),
        StructField("user", StringType)
      )
    )
    val dataFrame = spark.read
      .schema(schema)
      .csv("dataset/interview_data/test01.csv")

    // Step 1: first appearance date per user.
    // Step 2: count users sharing each first-appearance date.
    dataFrame.groupBy($"user")
      .agg(min($"date").as("first_date"))
      .groupBy($"first_date")
      .count()
      .sort($"first_date")
      .show()
    //  +----------+-----+
    //  |first_date|count|
    //  +----------+-----+
    //  |2017-01-01|    3|
    //  |2017-01-02|    1|
    //  |2017-01-03|    2|
    //  +----------+-----+
  }

  /**
   * Same requirement solved at the RDD level.
   *
   * Pipeline: (user, date) pairs -> min date per user -> count per date.
   * Uses reduceByKey with a min combiner instead of groupByKey + .min:
   * reduceByKey combines map-side, so only one (user, minDate) pair per
   * user is shuffled rather than every raw row.
   */
  @Test
  def test01_2(): Unit = {
    val source = spark.sparkContext.textFile("dataset/interview_data/test01.csv")

    val newUsersPerDay = source
      .map { line =>
        val cols = line.split(",")
        cols(1) -> cols(0) // (user, date)
      }
      // Earliest date per user; ISO dates compare correctly as strings.
      .reduceByKey((a, b) => if (a <= b) a else b)
      .map { case (_, firstDate) => firstDate -> 1 }
      // Result is tiny (one entry per distinct date), safe to collect to the driver.
      .countByKey()
      .toList
      .toDF("date", "count")
      .sort($"date")

    newUsersPerDay.show()
    //  +----------+-----+
    //  |      date|count|
    //  +----------+-----+
    //  |2017-01-01|    3|
    //  |2017-01-02|    1|
    //  |2017-01-03|    2|
    //  +----------+-----+
  }

  /** Prints the first CSV column (the date) of every row in ascending order. */
  @Test
  def sortTest(): Unit = {
    val source = spark.sparkContext.textFile("dataset/interview_data/test01.csv")

    source
      .map(_.split(",")(0))
      .sortBy(identity)
      .collect()
      .foreach(println)
  }
}