package com.gjy.learning.scala

import org.apache.spark.sql.SparkSession

import scala.language.postfixOps

/** Use Scala (Spark) API to implement the following task.
 * Given the spark Dataframe as:
 * [
 * ('ABC17969(AB)', '1', 'ABC17969', 2022),
 * ('ABC17969(AB)', '2', 'CDC52533', 2022),
 * ('ABC17969(AB)', '3', 'DEC59161', 2023),
 * ('ABC17969(AB)', '4', 'F43874', 2022),
 * ('ABC17969(AB)', '5', 'MY06154', 2021),
 * ('ABC17969(AB)', '6', 'MY4387', 2022),
 *
 * ('AE686(AE)', '7', 'AE686', 2023),
 * ('AE686(AE)', '8', 'BH2740', 2021),
 * ('AE686(AE)', '9', 'EG999', 2021),
 * ('AE686(AE)', '10', 'AE0908', 2021),
 * ('AE686(AE)', '11', 'QA402', 2022),
 * ('AE686(AE)', '12', 'OM691', 2022)
 * ]
 *
 * Schema: peer_id, id_1, id_2, year.
 * Process:
 * 1.For each peer_id, get the year when peer_id contains id_2, for example for ‘ABC17969(AB)’ year is 2022.
 * 2.Given a size number, for example 3.
 * For each peer_id count the number of each year (which is smaller or equal than the year in step1).
 * For example, for ‘ABC17969(AB)’, the count should be:
 * 2022, 4
 * 2021, 1
 * (2023 is bigger than 2022, hence do not include)
 * 3.Order the value in step 2 by year and check if the count number of the first year is bigger or equal than
 * the given size number. If yes, just return the year.
 * If not, plus the count number from the biggest year to next year until the count number is bigger or equal than
 * the given number. For example, for ‘AE686(AE)’, the year is 2023, and count are:
 * 2023, 1
 * 2022, 2,
 * 2021, 3
 * As 1(2023 count) + 2(2022 count) >= 3 (given size number), the output would be 2023, 2022.
 *
 * The final expected output for the given example would be:
 * [
 * ('ABC17969(AB)', 2022),
 * ('AE686(AE)', 2022),
 * ('AE686(AE)', 2023),
 * ]
 *
 *
 * Requirement:
 * 1.Share your implement code;
 * 2.Write unit test;
 * 3.Add screenshot for the run result.
 *
 * Tips:
 * If the given number is 5, dataframe is:
 * [
 * ('AE686(AE)', '7', 'AE686', 2022),
 * ('AE686(AE)', '8', 'BH2740', 2021),
 * ('AE686(AE)', '9', 'EG999', 2021),
 * ('AE686(AE)', '10', 'AE0908', 2023),
 * ('AE686(AE)', '11', 'QA402', 2022),
 * ('AE686(AE)', '12', 'OA691', 2022),
 * ('AE686(AE)', '12', 'OB691', 2022),
 * ('AE686(AE)', '12', 'OC691', 2019),
 * ('AE686(AE)', '12', 'OD691', 2017)
 * ]
 * Then output would be:
 * [
 * ('AE686(AE)',2022),
 * ('AE686(AE)',2021)
 * ]
 *
 * If the given number is 7, the output would be:
 * [
 * ('AE686(AE)',2022),
 * ('AE686(AE)',2021),
 * ('AE686(AE)',2019)
 * ]
 */
object TaskOne_SQL {

  /** Runs the three-step year-selection process from the task description
   * (see the file-level comment) over the hard-coded sample DataFrame from
   * the "Tips" section, printing the intermediate and final results.
   *
   * @param size threshold: years are kept, newest first, while the running
   *             count of the preceding years is still below this number
   * @param sp   active SparkSession used to build the DataFrame and run SQL
   */
  def dealWithSizeDF(size: Int, sp: SparkSession): Unit = {

    // Sample rows copied from the "Tips" section of the task description.
    val data = Seq(
      ("AE686(AE)", "7", "AE686", 2022),
      ("AE686(AE)", "8", "BH2740", 2021),
      ("AE686(AE)", "9", "EG999", 2021),
      ("AE686(AE)", "10", "AE0908", 2023),
      ("AE686(AE)", "11", "QA402", 2022),
      ("AE686(AE)", "12", "OA691", 2022),
      ("AE686(AE)", "12", "OB691", 2022),
      ("AE686(AE)", "12", "OC691", 2019),
      ("AE686(AE)", "12", "OD691", 2017)
    )

    // Column names matching the task schema: peer_id, id_1, id_2, year.
    val schema = List(
      "peer_id", "id_1", "id_2", "year"
    )

    // Create the DataFrame
    val df = sp.createDataFrame(data).toDF(schema: _*)
    df.show()
    // createOrReplaceTempView (rather than createTempView) so this method can
    // be invoked more than once on the same SparkSession without failing with
    // "temporary view 'df' already exists".
    df.createOrReplaceTempView("df")

    // Step 1: for each peer_id, the year of the row whose id_2 string is
    // contained in the peer_id (the "self" row), e.g. AE686 in 'AE686(AE)'.
    val step1Result = sp.sql(
      "SELECT peer_id, year FROM df WHERE peer_id LIKE '%'||id_2||'%'")
    step1Result.createOrReplaceTempView("step1Result")

    // Step 2: per (peer_id, year) counts, keeping only years that are <= the
    // step-1 year for that peer_id. MIN(S.year) collapses the joined step-1
    // year into the group.
    // NOTE(review): if step 1 ever produced several rows for one peer_id,
    // this join would also inflate count(1) — confirm the id_2 self-match is
    // unique per peer_id (it is for the sample data).
    val step2Result = sp.sql(
      """SELECT t.peer_id, t.year, t.count
        |  FROM
        |    (SELECT df.peer_id, df.year, count(1) count, MIN(S.year) min_year
        |       FROM df
        |       JOIN step1Result S
        |         ON df.peer_id = S.peer_id
        |   GROUP BY df.peer_id, df.year) t
        | WHERE t.year <= t.min_year""".stripMargin)

    step2Result.show()
    step2Result.createOrReplaceTempView("step2Result")

    // Step 3: walk the years from newest to oldest and keep each year while
    // the cumulative count of the PREVIOUS (newer) years is still below
    // `size`, i.e. cumulative_count - count < size. The first two predicates
    // are special cases already implied by the third; they are kept so the
    // query mirrors the wording of the task statement.
    val results = sp.sql(
      s"""SELECT peer_id, year
         |  FROM (
         |   SELECT peer_id, year, count,
         |          SUM(count) OVER(PARTITION BY peer_id ORDER BY year DESC) cumulative_count,
         |          RANK() OVER(PARTITION BY peer_id ORDER BY year DESC) cumulative_rn
         |     FROM step2Result )
         | WHERE (cumulative_rn = 1 AND count >= $size)
         |    OR (cumulative_count <= $size)
         |    OR (cumulative_count - count < $size)
         | ORDER BY peer_id, year DESC""".stripMargin)
    results.show()
  }

  /** Entry point: runs the pipeline on a local SparkSession with size = 7. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("TaskOne")
      .master("local[*]")
      .getOrCreate()
    dealWithSizeDF(7, spark)
    spark.stop()
  }
}
