package com.lmq

import com.lmq.Utils.MyUtils._
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{StringType, StructField, StructType}


object statistics {

  // Silence Spark's verbose INFO logging; warnings and errors still surface.
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

  /** Local Spark session used by every job in this object. */
  val spark: SparkSession = SparkSession.builder()
    .master("local[*]")
    .appName(this.getClass.getName)
    .getOrCreate()

  /** Raw item-view log (`;`-delimited, with header). */
  val path1 = "./data/train-item-views.csv"
  /** Raw purchase log (`;`-delimited, with header). */
  val path2 = "./data/train-purchases.csv"
  /** Pre-filtered views produced by an earlier step (`,`-delimited, no header). */
  val path3 = "./output/filteredLen"

  /** Schema of the raw item-view CSV (path1). All columns read as strings. */
  val schema = StructType(
    Array(
      StructField("session_id", StringType, nullable = true),
      StructField("user_id", StringType, nullable = true),
      StructField("item_id", StringType, nullable = true),
      StructField("timeframe", StringType, nullable = true),
      StructField("eventdate", StringType, nullable = true)
    )
  )

  /** Schema of the purchase CSV (path2). All columns read as strings. */
  val schema2 = StructType(
    Array(
      StructField("sessionId", StringType, nullable = true),
      StructField("userId", StringType, nullable = true),
      StructField("timeframe", StringType, nullable = true),
      StructField("eventdate", StringType, nullable = true),
      StructField("ordernumber", StringType, nullable = true),
      StructField("itemId", StringType, nullable = true)
    )
  )

  /**
   * Alternate column ordering (session_id, itemId, userId, ...).
   * NOTE(review): not referenced by any method in this file — kept because it is
   * a public member other code may use; confirm before removing.
   */
  val schema3 = StructType(
    Array(
      StructField("session_id", StringType, nullable = true),
      StructField("itemId", StringType, nullable = true),
      StructField("userId", StringType, nullable = true),
      StructField("timeframe", StringType, nullable = true),
      StructField("eventdate", StringType, nullable = true)
    )
  )

  /** Schema of the filtered views file (path3): session_id, item_id, user_id, ... */
  val schema4 = StructType(
    Array(
      StructField("session_id", StringType, nullable = true),
      StructField("item_id", StringType, nullable = true),
      StructField("user_id", StringType, nullable = true),
      StructField("timeframe", StringType, nullable = true),
      StructField("eventdate", StringType, nullable = true)
    )
  )

  /**
   * Builds training sequences from the RAW item-view log (path1) joined with
   * purchases, writing the result to ./output/TrainData.
   */
  def UnionDataset(): Unit =
    buildTrainData(
      viewsPath = path1,
      viewsSchema = schema,
      viewsHaveHeader = true,
      viewsDelimiter = ";",
      outputPath = "./output/TrainData"
    )

  /**
   * Same pipeline as [[UnionDataset]], but starting from the PRE-FILTERED
   * views file (path3), writing the result to ./output/filteredTrainData.
   */
  def goWithFilterData(): Unit =
    buildTrainData(
      viewsPath = path3,
      viewsSchema = schema4,
      viewsHaveHeader = false,
      viewsDelimiter = ",",
      outputPath = "./output/filteredTrainData"
    )

  /**
   * Shared pipeline previously duplicated across UnionDataset/goWithFilterData.
   *
   * Steps:
   *   1. Read the views CSV and collect each session's item ids into `seqItems`.
   *   2. Read the purchase CSV (path2) and normalize its column names.
   *   3. Inner-join on session_id, append the purchased item to the view
   *      sequence via `combTolist`, and order by event time.
   *   4. Drop anonymous rows (userId == 'NA') and write a single CSV of
   *      (userId, eventdate, stringified sequence) via `toSingleCSV`.
   *
   * @param viewsPath       location of the session-view CSV
   * @param viewsSchema     schema matching that CSV's column layout
   * @param viewsHaveHeader whether the CSV carries a header row
   * @param viewsDelimiter  field delimiter of the CSV
   * @param outputPath      directory for the single-file CSV output
   */
  private def buildTrainData(
      viewsPath: String,
      viewsSchema: StructType,
      viewsHaveHeader: Boolean,
      viewsDelimiter: String,
      outputPath: String): Unit = {

    // Explicit schema supplied, so no inferSchema option is needed here.
    val views = spark.read
      .option("header", value = viewsHaveHeader)
      .option("delimiter", viewsDelimiter)
      .schema(viewsSchema)
      .csv(viewsPath)
    views.show(20, truncate = false)

    // One row per session with the ordered list of viewed items.
    val sessionSeqs = views.groupBy(col("session_id"))
      .agg(collect_list(col("item_id")).as("seqItems"))
    sessionSeqs.show(20, truncate = false)

    // Purchases; rename sessionId -> session_id so the join key matches.
    val purchases = spark.read
      .option("header", value = true)
      .option("delimiter", ";")
      .schema(schema2)
      .csv(path2)
      .toDF("session_id", "userId", "timeframe", "eventdate", "ordernumber", "itemId")
    purchases.show(20, truncate = false)

    // combTolist appends the purchased itemId to the session's view sequence
    // (project helper from MyUtils — see com.lmq.Utils.MyUtils).
    val concatedFrame = sessionSeqs
      .join(purchases, Seq("session_id"), "inner")
      .select(
        col("session_id"),
        col("userId"),
        combTolist(col("seqItems"), col("itemId")).as("trainSeqs"),
        col("eventdate"),
        col("timeframe")
      )
      .withColumn("Longtimestamp", unix_timestamp(to_timestamp(col("eventdate"))))
      .orderBy("Longtimestamp", "timeframe")

    println("all lines are " + concatedFrame.count() + " lines. ")

    // Keep only rows with a known user; the same filter was previously
    // computed twice (once for counting, once for output).
    val knownUsers = concatedFrame.filter("userId != 'NA'")

    val outputFrame = knownUsers.select(
      col("userId"),
      col("eventdate"),
      toStrFnc(col("trainSeqs")) // project helper: array column -> string
    )

    println("After filtering, we have " + knownUsers.count())
    // Project helper: coalesce to a single CSV file (second arg: header flag).
    toSingleCSV(outputFrame, false, outputPath)
  }

  def main(args: Array[String]): Unit = {
    // UnionDataset()
    goWithFilterData()
  }

}
