package com.lmq

//import com.lmq.Utils.MyUtils.{combTimeList, combTolist, toSingleCSV, toStrFnc}
import com.lmq.Utils.MyUtils._
import com.lmq.Utils.SparkSessionSingleton
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, collect_list, to_timestamp, unix_timestamp}
import org.apache.spark.sql.types.{StringType, StructField, StructType}


/**
 * Preprocessing step 2 — filtering:
 * drop ItemIds that occur fewer than 5 times in the whole dataset, and
 * drop sessions that contain only a single ItemId.
 *
 * Reads the raw view/purchase CSVs, filters them via Spark SQL, orders the
 * surviving events by their combined (eventdate, timeframe) key, and writes
 * the result as a single headerless CSV under `path3`.
 */
object filterLen {
  // Quiet Spark's INFO chatter; keep warnings.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  // Local standalone session; master is hard-coded for single-machine runs.
  val spark: SparkSession = SparkSession.builder()
    .master("local[*]")
    .appName(this.getClass.getName)
    .getOrCreate()

  val path1 = "./data/train-item-views.csv"   // input: item-view events
  val path2 = "./data/train-purchases.csv"    // input: purchase events
  val path3 = "./output/filteredLen"          // output: filtered, time-sorted CSV

  // Schema for train-item-views.csv — every column kept as a raw string.
  val schema =  StructType(
    Array(
      StructField("session_id", StringType,nullable = true),
      StructField("user_id", StringType, nullable = true),
      StructField("item_id", StringType, nullable = true),
      StructField("timeframe", StringType, nullable = true),
      StructField("eventdate", StringType, nullable = true)
    )
  )
  // Schema for train-purchases.csv — column order matches the raw file.
  val schema2 =  StructType(
    Array(
      StructField("sessionId", StringType,nullable = true),
      StructField("userId", StringType, nullable = true),
      StructField("timeframe", StringType, nullable = true),
      StructField("eventdate", StringType, nullable = true),
      StructField("ordernumber", StringType, nullable = true),
      StructField("itemId", StringType, nullable = true)
    )
  )

  /**
   * Runs the full filter pipeline: load both CSVs, drop rare items, drop
   * short sessions, sort by event time, and write a single CSV part file.
   *
   * Side effects: prints preview tables via `show`, registers temp views
   * ("v", "xtable", "unsortTable") and the `combTimeList` UDF on the shared
   * session, and writes to `path3` (fails if that directory already exists).
   */
  def UnionDataset(): Unit = {
    // Shared reader config: semicolon-delimited CSV with a header row.
    // (Explicit schemas are supplied below, so schema inference is not needed.)
    val reader = spark.read
      .option("header", value = true)
      .option("delimiter", ";")

    val frame1 = reader
      .schema(schema)
      .csv(path1)
    frame1.show(20, truncate = false)

    // Per-session list of viewed items — preview/debug output only; the
    // aggregated frame is not used by the pipeline below.
    val frame2 = frame1.groupBy(col("session_id"))
      .agg(collect_list(col("item_id")).as("seqItems"))
    frame2.show(20, truncate = false)

    // Purchases; rename sessionId -> session_id so it lines up with the views data.
    val frame4 = reader
      .schema(schema2)
      .csv(path2)
      .toDF("session_id", "userId", "timeframe", "eventdate", "ordernumber", "itemId")
    frame4.show(20, truncate = false)

    frame4.createTempView("v")

    spark.udf.register("combTimeList", combTimeList)
    // Keep only itemIds that occur at least 5 times in the whole dataset
    // (was `cnt>=1`, which is a no-op since a grouped count is always >= 1 —
    // fixed to match the documented ">= 5 occurrences" rule), then rank each
    // session's events by their combined (eventdate, timeframe) time key.
    spark.sql(
      """select w.session_id,w.itemId,w.userId,w.timeframe,w.eventdate,
        |rank() over (partition by session_id order by combTimeList(eventdate,timeframe)) as TimeRank
        |from
        |( select v.* from
        |     (select ItemId from
        |           ( select itemId, count(1) as cnt
        |               from v
        |                 group by ItemId
        |            ) A where cnt>=5
        |      ) B
        |       left join v
        |       on B.itemId=v.itemId
        |) w
        |""".stripMargin).createTempView("xtable")

    // NOTE(review): `cnts>4` keeps only sessions with at least 5 remaining
    // events, but the header doc says only single-item sessions should be
    // dropped (which would be `cnts>1`) — confirm the intended threshold.
    spark.sql(
      """
        |select xtable.session_id,xtable.itemId,xtable.userId,xtable.timeframe,xtable.eventdate
        |from
        |( select session_id from
        |       (select session_id,sum(1) as cnts from xtable group by session_id) groupedtable where cnts>4
        | ) ltable
        |left join xtable
        |on ltable.session_id=xtable.session_id
        |""".stripMargin)
      .createTempView("unsortTable")

    // Globally sort by the combined time key and write one headerless CSV
    // part file (coalesce(1) forces a single output partition).
    spark.sql(
      """
        |select * from unsortTable
        |order by combTimeList(eventdate,timeframe)
        |""".stripMargin)
      .coalesce(1)
      .write
      .option("header", "false")
      .csv(path3)
    println("Done.")
  }

  def main(args: Array[String]): Unit = {
    UnionDataset()
  }

}
