package com.mvlens

import com.lmq.Utils.MyUtils.{toSingleCSV, toStrFnc}
import com.lmq.Utils.SparkSessionSingleton
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/**
 * Transform timestamp to day format, and split the dataset by day format.
 *
 */
/**
 * Batch job: reads the MovieLens ratings CSV, derives a `day` column from the
 * unix timestamp, groups item interactions into per-(day, user) sessions, and
 * writes sessions longer than 4 items to a single CSV file.
 */
object GenSessions {
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  // Shared session for the whole job; null config falls through to the
  // singleton's defaults (behavior unchanged from the original).
  val spark: SparkSession = SparkSessionSingleton.getInstance(null)

  // Original hard-coded locations, kept as defaults for backward compatibility.
  private val DefaultInputPath  = "D:\\2019开题\\TGSRec-master\\TGSRec-master\\ML25M\\ml-25m\\ratings.csv"
  private val DefaultOutputPath = "D:\\2019开题\\TGSRec-master\\TGSRec-master\\ML25M\\ml-25m\\filteredLen"

  /**
   * Splits the ratings file into daily user sessions and writes the result.
   *
   * Input columns (headerless CSV): _c0 = userId, _c1 = itemId, _c3 = unix
   * timestamp (column meanings inferred from usage — TODO confirm against the
   * ml-25m ratings layout).
   *
   * @param inputPath  headerless ratings CSV to read (defaults to the original path)
   * @param outputPath directory for the single-file CSV output (defaults to the original path)
   */
  def splitSessions(inputPath: String = DefaultInputPath,
                    outputPath: String = DefaultOutputPath): Unit = {
    println(s"Current used spark.version is: ${spark.version}.")

    val frame = spark.read
      .option("header", value = false)
      .option("inferschema", value = false)
      .csv(inputPath)
    frame.printSchema()
    frame.show(false)

    // from_unixtime yields "yyyy-MM-dd HH:mm:ss"; the first 10 chars are the day.
    val sessions = frame
      .withColumn("day", substring(from_unixtime(col("_c3")), 1, 10))
      .groupBy(col("day"), col("_c0"))
      .agg(collect_list(col("_c1")).as("itemSeqs"))
      .orderBy("day")

    // Filter on session length BEFORE dropping the helper column, so the
    // predicate resolves against the visible schema; alias the stringified
    // sequence so the output column has a stable name.
    val toWriteTable = sessions
      .withColumn("len", size(col("itemSeqs")))
      .filter("len>4")
      .select(
        col("_c0"),
        col("day"),
        toStrFnc(col("itemSeqs")).as("itemSeqs")
      )

    toSingleCSV(toWriteTable, false, outputPath)
  }

  def main(args: Array[String]): Unit = {
    splitSessions()
  }
}
