package com.lmq

import com.lmq.GenitemInfo.spark
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.{col, collect_list, udf}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, SparkSession}

import java.util
import scala.collection.mutable

/**
 * TODO: NOT USED.
 *
 * Builds a session dataset from the yoochoose sample: joins click sessions
 * (one row per click event) with the buy log, appends the purchased ItemId to
 * each session's comma-joined click list, re-numbers sessions with a randomly
 * advancing pseudo user id, and writes the result as a single headerless CSV.
 *
 * Output row format: fabricatedUserId, sessionStartTime, "item1,item2,...,boughtItem"
 */
object sessionwithidCombinTarg {

  // Counter advanced by AssigneUid to fabricate user ids.
  // NOTE(review): driver-local mutable state; when the UDF runs on executors
  // each task deserializes its own copy, so the ids are only globally
  // consistent under master("local[*]") as configured below — confirm before
  // running on a cluster.
  var global_cnt = 1

  /**
   * UDF assigning a pseudo user id to each row: with probability 0.5 the
   * counter advances, otherwise the current value is reused, so consecutive
   * rows form runs that share one id. The input column value is ignored.
   *
   * Marked non-deterministic because it depends on `math.random` and shared
   * mutable state; without this, Catalyst may collapse, reorder, or
   * re-evaluate the call and emit inconsistent ids.
   */
  val AssigneUid: UserDefinedFunction = udf((x: String) => {
    if (math.random > 0.5) {
      global_cnt = global_cnt + 1
      global_cnt
    } else global_cnt
  }).asNondeterministic()

  /**
   * Generates a random subset of candidate user ids.
   *
   * userNumber ≈ 1.88 * lines / avglen (lines = session count of the source
   * file, avglen = average session length); each id in [1, userNumber] is
   * kept with probability 0.5.
   *
   * @return a `java.util.ArrayList` of the retained ids, in ascending order
   */
  def genRandomUserId(): util.ArrayList[Int] = {
    val lines = 27947 // number of sessions in the source file
    val avglen = 6.6  // average session length
    val userNumber = (1.88 * lines / avglen).toInt
    val ids = new util.ArrayList[Int]()
    for (i <- 1 to userNumber if math.random > 0.5) {
      ids.add(i)
    }
    ids
  }

  // Silence Spark's INFO chatter before the session spins up.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  val spark: SparkSession = SparkSession.builder()
    .master("local[*]")
    .appName("Test")
    .getOrCreate()

  def main(args: Array[String]): Unit = {
    val utils = new utils()

    // Schema of the pre-processed click CSV: one row per (session, item) event.
    val schema = StructType(
      Array(
        StructField("SessionId", IntegerType, nullable = true),
        StructField("ItemId", IntegerType, nullable = true),
        StructField("Context", StringType, nullable = true),
        StructField("Time", DoubleType, nullable = false)
      )
    )

    // Click events (1/64 sample of yoochoose clicks, already filtered).
    val v: DataFrame = spark.read
      .schema(schema)
      .csv("file:///home/iptv/yoochoose/OneOsixfour_v2/part-00000-a24d0f25-8b8d-4e48-bacc-e60f0343ed96-c000.csv")

    v.printSchema()

    println("=====================================")
    v.show(false)

    v.createTempView("v")

    // Collapse each session to the list of its item ids and event times.
    // NOTE(review): collect_list gives no ordering guarantee after a shuffle —
    // confirm the source file is already sorted per session if click order matters.
    val wx = v
      .groupBy(col("SessionId"))
      .agg(collect_list("ItemId")
      .as("ItemIds"), collect_list("Time").as("Time"))

    // Schema of yoochoose-buys.dat: Session ID, Timestamp, Item ID, Price, Quantity.
    val schema3 = StructType(
      Array(
        StructField("SessionId", IntegerType, nullable = true),
        StructField("Timestamp", StringType, nullable = true),
        StructField("ItemId", StringType, nullable = true),
        StructField("Price", DoubleType, nullable = false),
        StructField("quantity", DoubleType, nullable = false)
      )
    )

    // Joins a session's clicked items into "id1,id2,...," — the trailing comma
    // lets the bought ItemId be appended with a plain CONCAT below.
    val combinStr: UserDefinedFunction = udf((x: mutable.WrappedArray[Int]) => x.mkString(",") + ",")

    // Buy (purchase) events.
    val vd: DataFrame = spark.read
      .schema(schema3)
      .csv("file:///home/iptv/yoochoose/yoochoose-buys.dat")

    // Keep only sessions that ended in a purchase, carrying the click list,
    // the bought ItemId, and the per-session time list forward.
    wx.join(vd, Seq("SessionId"), "inner")
      .select(
        col("sessionId"),
        combinStr(col("ItemIds")).as("itemIds"),
        col("ItemId"),
        col("Time")
      ).createTempView("TMPss")

    // Fuse the click list and the bought item into one comma-separated column.
    val w = spark.sql(
      """
        |select SessionId,  CONCAT(itemIds,ItemId) as ItemId,Time
        |from TMPss
        |""".stripMargin)

    // Earliest event time of a session and its human-readable rendering.
    val mymin = udf((x: mutable.WrappedArray[Double]) => x.min)
    val tsp2dateFnc = udf((x: Double) => utils.Tsp2Time(x))

    println("if we get to the ==>w.show,146")
    w.show(100, truncate = false)

    // Order sessions by start time, replace SessionId with the fabricated
    // user id, and write everything as a single CSV part file.
    w
      .orderBy(mymin(col("Time")))
      .select(
        AssigneUid(col("SessionId")),
        tsp2dateFnc(mymin(col("Time"))),
        col("ItemId")
      )
      .coalesce(1).write
      .option("header", "false")
      .csv("file:///home/iptv/yoochoose/OneOsixfour_sessionsRevisevTmpFinal")
    println("Done.")
  }

}
