package com.lmq

import com.lmq.preprocess.spark
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}

import scala.collection.mutable

/**
 * Main process for items.
 * Generates an item-information file with the format "itemID,category1|category2|...",
 * where "|" separates the individual category values.
 */
object genAllItemInfo {

  // Silence Spark's chatty INFO logging; warnings and errors still show.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  // Local SparkSession for this job (note: this val shadows the imported
  // com.lmq.preprocess.spark value inside this object).
  val spark = SparkSession.builder()
    .master("local[*]")
    .appName("Test")
    .getOrCreate()

  /**
   * Entry point: builds the item-information file.
   *
   * Reads the yoochoose click log and a preprocessed session-history file,
   * extracts the distinct set of item ids appearing in the histories, joins
   * them back to the click log to recover each item's Context values,
   * aggregates all distinct Context values per item into one "|"-separated
   * string, and writes the result as a single CSV file.
   *
   * @param args unused
   */
  def main(args: Array[String]): Unit = {
    val utils = new utils()

    // Schema of the raw click log (yoochoose-clicks.dat).
    val clickSchema = StructType(
      Array(
        StructField("SessionId", IntegerType, nullable = true),
        StructField("TimeStr", StringType, nullable = true),
        StructField("ItemId", IntegerType, nullable = true),
        StructField("Context", StringType, nullable = true)
      )
    )
    // Single-column schema for the distinct-item DataFrame built below.
    val itemSchema = StructType(
      Array(
        StructField("ItemId", IntegerType, nullable = true)
      )
    )

    // Raw click events: SessionId, TimeStr, ItemId, Context.
    val clicks: DataFrame = spark.read
      .schema(clickSchema)
      .option("header", value = true)
      .csv("file:///home/iptv/yoochoose/yoochoose-clicks.dat")

    // NOTE(review): no $-interpolator or Dataset encoders are visibly used,
    // so this import looks unnecessary — confirm utils.toStrFnc does not
    // depend on it before removing.
    import spark.implicits._

    // Each history line looks like "<sessionId>,<head>$item1$item2$...":
    // take the part after the comma, split on '$', and drop the first token
    // (slice(1, length)) so only the item ids remain.
    val history2: RDD[String] = spark.sparkContext
      .textFile("file:///home/iptv/yoochoose/OneOsixfour_sessionAddtarg/history2.txt")
    val itemTokens: RDD[Array[String]] =
      history2.map(_.split(",")).map(x => x(1).split("\\$"))
    val allItemSet: RDD[Row] = itemTokens
      .map(x => x.slice(1, x.length))
      .flatMap((x: Array[String]) => x)
      .map(x => Row(x.toInt))
      .distinct()

    val df: DataFrame = spark.createDataFrame(allItemSet, itemSchema)
    df.show(false)
    println("=======================") // was print(): left the line unterminated
    println("all item count is " + df.count())
    println("=======================")

    // Left join keeps every item from the histories even when it never
    // appears in the click log (its Context will then be null).
    val df3 = df.join(clicks, Seq("ItemId"), "left")
      .select("ItemId", "Context")
      .distinct()
      .cache() // reused by show / count / distinct-count / groupBy below

    df3.show(false)
    println("now we get =>" + df3.count())
    println("all distinct item is -=>:" + df3.select("ItemId").distinct().count())

    // One row per item: all of its distinct Context values joined with "|".
    val df4 = df3.groupBy(col("ItemId"))
      .agg(collect_set(col("Context")).as("Context"))
      .select(
        col("ItemId"),
        utils.toStrFnc("|")(col("Context"))
      )
    df4.show(false)

    // coalesce(1) so the output directory contains a single CSV part file.
    df4.coalesce(1)
      .write
      .option("header", "false")
      .csv("file:///home/iptv/yoochoose/ItemInfor")

    spark.stop() // release local Spark resources cleanly
  }

}
