package com.mvlens

import com.lmq.Utils.MyUtils.toSingleCSV
import com.lmq.Utils.{SetAccumulatorV2, SparkSessionSingleton }
import com.lmq.Utils.MyUtils.trancUseSym
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions._

object extractInfo {
  // Keep Spark's own logging down to warnings so the job's println output stays readable.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  // Shared session from the project-wide singleton (null => whatever default config it applies).
  val spark =  SparkSessionSingleton.getInstance(null)

  // MovieLens-100k item metadata CSV (no header). _c0 is the item id;
  // _c5.._c23 appear to be one-hot genre/category flags — see the select below.
  val path1 = "D:\\2019开题\\TGSRec-master\\TGSRec-master\\datasets\\ml-100k\\item.csv"

  /**
   * Builds an (itemId, cateInfo) table for every distinct item id that occurs
   * in the interaction history, where cateInfo is the item's category columns
   * (_c5.._c23 of item.csv) collapsed by `trancUseSym("|")`, and writes the
   * result out as a single CSV via `toSingleCSV`.
   *
   * Side effects only: reads two hard-coded local files, prints diagnostics,
   * and writes one output directory. Returns Unit.
   */
  def ExtractCateInfo(): Unit = {

    println("Hello, beginning processing...")
    println(spark.sparkContext.appName)

    // Read the raw item metadata. No header row, and schema inference is
    // deliberately off so every column stays a string (item ids join as strings).
    val frame = spark.read
      .option("header", value = false)
      .option("inferSchema", value = false)
      .option("delimiter", ",")
      .csv(path1)
      .withColumn("itemId", col("_c0"))
    frame.show(false)

    // Interaction history, one record per line: the fields from index 2 onward
    // are item ids. NOTE(review): the meaning of the first two fields is not
    // visible here — presumably user id and timestamp; confirm against the data.
    val history1 = spark.sparkContext.textFile("D:\\2019开题\\TGSRec-master\\TGSRec-master\\datasets\\ml-100k\\history1.txt")

    // Collect the distinct item ids seen in the history.
    // mySetAcc is a custom set-valued accumulator (deduplicates across tasks);
    // mySetAcc2 is Spark's built-in collectionAccumulator, kept only to
    // demonstrate that it accumulates a *list* (with duplicates), not a set —
    // see the size comparison printed below.
    val mySetAcc = new SetAccumulatorV2
    val mySetAcc2 = spark.sparkContext.collectionAccumulator[String]("mysecondAcc")
    // Custom AccumulatorV2 instances must be registered with the context.
    spark.sparkContext.register(mySetAcc, "mySetAcc")

    history1.foreach { line =>
      // Single pass over the item-id slice; feed both accumulators at once
      // (the original sliced the array twice, once per accumulator).
      for (id <- line.split(",").drop(2)) {
        mySetAcc.add(id)
        mySetAcc2.add(id)
      }
    }

    println(s"mySetAcc length is ${mySetAcc.value.size} ")
    println(s"mySetAcc   is ${mySetAcc.value} ")
    println(s"mySetAcc2 length is ${mySetAcc2.value.size} ")

    /**
     * Result is as follows:
     * mySetAcc length is 14405
     * mySetAcc2 length is 44183
     *
     * which represents that spark.sparkContext.collectionAccumulator
     * is a list accumulator, not a set accumulator
     */

    // One pair per distinct item id. Pairing each id with itself yields the
    // same _1/_2 two-column frame the original self-zip produced; only _1 is
    // used downstream.
    val tuples: Array[(String, String)] = mySetAcc.value.toArray[String].map(x => (x, x))

    // Category flag columns _c5.._c23, generated instead of 19 hand-written col(...) calls.
    val cateCols = (5 to 23).map(i => col(s"_c$i"))

    val frame1 = spark.createDataFrame(tuples).select("_1").toDF("itemId")
      .join(
        frame,
        Seq("itemId"),
        // left outer: keep history item ids even when item.csv has no row for them.
        "left_outer"
      )
      .select(
        col("itemId"),
        // trancUseSym("|") presumably turns the flag array into a "|"-separated
        // list of the set category indices (see sample output below) — it is a
        // project helper whose exact contract is not visible here.
        trancUseSym("|")(array(cateCols: _*)).as("cateInfo")
      )

    /**
     * Result is
     * +------+--------+
     *|itemId|cateInfo|
     *+------+--------+
     *|420   |3|4|12  |
     *|249   |5       |
     *|1360  |5       |
     *|879   |1|16|17 |
     *|1286  |5|12|14 |
     *|1068  |8       |
     *|212   |8       |
     *|1402  |8       |
     *|842   |4|5|8   |
     *|13    |5       |
     *|1078  |3|4     |
     *|1031  |2|4     |
     *|397   |1       |
     *|350   |1|13|16 |
     *|1587  |18      |
     *|513   |13|16   |
     *
     */
    frame1
      .show(false)
    println(frame1.count())

    // Persist the joined table as a single CSV file (no header, per toSingleCSV's flag).
    toSingleCSV(frame1, false, "D:\\2019开题\\TGSRec-master\\TGSRec-master\\datasets\\ml-100k\\ItemWithCate")

    println("Done")

  }

  def main(args: Array[String]): Unit = {

    ExtractCateInfo()

  }

}
