package com.myiptv

import com.lmq.Utils.MyUtils.{toSingleCSV, trancUseSym}
import com.lmq.Utils.{SetAccumulatorV2, SparkSessionSingleton}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions._

object extractInfo2 {

  // Silence Spark's verbose INFO logging; keep warnings and errors.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  // Shared SparkSession for this job (null => singleton's default config).
  val spark = SparkSessionSingleton.getInstance(null)

  // Root directory of the MovieLens-25M derived data files.
  // Cluster alternative kept for reference:
  //   val BasePath = "/workspace/ml-25m/"
  val BasePath = "D:\\2019开题\\TGSRec-master\\TGSRec-master\\ML25M\\ml-25m\\"

  // Movie metadata CSV; expects a header row containing at least the
  // movieId and genres columns. Kept as `var` so callers may repoint it.
  var path1: String = BasePath + "mov2.csv"

  /**
   * Extracts (itemId, genres) pairs for every distinct item that appears in
   * the interaction-history file and writes them out as a single CSV.
   *
   * Steps:
   *   1. Load the movie metadata CSV (header row; all columns as strings).
   *   2. Scan history2.txt and collect the distinct item ids found in
   *      columns 2..n of each line, via a custom set accumulator.
   *   3. Left-join the distinct ids against the metadata to attach genres
   *      (ids with no metadata keep a null genres column).
   *   4. Persist the result with toSingleCSV (no header row).
   *
   * Side effects: reads files under BasePath, writes
   * BasePath + "ItemWithCate", and prints diagnostics to stdout.
   */
  def ExtractCateInfo(): Unit = {

    println("Hello, beginning processing...")
    println(spark.sparkContext.appName)

    // Metadata reader: header present, keep every column as a raw string.
    // Option key spelled per the documented form "inferSchema"
    // (Spark CSV option keys are case-insensitive, so behavior is unchanged).
    val frame = spark.read
      .option("header", value = true)
      .option("inferSchema", value = false)
      .option("delimiter", ",")
      .csv(path1)
      .withColumn("itemId", col("movieId"))
    frame.show(false)

    // Each history line is comma-separated; item ids start at index 2
    // (presumably columns 0-1 are user/session fields — TODO confirm).
    val history1 = spark.sparkContext.textFile(BasePath + "history2.txt")

    // Custom set-valued accumulator: deduplicates item ids across partitions.
    val mySetAcc = new SetAccumulatorV2
    // Built-in collection accumulator kept only to demonstrate that it is
    // list-like and does NOT deduplicate (see the result note below).
    val mySetAcc2 = spark.sparkContext.collectionAccumulator[String]("mysecondAcc")
    // Custom accumulators must be registered with the context before use.
    spark.sparkContext.register(mySetAcc, "mySetAcc")

    // foreach is an action, which is the supported place to update
    // accumulators (updates inside transformations may be re-executed).
    history1.foreach { line =>
      val fields = line.split(",")
      for (itemId <- fields.slice(2, fields.length)) {
        mySetAcc.add(itemId)
        mySetAcc2.add(itemId)
      }
    }

    println(s"mySetAcc length is ${mySetAcc.value.size} ")
    println(s"mySetAcc   is ${mySetAcc.value} ")
    println(s"mySetAcc2 length is ${mySetAcc2.value.size} ")

    /**
     * Result is as follows:
     * mySetAcc length is 14405
     * mySetAcc2 length is 44183
     *
     * which represents that spark.sparkContext.collectionAccumulator
     * is a list accumulator, not a set accumulator
     */

    // Pair each id with itself so createDataFrame receives Product rows.
    // (Replaces the original self-zip, which built the same pairs indirectly.)
    val tuples: Array[(String, String)] =
      mySetAcc.value.toArray[String].map(id => (id, id))

    // Left outer join keeps every history id even when metadata is missing.
    val frame1 = spark.createDataFrame(tuples).select("_1").toDF("itemId")
      .join(
        frame,
        Seq("itemId"),
        "left_outer"
      )
      .select(
        col("itemId"),
        col("genres")
      )

    println(frame1.count())
    // false => write without a header row.
    toSingleCSV(frame1, false, BasePath + "ItemWithCate")

    println("Done")
  }

  /** Entry point: runs the category-extraction pipeline. */
  def main(args: Array[String]): Unit = {
    ExtractCateInfo()
  }

}
