package com.jnpc.spark
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object TfIdf {

  // NOTE(review): unused in this file; kept in case other sources reference
  // TfIdf.RawDataRecord. Candidate for removal after a repo-wide search.
  case class RawDataRecord(pmgroup: Double, text: String)

  /**
   * Spark batch job that computes TF-IDF features for tab-separated text data.
   *
   * Expected input: one record per line, tab-separated, where column index 1
   * holds the (pre-segmented) text and column index 2 holds the group name.
   * Each distinct group name is mapped to an integer label; the text is
   * tokenized, hashed into term frequencies, and rescaled with IDF. The
   * resulting (features, label) pairs are written as JSON to the supplied
   * output path.
   *
   * Usage: TfIdf &lt;filepath&gt; &lt;savepath&gt;
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      // Bug fix: the old message advertised "PMWorkGroup <filepath> <threadsCount>",
      // but this program actually takes an input path and an output path.
      println("Usage: TfIdf <filepath> <savepath>")
      System.exit(1)
    }
    val Array(filepath, savepath) = args

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("TfIdf")
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("WARN")

    // Split each tab-separated line into its fields.
    val lines = sc.textFile(filepath, 1).map(_.split("\t"))

    // Map each distinct group name (field index 2) to a stable integer label.
    // Sorting before zipWithIndex makes the label assignment deterministic.
    val pmgroupMap = lines.map(fields => fields(2)).distinct.collect.sorted.zipWithIndex.toMap

    // (label, text) rows; field index 1 is the text column.
    val srcRDD = lines.map { fields =>
      Row(pmgroupMap(fields(2)), fields(1))
    }

    val schema =
      StructType(
        StructField("label", IntegerType, false) ::
          StructField("seg", StringType, false) :: Nil)

    // Schema already names the columns, so the redundant .toDF("label","seg")
    // was dropped. The dead randomSplit (its consumers were commented out)
    // was removed as well.
    val sentenceData = spark.createDataFrame(srcRDD, schema)

    // Tokenize -> hashed term frequencies -> IDF rescaling.
    val tokenizer = new Tokenizer().setInputCol("seg").setOutputCol("words")
    val wordsData = tokenizer.transform(sentenceData)

    // NOTE(review): 26 hash buckets is extremely small for text features and
    // will cause heavy hash collisions — confirm this value is intentional.
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(26)
    val featurizedData = hashingTF.transform(wordsData)

    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel = idf.fit(featurizedData)
    val rescaledData = idfModel.transform(featurizedData)

    // Quick sanity peek at the first few feature rows.
    rescaledData.select("features", "label").take(3).foreach(println)

    // Bug fix: write to the user-supplied <savepath>; the second CLI argument
    // was previously ignored in favor of a hard-coded Windows path.
    rescaledData.select("features", "label").write.format("json").save(savepath)

    spark.stop()
  }
}
