package bayes

import java.util

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

/**
  * Created by hunter.coder 涛哥  
  * 2019/5/6 11:14
  * 交流qq:657270652
  * Version: 1.0
  * 更多学习资料：https://blog.csdn.net/coderblack/
  * Description:
  **/
object DataProcess {

  // Field separator between the numeric label and the segmented text.
  // "\u0001" is the same control character the original "\001" produced;
  // octal escapes are deprecated (and removed in recent Scala versions).
  private val FieldDelimiter = "\u0001"

  /**
    * Builds the labelled training corpus for the Bayes classifier.
    *
    * Reads three comment corpora (poor / general / good), segments every
    * line with HanLP, drops stop words, prefixes each line with its class
    * label (0 = poor, 1 = general, 2 = good) separated by [[FieldDelimiter]],
    * and writes the union of all three as plain text.
    */
  def main(args: Array[String]): Unit = {

    // Silence Spark's verbose INFO logging.
    Logger.getLogger("org").setLevel(Level.WARN)
    val spark = SparkSession.builder().appName("bayes_dp").master("local").getOrCreate()

    import spark.implicits._
    // Explicit .asScala conversions instead of the deprecated implicit
    // scala.collection.JavaConversions (removed in Scala 2.13).
    import scala.collection.JavaConverters._

    val generalDs = spark.read.textFile("G:\\testdata\\comment\\general")
    val goodDs = spark.read.textFile("G:\\testdata\\comment\\good")
    val poorDs = spark.read.textFile("G:\\testdata\\comment\\poor")

    // Collect the stop-word dictionary on the driver as an immutable Set
    // (O(1) membership test, versus the O(stopwords) list scan per token
    // that the previous java.util.List + `--` approach performed) and
    // broadcast it once to all executors.
    val stopwords = spark.sparkContext.broadcast(
      spark.read.textFile("G:\\testdata\\stopwords").collect().toSet)

    // Segment each line of `ds` with HanLP, remove stop words, and prefix
    // the given class label. One helper replaces the three copy-pasted
    // mapPartitions blocks; output lines are identical to before.
    def labelAndSegment(ds: org.apache.spark.sql.Dataset[String],
                        label: String): org.apache.spark.sql.Dataset[String] =
      ds.mapPartitions { lines =>
        // Read the broadcast once per partition, not once per line.
        val stopSet = stopwords.value
        lines.map { line =>
          val terms = HanLP.segment(line).asScala
            .map(_.word)
            .filterNot(stopSet.contains)
            .mkString(" ")
          label + FieldDelimiter + terms
        }
      }

    // Same label encoding and union order as before: 0 poor, 1 general, 2 good.
    labelAndSegment(poorDs, "0")
      .union(labelAndSegment(generalDs, "1"))
      .union(labelAndSegment(goodDs, "2"))
      .write.text("G:\\testdata\\comment\\all\\")

    spark.close()
  }

}
