package cn.doitedu.ml.textvec

import java.util

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.Term
import org.apache.spark.ml.feature.{HashingTF, IDF}
import org.apache.spark.sql.{Dataset, SparkSession}

object Text2VectorDemo {

  /**
   * Demo: vectorize raw Chinese text into TF-IDF feature vectors with Spark ML.
   *
   * Pipeline:
   *   1. read raw samples ("id<TAB>text" per line)
   *   2. tokenize with HanLP and drop stop words
   *   3. hash the token arrays into term-frequency vectors (HashingTF)
   *   4. rescale by inverse document frequency (IDF) to get TF-IDF vectors
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .master("local")
      .appName("语言向量化")
      .getOrCreate()

    import spark.implicits._

    // Load the raw samples; each line is expected to be "id<TAB>text".
    val sample = spark.read.textFile("userprofile/data/text_vec")

    // Load stop words once on the driver and broadcast the set, so each task
    // reads it from the executor-local cache instead of shipping it per record.
    val stopwords: Dataset[String] = spark.read.textFile("userprofile/data/comment_sample/stopwords")
    val bc = spark.sparkContext.broadcast(stopwords.collect().toSet)

    // Tokenize with HanLP (a Chinese NLP toolkit; alternatives: Paoding, IK),
    // filtering out stop words.
    val words = sample.map { line =>
      // Explicit JavaConverters (.asScala) instead of the deprecated implicit
      // scala.collection.JavaConversions, which was removed in Scala 2.13.
      import scala.collection.JavaConverters._

      val stopwds: Set[String] = bc.value

      val split = line.split("\t")
      val id = split(0)

      val terms: util.List[Term] = HanLP.segment(split(1))
      val tokens: Array[String] = terms.asScala.iterator
        .map(_.word)
        .filterNot(stopwds.contains)
        .toArray
      (id, tokens)
    }.toDF("id", "words")

    words.show(100, false)

    // Map each token array onto a sparse term-frequency vector via feature hashing.
    val hashingTF = new HashingTF()
      .setInputCol("words")
      .setNumFeatures(100000)
      .setOutputCol("tfvec")

    // transform() is row-local: each row's result depends only on that row.
    val tfVec = hashingTF.transform(words)
    tfVec.show(100, false)

    val idf = new IDF()
      .setInputCol("tfvec")
      .setOutputCol("tfidfvec")

    // Logic that needs a full pass over the dataset (document frequencies)
    // lives in fit(); the resulting model then transforms row by row.
    val model = idf.fit(tfVec)
    val tfidfvec = model.transform(tfVec)

    tfidfvec.show(10, false)

    // Keep only the id and the final TF-IDF vector column.
    val sampleVec = tfidfvec.drop("words", "tfvec")
    sampleVec.show(100, false)

    spark.close()
  }

}
