package com.cxk.ml

import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.feature.{HashingTF, IDF, StopWordsRemover, Tokenizer, Word2Vec}
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable

object TD_IDF {

  /** Factory for [[TD_IDF]], bound to the given session and column names. */
  def apply(spark: SparkSession, label: String, sentence: String): TD_IDF =
    new TD_IDF(spark, label, sentence)

  /**
   * Spark ML [[Tokenizer]] backed by Lucene's [[SmartChineseAnalyzer]] for
   * Chinese word segmentation.
   *
   * Fixes over the previous version:
   *  - the token stream and analyzer are released in a `finally` block, so a
   *    failure while consuming tokens no longer leaks Lucene resources;
   *  - tokens are collected into a buffer instead of a Set, preserving word
   *    order and duplicate terms — both matter for the downstream
   *    frequency/context-window based Word2Vec stage.
   */
  private[this] class TokenAnalyzer() extends Tokenizer {

    override protected def createTransformFunc: String => Seq[String] = (sentence: String) => {
      val analyzer = new SmartChineseAnalyzer()
      val tokens = mutable.ArrayBuffer[String]()
      // Field name "content" is only a label for the analyzer; it is not a column.
      val stream = analyzer.tokenStream("content", sentence)
      try {
        stream.reset()
        val term = stream.getAttribute(classOf[CharTermAttribute])
        while (stream.incrementToken()) {
          tokens += term.toString
        }
        // end() must be called after the last incrementToken(), per the
        // Lucene TokenStream workflow contract.
        stream.end()
      } finally {
        stream.close()
        analyzer.close()
      }
      tokens.toSeq
    }
  }

  /**
   * Text pipeline: Chinese word segmentation → stop-word removal → Word2Vec
   * sentence embedding.
   *
   * NOTE(review): despite the name, this class does not compute TF-IDF — the
   * final stage is a Word2Vec embedding. The `label` field and the `spark`
   * session are currently unused by `transform`; they are kept for caller
   * compatibility.
   *
   * @param spark    active Spark session (currently unused here)
   * @param label    name of the label column (currently unused)
   * @param sentence name of the input text column to tokenize
   */
  class TD_IDF(val spark: SparkSession, val label: String, val sentence: String) {

    /**
     * Runs the tokenize → stop-word → Word2Vec pipeline over `df`.
     *
     * @param df         input data; must contain the `sentence` column
     * @param vectorSize dimensionality of the learned word vectors
     *                   (default 3, matching the previously hard-coded value)
     * @param minCount   minimum token frequency for a word to enter the
     *                   Word2Vec vocabulary (default 1, as before)
     * @return `df` extended with a "result" column holding the embedding
     */
    def transform(df: DataFrame, vectorSize: Int = 3, minCount: Int = 1): DataFrame = {
      // Chinese word segmentation via the Lucene smart analyzer.
      val tokenizer = new TokenAnalyzer().setInputCol(sentence).setOutputCol("raw")
      val rawData = tokenizer.transform(df)

      // Remove stop words. NOTE(review): StopWordsRemover defaults to the
      // English stop-word list — confirm a Chinese list should be supplied.
      val remover = new StopWordsRemover().setInputCol("raw").setOutputCol("words")
      val words = remover.transform(rawData)

      // Embed each document into a K-dimensional vector space; vector-space
      // similarity then approximates semantic similarity of the texts.
      val word2vec = new Word2Vec()
        .setInputCol("words")
        .setOutputCol("result")
        .setVectorSize(vectorSize)
        .setMinCount(minCount)
      word2vec.fit(words).transform(words)
    }
  }

}
