package offline

import com.huaban.analysis.jieba.{JiebaSegmenter, SegToken}
import com.huaban.analysis.jieba.JiebaSegmenter.SegMode
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

object JieBa {
  /**
   * Entry point: reads `default.news_seg` from Hive, segments the `sentence`
   * column with jieba (INDEX mode), and shows the first 50 rows with a new
   * `seg` column containing "/"-joined tokens of length > 1.
   */
  def main(args: Array[String]): Unit = {
    println("start=========")
    // Register the jieba segmenter with Kryo so the broadcast below
    // serializes efficiently.
    val conf = new SparkConf()
      .registerKryoClasses(Array(classOf[JiebaSegmenter]))
    val spark = SparkSession
      .builder()
      .master("local[2]")
      .appName("Jieba")
      .config(conf)
      .enableHiveSupport()
      .getOrCreate()
    try {
      val df = spark.sql("select * from default.news_seg")

      // Broadcast a single segmenter instance to executors instead of
      // serializing one copy per task closure.
      val broadcastSeg = spark.sparkContext.broadcast(new JiebaSegmenter())

      // UDF: tokenize `sentence`, keep tokens longer than one character,
      // join with "/". Null rows previously caused a NullPointerException
      // inside the task (`sentence.toString` on null); map them to null so
      // the resulting `seg` column is null for missing input.
      val jiebaUdf = udf { (sentence: String) =>
        if (sentence == null) null
        else
          broadcastSeg.value
            .process(sentence, SegMode.INDEX)
            .toArray()
            .map(_.asInstanceOf[SegToken].word)
            .filter(_.length > 1)
            .mkString("/")
      }
      val dfSeg = df.withColumn("seg", jiebaUdf(col("sentence")))
      dfSeg.show(50)
    } finally {
      // Release the local SparkContext even if the job throws.
      spark.stop()
      print("end==== ")
    }
  }
}
