package dmp.tags

import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{SQLContext, SparkSession}

/**
  * author:CN.CDG
  * Date:2019/2/16
  * Time:13:15
  **/
/**
  * Entry point that loads an app-id dictionary and a stop-words list,
  * broadcasts both to the executors, and (eventually) tags log records
  * read from a parquet file.
  *
  * Expected arguments:
  *   args(0) inputPath     - parquet log file to tag
  *   args(1) dictFilePath  - tab-separated app dictionary (package -> app name)
  *   args(2) stopFilePath  - stop-words file, one word per line
  *   args(3) outputPath    - destination for the tagged output
  */
object ContextTags2 {
  def main(args: Array[String]): Unit = {
    if (args.length != 4) {
      // Print a usage hint instead of an empty string so a caller knows
      // which arguments are missing.
      println(
        """
          |Usage: ContextTags2 <inputPath> <dictFilePath> <stopFilePath> <outputPath>
        """.stripMargin)
      sys.exit()
    }
    val Array(inputPath, dictFilePath, stopFilePath, outputPath) = args

    val conf = new SparkConf()
      .setMaster("local[*]") // NOTE(review): hard-coded master; consider passing via spark-submit
      .setAppName("ContextTags")
      // Kryo is faster and more compact than Java serialization for broadcasts.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val spark = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()

    val sc = spark.sparkContext
    try {
      // App dictionary: tab-separated lines; column 4 is the key (package name)
      // and column 1 is the value (app name). Lines with fewer than 6 columns
      // are malformed and skipped.
      val dictMap: Map[String, String] = sc.textFile(dictFilePath)
        .map(_.split("\t", -1))
        .filter(_.length > 5)
        .map(arr => (arr(4), arr(1)))
        .collect().toMap

      // Stop-words list: one word per line. BUG FIX: this previously read
      // dictFilePath, so the stop-words file was never loaded.
      val stopWordsMap: Map[String, Int] =
        sc.textFile(stopFilePath).map((_, 0)).collect().toMap

      // Broadcast both lookup tables to the executors so each task can read
      // them locally instead of shipping a copy per task.
      val broadcastAppDict: Broadcast[Map[String, String]] = sc.broadcast(dictMap)
      val broadcastStopWordsDict: Broadcast[Map[String, Int]] = sc.broadcast(stopWordsMap)

      // Read the parquet log file; the `where` clause may also contain SQL.
      //spark.sqlContext.read.parquet(inputPath).where(TagsUtils.hasSomeUserIdConditition).
      //sqlContext.parquetFile(inputPath).where(TagsUtils.hasSomeUserIdConditition).map(s=>{})
    } finally {
      // Always release cluster resources, even if a stage above fails.
      spark.stop()
    }
  }
}
