package cn.dmp.service

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * DMP driver program (v2): loads the ad-log parquet data and the app
 * dictionary file, broadcasts the dictionary as a Map, then runs the
 * flow-distribution and area-distribution reports.
 *
 * NOTE(review): input paths are hard-coded to local files — parameterize
 * via `args` before running anywhere but a dev machine.
 */
object DmpMainServiceV2 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      // Serialize RDD data with Kryo (faster / more compact than Java serialization).
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Compress serialized RDD partitions to reduce memory/disk footprint.
      .set("spark.rdd.compress", "true")
    val sc: SparkContext = new SparkContext(conf)

    val sqlContext: SQLContext = new SQLContext(sc)
    // Ad-log data previously written as parquet.
    val parquet: DataFrame = sqlContext.read.parquet("logFile")

    // App dictionary: tab-separated lines; split with limit -1 so trailing
    // empty fields are preserved and the column count is stable.
    val fileInfo: RDD[String] = sc.textFile("H:\\bigdata\\大数据课程\\杨家伟视频\\项目二\\资料PDF\\app_dict.txt")
    val fileMsg: RDD[Array[String]] = fileInfo.map(info => {
      info.split("\t", -1)
    })
    // BUGFIX: we read arr(4) below, which needs at least 5 fields;
    // the previous guard (>= 4) allowed 4-field lines and threw
    // ArrayIndexOutOfBoundsException at runtime.
    val filterInfo: RDD[Array[String]] = fileMsg.filter(arr => arr.length >= 5)

    // Map dictionary rows to (key = column 4, value = column 1).
    val dictInfo: RDD[(String, String)] = filterInfo.map(arr => {
      (arr(4), arr(1))
    })

    // Collect the (small) dictionary to the driver and broadcast it so every
    // executor gets one read-only copy instead of a per-task closure copy.
    val broadInfo: Broadcast[Map[String, String]] = sc.broadcast(dictInfo.collect().toMap)

    // Question 2: flow distribution report.
    FlowDistribution.accFlowDistributions(parquet, sqlContext)

    // Question 3: area distribution report (needs the broadcast dictionary).
    AreaDistributionV2.getAeraDistribution(parquet, sqlContext, broadInfo)

    sc.stop()
  }
}
