package cn.dmp.service

import cn.dmp.util.Jpools
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import redis.clients.jedis.Jedis

object DmpMainService {
  /**
   * Driver entry point: loads the ad-log parquet data and the app dictionary,
   * publishes the (appId -> appName) dictionary via a Spark broadcast variable
   * and via Redis, then runs the area-distribution analysis.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      // Use Kryo serialization for RDD data shipped between workers (smaller/faster than Java serialization).
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Compress serialized RDD partitions to reduce memory/disk footprint.
      .set("spark.rdd.compress", "true")
    val sc: SparkContext = new SparkContext(conf)

    val sqlContext: SQLContext = new SQLContext(sc)

    // Pre-processed ad log data in parquet format.
    val parquet: DataFrame = sqlContext.read.parquet("logFile")

    // App dictionary file: tab-separated; column 1 is the app name, column 4 the app id.
    val fileInfo: RDD[String] = sc.textFile("H:\\bigdata\\大数据课程\\杨家伟视频\\项目二\\资料PDF\\app_dict.txt")
    val fileMsg: RDD[Array[String]] = fileInfo.map(info => {
      // -1 keeps trailing empty fields so column positions stay stable.
      info.split("\t", -1)
    })
    // BUGFIX: the map below reads arr(4), which needs at least 5 fields;
    // the previous `>= 4` filter let 4-field rows through and caused
    // ArrayIndexOutOfBoundsException at execution time.
    val filterInfo: RDD[Array[String]] = fileMsg.filter(arr => arr.length >= 5)

    // (appId, appName) pairs.
    val dictInfo: RDD[(String, String)] = filterInfo.map(arr => {
      (arr(4), arr(1))
    })

    // Option 1: distribute the dictionary as a broadcast variable.
    val broadInfo: Broadcast[Map[String, String]] = sc.broadcast(dictInfo.collect().toMap)

    // Option 2: store the dictionary in a Redis hash, one connection per partition.
    dictInfo.foreachPartition(info => {
      val jedis: Jedis = Jpools.getJedis()
      try {
        info.foreach(t => {
          jedis.hset("dict", t._1, t._2)
        })
      } finally {
        // Always return the connection, even if an hset call fails.
        jedis.close()
      }
    })

    // Question 2
    //FlowDistribution.accFlowDistributions(parquet, sqlContext)

    // Question 3
    AreaDistribution.getAeraDistributionV2(parquet, sqlContext)


    sc.stop()
  }
}
