package com.yanduo.report

import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Counts the distribution of records by province and city using Spark RDD operators.
  * Course video: https://www.bilibili.com/video/BV1F4411i7jK?p=16
  * @author Gerry chan
  */
object ProCityRptV3 {
  def main(args: Array[String]): Unit = {
    // 0. Validate the argument count: exactly two arguments are required.
    //    BUG FIX: the original checked `args.length != 1`, yet the code below
    //    destructures TWO values — a single-argument run would pass validation
    //    and then crash with a MatchError.
    if (args.length != 2) {
      println(
        """
          |com.yanduo.report.ProCityRptV3
          |参数：
          | logInputPath
          | resultOutputPath
        """.stripMargin)
      sys.exit(1) // non-zero exit code signals a usage error to the caller
    }

    // 1. Receive the program arguments.
    val Array(logInputPath, resultOutputPath) = args

    // 2. Build SparkConf -> SparkContext.
    val sparkConf = new SparkConf()
    sparkConf.setAppName(this.getClass.getSimpleName)
    sparkConf.setMaster("local[*]")

    // Use Kryo for RDD serialization (spill to disk, worker-to-worker transfer).
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(sparkConf)
    // NOTE: the original also built an unused SparkSession here; removed as dead code.

    // 3. Read the log, count occurrences per (province, city) pair, and save the result.
    //    split(",", -1) keeps trailing empty fields so column indices stay stable.
    sc.textFile(logInputPath)
      .map(_.split(",", -1))
      // Skip malformed rows that have fewer than the expected 85 columns.
      .filter(_.length >= 85)
      // Columns 24/25 hold province and city, e.g. ((Hebei, Shijiazhuang), 1).
      .map(fields => ((fields(24), fields(25)), 1))
      .reduceByKey(_ + _)
      // Flatten each ((province, city), count) into a "province,city,count" CSV line.
      .map { case ((province, city), count) => s"$province,$city,$count" }
      .saveAsTextFile(resultOutputPath)

    // Release cluster resources held by the SparkContext (the original leaked it).
    sc.stop()
  }
}
