package cn.dmp.report

import cn.dmp.utils.AdRptKpi
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

/**
 * Terminal-device analysis report job.
 *
 * Reads ad-log records from a parquet file and produces four element-wise
 * summed KPI reports, keyed by: ISP name, network type, device type, and
 * client (operating system). Each report is written as a text file to its
 * own output path.
 *
 * Args: dataInputPath outputPath1 outputPath2 outputPath3 outputPath4
 */
object RptDeviceAnalysis {

    def main(args: Array[String]): Unit = {

        // Validate CLI arguments: exactly one input path and four output paths.
        if (args.length != 5) {
            println(
                """
                  |cn.dmp.report.RptDeviceAnalysis
                  |Usage: dataInputPath outputPath1 outputPath2 outputPath3 outputPath4
                """.stripMargin)
            // Non-zero exit status so schedulers/scripts can detect the usage error
            // (the original exited with 0, masking the failure).
            sys.exit(1)
        }

        val Array(dataInputPath, outputPath1, outputPath2, outputPath3, outputPath4) = args

        // Spark context. NOTE(review): master is hard-coded to local[*]; for cluster
        // deployment this should normally be left to spark-submit — confirm intent.
        val sparkConf = new SparkConf().setMaster("local[*]").setAppName("终端设备")
        val sc = new SparkContext(sparkConf)

        // Read the source data as a DataFrame from parquet (Spark 1.x-style SQLContext).
        val sqlc = new SQLContext(sc)
        val parquet = sqlc.read.parquet(dataInputPath)

        // Project each row to the four dimension fields plus the KPI metric list
        // from AdRptKpi (presumably a per-row list of counters — TODO confirm).
        // Cached because it feeds four independent aggregations below.
        val baseData = parquet.map(row => {
            val ispname = row.getAs[String]("ispname")
            val nwname = row.getAs[String]("networkmannername")
            val dtype = row.getAs[Int]("devicetype")
            val client = row.getAs[Int]("client")
            (ispname, nwname, dtype, client, AdRptKpi(row)._2)
        }).cache()

        // ISP (carrier) report: element-wise sum of the KPI lists per carrier.
        baseData.map(t => (t._1, t._5))
          .reduceByKey((list1, list2) => list1 zip list2 map (t => t._1 + t._2))
          .saveAsTextFile(outputPath1)

        // Network type report.
        baseData.map(t => (t._2, t._5))
          .reduceByKey((list1, list2) => list1 zip list2 map (t => t._1 + t._2))
          .saveAsTextFile(outputPath2)

        // Device type report.
        baseData.map(t => (t._3, t._5))
          .reduceByKey((list1, list2) => list1 zip list2 map (t => t._1 + t._2))
          .saveAsTextFile(outputPath3)

        // Operating system (client) report.
        baseData.map(t => (t._4, t._5))
          .reduceByKey((list1, list2) => list1 zip list2 map (t => t._1 + t._2))
          .saveAsTextFile(outputPath4)

        // Release the cached RDD before shutting the context down.
        baseData.unpersist()
        sc.stop()
    }

}
