package cn.dmp.report

import cn.dmp.util.AdApi
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

/**
  * Created by Administrator on 2018/4/23.
  */
object RepDeviceAnalysis {

  /**
    * Offline report job: reads ad-log parquet data and writes four aggregated
    * reports, one per dimension (ISP, network type, device type, OS).
    *
    * Expected arguments (exactly 5):
    *   dataInputPath outputPath1 outputPath2 outputPath3 outputPath4
    */
  def main(args: Array[String]): Unit = {

    // Exactly five arguments are required: the parquet input path plus one
    // output path per report dimension.
    if (args.length != 5) {
      println(
        """
          |dataInputPath outputPath1 outputPath2 outputPath3 outputPath4
          |参数不合法，请输入正确参数个数：dataInputPath outputPath1 outputPath2 outputPath3 outputPath4
        """.stripMargin)
      sys.exit(1)
    }
    val Array(dataInputPath, outputPath1, outputPath2, outputPath3, outputPath4) = args

    // NOTE(review): master is hard-coded to local[*]; for cluster deployment
    // it should come from spark-submit instead — confirm intended usage.
    val conf = new SparkConf()
      .setMaster("local[*]").setAppName("RepDeviceAnalysis")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf) // entry point of the offline Spark job

    // Parquet files must be read through SQLContext.
    val sqlContext: SQLContext = new SQLContext(sc)
    val dataFrame = sqlContext.read.parquet(dataInputPath)

    // Project each row to its four report dimensions plus the metric list
    // produced by AdApi. Cached because it feeds four independent jobs below.
    val dimensions = dataFrame.map { row =>
      val ispname = row.getAs[String]("ispname")           // carrier / ISP
      val netname = row.getAs[String]("networkmannername") // network type
      val devicetype = row.getAs[Int]("devicetype")        // device type
      val client = row.getAs[Int]("client")                // operating system
      (ispname, netname, devicetype, client, AdApi(row)._2)
    }.cache()

    // Pair each single-dimension keyed RDD with its output path. Keys are
    // widened to Any so the heterogeneously-keyed (String/Int) reports can
    // share one aggregation pipeline; saveAsTextFile output is unchanged
    // because it serializes tuples via toString regardless of key type.
    val reports = Seq(
      (dimensions.map(t => (t._1: Any, t._5)), outputPath1), // by ISP
      (dimensions.map(t => (t._2: Any, t._5)), outputPath2), // by network type
      (dimensions.map(t => (t._3: Any, t._5)), outputPath3), // by device type
      (dimensions.map(t => (t._4: Any, t._5)), outputPath4)  // by OS
    )

    // For each report: element-wise sum of the per-record metric lists within
    // a key, then persist the aggregated result as text.
    reports.foreach { case (keyed, path) =>
      keyed
        .reduceByKey((a, b) => a.zip(b).map { case (x, y) => x + y })
        .saveAsTextFile(path)
    }

    dimensions.unpersist() // release the cache before shutting down
    sc.stop()
  }
}
