package cn.dmp.test

import cn.dmp.service.FlowDistribution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Local-mode batch job that loads ad-log records from a parquet file,
 * re-splits each record into its raw comma-separated fields, filters out
 * incomplete records, and hands the result to the flow-distribution report.
 */
object DmpMainServiceBak {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      // Use Kryo serialization for RDD data shuffled between workers
      // (faster and more compact than default Java serialization).
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Compress serialized RDD partitions to reduce memory footprint.
      .set("spark.rdd.compress", "true")
    val sc: SparkContext = new SparkContext(conf)

    try {
      val sqlContext: SQLContext = new SQLContext(sc)
      // Load the log file; each Row is one log record.
      val parquet: DataFrame = sqlContext.read.parquet("logFile")
      val logRdd: RDD[Row] = parquet.rdd

      // Flatten each Row back into its raw fields.
      // BUGFIX: the original used row.toString(), which renders as "[f0,f1,...]"
      // and corrupts the first/last fields with '[' / ']'. mkString(",") joins
      // the field values without the surrounding brackets.
      // split limit -1 preserves trailing empty fields so indices stay stable.
      val splitRdd: RDD[Array[String]] = logRdd.map(row => row.mkString(",").split(",", -1))

      // Keep only complete records — the log schema has at least 85 fields.
      val filterData: RDD[Array[String]] = splitRdd.filter(_.length >= 85)
      // Cached: the filtered RDD feeds the downstream report computations.
      filterData.cache()

      // Question 1: traffic flow distribution report.
      //FlowDistribution.accFlowDistribution(filterData, sqlContext)
      FlowDistributionTest.accFlowDistribution(filterData, sqlContext)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
