package com.chenjj.bigdata.spark.scala.sparkdemo1

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.slf4j.{Logger, LoggerFactory}

// Empty companion class; all behavior lives in the DataProcess object below.
// NOTE(review): kept only so `DataProcess.getClass`-style companion lookups work —
// confirm nothing else instantiates it before removing.
class DataProcess {

}


object DataProcess {
  // Logger bound to this object's runtime class.
  val logger: Logger = LoggerFactory.getLogger(DataProcess.getClass)

  /** Smoke-test helper: logs a fixed greeting. */
  def hello(): Unit = {
    logger.info("this is hello")
  }

  /**
    * Parses a batch of JSON records, registers them as a temp view and prints
    * two aggregations: request counts per (appid, service), and request-time
    * min/max/avg per service.
    *
    * @param sqlContext active SQLContext used to parse JSON and run SQL
    * @param elem       RDD of JSON strings; each record is expected to carry
    *                   `appid`, `service` and `requestTime` fields
    */
  def processLine(sqlContext: SQLContext, elem: RDD[String]): Unit = {
    if (!elem.isEmpty()) {

      // Load the JSON-formatted RDD and register it as a temporary view.
      import sqlContext.implicits._
      val sqlRDD = sqlContext.read.json(elem.toDS())
      sqlRDD.printSchema()
      sqlRDD.createOrReplaceTempView("SparkDemo_Table")

      val reqDataFrame = sqlContext.sql("select appid,service,count(0) from SparkDemo_Table group by appid,service").collect()
      for (row <- reqDataFrame) {
        val appid = row.getString(0)
        val service = row.getString(1)
        // count(0) is a BIGINT, i.e. a Long: getString(2) would throw
        // ClassCastException, and %d needs a numeric argument anyway.
        val count = row.getLong(2)
        printf("(%s,%s)->%d\n", appid, service, count)
      }

      val perfDataFrame = sqlContext.sql("select service,min(requestTime),max(requestTime),avg(requestTime) from SparkDemo_Table group by service").collect()
      for (row <- perfDataFrame) {
        val service = row.getString(0)
        // Column order matches the SELECT: index 1 = min, index 2 = max,
        // index 3 = avg. (The original read min/max swapped.)
        val min = row.getLong(1)
        val max = row.getLong(2)
        val avg = row.getDouble(3)
        // Trailing newline added so per-service lines don't run together.
        printf("%s ->(%d,%d,%f)\n", service, min, max, avg)
      }

    } else {
      println("elem is empty")
    }

  }
}