package active_device

import util.Util._

import java.text.SimpleDateFormat
import java.util.{Calendar, TimeZone}
import java.text.MessageFormat.format
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive._
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import  org.json4s.JsonAST._
import  org.apache.spark.sql.types._
object daily_active_device {

  /**
   * Daily active-device aggregation job.
   *
   * For a given date (args(0), format "yyyy-MM-dd") this job:
   *   1. runs the Hive queries configured under "active_device.day" (active
   *      devices for that day) and "active_device.pkmap" (the full product-key
   *      map),
   *   2. counts active devices per product_key at several granularities
   *      (product_key alone, +group, +country, +region, +city, and a parallel
   *      "location" roll-up),
   *   3. emits zero counts for product_keys that had no activity that day,
   *   4. merges the per-key counts into one JSON document per product_key and
   *      writes them, one compact JSON object per line, to
   *      <active_device.hdfsdir>/e_device_living_count_date/<date> on HDFS.
   *
   * Intermediate records are plain strings using "@#" to separate the
   * granularity levels of one row and "%" to separate fields within a level.
   */
  def main(args: Array[String]) {

    // Validate args: a single "yyyy-MM-dd" date string is required.
    if (args.length == 0) {
      println("Usage: [2015-05-25|date]")
      sys.exit(1)
    }
    // json4s formats, required for JSON rendering below (and presumably by
    // the util.Util JSON helpers — TODO confirm toJobejct takes it implicitly).
    implicit lazy val formats = org.json4s.DefaultFormats
    val date_str = args(0)
    val date_sdf = new SimpleDateFormat("yyyy-MM-dd")
    val daily_date = date_sdf.parse(date_str)
    // NOTE(review): the calendar is pinned to GMT+8, i.e. the job assumes the
    // Hive partitions are in China Standard Time — confirm against the tables.
    val calender = Calendar.getInstance(TimeZone.getTimeZone("GMT+8"))
    calender.setTime(daily_date)
    // Calendar.MONTH is 0-based, hence the +1.
    val (year, month, day) = (calender.get(Calendar.YEAR), calender.get(Calendar.MONTH) + 1, calender.get(Calendar.DAY_OF_MONTH))

    // Hive SQL templates come from configuration; the daily query is
    // parameterised with the year/month/day partition values via MessageFormat.
    val pk_sql = format(getConfig("active_device.pkmap"))
    val daily_ac_dev_sql = format(getConfig("active_device.day"), year.toString, month.toString, day.toString)

    // Output directory on HDFS for this date's result.
    val filePath = getConfig("active_device.hdfsdir") + "e_device_living_count_date/" + date_str

    val sparkConf = new SparkConf().setAppName("day_active_device")
    val sc = new SparkContext(sparkConf)
    // Fix: use Spark's Hadoop configuration so HDFS settings supplied at
    // submit time (fs.defaultFS, HA nameservices, ...) are honoured; a bare
    // `new Configuration()` only sees whatever is on the local classpath.
    val hdfs = FileSystem.get(sc.hadoopConfiguration)
    // Make the job idempotent per date: drop any previous output first.
    if (hdfs.exists(new Path(filePath))) hdfs.delete(new Path(filePath), true)

    val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)
    import sqlContext._
    // Active devices for the day. Columns referenced positionally below:
    // 0 = product_key, 1 = country, 2 = region, 3 = city, 4 = group id.
    val df = sqlContext.sql(daily_ac_dev_sql)
    // Product-key map; per the original comment:
    // [product_key: string, country: string, region: string, city: string, gid: int]
    val pk = sqlContext.sql(pk_sql)
    // Product keys with NO activity today: subtract today's active keys from
    // the full key map, then emit zero-count records at every granularity so
    // the output still contains an entry for inactive keys.
    // NOTE(review): pk rows are flattened with mkString("@#") across ALL of
    // their columns before the subtract, while df contributes product_key
    // only — if pk_sql really returns five columns these two sets can never
    // intersect and the subtract is a no-op; verify pk_sql (or select
    // product_key explicitly before the subtract).
    val pkmap = pk.distinct.map(row => row.mkString("@#"))
      .subtract(df.select("product_key")
      .distinct.map(product_key => product_key.mkString("@#")))
      .map(product_key => product_key.toString
      + "@#" + product_key.toString + "%" + "Unknown"
      + "@#" + product_key.toString + "%" + "Unknown" + "%" + "Unknown"
      + "@#" + product_key.toString + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown"
      + "@#" + product_key.toString + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown"
      + "@#" + product_key.toString + "%" + "location" + "%" + "Unknown"
      + "@#" + product_key.toString + "%" + "location" + "%" + "Unknown" + "%" + "Unknown"
      + "@#" + product_key.toString + "%" + "location" + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown"
      )
      .flatMap(line => line.split("@#"))
      // Key each record by its product_key (first "%"-field), count = 0.
      .map(lineMap => (lineMap.split("%")(0), (lineMap, 0L)))

    // Active devices: expand each row into one record per granularity level
    // (same "@#"/"%" protocol as above), count occurrences per record, key by
    // product_key, union in the zero-count records for inactive keys, convert
    // each record to a JSON fragment and merge all fragments of one
    // product_key into a single JSON document.
    df.map(row => row.mkString("@#")).map(line => line.split("@#")).map(
      line => line(0).toString
        + "@#" + line(0) + "%" + replaceNull(line(4))
        + "@#" + line(0) + "%" + replaceNull(line(4)) + "%" + replaceNull(line(1))
        + "@#" + line(0) + "%" + replaceNull(line(4)) + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2))
        + "@#" + line(0) + "%" + replaceNull(line(4)) + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2)) + "%" + replaceNull(line(3))
        + "@#" + line(0) + "%" + "location" + "%" + replaceNull(line(1))
        + "@#" + line(0) + "%" + "location" + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2))
        + "@#" + line(0) + "%" + "location" + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2)) + "%" + replaceNull(line(3))
    ).flatMap(line => line.split("@#"))
      .map(line => (line, 1L))
      .reduceByKey(_ + _)
      .map(line => (line._1.split("%")(0), line))
      .++(pkmap)
      .map(t => (t._1, toJobejct(t._2, date_str)))
      .reduceByKey((x, y) => x merge y)
      // One compact JSON object per line; coalesce(1) forces a single file.
      .map(line => compact(render(line._2)))
      .coalesce(1, shuffle = true)
      .saveAsTextFile(filePath)

    // Fix: release cluster resources on normal completion.
    sc.stop()
  }


}
