package incr_device

import java.text.MessageFormat.format
import java.text.SimpleDateFormat
import java.util.{Calendar, TimeZone}
import  org.apache.spark.sql.types._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.jackson.JsonMethods._
import util.Util._

object incr_device_total {

  /** Builds the cumulative activated-device rollup for one day and writes it
    * to HDFS as a single JSON-lines file.
    *
    * Flow:
    *   1. Query Hive for activation totals up to the timestamp of `date + 1 day`
    *      (i.e. everything activated through the given date).
    *   2. Expand each row into its rollup keys along two hierarchies:
    *      pk / pk%gid / pk%gid%country / ... / pk%gid%country%region%city
    *      and pk%location%country / ... / pk%location%country%region%city,
    *      counting devices per key.
    *   3. For product keys present in the pk map but absent from the totals,
    *      emit zero-count "Unknown" placeholder keys so every product key is
    *      represented in the output.
    *   4. Merge the per-key counts into one JSON object per product_key and
    *      save one part file under
    *      <incr_device.hdfsdir>/e_device_activated_total/<date>.
    *
    * Usage: incr_device_total <yyyy-MM-dd>   (e.g. "2015-05-25")
    */
  def main(args: Array[String]): Unit = {
    // A single date argument is required.
    if (args.length == 0) {
      println("Usage: [2015-05-25|date]")
      sys.exit(1)
    }
    val date_str = args(0)

    // Cumulative totals: rows with activation timestamp below ts(date + 1 day).
    val total_ac_dev_sql = format(getConfig("incr_device.total"),
      conv2ts(addDay(date_str, 1).toString).toString)
    // NOTE(review): MessageFormat.format with no substitution arguments still
    // rewrites the pattern — it strips single quotes and unescapes doubled
    // braces. If the pkmap SQL contains quoted literals this mangles them;
    // confirm the config value before removing or keeping this call.
    val pk_sql = format(getConfig("active_device.pkmap"))
    val filePath = getConfig("incr_device.hdfsdir") + "e_device_activated_total/" + date_str

    val sparkConf = new SparkConf().setAppName("day_active_device")
    val sc = new SparkContext(sparkConf)
    try {
      // Clear any previous output for this date so saveAsTextFile can succeed.
      // NOTE(review): FileSystem.get(new Configuration()) does not see Spark's
      // Hadoop overrides; sc.hadoopConfiguration would be safer — confirm.
      val hdfs = FileSystem.get(new Configuration())
      val outPath = new Path(filePath)
      if (hdfs.exists(outPath)) hdfs.delete(outPath, true)

      val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)
      // df columns: [product_key, country, region, city, gid] (see comment in
      // the original source) — presumably strings; verify against the query.
      val df = sqlContext.sql(total_ac_dev_sql)
      val pk = sqlContext.sql(pk_sql)

      // Product keys with no activations yet: build every rollup key with
      // "Unknown" placeholders and a zero count, keyed by product_key, so the
      // union below yields a complete key set.
      val pkmap = pk.distinct.map(row => row.mkString("@#"))
        .subtract(df.select("product_key")
          .distinct.map(product_key => product_key.mkString("@#")))
        .map(product_key => product_key.toString
          + "@#" + product_key.toString + "%" + "Unknown"
          + "@#" + product_key.toString + "%" + "Unknown" + "%" + "Unknown"
          + "@#" + product_key.toString + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown"
          + "@#" + product_key.toString + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown"
          + "@#" + product_key.toString + "%" + "location" + "%" + "Unknown"
          + "@#" + product_key.toString + "%" + "location" + "%" + "Unknown" + "%" + "Unknown"
          + "@#" + product_key.toString + "%" + "location" + "%" + "Unknown" + "%" + "Unknown" + "%" + "Unknown")
        .flatMap(line => line.split("@#"))
        .map(line => (line.split("%")(0), (line, 0L)))

      // Expand each activation row into all of its rollup keys, count devices
      // per key, union in the zero-count placeholders, fold everything into a
      // single JSON object per product_key, and write compact JSON lines.
      df.map(row => row.mkString("@#")).map(line => line.split("@#")).map(
        line => line(0).toString
          + "@#" + line(0) + "%" + replaceNull(line(4))
          + "@#" + line(0) + "%" + replaceNull(line(4)) + "%" + replaceNull(line(1))
          + "@#" + line(0) + "%" + replaceNull(line(4)) + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2))
          + "@#" + line(0) + "%" + replaceNull(line(4)) + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2)) + "%" + replaceNull(line(3))
          + "@#" + line(0) + "%" + "location" + "%" + replaceNull(line(1))
          + "@#" + line(0) + "%" + "location" + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2))
          + "@#" + line(0) + "%" + "location" + "%" + replaceNull(line(1)) + "%" + replaceNull(line(2)) + "%" + replaceNull(line(3))
      ).flatMap(line => line.split("@#"))
        .map(line => (line, 1L))
        .reduceByKey(_ + _)
        .map(line => (line._1.split("%")(0), line))
        .++(pkmap)
        .map(t => (t._1, toJobejct(t._2))) // toJobejct: project helper ([sic] spelling, defined in util.Util)
        .reduceByKey((x, y) => x merge y)
        .map(line => compact(render(line._2))) // compact JSON; use pretty only for debugging
        .coalesce(1, shuffle = true)
        .saveAsTextFile(filePath)
    } finally {
      // Always release the SparkContext (was missing in the original).
      sc.stop()
    }
  }

}
