package user

import java.text.SimpleDateFormat
import java.util.{Calendar, TimeZone}
import java.text.MessageFormat.format
import  org.apache.spark.sql.types._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.jackson.JsonMethods._
import util.Util._

object user_incr_week {
  /**
   * Weekly incremental user rollup job.
   *
   * Takes a base date ("yyyy-MM-dd") as args(0), runs the configured Hive
   * SQL for the week containing that date, counts distinct users per
   * product_key (one overall total plus one count per gid), back-fills
   * product keys with no activity as "Unknown" with a zero count, merges
   * everything into one JSON object per product_key, and writes the result
   * as a single text file under the configured HDFS output directory.
   *
   * Helpers (getConfig, getWeek, addDay, DayofWeek, conv2ts, replaceNull,
   * toUserJobejct, merge) come from util.Util — their exact semantics are
   * not visible in this file; assumptions below are marked as such.
   */
  def main(args: Array[String]): Unit = {
    // Validate args: exactly one date argument is required.
    if (args.length == 0) {
      println("Usage: [2015-05-25|date]")
      sys.exit(1)
    }
    val dateStr = args(0) // args(0) is already a String; no .toString needed

    // Output path: one directory per week of the input date.
    val filePath = getConfig("user.hdfsdir") + "e_device_bind_user_activated_week/" + getWeek(dateStr)

    val sparkConf = new SparkConf().setAppName("user_inc_week_total")
    val sc = new SparkContext(sparkConf)
    try {
      // Overwrite semantics: drop any previous output for this week.
      val hdfs = FileSystem.get(new Configuration())
      val outPath = new Path(filePath)
      if (hdfs.exists(outPath)) hdfs.delete(outPath, true)

      val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)

      // Week window substituted into the configured SQL template:
      // presumably [start-of-week, day-after-date) as epoch timestamps —
      // depends on conv2ts/DayofWeek/addDay semantics; confirm in util.Util.
      val userWeekSql = format(
        getConfig("user.week"),
        conv2ts(addDay(dateStr, 1).toString).toString,
        conv2ts(DayofWeek(dateStr).toString).toString)

      // Resulting schema (per original note):
      // [product_key: string, uid: string, gid: string]
      val df = sqlContext.sql(userWeekSql)

      // Product keys with no activity this week get an "Unknown" group and
      // a zero count so every known key still appears downstream.
      val pkSql = format(getConfig("active_device.pkmap"))
      val pk = sqlContext.sql(pkSql)
      val pkmap = pk.distinct.map(line => line.mkString("@#"))
        .subtract(df.select("product_key").distinct.map(line => line.mkString("@#")))
        .map(line => line.toString + "@#" + line.toString + "%" + "Unknown")
        // NOTE(review): this split emits BOTH the bare product_key and the
        // "<pk>%Unknown" token for each inactive key — confirm the
        // duplication is intended (it mirrors the per-key total plus
        // per-(key, gid) records produced for active keys below).
        .flatMap(line => line.split("@#"))
        .map(line => (line.split("%")(0), (line, 0L)))

      // Distinct users per product_key. Computed separately from the
      // per-(key, gid) counts because a user may belong to several groups,
      // so summing group counts would over-count the key total.
      val dfcount = df.map(row => row.mkString("@#"))
        .map(line => line.split("@#"))
        .map(line => line(0).toString + "%" + replaceNull(line(1)).toString)
        .distinct
        .map(line => (line.split("%")(0), 1L))
        .reduceByKey(_ + _)
        .map(line => (line._1.split("%")(0), line))

      // Per-(product_key, gid) user counts, unioned with the per-key totals
      // and the zero-count placeholders, folded into one JSON object per
      // product_key via toUserJobejct/merge, then written as a single file.
      df.map(row => row.mkString("@#"))
        .map(line => line.split("@#"))
        .map(line => line(0).toString + "%" + replaceNull(line(2)).toString)
        .map(line => (line, 1L))
        .reduceByKey(_ + _)
        .map(line => (line._1.split("%")(0), line))
        .++(dfcount)
        .++(pkmap)
        .map(t => (t._1, toUserJobejct(t._2, getWeek(dateStr).toString)))
        .reduceByKey((x, y) => x merge y)
        .map(line => compact(render(line._2)))
        .coalesce(1, shuffle = true) // single output part file
        .saveAsTextFile(filePath)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
