
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive._
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import  org.json4s.JsonAST._
/*case class Location(country: String, region: String, city: String)
case class DeviceActive(count: BigInt, group_id: BigInt, loc: Location)
val dd = Map[String, DeviceActive]()*/
// json4s requires an implicit Formats in scope for extraction/serialization.
implicit lazy val formats: org.json4s.Formats = org.json4s.DefaultFormats
/** Converts a (dash-delimited key, count) pair into a nested JSON tree.
  *
  * The key's first segment becomes "product_key". Each further segment
  * becomes one level of nesting under "group_id", with the count stored
  * in an innermost {"count": num} object.
  *
  * Fixes over the previous version:
  *  - the match was non-exhaustive: any key with 6+ segments threw a
  *    MatchError at runtime; a generalized default case now handles
  *    arbitrary depth with the same nesting convention as cases 2-4.
  *  - removed redundant `.toString` calls on values that are already String.
  *
  * NOTE(review): the 5-segment form stores the count directly under the
  * last segment (no "count" wrapper), unlike cases 2-4. Preserved as-is
  * for backward compatibility — confirm whether this asymmetry is intended.
  *
  * @param t pair of (dash-delimited key, e.g. "aa-g99-china", count)
  * @return json4s JValue; merge several with `_ merge _` to build a report
  */
def check(t: (String, Int)): org.json4s.JsonAST.JValue = {
  val pk = t._1
  val num = t._2
  val pk2 = pk.split("-")
  pk2.length match {
    case 1 =>
      // Bare product key: count sits at the top level.
      ("product_key" -> pk2(0)) ~ ("count" -> num)
    case 2 =>
      ("product_key" -> pk2(0)) ~ ("group_id" -> Map(pk2(1) -> Map("count" -> num)))
    case 3 =>
      ("product_key" -> pk2(0)) ~ ("group_id" -> Map(pk2(1) -> Map(pk2(2) -> Map("count" -> num))))
    case 4 =>
      ("product_key" -> pk2(0)) ~ ("group_id" -> Map(pk2(1) -> Map(pk2(2) -> Map(pk2(3) -> Map("count" -> num)))))
    case 5 =>
      // Legacy shape: count keyed directly by the last segment (see NOTE above).
      ("product_key" -> pk2(0)) ~ ("group_id" -> Map(pk2(1) -> Map(pk2(2) -> Map(pk2(3) -> Map(pk2(4) -> num)))))
    case _ =>
      // Generalized nesting for deeper keys (previously a MatchError).
      // Builds pk2(1) -> pk2(2) -> ... -> {"count": num}, matching cases 2-4.
      val leaf: JValue = JObject("count" -> JInt(num))
      val nested = pk2.drop(1).foldRight(leaf)((k, acc) => JObject(k -> acc))
      ("product_key" -> pk2(0)) ~ ("group_id" -> nested)
  }
}
// Sample inputs: a bare product key and a 4-segment key.
val t = ("aa", 10)
//val t=("aa-99-china",200)
val t2 = ("aa-99-china-guangdong", 200)

//val t=("aa-99-china-guangdong-gz",199)
// Build the JSON trees for both samples.
val c1 = check(t)
val c2 = check(t2)

// Render the first sample as formatted JSON text (reuses c1 rather
// than recomputing check(t) — check is pure, so the output is identical).
pretty(render(c1))
/*val json=Array(("aa-g99-china",200),("aa-g98-china",200),("aa",9999),("aa-g99",333),("aa-g99-china-gd",190),("aa-g99-china-gd-gz",99)).
  map(t=>check(t)).reduce(_  merge _)*/
//map(json=>pretty(render(json)))
//pretty(render(json))
//. map(line=>compact(render(line)))
//  pretty(render(res)))


//pretty(render(