import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}

object active_device {


  /** Joins the first component of every pair into one comma-separated string.
    *
    * Bug fixed: the original body ran `for {s <- line} s._1`, which evaluates
    * `s._1` and throws the value away (a for-loop without `yield` returns
    * Unit), then unconditionally returned "" — so the method always produced
    * the empty string regardless of input. The loop over `_1` strongly
    * suggests the intent was to render the first elements; they are now
    * collected and joined (comma-separated — no caller is visible in this
    * file to confirm the intended separator).
    *
    * @param line pairs whose first components are collected
    * @return the first components joined with ",", or "" for an empty array
    */
  def show(line: Array[Tuple2[String, String]]): String =
    line.map(_._1).mkString(",")


  /** Driver entry point: reads `report.testdevice` from Hive, counts rows at
    * five levels of granularity (product_key, +group_id, +col4, +col5, +col6)
    * and folds the per-level counts into one nested JSON value via `merge`.
    *
    * NOTE(review): the JSON DSL used below (`~`, `merge`, and the
    * `pretty`/`render` calls in the commented-out experiments) belongs to
    * json4s / lift-json, but no such import is visible in this file — confirm
    * the imports exist elsewhere, otherwise this does not compile.
    * NOTE(review): the final `res` value is computed but never persisted —
    * every `saveAsTextFile` call is commented out, so deleting `filePath`
    * up-front currently serves no purpose.
    */
  def main(args: Array[String]) {

    // Target HDFS output path; deleted first so a re-run could overwrite it
    // (recursive delete — `true`). Only useful once a save call is re-enabled.
    val filePath="hdfs:///test/res"
    val sparkConf=new SparkConf().setAppName("workCount")
    val sc= new SparkContext(sparkConf)
    val hdfs=FileSystem.get(new Configuration())
    if(hdfs.exists(new Path(filePath)))hdfs.delete(new Path(filePath),true)

    // HiveContext (Spark 1.x) gives SQL access to the Hive metastore.
    val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)

    // Full table scan; rows are addressed positionally below:
    // get(0)=product_key, get(1)=group_id, get(4)/get(5)/get(6) presumably
    // country/region/city (see the unused Location case class) — TODO confirm
    // against the actual table schema.
    val  df=sqlContext.sql("select *  from report.testdevice")

       //  df.groupBy("product_key").count().show()
      // df.printSchema()
   // df.select("product_key").groupBy("product_key").count().show()

    //df.groupBy("ip").count().show()
   // val a=df.select("product_key").toJSON.map(line=>(line,1)).reduceByKey(_ + _).map(line=>pretty(line))
  //  val a=df.select("product_key").map(line=>(line,1)).reduceByKey(_ + _).collect.toList


    /*val a=df.select("product_key").map(line=>(line,1)).reduceByKey(_ + _).map(
    t=> ("product_key"->t._1.get(0).toString)
      ~ ("count" ->t._2.toInt)
    ).map(json=>pretty(render(json))).coalesce(1, shuffle = true).saveAsTextFile("hdfs:///test/res")*/

   /* val a=df.select("product_key").map(line=>(line,1)).reduceByKey(_ + _).map(
      t=> ("product_key"->t._1.get(0).toString)
        ~ ("count" ->t._2.toInt)
    ).map(json=>pretty(render(json))).*/

     // df.select("product_key","group_id").join()
     // NOTE(review): both case classes below are unused in this method (the
     // JSON is built with the `~` DSL instead); "DeviveActive" also looks like
     // a typo for "DeviceActive".
     case class  Location(country:String,region:String,city:String)
     case class  DeviveActive(product_key:String,count:BigInt,group_id:BigInt,loc:Location)


    // For each row, emit 5 comma-separated keys — one per aggregation level —
    // each key being the dash-joined prefix of the row's columns:
    //   "pk", "pk-grp", "pk-grp-c4", "pk-grp-c4-c5", "pk-grp-c4-c5-c6".
    // flatMap splits them apart, (key,1)+reduceByKey counts occurrences of
    // every prefix, `check` turns each (key,count) into a JSON fragment, and
    // `merge` folds all fragments into one nested JSON document.
    // NOTE(review): any column value containing "," or "-" corrupts the
    // splitting below — confirm the source data cannot contain those chars.
  val res=  df.map(
      line=>line.get(0).toString
      +","+line.get(0)+"-"+line.get(1).toString
      +","+line.get(0)+"-"+line.get(1).toString+"-"+line.get(4).toString
      +","+line.get(0)+"-"+line.get(1).toString+"-"+line.get(4).toString+"-"+line.get(5).toString
      +","+line.get(0)+"-"+line.get(1).toString+"-"+line.get(4).toString+"-"+line.get(5).toString+"-"+line.get(6).toString
    ).flatMap(line =>line.split(",")).map(line=>(line,1)).reduceByKey(_ + _).map(t=>check(t)).reduce(_ merge _ )

 // val s=res.map(line=>(line._1,line._2)).cache()
    //.map(t=>check(t)).reduce(_ merge _ )


    //  .reduce(_ merge _ )



         //coalesce(1, shuffle = true).saveAsTextFile("hdfs:///test/res3")

      // Converts one (dash-joined-key, count) pair into a JSON fragment whose
      // nesting depth mirrors the number of key components. Forward-referenced
      // from the pipeline above (legal for a local `def`).
      // NOTE(review): cases 5 and 6 are identical — a 6-part key silently
      // drops pk2(5); and a key with >6 parts throws MatchError (no default
      // case). The generated keys have at most 5 parts unless a column value
      // itself contains "-" — confirm which of these is intended.
      def  check(t:(String,Int))={
        val pk=t._1
        val num=t._2
        val pk2= pk.split("-")
        pk2.length match {
          case 1 => {
            // product-level total
            ("product_key"->pk2(0).toString) ~ ("count" -> num)
          }
          case 2 =>{
            // per-group count nested under group_id
            ("product_key"->pk2(0).toString) ~ ("group_id" ->Map(pk2(1).toString ->Map("count"->num)))
          }
          case 3 =>{
            ("product_key"->pk2(0).toString) ~ ("group_id" ->Map(pk2(1).toString ->Map(pk2(2).toString->Map("count"->num))))
          }
          case 4 =>{
            ("product_key"->pk2(0).toString) ~ ("group_id" ->Map(pk2(1).toString ->Map(pk2(2).toString->Map( pk2(3).toString->Map("count"->num) ))))
          }
          case 5 =>{
            // deepest level: the count is keyed directly by the 5th component
            ("product_key"->pk2(0).toString) ~ ("group_id" ->Map(pk2(1).toString ->Map(pk2(2).toString->Map( pk2(3).toString->Map(pk2(4).toString->num) ))))
          }
          case 6 =>{
            ("product_key"->pk2(0).toString) ~ ("group_id" ->Map(pk2(1).toString ->Map(pk2(2).toString->Map( pk2(3).toString->Map(pk2(4).toString->num) ))))
          }
        }
      }




   // a.saveAsTextFile("hdfs:///test/res")
   /* val  json=(a.map(
       t=> ("product_key"->t._1.get(0).toString)
         ~ ("count" ->t._2.toInt)
    ))*/
    //  ~ ("count" ->t._2)
    //val p=a.toJSON.map(line=>parse(line)).map(line=>pretty(line))

   //  val r=pretty(render(json))
     //sc.parallelize(compact(render(json))).saveAsTextFile("hdfs:///test/res")
   //println(pretty(render(json)))











  }


}
