package com.gizwits.MLlib

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

object DataInfoStatistics {

  /**
   * One-shot batch job: reads nine datapoint columns (s_dp0..s_dp8) for a
   * single product/day from Hive, computes per-column max / mean / min with
   * MLlib's column statistics, and writes the result to HDFS as a single
   * pretty-printed JSON document of the shape
   * {"pk": ..., "s_dp0": {"max":..,"mean":..,"min":..}, ...}.
   */
  def main(args: Array[String]): Unit = {
    val filePath = "hdfs:///test/datainfo"
    val pk = "503802cf3c3d4331a651fdc44f024631"

    // Generate the nine projection clauses instead of hand-writing them, and
    // interpolate `pk` so the product key is not duplicated as a raw literal
    // (the original SQL repeated the key inline, risking silent drift).
    val dpColumns = (0 to 8).map(i => s"""t.info["s_dp$i"] as s_dp$i""").mkString(", ")
    val dailySql =
      s"""select $dpColumns from analyzedb.t_cmd_datapoint t
         | where product_key="$pk" and year = 2015 and month = 6 and day = 25""".stripMargin

    val sparkConf = new SparkConf().setAppName("datainfo")
    val sc = new SparkContext(sparkConf)
    try {
      // Remove any previous run's output so saveAsTextFile does not fail.
      val hdfs = FileSystem.get(new Configuration())
      val outPath = new Path(filePath)
      if (hdfs.exists(outPath)) hdfs.delete(outPath, true)

      val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)
      val df = sqlContext.sql(dailySql)

      // Convert each Row straight to a dense vector. The original did
      // mkString(",") followed by split(","), a serialize/parse round-trip
      // that would mis-split any value whose string form contains a comma.
      // NOTE(review): assumes every cell is non-null and numeric-parseable,
      // same as the original code — confirm against the Hive schema.
      val vectors = df.map { row =>
        Vectors.dense((0 until row.length).map(i => row.get(i).toString.toDouble).toArray)
      }

      val summary = Statistics.colStats(vectors)
      // One row per statistic, one column per datapoint: rows are max, mean, min.
      val statRows = Array(summary.max.toArray, summary.mean.toArray, summary.min.toArray)

      /**
       * Transposes a rectangular matrix.
       * Assumes a non-empty input with equal-length rows.
       */
      def transpose(xss: Array[Array[Double]]): Array[Array[Double]] =
        xss.head.indices.map(i => xss.map(_(i))).toArray

      /**
       * Pairs each datapoint name ("s_dp0".."s_dp8") with its statistics map.
       * Each column of the transposed matrix holds (max, mean, min) in order.
       */
      def toStats(columns: Array[Array[Double]]): List[(String, Map[String, Double])] =
        columns.zipWithIndex.map { case (col, i) =>
          (s"s_dp$i", Map("max" -> col(0), "mean" -> col(1), "min" -> col(2)))
        }.toList

      sc.parallelize(toStats(transpose(statRows)))
        .map { case (name, stats) => ("pk" -> pk) ~ (name -> stats) }
        .map(json => (pk, json))
        .reduceByKey(_ merge _)      // fold all per-column JSON objects into one document
        .map(kv => pretty(render(kv._2)))
        .coalesce(1, shuffle = true) // emit a single output file
        .saveAsTextFile(filePath)
    } finally {
      // Fix: the original never stopped the context, leaking the application.
      sc.stop()
    }
  }
}


