package spark.work

//import breeze.linalg.{max, sum}
import java.util

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Column, DataFrame, SQLContext, SparkSession}
import org.apache.spark.sql.catalyst.expressions.Literal

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.functions.{col, column, row_number}
import org.apache.spark.sql.types.{DataTypes, StructType}
import org.apache.spark.sql.expressions._
//import org.apache.spark.sql.functions._
import org.apache.spark.sql.functions.{col, row_number}
/**
  * Demo of a row-to-column (pivot) transformation on a small student-score
  * DataFrame: one output row per student, one set of pivoted columns per
  * distinct subject, aggregating score / rank / invigilator.
  *
  * Created by liuwei on 2017/11/8.
  */
object RowToColumn {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("RowToColumnTest").setMaster("local[8]")
    // Build the session directly from the conf; the previous code created an
    // unused SparkContext and never passed the conf to the builder.
    val ss = SparkSession.builder.config(sparkConf).getOrCreate()
    import ss.implicits._
    import org.apache.spark.sql.functions._

    // Columns: name, gender, subject, score, rank, invigilator.
    val df = ss.createDataFrame(Seq(
      ("张三","女","数学",60,15,"张老师"),
      ("张三","女","语文",70,12,"李老师"),
      ("张三","女","物理",50,22,"张老师"),
      ("王二","男","数学",50,16,"赵老师"),
      ("王二","男","语文",60,20,"孙老师"),
      ("王二","男","历史",50,22,"李老师")
    )).toDF("姓名", "性别","科目","成绩","排名","监考老师")
    df.createOrReplaceTempView("test")

    val rowLabel = Seq("姓名")
    // BUG FIX: the original used "教师", which is not a column of df; the
    // actual column name is "监考老师", so every select on columnLabel failed.
    val columnLabel = Seq("科目", "监考老师")
    val valueLabel = Seq("成绩", "排名")

    // Distinct pivot key values, taken from the FIRST column label (科目).
    // The trailing .distinct is required: distinct over (科目, 监考老师) pairs
    // can still repeat a 科目 value (e.g. 数学 with two different teachers),
    // and pivot() must not receive duplicate values.
    val keyColumnArr = df.select(columnLabel.head, columnLabel.tail: _*)
      .distinct().collect().map(_.get(0).toString).distinct

    // arr: intended display names of the output columns (row labels first,
    // then one "科目(<subject>)_value(<metric>)" name per pivoted column).
    val arr = ArrayBuffer.empty[String]
    rowLabel.foreach(arr += _)
    // keyArr: the set of label columns that must uniquely identify a record.
    val keyArr = arr.clone()
    columnLabel.foreach(keyArr += _)

    for (keyColumn <- keyColumnArr; value <- valueLabel) {
      arr += s"${columnLabel.head}($keyColumn)_value($value)"
    }

    // Row+column labels must uniquely identify each input row, otherwise the
    // pivot would silently collapse distinct records.
    if (df.select(keyArr.head, keyArr.tail: _*).distinct().count() < df.count())
      throw new Exception("数据行列标签不能确定唯一值！")

    val groupCols = rowLabel.map(col(_))
    val groupDF = df.groupBy(groupCols: _*)

    // Aggregations applied inside each pivoted cell. ArrayBuffer replaces the
    // deprecated mutable.MutableList used previously; the local `column` val
    // that shadowed functions.column was removed.
    val exprs = ArrayBuffer.empty[Column]
    exprs += first(col("成绩"))
    exprs += max(col("监考老师"))
    groupDF.pivot(columnLabel.head, keyColumnArr).agg(exprs.head, exprs.tail: _*).show(false)

    println(keyArr.size)
    keyArr.indices.foreach(println)

    ss.stop()
  }

}
