package spark.mllib.synonyms

import org.apache.spark.ml.feature.{LabeledPoint, Word2Vec}
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.Any
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}


/**
  * Word2Vec-based synonym/similarity experiment.
  *
  * Trains a Word2Vec model per corpus, turns each document row into a dense
  * numeric vector with its L2 norm, scores every (left, right) document pair
  * with cosine similarity, and shows the top-3 matches per left document.
  *
  * Created by liuwei on 2017/9/6.
  */
object SynonymsTest {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("ChiSquareTest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)
    val spark = SparkSession.builder.getOrCreate()
    import spark.implicits._

    // Training corpus for the first Word2Vec model.
    val documentDF = spark.createDataFrame(Seq(
      "Hi I heard about Spark".split(" "),
      "I wish Java could use case classes".split(" "),
      "Logistic regression models are neat test".split(" ")
    ).map(Tuple1.apply)).toDF("text")

    // Second corpus: the right-hand side of the similarity comparison.
    val documentDF2 = spark.createDataFrame(Seq(
      "Hi I heard about Spark".split(" "),
      "scala is good  f jjjj kkkk k".split(" "),
      "Logistic regression models are neat test".split(" "),
      "I wish Java could use case classes I ok".split(" "),
      "I am from china".split(" ")
    ).map(Tuple1.apply)).toDF("text")

    // Learn a mapping from words to Vectors (one model per corpus).
    val word2Vec = new Word2Vec()
      .setInputCol("text")
      .setOutputCol("result")
      .setVectorSize(4)
      .setMinCount(1)
    val model = word2Vec.fit(documentDF)

    val word2Vec2 = new Word2Vec()
      .setInputCol("text")
      .setOutputCol("result")
      .setVectorSize(4)
      .setMinCount(1)
    val model2 = word2Vec2.fit(documentDF2)

    // Word count as Double ("wordSize") and as Int (surrogate per-row id).
    val wordSizeUDF = udf { arr: mutable.WrappedArray[String] => arr.length.toDouble }
    val rowIdUDF = udf { arr: mutable.WrappedArray[String] => arr.length }

    val rightDF = model2.transform(documentDF2)
      .withColumn("wordSize", wordSizeUDF(col("text")))
      .withColumn("rightId", rowIdUDF(col("text")))
      .drop(col("text"))
    // Left side: the short documents of the right side, id column relabeled.
    val leftDF = rightDF.filter("rightId < 7").withColumnRenamed("rightId", "leftId")

    leftDF.show(false)
    rightDF.show(false)

    val leftId = "leftId"
    val rightId = "rightId"

    // Broadcast the set of left-side ids. Keep the Broadcast handle and call
    // .value inside executor closures; dereferencing on the driver would ship
    // the set by closure capture and defeat the broadcast.
    val leftIdValue = leftDF.select(leftId).rdd.map(_.get(0)).collect().toSet
    val leftIdBc = sc.broadcast(leftIdValue)
    println(" leftIdValueSet: " + leftIdBc.value)

    // Flatten a Row of (id, numeric columns / ML vectors) into
    // (id, concatenated values, L2 norm of those values).
    // A function value (not a local def) so Spark closures referencing it do
    // not capture the enclosing non-serializable object.
    val rowToVector: Row => (Any, ArrayBuffer[Double], Double) = row => {
      val res = new ArrayBuffer[Double]
      for (i <- 1 until row.size)
        row.get(i) match {
          case x: Double => res += x
          case x: Vector => res ++= x.toArray
          case _ =>
        }
      (row.get(0), res, math.sqrt(res.map(num => math.pow(num, 2)).sum))
    }

    // Keep only rows whose id is in the left id set. flatMap over Option
    // replaces the old -1.0 sentinel plus exact Double comparison.
    val leftArr = rightDF.select("rightId", "result").rdd.flatMap { row =>
      if (leftIdBc.value.contains(row.get(0))) Some(rowToVector(row)) else None
    }
    // first() is computed once; it throws on an empty RDD, matching the
    // original behavior, but avoids recomputing the lineage twice.
    val firstLeft = leftArr.first()
    println(" leftArr: " + firstLeft + "size" + firstLeft._2.size)

    val rightArr = rightDF.select("rightId", "result").rdd.map(rowToVector)

    leftArr.foreach(println)
    println("===============")
    rightArr.foreach(println)

    // Cartesian product via broadcast of the (small) collected left side.
    val broadcastRdd = sc.broadcast(leftArr.collect())
    val result = rightArr.flatMap { arr2 =>
      broadcastRdd.value.map { arr1 =>
        val value = SimilarityUtil.cosineSimilarity(arr1._2, arr2._2, arr1._3, arr2._3)
        Row(arr1._1.toString, arr2._1.toString, value)
      }
    }

    val schema = StructType(Seq(
      StructField(leftId, DataTypes.StringType),
      StructField(rightId + "_RLT", DataTypes.StringType),
      StructField("value", DataTypes.DoubleType)
    ))
    val re = spark.createDataFrame(result, schema)

    // Top-3 most similar right documents per left document.
    val w = Window.partitionBy(leftId).orderBy($"value".desc)
    re.withColumn("rank", row_number.over(w)).where($"rank" <= 3).show
  }

}
