package com.hadwinling.alogriithm.projectforpso.onepso

import breeze.linalg.sum
import breeze.numerics.pow
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions.{col, desc, udf}
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}

import scala.util.Random

object OnePSO {

  /** Number of particles in the swarm. */
  val pop_size = 5
  /** Maximum number of PSO iterations. */
  val iter_max = 260
  /** Dimensionality of the search space — one weight per similarity metric (J, A, C, P, K). */
  val dim = 5
  var t = 0 //loop counter
  /*initialize the global best :fitness,pos,vel*/
  // Seeded with +Inf fitness so any real particle fitness replaces it (lower is better).
  var gb: (Double, (Array[Double], Array[Double])) = (Double.PositiveInfinity, (Array.fill(dim)(0.0), Array.fill(dim)(0.0)))
  /** Number of RDD partitions used when parallelizing the swarm. */
  val partitionsNum = 4
  //  val conf = new SparkConf().setAppName("SparkPSO").setMaster("spark://211.69.243.34:7077")
  val conf = new SparkConf().setAppName("SparkPSO").setMaster("local")

  /**
   * PSO driver. Each particle is a weight vector scored by `linkPrediction`;
   * lower fitness is treated as better (the global best is the minimum of an
   * ascending sort). One update step is performed before the loop, then the
   * loop repeats (re-evaluate fitness, re-broadcast, update) until `iter_max`.
   */
  def main(args: Array[String]): Unit = {
    val time_start = System.nanoTime
    val sc = new SparkContext(conf)
    /* initialize the pos and vel of every particle individual:(num,(pos,vel,pb_pos))*/
    // Positions, velocities, and personal bests all start as uniform random in [0,1).
    var particals_tmp: RDD[(Int, (Array[Double], Array[Double], Array[Double]))] = sc.parallelize(List.range(0, pop_size), partitionsNum)
      .map(num => (num, (Array.fill(dim)(Random.nextDouble()), Array.fill(dim)(Random.nextDouble()), Array.fill(dim)(Random.nextDouble()))))

    // Attach fitness values per partition: (num,(pos,vel,fitness,pb_pos,pb_fitness)).
    var particles_fitness: RDD[(Int, (Array[Double], Array[Double], Double, Array[Double], Double))] = particals_tmp.mapPartitions(addFitness).persist()
    // particlesBC maps num -> (pos, vel, pb_pos).
    // NOTE(review): the third element is pb_pos (x._2._4), not gb_pos as an
    // earlier comment claimed — the global best travels separately via gbBC.
    var particlesBC: Broadcast[collection.Map[Int, (Array[Double], Array[Double], Array[Double])]] = sc.broadcast(particles_fitness.map(x => (x._1, (x._2._1, x._2._2, x._2._4))).collectAsMap)
    gb = particles_fitness.map(x => (x._2._3, (x._2._1, x._2._2))).sortByKey(true).take(1)(0)
    //gbBC:(fitness,(pos,vel))
    /*find the global best */
    var gbBC: Broadcast[(Double, (Array[Double], Array[Double]))] = sc.broadcast(gb)
    t = t + 1
    /* calling the function and update the information of particles */
    var particlesArray: Array[(Int, (Array[Double], Array[Double], Array[Double]))] = sc.parallelize(List.range(0, pop_size), partitionsNum).map { x =>
      //      PSOStuff.updateParticle(x, particlesBC.value(1)._1, particlesBC.value(1)._2, particlesBC.value(1)._3, gbBC.value._2._1)
      //      update the particle
      updateParticle(x, particlesBC.value(x)._1, particlesBC.value(x)._2, particlesBC.value(x)._3, gbBC.value._2._1)
    }.collect
    //cleaning: the broadcast map is rebuilt every iteration, so destroy it after use
    particlesBC.destroy

    while (t < iter_max) {
      // Re-score the updated swarm and re-broadcast its state.
      // NOTE(review): each iteration's persist() is never unpersisted — the
      // previous cached RDD leaks until GC/eviction; consider unpersisting.
      particles_fitness = sc.parallelize(particlesArray, partitionsNum).mapPartitions(addFitness).persist()
      particlesBC = sc.broadcast(particles_fitness.map(x => (x._1, (x._2._1, x._2._2, x._2._4))).collectAsMap)
      /*find the global best */
      // NOTE(review): gb is recomputed from the current swarm only; it is not
      // merged with the previous gb, so the "global best" can get worse.
      gb = particles_fitness.map(x => (x._2._3, (x._2._1, x._2._2))).sortByKey(true).take(1)(0)
      gbBC.destroy
      gbBC = sc.broadcast(gb)
      //print the information of every particle in swarm of every iteration
      /* List.range(0, pop_size).foreach { j =>
         println("num:" + particlesArray(j)._1)
         print("gb_pos:")
         List.range(0, dim).foreach { i =>
           print(gb._2._1(i) + "\t")
         }
         println
         println("gb_fitness:" + gb._1)
         print("pos:")
         List.range(0, dim).foreach { i =>
           print(particlesArray(j)._2._1(i) + "\t")
         }
         println
         print("vel:")
         List.range(0, dim).foreach { i =>
           print(particlesArray(j)._2._2(i) + "\t")
         }
         println
         print("pb_pos:")
         List.range(0, dim).foreach { i =>
           print(particlesArray(j)._2._3(i) + "\t")
         }
         println
         println
       }*/
      t = t + 1
      /*calling the function and update the information of particles*/
      particlesArray = sc.parallelize(List.range(0, pop_size), partitionsNum).map { x =>
        //          PSOStuff.updateParticle(x, particlesBC.value(1)._1, particlesBC.value(1)._2, particlesBC.value(1)._3, gbBC.value._2._1)
        updateParticle(x, particlesBC.value(x)._1, particlesBC.value(x)._2, particlesBC.value(x)._3, gbBC.value._2._1)
      }.collect



      //cleaning
      particlesBC.destroy
    }
    // Wall-clock runtime in seconds.
    val runtime = (System.nanoTime - time_start) / 1e9d
    //    println("Timer", runtime)
    //    println(sc.defaultParallelism)
    //    println(Random.nextDouble)
    sc.stop()
    //    particlesBC.value.foreach(println)
    //      println(gbBC.value._2._1)
    //    println(gbBC.id)
  }

  /** Classic sphere benchmark function: f(x) = Σ xᵢ². Minimum is 0 at the origin. */
  def sphere(pos: Array[Double]) = {
    pos.map(x => math.pow(x, 2)).sum
  }

  /**
   * Fitness of a weight vector for the link-prediction task.
   *
   * Loads a pairwise similarity matrix CSV, weights its five similarity
   * columns (J, A, C, P, K) by `pos(0)`..`pos(4)`, ranks candidate pairs by
   * the weighted sum S, takes the top `predictedNum` pairs as predictions,
   * and scores them against the test-set pairs.
   *
   * BUG FIX: the original match-counting loop was commented out, leaving
   * `count` permanently 0 — every particle scored 0.0 and the PSO optimized
   * a constant. The overlap is now computed with a Dataset `intersect`.
   *
   * @param pos weight vector of length `dim` (one weight per metric column)
   * @return fraction of the `predictedNum` predictions found in the test set,
   *         in [0.0, 1.0]
   *
   * NOTE(review): the driver treats LOWER fitness as better (ascending sort,
   * `newfitness < fitness`), but this value is an accuracy to MAXIMIZE —
   * confirm whether `1.0 - accuracy` was intended.
   * NOTE(review): this function runs inside executors (via mapPartitions);
   * building a SparkSession there only works with master "local" — on a real
   * cluster this must be restructured so Spark jobs run on the driver only.
   */
  def linkPrediction(pos: Array[Double]) = {
    // Number of top-ranked pairs to emit as predictions.
    val predictedNum = 3
    val spark1: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // Schema of the similarity matrix: a pair of user ids plus five metrics.
    val myManualSchema: StructType = StructType(Array(
      StructField("user1_id", StringType, true),
      StructField("user2_id", StringType, true),
      StructField("J", DoubleType, true),
      StructField("A", DoubleType, true),
      StructField("C", DoubleType, true),
      StructField("P", DoubleType, true),
      StructField("K", DoubleType, true),
    ))
    val matrixDF: DataFrame = spark1.read.format("csv")
      .schema(myManualSchema)
      .option("header", "true")
      .load("file:///home/hadoop/Downloads/psoInput/matrix.csv")

    // One UDF per metric column; each scales the metric by its weight in pos.
    def computwJ(x: Double): Double = {
      pos(0) * x
    }

    def computwA(x: Double): Double = {
      pos(1) * x
    }

    def computwC(x: Double): Double = {
      pos(2) * x
    }

    def computwP(x: Double): Double = {
      pos(3) * x
    }

    def computwK(x: Double): Double = {
      pos(4) * x
    }

    val wJ = udf(computwJ(_: Double))
    val wA = udf(computwA(_: Double))
    val wC = udf(computwC(_: Double))
    val wP = udf(computwP(_: Double))
    val wK = udf(computwK(_: Double))

    // Weighted metrics, combined score S, and the top-N pairs by S.
    val preSdf: DataFrame = matrixDF.select(col("user1_id"), col("user2_id"), wJ(col("J")) as "J", wA(col("A")) as "A"
      , wC(col("C")) as "C", wP(col("P")) as "P", wK(col("K")) as "K")
    val Sdf: DataFrame = preSdf.selectExpr("user1_id", "user2_id", "J+A+C+P+K as S").orderBy(desc("S"))
    val predictDS: Dataset[Row] = Sdf.select("user1_id", "user2_id").limit(predictedNum)

    // Ground-truth pairs to score against.
    val myManualSchema2: StructType = StructType(Array(
      StructField("user1_id", StringType, true),
      StructField("user2_id", StringType, true),
    ))
    val testDF: DataFrame = spark1.read.format("csv")
      .schema(myManualSchema2)
      .option("header", "true")
      .load("file:///home/hadoop/Downloads/psoInput/test.csv")

    // Number of predicted pairs that also occur in the test set (this replaces
    // the previously commented-out nested loop that never ran).
    val count = predictDS.intersect(testDF).count()

    val fitness = 1.0 * count / predictedNum
    fitness
  }

  /**
   * Annotate every particle in a partition with two fitness values: that of
   * its current position and that of its personal-best position.
   *
   * Input elements:  (num, (pos, vel, pb_pos))
   * Output elements: (num, (pos, vel, fitness, pb_pos, pb_fitness))
   */
  def addFitness(particle: Iterator[(Int, (Array[Double], Array[Double], Array[Double]))]) = {
    particle.map { case (num, (pos, vel, pbPos)) =>
      // Evaluate current position first, then the personal best (same order
      // as before, so the two linkPrediction calls happen in the same sequence).
      val posFitness = linkPrediction(pos)
      val pbFitness = linkPrediction(pbPos)
      (num, (pos, vel, posFitness, pbPos, pbFitness))
    }
  }

  /**
   * Advance a single particle by one PSO step.
   *
   * Standard velocity/position update:
   *   v'ᵢ = w·vᵢ + c1·r1·(pbᵢ − xᵢ) + c2·r2·(gbᵢ − xᵢ)
   *   x'ᵢ = xᵢ + v'ᵢ
   * then the personal best is replaced when the new position improves on it
   * (lower fitness is better, matching the driver's ascending-sort minimum).
   *
   * BUG FIX: the original compared the new fitness against the fitness of the
   * CURRENT position (`linkPrediction(pos)`), not the personal best — so
   * `pb_pos` could be overwritten by a position worse than the recorded best.
   * The comparison now uses `linkPrediction(pb_pos)`.
   *
   * @param num    particle index (passed through unchanged)
   * @param pos    current position
   * @param vel    current velocity
   * @param pb_pos personal-best position so far
   * @param gb_pos global-best position of the swarm
   * @return (num, (newPos, newVel, newPbPos))
   */
  def updateParticle(num: Int, pos: Array[Double], vel: Array[Double], pb_pos: Array[Double], gb_pos: Array[Double]) = {
    // Acceleration factors (c1, c2) and inertia weight (w) are loop-invariant,
    // so they are hoisted out of the per-dimension update.
    val c1 = 1
    val c2 = 1
    val w = 1
    val newpos: Array[Double] = Array.fill(dim)(0.0)
    val newvel: Array[Double] = Array.fill(dim)(0.0)
    List.range(0, dim).foreach { i =>
      /* Apply the PSO update formula to each dimension. */
      newvel(i) = w * vel(i) + c1 * Random.nextDouble() * (pb_pos(i) - pos(i)) + c2 * Random.nextDouble() * (gb_pos(i) - pos(i))
      newpos(i) = pos(i) + newvel(i)
    }
    // Update the personal best only if the new position beats the OLD personal
    // best (not merely the previous position). clone() keeps the returned
    // arrays independent of the caller's buffers, as the original copies did.
    val pbFitness: Double = linkPrediction(pb_pos)
    val newFitness: Double = linkPrediction(newpos)
    val newpb_pos: Array[Double] =
      if (newFitness < pbFitness) newpos.clone() else pb_pos.clone()
    (num, (newpos, newvel, newpb_pos))
  }

}