package com.hadwinling.alogriithm.projectforpso.miemiepso

import breeze.linalg._
import breeze.numerics._
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions.{col, desc, udf}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
object Function {

  /**
   * Sphere benchmark function: f(x) = Σ x_i², global minimum 0 at the origin.
   *
   * @param pos candidate particle position
   * @return sum of squared components
   */
  def sphere(pos: Array[Double]): Double = {
    // `x => x * x` avoids shadowing the `pos` parameter (original lambda reused the name `pos`).
    pos.map(x => x * x).sum
  }

  /**
   * PSO fitness for link prediction: weight the J/A/C/P/K similarity columns by the
   * particle position, score each candidate pair by the weighted sum, predict the
   * top-`predictedNum` pairs, and return the fraction of test pairs that were missed
   * (lower is better).
   *
   * @param pos          five weights applied, in order, to columns J, A, C, P, K
   * @param predictedNum number of top-scored pairs to predict (default 3, the original value)
   * @return missed-test-pair count divided by `predictedNum`
   */
  def linkPrediction(pos: Array[Double], predictedNum: Int = 3): Double = {
    require(pos.length >= 5, "linkPrediction needs 5 weights (J, A, C, P, K)")
    // NOTE(review): building the session inside the fitness function is expensive if PSO
    // evaluates many particles, but getOrCreate() reuses an existing session, so only the
    // first call pays the startup cost. Do not stop() the session here for that reason.
    val conf = new SparkConf().setAppName("SparkPSO").setMaster("local")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    val matrixSchema: StructType = StructType(Array(
      StructField("user1_id", StringType, true),
      StructField("user2_id", StringType, true),
      StructField("J", DoubleType, true),
      StructField("A", DoubleType, true),
      StructField("C", DoubleType, true),
      StructField("P", DoubleType, true),
      StructField("K", DoubleType, true)
    ))
    val matrixDF: DataFrame = spark.read.format("csv")
      .schema(matrixSchema)
      .option("header", "true")
      // BUG FIX: was "file://home/..." — with only two slashes, "home" is parsed as the
      // URI *authority*, not part of the path, so the file was looked up at the wrong
      // location. A local absolute path needs three slashes: file:///home/...
      .load("file:///home/hadoop/Documents/psoInput/matrix.csv")

    // One UDF per similarity column, each scaling its input by the matching PSO weight.
    // Replaces five copy-pasted computwJ/A/C/P/K helpers with an indexed factory.
    def weight(i: Int) = udf((x: Double) => pos(i) * x)

    val weightedDF: DataFrame = matrixDF.select(
      col("user1_id"), col("user2_id"),
      weight(0)(col("J")) as "J",
      weight(1)(col("A")) as "A",
      weight(2)(col("C")) as "C",
      weight(3)(col("P")) as "P",
      weight(4)(col("K")) as "K")

    // Combined score S = J+A+C+P+K (already weighted); the top-N pairs are the prediction.
    val scoredDF: DataFrame = weightedDF
      .selectExpr("user1_id", "user2_id", "J+A+C+P+K as S")
      .orderBy(desc("S"))
    val predictDS: Dataset[Row] = scoredDF.select("user1_id", "user2_id").limit(predictedNum)

    val testSchema: StructType = StructType(Array(
      StructField("user1_id", StringType, true),
      StructField("user2_id", StringType, true)
    ))
    val testDF: DataFrame = spark.read.format("csv")
      .schema(testSchema)
      .option("header", "true")
      // Normalized from "file:////home/..." to the canonical three-slash form so both
      // input paths use the same URI style.
      .load("file:///home/hadoop/Documents/psoInput/test.csv")

    // Test pairs the prediction failed to recover; fitness is the miss rate.
    // NOTE(review): if the test set holds more than `predictedNum` pairs this can
    // exceed 1.0 — same as the original behavior, preserved deliberately.
    val missed: Dataset[Row] = testDF.except(predictDS)
    1.0 * missed.count() / predictedNum
  }
}