package com.lmq

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions._

object SimilarityComputation {

  // Keep Spark's own logging quiet so the show() output below stays readable.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  // Local session for this demo job; `local[*]` uses every available core.
  val spark: SparkSession = SparkSession.builder()
    .master("local[*]")
    .appName("SimilarityComputation")
    .getOrCreate()

  /**
   * Computes a weighted similarity ("wh") score between each account and its
   * KNN candidate accounts, keeps the top-5 most similar accounts per user,
   * and prints those users' product interactions with an inverse-frequency
   * weight. Results are printed via `show`; nothing is returned.
   *
   * @param buyDf purchase/click records; expected columns include
   *              `asset_acct`, `data_type`, `prod_code`
   *              (schema is CSV-inferred — confirm against the input files)
   * @param knnDf KNN candidate pairs; expected columns include
   *              `asset_acct`, `similar_asset_acct`, `prediction`
   */
  def wh1(buyDf: Dataset[Row], knnDf: Dataset[Row]): Unit = {
    // Per (account, data_type): the list of product codes the account touched.
    val groupBuyDf = buyDf.groupBy("asset_acct", "data_type")
      .agg(collect_list("prod_code"))

    // Cross-join every KNN pair with every distinct data_type, then attach the
    // product lists of both sides of the pair. left_outer keeps pairs even
    // when one side has no activity for that data_type.
    val res = knnDf.crossJoin(
      buyDf.select("data_type").dropDuplicates()
    ).join(
      groupBuyDf.toDF("asset_acct", "data_type", "asset_acct_cl"),
      Seq("asset_acct", "data_type"),
      "left_outer")
      .join(
        groupBuyDf.toDF("similar_asset_acct", "data_type", "similar_asset_acct_cl"),
        Seq("similar_asset_acct", "data_type"),
        "left_outer"
      )
      // size() of a missing (null) array is null or -1 depending on the Spark
      // version, so greatest(size(...), 0) normalises absent lists to 0.
      .na.fill(0.0).select(
      col("asset_acct"), col("similar_asset_acct"), col("data_type"),
      greatest(size(col("asset_acct_cl")), lit(0)).as("asset_acctcl"),
      greatest(size(col("similar_asset_acct_cl")), lit(0)).as("simi_acctcl"),
      // size of the intersection of the two product lists
      greatest(size(array_intersect(col("asset_acct_cl"), col("similar_asset_acct_cl"))), lit(0)).as("inter_size")
    )

    res.show(false)

    // Weighted Jaccard-style term: (ae * |A ∩ B|) / (b * |A ∪ B|), where
    // |A ∪ B| = x + y - z. The weight ae doubles for data_type "1.0" and
    // b = 3 is a fixed damping constant. Returns 0 when the union is empty
    // to avoid division by zero.
    val computeDive = udf((x: Double, y: Double, z: Double, alp: String) => {
      if (x + y - z == 0) {
        0.0
      } else {
        val b = 3
        val ae = if (alp == "1.0") 2.0 else 1.0
        (ae * z) / (b * (x + y - z))
      }
    })
    // Blend the summed per-data_type similarity with the KNN model's own
    // prediction: whscore = whOne * (1 + prediction).
    val compWH = udf((x: Double, y: Double) => x * (1 + y))

    res.select(
      col("asset_acct"),
      col("similar_asset_acct"),
      col("data_type"),
      col("asset_acctcl"),
      col("simi_acctcl"),
      col("inter_size"),
      computeDive(col("asset_acctcl"),
        col("simi_acctcl"),
        col("inter_size"),
        col("data_type")).as("sumCols")
    )
      // Sum the per-data_type terms into one score per account pair.
      .groupBy(col("asset_acct"), col("similar_asset_acct"))
      .agg(sum("sumCols").as("whOne"))
      .join(knnDf,
        Seq("asset_acct", "similar_asset_acct"),
        "left_outer"
      )
      .select(
        col("asset_acct"),
        col("similar_asset_acct"),
        compWH(
          col("whOne"),
          col("prediction")
        ).as("whscore")
      )
      .createOrReplaceTempView("res")

    // count() produces a LongType column, so the UDF must accept Long — an
    // Int parameter would rely on Spark inserting a narrowing cast.
    val divOne = udf((x: Long) => 1.0 / x)
    // frequency = 1 / (number of interactions the account had with the product)
    val jframe = buyDf
      .groupBy("asset_acct", "prod_code")
      .agg(divOne(count(col("data_type"))).as("frequency"))

    // Rank candidates per account by whscore and keep the top 5.
    val lframe: Dataset[Row] = spark.sql(
      """
        |select
        |asset_acct,
        |similar_asset_acct,
        |whscore,
        |row_number() over(partition by asset_acct order by whscore desc) as ranking
        |from  res
        |
        |""".stripMargin)
      .filter("ranking<=5")
    // NOTE(review): this joins jframe on the account's OWN purchases
    // (asset_acct), but the documented output below describes the SIMILAR
    // user's purchase frequency — confirm whether the intended join key is
    // similar_asset_acct. Left unchanged to preserve current behavior.
    lframe
      .join(
        jframe,
        Seq("asset_acct"),
        "left_outer"
      ).createOrReplaceTempView("reslt")
    spark.sql(
      """
        |select
        |asset_acct,
        |ranking,
        |frequency,
        |prod_code
        |from reslt
        |order by asset_acct,ranking,frequency
        |""".stripMargin)
      .show(1000, truncate = false)

    /**
     * Output columns:
     *  - asset_acct: user id
     *  - ranking:    similarity rank of the similar user (1 = most similar)
     *  - frequency:  reciprocal of the number of times the prod_code was
     *                bought/clicked, i.e. 1 / count()
     *  - prod_code:  product that was bought/clicked
     *
     * Example:
     * +--------------------------------+-------+---------+-------------+
     * |asset_acct                      |ranking|frequency|prod_code    |
     * +--------------------------------+-------+---------+-------------+
     * |00138189155c186ab8bb87a1d1ee1ee1|1      |1.0      |F011077      |
     * +--------------------------------+-------+---------+-------------+
     */
  }

  /** Entry point: loads the sample CSV files and runs the similarity job. */
  def main(args: Array[String]): Unit = {
    val buy_file = "src/main/resources/sample_buy_info.csv"
    val knn_file = "src/main/resources/sample_knn_top50.csv"

    // CSV reader options are case-insensitive, so "inferschema" is honored
    // the same as "inferSchema".
    val read = spark.read.option("header", true)
      .option("inferschema", true)

    val buy_df: DataFrame = read.csv(buy_file)
    val knn_df = read.csv(knn_file)
    wh1(buy_df, knn_df)
  }
}
