package com.test

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
import org.apache.spark.sql.{Row, SparkSession}

import java.nio.charset.CodingErrorAction
import scala.io.{Codec, Source}

object GoodtoGood {

  // Shared SparkSession for the whole job; runs in local mode on all available cores.
  val spark = SparkSession.builder().appName("als-demo").master("local[*]").getOrCreate()

  /**
   * Reads the "like" ratings file and re-keys each row by user id.
   *
   * @return RDD of (userId, (itemId, rating))
   */
  def mapUserIdAndGoodsRatings(): RDD[(Int, (Int, Double))] = {

    // Expected columns of the ratings file.
    val ratingSchema = new StructType()
      .add("userId", IntegerType)
      .add("itemId", IntegerType)
      .add("rating", DoubleType)

    // Load the CSV with the explicit schema applied.
    val ratingDf = spark.read
      .schema(ratingSchema)
      .options(Map("inferSchema" -> "false", "delimiter" -> ", ", "header" -> "true"))
      .csv("D:\\PycharmProjects\\wordpress\\active_ran_like.txt")

    // Convert to an RDD keyed by user id for the later self-join.
    ratingDf.rdd.map {
      case Row(userId: Int, itemId: Int, rating: Double) =>
        (userId, (itemId, rating))
    }
  }

  // Variant of mapUserIdAndGoodsRatings: merges several behaviour files into implicit ratings.
  /**
   * Reads all four behaviour files (buy / collect / like / browse), counts each
   * recorded behaviour as an implicit rating of 1.0, and sums the ratings per
   * (userId, itemId) pair.
   *
   * @return RDD of (userId, (itemId, summedImplicitRating))
   */
  def mapUserIdAndGoodsRatingsPlus(): RDD[(Int, (Int, Double))] = {

    // One file per user behaviour type.
    val behaviourFiles: List[String] = List(
      "D:\\PycharmProjects\\wordpress\\active_ran_buy.txt",
      "D:\\PycharmProjects\\wordpress\\active_ran_collect.txt",
      "D:\\PycharmProjects\\wordpress\\active_ran_like.txt",
      "D:\\PycharmProjects\\wordpress\\active_ran_browse.txt")

    // Expected columns of every behaviour file.
    val behaviourSchema = new StructType()
      .add("userId", IntegerType)
      .add("itemId", IntegerType)
      .add("rating", DoubleType)

    // Read each file with the explicit schema, then union them all.
    val combined = behaviourFiles
      .map { file =>
        spark.read
          .schema(behaviourSchema)
          .options(Map("inferSchema" -> "false", "delimiter" -> ", ", "header" -> "false"))
          .csv(file)
      }
      .reduce(_ union _)

    // Every behaviour row counts as a fixed implicit rating of 1.0 —
    // the rating column in the file is type-checked but otherwise ignored.
    val implicitRating: Double = 1.0
    val perUserItem: RDD[((Int, Int), Double)] = combined.rdd.map {
      case Row(userId: Int, itemId: Int, _: Double) =>
        ((userId, itemId), implicitRating)
    }

    // Sum the implicit ratings of identical (userId, itemId) keys,
    // then re-key by user id for the later self-join.
    perUserItem
      .reduceByKey(_ + _)
      .map { case ((userId, itemId), ratingSum) => (userId, (itemId, ratingSum)) }
  }


  /**
   * Keeps only one orientation of each goods pair produced by the self-join.
   *
   * Accepts the record only when the first goods id is strictly smaller than
   * the second, which drops the mirrored (b, a) duplicate and self-pairs (a, a).
   */
  def filterDuplicateGoodsData(useridAndPairOfGoods: (Int, ((Int, Double), (Int, Double)))): Boolean = {
    val (_, ((firstGoodsId, _), (secondGoodsId, _))) = useridAndPairOfGoods
    firstGoodsId < secondGoodsId
  }


  /**
   * Drops the user id and regroups a joined record into
   * ((goodsId1, goodsId2), (rating1, rating2)).
   */
  def mapGoodsPairsWithRatings(userIdAndGoodsData: (Int, ((Int, Double), (Int, Double)))):
  ((Int, Int), (Double, Double)) = {
    val (_, ((firstGoodsId, firstRating), (secondGoodsId, secondRating))) = userIdAndGoodsData
    ((firstGoodsId, secondGoodsId), (firstRating, secondRating))
  }

  /**
   * Computes the cosine similarity between two goods from the rating pairs of
   * users who rated both of them.
   *
   * @param ratingPairs one (ratingX, ratingY) entry per co-rating user
   * @return (similarity, number of co-rating users); the similarity is 0.0
   *         (instead of NaN) when the input is empty or a rating vector is all zero
   */
  def computeCosineSimilarity(ratingPairs: Iterable[(Double, Double)]): (Double, Int) = {
    // Accumulate sum(x*x), sum(y*y), sum(x*y) and the pair count in one pass.
    val (sumXX, sumYY, sumXY, numOfPairs) =
      ratingPairs.foldLeft((0.0, 0.0, 0.0, 0)) {
        case ((xx, yy, xy, n), (ratingX, ratingY)) =>
          (xx + ratingX * ratingX, yy + ratingY * ratingY, xy + ratingX * ratingY, n + 1)
      }

    val denominator: Double = Math.sqrt(sumXX) * Math.sqrt(sumYY)
    // Guard 0/0: the original returned NaN for an empty iterable or a zero vector.
    val similarity: Double = if (denominator == 0.0) 0.0 else sumXY / denominator
    (similarity, numOfPairs)
  }



  /**
   * Loads the goodsId -> goodsName lookup table from a local comma-separated file.
   *
   * Malformed or unmappable UTF-8 bytes are replaced instead of aborting the read.
   *
   * @return map from goods id to goods name
   */
  def mapGoodsIdAndName(): Map[Int, String] = {

    implicit val codec: Codec = Codec("UTF-8")
    codec.onMalformedInput(CodingErrorAction.REPLACE)
    codec.onUnmappableCharacter(CodingErrorAction.REPLACE)

    val path = "D:\\PycharmProjects\\wordpress\\goodid_goodname.txt"

    // Fully materialize the map inside try, then close the file handle —
    // the previous version leaked the Source.
    val source = Source.fromFile(path)
    val idAndNameMapped: Map[Int, String] =
      try {
        source.getLines().map { line =>
          val fields = line.split(',')
          (fields.head.toInt, fields(1)) // goods id -> goods name
        }.toMap
      } finally {
        source.close()
      }

    // Print the first three entries as a sanity check.
    idAndNameMapped.take(3).foreach(println(_))

    idAndNameMapped
  }

  /**
   * Prints the (at most) 10 goods most similar to the goods id given in args.
   *
   * @param GoodsAndSimilarityScore ((goodsId1, goodsId2), (cosineSimilarity, coRatingUserCount))
   * @param args                    args(0) is the target goods id
   */
  def suggestTop10Goods(GoodsAndSimilarityScore: RDD[((Int, Int), (Double, Int))], args: Array[String]): Unit = {
    println("为你推荐以下商品: ")

    // Minimum cosine similarity for a pair to qualify.
    val scoreThreshold: Double = 0.8

    // Minimum number of users that must have rated the pair together.
    val coOccurrenceThreshold: Double = 10

    // Target goods id passed in as the first argument.
    val goodsId = args.head.toInt

    // Keep pairs that contain the target goods id and clear both thresholds.
    val qualifyingPairs: RDD[((Int, Int), (Double, Int))] = GoodsAndSimilarityScore
      .filter { case ((goods1, goods2), (score, coCount)) =>
        (goods1 == goodsId || goods2 == goodsId) &&
          score > scoreThreshold && coCount > coOccurrenceThreshold
      }

    // Sort by similarity (descending) and keep the 10 best matches.
    // The previous version took 20 *unsorted* pairs despite the method name.
    val top10: Array[((Int, Int), (Double, Int))] =
      qualifyingPairs.sortBy(_._2._1, ascending = false).take(10)

    val idAndGoodNames = mapGoodsIdAndName()
    println("与" + idAndGoodNames(goodsId) + " 比较相近的商品为:")
    top10.foreach { case ((goods1, goods2), (score, coCount)) =>
      // The other member of the pair is the recommendation.
      val suggestedGood = if (goodsId == goods2) goods1 else goods2
      println(suggestedGood, idAndGoodNames(suggestedGood), score, coCount)
    }
    println("-----推荐商品打印完成-----")
  }


  def main(args: Array[String]): Unit = {

    // Reduce Spark's log noise to errors only.
    Logger.getLogger("org").setLevel(Level.ERROR)

    // (userId, (itemId, aggregatedImplicitRating)) from all behaviour files.
    val userIdMappedWithMovieIdAndRating: RDD[(Int, (Int, Double))] = mapUserIdAndGoodsRatingsPlus()

    /**
     * Sample of the RDD for inspection:
     * (1001,(10000,3.0))
     * (1002,(10000,3.0))
     * (1003,(10000,3.0))
     */
    userIdMappedWithMovieIdAndRating.take(3).foreach(println(_))
    println("userIdMappedWithMovieIdAndRating")

    // Self-join on the user id: every combination of two goods rated by the
    // same user, as (userId, ((good1, rating1), (good2, rating2))).
    val pairOfGoodsWatchedBySameUser: RDD[(Int, ((Int, Double), (Int, Double)))] =
      userIdMappedWithMovieIdAndRating.join(userIdMappedWithMovieIdAndRating)

    pairOfGoodsWatchedBySameUser.take(3).foreach(println(_))
    println("pairOfGoodsWatchedBySameUser")

    // The self-join emits every pair twice ((a,b) and (b,a)) plus self-pairs
    // (a,a).  Keeping only good1 < good2 drops both kinds of duplicates while
    // preserving the rating-to-goods association.  (The previous
    // min/max + distinct approach kept self-pairs and, when it reordered a
    // pair, attached rating1 to the smaller id even if rating1 belonged to the
    // larger one.)
    val pairOfGoodsWithoutDuplicates: RDD[(Int, ((Int, Double), (Int, Double)))] =
      pairOfGoodsWatchedBySameUser.filter(filterDuplicateGoodsData)

    pairOfGoodsWithoutDuplicates.take(3).foreach(println(_))
    println("pairOfGoodsWithoutDuplicates")

    // Re-key as ((good1, good2), (rating1, rating2)).
    val GoodsPairAndRatings: RDD[((Int, Int), (Double, Double))] =
      pairOfGoodsWithoutDuplicates.map(mapGoodsPairsWithRatings)

    GoodsPairAndRatings.take(3).foreach(println(_))
    println("GoodsPairAndRatings")

    // Group all rating pairs of the same goods pair — the per-pair rating
    // "matrix" the similarity is computed from.
    val groupOfRatingPairsForSameGoodsPair: RDD[((Int, Int), Iterable[(Double, Double)])] =
      GoodsPairAndRatings.groupByKey()

    groupOfRatingPairsForSameGoodsPair.take(3).foreach(println(_))
    println("groupOfRatingPairsForSameGoodsPair")

    // Cosine similarity plus co-rating user count per goods pair.
    // mapValues leaves the key untouched.
    val goodPairsAndSimilarityScore: RDD[((Int, Int), (Double, Int))] =
      groupOfRatingPairsForSameGoodsPair.mapValues(computeCosineSimilarity)

    goodPairsAndSimilarityScore.take(3).foreach(println(_))
    println("goodPairsAndSimilarityScore")

    // Recommend goods similar to a hard-coded goods id for this demo run.
    // TODO: switch back to the real command-line args outside the IDE:
    //   if (args.nonEmpty) suggestTop10Goods(goodPairsAndSimilarityScore, args)
    //   else println("请输入电影id")
    val argList: Array[String] = Array("10486")
    suggestTop10Goods(goodPairsAndSimilarityScore, argList)
  }

}
