package org.zjt.spark.book

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
  * Builds an item co-occurrence matrix from user interaction logs; the Pearson
  * correlation coefficient between item pairs is to be computed from it (see TODO below).
  *
  * Created by Administrator on 2017/6/22.
  */
object RelationProduct extends App {
  // Spark setup. Never reassigned, so `val` rather than `var`.
  val sparkConf = new SparkConf().setMaster("local[2]").setAppName("RecommendProduct")
  val sc = new SparkContext(sparkConf)

  // 1. Re-key the log by user: (user, "item:score:totalVisitorsOfItem").
  //    Input rows look like: i161,u2625,click,2014/9/18 15:03
  val rdd = sc.textFile("D:\\Idea workspace\\scala-demo\\src\\main\\resource\\(sample)sam_tianchi_2014002_rec_tmall_log.csv")
    .distinct().coalesce(2).map { line =>
      val fields = line.split(",")
      // Interaction weight: click = 1, cart = 2, anything else (e.g. a purchase) = 3.
      val score = fields(2) match {
        case "click" => 1
        case "cart"  => 2
        case _       => 3
      }
      (fields(0), fields(1) + ":" + score)
    }.groupByKey().flatMap { case (item, userScores) =>
      // Number of distinct users who touched this item, attached to every record.
      val totalP = userScores.size
      userScores.map { entry =>
        val parts = entry.split(":") // parts(0) = user, parts(1) = score
        (parts(0), item + ":" + parts(1) + ":" + totalP)
      }
    }.persist() // cached here because the self-join below consumes `rdd` twice

  // 2. Self-join on user to form item pairs seen by the same user, drop the
  //    (x, x) diagonal, canonicalize pair order so (a, b) and (b, a) collapse,
  //    then group by the "itemA:itemB" pair key.
  val rdd2 = rdd.join(rdd)
    .filter { case (_, (left, right)) => !left.equals(right) }
    .map { case pair @ (user, (left, right)) =>
      // Order the two sides lexicographically so duplicates can be removed.
      if (left > right) (user, (right, left)) else pair
    }
    .distinct()
    .map { case (_, value @ (left, right)) =>
      (left.split(":")(0) + ":" + right.split(":")(0), value)
    }
    .groupByKey() // accumulate the association vectors of all users per item pair

  // TODO: compute the Pearson correlation coefficient per item pair here.

  println(rdd2.collect().mkString("\n"))
  Thread.sleep(300000) // keep the driver (and Spark UI) alive for inspection
  sc.stop() // side-effecting call: keep the parentheses
}
