import java.io.{File, PrintWriter}

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.fpm.{FPGrowth, FPGrowthModel}

object fpg {

  /**
   * FP-Growth frequent-itemset mining driver.
   *
   * Reads space-separated transactions from `xac.dat`, mines frequent itemsets
   * with Spark MLlib's FPGrowth, writes the itemsets (with frequencies) to
   * `fpout.txt`, then prints association rules filtered by a minimum confidence.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // 0. Build the Spark context.
    val conf = new SparkConf().setAppName("fpg")
    val sc = new SparkContext(conf)
    Logger.getRootLogger.setLevel(Level.WARN)

    try {
      // 1. Read the sample data: one transaction per line, items separated by
      //    a single space. Cached because FPGrowth makes multiple passes.
      val dataPath = "xac.dat"
      val transactions = sc.textFile(dataPath).map(_.split(" ")).cache()

      // 2. Model parameters (redundant .toDouble/.toInt removed; all vals —
      //    nothing here is ever reassigned).
      val minSupport = 0.092  // minimum support threshold
      val numPartition = 10   // number of partitions for the FP-Growth run
      val minConfidence = 0.6 // minimum confidence for generated rules

      val model = new FPGrowth()
        .setMinSupport(minSupport)
        .setNumPartitions(numPartition)
        .run(transactions)

      // 3. Write every frequent itemset and its occurrence count to a file.
      val writer = new PrintWriter(new File("fpout.txt"))
      try {
        writer.println(s"Number of frequent itemsets: ${model.freqItemsets.count()}")
        model.freqItemsets.collect().foreach { itemset =>
          writer.println(itemset.items.mkString("[", " ", "]") + ":" + itemset.freq)
        }
      } finally {
        // FIX: the original never closed the writer; PrintWriter buffers, so
        // without close() fpout.txt could be left truncated or empty.
        writer.close()
      }

      // 4. Association rules filtered by confidence.
      //    antecedent = left-hand side of the rule, consequent = right-hand side,
      //    confidence = rule confidence. In production these rules could be
      //    persisted to MySQL (and the final recommendation lists to Redis)
      //    for fast lookup at serving time.
      // FIX: collect the rules ONCE and reuse — the original called
      // generateAssociationRules(...).collect() twice, recomputing the rule
      // RDD a second time just to print its length.
      val rules = model.generateAssociationRules(minConfidence).collect()
      rules.foreach { rule =>
        println(rule.antecedent.mkString(",") + "-->" +
          rule.consequent.mkString(",") + "-->" + rule.confidence)
      }
      // Number of rules generated.
      println(rules.length)
    } finally {
      // FIX: release cluster resources; the original never stopped the context.
      sc.stop()
    }
  }
}
