package com.datamining.rec_test

import org.apache.spark.mllib.fpm.FPGrowth
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * FP-growth market-basket analysis: mines frequent item sets from paid,
  * non-gift order lines in Hive and writes confidence-filtered association
  * rules back to Hive (yamibuy_recommend.re_recommand_item) for later
  * export to MySQL via sqoop.
  *
  * @author Administrator kevin
  * @create 2017-03-11 16:51
  */
object FP_growthTest {
  def main(args: Array[String]): Unit = {
    // NOTE: the "yarn-cluster" master URL was removed in Spark 2.x (which this
    // code targets, since it uses SparkSession). Use "yarn" here and select
    // cluster mode with `--deploy-mode cluster` at spark-submit time. Ideally
    // the master would not be hard-coded at all and spark-submit would set it.
    val sparkConf = new SparkConf()
      .setMaster("yarn")
      .setAppName("my_test")

    //    val jdbc_url = "jdbc:mysql://192.168.20.234:3306/[mytest]"
    //    val table_name = "statistics_item_repurchase_rate"
    //    val properties = new Properties()
    //    properties.setProperty("user", "root")
    //    properties.setProperty("password", "root")
    //    properties.setProperty("driver", "com.mysql.jdbc.Driver")

    // Create a Hive-enabled SparkSession; the Hive metastore supplies both the
    // source tables and the destination table.
    val sparkSession = SparkSession.builder().appName("kevin-BuyBackHiveStatisticToMysqlByDay").config(conf = sparkConf).enableHiveSupport().getOrCreate()

    import sparkSession.sql

    // One row per (order, goods): these become the FP-growth transactions.
    val query_order_item_sql = "SELECT xysc_order_goods.order_id AS order_id,  xysc_order_goods.goods_id AS goods_id,  im_item.category_id AS category_id,  im_item.item_number AS item_number,  COUNT(*) AS num  FROM   Yamibuy_Master.xysc_order_info   INNER JOIN Yamibuy_Master.xysc_order_goods ON (xysc_order_info.order_id = xysc_order_goods.order_id)   INNER JOIN Yamibuy_IM.im_item ON (xysc_order_goods.goods_id = im_item.goods_id)   WHERE   im_item.status = 'A'  AND xysc_order_info.pay_status > 0  AND xysc_order_goods.is_gift = 0  GROUP BY  xysc_order_goods.order_id,  xysc_order_goods.goods_id,  im_item.category_id,  im_item.item_number   ORDER BY  order_id ASC,  goods_id ASC "

    // Results go through Hive; sqoop exports them to MySQL afterwards.
    try {
      val query_order_item_df = sql(query_order_item_sql).select("order_id", "goods_id")

      // Build one basket per order_id. FPGrowth.run throws SparkException if a
      // transaction contains duplicate items, so de-duplicate each basket with
      // toSet before materialising it as an array.
      val transactions = query_order_item_df.rdd
        .map(row => (row(0), row(1)))
        .groupByKey()
        .map(_._2.toSet.toArray)

      // minSupport 0.001: an item set must appear in at least 0.1% of orders.
      val fp_growth = new FPGrowth().setMinSupport(0.001).setNumPartitions(16)
      val model = fp_growth.run(transactions)

      // Filter recommendation rules by confidence:
      //   antecedent -> the "customers who bought" item set
      //   consequent -> the recommended item set
      //   confidence -> rule confidence, stored as a percentage (x 100)
      val minConfidence: Double = 0.1 // minimum confidence: 10%
      val antecedent_consequent_confidence_rdd = model.generateAssociationRules(minConfidence)
        .map(x => Row(x.antecedent.mkString(","), x.consequent.mkString(","), x.confidence * 100))
      val schema = StructType(Array(
        StructField("item_combin", DataTypes.StringType, true),
        StructField("recommend_item", DataTypes.StringType, true),
        StructField("confidence", DataTypes.DoubleType, true)))
      val antecedent_consequent_confidence_df = sparkSession.createDataFrame(antecedent_consequent_confidence_rdd, schema)

      // Stage the result in Hive first (Spark's direct MySQL write support is
      // poor). createOrReplaceTempView so a re-run in the same session works.
      antecedent_consequent_confidence_df.createOrReplaceTempView("antecedent_consequent_confidence_df")
      sql("CREATE DATABASE IF NOT EXISTS yamibuy_recommend")
      sql("USE yamibuy_recommend")
      // Create the destination table if missing.
      sql("CREATE TABLE IF NOT EXISTS yamibuy_recommend.re_recommand_item (   id BIGINT,   item_combin STRING COMMENT '前置项',   recommend_item STRING COMMENT '后置项',   confidence DOUBLE COMMENT '置信度',   update_time TIMESTAMP COMMENT '更新时间(数据生成的时间, 不是写入数据库时间)',   edit_time INT COMMENT '更新时间, 时间戳,(数据生成的时间, 不是写入数据库时间)' ) COMMENT 'FP_GROWTH 算法' STORED AS ORC")

      // row_sequence() from hive-contrib generates the surrogate id column.
      sql("add jar /opt/cloudera/parcels/CDH/lib/hive/lib/hive-contrib.jar")
      sql("create temporary function row_sequence as 'org.apache.hadoop.hive.contrib.udf.UDFRowSequence'")
      // Overwrite the table with this run's rules, stamped with generation time.
      sql("INSERT OVERWRITE TABLE yamibuy_recommend.re_recommand_item SELECT row_sequence(), item_combin, recommend_item, confidence, current_timestamp(), unix_timestamp() FROM antecedent_consequent_confidence_df")
    } finally {
      // Always release the session (and YARN resources), even on failure.
      sparkSession.close()
    }
  }
}
