package title5

import java.util.Properties

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import title4.UserPortrait.searchBrands
import org.apache.spark. SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Time
import org.apache.spark.streaming.kafka.KafkaUtils

/**
  * Question 5: store event data in Kafka and process it in real time with Spark Streaming.
  *
  * Batch part: reads tag rules from MySQL, pivots all brands of one order into a single
  * comma-separated string, and broadcasts the resulting (order, brands) pairs.
  * Streaming part: every 5 seconds consumes user/order events from Kafka, aggregates
  * the orders per user, and matches them against the broadcast rules via `searchBrands`.
  */
object KafkaStreaming {
  def main(args: Array[String]): Unit = {

    val session = SparkSession.builder().master("local[*]").appName(this.getClass.getName).getOrCreate()

    import session.implicits._

    // 1. Read the tag-rule table from MySQL.
    val url = "jdbc:mysql://localhost:3306/scott?characterEncoding=utf-8"
    val tname = "rule"
    val p = new Properties()
    p.setProperty("user", "root")
    p.setProperty("password", "123456")
    p.setProperty("driver", "com.mysql.jdbc.Driver")
    val jdbc = session.read.jdbc(url, tname, p)

    jdbc.createTempView("temptable")

    // 2. Pivot rows to columns: collapse all brands of one order into one
    //    comma-separated string. `order` is a reserved word in Spark SQL,
    //    so it must be backtick-quoted to be usable as an identifier.
    val tempdata = jdbc.sqlContext.sql(
      "select `order`,concat_ws(',',collect_set(brand)) brands from temptable group by `order`")

    // 3. Collect the pivoted rules on the driver and broadcast them.
    //    Read the columns with getString instead of parsing Row.toString(),
    //    which breaks when a field contains ',' (the brands column always does
    //    for multi-brand orders) or '[' / ']'.
    val rule: Array[(String, String)] = tempdata.map { row =>
      (row.getString(0), row.getString(1))
    }.collect()

    val ruleBC: Broadcast[Array[(String, String)]] = session.sparkContext.broadcast(rule)

    /*******************************************************************************/

    // A StreamingContext wraps the SparkContext; the batch interval (5 s)
    // is how often a new RDD of Kafka records is produced.
    val ssc = new StreamingContext(session.sparkContext, Seconds(5))

    val zkQuorum = "hdp-01:2181,hdp-02:2181,hdp-03:2181"
    val groupId = "g1"
    val topic = Map[String, Int]("UserPortrait" -> 1)

    // Pull (key, message) pairs from Kafka with the receiver-based API.
    val data: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(ssc, zkQuorum, groupId, topic)
    data.foreachRDD((rdd: RDD[(String, String)], time: Time) => {
      // Each message is space-separated; field 0 is the user id, field 3 the order id.
      // NOTE(review): assumes every message has at least 4 fields — confirm producer format.
      val userAndOrder: DataFrame = rdd.map(_._2).map { t =>
        val fields = t.split(" ")
        (fields(0), fields(3))
      }.toDF("user", "order")

      // createOrReplaceTempView replaces the deprecated registerTempTable and is
      // safe to call again on every micro-batch.
      userAndOrder.createOrReplaceTempView("table1")

      // Rows to columns: gather every order of one user into one comma-separated
      // string. Again, `order` must be backtick-quoted.
      val userAndOrders: DataFrame = userAndOrder.sqlContext.sql(
        "select user,concat_ws(',',collect_set(`order`)) `order` from table1 group by user")

      // Match each user's orders against the broadcast tag rules.
      val result = userAndOrders.map { row =>
        val rules: Array[(String, String)] = ruleBC.value
        // user identifier
        val user = row.getString(0)
        // the orders this user bought, as produced by concat_ws above
        val orders: Array[String] = row.getString(1).split(",")
        // resolve the matching tags
        val brands: String = searchBrands(orders, rules)
        (user, brands)
      }.toDF("id", "brands")
      result.show()

      //      val context: SparkContext = rdd.sparkContext
      //      val sqlContext = SQLContextSingleton.getInstance(context)
    })

    // Start the streaming job and block until it terminates.
    ssc.start()
    ssc.awaitTermination()
  }

  /**
    * Lazily-created singleton SQLContext for use inside foreachRDD.
    * NOTE(review): not synchronized — acceptable only because foreachRDD bodies
    * run sequentially on the driver; confirm if used elsewhere.
    */
  object SQLContextSingleton {
    @transient private var instance: SQLContext = _

    def getInstance(sparkContext: SparkContext): SQLContext = {
      if (instance == null) {
        instance = new SQLContext(sparkContext)
      }
      instance
    }
  }

}
