import java.sql.{Connection, DriverManager, ResultSet}

import cn.tecnova.bean.NlpJsonBean
import cn.tecnova.utils.ConfigHandler
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Reads JSON records from Kafka, deserializes them into [[NlpJsonBean]], joins each
  * record's `uuid` against the `media_type` table loaded from MySQL, and prints the
  * resulting "uuid,mediaTypeName" pairs.
  *
  * Rabcheng
  * Date:2019/3/28 14:56
  **/
object TestMapMysqlV1 {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", "500")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    val ssc = new StreamingContext(conf, Seconds(2))

    val data: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread fetched partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array(ConfigHandler.topic), ConfigHandler.kafkaParams(""))
    )

    // Deserialize the JSON payload into NlpJsonBean.
    // mapPartitions so a single Gson instance is reused per partition instead of
    // allocating one per record.
    val jsonDS: DStream[NlpJsonBean] = data.mapPartitions(records => {
      val gson = new Gson()
      records.map(re => gson.fromJson(re.value(), classOf[NlpJsonBean]))
    })

    jsonDS.foreachRDD(rdd => {
      val session = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
      val dataFrame: DataFrame = session.createDataFrame(rdd)

      import session.implicits._

      val res: Dataset[String] = dataFrame.mapPartitions(iter => {

        // Load the media types ONCE per partition and materialize them eagerly.
        // BUG FIX: the original code iterated the JDBC ResultSet inside iter.map's
        // per-row lambda, so the cursor was fully consumed by the FIRST row and every
        // later row produced an empty string. The connection was also never closed,
        // leaking one connection per partition per micro-batch.
        val mediaTypes: List[(String, String)] = {
          val con: Connection = DriverManager.getConnection(ConfigHandler.url, ConfigHandler.user, ConfigHandler.passwd)
          try {
            val mediaStat = con.prepareStatement(
              """
              select
              id,name
              from media_type
            """.stripMargin)
            try {
              val rs: ResultSet = mediaStat.executeQuery()
              try {
                var acc = List[(String, String)]()
                while (rs.next()) {
                  acc = (rs.getString("id"), rs.getString("name")) :: acc
                }
                acc.reverse // prepend-then-reverse keeps the build O(n)
              } finally rs.close()
            } finally mediaStat.close()
          } finally con.close()
        }

        iter.map(row => {
          val uuid = row.getAs[String]("uuid")
          // One "--uuid,name" segment per media type whose id is >= 2,
          // concatenated in table order (same format the original produced).
          mediaTypes.collect {
            case (id, name) if id.toInt >= 2 => "--" + uuid + "," + name
          }.mkString
        })
      })

      // Each element holds "--a--b--..."; split back into individual segments.
      // NOTE: split("--") yields a leading empty string per element, preserved
      // from the original formatting.
      val resss: Dataset[String] = res.flatMap(_.split("--"))

      resss.foreachPartition(iter => {
        iter.foreach(s => {
          println(s)
        })
      })

    })

    ssc.start()
    ssc.awaitTermination()
  }

}
