/**
 * @author Xue ShuWen
 * @date 2022/11/19 20:22
 */


package scala
import java.sql.{Connection, DriverManager, PreparedStatement}
import java.text.SimpleDateFormat
import java.util

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
object connMysql {

  // NOTE(review): the enclosing `package scala` shadows the standard library's
  // root package of the same name; consider moving this object to a project
  // package (e.g. `com.example.streaming`).

  // JDBC settings for the target MySQL database.
  val driver = "com.mysql.cj.jdbc.Driver"
  val url = "jdbc:mysql://localhost:3306/stuInfo"
  val username = "root"
  val password = "root"
  // Kafka consumer group id.
  val group = "niit01"

  /** Opens a JDBC connection, passes it to `body`, and always closes it. */
  private def withConnection[A](body: Connection => A): A = {
    Class.forName(driver)
    val connection: Connection = DriverManager.getConnection(url, username, password)
    try body(connection)
    finally connection.close()
  }

  /**
   * Inserts one complete student record (8 tab-separated fields) into the
   * `studentInfo` table. Fields 0, 2, 4, 5 are parsed as Int and field 7 as
   * Double, matching the original column bindings.
   */
  private def insertStudent(connection: Connection, student: Array[String]): Unit = {
    val sql = "insert into studentInfo values (?,?,?,?,?,?,?,?)"
    val statement: PreparedStatement = connection.prepareStatement(sql)
    try {
      statement.setInt(1, student(0).toInt)
      statement.setString(2, student(1))
      statement.setInt(3, student(2).toInt)
      statement.setString(4, student(3))
      statement.setInt(5, student(4).toInt)
      statement.setInt(6, student(5).toInt)
      statement.setString(7, student(6))
      statement.setDouble(8, student(7).toDouble)
      statement.executeUpdate()
    } finally statement.close()
  }

  /**
   * Inserts the first seven fields of a student record into the `sex` table.
   * Callers pre-filter on field 2 == "1" (presumably a gender code — TODO
   * confirm against the producer's record format).
   */
  private def insertSex(connection: Connection, student: Array[String]): Unit = {
    val sql = "insert into sex values (?,?,?,?,?,?,?)"
    val statement: PreparedStatement = connection.prepareStatement(sql)
    try {
      statement.setInt(1, student(0).toInt)
      statement.setString(2, student(1))
      statement.setInt(3, student(2).toInt)
      statement.setString(4, student(3))
      statement.setInt(5, student(4).toInt)
      statement.setInt(6, student(5).toInt)
      statement.setString(7, student(6))
      statement.executeUpdate()
    } finally statement.close()
  }

  /**
   * Entry point: consumes tab-separated student records from the Kafka topic
   * `stuInfo` with Spark Streaming (2-second micro-batches) and writes them
   * into MySQL: every complete record goes into `studentInfo`, and records
   * whose third field equals "1" also go into `sex`.
   */
  def main(args: Array[String]): Unit = {

    System.setProperty("hadoop.home.dir", "C:\\soft\\winutils-master\\hadoop-2.7.3")
    System.setProperty("HADOOP_USER_NAME", "root")

    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkKafka")
    val ssc = new StreamingContext(sparkConf, Seconds(2))
    ssc.sparkContext.setLogLevel("error")

    // Kafka consumer configuration; offsets are managed manually
    // (enable.auto.commit = false) and reading starts from the earliest offset.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    // (The original also built a producer-properties HashMap here; it was never
    // used, so it has been removed.)

    // Subscribe to the topic. PreferConsistent spreads Kafka partitions evenly
    // across executors (the strategy recommended by the Spark-Kafka docs).
    val topics = Array("stuInfo")
    val recordDStream: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))
    // One Array[String] per Kafka record value, split on tabs; cached because
    // two foreachRDD sinks below consume the same stream.
    val resultDStream: DStream[Array[String]] = recordDStream.map(_.value()).map(_.split("\t")).cache()

    // Sink 1: every complete record -> studentInfo.
    resultDStream.foreachRDD { rdd =>
      // BUG FIX: the original kept records with exactly 7 fields but then read
      // student(7) (the 8th field) and bound 8 SQL parameters, so every record
      // that passed the filter threw ArrayIndexOutOfBoundsException. The insert
      // needs 8 fields, so require at least 8.
      val students = rdd.filter(_.length >= 8).collect()
      if (students.nonEmpty) {
        // One connection per micro-batch instead of one per record.
        withConnection { connection =>
          students.foreach { student =>
            insertStudent(connection, student)
            println(student(6) + "--------------------")
          }
        }
      }
    }

    // Sink 2: records whose third field is "1" -> sex.
    resultDStream.foreachRDD { rdd =>
      // NOTE(review): the original required length == 7, which can never hold
      // alongside the 8-field records consumed by sink 1; relaxed to >= 7 so
      // both sinks agree on the input format — confirm against the producer.
      val selected = rdd.filter(s => s.length >= 7 && s(2) == "1").collect()
      if (selected.nonEmpty) {
        withConnection { connection =>
          selected.foreach { student =>
            insertSex(connection, student)
            println(student(6) + "--------------------")
          }
        }
      }
    }

    ssc.start()
    ssc.awaitTermination() // blocks until the streaming context is stopped
  }
}



