package org.example


import java.sql.{DriverManager, PreparedStatement}

import com.mysql.jdbc.Connection
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
/**
 * Basic per-batch word count over a socket text stream.
 *
 * Reads whitespace-separated words from 172.16.104.29:9999 in 5-second
 * batches and prints the count of each word within the current batch.
 */
object SparkStreaming_1 {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    // 5-second micro-batches
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val dStream: ReceiverInputDStream[String] = ssc.socketTextStream("172.16.104.29", 9999)
    val lines: DStream[String] = dStream.flatMap(_.split(" "))
    val wordDStream: DStream[(String, Int)] = lines.map((_, 1))
    val result = wordDStream.reduceByKey(_ + _)
    result.print()
    ssc.start()
    ssc.awaitTermination() // blocks until the streaming job is stopped
  }
}

/**
 * Stateful word count: accumulates each word's total across all batches
 * using updateStateByKey (was nested inside SparkStreaming_1.main and
 * shadowed its name; promoted to a top-level object and renamed).
 */
object StatefulWordCount {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    // Count once every 5 seconds
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    // updateStateByKey requires a checkpoint directory for the running state
    ssc.checkpoint("checkpoint_dir")

    val dStream: ReceiverInputDStream[String] = ssc.socketTextStream("172.16.104.100", 8888)
    val lines: DStream[String] = dStream.flatMap(_.split(" "))

    // Per-batch (word, 1) pairs
    val wordCounts = lines.map(word => (word, 1))

    // Merge this batch's occurrences into the running total.
    // The explicit Option[Int] result type is required so that
    // updateStateByKey can infer its state type (Some[Int] alone breaks it).
    val updateFunc: (Seq[Int], Option[Int]) => Option[Int] =
      (values, state) => Some(values.sum + state.getOrElse(0))

    val runningCounts = wordCounts.updateStateByKey(updateFunc)
    runningCounts.print()
    ssc.start()
    ssc.awaitTermination()
  }
}

/**
 * Word count that keeps only purely alphabetic words, lower-cases them and
 * drops a small stop-word list before counting each 5-second batch.
 */
object FilteredWorkCount {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("FilteredWordCount").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(5))
    val lines = ssc.socketTextStream("localhost", 9999)
    val stopwords = Set("a", "an", "the", "this", "that")
    val words = lines.flatMap(_.split("\\s+"))
      .filter(_.matches("[a-zA-Z]+"))   // keep alphabetic tokens only
      .map(_.toLowerCase)
      .filter(word => !stopwords.contains(word))
    val wordCounts = words.map(word => (word, 1)).reduceByKey(_ + _)
    wordCounts.print()
    ssc.start()
    ssc.awaitTermination()
  }
}

/**
 * Word count whose per-batch results are upserted into a MySQL table.
 *
 * One JDBC connection is opened per RDD partition (on the executor), the
 * batch's (word, count) pairs are written with an
 * INSERT ... ON DUPLICATE KEY UPDATE upsert, and the connection is closed
 * in a finally block so it cannot leak.
 */
object SocketToMySQL {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("SocketToMySQL")
      .setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(5))
    val lines = ssc.socketTextStream("localhost", 9999)
    val wordCounts = lines.flatMap(_.split(" "))
      .filter(_.nonEmpty)
      .map(word => (word, 1))
      .reduceByKey(_ + _)
    wordCounts.foreachRDD { rdd =>
      // All JDBC work must happen inside foreachPartition: it runs on the
      // executor, and one connection per partition avoids per-record connects.
      rdd.foreachPartition { partitionOfRecords =>
        // java.sql.Connection (not com.mysql.jdbc.Connection): it is what
        // DriverManager.getConnection returns.
        var connection: java.sql.Connection = null
        var preparedStatement: PreparedStatement = null
        try {
          val url = "jdbc:mysql://localhost:3306/spark_db"
          val username = "root"
          val password = "yourpassword"
          Class.forName("com.mysql.jdbc.Driver")
          connection = DriverManager.getConnection(url, username, password)
          // Parameterized upsert: adds to the stored count on key collision.
          val sql =
            """
              |INSERT INTO wordCounts (word, count)
              |VALUES (?, ?)
              |ON DUPLICATE KEY UPDATE count = count + VALUES(count)
              |""".stripMargin
          preparedStatement = connection.prepareStatement(sql)

          partitionOfRecords.foreach { case (word, count) =>
            preparedStatement.setString(1, word)
            preparedStatement.setInt(2, count)
            preparedStatement.executeUpdate()
          }
        } catch {
          case e: Exception => e.printStackTrace()
        } finally {
          // Close in reverse order of creation; guard against null on failure.
          if (preparedStatement != null) preparedStatement.close()
          if (connection != null) connection.close()
        }
      }
    }
    ssc.start()
    ssc.awaitTermination()
  }
}
/**
 * Parses CSV records (id,name,age,city) from a socket stream and, for each
 * non-empty 5-second batch, reports a per-city head count and basic age
 * statistics via Spark SQL. Malformed lines are logged and dropped rather
 * than failing the batch.
 */
object WindowsSocKetCSVProcessor {
  def main(args: Array[String]): Unit = {
    // Needed on Windows so Spark/Hadoop can find winutils.
    System.setProperty("hadoop.home.dir", "C:\\path\\to\\hadoop")
    val conf = new SparkConf()
      .setAppName("windowsSocketCSVProcessor")
      .setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(5))
    val lines = ssc.socketTextStream("localhost", 9999)
    val schema = StructType(Array(
      StructField("id", IntegerType, nullable = true),
      StructField("name", StringType, nullable = true),
      StructField("age", IntegerType, nullable = true),
      StructField("city", StringType, nullable = true)
    ))
    // Parse each CSV line into a Row; None (dropped) when the field count is
    // wrong or a numeric field fails to parse.
    val processedData = lines.flatMap { line =>
      try {
        val fields = line.split(",").map(_.trim)
        if (fields.length == schema.length) {
          Some(Row(
            fields(0).toInt,
            fields(1),
            fields(2).toInt,
            fields(3)
          ))
        } else {
          None
        }
      } catch {
        case e: Exception =>
          println(s"Error parsing line:$line,${e.getMessage}")
          None
      }
    }
    processedData.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Reuse (or lazily create) the SparkSession on the driver.
        val spark = SparkSession.builder.config(rdd.sparkContext.getConf).getOrCreate()
        val df = spark.createDataFrame(rdd, schema)
        val cityCounts = df.groupBy("city").count()
        println("===按城市统计人数===")
        cityCounts.show()
        val ageStates = df.describe("age")
        println("===年龄统计===")
        ageStates.show()
      }
    }
    // Per-batch record count, for monitoring.
    lines.count().map(cnt => s"Received $cnt records").print()
    // start/awaitTermination belong at the top level of main, not inside
    // a foreachRDD closure.
    ssc.start()
    ssc.awaitTermination()
  }
}

