package com.alison.source

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.sql.types.{LongType, StringType, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.util.Properties
import scala.util.Random

object E3_source_file_kafka {

  /**
   * Entry point: starts a background producer thread that feeds random
   * sentences into Kafka, then runs the streaming word-count consumer
   * on the main thread (blocks until the query terminates).
   */
  def main(args: Array[String]): Unit = {
    // file_source()  // alternative demo: stream CSV files from a directory
    // Explicit () — these are side-effecting 0-arity methods; calling them
    // without parens is deprecated auto-application (an error in Scala 3).
    new Thread(() => send_kafka(), "send_kafka").start()
    consumer_msg()
  }


  /**
   * Streams CSV files appearing under the classpath-relative `datas`
   * directory and prints each new row to the console in append mode.
   * Blocks until the streaming query terminates.
   */
  def file_source(): Unit = {
    // Local SparkSession for this demo.
    val session = SparkSession.builder()
      .master("local[*]")
      .appName("Hello")
      .getOrCreate()

    // Schema of the incoming CSV records: (name: String, age: Long).
    val schema = new StructType()
      .add("name", StringType)
      .add("age", LongType)

    // Resolve the classpath root; the source watches a directory, not a file.
    val resourceRoot = this.getClass.getResource("/").toString
    println(resourceRoot)

    val users: DataFrame = session.readStream
      .format("csv")
      .schema(schema)
      .load(resourceRoot + "/datas/") // must be a directory

    // Emit every newly arrived row to the console.
    val streamingQuery = users.writeStream
      .outputMode("append")
      .format("console")
      .start()

    // Block until the query is stopped, then shut Spark down.
    streamingQuery.awaitTermination()
    session.stop()
  }
  """
    |读取自动分区的文件夹内的文件, 文件夹是 key=value 形式
    |datas
    |--group=1
    |----user1.csv
    |--group=2
    |----user2.csv
    |-----------------
    |读出来的数据显示
    |+----+---+-----+
    ||name|age|group|
    |+----+---+-----+
    ||   A| 11|    2|
    |""".stripMargin

  """
    |source_kafka
    |""".stripMargin

  /**
   * Produces one random "sentence" (1 to 5 random fruit words) per second
   * to the `source-topic` Kafka topic, forever.
   *
   * Runs until the thread dies; the producer is released in a finally block
   * (the original `close()` sat after the infinite loop and was unreachable).
   */
  def send_kafka(): Unit = {
    val topic = "source-topic" // target Kafka topic
    val brokers = "localhost:9092" // Kafka bootstrap servers

    // Producer configuration: plain string keys and values.
    val props = new Properties()
    props.put("bootstrap.servers", brokers)
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)
    val randomWords = List("apple", "banana", "orange", "grape", "kiwi", "pear", "mango")
    try {
      // Every second: pick 1-5 words at random and send them as one message.
      while (true) {
        val wordCount = Random.nextInt(5) + 1
        val message = List.fill(wordCount)(randomWords(Random.nextInt(randomWords.length)))
          .mkString(" ")
        producer.send(new ProducerRecord[String, String](topic, message))
        println("数据发送完毕：" + message)
        Thread.sleep(1000)
      }
    } finally {
      // Guarantee the producer is closed even if the loop exits abnormally
      // (e.g. InterruptedException from sleep).
      producer.close()
    }
  }

  /**
   * Reads the raw Kafka stream from `source-topic` and prints each record
   * (key, value, topic, partition, offset, timestamp, timestampType) to the
   * console in update mode. Blocks until the streaming query terminates.
   */
  def source_kafka(): Unit = {
    val spark = SparkSession.builder()
      .appName("Hello")
      .master("local[*]")
      .getOrCreate()

    // Streaming DataFrame backed by the Kafka source.
    // NOTE: the original also passed producer "key.serializer"/
    // "value.serializer" configs here; those are producer settings, not valid
    // read options for the Kafka source, so they have been removed.
    val lines: DataFrame = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("subscribe", "source-topic") // multiple topics: "topic1,topic2"
      .load()

    // lines.printSchema shows:
    // root
    //  |-- key: binary (nullable = true)
    //  |-- value: binary (nullable = true)
    //  |-- topic: string (nullable = true)
    //  |-- partition: integer (nullable = true)
    //  |-- offset: long (nullable = true)
    //  |-- timestamp: timestamp (nullable = true)
    //  |-- timestampType: integer (nullable = true)

    // Batch-style actions such as lines.show() would fail with:
    // "Queries with streaming sources must be executed with writeStream.start()".
    val query = lines.writeStream
      .outputMode("update")
      .format("console")
      .start()

    // Wait for the query to finish, then shut Spark down.
    query.awaitTermination()
    spark.stop()
  }


  /**
   * Streaming word count over messages from the `source-topic` Kafka topic:
   * splits each message value into words, counts occurrences, and prints the
   * running totals to the console in complete mode. Blocks until the
   * streaming query terminates.
   */
  def consumer_msg(): Unit = {
    val spark = SparkSession.builder()
      .appName("Hello")
      .master("local[*]")
      .getOrCreate()

    // Needed for the String encoder used by .as[String] / flatMap below.
    import spark.implicits._

    // Streaming DataFrame from Kafka.
    val lines: DataFrame = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("subscribe", "source-topic") // multiple topics: "topic1,topic2"
      .load()

    // Word-count aggregation.
    // BUG FIX: the Kafka `value` column is binary and must be CAST to STRING
    // before applying the String encoder — the original
    // select("value").as[String] fails analysis with
    // "cannot up cast `value` from binary to string".
    val wordCounts = lines
      .selectExpr("CAST(value AS STRING)").as[String]
      .flatMap(_.split("\\W+")) // split into words
      .groupBy("value") // Dataset[String]'s single column is named "value"
      .count()

    // Complete mode: re-emit the full aggregation table on every trigger.
    val query = wordCounts.writeStream
      .outputMode("complete")
      .format("console")
      .start()

    query.awaitTermination()
    spark.stop()
  }

}
