package com.xf.day05

import au.com.bytecode.opencsv.CSVReader
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import java.io.StringReader
import scala.collection.mutable
/**
 * Form                               Notes
 * rdd.reduce(_ + _)                  Shortest form; `_` placeholders, types inferred by the compiler
 * rdd.reduce((a, b) => a + b)        Full anonymous function; recommended for production code, most readable
 * rdd.reduce(add)                    Uses an externally defined function; good for reuse
 * rdd.reduce((x, y) => { ... })      For multi-line logic
 */
object TestAPI {

  def main(args: Array[String]): Unit = {
    // Run exactly one demo at a time — each creates (and stops) its own SparkContext.
    //   test2()
    //   reduceMethod()
    //   testJoin()
    //   testSample()
    //   testLookUp()
    testFold()
  }

  /**
   * Builds a locally-running SparkContext with the settings shared by every
   * demo in this object. Callers are responsible for calling sc.stop().
   *
   * @param master    Spark master URL, e.g. "local[*]" or "local[1]"
   * @param extraConf additional settings; these are applied BEFORE the
   *                  context is created — SparkConf changes made after
   *                  construction are ignored by Spark
   */
  private def createContext(master: String,
                            extraConf: Map[String, String] = Map.empty): SparkContext = {
    val conf = new SparkConf()
      .setAppName("WordCount")
      .setMaster(master)
      .set("spark.ui.port", "8080")
      .set("spark.driver.host", "127.0.0.1")
    extraConf.foreach { case (key, value) => conf.set(key, value) }
    new SparkContext(conf)
  }

  /**
   * Demonstrates RDD.fold: the zero value is added once per partition and
   * once more when the partition results are merged on the driver.
   * With local[1] there is a single partition, so the result is
   * 29 (element sum) + 2 (partition init) + 2 (driver merge init) = 33.
   */
  private def testFold(): Unit = {
    val sc = createContext("local[1]")
    try {
      val rddInt = sc.parallelize(List(1, 2, 3, 4, 5, 6, 2, 5, 1))

      // Show how many partitions we actually got.
      println(s"实际分区数: ${rddInt.getNumPartitions}")

      println("=== 分区数据分布 ===")
      // Side-effecting print of each partition's contents; count() only
      // forces evaluation of the otherwise-lazy transformation.
      rddInt.mapPartitionsWithIndex { (partitionIndex, iterator) =>
        val partitionData = iterator.toList
        println(s"分区 $partitionIndex: ${partitionData.mkString("[", ", ", "]")}")
        partitionData.iterator
      }.count()

      val foldInt: Int = rddInt.fold(2)(_ + _)
      println(foldInt) // 33
    } finally {
      sc.stop()
    }
  }

  /**
   * Demonstrates PairRDD.lookup: collects every value stored under key "A".
   */
  private def testLookUp(): Unit = {
    val sc = createContext("local[*]")
    try {
      val lkRdd = sc.makeRDD(Array(("A", 0), ("A", 2), ("B", 1), ("B", 2), ("C", 1)))
      val lookupResult = lkRdd.lookup("A")
      val buffer: mutable.Buffer[Int] = lookupResult.toBuffer
      println(buffer) // ArrayBuffer(0, 2)
    } finally {
      sc.stop()
    }
  }

  /**
   * Demonstrates RDD.sample(withReplacement, fraction, seed).
   *
   * - withReplacement = false: each element is selected at most once, with
   *   probability `fraction`. With true, an element may repeat and
   *   `fraction` is the expected number of times each element is selected
   *   (then fraction may be >= 1.0).
   * - seed fixes the random generator, so two samples with identical
   *   arguments (as below) select exactly the same elements — useful for
   *   reproducible runs across dev/test/prod and for debugging.
   */
  private def testSample(): Unit = {
    val sc = createContext("local[*]")
    try {
      val sampleRdd: RDD[Int] = sc.parallelize(1 to 1000)

      val firstSample: RDD[Int] = sampleRdd.sample(false, 0.01, 1)
      firstSample.foreach(println)

      println("================================================>")

      // Same seed + same fraction => identical selection as above.
      val secondSample: RDD[Int] = sampleRdd.sample(false, 0.01, 1)
      secondSample.foreach(println)
    } finally {
      sc.stop()
    }
  }

  /**
   * Demonstrates inner join and left outer join on pair RDDs. A key present
   * on both sides yields one row per value combination; leftOuterJoin keeps
   * unmatched left-side keys with None on the right.
   */
  private def testJoin(): Unit = {
    val sc = createContext("local[*]")
    try {
      val pairRDD1 = sc.parallelize(List(("Scala", 2), ("Scala", 3), ("Java", 4), ("Python", 8)))
      val pairRDD2 = sc.parallelize(List(("Scala", 3), ("Java", 5), ("HBsae", 4), ("Java", 10)))

      val joined: Array[(String, (Int, Int))] = pairRDD1.join(pairRDD2).collect()
      println(joined.toBuffer) // ArrayBuffer((Java,(4,5)), (Java,(4,10)), (Scala,(2,3)), (Scala,(3,3)))

      val leftJoined: RDD[(String, (Int, Option[Int]))] = pairRDD1.leftOuterJoin(pairRDD2)
      val buffer: mutable.Buffer[(String, (Int, Option[Int]))] = leftJoined.collect().toBuffer
      println(buffer) // ArrayBuffer((Java,(4,Some(5))), (Java,(4,Some(10))), (Scala,(2,Some(3))), (Scala,(3,Some(3))), (Python,(8,None)))
    } finally {
      sc.stop()
    }
  }

  /**
   * Reads a local CSV file and parses each line with opencsv, printing the
   * first three columns of every row.
   * NOTE(review): assumes every row has at least 3 columns — confirm against
   * the input file.
   */
  private def test2(): Unit = {
    val sc = createContext("local[*]")
    try {
      val path = "D:\\tmp\\grade.csv"
      val gradeRDD = sc.textFile(path)

      val result: RDD[Array[String]] = gradeRDD.map { line =>
        val reader = new CSVReader(new StringReader(line))
        // Close the reader even if parsing throws (was leaked previously).
        try reader.readNext()
        finally reader.close()
      }

      result.collect().foreach(x => println(x(0), x(1), x(2)))
    } finally {
      sc.stop()
    }
  }

  /**
   * Sums the lengths of all lines of a text file, saving the per-line
   * lengths as a side output, then pauses briefly so the Spark UI can be
   * inspected.
   */
  private def totalCntTest(): Unit = {
    val sc = createContext("local[*]")
    try {
      // Local file; for HDFS use e.g. "hdfs://master:9000/tmp/wd.txt".
      val path = "D:\\tmp\\wd.txt"
      val linesRDD: RDD[String] = sc.textFile(path)

      val lineCnt = linesRDD.map(_.length)
      lineCnt.saveAsTextFile("file:///D:\\tmp\\line_cnt")

      val totalCnt = lineCnt.reduce(_ + _)
      println(totalCnt)

      // Pause 10 seconds to allow inspecting the UI.
      // (Was Thread.sleep(10000000) ≈ 2.8 hours, while the comment said 10 s.)
      Thread.sleep(10000)
    } finally {
      sc.stop()
    }
  }

  /**
   * Compares parallelize vs makeRDD partition counts.
   *
   * spark.default.parallelism must be set BEFORE the SparkContext is
   * created; the original code set it afterwards, which had no effect
   * (hence the previously observed 16 = local core count). With the setting
   * applied properly, parallelize uses 2 partitions. makeRDD called with a
   * Seq[(T, Seq[String])] selects the location-preference overload and
   * creates one partition per element.
   */
  private def RDDTest(): Unit = {
    val sc = createContext("local[*]", Map("spark.default.parallelism" -> "2"))
    try {
      val seq = List(
        ("I believe in human beings", List("uncertainty", "fear", "huger")),
        ("the human being", List("Scala", "python", "java")),
        ("Hello World", List("Red", "Blue", "Black")))

      val rddP = sc.parallelize(seq)
      println(rddP.partitions.size) // 2 (from spark.default.parallelism)

      val rddM = sc.makeRDD(seq)
      println(rddM.partitions.size) // 3 (one per preferred-location entry)
    } finally {
      sc.stop()
    }
  }

  /**
   * Demonstrates RDD.reduce: sums 1..6 and prints 21.
   */
  private def reduceMethod(): Unit = {
    val sc = createContext("local[*]")
    try {
      val rdd = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
      // Equivalent long form: rdd.reduce((a, b) => a + b)
      val sum = rdd.reduce(_ + _)
      println(sum) // 21
    } finally {
      sc.stop()
    }
  }
}
