import java.util.Properties

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}
import org.junit.{Assert, Test}

/**
  * @author dinghao 
  * @create 2021-09-22 13:17 
  * @message
  */
class SparkTest extends Assert {
  System.setProperty("HADOOP_USER_NAME", "work")
  val conf: SparkConf = new SparkConf().setMaster("local[1]").setAppName("sparkTest").set("dfs.client.use.datanode.hostname", "true")
  val sc: SparkContext = new SparkContext(conf)
  val sparksql: SparkSession = SparkSession.builder().enableHiveSupport.config(conf).getOrCreate()

  // Transformation operators — single-value transformations.
  // 1. flatMap: flatten the nested collections first, then map each element.
  @Test
  def flat_map_test: Unit = {
    val nested: RDD[List[Int]] = sc.makeRDD(List(List(1, 2), List(3, 4)))
    val flattened: RDD[Int] = nested.flatMap(lst => lst)
    flattened.map(elem => (elem, 1)).foreach(println)
  }

  // 2. glom: turn each partition into an in-memory array of the same element
  // type; the partitioning itself is unchanged.
  @Test
  def glom_test: Unit = {
    println("--------------glom-------------")
    val rdd3: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4, 5), 2)
    rdd3.foreach(println)
    rdd3.glom().foreach(part => println(part.mkString(",")))

    println("--------------glom小功能-------------")
    // Sum of per-partition maxima: take the max inside each partition,
    // then add the maxima together across partitions.
    val partitionMaxima = rdd3.glom.map(part => part.max)
    println(partitionMaxima.sum().toInt)
    partitionMaxima.collect.sum
  }

  // 3. groupBy: group elements by the key produced by the supplied function.
  @Test
  def groupby_test: Unit = {
    println("--------------groupby-------------")
    val numbers: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4, 5, 6, 7, 8))
    numbers.groupBy(n => n % 2).foreach(println)

    println("--------------groupby小功能1-------------")
    // Group List("Hello", "hive", "hbase", "Hadoop") by each word's first letter.
    val words: RDD[String] = sc.makeRDD(List("Hello", "hive", "hbase", "Hadoop"))
    words.groupBy(word => word.substring(0, 1)).foreach(println)
  }

  // 4. filter: keep only the elements for which the predicate returns true.
  @Test
  def filter_test: Unit = {
    val numbers: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4, 5, 6, 7, 8))
    println("--------------fileter-------------")
    numbers.filter(n => n % 2 == 0).foreach(println)
  }

  // 5. sample: draw elements according to the given policy; the first argument
  // selects with replacement (true) or without (false).
  @Test
  def sample_test: Unit = {
    val numbers: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4, 5, 6, 7, 8))
    // Without replacement: each element is kept with probability 0.5.
    println("-------------sample不放回----------------")
    numbers.sample(withReplacement = false, 0.5).foreach(println)

    // With replacement: each element is expected to be drawn ~2 times.
    println("--------------sample放回---------------------")
    numbers.sample(withReplacement = true, 2).foreach(println)
  }

  // 6. distinct: remove duplicate elements.
  @Test
  def distinct_test: Unit = {
    val source: RDD[Int] = sc.makeRDD(List(1, 2, 3, 5, 6, 3, 6, 8, 7, 9, 4, 4, 6, 7, 8), 2)
    source.distinct().glom.foreach(part => println(part.mkString(",")))
    println("传参") // Without an argument the result keeps the original partition count.
    source.distinct(3).glom.foreach(part => println(part.mkString(",")))
  }

  // 7. coalesce: shrink the number of partitions.
  @Test
  def coalesce_test: Unit = {
    val source: RDD[Int] = sc.makeRDD(List(1, 2, 3, 5, 6, 3, 6, 8, 7, 9, 4, 4, 6, 7, 8), 8)
    source.coalesce(4).glom.foreach(part => println(part.mkString(",")))
  }

  // 8. repartition: change the partition count; effectively a coalesce that
  // always shuffles.
  @Test
  def repartition_test: Unit = {
    val source: RDD[Int] = sc.makeRDD(List(1, 2, 3, 5, 6, 3, 6, 8, 7, 9, 4, 4, 6, 7, 8), 2)
    source.repartition(10).glom().foreach(part => println(part.mkString(",")))
  }

  // 9. sortBy: sort by the key produced by the supplied function.
  @Test
  def sortBy_test: Unit = {
    val source: RDD[Int] = sc.makeRDD(List(1, 2, 3, 5, 6, 3, 6, 8, 7, 9, 4, 4, 6, 7, 8), 2)
    source.sortBy(n => n, numPartitions = 1).foreach(println)
    source.sortBy(n => n).glom().foreach(part => println(part.mkString(",")))
    println("---------------collect------------------")
    println(source.sortBy(n => n).collect.mkString(","))
    println("----------------倒序---------------------")
    // ascending = false sorts in descending order.
    println(source.sortBy(n => n, ascending = false).collect.mkString(","))
  }

  // Key-value operators.
  // 1. partitionBy: redistribute the data using the supplied Partitioner.
  @Test
  def partitionBy_test: Unit = {
    val pairs: RDD[(Int, String)] = sc.makeRDD(List((1, "a"), (2, "b"), (2, "cgt"), (1, "5"), (1, "mn"), (2, "r")), 3)
    val repartitioned: RDD[(Int, String)] = pairs.partitionBy(new HashPartitioner(2))
    pairs.glom().foreach(part => println(part.mkString(",")))
    repartitioned.glom().foreach(part => println(part.mkString(",")))
  }

  // 2. reduceByKey: merge the values of identical keys with the given function.
  @Test
  def reduceByKey_test: Unit = {
    val pairs: RDD[(Int, String)] = sc.makeRDD(List((1, "a"), (2, "b"), (2, "cgt"), (1, "5"), (1, "mn"), (2, "r")), 3)
    pairs.reduceByKey((a, b) => a + b).glom.foreach(part => println(part.mkString(",")))
    // The second argument forces the result into a single partition.
    pairs.reduceByKey((a, b) => a + b, 1).glom.foreach(part => println(part.mkString(",")))
  }

  // 3. groupByKey: group the source's values by key.
  @Test
  def groupByKey_test: Unit = {
    val words: RDD[String] = sc.makeRDD(List("Hello", "hive", "hbase", "Hadoop"))
    words.map(word => (word.charAt(0), word)).groupByKey.foreach(println)
    val pairs: RDD[(Int, String)] = sc.makeRDD(List((1, "a"), (2, "b"), (2, "cgt"), (1, "5"), (1, "mn"), (2, "r")), 3)
    pairs.groupByKey(1).glom().foreach(part => println(part.mkString(",")))
    // Count how many values each key has.
    pairs.groupByKey(1)
      .map { case (key, values) => (key, values.size) }
      .glom()
      .foreach(part => println(part.mkString(",")))
  }

  // 4. aggregateByKey: aggregate with separate in-partition and cross-partition
  // functions.
  @Test
  def aggregateByKey_test: Unit = {
    val pairs: RDD[(String, Int)] = sc.makeRDD(List(("a", 1), ("b", 2), ("a", 4), ("a", 5), ("b", 3), ("b", 5)))
    // Max within each partition, then sum the per-partition maxima.
    val aggregated = pairs.aggregateByKey(0)(
      (acc, v) => math.max(acc, v),
      (left, right) => left + right
    )
    aggregated.glom().foreach(part => println(part.mkString(",")))
  }

  // 5. foldByKey: the simplification of aggregateByKey for the case where the
  // in-partition and cross-partition functions are the same.
  @Test
  def foldByKey_test: Unit = {
    val pairs: RDD[(String, Int)] = sc.makeRDD(List(("a", 1), ("b", 2), ("a", 4), ("a", 5), ("b", 3), ("b", 5)))
    pairs.foldByKey(0)((acc, v) => acc + v).foreach(println)
  }

  // 6. combineByKey: aggregate an RDD by key with distinct createCombiner /
  // mergeValue / mergeCombiners functions.
  @Test
  def combineByKey_test: Unit = {
    val scores: RDD[(String, Int)] = sc.makeRDD(List(("a", 88), ("b", 95), ("a", 91), ("b", 93), ("a", 95), ("b", 98)))
    // Average per key: accumulate (sum, count), then divide.
    // BUGFIX: createCombiner must seed the accumulator with the FIRST value,
    // i.e. (v, 1). The previous `zero => (0, 0)` silently discarded one value
    // per key per partition, producing a wrong sum, count, and average.
    val sumAndCount: RDD[(String, (Int, Int))] = scores.combineByKey(
      (v: Int) => (v, 1),
      (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
      (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)
    )
    val averages: RDD[(String, Int)] = sumAndCount.map {
      case (word, (sum, cnt)) => (word, sum / cnt)
    }
    averages.foreach(println)
  }

  // 7. sortByKey: for (K, V) RDDs whose K has an Ordering; returns the RDD
  // sorted by key.
  @Test
  def sortByKey_test: Unit = {
    val pairs: RDD[(Int, String)] = sc.makeRDD(List((1, "abc"), (2, "bdf"), (2, "cgt"), (1, "56"), (1, "mn"), (2, "r")), 2)
    pairs.sortByKey().collect.foreach(println)
  }

  // 8. join: on (K, V) and (K, W) RDDs, pair up all elements sharing a key into
  // (K, (V, W)).
  @Test
  def join_test: Unit = {
    val left: RDD[(Int, String)] = sc.makeRDD(List((1, "abc"), (2, "bdf"), (2, "cgt"), (1, "56"), (1, "mn"), (2, "r")), 2)
    val right: RDD[(Int, String)] = sc.makeRDD(List((1, "111"), (2, "222")), 2)
    left.join(right).collect.foreach(println)
  }

  // 9. leftOuterJoin: keep every left-hand key; right-hand values arrive as Option.
  @Test
  def leftOuterJoin_test: Unit = {
    val left: RDD[(Int, String)] = sc.makeRDD(List((1, "abc"), (2, "bdf"), (2, "cgt"), (1, "56"), (1, "mn"), (2, "r")), 2)
    val right: RDD[(Int, String)] = sc.makeRDD(List((1, "111"), (2, "222")), 2)
    left.leftOuterJoin(right).collect.foreach(println)
  }

  // 10. cogroup: on (K, V) and (K, W) RDDs, produce (K, (Iterable[V], Iterable[W])).
  @Test
  def cogroup_test: Unit = {
    val left: RDD[(Int, String)] = sc.makeRDD(List((1, "abc"), (2, "bdf"), (2, "cgt"), (1, "56"), (1, "mn"), (2, "r")), 2)
    val right: RDD[(Int, String)] = sc.makeRDD(List((1, "111"), (2, "222")), 2)
    left.cogroup(right).collect.foreach(println)
  }

  // Two-value (binary) operators.
  // intersection and subtract shuffle; union does not.
  @Test
  def doubleValue_test: Unit = {
    val left: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4, 5))
    val right: RDD[Int] = sc.makeRDD(List(3, 4, 6, 7, 8), 4)
    val letters: RDD[String] = sc.makeRDD(List("a", "b", "c", "d", "1"))
    left.glom.foreach(part => println(part.mkString(",")))
    right.glom.foreach(part => println(part.mkString(",")))

    // 1. intersection: common elements with no duplicates; the result takes the
    // larger partition count of the two inputs.
    left.intersection(right).glom.foreach(part => println(part.mkString(",")))

    // 2. union: plain concatenation, duplicates kept; partition counts add up.
    left.union(right).glom.foreach(part => println(part.mkString(",")))
    // 3. subtract: difference; the result keeps the partitioning of the
    // RDD the method is called on.
    right.subtract(left).glom.foreach(part => println(part.mkString(",")))

    // 4. zip: pair elements positionally; both RDDs must have the same number
    // of partitions AND the same number of elements per partition.
    //    left.zip(right).foreach(println)
    left.zip(letters).glom.foreach(part => println(part.mkString(",")))
  }

  // Side-by-side comparison of the four by-key aggregation operators:
  // reduceByKey / aggregateByKey / foldByKey / combineByKey.
  @Test
  def RAFCBYkey_test: Unit = {
    println("=========reducebykey=========")
    val rdd1: RDD[(Int, String)] = sc.makeRDD(List((1, "a"), (2, "b"), (2, "cgt"), (1, "5"), (1, "mn"), (2, "r")), 3)
    rdd1.reduceByKey(_ + _).glom.foreach(x => println(x.mkString(",")))

    println("=========aggregateByKey=========")
    val rdd4: RDD[(String, Int)] = sc.makeRDD(List(("a", 1), ("b", 2), ("a", 4), ("a", 5), ("b", 3), ("b", 5)))
    // The zero value (1) takes part in the in-partition max; the per-partition
    // results are then summed across partitions.
    rdd4.aggregateByKey(1)(
      (zero, v) => math.max(zero, v),
      (x, y) => x + y
    ).glom().foreach(x => println(x.mkString(",")))

    println("=========foldByKey=========")
    rdd4.foldByKey(0)(_ + _).foreach(println)

    println("=========combineByKey=========")
    val rdd51: RDD[(String, Int)] = sc.makeRDD(List(("a", 88), ("b", 95), ("a", 91), ("b", 93), ("a", 95), ("b", 98)))
    // Average per key: accumulate (sum, count), then divide.
    // BUGFIX: createCombiner must seed the accumulator with the FIRST value,
    // i.e. (v, 1). The previous `zero => (0, 0)` silently discarded one value
    // per key per partition, skewing the average.
    val rdd52: RDD[(String, (Int, Int))] = rdd51.combineByKey(
      (v: Int) => (v, 1),
      (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
      (v1: (Int, Int), v2: (Int, Int)) => (v1._1 + v2._1, v1._2 + v2._2)
    )
    val rdd53: RDD[(String, Int)] = rdd52.map {
      case (word, (sum, cnt)) => (word, sum / cnt)
    }
    rdd53.foreach(println)
  }

  // Round-trip an RDD through a SequenceFile on disk.
  @Test
  def save_test: Unit = {
    // textFile: read the input as lines of text.
    val lines: RDD[String] = sc.textFile("input/word.txt")

    // sequenceFile: write (key, value) pairs, then read them back.
    lines.map(line => ("dhtest", line)).saveAsSequenceFile("output/output1")
    val roundTripped: RDD[(String, String)] = sc.sequenceFile("output/output1", classOf[String], classOf[String])
    roundTripped.foreach(println)
  }

  // Broadcast variables: ship a read-only value to executors once rather than
  // capturing it in every task's closure.
  @Test
  def broadcast_test: Unit = {
    val numbers = sc.makeRDD(List(1, 2, 3, 4))
    val offset = 5
    // Plain closure capture.
    println("------------------非广播形式----------------")
    numbers.map(n => n + offset).collect.foreach(println)

    // Same computation via a broadcast variable.
    println("------------------广播形式----------------")
    val broadcastOffset = sc.broadcast(offset)
    numbers.map(n => n + broadcastOffset.value).collect.foreach(println)
  }

  // Action operators: each of these triggers job execution.
  @Test
  def action_test: Unit = {
    // 1. reduce: aggregate within partitions first, then across partitions.
    val numbers: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4, 5))
    println(numbers.reduce((a, b) => a + b))

    // 2. collect: pull the whole RDD back to the driver as an Array.
    println(numbers.collect.mkString(","))

    // 3. count: number of elements in the RDD.
    println(numbers.count())

    // first: the first element.
    println(numbers.first())

    // take: the first n elements.
    println(numbers.take(3).mkString(","))

    // takeOrdered: the first n elements after sorting (descending here).
    println(numbers.takeOrdered(2)(Ordering.Int.reverse).mkString(","))

    // aggregate: the zero value participates in BOTH the in-partition and the
    // cross-partition step.
    println("-------------aggregate--------------")
    println(numbers.aggregate(0)(_ + _, _ * _))

    // fold: aggregate with identical in/cross-partition functions.
    println(numbers.fold(0)(_ + _))

    // countByKey: occurrences of each key.
    val pairs: RDD[(Int, String)] = sc.makeRDD(List((1, "a"), (2, "b"), (2, "cgt"), (1, "5"), (1, "mn"), (2, "r")))
    println(pairs.countByKey())

    // save: persist in several on-disk formats.
    numbers.saveAsTextFile("output/outputtext")
    numbers.saveAsObjectFile("output/outputobject")
    pairs.saveAsSequenceFile("output/outputseq")

    // foreach: visit every element (runs on the executors, not the driver).
    numbers.foreach(println)
  }

  // Spark SQL: two ways of reading a MySQL table over JDBC.
  // NOTE(review): credentials are hard-coded in source — consider externalizing.
  @Test
  def sparksql_test: Unit = {
    import sparksql.implicits._
    // Way 1: generic reader configured through an options map.
    val jdbcOptions = Map(
      "url" -> "jdbc:mysql://192.168.1.193:6603/xmanager",
      "driver" -> "com.mysql.jdbc.Driver",
      "user" -> "xmanager",
      "password" -> "Xmanager_666.cn",
      "dbtable" -> "xm_node"
    )
    val df1: DataFrame = sparksql.read.format("jdbc").options(jdbcOptions).load()
    df1.createTempView("t1")
    // DSL style
    df1.select("id").show
    // SQL style
    sparksql.sql("select * from t1").show()

    // Way 2: dedicated jdbc reader taking a Properties bag.
    val props = new Properties()
    props.setProperty("user", "xmanager")
    props.setProperty("password", "Xmanager_666.cn")
    sparksql.read.jdbc("jdbc:mysql://192.168.1.193:6603/xmanager", "xm_node", props).show()
  }

  // Read an HBase table through TableInputFormat as an RDD of (rowkey, Result).
  @Test
  def spark_read_HBase: Unit = {
    // BUGFIX: reuse the class-level SparkContext. The original created a second
    // SparkContext here, which fails at runtime because only one SparkContext
    // may be running per JVM.
    val hbaseConf = HBaseConfiguration.create()
    // HBase connection settings.
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("hbase.zookeeper.quorum", "192.168.1.151,192.168.1.153,192.168.1.154")
    hbaseConf.set("hbase.rootdir", "hdfs://192.168.1.151:9000/home/hbase")
    hbaseConf.set("hbase.master", "192.168.1.151:60010")
    // Table to scan.
    hbaseConf.set(TableInputFormat.INPUT_TABLE, "hbasetablename")
    val hbaseRdd = sc.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[Result])
    hbaseRdd.cache()
    // Print every row's key plus one column value.
    hbaseRdd.foreach({ case (_, result) =>
      val key = Bytes.toString(result.getRow)
      // Bytes.toBytes is UTF-8 safe; String.getBytes depends on the platform
      // default charset and can mismatch what HBase stored.
      val message = Bytes.toString(result.getValue(
        Bytes.toBytes("hbaseFamilyName"),
        Bytes.toBytes("hbaseColumnName")))

      printf("ID: %s, message: %s \n", key, message)
    })
  }
}
