package com.sunzm.spark.core.rdd.transformation

import java.util.Date

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.{DateFormatUtils, DateUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
 * RDD 转换类算子的示例程序
 *
 */
object SparkRDDTransformation {
  def main(args: Array[String]): Unit = {
    // Run locally, using as many worker threads as there are cores.
    val sparkConf = new SparkConf()
      .setAppName("RDD转换类算子示例")
      .setMaster("local[*]")
    val sc: SparkContext = new SparkContext(sparkConf)

    // Uncomment whichever demo you want to run:
    //mapFunctionDemo(sc)
    //filterFunctionDemo(sc)
    //flatMapFunctionDemo(sc)
    //mapPartitionsFunctionDemo(sc)
    //mapPartitionsWithIndexFunctionDemo(sc)
    //unionIntersectionAndDiffFunctionDemo(sc)
    //bykeyFunctionDemo(sc)
    //sortByAndsortByKeyFunctionDemo(sc)
    coalesceAndrepartitionFuncDemo(sc)

    // Release all Spark resources before exiting.
    sc.stop()
  }

  /**
   * map(func)
   *
   * Applies func to every element, producing a new RDD. map is a strict
   * one-to-one transformation: each input element yields exactly one output
   * element, so the element count never changes.
   *
   * @param sc active SparkContext
   */
  def mapFunctionDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/sql/msgLog.log")

    /*
    Requirement: from each msgLog.log record keep only the fields
     source, datetime, senderName, receiverName, action,
     sessionId, userId, companyId, receiverType, senderType
    and additionally render the datetime field in yyyy-MM-dd HH:mm:ss
    format in a new field named datetimeStr.
     */
    val mapRDD = dataRDD.map(jsonStr => {
      val jSONObject: JSONObject = JSON.parseObject(jsonStr)

      val source: Int = jSONObject.getIntValue("source")
      val action: Int = jSONObject.getIntValue("action")
      val receiverType: Int = jSONObject.getIntValue("receiverType")
      val senderType: Int = jSONObject.getIntValue("senderType")

      val datetime = jSONObject.getLongValue("datetime")
      // BUG FIX: the pattern previously contained a fullwidth colon (：, U+FF1A)
      // between mm and ss, producing malformed timestamps instead of the
      // required yyyy-MM-dd HH:mm:ss format.
      val datetimeStr = DateFormatUtils.format(datetime, "yyyy-MM-dd HH:mm:ss")

      val senderName = jSONObject.getString("senderName")
      val receiverName = jSONObject.getString("receiverName")
      val sessionId = jSONObject.getString("sessionId")
      val userId = jSONObject.getString("userId")
      val companyId = jSONObject.getString("companyId")

      // Rebuild the object in place so it contains only the wanted fields.
      jSONObject.clear()

      jSONObject.put("source", source)
      jSONObject.put("action", action)
      jSONObject.put("receiverType", receiverType)
      jSONObject.put("senderType", senderType)
      jSONObject.put("datetime", datetime)
      jSONObject.put("datetimeStr", datetimeStr)
      jSONObject.put("senderName", senderName)
      jSONObject.put("receiverName", receiverName)
      jSONObject.put("sessionId", sessionId)
      jSONObject.put("userId", userId)
      jSONObject.put("companyId", companyId)

      jSONObject.toJSONString
    })

    // Action that triggers the lazy transformation above.
    mapRDD.foreach(line => {
      println(line)
    })

  }

  /**
   * filter(func)
   *
   * Works like a SQL WHERE clause: records for which func returns true are
   * kept, records for which it returns false are dropped.
   *
   * @param sc active SparkContext
   */
  def filterFunctionDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/sql/msgLog.log")

    /*
    Requirement:
    keep only records with action == 5, retaining just the
    action and sessionId fields.
     */

    // PERF FIX: the original parsed every line twice (once in filter, once
    // in map). Parse each line exactly once, then filter and trim.
    val mapRDD: RDD[JSONObject] = dataRDD
      .map(line => JSON.parseObject(line))
      .filter(json => json.getIntValue("action") == 5)
      .map(json => {
        val action = json.getIntValue("action")
        val sessionId = json.getString("sessionId")

        // Build a fresh object containing only the two wanted fields.
        val trimmed = new JSONObject()
        trimmed.put("action", action)
        trimmed.put("sessionId", sessionId)
        trimmed
      })

    //println(mapRDD.count())

    mapRDD.foreach(json => {
      println(json)
    })

  }

  /**
   * flatMap(func)
   *
   * func takes one element and returns a collection or array; the results
   * are flattened into the output RDD. Unlike map (one element in, one
   * element out), flatMap may emit zero or more elements per input.
   *
   * @param sc active SparkContext
   */
  def flatMapFunctionDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/rdd/word.txt")

    /*
    Example: the single line
      hello,java,hello,hadoop
    becomes the four elements
      hello / java / hello / hadoop
     */
    val wordsRDD: RDD[String] = dataRDD.flatMap(line => StringUtils.split(line, ","))

    wordsRDD.foreach(println)

  }

  /**
   * mapPartitions(func)
   *
   * Difference from map: map is invoked once per element, while
   * mapPartitions is invoked once per partition, receiving an iterator
   * over that partition's elements.
   *
   * @param sc active SparkContext
   */
  def mapPartitionsFunctionDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/sql/msgLog.log")

    /*
   Requirement: from each msgLog.log record keep only the fields
    source, datetime, senderName, receiverName, action,
    sessionId, userId, companyId, receiverType, senderType
   and additionally render the datetime field in yyyy-MM-dd HH:mm:ss
   format in a new field named datetimeStr.
    */
    val mapPartitionsRDD: RDD[String] = dataRDD.mapPartitions(ite => {
      ite.map(line => {
        val jSONObject: JSONObject = JSON.parseObject(line)

        val source: Int = jSONObject.getIntValue("source")
        val action: Int = jSONObject.getIntValue("action")
        val receiverType: Int = jSONObject.getIntValue("receiverType")
        val senderType: Int = jSONObject.getIntValue("senderType")

        val datetime = jSONObject.getLongValue("datetime")
        // BUG FIX: the pattern previously contained a fullwidth colon (：, U+FF1A)
        // between mm and ss, producing malformed timestamps instead of the
        // required yyyy-MM-dd HH:mm:ss format.
        val datetimeStr = DateFormatUtils.format(datetime, "yyyy-MM-dd HH:mm:ss")

        val senderName = jSONObject.getString("senderName")
        val receiverName = jSONObject.getString("receiverName")
        val sessionId = jSONObject.getString("sessionId")
        val userId = jSONObject.getString("userId")
        val companyId = jSONObject.getString("companyId")

        // Rebuild the object in place so it contains only the wanted fields.
        jSONObject.clear()

        jSONObject.put("source", source)
        jSONObject.put("action", action)
        jSONObject.put("receiverType", receiverType)
        jSONObject.put("senderType", senderType)
        jSONObject.put("datetime", datetime)
        jSONObject.put("datetimeStr", datetimeStr)
        jSONObject.put("senderName", senderName)
        jSONObject.put("receiverName", receiverName)
        jSONObject.put("sessionId", sessionId)
        jSONObject.put("userId", userId)
        jSONObject.put("companyId", companyId)

        jSONObject.toJSONString
      })
    })

    mapPartitionsRDD.foreach(line => {
      println(line)
    })

  }

  /**
   * mapPartitionsWithIndex(func)
   *
   * Same as mapPartitions, except func additionally receives the index of
   * the partition being processed.
   *
   * @param sc active SparkContext
   */
  def mapPartitionsWithIndexFunctionDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/sql/msgLog.log")

    // Tag every record with the index of the partition that holds it.
    val indexedRDD: RDD[(Int, String)] = dataRDD.mapPartitionsWithIndex {
      case (partitionIndex, records) =>
        records.map(record => (partitionIndex, record))
    }

    indexedRDD.foreach { case (index, line) =>
      println(s"index: ${index}: ${line}")
    }

    // Reduce each record to its partition index, then deduplicate with
    // distinct to see which partitions actually contain data.
    val partitionIds: RDD[Int] = indexedRDD.map(_._1).distinct()

    partitionIds.foreach(println)

  }

  /**
   * union(otherDataset)
   * Concatenates two RDDs (set union with duplicates kept), e.g.
   * rdd1: 1,3,5 and rdd2: 2,4,6 give 1,2,3,4,5,6.
   *
   * intersection(otherDataset)
   * Elements present in both RDDs.
   *
   * subtract(other: RDD[T])
   * Elements present in rdd1 but absent from rdd2.
   *
   * distinct
   * Removes duplicate elements from an RDD.
   *
   * @param sc active SparkContext
   */
  def unionIntersectionAndDiffFunctionDemo(sc: SparkContext) = {
    // Parallelize local Scala collections into RDDs.
    val rdd1: RDD[Int] = sc.parallelize(Array[Int](1, 3, 3, 5))
    val rdd2: RDD[Int] = sc.parallelize(Array[Int](2, 3, 4, 6))

    // Local helper: print a banner followed by every element of the RDD.
    def show(banner: String, rdd: RDD[Int]): Unit = {
      println(banner)
      rdd.foreach(println)
    }

    show("------并集------", rdd1.union(rdd2))        // union
    show("------交集------", rdd1.intersection(rdd2)) // intersection
    show("------差集------", rdd1.subtract(rdd2))     // difference
    show("------去重------", rdd1.distinct())         // deduplication
  }


  /**
   * groupByKey
   * Groups values by key.
   *
   * groupBy(func)
   * More flexible than groupByKey: the grouping key is computed by func
   * rather than being restricted to the pair's key.
   *
   * reduceByKey(func)
   * Aggregates the values of each key with func.
   *
   * aggregateByKey(zeroValue)(seqOp, combOp)
   * zeroValue: initial accumulator value;
   * seqOp: per-partition (local) aggregation function;
   * combOp: cross-partition (global) aggregation function.
   *
   * @param sc active SparkContext
   */
  def bykeyFunctionDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/sql/msgLog.log")

    /*
    Requirement:
    count the messages per session (sessionId identifies the session,
    action == 5 marks a chat message).
     */
    val sessionOnes: RDD[(String, Int)] = dataRDD
      .filter(line => JSON.parseObject(line).getIntValue("action") == 5)
      .map(line => (JSON.parseObject(line).getString("sessionId"), 1))

    // Local helper: print a banner, then one "sessionId -> count" line
    // per result pair.
    def dump(banner: String, results: RDD[(String, Int)]): Unit = {
      println(banner)
      results.foreach { case (sessionId, sessionCount) =>
        println(s"${sessionId} -> ${sessionCount}")
      }
    }

    // 1: groupByKey — collect the 1s per session, then sum them.
    val byGroupByKey: RDD[(String, Int)] = sessionOnes.groupByKey().map {
      case (sessionId, ones) => (sessionId, ones.sum)
    }
    dump("-----groupByKey-------", byGroupByKey)

    // 2: groupBy — group whole pairs by their key, then sum the values.
    val byGroupBy: RDD[(String, Int)] = sessionOnes
      .groupBy(_._1)
      .map { case (sessionId, pairs) => (sessionId, pairs.map(_._2).sum) }
    dump("-----groupBy-------", byGroupBy)

    // 3: reduceByKey — pairwise-add the values of each key.
    val byReduceByKey: RDD[(String, Int)] = sessionOnes.reduceByKey((a, b) => a + b)
    dump("-----reduceByKey-------", byReduceByKey)

    // 4: aggregateByKey — zero 0, with addition as both the local and the
    // global aggregation function.
    val byAggregateByKey: RDD[(String, Int)] =
      sessionOnes.aggregateByKey(0)((acc, v) => acc + v, (x, y) => x + y)
    dump("-----aggregateByKey-------", byAggregateByKey)

  }

  /**
   * sortBy can sort an RDD of any element type (the sort key is computed
   * by a function); sortByKey only works on key-value RDDs and sorts by
   * the key.
   *
   * @param sc active SparkContext
   */
  def sortByAndsortByKeyFunctionDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/rdd/order.txt")

    // Show which partition each raw record lives in.
    val mapPartitionsWithIndexRDD: RDD[(Int, String)] = dataRDD.mapPartitionsWithIndex {
      case (index, ite) => {
        ite.map(line => {
          (index, line)
        })
      }
    }

    mapPartitionsWithIndexRDD.foreach(println)

    /**
     * sortBy's first parameter selects the sort key;
     * the second selects the direction (true ascending, false descending);
     * the optional third controls the partition count (unchanged if omitted).
     *
     * sortBy only guarantees order within each partition when printed this
     * way, not a globally ordered printout.
     */
    val sortByRDD: RDD[String] = dataRDD.sortBy(line => {
      val orderTimeStr = JSON.parseObject(line).getString("orderTime")
      DateUtils.parseDate(orderTimeStr, "yyyy-MM-dd HH:mm:ss").getTime
    }, ascending = true)

    //sortByRDD.foreach(println)

    /**
     * sortByKey's first parameter selects the direction (true ascending,
     * false descending); the second controls the partition count
     * (unchanged if omitted).
     *
     * To read the output in global order, inspect partition 0, then
     * partition 1, ... — not the interleaved print order.
     *
     * repartitionAndSortWithinPartitions partitions by a custom
     * Partitioner and sorts by key within each partition.
     */
    val sortByKeyRDD = dataRDD.map(line => {
      val jsonObj = JSON.parseObject(line)
      val orderTimeStr = jsonObj.getString("orderTime")

      // BUG FIX: the original also computed orderTime.getTime and silently
      // discarded the result (dead code); the Date itself serves as the
      // sort key via its natural ordering. Also removed the unused
      // orderId and partition-count locals.
      val orderTime: Date = DateUtils.parseDate(orderTimeStr, "yyyy-MM-dd HH:mm:ss")
      (orderTime, line)
    })
      .sortByKey()

    // Show which partition each sorted record ended up in.
    val sortByKeyMapPartitionsWithIndexRDD = sortByKeyRDD.mapPartitionsWithIndex {
      case (index, ite) => {
        ite.map(line => {
          (index, line)
        })
      }
    }

    sortByKeyMapPartitionsWithIndexRDD.foreach(println)
  }

  /**
   * Demonstrates coalesce and repartition by printing, for each stage,
   * which partition every record lives in.
   *
   * @param sc active SparkContext
   */
  def coalesceAndrepartitionFuncDemo(sc: SparkContext) = {
    val dataRDD: RDD[String] = sc.textFile("data/spark/rdd/order.txt")

    // Local helper: print every record of an RDD together with the index
    // of the partition that holds it.
    def printWithPartitionIndex(rdd: RDD[String]): Unit = {
      val indexed: RDD[(Int, String)] = rdd.mapPartitionsWithIndex {
        case (partitionIndex, records) =>
          records.map(record => (partitionIndex, record))
      }
      indexed.foreach { case (index, line) =>
        println(s"index: ${index}, line: ${line}")
      }
    }

    printWithPartitionIndex(dataRDD)

    /**
     * coalesce takes two parameters:
     * the first is the target partition count;
     * the second is whether to shuffle (default false, i.e. no shuffle).
     *
     * coalesce is normally used to REDUCE the partition count.
     */
    val coalesceRDD: RDD[String] = dataRDD.coalesce(1)

    println("----------coalesce----------")
    printWithPartitionIndex(coalesceRDD)

    println("----------repartition----------")

    /**
     * repartition is implemented as:
     *   coalesce(numPartitions, shuffle = true)
     * so it ALWAYS shuffles.
     *
     * repartition is normally used to INCREASE the partition count.
     */
    printWithPartitionIndex(dataRDD.repartition(3))
  }

}
