package main.scala.demo

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * MyWordCount
  *
  * @author zhangyimin
  * @version 1.0
  */
object MyWordCount {

  /**
    * Demo entry point exercising `RDD.aggregate` with string-length
    * seqOp/combOp functions and per-partition inspection.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    // Silence noisy Spark / Jetty logging so the demo output is readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Configure and create the SparkContext (local single-threaded mode).
    val conf = new SparkConf()
      .setAppName("wordCount")
      .setMaster("local")
    val sc = new SparkContext(conf)

    val rdd3 = sc.parallelize(List("12", "23", "345", "4567"), 2)
    // NOTE: the seqOp converts the per-partition max length to a String,
    // so the result is the concatenation of the two partition results;
    // combOp ordering is non-deterministic, so this prints "24" or "42".
    println(rdd3.aggregate("")((x, y) => math.max(x.length, y.length).toString, (x, y) => x + y))

    val rdd4 = sc.parallelize(List("12", "23", "345", ""), 2)
    // Show which element lives in which partition. Collect to the driver
    // before printing: RDD.foreach(println) runs on the executors, so in
    // any non-local deployment its output would never reach this console.
    rdd4.mapPartitionsWithIndex(func2).collect().foreach(println)

    // Same seqOp-returns-String trick with min: the empty string makes one
    // partition's intermediate result "0".
    println(rdd4.aggregate("")((x, y) => math.min(x.length, y.length).toString, (x, y) => x + y))
    println(rdd4.partitions.length)
    println(math.min("".length, "".length))

    sc.stop()
  }

  /**
    * Tags every element of a partition with the partition's id.
    *
    * @param index partition id supplied by `mapPartitionsWithIndex`
    * @param it    the elements of that partition
    * @return elements rendered as "(partition id:&lt;index&gt;,value:&lt;element&gt;)"
    */
  def func2(index: Int, it: Iterator[String]): Iterator[String] = {
    // Lazy map over the iterator: no need to materialize the whole
    // partition in memory with toList.
    it.map(x => "(partition id:" + index + ",value:" + x + ")")
  }

}
