package spark_core.operate_transform.singlevalue;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

/**
 * Demo of Spark's map-family transformations — {@code map}, {@code mapPartitions},
 * and {@code mapPartitionsWithIndex} — applied to an in-memory JavaRDD of integers.
 *
 * @author shihb
 * @date 2020/1/6 12:04
 */
public class MapDemo {

  public static void main(String[] args) {
    // Local mode: configure the Spark deployment environment (all local cores).
    SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("mark rdd");
    // JavaSparkContext is Closeable; try-with-resources guarantees the context
    // is stopped even if a job throws (the old trailing sc.stop() was skipped on error).
    try (JavaSparkContext sc = new JavaSparkContext(sparkConf)) {
      // 1. Create an RDD from an in-memory collection, split into 2 partitions.
      JavaRDD<Integer> arrayRdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 2);

      // map operator: invoked once per element.
      JavaRDD<Integer> mapRdd = arrayRdd.map(x -> x * 2);
      // Materialize so the demo actually runs this operator.
      mapRdd.collect().forEach(System.out::println);

      // mapPartitions operator: invoked once per partition, so fewer function
      // calls than map — but it buffers a whole partition's output in memory
      // and can OOM on very large partitions.
      JavaRDD<Integer> mapPartitionsRdd = arrayRdd.mapPartitions(iter -> {
        List<Integer> list = new ArrayList<>();
        while (iter.hasNext()) {
          list.add(iter.next() * 2);
        }
        return list.iterator();
      });
      mapPartitionsRdd.collect().forEach(System.out::println);

      // mapPartitionsWithIndex operator: like mapPartitions, but also exposes
      // the partition index so each element can be tagged with its partition.
      JavaRDD<Tuple2<Integer, String>> tuple2RDD = arrayRdd
          .mapPartitionsWithIndex((num, datas) -> {
            List<Tuple2<Integer, String>> list = new ArrayList<>();
            while (datas.hasNext()) {
              // Diamond operator instead of a raw Tuple2 (avoids the unchecked warning).
              list.add(new Tuple2<>(datas.next(), "分区数:" + num));
            }
            return list.iterator();
          }, true);

      tuple2RDD.collect().forEach(System.out::println);
    }
  }

}
