package com.example.demo.componet.spark;

import com.example.demo.componet.ReadCSV;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import scala.Tuple2;

import javax.annotation.Resource;
import java.util.List;
import java.util.Map;

/**
 * 读csvf返回RDD
 *
 * @author xwh
 * @date 2023/02/03
 */
@Component
public class ReadCSVReturnRDD {

    /** Spark context injected by the container; used to parallelize the column data. */
    @Resource
    private JavaSparkContext sc;

    /**
     * Reads one column of a CSV file and counts how many times each distinct
     * value occurs, using a classic Spark map/reduceByKey word count.
     *
     * @param csvFilePath path of the CSV file to read
     * @param headNumber  column selector passed through to
     *                    {@code ReadCSV.getColumnList} — presumably a 0-based
     *                    column index; confirm against that method
     * @return map from each distinct column value to its occurrence count
     */
    public Map<String, Integer> wordCountMap(String csvFilePath, Integer headNumber) {

        // Extract the target column as a plain in-memory list.
        ReadCSV readCSV = new ReadCSV();
        List<String> columnList = readCSV.getColumnList(csvFilePath, headNumber);

        // Distribute the column values across the cluster.
        JavaRDD<String> rdd = sc.parallelize(columnList);

        // Map each value to a (word, 1) pair; type inference makes the
        // explicit PairFunction cast unnecessary.
        JavaPairRDD<String, Integer> wordMap = rdd.mapToPair(word -> new Tuple2<>(word, 1));

        // Sum the counts per word.
        JavaPairRDD<String, Integer> reduceMap = wordMap.reduceByKey(Integer::sum);

        // Materialize the result on the driver.
        return reduceMap.collectAsMap();
    }
}
