package com.atguigu.bigdata.spark.core.rdd.operator.transform;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.sources.In;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class Spark24_RDD_Req_JAVA {
    public static void main(String[] args) {
        //统计出每一个省份每个广告被点击数量排行的Top3
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("sparkCore");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // 1. 获取原始数据：时间戳，省份，城市，用户，广告
        JavaRDD<String> input = sc.textFile("datas/agent.log");
        // 2. 将原始数据进行结构的转换。方便统计
        //    时间戳，省份，城市，用户，广告
        //    =>
        //    ( ( 省份，广告 ), 1 )
        JavaPairRDD<Tuple2<String,String>,Integer>  mapPair = input.mapToPair(new PairFunction<String, Tuple2<String, String>, Integer>() {
            @Override
            public Tuple2<Tuple2<String, String>, Integer> call(String s) throws Exception {
                String[] input = s.split(" ");
                Tuple2<String, String> p1  = new Tuple2<String, String>(input[1],input[4]);
                return new Tuple2<Tuple2<String, String>, Integer>(p1, 1);
            }
        });

        // 3. 将转换结构后的数据，进行分组聚合
        //    ( ( 省份，广告 ), 1 ) => ( ( 省份，广告 ), sum )
        JavaPairRDD<Tuple2<String,String>,Integer> reduceByKey = mapPair.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });

        // 4. 将聚合的结果进行结构的转换
        //    ( ( 省份，广告 ), sum ) => ( 省份, ( 广告, sum ) )
        JavaPairRDD<String, Tuple2<String, Integer>> mapPair1 = reduceByKey.mapToPair(new PairFunction<Tuple2<Tuple2<String, String>, Integer>, String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Tuple2<String, Integer>> call(Tuple2<Tuple2<String, String>, Integer> inputTuple2) throws Exception {
                String province = inputTuple2._1._1;
                String ad = inputTuple2._1._2;
                int sum = inputTuple2._2;
                Tuple2<String, Integer> temp = new Tuple2<>(ad, sum);
                return new Tuple2<String, Tuple2<String, Integer>>(province, temp);
            }
        });

        // 5. 将转换结构后的数据根据省份进行分组
        //    ( 省份, 【( 广告A, sumA )，( 广告B, sumB )】 )

        JavaPairRDD<String, Iterable<Tuple2<String, Integer>>>  gruopbykey = mapPair1.groupByKey();

        // 6. 将分组后的数据组内排序（降序），取前3名
        JavaPairRDD<String, Iterable<Tuple2<String, Integer>>> mapvalue = gruopbykey.mapValues(new Function<Iterable<Tuple2<String, Integer>>, Iterable<Tuple2<String, Integer>>>() {
            @Override
            public Iterable<Tuple2<String, Integer>> call(Iterable<Tuple2<String, Integer>> v1) throws Exception {
                List<Tuple2<String, Integer>> temp = new ArrayList<>();
                while(v1.iterator().hasNext()) {
                    temp.add(v1.iterator().next());
                }

                return null;
            }
        });


        sc.stop();
    }
}
