package com.etc.java;



import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.broadcast.Broadcast;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.hive.HiveContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;

import java.util.*;

/**
 * Computes the daily top-3 search keywords by unique-visitor (UV) count.
 *
 * <p>Input: a text file on HDFS where each line is a comma-separated record of
 * {@code date,user,keyword,city,platform,version}. Records are filtered against
 * broadcast whitelists (city/platform/version), grouped by (date, keyword) to
 * count distinct users, and ranked per day with a window function to keep the
 * top 3 keywords.
 *
 * @author 杰哥
 */
public class top3 {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("top3")
                .master("local[*]")
                .getOrCreate();
        JavaSparkContext jsc = JavaSparkContext.fromSparkContext(spark.sparkContext());

        // Filter criteria; an empty list means "no restriction" for that dimension.
        Map<String, List<String>> queryParamMap = new HashMap<String, List<String>>();
        queryParamMap.put("city", Arrays.asList("北京", "天津", "南京"));
        queryParamMap.put("platform", Arrays.asList("Android"));
        queryParamMap.put("version", Arrays.asList("1.0", "1.2", "1.5", "2.0"));

        // Broadcast the small read-only filter map once instead of shipping it with every task.
        final Broadcast<Map<String, List<String>>> broadcast = jsc.broadcast(queryParamMap);

        // Raw log lines: date,user,keyword,city,platform,version
        JavaRDD<String> lines = jsc.textFile("hdfs://master:9000/input/uu.txt");

        // Keep only records matching every non-empty whitelist; drop malformed lines
        // instead of failing the task with ArrayIndexOutOfBoundsException.
        JavaRDD<String> filtered = lines.filter(new Function<String, Boolean>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Boolean call(String s) throws Exception {
                String[] split = s.split(",");
                if (split.length < 6) {
                    return false;
                }
                Map<String, List<String>> params = broadcast.value();
                return matches(params.get("city"), split[3])
                        && matches(params.get("platform"), split[4])
                        && matches(params.get("version"), split[5]);
            }
        });

        // Re-key each record as (date_keyword, user).
        JavaPairRDD<String, String> dateKeywordUser = filtered.mapToPair(new PairFunction<String, String, String>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Tuple2<String, String> call(String s) throws Exception {
                String[] split = s.split(",");
                String date = split[0];
                String user = split[1];
                String keyword = split[2];
                return new Tuple2<String, String>(date + "_" + keyword, user);
            }
        });

        // Group by (date, keyword): the users per key are NOT yet de-duplicated.
        JavaPairRDD<String, Iterable<String>> grouped = dateKeywordUser.groupByKey();

        // De-duplicate users per key with a HashSet (O(n), vs the original O(n^2)
        // List.contains scan) to obtain the UV count.
        JavaPairRDD<String, Long> dateKeywordUv = grouped.mapToPair(
                new PairFunction<Tuple2<String, Iterable<String>>, String, Long>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Tuple2<String, Long> call(Tuple2<String, Iterable<String>> keywordUsers) throws Exception {
                Set<String> distinctUsers = new HashSet<String>();
                for (String user : keywordUsers._2) {
                    distinctUsers.add(user);
                }
                return new Tuple2<String, Long>(keywordUsers._1, (long) distinctUsers.size());
            }
        });

        // Convert each (date_keyword, uv) entry into a Row(date, keyword, uv).
        JavaRDD<Row> rows = dateKeywordUv.map(new Function<Tuple2<String, Long>, Row>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Row call(Tuple2<String, Long> t) throws Exception {
                String[] dateKeyword = t._1.split("_");
                return RowFactory.create(dateKeyword[0], dateKeyword[1], t._2);
            }
        });

        // Schema for the DataFrame.
        List<StructField> structFields = Arrays.asList(
                DataTypes.createStructField("date", DataTypes.StringType, true),
                DataTypes.createStructField("keyword", DataTypes.StringType, true),
                DataTypes.createStructField("uv", DataTypes.LongType, true));
        StructType structType = DataTypes.createStructType(structFields);

        // SparkSession supersedes the deprecated HiveContext for both
        // createDataFrame and sql; createOrReplaceTempView replaces the
        // deprecated registerTempTable.
        Dataset<Row> dataFrame = spark.createDataFrame(rows, structType);
        dataFrame.createOrReplaceTempView("daily_keyword_uv");

        // row_number() per date partition, highest uv first; keep the top 3.
        // Alias is "rn" rather than "rank" to avoid clashing with the SQL
        // function name of the same spelling.
        Dataset<Row> top3PerDay = spark.sql(""
                + "SELECT date,keyword,uv "
                + "FROM ("
                + "SELECT "
                + "date,"
                + "keyword,"
                + "uv,"
                + "row_number() OVER (PARTITION BY date ORDER BY uv DESC) rn "
                + "FROM daily_keyword_uv"
                + ") tmp "
                + "WHERE rn<=3");

        // BUG FIX: the original program never invoked an action, so no Spark job
        // was ever executed and nothing was produced. show() materializes the result.
        top3PerDay.show(false);

        // Release cluster resources.
        spark.stop();
    }

    /**
     * Returns {@code true} when the whitelist imposes no restriction
     * (null or empty) or contains the given value.
     */
    private static boolean matches(List<String> whitelist, String value) {
        return whitelist == null || whitelist.isEmpty() || whitelist.contains(value);
    }
}
