package com.stan.core.spark.ad;

import com.stan.common.DateUtils;
import com.stan.core.conf.ConfManager;
import com.stan.core.contants.Constants;
import com.stan.core.mapper.*;
import com.stan.core.mapper.factory.MapperFactory;
import com.stan.core.spark.SparkCalculator;
import com.stan.core.spark.ad.blackList.functions.SplitArrayFilterFunction;
import com.stan.core.spark.ad.blackList.functions.StreamToUserId2DataRDDFunction;
import com.stan.core.spark.ad.blackList.functions.UserId2DataAndBlackFunction;
import com.stan.core.spark.ad.blackList.functions.UserId2DataAndBlackToUserId2DataFunction;
import com.stan.core.vo.*;
import kafka.serializer.StringDecoder;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.*;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.hive.HiveContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Seconds;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Serializable;
import scala.Tuple2;
import java.util.*;

/**
 * Real-time ad-click stream analysis job.
 *
 * <p>Consumes ad click events from Kafka — one record per click, shaped as
 * {@code (userId, "yyyy-MM-dd HH:mm:ss province city xx aid")} — filters them
 * against a dynamic user black list, and then runs three computations:
 * <ol>
 *   <li>real-time click counts per (date, province, city, ad), accumulated in MySQL;</li>
 *   <li>daily top-10 ads per province via Spark SQL (currently disabled);</li>
 *   <li>per-minute click counts for clicks that happened within the last hour.</li>
 * </ol>
 */
public class AdvClickStreamSpark implements SparkCalculator,Serializable {

    /** Entry point: builds the job object and runs it. */
    public static void main(String[] args) {
        AdvClickStreamSpark advClickStreamSpark = new AdvClickStreamSpark();
        advClickStreamSpark.run(args);
    }

    /**
     * Creates the Spark context and starts the streaming computation.
     *
     * @param args command-line arguments (currently unused)
     */
    @Override
    public void run(String[] args) {
        SparkConf conf = new SparkConf();
        // TODO(review): "local[2]" is hard-coded for development; make the master
        // configurable before deploying to a cluster.
        conf.setMaster("local[2]");
        conf.setAppName(Constants.CONF_ADV_CLICK_STREAM_APP_NAME);
        JavaSparkContext jsc = new JavaSparkContext(conf);

        compute(jsc, null);
        jsc.close();
    }

    /**
     * Wires the Kafka input stream into the three analyses and blocks until
     * the streaming context terminates.
     *
     * @param jsc  shared Spark context
     * @param task job descriptor (unused here; {@code main} passes {@code null})
     */
    @Override
    public void compute(JavaSparkContext jsc, Task task) {
        // One RDD batch every 5 seconds. TODO: make the batch interval configurable.
        JavaStreamingContext streamingContext = new JavaStreamingContext(
                jsc, Seconds.apply(5));

        // TODO: configure a checkpoint directory (required for stateful/windowed
        // operations and driver recovery).

        // TODO: feed mocked data to make testing easier.

        // Kafka -> Spark Streaming consumer parameters.
        Map<String,String> kafkaParams = new HashMap<String,String>();
        kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,ConfManager.getProperty(Constants.KAFKA_BORKER_LIST));
        // TODO(review): the group id and zookeeper address are hard-coded; move
        // them into the configuration file alongside the broker list.
        kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG,"test-consumer-group");
        kafkaParams.put("zookeeper.connect","192.168.29.132:2181");

        // Comma-separated topic list from configuration.
        String topicsStr = ConfManager.getProperty(Constants.ADV_CLICK_STREAM_TOPICS);
        Set<String> topics = new HashSet<String>(Arrays.asList(topicsStr.split(",")));

        System.out.println(kafkaParams);
        System.out.println(topics);

        // Record format: (224,2019-04-19 15:01:45 江苏 淮安 20 24)
        JavaPairInputDStream<String,String> inputStream = KafkaUtils.createDirectStream(
                streamingContext,
                String.class,
                String.class,
                StringDecoder.class,
                StringDecoder.class,
                kafkaParams,
                topics
        );

        // First drop records whose user is on the dynamic black list; users that
        // exceed the click threshold within a batch are added to the list.
        // Record format: (224,2019-04-19 15:01:45 江苏 淮安 20 24)
        JavaPairDStream<String,String> filteredInputStream = processInBlackList(jsc,inputStream);

        // Debug output only: prints the per-batch RDD reference.
        filteredInputStream.foreachRDD(new Function<JavaPairRDD<String, String>, Void>() {
            @Override
            public Void call(JavaPairRDD<String, String> batchRDD) throws Exception {
                System.out.println(batchRDD);
                return null;
            }
        });

        // The filtered stream feeds several output operations; cache it so each
        // batch is filtered only once.
        filteredInputStream = filteredInputStream.persist(StorageLevel.MEMORY_AND_DISK());

        // Feature 1: real-time click counts.
        // Target shape: (yyyyMMdd_province_city_aid, clickCount), persisted to MySQL.
        // Accumulating in MySQL is preferred over updateStateByKey because the
        // latter keeps running totals in memory only, which is fragile.
        JavaPairDStream<String,Integer> dateProvinceCityAid2ClickCount = computeRealTimeCount(filteredInputStream);
        dateProvinceCityAid2ClickCount.print();

        // Feature 2: daily top-10 ads per province.
//        computeEveryDayProvinceTop10Ad(dateProvinceCityAid2ClickCount); // TODO:3

        // Feature 3: click trend over the past hour.
        // 1. count clicks per minute that occurred within the last hour;
        // 2. persist the counts to the database.
        computeAdClickCountWindow(filteredInputStream);

        streamingContext.start();
        streamingContext.awaitTermination();
        streamingContext.close();
    }

    /**
     * Feature 3: per-minute click counts for clicks within the past hour.
     *
     * <p>NOTE(review): despite the name, this does not use a Spark sliding
     * window (reduceByKeyAndWindow); it filters each batch by event time
     * relative to the driver's wall clock instead — confirm this is intended.
     *
     * @param filteredInputStream black-list-filtered stream of
     *                            {@code (userId, clickData)} records
     */
    public static void computeAdClickCountWindow(
            JavaPairDStream<String,String> filteredInputStream){
        // 1. Map each click from the last hour to ((date_HH:mm_aid), 1).
        JavaPairDStream<String,Long> minuteAid2One = filteredInputStream.flatMapToPair(
                new PairFlatMapFunction<Tuple2<String, String>, String, Long>() {
                    @Override
                    public Iterable<Tuple2<String, Long>> call(Tuple2<String, String> userId2Data) throws Exception {
                        List<Tuple2<String,Long>> result = new ArrayList<Tuple2<String,Long>>();
                        // Data format: "yyyy-MM-dd HH:mm:ss province city xx aid"
                        String data = userId2Data._2;
                        String[] dataSplited = data.split(" ");
                        String date = dataSplited[0];
                        String hour_minute_second = dataSplited[1];
                        String time = date + " " + hour_minute_second;
                        String aid = dataSplited[5];
                        Date actionTime = DateUtils.parseTime(time);
                        // Keep only clicks that happened within the last hour.
                        if(new Date().getTime() - actionTime.getTime() <= 60*60*1000){
                            String hour = hour_minute_second.split(":")[0];
                            String minute = hour_minute_second.split(":")[1];
                            result.add(new Tuple2<String, Long>(date + "_" + hour + ":"+minute+"_"+aid,1L));
                        }
                        // Older clicks simply contribute nothing (empty result).
                        return result;
                    }
                }
        );

        // Sum the ones per (date_HH:mm_aid) key.
        JavaPairDStream<String,Long> minuteAid2ClickCountStream = minuteAid2One.reduceByKey(
                new Function2<Long, Long, Long>() {
                    @Override
                    public Long call(Long num1, Long num2) throws Exception {
                        return num1 + num2;
                    }
                }
        );

        // 2. Persist the per-minute counts to the database.
        minuteAid2ClickCountStream.foreachRDD(
                new VoidFunction<JavaPairRDD<String, Long>>() {
                    @Override
                    public void call(JavaPairRDD<String, Long> minuteAid2ClickCountRDD) throws Exception {
                        minuteAid2ClickCountRDD.foreach(
                                new VoidFunction<Tuple2<String, Long>>() {
                                    @Override
                                    public void call(Tuple2<String, Long> minuteAid2ClickCount) throws Exception {
                                        // Key shape: "yyyy-MM-dd_HH:mm_aid"
                                        String key = minuteAid2ClickCount._1;
                                        String[] keySplited = key.split("_");
                                        String date = keySplited[0];
                                        String aid = keySplited[2];
                                        String hour_minute = keySplited[1];
                                        String hour = hour_minute.split(":")[0];
                                        String minute = hour_minute.split(":")[1];
                                        Long clickCount = minuteAid2ClickCount._2;

                                        AdClickTrend adClickTrend = new AdClickTrend();
                                        adClickTrend.setAid(aid);
                                        adClickTrend.setTimeToMinute(date + " "+hour+":"+minute+":");
                                        adClickTrend.setClickCount(clickCount);
                                        MapperFactory mapperFactory = MapperFactory.getMapperFactory();
                                        AdClickTrendMapper adClickTrendMapper = mapperFactory.getMapper(AdClickTrendMapper.class);
                                        AdClickTrend dbAdClickCountTrend = adClickTrendMapper.get(adClickTrend);
                                        if(dbAdClickCountTrend == null || dbAdClickCountTrend.getClickCount() == 0){
                                            adClickTrendMapper.insert(adClickTrend);
                                        }else{
                                            // TODO(review): this inserts a second row with the
                                            // accumulated count instead of updating the existing
                                            // one (computeEveryDayProvinceTop10Ad calls update()
                                            // in the analogous case) — confirm against
                                            // AdClickTrendMapper and fix if it has an update.
                                            adClickTrend.setClickCount(clickCount + dbAdClickCountTrend.getClickCount());
                                            adClickTrendMapper.insert(adClickTrend);
                                        }
                                    }
                                }
                        );
                    }
                }
        );
    }

    /**
     * Feature 2: computes the daily top-10 ads per province.
     *
     * <p>Approach:
     * <ol>
     *   <li>unpack {@code (yyyyMMdd_province_city_aid, clickCount)} into rows of
     *       {@code (date, province, city, aid, clickCount)} and register them as
     *       a temporary Spark SQL table;</li>
     *   <li>rank ads per province with {@code ROW_NUMBER() OVER (...)} and keep
     *       the top 10, upserting the result into MySQL.</li>
     * </ol>
     *
     * @param dateProvinceCityAid2ClickCount accumulated counts keyed by
     *                                       {@code yyyyMMdd_province_city_aid}
     */
    public static void computeEveryDayProvinceTop10Ad(
            JavaPairDStream<String,Integer> dateProvinceCityAid2ClickCount){
        dateProvinceCityAid2ClickCount.foreachRDD(
                new VoidFunction<JavaPairRDD<String, Integer>>() {
                    @Override
                    public void call(JavaPairRDD<String, Integer> countRDD) throws Exception {

                        // 1. Split the composite key into individual row columns.
                        JavaRDD<Row> rowsRDD = countRDD.map(new Function<Tuple2<String, Integer>, Row>() {
                            @Override
                            public Row call(Tuple2<String, Integer> dateProvinceCityAid2clickCount) throws Exception {
                                String dateProvinceCityAid = dateProvinceCityAid2clickCount._1;
                                int clickCount = dateProvinceCityAid2clickCount._2;
                                String[] strSplited = dateProvinceCityAid.split("_");
                                String date = strSplited[0];
                                String province = strSplited[1];
                                String city = strSplited[2];
                                String aid = strSplited[3];
                                return RowFactory.create(date,province,city,aid,clickCount);
                            }
                        });

                        // Schema for the temporary table.
                        StructType schema = DataTypes.createStructType(Arrays.asList(
                                DataTypes.createStructField("date",DataTypes.StringType,true),
                                DataTypes.createStructField("province",DataTypes.StringType,true),
                                DataTypes.createStructField("city",DataTypes.StringType,true),
                                DataTypes.createStructField("aid",DataTypes.StringType,true),
                                DataTypes.createStructField("clickCount",DataTypes.IntegerType,true)
                        ));
                        // NOTE(review): building a HiveContext per batch is expensive;
                        // consider caching a singleton keyed by the SparkContext.
                        HiveContext hiveContext = new HiveContext(countRDD.context());
                        DataFrame dailyAdClickCountByProvinceDF = hiveContext.createDataFrame(rowsRDD,schema);
                        // Debug output only.
                        rowsRDD.foreach(new VoidFunction<Row>() {
                            @Override
                            public void call(Row row) throws Exception {
                                System.out.println("rowsRDD");
                                System.out.println(row);
                            }
                        });

                        // Register the rows as a temporary table for SQL access.
                        dailyAdClickCountByProvinceDF.registerTempTable("tmp_date_city_aid_clickCount");

                        // 2. Rank ads per province by click count and keep the top 10.
                        String sql = "select date,province,aid,clickCount,rank from (select date,province,aid,clickCount,ROW_NUMBER()" +
                                " OVER ( PARTITION BY province ORDER BY clickCount DESC) as rank from tmp_date_city_aid_clickCount ) as t " +
                                "where rank <= 10";
                        DataFrame top10DF = hiveContext.sql(sql);
                        JavaRDD<Row> resultRDD = top10DF.javaRDD();
                        System.out.println("resultRDD:"+resultRDD.count());
                        // Upsert the top-10 rows into MySQL.
                        resultRDD.foreach(new VoidFunction<Row>() {
                            @Override
                            public void call(Row row) throws Exception {
                                // Column order matches the SELECT above.
                                String date = row.getString(0);
                                String province = row.getString(1);
                                String aid = row.getString(2);
                                long clickCount = row.getInt(3);
                                int rank = row.getInt(4);

                                AdTopNProvinceDailyCount adTopNProvinceDailyCount = new AdTopNProvinceDailyCount();
                                adTopNProvinceDailyCount.setDate(date);
                                adTopNProvinceDailyCount.setProvince(province);
                                adTopNProvinceDailyCount.setAid(aid);
                                adTopNProvinceDailyCount.setClickCount(clickCount);
                                adTopNProvinceDailyCount.setRank(rank);

                                AdTopNProvinceDailyCountMapper mapper = MapperFactory.getMapperFactory()
                                        .getMapper(AdTopNProvinceDailyCountMapper.class);
                                AdTopNProvinceDailyCount originData = mapper.getOne(adTopNProvinceDailyCount);
                                // Overwrite an existing (date, province, rank) row,
                                // otherwise insert a new one.
                                if(originData != null){
                                    mapper.update(adTopNProvinceDailyCount);
                                }else{
                                    mapper.insert(adTopNProvinceDailyCount);
                                }
                            }
                        });
                    }
                }
        );
    }


    /**
     * Feature 1: real-time click counts accumulated in MySQL.
     *
     * <p>Pipeline per batch:
     * <ol>
     *   <li>re-key each record to {@code (yyyyMMdd_province_city_aid, 1)};</li>
     *   <li>reduce by key to get the batch's click count per key;</li>
     *   <li>add the batch count to the running total stored in MySQL and emit
     *       {@code (yyyyMMdd_province_city_aid, totalClickCount)}.</li>
     * </ol>
     *
     * @param filteredInputStream black-list-filtered stream of
     *                            {@code (userId, clickData)} records
     * @return stream of accumulated counts keyed by {@code yyyyMMdd_province_city_aid}
     */
    public static JavaPairDStream<String,Integer> computeRealTimeCount(
            JavaPairDStream<String,String> filteredInputStream ){
        JavaPairDStream<String,Integer> dateProvinceCityAid2ClickCount = filteredInputStream.transformToPair(
                new Function<JavaPairRDD<String, String>, JavaPairRDD<String, Integer>>() {
                    /**
                     * Input records look like
                     * {@code (20, "2019-04-20 10:09:42 上海 上海 20 33")}.
                     */
                    @Override
                    public JavaPairRDD<String, Integer> call(
                            JavaPairRDD<String, String> userId2DataRDD) throws Exception {
                        // 1. Re-key to (yyyyMMdd_province_city_aid, 1).
                        JavaPairRDD<String, Integer> dateProvinceCityAid2OneRDD = userId2DataRDD.mapToPair(
                                new PairFunction<Tuple2<String, String>, String, Integer>() {
                                    @Override
                                    public Tuple2<String, Integer> call(Tuple2<String, String> userId2Data) throws Exception {
                                        String data = userId2Data._2;
                                        String[] dataSplited = data.split(" ");
                                        String date = dataSplited[0];
                                        String province = dataSplited[2];
                                        String city = dataSplited[3];
                                        String aid = dataSplited[5];
                                        String newKey = date + "_" + province + "_" + city + "_" + aid;
                                        return new Tuple2<String,Integer>(newKey,1);
                                    }
                                }
                        );

                        // 2. Count clicks per key within this batch.
                        JavaPairRDD<String,Integer> dateProvinceCityAid2CurrentCountRDD = dateProvinceCityAid2OneRDD.reduceByKey(
                                new Function2<Integer, Integer, Integer>() {
                                    @Override
                                    public Integer call(Integer count1, Integer count2) throws Exception {
                                        return count1 + count2;
                                    }
                                }
                        );

                        // 3. Merge the batch count into the running total in MySQL.
                        // NOTE(review): this issues one DB round trip per key per
                        // batch; consider mapPartitionsToPair + batched queries.
                        JavaPairRDD<String,Integer> dateProvinceCityAid2CountRDD = dateProvinceCityAid2CurrentCountRDD.mapToPair(
                                new PairFunction<Tuple2<String, Integer>, String, Integer>() {
                                    @Override
                                    public Tuple2<String, Integer> call(Tuple2<String, Integer> key2CurrentCount) throws Exception {
                                        String dateProvinceCityAid = key2CurrentCount._1;
                                        String[] strSplited = dateProvinceCityAid.split("_");
                                        String date = strSplited[0];
                                        String province =strSplited[1];
                                        String city = strSplited[2];
                                        String aid = strSplited[3];

                                        int currentClickCount = key2CurrentCount._2;

                                        AdRealTimeCountMapper adRealTimeCountMapper = MapperFactory.
                                                getMapperFactory().getMapper(AdRealTimeCountMapper.class);

                                        AdRealTimeCount adRealTimeCount = new AdRealTimeCount(date,province,city,aid);
                                        adRealTimeCount = adRealTimeCountMapper.
                                                getAdRealTimeCount(adRealTimeCount);
                                        System.out.println(dateProvinceCityAid + adRealTimeCount);
                                        if(adRealTimeCount == null){
                                            // First time this key is seen: insert the batch count.
                                            adRealTimeCount = new AdRealTimeCount(date,province,city,aid,currentClickCount);
                                            adRealTimeCountMapper.addAdRealTimeCount(adRealTimeCount);
                                        }else{
                                            // Accumulate on top of the stored total.
                                            adRealTimeCount.setClickCount(adRealTimeCount.getClickCount() + currentClickCount);
                                            adRealTimeCountMapper.updateAdRealTimeCount(adRealTimeCount);
                                        }
                                        return new Tuple2<String, Integer>(dateProvinceCityAid,adRealTimeCount.getClickCount());
                                    }
                                }
                        );
                        return dateProvinceCityAid2CountRDD;
                    }
                }
        );
        return  dateProvinceCityAid2ClickCount;
    }


    /**
     * Filters the incoming click stream against the user black list.
     *
     * <p>Approach: load the black-listed user ids from the database, left-outer
     * join the stream (keyed by userId) against them, and drop every record
     * that joined with a black-list entry. A left outer join is used so that
     * records whose user is NOT black-listed are preserved.
     *
     * @param javaSparkContext context used to parallelize the black list
     * @param inputDStream     raw Kafka stream of {@code (userId, clickData)}
     * @return the stream with black-listed users' records removed
     */
    public static JavaPairDStream<String,String> processInBlackList(
            JavaSparkContext javaSparkContext,
            JavaPairInputDStream<String,String> inputDStream){
        JavaPairDStream<String,String> userId2DataFilteredByBlackList = inputDStream.transformToPair(
                new Function<JavaPairRDD<String, String>, JavaPairRDD<String, String>>() {
                    @Override
                    public JavaPairRDD<String, String> call(
                            // Record format: (224,2019-04-19 15:01:45 江苏 淮安 20 24)
                            JavaPairRDD<String, String> adStreamRDD) throws Exception {
                        // 1. Load the black-listed users and wrap them as
                        //    (userId, true) pairs.
                        AdStreamBlackListMapper adStreamBlackListMapper = MapperFactory
                                .getMapperFactory().getMapper(AdStreamBlackListMapper.class);
                        List<AdStreamBlackList> blackListEntries = adStreamBlackListMapper
                                .findAllBlackList();

                        List<Tuple2<String,Boolean>> blackList = new ArrayList<Tuple2<String,Boolean>>();
                        for(AdStreamBlackList entry : blackListEntries){
                            blackList.add(new Tuple2<String,Boolean>(entry.getUserId(),true));
                        }

                        JavaPairRDD<String,Boolean> userId2BoolRDD =
                                javaSparkContext.parallelizePairs(blackList);

                        // Drop malformed records first so later splits can't fail.
                        // FIX(review): the original discarded the filtered RDD
                        // (RDD.filter returns a new RDD), making this a no-op.
                        adStreamRDD = adStreamRDD.filter(new SplitArrayFilterFunction());

                        // 2. Re-key the click stream as (userId, data) and left
                        //    outer join with the black list: an inner join would
                        //    drop every non-black-listed record, which must be kept.
                        JavaPairRDD<String,String> userId2DataRDD = adStreamRDD.mapToPair(
                               new StreamToUserId2DataRDDFunction());
                        JavaPairRDD<String,Tuple2<String, com.google.common.base.Optional<Boolean>>> userId2dataAndBlack =
                                userId2DataRDD.leftOuterJoin(userId2BoolRDD);

                        // 3. Remove records whose Optional<Boolean> is present/true,
                        //    i.e. the user is on the black list.
                        userId2dataAndBlack = userId2dataAndBlack.filter(new UserId2DataAndBlackFunction());

                        // 4. Map back to plain (userId, data) pairs.
                        JavaPairRDD<String,String> filteredUserId2DataRDD = userId2dataAndBlack.mapToPair(
                                new UserId2DataAndBlackToUserId2DataFunction());

                        return filteredUserId2DataRDD;
                    }
                }
        );

        return userId2DataFilteredByBlackList;
    }

}
