package com.heima.consumer.stream;

import com.alibaba.fastjson.JSON;
import com.heima.consumer.pojo.ArticleMessage;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.config.TopicBuilder;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;

import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

@Component
public class KafkaStreamConfig {
    /**
     * "10001"  "hello word"
     * "10001"  "hello kafka"
     * "10001"  "hello spring kafka"
     * "10001"  "kafka stream"
     * "10001"  "spring kafka"
     * -----------------------------  字符串切割
     *  "10001"  [hello,word]
     *  "10001"  [hello,kafka]
     *  "10001"  [hello,spring,kafka]
     *  "10001"  [kafka,stream]
     *  "10001"  [spring,kafka]
     * ------------------------------- 将二维集合转化为一维
     *  "10001"  hello
     *  "10001"  word
     *  "10001"  hello
     *  "10001"  kafka
     *  "10001"  hello
     *  "10001"  spring
     *  "10001"  kafka
     *  "10001"  kafka
     *  "10001"  stream
     *  "10001"  spring
     *  "10001"  stream
     *
     *  --------------------------------- 将字符串单词转化为 key
     *  hello   hello
     *  word    word
     *  hello   hello
     *  kafka   kafka
     *  hello   hello
     *  spring   spring
     *  kafka   kafka
     *  kafka   kafka
     *  stream   stream
     *  spring   spring
     *  stream   stream
     *
     *  -----------------------------   根据key的单词分组
     *  hello   hello
     *  hello   hello
     *  hello   hello
     *
     *  word    word
     *
     *  kafka   kafka
     *  kafka   kafka
     *  kafka   kafka
     *
     *  spring   spring
     *  spring   spring
     *
     *  stream   stream
     *  stream   stream
     *
     *  ------------------  求每组的数量
     *  hello : 3
     *  word  : 1
     *  kafka : 3
     *  spring : 2
     *  stream : 2
     *
     * ---------
     * hello : 3
     * word  : 1
     * kafka : 3
     * spring : 2
     * stream : 2
     *
     *
     * @param builder
     * @return
     */
    @Bean
    public KStream<String,String>KStream(StreamsBuilder builder){
        //1. 获取KStream流
        KStream<String,String> KStream = builder.<String,String>stream("kafka.topic5");
        KStream
                .mapValues(value -> value.split(""))
                .flatMapValues(value -> Arrays.asList(value))
                .map(((key, value) -> new KeyValue<>(value,value)))
                .groupByKey(Grouped.with(Serdes.String(),Serdes.String()))
                .count()
                .toStream()
                .map(((key, value) -> new KeyValue<>(key,value.toString())))
                .to("kafka.topic6");

        return KStream;
    }

    /**
     * @param builder
     * @return
     */
    @Bean
    public KStream<String,String>kStream(StreamsBuilder builder){
        //获取KStream流对象
        KStream<String,String> kStream = builder.stream("kafka.topic5");
        //定义流处理拓扑
        kStream
                //JSON转化为Java对象
                .mapValues(value -> JSON.parseObject(value,ArticleMessage.class))
                //key和值处理  key: 文章ID  , value : 行为类型:数量
                .map(((key, value) -> new KeyValue<>(value.getArticleId(),value.getType().toString()+":"+value.getAdd())))
                //根据key进行分组
                .groupByKey(Grouped.with(Serdes.Long(),Serdes.String()))
                //设置时间窗口
                .windowedBy(TimeWindows.of(Duration.ofMillis(10000)))
                //数据聚合
                .aggregate(() ->"COLLECTION:0,COMMENT:0,LIKES:0,VIEWS:0",(key, value, aggregate) -> {
                    //1. 将历史聚合结果进行切割，获取每一个行为的历史聚合数据
                    String[] split = aggregate.split(",");
                    //2. 将历史行为数据Map集合：{COLLECTION:0,COMMENT:0,LIKES:1,VIEWS:0}
                    HashMap<String,String> aggregate_result = new HashMap<>();
                    for (String str : split) {
                        String[] strs = str.split(":");
                        aggregate_result.put(strs[0], String.valueOf(Integer.parseInt(strs[1])));
                    }

                    //3. 对当前数据进行处理
                    String[] values = value.split(":");

                    aggregate_result.put(values[0],aggregate_result.get(values[0]) + Integer.parseInt(values[1]));

                    //4.使用模板字符串进行拼接
                    String.format("COLLECTION:%s,COMMENT:%s,LIKES:%s,VIEWS:%s",aggregate_result.get("COLLECTION"),aggregate_result.get("CO"))
                    return format;
                }, Materialized.with(Serdes.Long(),Serdes.String()))
                //重新转化为KStream
                .toStream()
                //数据格式转换
                .map(((key, value) -> new KeyValue<>(key.toString(),value)))
                .to("hot.article.incr.handle.topic");
        return kStream;

    }

    @Bean
    public NewTopic topic5(){
        return TopicBuilder.name("kafka.topic5").build();
    }

    @Bean
    public NewTopic topic6(){
        return TopicBuilder.name("kafka.topic6").build();
    }
}
