package com.heima.recommend.kafka.stream;

import com.alibaba.fastjson.JSON;
import com.heima.common.constants.KafkaMessageConstants;
import com.heima.model.message.UpdateArticleMess;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;

import java.time.Duration;
import java.util.HashMap;

@Component
public class HotArticleStream {

    /**
     * Builds the hot-article stream topology: consumes per-behavior score
     * delta messages, re-keys them by article id, accumulates the four
     * behavior counters per article inside 20-second tumbling windows, and
     * publishes the windowed totals to the downstream handle topic.
     *
     * @param builder the {@link StreamsBuilder} supplied by Spring Kafka
     * @return the source stream (exposed as a bean so the topology is registered)
     */
    @Bean
    public KStream<String, String> kStream(StreamsBuilder builder) {
        // Source: raw JSON messages describing one behavior change for one article.
        KStream<String, String> kStream = builder.stream(KafkaMessageConstants.HOT_ARTICLE_SCORE_TOPIC);
        kStream
                // Deserialize the JSON payload into the message object.
                .mapValues(value -> JSON.parseObject(value, UpdateArticleMess.class))
                // Re-key by article id; value becomes "TYPE:delta", e.g. "LIKES:1".
                .map((key, value) -> new KeyValue<>(value.getArticleId(), value.getType().toString() + ":" + value.getAdd()))
                // Group all events for the same article together (Long key serde).
                .groupByKey(Grouped.with(Serdes.Long(), Serdes.String()))
                // Aggregate within 20-second tumbling windows.
                .windowedBy(TimeWindows.of(Duration.ofSeconds(20)))
                .aggregate(
                        // Initializer: all four behavior counters start at zero.
                        () -> "COLLECTION:0,COMMENT:0,LIKES:0,VIEWS:0",
                        // Adder: fold one "TYPE:delta" event into the running totals.
                        (key, value, aggregate) -> applyScore(value, aggregate),
                        Materialized.with(Serdes.Long(), Serdes.String()))
                .toStream()
                // Unwrap the windowed key into a plain String article id for the sink.
                .map(((key, value) -> new KeyValue<>(key.key().toString(), value)))
                .to(KafkaMessageConstants.HOT_ARTICLE_INCR_HANDLE_TOPIC);

        return kStream;
    }

    /**
     * Applies a single "TYPE:delta" event to the serialized counter aggregate.
     *
     * @param value     one event, e.g. {@code "LIKES:1"}; blank values are ignored
     * @param aggregate the running totals, e.g. {@code "COLLECTION:0,COMMENT:0,LIKES:0,VIEWS:0"}
     * @return the updated aggregate, serialized in the same canonical field order
     */
    private static String applyScore(String value, String aggregate) {
        if (StringUtils.isBlank(value)) {
            return aggregate;
        }
        // Parse "COLLECTION:n,COMMENT:n,LIKES:n,VIEWS:n" into a counter map.
        HashMap<String, Integer> counters = new HashMap<>();
        for (String entry : aggregate.split(",")) {
            String[] parts = entry.split(":");
            counters.put(parts[0], Integer.parseInt(parts[1]));
        }
        // Apply the delta. getOrDefault guards against an unknown behavior type,
        // which with a plain get() would null-unbox into a NullPointerException
        // and kill the stream thread.
        String[] values = value.split(":");
        counters.put(values[0], counters.getOrDefault(values[0], 0) + Integer.parseInt(values[1]));
        // Re-serialize in the fixed canonical order expected downstream.
        return String.format("COLLECTION:%s,COMMENT:%s,LIKES:%s,VIEWS:%s",
                counters.get("COLLECTION"), counters.get("COMMENT"), counters.get("LIKES"), counters.get("VIEWS"));
    }
}
