package online.shenjian.kafka;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.*;
import org.apache.kafka.streams.kstream.*;
import org.apache.kafka.streams.state.KeyValueStore;

import java.time.Duration;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

/**
 * Kafka Streams application that derives per-customer spending statistics
 * (total, average, and maximum transaction amount) from a "transactions"
 * topic and publishes the joined result to "customer-analysis".
 */
public class TransactionAnalyzer {

  /**
   * How long {@code suppress()} buffers updates per key before emitting the
   * latest one. The original code passed {@code null} here, which throws a
   * {@link NullPointerException} when the topology is built — any non-null
   * duration fixes the crash while still rate-limiting duplicate updates
   * before the joins.
   */
  private static final Duration SUPPRESS_WINDOW = Duration.ofSeconds(1);

  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "transaction-analyzer");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-server:30092");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    // Class literal instead of new TransactionSerde().getClass(): same config
    // value, no throwaway instance.
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, TransactionSerde.class);

    final StreamsBuilder builder = new StreamsBuilder();

    KStream<String, Transaction> transactions = builder.stream("transactions");

    // Re-key by customer once and reuse the grouped stream for all three
    // aggregations. The original repeated groupBy(...) three times; a single
    // grouping is clearer and lets topology optimization share the
    // repartition topic across the aggregations.
    KGroupedStream<String, Transaction> byCustomer =
        transactions.groupBy((key, transaction) -> transaction.getCustomerId());

    // Total amount spent per customer.
    KTable<String, Double> customerTotalSpending = byCustomer
        .aggregate(() -> 0.0,
                   (customerId, transaction, total) -> total + transaction.getAmount(),
                   Materialized.<String, Double, KeyValueStore<Bytes, byte[]>>as("customer-total-spending-store")
                       .withValueSerde(Serdes.Double()));

    // Average transaction amount per customer.
    KTable<String, Double> customerAvgSpending = byCustomer
        .aggregate(() -> new TotalAndCount(0.0, 0L),
                   (customerId, transaction, totalAndCount) -> totalAndCount.add(transaction.getAmount()),
                   Materialized.<String, TotalAndCount, KeyValueStore<Bytes, byte[]>>as("customer-avg-spending-store")
                       .withValueSerde(new TotalAndCountSerde()))
        // Safe division: the aggregator only runs after at least one record,
        // so the count is >= 1 by the time this mapValues sees it.
        .mapValues((totalAndCount) -> totalAndCount.getTotal() / totalAndCount.getCount(),
                   Materialized.with(Serdes.String(), Serdes.Double()))
        // Throttle intermediate updates so the downstream joins emit fewer
        // duplicate results. BUG FIX: was untilTimeLimit(null, ...) -> NPE.
        .suppress(Suppressed.untilTimeLimit(SUPPRESS_WINDOW, Suppressed.BufferConfig.unbounded()));

    // Largest single transaction per customer.
    KTable<String, Double> customerMaxSpending = byCustomer
        .reduce((transaction1, transaction2) ->
                transaction1.getAmount() > transaction2.getAmount() ? transaction1 : transaction2)
        .mapValues((transaction) -> transaction.getAmount(), Named.as("customer-max-spending"),
                   Materialized.with(Serdes.String(), Serdes.Double()))
        // Throttle intermediate updates so the downstream joins emit fewer
        // duplicate results. BUG FIX: was untilTimeLimit(null, ...) -> NPE.
        .suppress(Suppressed.untilTimeLimit(SUPPRESS_WINDOW, Suppressed.BufferConfig.unbounded()));

    // Join the three per-customer tables into a single analysis record.
    // The original appended .map((k, v) -> KeyValue.pair(k, v)) after
    // toStream(); that identity map was a no-op and has been removed.
    KStream<String, CustomerAnalysis> analysis = customerTotalSpending
        .join(customerAvgSpending, (total, avg) -> new CustomerAnalysis(total, avg))
        .join(customerMaxSpending, (partial, max) -> partial.withMax(max))
        .mapValues((customerId, result) -> result.normalize(customerId))
        .toStream();

    analysis.to("customer-analysis", Produced.with(Serdes.String(), new CustomerAnalysisSerde()));

    Topology topology = builder.build();
    KafkaStreams streams = new KafkaStreams(topology, props);
    final CountDownLatch latch = new CountDownLatch(1);

    // Shutdown hook so Ctrl-C closes the streams app cleanly before exit.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
      @Override
      public void run() {
        streams.close();
        latch.countDown();
      }
    });

    try {
      streams.start();
      latch.await();
    } catch (InterruptedException e) {
      // Restore the interrupt flag instead of swallowing it.
      Thread.currentThread().interrupt();
      System.exit(1);
    } catch (Throwable e) {
      // Surface the failure instead of exiting silently (original dropped it).
      e.printStackTrace();
      System.exit(1);
    }
    System.exit(0);
  }
}
