package com.streamsets.datacollector.execution.mq;

import com.google.gson.Gson;
import com.streamsets.datacollector.execution.PipelineState;
import com.streamsets.datacollector.execution.StateEventListener;
import com.streamsets.datacollector.execution.mq.config.MqConfigUtil;
import com.streamsets.datacollector.execution.mq.model.StatusDto;
import com.streamsets.dc.execution.manager.standalone.ThreadUsage;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Date;
import java.util.Map;

/**
 * {@link StateEventListener} that publishes pipeline state transitions to a
 * message queue (Kafka). On every state change it builds a {@link StatusDto}
 * describing the transition, serializes it to JSON, and sends it to the
 * state-event topic resolved from the configuration directory.
 */
public class MqStateEventNotifier implements StateEventListener {

  private static final Logger LOG = LoggerFactory.getLogger(MqStateEventNotifier.class);

  // Gson instances are thread-safe and reusable; avoid allocating one per event.
  private static final Gson GSON = new Gson();

  /** Base configuration directory used to resolve MQ/Kafka settings. */
  private final String baseConfDir;

  public MqStateEventNotifier(String baseConfDir) {
    this.baseConfDir = baseConfDir;
  }

  /**
   * Publishes the state transition to the configured Kafka topic.
   *
   * <p>Any failure is logged and swallowed so that MQ connectivity problems
   * never break the pipeline state machine (best-effort notification).
   *
   * @param fromState   previous pipeline state (source of id/message in the DTO)
   * @param toState     new pipeline state
   * @param toStateJson JSON form of {@code toState} (unused here; DTO is built fresh)
   * @param threadUsage thread usage info (unused by this notifier)
   * @param offset      current offsets (unused by this notifier)
   */
  @Override
  public void onStateChange(
    PipelineState fromState,
    PipelineState toState,
    String toStateJson,
    ThreadUsage threadUsage,
    Map<String, String> offset
  ) {
    MqConfigUtil mqConfigUtil = new MqConfigUtil(baseConfDir);
    // try-with-resources: the producer was previously never closed, leaking
    // sockets/IO threads on every event. send() is asynchronous, so without
    // close() (which flushes buffered records) the event could also be
    // silently dropped before it ever reached the broker.
    try (KafkaProducer<String, String> producer = mqConfigUtil.getKafkaProducer()) {
      StatusDto statusDto = convertStatusDto(fromState, toState);
      ProducerRecord<String, String> record =
          new ProducerRecord<>(mqConfigUtil.getStateEventTopicName(), GSON.toJson(statusDto));
      producer.send(record);
    } catch (Exception e) {
      // e.getMessage() alone can be null and lacks context; keep the full stack.
      LOG.error("Failed to publish pipeline state change event", e);
    }
  }

  /**
   * Maps a state transition onto the wire DTO.
   *
   * <p>NOTE(review): pipeline id and message are taken from {@code fromState},
   * not {@code toState} — presumably intentional, but worth confirming that the
   * "from" message is the one consumers expect.
   */
  private StatusDto convertStatusDto(PipelineState fromState, PipelineState toState) {
    StatusDto statusDto = new StatusDto();
    statusDto.setPipelineId(fromState.getPipelineId());
    statusDto.setFromState(fromState.getStatus().name());
    statusDto.setMessage(fromState.getMessage());
    statusDto.setToState(toState.getStatus().name());
    return statusDto;
  }
}
