package com.mycompany.bigdata;

import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;

import kafka.admin.AdminUtils;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import kafka.utils.ZKStringSerializer;

import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.exception.ZkMarshallingError;
import org.I0Itec.zkclient.serialize.ZkSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.goldengate.atg.datasource.AbstractHandler;
import com.goldengate.atg.datasource.DsConfiguration;
import com.goldengate.atg.datasource.DsEvent;
import com.goldengate.atg.datasource.DsOperation;
import com.goldengate.atg.datasource.DsTransaction;
import com.goldengate.atg.datasource.GGDataSource.Status;
import com.goldengate.atg.datasource.adapt.Col;
import com.goldengate.atg.datasource.adapt.Op;
import com.goldengate.atg.datasource.adapt.Tx;
import com.goldengate.atg.datasource.meta.ColumnMetaData;
import com.goldengate.atg.datasource.meta.DsMetaData;
import com.goldengate.atg.datasource.meta.TableMetaData;
import com.goldengate.atg.datasource.meta.TableName;
import com.goldengate.atg.util.ConfigException;

public class SampleHandlerKafka extends AbstractHandler {
  final private static Logger logger = LoggerFactory
      .getLogger(SampleHandlerKafka.class);

  /** count total number of operations */
  private long numOps = 0;

  /** count total number of transactions */
  private long numTxs = 0;

  private String regularFileName = "default_flume_debug.txt";

  /** At least metadataBrokerList and zkServer are required */
  private String metadataBrokerList = "";//
  private String zkServer = ""; // 10.31.92.56:2181,10.31.92.57:2181,10.31.92.58:2181
  private String serializerClass = "kafka.serializer.StringEncoder";
  private String requestRequiredAcks = "1";
  private String maxMessageSize = "1000000";
  private String delimiter = "|";

  private PrintWriter out;

  private PadKafkaProducer client;

  public void init(DsConfiguration conf, DsMetaData metaData) {
    logger.info("Initializing handler: mode=" + getMode());
    super.init(conf, metaData); // always call 'super'

    // Initialize regular file
    try {
      out = new PrintWriter(new FileWriter(regularFileName));
    } catch (IOException ioe) {
      throw new ConfigException("Can't initialize file, " + regularFileName,
          ioe);
    }

    client =
        new PadKafkaProducer(metadataBrokerList, serializerClass,
            requestRequiredAcks, maxMessageSize, zkServer);
    // Initialize client with the remote Kafka agent's host and port
    client.init();

  }

  public Status transactionBegin(DsEvent e, DsTransaction tx) {

    super.transactionBegin(e, tx);
    String eventFormat;
    eventFormat =
        String.format("Received begin tx event, numTx=%d : position=%s%n",
            numTxs, tx.getTranID());
    out.println(eventFormat);
    return Status.OK;
  }

  public Status operationAdded(DsEvent e, DsTransaction transaction,
      DsOperation operation) {
    super.operationAdded(e, transaction, operation);
    final Tx tx = new Tx(transaction, getMetaData(), getConfig());
    final TableMetaData tMeta =
        getMetaData().getTableMetaData(operation.getTableName());
    final Op op = new Op(operation, tMeta, getConfig());

    out.println("  Received operation: table='" + op.getTableName() + "'"
        + ", pos=" + op.getPosition() + " (total_ops= " + tx.getTotalOps()
        + ", buffered=" + tx.getSize() + ")" + ", ts=" + op.getTimestamp());

    if (isOperationMode()) {
      if (op.getOperationType().isDelete()) {
        processDel(tx, op);
      } else if (op.getOperationType().isUpdate()) {
        processUp(tx, op);
      } else {
        processOther(tx, op);
      }
    }
    return Status.OK;
  }

  @Override
  public Status transactionCommit(DsEvent e, DsTransaction transaction) {
    super.transactionCommit(e, transaction);

    numTxs++;

    Tx tx = new Tx(transaction, getMetaData(), getConfig());

    // in 'operation mode', all the operations would have been processed when
    // 'operationAdded' is called. In 'transaction mode', they are processed
    // when the commit event is received.
    if (!isOperationMode()) {
      for (Op op : tx) {
        if (op.getOperationType().isDelete()) {
          processDel(tx, op);
        } else if (op.getOperationType().isUpdate()) {
          processUp(tx, op);
        } else {
          processOther(tx, op); // process data...
        }
      }
    }

    out.println("  Received commit event, tx #" + numTxs + ": " + ", pos="
        + tx.getTranID() + " (total_ops= " + tx.getTotalOps() + ", buffered="
        + tx.getSize() + ")" + ", ts=" + tx.getTimestamp() + ")");
    out.flush();

    return Status.OK;
  }

  private void processDel(Tx currentTx, Op op) {
    logger.debug("processOp: tx=" + currentTx.getPosition() + ", op="
        + op.getPosition());
    numOps++;

    TableName tname = op.getTableName();
    TableMetaData tMeta = getMetaData().getTableMetaData(tname);

    String key = "";

    for (Iterator<ColumnMetaData> itr = tMeta.getKeyColumns().iterator(); itr
        .hasNext();) {
      ColumnMetaData cmd = itr.next();
      key += op.getColumn(cmd.getIndex()).getBeforeValue();
    }

    logger.debug("tname: =" + tname + ",keyval=" + key);

    StringBuffer sb = new StringBuffer();
    sb.append(tname);
    sb.append(delimiter);
    sb.append(op.getOperationType());
    sb.append(delimiter);
    sb.append(key);
    sb.append(delimiter);

    logger.debug("sb: =" + sb.toString());

    client.sendDataToKafka(sb.toString());
  }

  private void processOther(Tx currentTx, Op op) {
    // logger.debug("processOp: tx=" + currentTx.getPosition() + ", op=" +
    // op.getPosition() + ",OperationType=" + op.getOperationType());
    numOps++;

    TableName tname = op.getTableName();
    TableMetaData tMeta = getMetaData().getTableMetaData(tname);

    String key = "";

    for (Iterator<ColumnMetaData> itr = tMeta.getKeyColumns().iterator(); itr
        .hasNext();) {
      ColumnMetaData cmd = itr.next();
      key += op.getColumn(cmd.getIndex()).getAfterValue();
    }

    StringBuffer sb = new StringBuffer();
    sb.append(tname);
    sb.append(delimiter);
    sb.append(op.getOperationType());
    sb.append(delimiter);
    sb.append(key);
    sb.append(delimiter);

    int i = 0;
    for (Col c : op) {
      sb.append(tMeta.getColumnName(i) + "=" + c.getAfterValue());
      sb.append(delimiter);
      i++;
    }

    // logger.debug("sb: =" + sb.toString());
    client.sendDataToKafka(sb.toString());
  }

  private void processUp(Tx currentTx, Op op) {
    logger.debug("processOp: tx=" + currentTx.getPosition() + ", op="
        + op.getPosition() + ",OperationType=" + op.getOperationType());
    numOps++;

    TableName tname = op.getTableName();
    TableMetaData tMeta = getMetaData().getTableMetaData(tname);

    String key = "";

    for (Iterator<ColumnMetaData> itr = tMeta.getKeyColumns().iterator(); itr
        .hasNext();) {
      ColumnMetaData cmd = itr.next();
      key += op.getColumn(cmd.getIndex()).getAfterValue();
    }

    logger.debug("tname: =" + tname + ",keyval= " + key);

    StringBuffer sb = new StringBuffer();
    sb.append(tname);
    sb.append(delimiter);
    sb.append(op.getOperationType());
    sb.append(delimiter);
    sb.append(key);
    sb.append(delimiter);

    int i = 0;
    for (Col c : op) {
      if (c.isChanged()) {
        sb.append(tMeta.getColumnName(i) + "=" + c.getAfterValue());
        sb.append(delimiter);
      }
      i++;
    }

    logger.debug("sb: =" + sb.toString());
    client.sendDataToKafka(sb.toString());
  }

  public Status metaDataChanged(DsEvent e, DsMetaData meta) {
    logger.debug("metaDataChanged");
    super.metaDataChanged(e, meta);
    logger.debug("Received metadata event: " + e + "; current tables: "
        + meta.getTableNames().size());
    return Status.OK;
  }

  public void destroy() {
    logger.debug("destroy");
    out.println("Closing file... " + reportStatus());
    try {
      out.flush();
      out.close();
      client.cleanUp();
    } catch (Exception e) {

    }
    super.destroy();
  }

  public String reportStatus() {
    logger.debug("reportStatus");
    String s =
        "Status report: file=" + regularFileName + ", mode=" + getMode()
            + ", transactions=" + numTxs + ", operations=" + numOps;
    return s;
  }

  // Property setters methods.

  public void setRegularFileName(String filename) {
    logger.info("set regular filename: " + filename);
    this.regularFileName = filename;
  }

  public void setDelimiter(String delimiter) {
    this.delimiter = delimiter;
  }

  public void setMetadataBrokerList(String metadataBrokerList) {
    this.metadataBrokerList = metadataBrokerList;
  }

  public void setSerializerClass(String serializerClass) {
    this.serializerClass = serializerClass;
  }

  public void setRequestRequiredAcks(String requestRequiredAcks) {
    this.requestRequiredAcks = requestRequiredAcks;
  }

  public void setMaxMessageSize(String maxMessageSize) {
    this.maxMessageSize = maxMessageSize;
  }

  public void setZkServer(String zkServer) {
    this.zkServer = zkServer;
  }

}

// Facade over the legacy (0.8.x) Kafka producer API: creates missing topics
// via ZooKeeper and publishes delimited change records keyed by table name.
class PadKafkaProducer {

  private static final Logger logger = LoggerFactory
      .getLogger(PadKafkaProducer.class);

  /** Pre-compiled field delimiter; the first field of a message is the table. */
  private static final Pattern DELIMITER = Pattern.compile("\\|");

  /** Typed producer; the old raw type needed @SuppressWarnings for no gain. */
  private Producer<String, String> producer;
  private String metadataBrokerList;
  private String serializerClass;
  private String requestRequiredAcks;
  private String maxMessageSize;
  private String zkServer;

  /** Cached set of known topic names; refreshed after each topic creation. */
  private scala.collection.Set<String> topics = null;
  private ZkClient zkClient = null;

  /** Bridges ZkClient (de)serialization to Kafka's ZooKeeper string format. */
  private ZkSerializer zkSerializer = new ZkSerializer() {
    @Override
    public byte[] serialize(Object o) throws ZkMarshallingError {
      return ZKStringSerializer.serialize(o);
    }

    @Override
    public Object deserialize(byte[] bytes) throws ZkMarshallingError {
      return ZKStringSerializer.deserialize(bytes);
    }
  };

  /**
   * Stores the connection settings only; call {@link #init()} before use.
   *
   * @param metadataBrokerList Kafka broker list, host:port[,host:port...]
   * @param serializerClass Kafka message serializer class name
   * @param requestRequiredAcks producer ack level ("0", "1", "-1")
   * @param maxMessageSize maximum message size in bytes, as a string
   * @param zkServer ZooKeeper connect string, host:port[,host:port...]
   */
  public PadKafkaProducer(String metadataBrokerList, String serializerClass,
      String requestRequiredAcks, String maxMessageSize, String zkServer) {
    this.metadataBrokerList = metadataBrokerList;
    this.serializerClass = serializerClass;
    this.requestRequiredAcks = requestRequiredAcks;
    this.maxMessageSize = maxMessageSize;
    this.zkServer = zkServer;
  }

  /** Connects to ZooKeeper and Kafka and caches the existing topic names. */
  public void init() {
    // 10s session timeout, 10s connection timeout.
    zkClient = new ZkClient(zkServer, 10000, 10000, zkSerializer);
    // Create the client with load balancing properties
    producer = new Producer<String, String>(getConfig());
    topics = AdminUtils.fetchAllTopicConfigs(zkClient).keySet();
  }

  /**
   * Publishes one delimited record; the topic is the table name (without
   * its schema prefix) taken from the first field of {@code data}. The topic
   * is created on the fly if it does not exist yet.
   */
  public void sendDataToKafka(String data) {
    String table = DELIMITER.split(data)[0];
    // Strip the "SCHEMA." prefix, if any, so the topic is just the table name.
    int tableIndex = table.indexOf(".");
    if (tableIndex > 0) {
      table = table.substring(tableIndex + 1);
    }

    checkAndCreateTopic(table);

    KeyedMessage<String, String> message =
        new KeyedMessage<String, String>(table, data);
    long begin = System.currentTimeMillis();
    producer.send(message);
    long end = System.currentTimeMillis();
    logger.debug("consume:{}", end - begin);
  }

  /** Translates the stored settings into a legacy ProducerConfig. */
  private ProducerConfig getConfig() {
    Properties props = new Properties();
    props.put("metadata.broker.list", metadataBrokerList);
    props.put("serializer.class", serializerClass);
    props.put("request.required.acks", requestRequiredAcks);
    props.put("max.message.size", maxMessageSize);

    return new ProducerConfig(props);
  }

  /**
   * check whether topic exists, if not exist, create topic.
   *
   * @param topic topic (table) name
   */
  private void checkAndCreateTopic(String topic) {
    if (!topics.contains(topic)) {
      // 1 partition, replication factor 1, default topic config.
      AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());
      topics = AdminUtils.fetchAllTopicConfigs(zkClient).keySet();
    }
  }

  /** Releases the producer AND the ZooKeeper connection (previously leaked). */
  public void cleanUp() {
    if (producer != null) {
      producer.close();
    }
    if (zkClient != null) {
      zkClient.close();
    }
  }

}