/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.qyer.flume.sink;

import static org.quartz.CronScheduleBuilder.cronSchedule;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.apache.commons.lang3.StringUtils;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.serialization.EventSerializer;
import org.apache.flume.serialization.EventSerializerFactory;
import org.apache.flume.sink.AbstractSink;
import org.quartz.JobBuilder;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobKey;
import org.quartz.SchedulerException;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.quartz.TriggerKey;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Preconditions;
import com.qyer.flume.formatter.RealDateFormatPathManager;
import com.qyer.flume.schedule.CronScheduleJob;
import com.qyer.flume.schedule.GenericSchedulerHolder;

/**
 * A Flume sink that appends events to files on local disk and rotates the
 * current file on a Quartz cron schedule (default: top of every hour).
 *
 * <p>Rotation is cooperative: the scheduled {@link CronScheduleJob} flips
 * {@link #setShouldRotate(boolean)} and the next {@link #process()} call closes
 * the current file and asks the {@link RealDateFormatPathManager} for a new one.
 *
 * <p>Not thread-safe beyond what Flume guarantees: {@code process()} is invoked
 * by a single sink runner thread; only {@code shouldRotate} is touched by the
 * scheduler thread, hence it is {@code volatile}.
 */
public class CronRollingFileSink extends AbstractSink implements Configurable {

  private static final Logger logger = LoggerFactory.getLogger(CronRollingFileSink.class);

  /** Default number of events drained from the channel per transaction. */
  private static final int DEFAULT_BATCH_SIZE = 100;

  /** Fallback Quartz cron expression: fire at minute 0 of every hour. */
  private static final String DEFAULT_CRON_EXPRESSION = "0 0 * * * ?";

  private int batchSize = DEFAULT_BATCH_SIZE;

  /** Base directory that rolled files are written under. */
  private File directory;
  /** Quartz cron expression controlling when rotation is requested. */
  private String cronExpression;
  /** Stream for the file currently being written; null when no file is open. */
  private OutputStream outputStream;

  private String serializerType;
  private Context serializerContext;
  private EventSerializer serializer;

  private SinkCounter sinkCounter;

  /** Produces date-formatted file paths and advances them on rotate(). */
  private RealDateFormatPathManager pathController;
  /** Set by the Quartz job thread; consumed by the sink runner in process(). */
  private volatile boolean shouldRotate;

  public CronRollingFileSink() {
    pathController = new RealDateFormatPathManager();
    shouldRotate = false;
  }

  /**
   * Reads sink configuration.
   *
   * <p>Recognized properties: {@code sink.directory} (required),
   * {@code sink.cronExpression} (optional, defaults to hourly),
   * {@code sink.serializer} (defaults to {@code TEXT}),
   * {@code sink.filePrefix} and {@code sink.batchSize} (optional).
   *
   * @throws NullPointerException if {@code sink.directory} is missing
   */
  @Override public void configure(Context context) {
    String dir = context.getString("sink.directory");
    String cron = StringUtils.trimToNull(context.getString("sink.cronExpression"));
    serializerType = context.getString("sink.serializer", "TEXT");
    serializerContext = new Context(context.getSubProperties("sink." + EventSerializer.CTX_PREFIX));

    logger.debug("Configuring sink {}: directory={}, cronExpression={}, serializerType={}",
        getName(), dir, cron, serializerType);

    // BUG FIX: the original checked the still-null *field* `directory`
    // (checkArgument(directory == null, ...)), which passed trivially on first
    // configure and never validated the configured value, letting a missing
    // sink.directory reach `new File(null)`. Validate the configured value.
    Preconditions.checkNotNull(dir, "Directory may not be null");
    Preconditions.checkNotNull(serializerType, "Serializer type is undefined");

    // trimToNull above already collapsed blank input to null.
    this.cronExpression = (cron == null) ? DEFAULT_CRON_EXPRESSION : cron;

    String customPrefix = context.getString("sink.filePrefix");
    pathController.config(customPrefix);
    batchSize = context.getInteger("sink.batchSize", DEFAULT_BATCH_SIZE);
    this.directory = new File(dir);
    if (sinkCounter == null) {
      sinkCounter = new SinkCounter(getName());
    }
    pathController.setBaseDirectory(this.directory);
  }

  /**
   * Registers a Quartz job + cron trigger that will request file rotation.
   * The sink instance itself is passed to the job via the JobDataMap under
   * the job key name so the job can call {@link #setShouldRotate(boolean)}.
   *
   * @throws SchedulerException if the job cannot be scheduled
   */
  public void initCronJob() throws SchedulerException {
    String jobKeyName = "CrontabJob." + getName();
    JobKey jobKey = new JobKey(jobKeyName, "CronJobGroup");
    TriggerKey triggerKey = new TriggerKey("CrontabTrigger." + getName(), "DefaultGroup");
    JobDataMap jdm = new JobDataMap();
    jdm.put(jobKeyName, this);
    JobDetail cronJob = JobBuilder.newJob(CronScheduleJob.class).withIdentity(jobKey)
                                  .withDescription("Crontab job for sink \"" + getName() + "\"")
                                  .usingJobData(jdm).build();

    Trigger cronTrigger = TriggerBuilder.newTrigger().withIdentity(triggerKey).startNow()
                                        .withSchedule(cronSchedule(cronExpression))
                                        .withDescription("Crontab trigger(" + cronExpression + ")")
                                        .forJob(jobKey).build();
    GenericSchedulerHolder.getInstance().getScheduler().scheduleJob(cronJob, cronTrigger);
  }

  /** Starts counters and schedules the rotation cron job. */
  @Override public void start() {
    logger.info("Starting {}...", this);
    sinkCounter.start();
    super.start();

    try {
      initCronJob();
    } catch (SchedulerException e) {
      // The sink still runs without the cron job; it simply never rotates.
      logger.error("Cron sink(" + getName() + ") started with error.", e);
    }
    logger.info("RollingFileSink {} started.", getName());
  }

  /**
   * Drains up to {@code batchSize} events from the channel into the current
   * file inside one transaction, rotating the file first if requested.
   *
   * @return {@link Status#READY} if a full batch was taken,
   *         {@link Status#BACKOFF} if the channel ran dry
   * @throws EventDeliveryException on any I/O or channel failure (the
   *         transaction is rolled back and the event stays in the channel)
   */
  @Override public Status process() throws EventDeliveryException {
    if (shouldRotate) {
      logger.info("Time to rotate {}", pathController.getCurrentFile());
      if (outputStream != null) {
        logger.info("Closing file {}", pathController.getCurrentFile());
        try {
          serializer.flush();
          serializer.beforeClose();
          outputStream.close();
          sinkCounter.incrementConnectionClosedCount();
          // Only clear the flag after a successful close so a failed close
          // is retried on the next process() call.
          shouldRotate = false;
        } catch (IOException e) {
          sinkCounter.incrementConnectionFailedCount();
          throw new EventDeliveryException(
            "Unable to rotate file " + pathController.getCurrentFile() + " while delivering event",
            e);
        } finally {
          serializer = null;
          outputStream = null;
        }
        pathController.rotate();
      }
    }
    if (outputStream == null) {
      File currentFile = pathController.getCurrentFile();

      logger.info("Opening output stream for file {}", currentFile);
      try {
        // Append mode: reopening after a restart must not truncate the file.
        outputStream = new BufferedOutputStream(new FileOutputStream(currentFile, true));
        serializer = EventSerializerFactory
          .getInstance(serializerType, serializerContext, outputStream);
        serializer.afterCreate();
        sinkCounter.incrementConnectionCreatedCount();
      } catch (IOException e) {
        sinkCounter.incrementConnectionFailedCount();
        throw new EventDeliveryException(
          "Failed to open file " + pathController.getCurrentFile() + " while delivering event", e);
      }
    }

    Channel channel = getChannel();
    Transaction transaction = channel.getTransaction();
    Event event;
    Status result = Status.READY;

    try {
      transaction.begin();
      int eventAttemptCounter = 0;
      for (int i = 0; i < batchSize; i++) {
        event = channel.take();
        if (event != null) {
          sinkCounter.incrementEventDrainAttemptCount();
          eventAttemptCounter++;
          serializer.write(event);

          /*
           * FIXME: Feature: Rotate on size and time by checking bytes written and
           * setting shouldRotate = true if we're past a threshold.
           */

          /*
           * FIXME: Feature: Control flush interval based on time or number of
           * events. For now, we're super-conservative and flush on each write.
           */
        } else {
          // No events found, request back-off semantics from runner
          result = Status.BACKOFF;
          break;
        }
      }
      serializer.flush();
      outputStream.flush();
      transaction.commit();
      sinkCounter.addToEventDrainSuccessCount(eventAttemptCounter);
    } catch (Exception ex) {
      transaction.rollback();
      throw new EventDeliveryException("Failed to process transaction", ex);
    } finally {
      transaction.close();
    }

    return result;
  }

  /** Flushes and closes the current file, then shuts down the shared scheduler. */
  @Override public void stop() {
    logger.info("RollingFile sink {} stopping...", getName());
    sinkCounter.stop();
    super.stop();

    if (outputStream != null) {
      logger.debug("Closing file {}", pathController.getCurrentFile());

      try {
        serializer.flush();
        serializer.beforeClose();
        outputStream.close();
        sinkCounter.incrementConnectionClosedCount();
      } catch (IOException e) {
        sinkCounter.incrementConnectionFailedCount();
        logger.error("Unable to close output stream. Exception follows.", e);
      } finally {
        outputStream = null;
        serializer = null;
      }
    }
    try {
      // NOTE(review): "destory" is a typo in the project-owned holder API;
      // renaming it belongs in GenericSchedulerHolder, not here.
      GenericSchedulerHolder.getInstance().destory();
    } catch (SchedulerException e) {
      logger.error("Cannot stop scheduler.", e);
    }
    logger.info("RollingFile sink {} stopped. Event metrics: {}", getName(), sinkCounter);
  }

  /**
   * Called from the Quartz job thread to request (or cancel) rotation on the
   * next {@link #process()} pass.
   */
  public void setShouldRotate(boolean shouldRotate) {
    this.shouldRotate = shouldRotate;
  }

  public File getDirectory() {
    return directory;
  }

  public void setDirectory(File directory) {
    this.directory = directory;
  }

  public String getCronExpression() {
    return cronExpression;
  }

  public void setCronExpression(String cronExpression) {
    this.cronExpression = cronExpression;
  }
}
