/*
 * Copyright (C) 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.google.cloud.teleport.v2.templates;

import com.google.api.services.bigquery.model.TableRow;
import com.google.cloud.bigquery.TableId;
import com.google.cloud.teleport.metadata.Template;
import com.google.cloud.teleport.metadata.TemplateCategory;
import com.google.cloud.teleport.metadata.TemplateParameter;
import com.google.cloud.teleport.v2.cdc.dlq.BigQueryDeadLetterQueueSanitizer;
import com.google.cloud.teleport.v2.cdc.dlq.DeadLetterQueueManager;
import com.google.cloud.teleport.v2.cdc.dlq.StringDeadLetterQueueSanitizer;
import com.google.cloud.teleport.v2.cdc.mappers.BigQueryMappers;
import com.google.cloud.teleport.v2.coders.FailsafeElementCoder;
import com.google.cloud.teleport.v2.common.UncaughtExceptionLogger;
import com.google.cloud.teleport.v2.io.WindowedFilenamePolicy;
import com.google.cloud.teleport.v2.options.BigQueryStorageApiStreamingOptions;
import com.google.cloud.teleport.v2.templates.PubSubCdcToBigQuery.Options;
import com.google.cloud.teleport.v2.transforms.BigQueryConverters;
import com.google.cloud.teleport.v2.transforms.BigQueryConverters.BigQueryTableConfigManager;
import com.google.cloud.teleport.v2.transforms.BigQueryDynamicConverters;
import com.google.cloud.teleport.v2.transforms.ErrorConverters;
import com.google.cloud.teleport.v2.transforms.PubSubToFailSafeElement;
import com.google.cloud.teleport.v2.transforms.UDFTextTransformer.InputUDFOptions;
import com.google.cloud.teleport.v2.transforms.UDFTextTransformer.InputUDFToTableRow;
import com.google.cloud.teleport.v2.utils.BigQueryIOUtils;
import com.google.cloud.teleport.v2.utils.DurationUtils;
import com.google.cloud.teleport.v2.utils.ResourceUtils;
import com.google.cloud.teleport.v2.values.FailsafeElement;
import com.google.common.base.Strings;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.coders.CoderRegistry;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.extensions.gcp.options.GcpOptions;
import org.apache.beam.sdk.io.FileBasedSink;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO.Write.CreateDisposition;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO.Write.WriteDisposition;
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryInsertError;
import org.apache.beam.sdk.io.gcp.bigquery.InsertRetryPolicy;
import org.apache.beam.sdk.io.gcp.bigquery.WriteResult;
import org.apache.beam.sdk.io.gcp.pubsub.PubsubIO;
import org.apache.beam.sdk.io.gcp.pubsub.PubsubMessage;
import org.apache.beam.sdk.io.gcp.pubsub.PubsubMessageWithAttributesCoder;
import org.apache.beam.sdk.options.Default;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.Flatten;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.Reshuffle;
import org.apache.beam.sdk.transforms.windowing.FixedWindows;
import org.apache.beam.sdk.transforms.windowing.Window;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionList;
import org.apache.beam.sdk.values.PCollectionTuple;
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ImmutableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * The {@link PubSubCdcToBigQuery} pipeline is a streaming pipeline which ingests data in JSON
 * format from Cloud Pub/Sub, executes an optional UDF, and outputs the resulting records to
 * BigQuery. Any errors which occur in the transformation of the data or execution of the UDF will
 * be output to a separate errors table in BigQuery. The errors table will be created if it does not
 * exist prior to execution. Both output and error tables are specified by the user as template
 * parameters.
 *
 * <p><b>Pipeline Requirements</b>
 *
 * <ul>
 *   <li>The Pub/Sub topic and subscription exists.
 *   <li>The BigQuery output table exists or auto-mapping parameter is enabled.
 * </ul>
 *
 * <p>Check out <a
 * href="https://github.com/GoogleCloudPlatform/DataflowTemplates/blob/main/v2/pubsub-cdc-to-bigquery/README_PubSub_CDC_to_BigQuery.md">README</a>
 * for instructions on how to use or modify this template.
 */
@Template(
    name = "PubSub_CDC_to_BigQuery",
    category = TemplateCategory.STREAMING,
    displayName = "Pub/Sub CDC to Bigquery",
    description =
        "Streaming pipeline. Ingests JSON-encoded messages from a Pub/Sub topic, transforms them"
            + " using a JavaScript user-defined function (UDF), and writes them to a pre-existing"
            + " BigQuery table as BigQuery elements.",
    optionsClass = Options.class,
    flexContainerName = "pubsub-cdc-to-bigquery",
    contactInformation = "https://cloud.google.com/support",
    hidden = true,
    streaming = true)
public class PubSubCdcToBigQuery {

  /** The log to output status messages to. */
  private static final Logger LOG = LoggerFactory.getLogger(PubSubCdcToBigQuery.class);

  /**
   * The default suffix appended to the output table spec to derive the error (dead-letter) table
   * name when {@code outputDeadletterTable} is not specified.
   */
  public static final String DEFAULT_DEADLETTER_TABLE_SUFFIX = "_error_records";

  /** Coder for {@link FailsafeElement} records whose original payload is a {@link PubsubMessage}. */
  public static final FailsafeElementCoder<PubsubMessage, String> CODER =
      FailsafeElementCoder.of(PubsubMessageWithAttributesCoder.of(), StringUtf8Coder.of());

  /** Coder for {@link FailsafeElement} records whose original and current payloads are strings. */
  public static final FailsafeElementCoder<String, String> FAILSAFE_ELEMENT_CODER =
      FailsafeElementCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());

  /**
   * The {@link Options} class provides the custom execution options passed by the executor at the
   * command-line.
   *
   * <p>Extends {@link InputUDFOptions} for the optional JavaScript/Python UDF settings and {@link
   * BigQueryStorageApiStreamingOptions} for Storage Write API configuration. Note that {@code
   * deadLetterQueueDirectory} and {@code outputDeadletterTable} are mutually exclusive; this is
   * validated in {@code main}.
   */
  public interface Options
      extends PipelineOptions, InputUDFOptions, BigQueryStorageApiStreamingOptions {

    @TemplateParameter.PubsubSubscription(
        order = 1,
        groupName = "Source",
        description = "Pub/Sub input subscription",
        helpText =
            "Pub/Sub subscription to read the input from, in the format of"
                + " 'projects/your-project-id/subscriptions/your-subscription-name'",
        example = "projects/your-project-id/subscriptions/your-subscription-name")
    String getInputSubscription();

    void setInputSubscription(String value);

    @TemplateParameter.Boolean(
        order = 2,
        optional = true,
        description = "Auto Map Tables",
        helpText =
            "Determines if new columns and tables should be automatically created in BigQuery")
    @Default.Boolean(true)
    Boolean getAutoMapTables();

    void setAutoMapTables(Boolean value);

    @TemplateParameter.GcsReadFile(
        order = 3,
        optional = true,
        description = "Cloud Storage file with BigQuery schema fields to be used in DDL",
        helpText =
            "This is the file location that contains the table definition to be used when creating"
                + " the table in BigQuery. If left blank the table will get created with generic"
                + " string typing.")
    String getSchemaFilePath();

    void setSchemaFilePath(String value);

    @TemplateParameter.Text(
        order = 4,
        groupName = "Target",
        optional = true,
        description = "BigQuery Dataset Name or Template: dataset_name or {column_name}",
        helpText = "The name for the dataset to contain the replica table.")
    @Default.String("{_metadata_dataset}")
    String getOutputDatasetTemplate();

    void setOutputDatasetTemplate(String value);

    @TemplateParameter.Text(
        order = 5,
        groupName = "Target",
        description = "BigQuery Table Name or Template: table_name or {column_name}",
        helpText =
            "The location of the BigQuery table to write the output to. If a table does not "
                + "already exist one will be created automatically.")
    @Default.String("_metadata_table")
    String getOutputTableNameTemplate();

    void setOutputTableNameTemplate(String value);

    @TemplateParameter.BigQueryTable(
        order = 6,
        groupName = "Target",
        optional = true,
        description = "BigQuery output table (Deprecated)",
        helpText =
            "BigQuery table location to write the output to. The name should be in the format "
                + "`<project>:<dataset>.<table_name>`. The table's schema must match input objects.")
    String getOutputTableSpec();

    void setOutputTableSpec(String value);

    @TemplateParameter.BigQueryTable(
        order = 7,
        optional = true,
        description = "The dead-letter table name to output failed messages to BigQuery",
        helpText =
            "BigQuery table for failed messages. Messages failed to reach the output table for different reasons "
                + "(e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will"
                + " be created during pipeline execution. If not specified, \"outputTableSpec_error_records\" is used instead.",
        example = "your-project-id:your-dataset.your-table-name")
    String getOutputDeadletterTable();

    void setOutputDeadletterTable(String value);

    // Dead Letter Queue GCS Directory. Mutually exclusive with outputDeadletterTable.
    @TemplateParameter.GcsWriteFolder(
        order = 8,
        optional = true,
        description = "Dead Letter Queue Directory",
        helpText =
            "The name of the directory on Cloud Storage you want to write dead letters messages to")
    String getDeadLetterQueueDirectory();

    void setDeadLetterQueueDirectory(String value);

    @TemplateParameter.Duration(
        order = 9,
        optional = true,
        description = "Window duration",
        helpText =
            "The window duration/size in which DLQ data will be written to Cloud Storage. Allowed"
                + " formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh"
                + " (for hours, example: 2h).",
        example = "5m")
    @Default.String("5s")
    String getWindowDuration();

    void setWindowDuration(String value);

    // Thread Count. Used as the number of Reshuffle buckets before the UDF/TableRow conversion.
    @TemplateParameter.Integer(
        order = 10,
        optional = true,
        description = "Thread Number",
        helpText = "The number of parallel threads you want to split your data into")
    @Default.Integer(100)
    Integer getThreadCount();

    void setThreadCount(Integer value);
  }

  /**
   * The main entry-point for pipeline execution. This method starts the pipeline but does not wait
   * for its execution to finish. If blocking execution is required, use the {@link
   * PubSubCdcToBigQuery#run(Options)} method to start the pipeline and invoke {@code
   * result.waitUntilFinish()} on the {@link PipelineResult}.
   *
   * @param args The command-line args passed by the executor.
   * @throws IllegalArgumentException if both dead-letter destinations are configured.
   */
  public static void main(String[] args) {
    UncaughtExceptionLogger.register();

    Options options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class);
    BigQueryIOUtils.validateBQStorageApiOptionsStreaming(options);

    // Only one dead-letter destination may be configured: the GCS DLQ directory or the
    // BigQuery dead-letter table, never both.
    boolean hasGcsDlq = !Strings.isNullOrEmpty(options.getDeadLetterQueueDirectory());
    boolean hasBigQueryDlq = !Strings.isNullOrEmpty(options.getOutputDeadletterTable());
    if (hasGcsDlq && hasBigQueryDlq) {
      throw new IllegalArgumentException(
          "Cannot specify both deadLetterQueueDirectory and outputDeadletterTable");
    }

    run(options);
  }

  /**
   * Builds and starts the pipeline with the specified options. This method does not wait for the
   * pipeline to finish before returning. Invoke {@code result.waitUntilFinish()} on the returned
   * {@link PipelineResult} to block until the pipeline is finished running if blocking
   * programmatic execution is required.
   *
   * @param options The execution options.
   * @return The pipeline result.
   */
  public static PipelineResult run(Options options) {

    Pipeline pipeline = Pipeline.create(options);
    DeadLetterQueueManager dlqManager = buildDlqManager(options);
    String gcsOutputDateTimeDirectory = null;

    if (!Strings.isNullOrEmpty(options.getDeadLetterQueueDirectory())) {
      // Date-time path segments are expanded by WindowedFilenamePolicy at write time.
      gcsOutputDateTimeDirectory = dlqManager.getRetryDlqDirectory() + "YYYY/MM/DD/HH/mm/";
    }

    CoderRegistry coderRegistry = pipeline.getCoderRegistry();
    coderRegistry.registerCoderForType(CODER.getEncodedTypeDescriptor(), CODER);
    coderRegistry.registerCoderForType(
        FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor(), FAILSAFE_ELEMENT_CODER);

    InputUDFToTableRow<String> failsafeTableRowTransformer =
        new InputUDFToTableRow<String>(
            options.getJavascriptTextTransformGcsPath(),
            options.getJavascriptTextTransformFunctionName(),
            options.getJavascriptTextTransformReloadIntervalMinutes(),
            options.getPythonTextTransformGcsPath(),
            options.getPythonTextTransformFunctionName(),
            options.getRuntimeRetries(),
            FAILSAFE_ELEMENT_CODER);

    BigQueryTableConfigManager bqConfigManager =
        new BigQueryTableConfigManager(
            options.as(GcpOptions.class).getProject(),
            options.getOutputDatasetTemplate(),
            options.getOutputTableNameTemplate(),
            options.getOutputTableSpec());

    /*
     * Steps:
     *  1) Read messages in from Pub/Sub
     *  2) Transform the PubsubMessages into TableRows
     *     - Transform message payload via UDF
     *     - Convert UDF result to TableRow objects
     *  3) Write successful records out to BigQuery
     *     - Automap new objects to BigQuery if enabled
     *     - Write records to BigQuery tables
     *  4) Write failed records out to BigQuery
     */

    /*
     * Step #1: Read messages in from Pub/Sub
     */

    PCollection<PubsubMessage> messages =
        pipeline.apply(
            "ReadPubSubSubscription",
            PubsubIO.readMessagesWithAttributes().fromSubscription(options.getInputSubscription()));

    PCollection<FailsafeElement<String, String>> jsonRecords;

    if (options.getDeadLetterQueueDirectory() != null) {

      // With a GCS DLQ configured, the main Pub/Sub stream is flattened together with
      // records re-consumed from the DLQ retry directory.
      PCollection<FailsafeElement<String, String>> failsafeMessages =
          messages.apply("ConvertPubSubToFailsafe", ParDo.of(new PubSubToFailSafeElement()));

      PCollection<FailsafeElement<String, String>> dlqJsonRecords =
          pipeline
              .apply(dlqManager.dlqReconsumer())
              .apply(
                  ParDo.of(
                      new DoFn<String, FailsafeElement<String, String>>() {
                        @ProcessElement
                        public void process(
                            @Element String input,
                            OutputReceiver<FailsafeElement<String, String>> receiver) {
                          receiver.output(FailsafeElement.of(input, input));
                        }
                      }))
              .setCoder(FAILSAFE_ELEMENT_CODER);

      jsonRecords =
          PCollectionList.of(failsafeMessages).and(dlqJsonRecords).apply(Flatten.pCollections());
    } else {
      jsonRecords =
          messages.apply("ConvertPubSubToFailsafe", ParDo.of(new PubSubToFailSafeElement()));
    }

    PCollectionTuple convertedTableRows =
        jsonRecords
            /*
             * Step #2: Transform the PubsubMessages into TableRows
             */
            .apply(
                Reshuffle.<FailsafeElement<String, String>>viaRandomKey()
                    .withNumBuckets(options.getThreadCount()))
            .apply("ApplyUdfAndConvertToTableRow", failsafeTableRowTransformer);

    /*
     * Step #3: Write the successful records out to BigQuery
     *   Either extract table destination only
     *   or extract table destination and auto-map new columns
     */
    PCollection<KV<TableId, TableRow>> tableEvents;
    if (options.getAutoMapTables()) {
      tableEvents =
          convertedTableRows
              .get(failsafeTableRowTransformer.transformOut)
              .apply(
                  "Map Data to BigQuery Tables",
                  new BigQueryMappers(bqConfigManager.getProjectId())
                      .buildBigQueryTableMapper(
                          bqConfigManager.getDatasetTemplate(), bqConfigManager.getTableTemplate())
                      .withDefaultSchemaFromGCS(options.getSchemaFilePath()));

    } else {
      tableEvents =
          convertedTableRows
              .get(failsafeTableRowTransformer.transformOut)
              .apply(
                  "ExtractBigQueryTableDestination",
                  BigQueryDynamicConverters.extractTableRowDestination(
                      bqConfigManager.getProjectId(),
                      bqConfigManager.getDatasetTemplate(),
                      bqConfigManager.getTableTemplate()));
    }

    /*
     * Step #3: Cont.
     *    - Write rows out to BigQuery
     */
    // TODO(https://github.com/apache/beam/pull/12004): Switch out alwaysRetry
    WriteResult writeResult =
        tableEvents.apply(
            "WriteSuccessfulRecords",
            BigQueryIO.<KV<TableId, TableRow>>write()
                .to(new BigQueryDynamicConverters().bigQueryDynamicDestination())
                .withFormatFunction(element -> element.getValue())
                .withoutValidation()
                .withCreateDisposition(CreateDisposition.CREATE_NEVER)
                .withWriteDisposition(WriteDisposition.WRITE_APPEND)
                .withExtendedErrorInfo()
                .withFailedInsertRetryPolicy(InsertRetryPolicy.alwaysRetry()));

    /*
     * Step 3 Contd.
     * Elements that failed inserts into BigQuery are extracted and converted to FailsafeElement
     */
    /*
     * Stage 4: Write Failures to GCS Dead Letter Queue
     */
    // TODO: Cover tableRowRecords.get(TRANSFORM_DEADLETTER_OUT) error values
    // NOTE: Transform step names in each branch are kept unique; duplicate names break
    // pipeline update compatibility and force Beam to auto-uniquify with a warning.
    if (options.getDeadLetterQueueDirectory() != null) {

      BigQueryIOUtils.writeResultToBigQueryInsertErrors(writeResult, options)
          .apply(
              "DLQ: Write Insert Failures to GCS",
              MapElements.via(new BigQueryDeadLetterQueueSanitizer()))
          .apply(
              "Creating " + options.getWindowDuration() + " Window",
              Window.into(
                  FixedWindows.of(DurationUtils.parseDuration(options.getWindowDuration()))))
          .apply(
              "DLQ: Write File(s)",
              TextIO.write()
                  .withWindowedWrites()
                  .withNumShards(20)
                  .to(
                      WindowedFilenamePolicy.writeWindowedFiles()
                          .withOutputDirectory(gcsOutputDateTimeDirectory)
                          .withOutputFilenamePrefix("error")
                          .withShardTemplate("-SSSSS-of-NNNNN")
                          .withSuffix(".json"))
                  .withTempDirectory(
                      FileBasedSink.convertToFileResourceIfPossible(
                          options.getDeadLetterQueueDirectory())));

      PCollection<FailsafeElement<String, String>> transformDeadletter =
          PCollectionList.of(
                  ImmutableList.of(
                      convertedTableRows.get(failsafeTableRowTransformer.udfDeadletterOut),
                      convertedTableRows.get(failsafeTableRowTransformer.transformDeadletterOut)))
              .apply("Flatten", Flatten.pCollections())
              .apply(
                  "DLQ: Creating " + options.getWindowDuration() + " Window",
                  Window.into(
                      FixedWindows.of(DurationUtils.parseDuration(options.getWindowDuration()))));

      PCollection<String> dlqWindowing =
          transformDeadletter
              .apply("Sanitize records", MapElements.via(new StringDeadLetterQueueSanitizer()))
              .setCoder(StringUtf8Coder.of());

      dlqWindowing.apply(
          "DLQ: Write Transform Failure File(s)",
          TextIO.write()
              .withWindowedWrites()
              .withNumShards(20)
              .to(
                  WindowedFilenamePolicy.writeWindowedFiles()
                      .withOutputDirectory(gcsOutputDateTimeDirectory)
                      .withOutputFilenamePrefix("error")
                      .withShardTemplate("-SSSSS-of-NNNNN")
                      .withSuffix(".json"))
              .withTempDirectory(
                  FileBasedSink.convertToFileResourceIfPossible(
                      gcsOutputDateTimeDirectory + "tmp/")));

    } else {
      PCollection<FailsafeElement<String, String>> failedInserts =
          BigQueryIOUtils.writeResultToBigQueryInsertErrors(writeResult, options)
              .apply(
                  "WrapInsertionErrors",
                  MapElements.into(FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor())
                      .via(
                          (BigQueryInsertError e) -> BigQueryConverters.wrapBigQueryInsertError(e)))
              .setCoder(FAILSAFE_ELEMENT_CODER);

      /*
       * Step #4: Write records that failed table row transformation
       * or conversion out to BigQuery deadletter table.
       */
      PCollectionList.of(
              ImmutableList.of(
                  convertedTableRows.get(failsafeTableRowTransformer.udfDeadletterOut),
                  convertedTableRows.get(failsafeTableRowTransformer.transformDeadletterOut)))
          .apply("Flatten", Flatten.pCollections())
          .apply(
              "WriteFailedRecords",
              ErrorConverters.WriteStringMessageErrors.newBuilder()
                  .setErrorRecordsTable(
                      BigQueryConverters.maybeUseDefaultDeadletterTable(
                          options.getOutputDeadletterTable(),
                          bqConfigManager.getOutputTableSpec(),
                          DEFAULT_DEADLETTER_TABLE_SUFFIX))
                  .setErrorRecordsTableSchema(ResourceUtils.getDeadletterTableSchemaJson())
                  .build());

      // 5) Insert records that failed insert into deadletter table
      failedInserts.apply(
          "WriteFailedInsertRecords",
          ErrorConverters.WriteStringMessageErrors.newBuilder()
              .setErrorRecordsTable(
                  BigQueryConverters.maybeUseDefaultDeadletterTable(
                      options.getOutputDeadletterTable(),
                      bqConfigManager.getOutputTableSpec(),
                      DEFAULT_DEADLETTER_TABLE_SUFFIX))
              .setErrorRecordsTableSchema(ResourceUtils.getDeadletterTableSchemaJson())
              .build());
    }

    return pipeline.run();
  }

  /**
   * Builds the {@link DeadLetterQueueManager} for the GCS dead-letter queue, if one is configured.
   *
   * <p>When {@code deadLetterQueueDirectory} is set but empty, the DLQ is rooted at {@code
   * <tempLocation>/dlq/} instead.
   *
   * @param options the execution options carrying the DLQ configuration
   * @return the configured manager, or {@code null} when no DLQ directory option was supplied
   */
  private static DeadLetterQueueManager buildDlqManager(Options options) {
    if (options.getDeadLetterQueueDirectory() == null) {
      return null;
    }

    // Options extends PipelineOptions, so tempLocation is accessible directly.
    String tempLocation = options.getTempLocation();
    if (!tempLocation.endsWith("/")) {
      tempLocation = tempLocation + "/";
    }

    String dlqDirectory =
        options.getDeadLetterQueueDirectory().isEmpty()
            ? tempLocation + "dlq/"
            : options.getDeadLetterQueueDirectory();

    return DeadLetterQueueManager.create(dlqDirectory);
  }
}
